Cleanup simulator and add usage instructions (#1050)

* cleanup simulator files

* minor updates

* update readme

* keras runner builds

* hmm, still doesn't work

* keras runner works

* should work with python3 keras mod

* touchups
George Hotz, committed via GitHub 5 years ago
parent 6b1506740e
commit c50c718293
21 changed files:

 external/tensorflow/.gitignore           |   4
 external/tensorflow/download.sh          |  11
 models/supercombo.dlc.pb                 |   3
 selfdrive/modeld/SConscript              |  12
 selfdrive/modeld/models/dmonitoring.cc   |   2
 selfdrive/modeld/models/driving.cc       |   2
 selfdrive/modeld/runners/keras_runner.py |  53
 selfdrive/modeld/runners/runmodel.h      |   2
 selfdrive/modeld/runners/snpemodel.cc    |   2
 selfdrive/modeld/runners/snpemodel.h     |   2
 selfdrive/modeld/runners/tfmodel.cc      | 196
 selfdrive/modeld/runners/tfmodel.h       |  24
 tools/sim/.gitignore                     |   1
 tools/sim/README                         |  24
 tools/sim/bridge.py                      |  13
 tools/sim/get.sh                         |  10
 tools/sim/lib/__init__.py                |   0
 tools/sim/lib/can.py                     |   0
 tools/sim/lib/replay.sh                  |   0
 tools/sim/start.sh                       |   4
 tools/sim/start_carla.sh                 |  19
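
In short: the in-process TensorFlow C API runner (and the libtensorflow download it needed) is replaced by a small Keras subprocess that modeld drives over pipes, RunModel::execute() grows an explicit buffer-size argument to support that, and the CARLA tooling gains a README plus a single idempotent install/start script.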

--- a/external/tensorflow/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-*.tar.gz
-include
-lib
-*LICENSE*

--- a/external/tensorflow/download.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-TF=libtensorflow-gpu-linux-x86_64-1.13.1.tar.gz
-#TF=libtensorflow-gpu-linux-x86_64-1.14.0.tar.gz
-#TF=libtensorflow-gpu-linux-x86_64-1.15.0.tar.gz
-if [ ! -f $TF ]; then
-  wget https://storage.googleapis.com/tensorflow/libtensorflow/$TF
-fi
-rm -rf include lib
-tar xvf $TF

--- a/models/supercombo.dlc.pb
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ba0738235e5c11f313f421afbc3f04af46eb87d5a562cd88fc40e896170e0e11
-size 27387595

--- a/selfdrive/modeld/SConscript
+++ b/selfdrive/modeld/SConscript
@@ -14,13 +14,11 @@ if arch == "aarch64":
 else:
   libs += ['symphony-cpu', 'pthread']
 
-if FindFile('libtensorflow.so', env['LIBPATH']):
-  # for tensorflow support
-  common_src += ['runners/tfmodel.cc']
-  libs += ['tensorflow']
-  # tell runners to use it
-  lenv['CFLAGS'].append("-DUSE_TF_MODEL")
-  lenv['CXXFLAGS'].append("-DUSE_TF_MODEL")
+# for tensorflow support
+common_src += ['runners/tfmodel.cc']
+# tell runners to use it
+lenv['CFLAGS'].append("-DUSE_TF_MODEL")
+lenv['CXXFLAGS'].append("-DUSE_TF_MODEL")
 
 common = lenv.Object(common_src)
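
With the runner now a subprocess, nothing in modeld links against libtensorflow anymore, so the FindFile('libtensorflow.so') guard can go away: tfmodel.cc is compiled, and USE_TF_MODEL defined, unconditionally.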

--- a/selfdrive/modeld/models/dmonitoring.cc
+++ b/selfdrive/modeld/models/dmonitoring.cc
@@ -108,7 +108,7 @@ DMonitoringResult dmonitoring_eval_frame(DMonitoringModelState* s, void* stream_
   delete[] cropped_buf;
   delete[] resized_buf;
 
-  s->m->execute(net_input_buf);
+  s->m->execute(net_input_buf, yuv_buf_len);
   delete[] net_input_buf;
 
   DMonitoringResult ret = {0};

--- a/selfdrive/modeld/models/driving.cc
+++ b/selfdrive/modeld/models/driving.cc
@@ -70,7 +70,7 @@ ModelDataRaw model_eval_frame(ModelState* s, cl_command_queue q,
   float *new_frame_buf = frame_prepare(&s->frame, q, yuv_cl, width, height, transform);
   memmove(&s->input_frames[0], &s->input_frames[MODEL_FRAME_SIZE], sizeof(float)*MODEL_FRAME_SIZE);
   memmove(&s->input_frames[MODEL_FRAME_SIZE], new_frame_buf, sizeof(float)*MODEL_FRAME_SIZE);
-  s->m->execute(s->input_frames);
+  s->m->execute(s->input_frames, MODEL_FRAME_SIZE*2);
 
 #ifdef DUMP_YUV
   FILE *dump_yuv_file = fopen("/sdcard/dump.yuv", "wb");

--- /dev/null
+++ b/selfdrive/modeld/runners/keras_runner.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+# TODO: why are the keras models saved with python 2?
+from __future__ import print_function
+import tensorflow as tf
+import os
+import sys
+import tensorflow.keras as keras
+import numpy as np
+from tensorflow.keras.models import Model
+from tensorflow.keras.models import load_model
+
+def read(sz):
+  dd = []
+  gt = 0
+  while gt < sz*4:
+    st = os.read(0, sz*4 - gt)
+    assert(len(st) > 0)
+    dd.append(st)
+    gt += len(st)
+  return np.fromstring(b''.join(dd), dtype=np.float32)
+
+def write(d):
+  os.write(1, d.tobytes())
+
+def run_loop(m):
+  isize = m.inputs[0].shape[1]
+  osize = m.outputs[0].shape[1]
+  print("ready to run keras model %d -> %d" % (isize, osize), file=sys.stderr)
+  while 1:
+    idata = read(isize).reshape((1, isize))
+    ret = m.predict_on_batch(idata)
+    write(ret)
+
+if __name__ == "__main__":
+  print(tf.__version__, file=sys.stderr)
+  m = load_model(sys.argv[1])
+  print(m, file=sys.stderr)
+  bs = [int(np.product(ii.shape[1:])) for ii in m.inputs]
+  ri = keras.layers.Input((sum(bs),))
+
+  tii = []
+  acc = 0
+  for i, ii in enumerate(m.inputs):
+    print(ii, file=sys.stderr)
+    ti = keras.layers.Lambda(lambda x: x[:,acc:acc+bs[i]], output_shape=(1, bs[i]))(ri)
+    acc += bs[i]
+    tr = keras.layers.Reshape(ii.shape[1:])(ti)
+    tii.append(tr)
+
+  no = keras.layers.Concatenate()(m(tii))
+  m = Model(inputs=ri, outputs=[no])
+  run_loop(m)
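
Worth spelling out, since both ends of the pipe must agree: the runner speaks raw float32 over stdin/stdout, with no framing beyond the sizes both sides already know. A minimal host-side sketch of that protocol (the model path and ISIZE/OSIZE here are made-up placeholders, not values from this commit; the real host is tfmodel.cc below):

#!/usr/bin/env python3
# sketch: drive keras_runner.py over pipes, the way tfmodel.cc does
import subprocess
import numpy as np

ISIZE, OSIZE = 6120, 1760  # placeholder sizes; the runner logs the real ones to stderr

proc = subprocess.Popen(["./keras_runner.py", "model.keras"],
                        stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# one flattened input vector (vision frame + desire + rnn state, concatenated)
proc.stdin.write(np.zeros(ISIZE, dtype=np.float32).tobytes())
proc.stdin.flush()
# read back exactly OSIZE floats, looping because pipe reads can come back short
buf = b""
while len(buf) < OSIZE * 4:
    chunk = proc.stdout.read(OSIZE * 4 - len(buf))
    assert chunk
    buf += chunk
print(np.frombuffer(buf, dtype=np.float32)[:8])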

--- a/selfdrive/modeld/runners/runmodel.h
+++ b/selfdrive/modeld/runners/runmodel.h
@@ -5,7 +5,7 @@ class RunModel {
 public:
   virtual void addRecurrent(float *state, int state_size) {}
   virtual void addDesire(float *state, int state_size) {}
-  virtual void execute(float *net_input_buf) {}
+  virtual void execute(float *net_input_buf, int buf_size) {}
 };
 
 #endif

--- a/selfdrive/modeld/runners/snpemodel.cc
+++ b/selfdrive/modeld/runners/snpemodel.cc
@@ -117,7 +117,7 @@ std::unique_ptr<zdl::DlSystem::IUserBuffer> SNPEModel::addExtra(float *state, in
   return ret;
 }
 
-void SNPEModel::execute(float *net_input_buf) {
+void SNPEModel::execute(float *net_input_buf, int buf_size) {
   assert(inputBuffer->setBufferAddress(net_input_buf));
   if (!snpe->execute(inputMap, outputMap)) {
     PrintErrorStringAndExit();

--- a/selfdrive/modeld/runners/snpemodel.h
+++ b/selfdrive/modeld/runners/snpemodel.h
@@ -25,7 +25,7 @@ public:
   }
   void addRecurrent(float *state, int state_size);
   void addDesire(float *state, int state_size);
-  void execute(float *net_input_buf);
+  void execute(float *net_input_buf, int buf_size);
 private:
   uint8_t *model_data = NULL;

--- a/selfdrive/modeld/runners/tfmodel.cc
+++ b/selfdrive/modeld/runners/tfmodel.cc
@@ -1,160 +1,98 @@
 #include "tfmodel.h"
+#include <stdio.h>
 #include <string>
 #include <string.h>
+#include <signal.h>
+#include <unistd.h>
 #include <stdlib.h>
 #include <stdexcept>
 #include "common/util.h"
+#include "common/utilpp.h"
 #include "common/swaglog.h"
 #include <cassert>
 
-void TFModel::status_check() const {
-  if (TF_GetCode(this->status) != TF_OK) {
-    throw std::runtime_error(TF_Message(status));
-  }
-}
-
-TF_Tensor *TFModel::allocate_tensor_for_output(TF_Output out, float *dat) {
-  int num_dims = TF_GraphGetTensorNumDims(graph, out, status);
-  status_check();
-  int64_t *dims = new int64_t[num_dims];
-  TF_GraphGetTensorShape(graph, out, dims, num_dims, status);
-  status_check();
-  dims[0] = 1;
-  int total = 1;
-  for (int i = 0; i < num_dims; i++) total *= dims[i];
-  //printf("dims %d total %d wdat %p\n", num_dims, total, dat);
-  // don't deallocate the buffers
-  auto d = [](void* ddata, size_t, void* arg) {};
-  TF_Tensor *ret = TF_NewTensor(TF_FLOAT, dims, num_dims, (void*)dat, sizeof(float)*total, d, NULL);
-  //TF_Tensor *ret = TF_AllocateTensor(TF_FLOAT, dims, num_dims, sizeof(float)*total);
-  //memcpy(TF_TensorData(ret), dat, sizeof(float)*total);
-  assert(ret);
-  delete[] dims;
-  return ret;
-}
-
 TFModel::TFModel(const char *path, float *_output, size_t _output_size, int runtime) {
-  // load model
-  {
-    TF_Buffer* buf;
-    size_t model_size;
-    char tmp[1024];
-    snprintf(tmp, sizeof(tmp), "%s.pb", path);
-    LOGD("loading model %s", tmp);
-    uint8_t *model_data = (uint8_t *)read_file(tmp, &model_size);
-    assert(model_data);
-    buf = TF_NewBuffer();
-    buf->data = model_data;
-    buf->length = model_size;
-    buf->data_deallocator = [](void *data, size_t) { free(data); };
-    LOGD("loaded model of size %d", model_size);
-
-    // import graph
-    status = TF_NewStatus();
-    graph = TF_NewGraph();
-    TF_ImportGraphDefOptions *opts = TF_NewImportGraphDefOptions();
-    // TODO: fix the GPU, currently it hangs if you set this to /gpu:0
-    //TF_ImportGraphDefOptionsSetDefaultDevice(opts, "/cpu:0");
-    TF_GraphImportGraphDef(graph, buf, opts, status);
-    TF_DeleteImportGraphDefOptions(opts);
-    TF_DeleteBuffer(buf);
-    status_check();
-    LOGD("imported graph");
-  }
-
-  // set up session
-  TF_SessionOptions* sess_opts = TF_NewSessionOptions();
-
-  // don't use all GPU memory
-  /*uint8_t config[15] = {0x32, 0xb, 0x9, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x20, 0x1, 0x38, 0x1};
-  double gpu_memory_fraction = 0.2;
-  auto bytes = reinterpret_cast<std::uint8_t*>(&gpu_memory_fraction);
-  for (std::size_t i = 0; i < sizeof(gpu_memory_fraction); ++i) {
-    config[i + 3] = bytes[i];
-  }
-  TF_SetConfig(sess_opts, config, sizeof(config), status);
-  status_check();*/
-
-  // make session
-  session = TF_NewSession(graph, sess_opts, status);
-  TF_DeleteSessionOptions(sess_opts);
-  status_check();
-
-  // find tensors
-  // TODO: make this generic
-  input_operation = {TF_GraphOperationByName(graph, "lambda/div"), 0};
-  if (input_operation.oper == NULL) {
-    input_operation = {TF_GraphOperationByName(graph, "vision_lambda/div"), 0};
-  }
-  assert(input_operation.oper != NULL);
-  output_operation = {TF_GraphOperationByName(graph, "outputs/outputs/Identity"), 0};
-  if (output_operation.oper == NULL) {
-    output_operation = {TF_GraphOperationByName(graph, "outputs/concat"), 0};
-  }
-  assert(output_operation.oper != NULL);
-
-  // output tensor is good to bind now
-  output = _output;
-  output_size = _output_size;
+  output = _output;
+  output_size = _output_size;
+
+  char tmp[1024];
+  strncpy(tmp, path, sizeof(tmp));
+  strstr(tmp, ".dlc")[0] = '\0';
+  strcat(tmp, ".keras");
+  LOGD("loading model %s", tmp);
+
+  assert(pipe(pipein) == 0);
+  assert(pipe(pipeout) == 0);
+
+  std::string exe_dir = util::dir_name(util::readlink("/proc/self/exe"));
+  std::string keras_runner = exe_dir + "/runners/keras_runner.py";
+
+  proc_pid = fork();
+  if (proc_pid == 0) {
+    LOGD("spawning keras process %s", keras_runner.c_str());
+    char *argv[] = {(char*)keras_runner.c_str(), tmp, NULL};
+    dup2(pipein[0], 0);
+    dup2(pipeout[1], 1);
+    close(pipein[0]);
+    close(pipein[1]);
+    close(pipeout[0]);
+    close(pipeout[1]);
+    execvp(keras_runner.c_str(), argv);
+  }
+
+  // parent
+  close(pipein[0]);
+  close(pipeout[1]);
 }
 
 TFModel::~TFModel() {
-  TF_DeleteSession(session, status);
-  status_check();
-  TF_DeleteGraph(graph);
-  TF_DeleteStatus(status);
+  close(pipein[1]);
+  close(pipeout[0]);
+  kill(proc_pid, SIGTERM);
+}
+
+void TFModel::pwrite(float *buf, int size) {
+  char *cbuf = (char *)buf;
+  int tw = size*sizeof(float);
+  while (tw > 0) {
+    int err = write(pipein[1], cbuf, tw);
+    //printf("host write %d\n", err);
+    assert(err >= 0);
+    cbuf += err;
+    tw -= err;
+  }
+  //printf("host write done\n");
+}
+
+void TFModel::pread(float *buf, int size) {
+  char *cbuf = (char *)buf;
+  int tr = size*sizeof(float);
+  while (tr > 0) {
+    int err = read(pipeout[0], cbuf, tr);
+    //printf("host read %d/%d\n", err, tr);
+    assert(err >= 0);
+    cbuf += err;
+    tr -= err;
+  }
+  //printf("host read done\n");
 }
 
 void TFModel::addRecurrent(float *state, int state_size) {
-  rnn_operation.oper = TF_GraphOperationByName(graph, "rnn_state");
-  rnn_operation.index = 0;
-  assert(rnn_operation.oper != NULL);
   rnn_input_buf = state;
+  rnn_state_size = state_size;
 }
 
 void TFModel::addDesire(float *state, int state_size) {
-  desire_operation.oper = TF_GraphOperationByName(graph, "desire");
-  desire_operation.index = 0;
-  assert(desire_operation.oper != NULL);
   desire_input_buf = state;
+  desire_state_size = state_size;
 }
 
-void TFModel::execute(float *net_input_buf) {
-  TF_Tensor *input_tensor = allocate_tensor_for_output(input_operation, net_input_buf);
-  assert(input_tensor);
-  TF_Tensor *output_tensor = NULL;
-
-  if (rnn_input_buf == NULL) {
-    TF_SessionRun(session, NULL,
-                  &input_operation, &input_tensor, 1,
-                  &output_operation, &output_tensor, 1,
-                  NULL, 0, NULL, status);
-  } else {
-    //printf("%f %f %f\n", net_input_buf[0], rnn_input_buf[0], desire_input_buf[0]);
-    TF_Tensor *rnn_tensor = allocate_tensor_for_output(rnn_operation, rnn_input_buf);
-    TF_Tensor *desire_tensor = allocate_tensor_for_output(desire_operation, desire_input_buf);
-    TF_Output io[] = {input_operation, rnn_operation, desire_operation};
-    TF_Tensor* id[] = {input_tensor, rnn_tensor, desire_tensor};
-    TF_SessionRun(session, NULL,
-                  io, id, 3,
-                  &output_operation, &output_tensor, 1,
-                  NULL, 0, NULL, status);
-    TF_DeleteTensor(rnn_tensor);
-    TF_DeleteTensor(desire_tensor);
-  }
-  TF_DeleteTensor(input_tensor);
-  status_check();
-  assert(output_tensor);
-  memcpy((void*)output, TF_TensorData(output_tensor), output_size*sizeof(float));
-  TF_DeleteTensor(output_tensor);
+void TFModel::execute(float *net_input_buf, int buf_size) {
+  // order must be this
+  pwrite(net_input_buf, buf_size);
+  pwrite(desire_input_buf, desire_state_size);
+  pwrite(rnn_input_buf, rnn_state_size);
+  pread(output, output_size);
 }
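
Note that execute() writes the three buffers in a fixed order (vision input, then desire, then rnn state), which has to line up with the single concatenated input the wrapper Model in keras_runner.py builds from m.inputs — presumably the saved model's input order matches. The "order must be this" comment is the only contract between the two processes.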

--- a/selfdrive/modeld/runners/tfmodel.h
+++ b/selfdrive/modeld/runners/tfmodel.h
@@ -4,8 +4,6 @@
 #include <stdlib.h>
 #include "runmodel.h"
-#include "tensorflow/c/c_api.h"
-
 struct TFState;
 
 class TFModel : public RunModel {
@@ -14,25 +12,23 @@ public:
   ~TFModel();
   void addRecurrent(float *state, int state_size);
   void addDesire(float *state, int state_size);
-  void execute(float *net_input_buf);
+  void execute(float *net_input_buf, int buf_size);
 private:
-  void status_check() const;
-  TF_Tensor *allocate_tensor_for_output(TF_Output out, float *dat);
+  int proc_pid;
 
   float *output;
   size_t output_size;
 
-  TF_Session* session;
-  TF_Graph* graph;
-  TF_Status* status;
-
-  TF_Output input_operation;
-  TF_Output rnn_operation;
-  TF_Output desire_operation;
-  TF_Output output_operation;
-
   float *rnn_input_buf = NULL;
+  int rnn_state_size;
   float *desire_input_buf = NULL;
+  int desire_state_size;
+
+  // pipe to communicate to keras subprocess
+  void pread(float *buf, int size);
+  void pwrite(float *buf, int size);
+  int pipein[2];
+  int pipeout[2];
 };
 
 #endif

--- a/tools/sim/.gitignore
+++ b/tools/sim/.gitignore
@@ -1,3 +1,4 @@
 CARLA_*.tar.gz
 carla
+carla_tmp

--- /dev/null
+++ b/tools/sim/README
@@ -0,0 +1,24 @@
+Needs Ubuntu 16.04
+
+== Checkout openpilot ==
+cd ~/
+git clone https://github.com/commaai/openpilot.git
+
+# Add export PYTHONPATH=$HOME/openpilot to your bashrc
+# Have a working tensorflow+keras in python2
+
+== Install (in tab 1) ==
+cd ~/openpilot/tools/sim
+./start_carla.sh  # install CARLA 0.9.7 and start the server
+
+== openpilot (in tab 2) ==
+cd ~/openpilot/selfdrive/
+PASSIVE=0 NOBOARD=1 ./manager.py
+
+== bridge (in tab 3) ==
+# links carla to openpilot, will "start the car" according to manager
+cd ~/openpilot/tools/sim
+./bridge.py
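
Per the bridge.py change below, the bridge also grew an --autopilot flag that hands the spawned vehicle to CARLA's built-in autopilot instead of openpilot, handy for sanity-checking the CARLA side alone:

cd ~/openpilot/tools/sim
./bridge.py --autopilot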

--- a/tools/sim/bridge.py
+++ b/tools/sim/bridge.py
@@ -7,11 +7,16 @@ import numpy as np
 import threading
 import random
 import cereal.messaging as messaging
+import argparse
 from common.params import Params
 from common.realtime import Ratekeeper
-from can import can_function, sendcan_function
+from lib.can import can_function, sendcan_function
 import queue
 
+parser = argparse.ArgumentParser(description='Bridge between CARLA and openpilot.')
+parser.add_argument('--autopilot', action='store_true')
+args = parser.parse_args()
+
 pm = messaging.PubMaster(['frame', 'sensorEvents', 'can'])
 
 W,H = 1164, 874
@@ -99,7 +104,9 @@ def go():
   vehicle_bp = random.choice(blueprint_library.filter('vehicle.bmw.*'))
   vehicle = world.spawn_actor(vehicle_bp, random.choice(world_map.get_spawn_points()))
-  #vehicle.set_autopilot(True)
+
+  if args.autopilot:
+    vehicle.set_autopilot(True)
 
   blueprint = blueprint_library.find('sensor.camera.rgb')
   blueprint.set_attribute('image_size_x', str(W))
@@ -123,7 +130,6 @@ def go():
     print("done")
   atexit.register(destroy)
-
   # can loop
   sendcan = messaging.sub_sock('sendcan')
   rk = Ratekeeper(100)
@@ -148,6 +154,7 @@ if __name__ == "__main__":
   from selfdrive.version import terms_version, training_version
   params.put("HasAcceptedTerms", terms_version)
   params.put("CompletedTrainingVersion", training_version)
+  params.put("CommunityFeaturesToggle", "1")
 
   threading.Thread(target=health_function).start()
   threading.Thread(target=fake_driver_monitoring).start()

--- a/tools/sim/get.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash -e
-FILE=CARLA_0.9.7.tar.gz
-if [ ! -f $FILE ]; then
-  curl -O http://carla-assets-internal.s3.amazonaws.com/Releases/Linux/$FILE
-fi
-
-mkdir -p carla
-cd carla
-tar xvf ../$FILE
-easy_install PythonAPI/carla/dist/carla-0.9.7-py3.5-linux-x86_64.egg

--- a/tools/sim/start.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-cd carla
-./CarlaUE4.sh

--- /dev/null
+++ b/tools/sim/start_carla.sh
@@ -0,0 +1,19 @@
+#!/bin/bash -e
+FILE=CARLA_0.9.7.tar.gz
+if [ ! -f $FILE ]; then
+  curl -O http://carla-assets-internal.s3.amazonaws.com/Releases/Linux/$FILE
+fi
+
+if [ ! -d carla ]; then
+  rm -rf carla_tmp
+  mkdir -p carla_tmp
+  cd carla_tmp
+  tar xvf ../$FILE
+  easy_install PythonAPI/carla/dist/carla-0.9.7-py3.5-linux-x86_64.egg
+  cd ../
+  mv carla_tmp carla
+fi
+
+cd carla
+./CarlaUE4.sh
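
start_carla.sh folds the deleted get.sh/start.sh pair into one idempotent script: the tarball is unpacked into carla_tmp and only renamed to carla after extraction and the egg install succeed, so an interrupted download or install can't leave a half-populated carla directory behind.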