add webcam to cameras (#1201)

old-commit-hash: a95e61edf4
branch: commatwo_master
ZwX1616 authored 5 years ago, committed by GitHub
parent 3fa73442cc
commit a004735b19
19 changed files:

1. SConstruct (6 changes)
2. selfdrive/camerad/SConscript (10 changes)
3. selfdrive/camerad/cameras/camera_common.h (4 changes)
4. selfdrive/camerad/cameras/camera_frame_stream.cc (2 changes)
5. selfdrive/camerad/cameras/camera_webcam.cc (269 changes)
6. selfdrive/camerad/cameras/camera_webcam.h (58 changes)
7. selfdrive/camerad/main.cc (35 changes)
8. selfdrive/camerad/transforms/rgb_to_yuv.c (1 change)
9. selfdrive/controls/controlsd.py (2 changes)
10. selfdrive/manager.py (5 changes)
11. selfdrive/modeld/models/dmonitoring.cc (38 changes)
12. selfdrive/modeld/runners/keras_runner.py (5 changes)
13. selfdrive/modeld/runners/tfmodel.cc (4 changes)
14. tools/sim/bridge.py (5 changes)
15. tools/sim/lib/can.py (3 changes)
16. tools/webcam/README.md (43 changes)
17. tools/webcam/accept_terms.py (9 changes)
18. tools/webcam/front_mount_helper.py (35 changes)
19. tools/webcam/warp_vis.py (45 changes)

SConstruct
@@ -15,6 +15,8 @@ arch = subprocess.check_output(["uname", "-m"], encoding='utf8').rstrip()
if platform.system() == "Darwin":
arch = "Darwin"
webcam = bool(ARGUMENTS.get("use_webcam", 0))
if arch == "aarch64":
lenv = {
"LD_LIBRARY_PATH": '/data/data/com.termux/files/usr/lib',
@@ -117,7 +119,7 @@ env = Environment(
"#phonelibs/json11",
"#phonelibs/eigen",
"#phonelibs/curl/include",
"#phonelibs/opencv/include",
#"#phonelibs/opencv/include", # use opencv4 instead
"#phonelibs/libgralloc/include",
"#phonelibs/android_frameworks_native/include",
"#phonelibs/android_hardware_libhardware/include",
@@ -178,7 +180,7 @@ def abspath(x):
#zmq = 'zmq'
# still needed for apks
zmq = FindFile("libzmq.a", libpath)
Export('env', 'arch', 'zmq', 'SHARED')
Export('env', 'arch', 'zmq', 'SHARED', 'webcam')
# cereal and messaging are shared with the system
SConscript(['cereal/SConscript'])

selfdrive/camerad/SConscript
@@ -1,4 +1,4 @@
Import('env', 'arch', 'messaging', 'common', 'gpucommon', 'visionipc', 'cereal')
Import('env', 'arch', 'messaging', 'common', 'gpucommon', 'visionipc', 'cereal', 'webcam')
libs = ['m', 'pthread', common, 'jpeg', 'json', cereal, 'OpenCL', messaging, 'czmq', 'zmq', 'capnp', 'kj', 'capnp_c', visionipc, gpucommon]
@@ -6,6 +6,14 @@ if arch == "aarch64":
libs += ['gsl', 'CB', 'adreno_utils', 'EGL', 'GLESv3', 'cutils', 'ui']
cameras = ['cameras/camera_qcom.c']
else:
if webcam:
libs += ['opencv_core', 'opencv_highgui', 'opencv_imgproc', 'opencv_videoio']
cameras = ['cameras/camera_webcam.cc']
env = env.Clone()
env.Append(CXXFLAGS = '-DWEBCAM')
env.Append(CFLAGS = '-DWEBCAM')
env.Append(CPPPATH = '/usr/local/include/opencv4')
else:
libs += []
cameras = ['cameras/camera_frame_stream.cc']
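Note: the new `webcam` build flag flows from the scons command line through an Export/Import pair before it reaches this SConscript. A minimal sketch of that plumbing, with the surrounding build logic stripped away (simplified, not the complete build files):

```python
# SConstruct (sketch): read the flag from `scons use_webcam=1`
webcam = bool(ARGUMENTS.get("use_webcam", 0))
Export('env', 'arch', 'zmq', 'SHARED', 'webcam')
SConscript(['selfdrive/camerad/SConscript'])

# selfdrive/camerad/SConscript (sketch): pick the camera backend
Import('env', 'arch', 'webcam')
if arch == "aarch64":
    cameras = ['cameras/camera_qcom.c']           # EON hardware cameras
elif webcam:
    env = env.Clone()                             # keep OpenCV flags local to camerad
    env.Append(CXXFLAGS='-DWEBCAM', CFLAGS='-DWEBCAM',
               CPPPATH='/usr/local/include/opencv4')
    cameras = ['cameras/camera_webcam.cc']        # OpenCV VideoCapture backend
else:
    cameras = ['cameras/camera_frame_stream.cc']  # frames arrive over messaging
```

The `env.Clone()` matters here: appending `-DWEBCAM` and the opencv4 include path to a clone keeps them from leaking into every other target that shares the environment.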

selfdrive/camerad/cameras/camera_common.h
@@ -10,7 +10,9 @@
#define CAMERA_ID_OV8865 3
#define CAMERA_ID_IMX298_FLIPPED 4
#define CAMERA_ID_OV10640 5
#define CAMERA_ID_MAX 6
#define CAMERA_ID_LGC920 6
#define CAMERA_ID_LGC615 7
#define CAMERA_ID_MAX 8
#ifdef __cplusplus
extern "C" {

selfdrive/camerad/cameras/camera_frame_stream.cc
@@ -115,7 +115,7 @@ CameraInfo cameras_supported[CAMERA_ID_MAX] = {
.frame_width = 1632,
.frame_height = 1224,
.frame_stride = 2040, // seems right
.bayer = false,
.bayer = true,
.bayer_flip = 3,
.hdr = false
},

selfdrive/camerad/cameras/camera_webcam.cc
@@ -0,0 +1,269 @@
#include "camera_webcam.h"
#include <unistd.h>
#include <string.h>
#include <signal.h>
#include <pthread.h>
#include "common/util.h"
#include "common/timing.h"
#include "common/swaglog.h"
#include "buffering.h"
#include <opencv2/opencv.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core.hpp>
#include <opencv2/videoio.hpp>
extern volatile sig_atomic_t do_exit;
#define FRAME_WIDTH 1164
#define FRAME_HEIGHT 874
#define FRAME_WIDTH_FRONT 1152
#define FRAME_HEIGHT_FRONT 864
namespace {
void camera_open(CameraState *s, VisionBuf *camera_bufs, bool rear) {
assert(camera_bufs);
s->camera_bufs = camera_bufs;
}
void camera_close(CameraState *s) {
tbuffer_stop(&s->camera_tb);
}
void camera_release_buffer(void *cookie, int buf_idx) {
CameraState *s = static_cast<CameraState *>(cookie);
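// no-op: webcam frames are plain memory buffers recycled by the tbuffer, nothing to hand back to a driver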
}
void camera_init(CameraState *s, int camera_id, unsigned int fps) {
assert(camera_id < ARRAYSIZE(cameras_supported));
s->ci = cameras_supported[camera_id];
assert(s->ci.frame_width != 0);
s->frame_size = s->ci.frame_height * s->ci.frame_stride;
s->fps = fps;
tbuffer_init2(&s->camera_tb, FRAME_BUF_COUNT, "frame", camera_release_buffer, s);
}
static void* rear_thread(void *arg) {
int err;
set_thread_name("webcam_rear_thread");
CameraState* s = (CameraState*)arg;
cv::VideoCapture cap_rear(1); // road
cap_rear.set(cv::CAP_PROP_FRAME_WIDTH, 1280);
cap_rear.set(cv::CAP_PROP_FRAME_HEIGHT, 720);
cap_rear.set(cv::CAP_PROP_FPS, s->fps);
cap_rear.set(cv::CAP_PROP_AUTOFOCUS, 0); // off
cap_rear.set(cv::CAP_PROP_FOCUS, 0); // 0 - 255?
// cv::Rect roi_rear(160, 0, 960, 720);
cv::Size size;
size.height = s->ci.frame_height;
size.width = s->ci.frame_width;
// transforms calculation see tools/webcam/warp_vis.py
float ts[9] = {1.00220264, 0.0, -59.40969163,
0.0, 1.00220264, 76.20704846,
0.0, 0.0, 1.0};
const cv::Mat transform = cv::Mat(3, 3, CV_32F, ts);
if (!cap_rear.isOpened()) {
err = 1;
}
uint32_t frame_id = 0;
TBuffer* tb = &s->camera_tb;
while (!do_exit) {
cv::Mat frame_mat;
cv::Mat transformed_mat;
cap_rear >> frame_mat;
// int rows = frame_mat.rows;
// int cols = frame_mat.cols;
// printf("Raw Rear, R=%d, C=%d\n", rows, cols);
cv::warpPerspective(frame_mat, transformed_mat, transform, size, cv::INTER_LINEAR, cv::BORDER_CONSTANT, 0);
int transformed_size = transformed_mat.total() * transformed_mat.elemSize();
const int buf_idx = tbuffer_select(tb);
s->camera_bufs_metadata[buf_idx] = {
.frame_id = frame_id,
};
cl_command_queue q = s->camera_bufs[buf_idx].copy_q;
cl_mem yuv_cl = s->camera_bufs[buf_idx].buf_cl;
cl_event map_event;
void *yuv_buf = (void *)clEnqueueMapBuffer(q, yuv_cl, CL_TRUE,
CL_MAP_WRITE, 0, transformed_size,
0, NULL, &map_event, &err);
assert(err == 0);
clWaitForEvents(1, &map_event);
clReleaseEvent(map_event);
memcpy(yuv_buf, transformed_mat.data, transformed_size);
clEnqueueUnmapMemObject(q, yuv_cl, yuv_buf, 0, NULL, &map_event);
clWaitForEvents(1, &map_event);
clReleaseEvent(map_event);
tbuffer_dispatch(tb, buf_idx);
frame_id += 1;
frame_mat.release();
transformed_mat.release();
}
cap_rear.release();
return NULL;
}
void front_thread(CameraState *s) {
int err;
cv::VideoCapture cap_front(2); // driver
cap_front.set(cv::CAP_PROP_FRAME_WIDTH, 1280);
cap_front.set(cv::CAP_PROP_FRAME_HEIGHT, 720);
cap_front.set(cv::CAP_PROP_FPS, s->fps);
// cv::Rect roi_front(320, 0, 960, 720);
cv::Size size;
size.height = s->ci.frame_height;
size.width = s->ci.frame_width;
// transforms calculation see tools/webcam/warp_vis.py
float ts[9] = {0.94713656, 0.0, -30.16740088,
0.0, 0.94713656, 91.030837,
0.0, 0.0, 1.0};
const cv::Mat transform = cv::Mat(3, 3, CV_32F, ts);
if (!cap_front.isOpened()) {
err = 1;
}
uint32_t frame_id = 0;
TBuffer* tb = &s->camera_tb;
while (!do_exit) {
cv::Mat frame_mat;
cv::Mat transformed_mat;
cap_front >> frame_mat;
// int rows = frame_mat.rows;
// int cols = frame_mat.cols;
// printf("Raw Front, R=%d, C=%d\n", rows, cols);
cv::warpPerspective(frame_mat, transformed_mat, transform, size, cv::INTER_LINEAR, cv::BORDER_CONSTANT, 0);
int transformed_size = transformed_mat.total() * transformed_mat.elemSize();
const int buf_idx = tbuffer_select(tb);
s->camera_bufs_metadata[buf_idx] = {
.frame_id = frame_id,
};
cl_command_queue q = s->camera_bufs[buf_idx].copy_q;
cl_mem yuv_cl = s->camera_bufs[buf_idx].buf_cl;
cl_event map_event;
void *yuv_buf = (void *)clEnqueueMapBuffer(q, yuv_cl, CL_TRUE,
CL_MAP_WRITE, 0, transformed_size,
0, NULL, &map_event, &err);
assert(err == 0);
clWaitForEvents(1, &map_event);
clReleaseEvent(map_event);
memcpy(yuv_buf, transformed_mat.data, transformed_size);
clEnqueueUnmapMemObject(q, yuv_cl, yuv_buf, 0, NULL, &map_event);
clWaitForEvents(1, &map_event);
clReleaseEvent(map_event);
tbuffer_dispatch(tb, buf_idx);
frame_id += 1;
frame_mat.release();
transformed_mat.release();
}
cap_front.release();
return;
}
} // namespace
CameraInfo cameras_supported[CAMERA_ID_MAX] = {
// road facing
[CAMERA_ID_LGC920] = {
.frame_width = FRAME_WIDTH,
.frame_height = FRAME_HEIGHT,
.frame_stride = FRAME_WIDTH*3,
.bayer = false,
.bayer_flip = false,
},
// driver facing
[CAMERA_ID_LGC615] = {
.frame_width = FRAME_WIDTH_FRONT,
.frame_height = FRAME_HEIGHT_FRONT,
.frame_stride = FRAME_WIDTH_FRONT*3,
.bayer = false,
.bayer_flip = false,
},
};
void cameras_init(DualCameraState *s) {
memset(s, 0, sizeof(*s));
camera_init(&s->rear, CAMERA_ID_LGC920, 20);
s->rear.transform = (mat3){{
1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0,
}};
camera_init(&s->front, CAMERA_ID_LGC615, 10);
s->front.transform = (mat3){{
1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0,
}};
}
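// webcams run their own exposure control, so autoexposure below is a no-op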
void camera_autoexposure(CameraState *s, float grey_frac) {}
void cameras_open(DualCameraState *s, VisionBuf *camera_bufs_rear,
VisionBuf *camera_bufs_focus, VisionBuf *camera_bufs_stats,
VisionBuf *camera_bufs_front) {
assert(camera_bufs_rear);
assert(camera_bufs_front);
int err;
// LOG("*** open front ***");
camera_open(&s->front, camera_bufs_front, false);
// LOG("*** open rear ***");
camera_open(&s->rear, camera_bufs_rear, true);
}
void cameras_close(DualCameraState *s) {
camera_close(&s->rear);
camera_close(&s->front);
}
void cameras_run(DualCameraState *s) {
set_thread_name("webcam_thread");
int err;
pthread_t rear_thread_handle;
err = pthread_create(&rear_thread_handle, NULL,
rear_thread, &s->rear);
assert(err == 0);
front_thread(&s->front);
err = pthread_join(rear_thread_handle, NULL);
assert(err == 0);
cameras_close(s);
}
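For readers without an OpenCL setup, here is a minimal Python sketch of what each capture thread above does per frame: grab a 1280x720 image, warp it into the EON camera geometry, then hand the buffer off. The real code memcpys the warped frame into an OpenCL-mapped VisionBuf and dispatches it through the tbuffer; the `imshow` below is only a stand-in for that hand-off.

```python
import cv2
import numpy as np

FRAME_WIDTH, FRAME_HEIGHT = 1164, 874  # EON road-camera geometry

# rear-camera transform from tools/webcam/warp_vis.py (same values as ts[] above)
transform = np.array([[1.00220264, 0.0,        -59.40969163],
                      [0.0,        1.00220264,  76.20704846],
                      [0.0,        0.0,          1.0]], dtype=np.float32)

cap = cv2.VideoCapture(1)  # road-facing webcam, same index as rear_thread
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
cap.set(cv2.CAP_PROP_AUTOFOCUS, 0)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    # map 1280x720 webcam pixels into the 1164x874 EON frame
    warped = cv2.warpPerspective(frame, transform, (FRAME_WIDTH, FRAME_HEIGHT),
                                 flags=cv2.INTER_LINEAR,
                                 borderMode=cv2.BORDER_CONSTANT)
    cv2.imshow('road', warped)  # camerad instead copies warped.data into a VisionBuf
    if cv2.waitKey(1) == 27:    # Esc to quit
        break
cap.release()
```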

selfdrive/camerad/cameras/camera_webcam.h
@@ -0,0 +1,58 @@
#ifndef CAMERA_WEBCAM
#define CAMERA_WEBCAM
#include <stdbool.h>
#ifdef __APPLE__
#include <OpenCL/cl.h>
#else
#include <CL/cl.h>
#endif
#include "common/mat.h"
#include "buffering.h"
#include "common/visionbuf.h"
#include "camera_common.h"
#define FRAME_BUF_COUNT 16
#ifdef __cplusplus
extern "C" {
#endif
typedef struct CameraState {
int camera_id;
CameraInfo ci;
int frame_size;
VisionBuf *camera_bufs;
FrameMetadata camera_bufs_metadata[FRAME_BUF_COUNT];
TBuffer camera_tb;
int fps;
float digital_gain;
float cur_gain_frac;
mat3 transform;
} CameraState;
typedef struct DualCameraState {
int ispif_fd;
CameraState rear;
CameraState front;
} DualCameraState;
void cameras_init(DualCameraState *s);
void cameras_open(DualCameraState *s, VisionBuf *camera_bufs_rear, VisionBuf *camera_bufs_focus, VisionBuf *camera_bufs_stats, VisionBuf *camera_bufs_front);
void cameras_run(DualCameraState *s);
void cameras_close(DualCameraState *s);
void camera_autoexposure(CameraState *s, float grey_frac);
#ifdef __cplusplus
} // extern "C"
#endif
#endif

selfdrive/camerad/main.cc
@@ -4,6 +4,8 @@
#ifdef QCOM
#include "cameras/camera_qcom.h"
#elif WEBCAM
#include "cameras/camera_webcam.h"
#else
#include "cameras/camera_frame_stream.h"
#endif
@@ -160,31 +162,37 @@ void* frontview_thread(void *arg) {
}
int ui_idx = tbuffer_select(&s->ui_front_tb);
int rgb_idx = ui_idx;
FrameMetadata frame_data = s->cameras.front.camera_bufs_metadata[buf_idx];
double t1 = millis_since_boot();
cl_event debayer_event;
if (s->cameras.front.ci.bayer) {
err = clSetKernelArg(s->krnl_debayer_front, 0, sizeof(cl_mem), &s->front_camera_bufs_cl[buf_idx]);
assert(err == 0);
err = clSetKernelArg(s->krnl_debayer_front, 1, sizeof(cl_mem), &s->rgb_front_bufs_cl[ui_idx]);
assert(err == 0);
cl_check_error(err);
err = clSetKernelArg(s->krnl_debayer_front, 1, sizeof(cl_mem), &s->rgb_front_bufs_cl[rgb_idx]);
cl_check_error(err);
float digital_gain = 1.0;
err = clSetKernelArg(s->krnl_debayer_front, 2, sizeof(float), &digital_gain);
assert(err == 0);
cl_event debayer_event;
const size_t debayer_work_size = s->rgb_front_height;
const size_t debayer_work_size = s->rgb_front_height; // doesn't divide evenly, is this okay?
const size_t debayer_local_work_size = 128;
err = clEnqueueNDRangeKernel(q, s->krnl_debayer_front, 1, NULL,
&debayer_work_size, &debayer_local_work_size, 0, 0, &debayer_event);
assert(err == 0);
} else {
assert(s->rgb_front_buf_size >= s->cameras.front.frame_size);
assert(s->rgb_front_stride == s->cameras.front.ci.frame_stride);
err = clEnqueueCopyBuffer(q, s->front_camera_bufs_cl[buf_idx], s->rgb_front_bufs_cl[rgb_idx],
0, 0, s->rgb_front_buf_size, 0, 0, &debayer_event);
assert(err == 0);
}
clWaitForEvents(1, &debayer_event);
clReleaseEvent(debayer_event);
tbuffer_release(&s->cameras.front.camera_tb, buf_idx);
visionbuf_sync(&s->rgb_front_bufs[ui_idx], VISIONBUF_SYNC_FROM_DEVICE);
// set front camera metering target
Message *msg = monitoring_sock->receive(true);
if (msg != NULL) {
@@ -288,6 +296,7 @@ void* frontview_thread(void *arg) {
// send frame event
{
if (s->front_frame_sock != NULL) {
capnp::MallocMessageBuilder msg;
cereal::Event::Builder event = msg.initRoot<cereal::Event>();
event.setLogMonoTime(nanos_since_boot());
@@ -310,6 +319,7 @@ void* frontview_thread(void *arg) {
auto bytes = words.asBytes();
s->front_frame_sock->send((char*)bytes.begin(), bytes.size());
}
}
/*FILE *f = fopen("/tmp/test2", "wb");
printf("%d %d\n", s->rgb_front_height, s->rgb_front_stride);
@@ -421,6 +431,7 @@ void* processing_thread(void *arg) {
// send frame event
{
if (s->frame_sock != NULL) {
capnp::MallocMessageBuilder msg;
cereal::Event::Builder event = msg.initRoot<cereal::Event>();
event.setLogMonoTime(nanos_since_boot());
@@ -437,6 +448,7 @@ void* processing_thread(void *arg) {
framed.setLensErr(frame_data.lens_err);
framed.setLensTruePos(frame_data.lens_true_pos);
framed.setGainFrac(frame_data.gain_frac);
#ifdef QCOM
kj::ArrayPtr<const int16_t> focus_vals(&s->cameras.rear.focus[0], NUM_FOCUS);
kj::ArrayPtr<const uint8_t> focus_confs(&s->cameras.rear.confidence[0], NUM_FOCUS);
@@ -451,7 +463,6 @@ void* processing_thread(void *arg) {
kj::ArrayPtr<const float> transform_vs(&s->yuv_transform.v[0], 9);
framed.setTransform(transform_vs);
if (s->frame_sock != NULL) {
auto words = capnp::messageToFlatArray(msg);
auto bytes = words.asBytes();
s->frame_sock->send((char*)bytes.begin(), bytes.size());
@@ -939,8 +950,14 @@ void init_buffers(VisionState *s) {
tbuffer_init(&s->ui_tb, UI_BUF_COUNT, "rgb");
//assert(s->cameras.front.ci.bayer);
if (s->cameras.front.ci.bayer) {
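// debayering turns each 2x2 Bayer block into one RGB pixel, so the RGB buffers are half-size in each dimension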
s->rgb_front_width = s->cameras.front.ci.frame_width/2;
s->rgb_front_height = s->cameras.front.ci.frame_height/2;
} else {
s->rgb_front_width = s->cameras.front.ci.frame_width;
s->rgb_front_height = s->cameras.front.ci.frame_height;
}
for (int i=0; i<UI_BUF_COUNT; i++) {
VisionImg img = visionimg_alloc_rgb24(s->rgb_front_width, s->rgb_front_height, &s->rgb_front_bufs[i]);

selfdrive/camerad/transforms/rgb_to_yuv.c
@@ -8,6 +8,7 @@
void rgb_to_yuv_init(RGBToYUVState* s, cl_context ctx, cl_device_id device_id, int width, int height, int rgb_stride) {
int err = 0;
memset(s, 0, sizeof(*s));
printf("width %d, height %d, rgb_stride %d\n", width, height, rgb_stride);
assert(width % 2 == 0);
assert(height % 2 == 0);
s->width = width;

selfdrive/controls/controlsd.py
@@ -539,7 +539,7 @@ def controlsd_thread(sm=None, pm=None, can_sock=None):
events.append(create_event('commIssue', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if not sm['pathPlan'].mpcSolutionValid:
events.append(create_event('plannerError', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
if not sm['pathPlan'].sensorValid:
if not sm['pathPlan'].sensorValid and os.getenv("NOSENSOR") is None:
events.append(create_event('sensorDataInvalid', [ET.NO_ENTRY, ET.PERMANENT]))
if not sm['pathPlan'].paramsValid:
events.append(create_event('vehicleModelInvalid', [ET.WARNING]))

selfdrive/manager.py
@@ -11,6 +11,7 @@ import datetime
from common.basedir import BASEDIR, PARAMS
from common.android import ANDROID
WEBCAM = os.getenv("WEBCAM") is not None
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
@@ -211,6 +212,10 @@ car_started_processes = [
'ubloxd',
'locationd',
]
if WEBCAM:
car_started_processes += [
'dmonitoringmodeld',
]
if ANDROID:
car_started_processes += [
'sensord',

selfdrive/modeld/models/dmonitoring.cc
@@ -10,8 +10,19 @@
#define MODEL_HEIGHT 320
#define FULL_W 426
#ifdef QCOM
#define input_lambda(x) (x - 128.f) * 0.0078125f
#else
#define input_lambda(x) x // for non SNPE running platforms, assume keras model instead has lambda layer
#endif
void dmonitoring_init(DMonitoringModelState* s) {
s->m = new DefaultRunModel("../../models/dmonitoring_model_q.dlc", (float*)&s->output, OUTPUT_SIZE, USE_DSP_RUNTIME);
#ifdef QCOM
const char* model_path = "../../models/dmonitoring_model_q.dlc";
#else
const char* model_path = "../../models/dmonitoring_model.dlc";
#endif
s->m = new DefaultRunModel(model_path, (float*)&s->output, OUTPUT_SIZE, USE_DSP_RUNTIME);
}
DMonitoringResult dmonitoring_eval_frame(DMonitoringModelState* s, void* stream_buf, int width, int height) {
@@ -84,27 +95,28 @@ DMonitoringResult dmonitoring_eval_frame(DMonitoringModelState* s, void* stream_
for (int r = 0; r < MODEL_HEIGHT/2; r++) {
for (int c = 0; c < MODEL_WIDTH/2; c++) {
// Y_ul
net_input_buf[(c*MODEL_HEIGHT/2) + r] = (resized_buf[(2*r*resized_width) + (2*c)] - 128.f) * 0.0078125f;
net_input_buf[(c*MODEL_HEIGHT/2) + r] = input_lambda(resized_buf[(2*r*resized_width) + (2*c)]);
// Y_ur
net_input_buf[(c*MODEL_HEIGHT/2) + r + ((MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = (resized_buf[(2*r*resized_width) + (2*c+1)] - 128.f) * 0.0078125f;
net_input_buf[(c*MODEL_HEIGHT/2) + r + ((MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = input_lambda(resized_buf[(2*r*resized_width) + (2*c+1)]);
// Y_dl
net_input_buf[(c*MODEL_HEIGHT/2) + r + (2*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = (resized_buf[(2*r*resized_width+1) + (2*c)] - 128.f) * 0.0078125f;
net_input_buf[(c*MODEL_HEIGHT/2) + r + (2*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = input_lambda(resized_buf[(2*r*resized_width+1) + (2*c)]);
// Y_dr
net_input_buf[(c*MODEL_HEIGHT/2) + r + (3*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = (resized_buf[(2*r*resized_width+1) + (2*c+1)] - 128.f) * 0.0078125f;
net_input_buf[(c*MODEL_HEIGHT/2) + r + (3*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = input_lambda(resized_buf[(2*r*resized_width+1) + (2*c+1)]);
// U
net_input_buf[(c*MODEL_HEIGHT/2) + r + (4*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = (resized_buf[(resized_width*resized_height) + (r*resized_width/2) + c] - 128.f) * 0.0078125f;
net_input_buf[(c*MODEL_HEIGHT/2) + r + (4*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = input_lambda(resized_buf[(resized_width*resized_height) + (r*resized_width/2) + c]);
// V
net_input_buf[(c*MODEL_HEIGHT/2) + r + (5*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = (resized_buf[(resized_width*resized_height) + ((resized_width/2)*(resized_height/2)) + (r*resized_width/2) + c] - 128.f) * 0.0078125f;
net_input_buf[(c*MODEL_HEIGHT/2) + r + (5*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = input_lambda(resized_buf[(resized_width*resized_height) + ((resized_width/2)*(resized_height/2)) + (r*resized_width/2) + c]);
}
}
// FILE *dump_yuv_file = fopen("/sdcard/rawdump.yuv", "wb");
// fwrite(raw_buf, height*width*3/2, sizeof(uint8_t), dump_yuv_file);
// fclose(dump_yuv_file);
//printf("preprocess completed. %d \n", yuv_buf_len);
//FILE *dump_yuv_file = fopen("/tmp/rawdump.yuv", "wb");
//fwrite(raw_buf, height*width*3/2, sizeof(uint8_t), dump_yuv_file);
//fclose(dump_yuv_file);
// FILE *dump_yuv_file2 = fopen("/sdcard/inputdump.yuv", "wb");
// fwrite(net_input_buf, MODEL_HEIGHT*MODEL_WIDTH*3/2, sizeof(float), dump_yuv_file2);
// fclose(dump_yuv_file2);
//FILE *dump_yuv_file2 = fopen("/tmp/inputdump.yuv", "wb");
//fwrite(net_input_buf, MODEL_HEIGHT*MODEL_WIDTH*3/2, sizeof(float), dump_yuv_file2);
//fclose(dump_yuv_file2);
delete[] cropped_buf;
delete[] resized_buf;
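The `input_lambda` macro above is the only real platform split in this file: on QCOM the host normalizes each YUV sample for the SNPE model, while elsewhere the Keras model is assumed to carry an equivalent Lambda layer, so samples pass through untouched. A quick sanity check of the SNPE-side math (0.0078125 is exactly 1/128):

```python
def input_lambda(x):
    # QCOM/SNPE path: map uint8 YUV samples from [0, 255] into [-1.0, 0.9921875]
    return (x - 128.0) * 0.0078125  # 0.0078125 == 1/128

assert input_lambda(0) == -1.0
assert input_lambda(128) == 0.0
assert input_lambda(255) == 127.0 / 128.0  # 0.9921875
```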

selfdrive/modeld/runners/keras_runner.py
@@ -34,6 +34,11 @@ def run_loop(m):
if __name__ == "__main__":
print(tf.__version__, file=sys.stderr)
# limit gram alloc
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=2048)])
m = load_model(sys.argv[1])
print(m, file=sys.stderr)
bs = [int(np.product(ii.shape[1:])) for ii in m.inputs]

selfdrive/modeld/runners/tfmodel.cc
@@ -91,8 +91,12 @@ void TFModel::addDesire(float *state, int state_size) {
void TFModel::execute(float *net_input_buf, int buf_size) {
// order must be this
pwrite(net_input_buf, buf_size);
if (desire_input_buf != NULL) {
pwrite(desire_input_buf, desire_state_size);
}
if (rnn_input_buf != NULL) {
pwrite(rnn_input_buf, rnn_state_size);
}
pread(output, output_size);
}

tools/sim/bridge.py
@@ -19,6 +19,7 @@ from selfdrive.car.honda.values import CruiseButtons
parser = argparse.ArgumentParser(description='Bridge between CARLA and openpilot.')
parser.add_argument('--autopilot', action='store_true')
parser.add_argument('--joystick', action='store_true')
parser.add_argument('--realmonitoring', action='store_true')
args = parser.parse_args()
pm = messaging.PubMaster(['frame', 'sensorEvents', 'can'])
@@ -68,6 +69,8 @@ def health_function():
rk.keep_time()
def fake_driver_monitoring():
if args.realmonitoring:
return
pm = messaging.PubMaster(['driverState'])
while 1:
dat = messaging.new_message('driverState')
@@ -198,7 +201,7 @@ def go(q):
vel = vehicle.get_velocity()
speed = math.sqrt(vel.x**2 + vel.y**2 + vel.z**2) * 3.6
can_function(pm, speed, fake_wheel.angle, rk.frame, cruise_button=cruise_button)
can_function(pm, speed, fake_wheel.angle, rk.frame, cruise_button=cruise_button, is_engaged=is_openpilot_engaged)
if rk.frame%1 == 0: # 20Hz?
throttle_op, brake_op, steer_torque_op = sendcan_function(sendcan)

tools/sim/lib/can.py
@@ -20,7 +20,7 @@ SR = 7.5
def angle_to_sangle(angle):
return - math.degrees(angle) * SR
def can_function(pm, speed, angle, idx, cruise_button=0):
def can_function(pm, speed, angle, idx, cruise_button=0, is_engaged=False):
msg = []
msg.append(packer.make_can_msg("ENGINE_DATA", 0, {"XMISSION_SPEED": speed}, idx))
msg.append(packer.make_can_msg("WHEEL_SPEEDS", 0,
@@ -50,6 +50,7 @@ def can_function(pm, speed, angle, idx, cruise_button=0):
msg.append(packer.make_can_msg("CRUISE_PARAMS", 0, {}, idx))
msg.append(packer.make_can_msg("CRUISE", 0, {}, idx))
msg.append(packer.make_can_msg("SCM_FEEDBACK", 0, {"MAIN_ON": 1}, idx))
msg.append(packer.make_can_msg("POWERTRAIN_DATA", 0, {"ACC_STATUS": int(is_engaged)}, idx))
#print(msg)
# cam bus

tools/webcam/README.md
@@ -0,0 +1,43 @@
Run openpilot with webcam on PC/laptop
=====================
What's needed:
- Ubuntu 16.04
- Python 3.7.3
- GPU (recommended)
- Two USB webcams, at least 720p and 78 degrees FOV (e.g. Logitech C920/C615)
- [Car harness](https://comma.ai/shop/products/comma-car-harness) w/ black panda (or the outdated grey panda/giraffe combo) to connect to your car
- [Panda paw](https://comma.ai/shop/products/panda-paw) (or USB-A to USB-A cable) to connect panda to your computer
- Tape, Charger, ...
That's it!
## Clone openpilot and install the requirements
```
cd ~
git clone https://github.com/commaai/openpilot.git
# Follow [this readme](https://github.com/commaai/openpilot/tree/master/tools) to install the requirements
# Add line "export PYTHONPATH=$HOME/openpilot" to your ~/.bashrc
# You may also need to install tensorflow-gpu 2.0.0
# Install [OpenCV4](https://www.pyimagesearch.com/2018/08/15/how-to-install-opencv-4-on-ubuntu/)
```
## Build openpilot for webcam
```
cd ~/openpilot
scons use_webcam=1
touch prebuilt
```
## Connect the hardware
```
# Connect the road facing camera first, then the driver facing camera
# (default indexes are 1 and 2; can be modified in selfdrive/camerad/cameras/camera_webcam.cc)
# Connect your computer to panda
```
## GO
```
cd ~/openpilot/tools/webcam
./accept_terms.py # accept the user terms so that thermald can detect the car started
cd ~/openpilot/selfdrive
PASSIVE=0 NOSENSOR=1 ./manager.py
# Start the car, then the UI should show the road webcam's view
# Adjust and secure the webcams (you can run tools/webcam/front_mount_helper.py to help mount the driver camera)
# Finish calibration and engage!
```

tools/webcam/accept_terms.py
@@ -0,0 +1,9 @@
#!/usr/bin/env python
from common.params import Params
from selfdrive.version import terms_version, training_version
if __name__ == '__main__':
params = Params()
params.put("HasAcceptedTerms", str(terms_version, 'utf-8'))
params.put("CompletedTrainingVersion", str(training_version, 'utf-8'))
print("Terms Accepted!")

tools/webcam/front_mount_helper.py
@@ -0,0 +1,35 @@
#!/usr/bin/env python
import numpy as np
# copied from common.transformations/camera.py
eon_dcam_focal_length = 860.0 # pixels
webcam_focal_length = 908.0 # pixels
eon_dcam_intrinsics = np.array([
[eon_dcam_focal_length, 0, 1152/2.],
[ 0, eon_dcam_focal_length, 864/2.],
[ 0, 0, 1]])
webcam_intrinsics = np.array([
[webcam_focal_length, 0., 1280/2.],
[ 0., webcam_focal_length, 720/2.],
[ 0., 0., 1.]])
cam_id = 2
if __name__ == "__main__":
import cv2
trans_webcam_to_eon_front = np.dot(eon_dcam_intrinsics,np.linalg.inv(webcam_intrinsics))
cap = cv2.VideoCapture(cam_id)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
while (True):
ret, img = cap.read()
if ret:
img = cv2.warpPerspective(img, trans_webcam_to_eon_front, (1152,864), borderMode=cv2.BORDER_CONSTANT, borderValue=0)
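# keep only the rightmost 864//2 = 432 columns, roughly the part of the view the driver monitoring model looks at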
img = img[:,-864//2:,:]
cv2.imshow('preview', img)
cv2.waitKey(10)

tools/webcam/warp_vis.py
@@ -0,0 +1,45 @@
#!/usr/bin/env python
import numpy as np
# copied from common.transformations/camera.py
eon_focal_length = 910.0 # pixels
eon_dcam_focal_length = 860.0 # pixels
webcam_focal_length = 908.0 # pixels
eon_intrinsics = np.array([
[eon_focal_length, 0., 1164/2.],
[ 0., eon_focal_length, 874/2.],
[ 0., 0., 1.]])
eon_dcam_intrinsics = np.array([
[eon_dcam_focal_length, 0, 1152/2.],
[ 0, eon_dcam_focal_length, 864/2.],
[ 0, 0, 1]])
webcam_intrinsics = np.array([
[webcam_focal_length, 0., 1280/2.],
[ 0., webcam_focal_length, 720/2.],
[ 0., 0., 1.]])
if __name__ == "__main__":
import cv2
trans_webcam_to_eon_rear = np.dot(eon_intrinsics,np.linalg.inv(webcam_intrinsics))
trans_webcam_to_eon_front = np.dot(eon_dcam_intrinsics,np.linalg.inv(webcam_intrinsics))
print("trans_webcam_to_eon_rear:\n", trans_webcam_to_eon_rear)
print("trans_webcam_to_eon_front:\n", trans_webcam_to_eon_front)
cap = cv2.VideoCapture(1)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
while (True):
ret, img = cap.read()
if ret:
# img = cv2.warpPerspective(img, trans_webcam_to_eon_rear, (1164,874), borderMode=cv2.BORDER_CONSTANT, borderValue=0)
img = cv2.warpPerspective(img, trans_webcam_to_eon_front, (1164,874), borderMode=cv2.BORDER_CONSTANT, borderValue=0)
print(img.shape, end='\r')
cv2.imshow('preview', img)
cv2.waitKey(10)
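The matrices this script prints are the same numbers hard-coded in the `ts[]` arrays of selfdrive/camerad/cameras/camera_webcam.cc. Since each intrinsic matrix uses one focal length for both axes, `K_eon @ inv(K_webcam)` collapses to a uniform scale (910/908 for the road camera) plus a principal-point shift, which a few lines of numpy confirm:

```python
import numpy as np

K_eon = np.array([[910.0,   0.0, 1164 / 2.0],
                  [  0.0, 910.0,  874 / 2.0],
                  [  0.0,   0.0,        1.0]])
K_webcam = np.array([[908.0,   0.0, 1280 / 2.0],
                     [  0.0, 908.0,  720 / 2.0],
                     [  0.0,   0.0,        1.0]])

print(np.dot(K_eon, np.linalg.inv(K_webcam)))
# [[  1.00220264   0.         -59.40969163]
#  [  0.           1.00220264  76.20704846]
#  [  0.           0.           1.        ]]
# i.e. scale = 910/908, offsets = EON center minus scaled webcam center
```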