#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <assert.h>
#include <unistd.h>
#include <pthread.h>

#include <algorithm>

#include <eigen3/Eigen/Dense>

#include "common/visionbuf.h"
#include "common/visionipc.h"
#include "common/swaglog.h"
#include "common/clutil.h"
#include "common/utilpp.h"

#include "models/driving.h"
#include "messaging.hpp"
ExitHandler do_exit;

// globals shared with live_thread, guarded by transform_lock
bool run_model;
mat3 cur_transform;
pthread_mutex_t transform_lock;
void* live_thread(void *arg) {
  set_thread_name("live");
  set_realtime_priority(50);

  SubMaster sm({"liveCalibration"});

  /*
  import numpy as np
  from common.transformations.model import medmodel_frame_from_road_frame
  medmodel_frame_from_ground = medmodel_frame_from_road_frame[:, (0, 1, 3)]
  ground_from_medmodel_frame = np.linalg.inv(medmodel_frame_from_ground)
  */
  Eigen::Matrix<float, 3, 3> ground_from_medmodel_frame;
  ground_from_medmodel_frame <<
    0.00000000e+00, 0.00000000e+00, 1.00000000e+00,
    -1.09890110e-03, 0.00000000e+00, 2.81318681e-01,
    -1.84808520e-20, 9.00738606e-04, -4.28751576e-02;
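
  /*
  fcam_intrinsics below is the road camera's pinhole projection matrix,

    [fx  0 cx]
    [ 0 fy cy]
    [ 0  0  1]

  expressed in full-resolution pixels; db_s accounts for the resolution the
  YUV buffer actually arrives at relative to these intrinsics.
  */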
  Eigen::Matrix<float, 3, 3> fcam_intrinsics;
#ifndef QCOM2
  fcam_intrinsics <<
    910.0, 0.0, 582.0,
    0.0, 910.0, 437.0,
    0.0, 0.0, 1.0;
  float db_s = 0.5;  // debayering does a 2x downscale
#else
  fcam_intrinsics <<
    2648.0, 0.0, 1928.0/2,
    0.0, 2648.0, 1208.0/2,
    0.0, 0.0, 1.0;
  float db_s = 1.0;
#endif
  mat3 yuv_transform = transform_scale_buffer((mat3){{
    1.0, 0.0, 0.0,
    0.0, 1.0, 0.0,
    0.0, 0.0, 1.0,
  }}, db_s);
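
  /*
  transform_scale_buffer rescales a pixel-space transform to a buffer that was
  downscaled by a factor s. A minimal sketch of the idea (the real helper is
  in common/mat.h and may treat pixel centers slightly differently):

  import numpy as np
  def transform_scale_buffer(M, s):
    S = np.array([[s, 0, 0],
                  [0, s, 0],
                  [0, 0, 1]])
    return S.dot(M)

  Since M is the identity here, yuv_transform just encodes the 2x debayer
  downscale when db_s = 0.5 and stays the identity when db_s = 1.0.
  */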
  while (!do_exit) {
    if (sm.update(100) > 0) {
      // unpack the row-major 3x4 extrinsic matrix from the calibration message
      auto extrinsic_matrix = sm["liveCalibration"].getLiveCalibration().getExtrinsicMatrix();
      Eigen::Matrix<float, 3, 4> extrinsic_matrix_eigen;
      for (int i = 0; i < 4*3; i++) {
        extrinsic_matrix_eigen(i / 4, i % 4) = extrinsic_matrix[i];
      }

      auto camera_frame_from_road_frame = fcam_intrinsics * extrinsic_matrix_eigen;
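
      /*
      For points on the ground plane (z = 0 in road frame), the 3x4 projection
      K*[R|t] collapses to a 3x3 homography, since the z column never
      contributes:

        K * [r1 r2 r3 t] * [x y 0 1]^T = (K * [r1 r2 t]) * [x y 1]^T

      Dropping column 2 below gives that homography, camera_frame_from_ground.
      */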
      Eigen::Matrix<float, 3, 3> camera_frame_from_ground;
      camera_frame_from_ground.col(0) = camera_frame_from_road_frame.col(0);
      camera_frame_from_ground.col(1) = camera_frame_from_road_frame.col(1);
      camera_frame_from_ground.col(2) = camera_frame_from_road_frame.col(3);

      auto warp_matrix = camera_frame_from_ground * ground_from_medmodel_frame;
      mat3 transform = {};
      for (int i = 0; i < 3*3; i++) {
        transform.v[i] = warp_matrix(i / 3, i % 3);
      }
      mat3 model_transform = matmul3(yuv_transform, transform);

      pthread_mutex_lock(&transform_lock);
      cur_transform = model_transform;
      run_model = true;
      pthread_mutex_unlock(&transform_lock);
    }
  }
  return NULL;
}
int main(int argc, char **argv) {
  int err;
  set_realtime_priority(54);

#ifdef QCOM
  set_core_affinity(2);
#elif QCOM2
  // CPU usage is much lower when pinned to a single big core
  set_core_affinity(4);
#endif

  pthread_mutex_init(&transform_lock, NULL);

  // start calibration thread
  pthread_t live_thread_handle;
  err = pthread_create(&live_thread_handle, NULL, live_thread, NULL);
  assert(err == 0);

  // messaging
  PubMaster pm({"modelV2", "model", "cameraOdometry"});
  SubMaster sm({"pathPlan", "frame"});

  // cl init
  cl_device_id device_id = cl_get_device_id(CL_DEVICE_TYPE_DEFAULT);
  cl_context context = CL_CHECK_ERR(clCreateContext(NULL, 1, &device_id, NULL, NULL, &err));

  // init the models
  ModelState model;
  model_init(&model, device_id, context);
  LOGW("models loaded, modeld starting");
  // loop
  VisionStream stream;
  while (!do_exit) {
    VisionStreamBufs buf_info;
    err = visionstream_init(&stream, VISION_STREAM_YUV, true, &buf_info);
    if (err) {
      LOGW("visionstream connect failed");
      util::sleep_for(100);
      continue;
    }
    LOGW("connected with buffer size: %d", buf_info.buf_len);
    // setup filter to track dropped frames
    const float dt = 1. / MODEL_FREQ;
    const float ts = 10.0;  // filter time constant (s)
    const float frame_filter_k = (dt / ts) / (1. + dt / ts);
    float frames_dropped = 0;
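
    /*
    frames_dropped is a first-order (exponential) low-pass over the per-frame
    drop count:

      y[n] = (1 - k) * y[n-1] + k * x[n],  k = (dt/ts) / (1 + dt/ts)

    the discrete form of a continuous filter with time constant ts. For
    example, if MODEL_FREQ is 20 Hz (dt = 0.05 s) and ts = 10 s, then
    k = 0.005/1.005 ~= 0.005, so a burst of drops decays over roughly ts.
    */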
    // one frame in memory
    VisionBuf yuv_ion = visionbuf_allocate_cl(buf_info.buf_len, device_id, context);

    uint32_t frame_id = 0, last_vipc_frame_id = 0;
    double last = 0;
    int desire = -1;
    uint32_t run_count = 0;
    while (!do_exit) {
      VIPCBuf *buf;
      VIPCBufExtra extra;
      buf = visionstream_get(&stream, &extra);
      if (buf == NULL) {
        LOGW("visionstream get failed");
        break;
      }

      pthread_mutex_lock(&transform_lock);
      mat3 model_transform = cur_transform;
      const bool run_model_this_iter = run_model;
      pthread_mutex_unlock(&transform_lock);

      if (sm.update(0) > 0) {
        // TODO: path planner timeout?
        desire = (int)sm["pathPlan"].getPathPlan().getDesire();
        frame_id = sm["frame"].getFrame().getFrameId();
      }

      double mt1 = 0, mt2 = 0;
      if (run_model_this_iter) {
        run_count++;
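
        /*
        vec_desire one-hot encodes the planner's desired maneuver (e.g. a lane
        change) as an extra model input; until a pathPlan message arrives,
        desire stays -1 and the vector remains all zeros.
        */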
        float vec_desire[DESIRE_LEN] = {0};
        if (desire >= 0 && desire < DESIRE_LEN) {
          vec_desire[desire] = 1.0;
        }

        mt1 = millis_since_boot();

        // TODO: don't make copies!
        memcpy(yuv_ion.addr, buf->addr, buf_info.buf_len);
        visionbuf_sync(&yuv_ion, VISIONBUF_SYNC_TO_DEVICE);

        ModelDataRaw model_buf =
            model_eval_frame(&model, yuv_ion.buf_cl, buf_info.width, buf_info.height,
                             model_transform, vec_desire);
        mt2 = millis_since_boot();
        float model_execution_time = (mt2 - mt1) / 1000.0;

        // track dropped frames
        uint32_t vipc_dropped_frames = extra.frame_id - last_vipc_frame_id - 1;
        frames_dropped = (1. - frame_filter_k) * frames_dropped + frame_filter_k * (float)std::min(vipc_dropped_frames, 10U);
        if (run_count < 10) frames_dropped = 0;  // let frame drops warm up
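
        /*
        frame_drop_ratio converts the filtered drops-per-processed-frame count
        d into the fraction of frames dropped, d / (1 + d): e.g. one drop per
        processed frame means half of all frames were lost.
        */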
        float frame_drop_ratio = frames_dropped / (1 + frames_dropped);

        const float *raw_pred_ptr = send_raw_pred ? &model.output[0] : nullptr;
        model_publish(pm, extra.frame_id, frame_id, frame_drop_ratio, model_buf, raw_pred_ptr, extra.timestamp_eof, model_execution_time);
        posenet_publish(pm, extra.frame_id, vipc_dropped_frames, model_buf, extra.timestamp_eof);

        LOGD("model process: %.2fms, from last %.2fms, vipc_frame_id %u, frame_id %u, frame_drop %.3f", mt2 - mt1, mt1 - last, extra.frame_id, frame_id, frame_drop_ratio);
        last = mt1;
        last_vipc_frame_id = extra.frame_id;
      }
    }

    visionbuf_free(&yuv_ion);
    visionstream_destroy(&stream);
  }

  model_free(&model);

  LOG("joining live_thread");
  err = pthread_join(live_thread_handle, NULL);
  assert(err == 0);

  CL_CHECK(clReleaseContext(context));
  pthread_mutex_destroy(&transform_lock);
  return 0;
}