Revert fullframe DM model (#24812)

* Revert "fullframe DM: flip RHD yaw to use matching thresholds"

This reverts commit ce7daabc8847d18ba46e5d1879f5a6958d04ccc7.

* Revert "fullframe DM model (#24762)"

This reverts commit 817be81fb19004f4873881f6b29dcdfffbe7e3a8.

* revert cereal
old-commit-hash: c646eeee0a
ZwX1616 authored 3 years ago, committed by GitHub
parent 77224f3332
commit c73d4e5fc6
25 changed files (lines changed in parentheses):
  cereal (2)
  common/modeldata.h (6)
  selfdrive/camerad/cameras/camera_common.cc (2)
  selfdrive/camerad/cameras/camera_qcom2.cc (2)
  selfdrive/hardware/tici/test_power_draw.py (2)
  selfdrive/modeld/dmonitoringmodeld.cc (6)
  selfdrive/modeld/models/dmonitoring.cc (232)
  selfdrive/modeld/models/dmonitoring.h (33)
  selfdrive/modeld/models/dmonitoring_model.current (4)
  selfdrive/modeld/models/dmonitoring_model.onnx (4)
  selfdrive/modeld/models/dmonitoring_model_q.dlc (4)
  selfdrive/modeld/runners/onnx_runner.py (21)
  selfdrive/modeld/runners/onnxmodel.cc (6)
  selfdrive/modeld/runners/onnxmodel.h (3)
  selfdrive/modeld/runners/snpemodel.cc (12)
  selfdrive/modeld/runners/snpemodel.h (3)
  selfdrive/monitoring/dmonitoringd.py (7)
  selfdrive/monitoring/driver_monitor.py (90)
  selfdrive/monitoring/test_monitoring.py (24)
  selfdrive/test/process_replay/model_replay.py (8)
  selfdrive/test/process_replay/model_replay_ref_commit (2)
  selfdrive/test/process_replay/process_replay.py (2)
  selfdrive/test/test_onroad.py (6)
  selfdrive/ui/qt/offroad/driverview.cc (43)
  selfdrive/ui/qt/widgets/cameraview.cc (9)

cereal
@@ -1 +1 @@
Subproject commit 96cbed052ab1d69ea15da94dc2b882d59a786347
Subproject commit e9d3597d23311a3e33a2def74ceec839e5ff4bf5

common/modeldata.h
@@ -24,6 +24,12 @@ constexpr auto T_IDXS_FLOAT = build_idxs<float, TRAJECTORY_SIZE>(10.0);
constexpr auto X_IDXS = build_idxs<double, TRAJECTORY_SIZE>(192.0);
constexpr auto X_IDXS_FLOAT = build_idxs<float, TRAJECTORY_SIZE>(192.0);
namespace tici_dm_crop {
const int x_offset = -72;
const int y_offset = -144;
const int width = 954;
};
const mat3 fcam_intrinsic_matrix = (mat3){{2648.0, 0.0, 1928.0 / 2,
0.0, 2648.0, 1208.0 / 2,
0.0, 0.0, 1.0}};

selfdrive/camerad/cameras/camera_common.cc
@@ -239,7 +239,7 @@ static kj::Array<capnp::byte> yuv420_to_jpeg(const CameraBuf *b, int thumbnail_w
int in_stride = b->cur_yuv_buf->stride;
// make the buffer big enough. jpeg_write_raw_data requires 16-pixels aligned height to be used.
std::unique_ptr<uint8_t[]> buf(new uint8_t[(thumbnail_width * ((thumbnail_height + 15) & ~15) * 3) / 2]);
std::unique_ptr<uint8[]> buf(new uint8_t[(thumbnail_width * ((thumbnail_height + 15) & ~15) * 3) / 2]);
uint8_t *y_plane = buf.get();
uint8_t *u_plane = y_plane + thumbnail_width * thumbnail_height;
uint8_t *v_plane = u_plane + (thumbnail_width * thumbnail_height) / 4;

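For context on the buffer sizing in the hunk above: jpeg_write_raw_data consumes rows in 16-pixel MCU blocks, so the Y plane height is rounded up to a multiple of 16 before allocating. A minimal Python sketch of that arithmetic (illustrative only, not part of this commit):

def yuv420_buf_size(width: int, height: int) -> int:
    aligned_h = (height + 15) & ~15       # round height up to a multiple of 16
    return (width * aligned_h * 3) // 2   # full-res Y plane plus quarter-res U and V

assert yuv420_buf_size(320, 161) == (320 * 176 * 3) // 2
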
selfdrive/camerad/cameras/camera_qcom2.cc
@@ -837,7 +837,7 @@ void cameras_init(VisionIpcServer *v, MultiCameraState *s, cl_device_id device_i
s->road_cam.camera_init(s, v, CAMERA_ID_AR0231, 1, 20, device_id, ctx, VISION_STREAM_RGB_ROAD, VISION_STREAM_ROAD, !env_disable_road);
s->wide_road_cam.camera_init(s, v, CAMERA_ID_AR0231, 0, 20, device_id, ctx, VISION_STREAM_RGB_WIDE_ROAD, VISION_STREAM_WIDE_ROAD, !env_disable_wide_road);
s->sm = new SubMaster({"driverStateV2"});
s->sm = new SubMaster({"driverState"});
s->pm = new PubMaster({"roadCameraState", "driverCameraState", "wideRoadCameraState", "thumbnail"});
}

selfdrive/hardware/tici/test_power_draw.py
@@ -21,7 +21,7 @@ class Proc:
PROCS = [
Proc('camerad', 2.15),
Proc('modeld', 1.0),
Proc('dmonitoringmodeld', 0.35),
Proc('dmonitoringmodeld', 0.25),
Proc('encoderd', 0.23),
]

selfdrive/modeld/dmonitoringmodeld.cc
@@ -12,7 +12,7 @@
ExitHandler do_exit;
void run_model(DMonitoringModelState &model, VisionIpcClient &vipc_client) {
PubMaster pm({"driverStateV2"});
PubMaster pm({"driverState"});
SubMaster sm({"liveCalibration"});
float calib[CALIB_LEN] = {0};
double last = 0;
@@ -31,11 +31,11 @@ void run_model(DMonitoringModelState &model, VisionIpcClient &vipc_client) {
}
double t1 = millis_since_boot();
DMonitoringModelResult model_res = dmonitoring_eval_frame(&model, buf->addr, buf->width, buf->height, buf->stride, buf->uv_offset, calib);
DMonitoringResult res = dmonitoring_eval_frame(&model, buf->addr, buf->width, buf->height, buf->stride, buf->uv_offset, calib);
double t2 = millis_since_boot();
// send dm packet
dmonitoring_publish(pm, extra.frame_id, model_res, (t2 - t1) / 1000.0, model.output);
dmonitoring_publish(pm, extra.frame_id, res, (t2 - t1) / 1000.0, model.output);
//printf("dmonitoring process: %.2fms, from last %.2fms\n", t2 - t1, t1 - last);
last = t1;

selfdrive/modeld/models/dmonitoring.cc
@@ -10,8 +10,8 @@
#include "selfdrive/modeld/models/dmonitoring.h"
constexpr int MODEL_WIDTH = 1440;
constexpr int MODEL_HEIGHT = 960;
constexpr int MODEL_WIDTH = 320;
constexpr int MODEL_HEIGHT = 640;
template <class T>
static inline T *get_buffer(std::vector<T> &buf, const size_t size) {
@@ -19,115 +19,199 @@ static inline T *get_buffer(std::vector<T> &buf, const size_t size) {
return buf.data();
}
static inline void init_yuv_buf(std::vector<uint8_t> &buf, const int width, int height) {
uint8_t *y = get_buffer(buf, width * height * 3 / 2);
uint8_t *u = y + width * height;
uint8_t *v = u + (width / 2) * (height / 2);
// needed on comma two to make the padded border black
// equivalent to RGB(0,0,0) in YUV space
memset(y, 16, width * height);
memset(u, 128, (width / 2) * (height / 2));
memset(v, 128, (width / 2) * (height / 2));
}
void dmonitoring_init(DMonitoringModelState* s) {
s->is_rhd = Params().getBool("IsRHD");
for (int x = 0; x < std::size(s->tensor); ++x) {
s->tensor[x] = (x - 128.f) * 0.0078125f;
}
init_yuv_buf(s->resized_buf, MODEL_WIDTH, MODEL_HEIGHT);
#ifdef USE_ONNX_MODEL
s->m = new ONNXModel("models/dmonitoring_model.onnx", &s->output[0], OUTPUT_SIZE, USE_DSP_RUNTIME, false, true);
s->m = new ONNXModel("models/dmonitoring_model.onnx", &s->output[0], OUTPUT_SIZE, USE_DSP_RUNTIME);
#else
s->m = new SNPEModel("models/dmonitoring_model_q.dlc", &s->output[0], OUTPUT_SIZE, USE_DSP_RUNTIME, false, true);
s->m = new SNPEModel("models/dmonitoring_model_q.dlc", &s->output[0], OUTPUT_SIZE, USE_DSP_RUNTIME);
#endif
s->m->addCalib(s->calib, CALIB_LEN);
}
void parse_driver_data(DriverStateResult &ds_res, const DMonitoringModelState* s, int out_idx_offset) {
for (int i = 0; i < 3; ++i) {
ds_res.face_orientation[i] = s->output[out_idx_offset+i] * REG_SCALE;
ds_res.face_orientation_std[i] = exp(s->output[out_idx_offset+6+i]);
}
for (int i = 0; i < 2; ++i) {
ds_res.face_position[i] = s->output[out_idx_offset+3+i] * REG_SCALE;
ds_res.face_position_std[i] = exp(s->output[out_idx_offset+9+i]);
}
for (int i = 0; i < 4; ++i) {
ds_res.ready_prob[i] = sigmoid(s->output[out_idx_offset+35+i]);
static inline auto get_yuv_buf(std::vector<uint8_t> &buf, const int width, int height) {
uint8_t *y = get_buffer(buf, width * height * 3 / 2);
uint8_t *u = y + width * height;
uint8_t *v = u + (width /2) * (height / 2);
return std::make_tuple(y, u, v);
}
struct Rect {int x, y, w, h;};
void crop_nv12_to_yuv(uint8_t *raw, int stride, int uv_offset, uint8_t *y, uint8_t *u, uint8_t *v, const Rect &rect) {
uint8_t *raw_y = raw;
uint8_t *raw_uv = raw_y + uv_offset;
for (int r = 0; r < rect.h / 2; r++) {
memcpy(y + 2 * r * rect.w, raw_y + (2 * r + rect.y) * stride + rect.x, rect.w);
memcpy(y + (2 * r + 1) * rect.w, raw_y + (2 * r + rect.y + 1) * stride + rect.x, rect.w);
for (int h = 0; h < rect.w / 2; h++) {
u[r * rect.w/2 + h] = raw_uv[(r + (rect.y/2)) * stride + (rect.x/2 + h)*2];
v[r * rect.w/2 + h] = raw_uv[(r + (rect.y/2)) * stride + (rect.x/2 + h)*2 + 1];
}
for (int i = 0; i < 2; ++i) {
ds_res.not_ready_prob[i] = sigmoid(s->output[out_idx_offset+39+i]);
}
ds_res.face_prob = sigmoid(s->output[out_idx_offset+12]);
ds_res.left_eye_prob = sigmoid(s->output[out_idx_offset+21]);
ds_res.right_eye_prob = sigmoid(s->output[out_idx_offset+30]);
ds_res.left_blink_prob = sigmoid(s->output[out_idx_offset+31]);
ds_res.right_blink_prob = sigmoid(s->output[out_idx_offset+32]);
ds_res.sunglasses_prob = sigmoid(s->output[out_idx_offset+33]);
ds_res.occluded_prob = sigmoid(s->output[out_idx_offset+34]);
}
void fill_driver_data(cereal::DriverStateV2::DriverData::Builder ddata, const DriverStateResult &ds_res) {
ddata.setFaceOrientation(ds_res.face_orientation);
ddata.setFaceOrientationStd(ds_res.face_orientation_std);
ddata.setFacePosition(ds_res.face_position);
ddata.setFacePositionStd(ds_res.face_position_std);
ddata.setFaceProb(ds_res.face_prob);
ddata.setLeftEyeProb(ds_res.left_eye_prob);
ddata.setRightEyeProb(ds_res.right_eye_prob);
ddata.setLeftBlinkProb(ds_res.left_blink_prob);
ddata.setRightBlinkProb(ds_res.right_blink_prob);
ddata.setSunglassesProb(ds_res.sunglasses_prob);
ddata.setOccludedProb(ds_res.occluded_prob);
ddata.setReadyProb(ds_res.ready_prob);
ddata.setNotReadyProb(ds_res.not_ready_prob);
}
DMonitoringResult dmonitoring_eval_frame(DMonitoringModelState* s, void* stream_buf, int width, int height, int stride, int uv_offset, float *calib) {
const int cropped_height = tici_dm_crop::width / 1.33;
Rect crop_rect = {width / 2 - tici_dm_crop::width / 2 + tici_dm_crop::x_offset,
height / 2 - cropped_height / 2 + tici_dm_crop::y_offset,
cropped_height / 2,
cropped_height};
if (!s->is_rhd) {
crop_rect.x += tici_dm_crop::width - crop_rect.w;
}
DMonitoringModelResult dmonitoring_eval_frame(DMonitoringModelState* s, void* stream_buf, int width, int height, int stride, int uv_offset, float *calib) {
int v_off = height - MODEL_HEIGHT;
int h_off = (width - MODEL_WIDTH) / 2;
int yuv_buf_len = MODEL_WIDTH * MODEL_HEIGHT;
int resized_width = MODEL_WIDTH;
int resized_height = MODEL_HEIGHT;
auto [cropped_y, cropped_u, cropped_v] = get_yuv_buf(s->cropped_buf, crop_rect.w, crop_rect.h);
if (!s->is_rhd) {
crop_nv12_to_yuv((uint8_t *)stream_buf, stride, uv_offset, cropped_y, cropped_u, cropped_v, crop_rect);
} else {
auto [mirror_y, mirror_u, mirror_v] = get_yuv_buf(s->premirror_cropped_buf, crop_rect.w, crop_rect.h);
crop_nv12_to_yuv((uint8_t *)stream_buf, stride, uv_offset, mirror_y, mirror_u, mirror_v, crop_rect);
libyuv::I420Mirror(mirror_y, crop_rect.w,
mirror_u, crop_rect.w / 2,
mirror_v, crop_rect.w / 2,
cropped_y, crop_rect.w,
cropped_u, crop_rect.w / 2,
cropped_v, crop_rect.w / 2,
crop_rect.w, crop_rect.h);
}
uint8_t *raw_buf = (uint8_t *) stream_buf;
// vertical crop free
uint8_t *raw_y_start = raw_buf + stride * v_off;
auto [resized_buf, resized_u, resized_v] = get_yuv_buf(s->resized_buf, resized_width, resized_height);
uint8_t *resized_y = resized_buf;
libyuv::FilterMode mode = libyuv::FilterModeEnum::kFilterBilinear;
libyuv::I420Scale(cropped_y, crop_rect.w,
cropped_u, crop_rect.w / 2,
cropped_v, crop_rect.w / 2,
crop_rect.w, crop_rect.h,
resized_y, resized_width,
resized_u, resized_width / 2,
resized_v, resized_width / 2,
resized_width, resized_height,
mode);
int yuv_buf_len = (MODEL_WIDTH/2) * (MODEL_HEIGHT/2) * 6; // Y|u|v -> y|y|y|y|u|v
float *net_input_buf = get_buffer(s->net_input_buf, yuv_buf_len);
// one shot conversion, O(n) anyway
// yuvframe2tensor, normalize
for (int r = 0; r < MODEL_HEIGHT/2; r++) {
for (int c = 0; c < MODEL_WIDTH/2; c++) {
// Y_ul
net_input_buf[(r*MODEL_WIDTH/2) + c + (0*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = s->tensor[resized_y[(2*r)*resized_width + 2*c]];
// Y_dl
net_input_buf[(r*MODEL_WIDTH/2) + c + (1*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = s->tensor[resized_y[(2*r+1)*resized_width + 2*c]];
// Y_ur
net_input_buf[(r*MODEL_WIDTH/2) + c + (2*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = s->tensor[resized_y[(2*r)*resized_width + 2*c+1]];
// Y_dr
net_input_buf[(r*MODEL_WIDTH/2) + c + (3*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = s->tensor[resized_y[(2*r+1)*resized_width + 2*c+1]];
// U
net_input_buf[(r*MODEL_WIDTH/2) + c + (4*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = s->tensor[resized_u[r*resized_width/2 + c]];
// V
net_input_buf[(r*MODEL_WIDTH/2) + c + (5*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = s->tensor[resized_v[r*resized_width/2 + c]];
}
}
uint8_t *net_input_buf = get_buffer(s->net_input_buf, yuv_buf_len);
//printf("preprocess completed. %d \n", yuv_buf_len);
//FILE *dump_yuv_file = fopen("/tmp/rawdump.yuv", "wb");
//fwrite(resized_buf, yuv_buf_len, sizeof(uint8_t), dump_yuv_file);
//fclose(dump_yuv_file);
// here makes a uint8 copy
for (int r = 0; r < MODEL_HEIGHT; ++r) {
memcpy(net_input_buf + r * MODEL_WIDTH, raw_y_start + r * stride + h_off, MODEL_WIDTH);
}
// *** testing ***
// idat = np.frombuffer(open("/tmp/inputdump.yuv", "rb").read(), np.float32).reshape(6, 160, 320)
// imshow(cv2.cvtColor(tensor_to_frames(idat[None]/0.0078125+128)[0], cv2.COLOR_YUV2RGB_I420))
// printf("preprocess completed. %d \n", yuv_buf_len);
// FILE *dump_yuv_file = fopen("/tmp/rawdump.yuv", "wb");
// fwrite(net_input_buf, yuv_buf_len, sizeof(uint8_t), dump_yuv_file);
// fclose(dump_yuv_file);
//FILE *dump_yuv_file2 = fopen("/tmp/inputdump.yuv", "wb");
//fwrite(net_input_buf, MODEL_HEIGHT*MODEL_WIDTH*3/2, sizeof(float), dump_yuv_file2);
//fclose(dump_yuv_file2);
double t1 = millis_since_boot();
s->m->addImage((float*)net_input_buf, yuv_buf_len / 4);
s->m->addImage(net_input_buf, yuv_buf_len);
for (int i = 0; i < CALIB_LEN; i++) {
s->calib[i] = calib[i];
}
s->m->execute();
double t2 = millis_since_boot();
DMonitoringModelResult model_res = {0};
parse_driver_data(model_res.driver_state_lhd, s, 0);
parse_driver_data(model_res.driver_state_rhd, s, 41);
model_res.poor_vision_prob = sigmoid(s->output[82]);
model_res.wheel_on_right_prob = sigmoid(s->output[83]);
model_res.dsp_execution_time = (t2 - t1) / 1000.;
return model_res;
DMonitoringResult ret = {0};
for (int i = 0; i < 3; ++i) {
ret.face_orientation[i] = s->output[i] * REG_SCALE;
ret.face_orientation_meta[i] = exp(s->output[6 + i]);
}
for (int i = 0; i < 2; ++i) {
ret.face_position[i] = s->output[3 + i] * REG_SCALE;
ret.face_position_meta[i] = exp(s->output[9 + i]);
}
for (int i = 0; i < 4; ++i) {
ret.ready_prob[i] = sigmoid(s->output[39 + i]);
}
for (int i = 0; i < 2; ++i) {
ret.not_ready_prob[i] = sigmoid(s->output[43 + i]);
}
ret.face_prob = sigmoid(s->output[12]);
ret.left_eye_prob = sigmoid(s->output[21]);
ret.right_eye_prob = sigmoid(s->output[30]);
ret.left_blink_prob = sigmoid(s->output[31]);
ret.right_blink_prob = sigmoid(s->output[32]);
ret.sg_prob = sigmoid(s->output[33]);
ret.poor_vision = sigmoid(s->output[34]);
ret.partial_face = sigmoid(s->output[35]);
ret.distracted_pose = sigmoid(s->output[36]);
ret.distracted_eyes = sigmoid(s->output[37]);
ret.occluded_prob = sigmoid(s->output[38]);
ret.dsp_execution_time = (t2 - t1) / 1000.;
return ret;
}
void dmonitoring_publish(PubMaster &pm, uint32_t frame_id, const DMonitoringModelResult &model_res, float execution_time, kj::ArrayPtr<const float> raw_pred) {
void dmonitoring_publish(PubMaster &pm, uint32_t frame_id, const DMonitoringResult &res, float execution_time, kj::ArrayPtr<const float> raw_pred) {
// make msg
MessageBuilder msg;
auto framed = msg.initEvent().initDriverStateV2();
auto framed = msg.initEvent().initDriverState();
framed.setFrameId(frame_id);
framed.setModelExecutionTime(execution_time);
framed.setDspExecutionTime(model_res.dsp_execution_time);
framed.setPoorVisionProb(model_res.poor_vision_prob);
framed.setWheelOnRightProb(model_res.wheel_on_right_prob);
fill_driver_data(framed.initLeftDriverData(), model_res.driver_state_lhd);
fill_driver_data(framed.initRightDriverData(), model_res.driver_state_rhd);
framed.setDspExecutionTime(res.dsp_execution_time);
framed.setFaceOrientation(res.face_orientation);
framed.setFaceOrientationStd(res.face_orientation_meta);
framed.setFacePosition(res.face_position);
framed.setFacePositionStd(res.face_position_meta);
framed.setFaceProb(res.face_prob);
framed.setLeftEyeProb(res.left_eye_prob);
framed.setRightEyeProb(res.right_eye_prob);
framed.setLeftBlinkProb(res.left_blink_prob);
framed.setRightBlinkProb(res.right_blink_prob);
framed.setSunglassesProb(res.sg_prob);
framed.setPoorVision(res.poor_vision);
framed.setPartialFace(res.partial_face);
framed.setDistractedPose(res.distracted_pose);
framed.setDistractedEyes(res.distracted_eyes);
framed.setOccludedProb(res.occluded_prob);
framed.setReadyProb(res.ready_prob);
framed.setNotReadyProb(res.not_ready_prob);
if (send_raw_pred) {
framed.setRawPredictions(raw_pred.asBytes());
}
pm.send("driverStateV2", msg);
pm.send("driverState", msg);
}
void dmonitoring_free(DMonitoringModelState* s) {

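The restored preprocessing above packs the 320x640 I420 crop into six half-resolution channels (the four Y phases, then U and V) and normalizes each byte with the s->tensor lookup table. A hedged NumPy sketch of that layout, assuming y, u, v are the resized planes produced by I420Scale:

import numpy as np

def pack_yuv_tensor(y: np.ndarray, u: np.ndarray, v: np.ndarray) -> np.ndarray:
    # y is (H, W); u and v are (H/2, W/2), matching the I420 planes above
    chans = [
        y[0::2, 0::2],  # Y_ul: even rows, even cols
        y[1::2, 0::2],  # Y_dl: odd rows, even cols
        y[0::2, 1::2],  # Y_ur: even rows, odd cols
        y[1::2, 1::2],  # Y_dr: odd rows, odd cols
        u,
        v,
    ]
    # same normalization as the s->tensor table: (x - 128) * 0.0078125, ~[-1, 1]
    return (np.stack(chans).astype(np.float32) - 128.0) * 0.0078125

y = np.zeros((640, 320), dtype=np.uint8)
u = v = np.full((320, 160), 128, dtype=np.uint8)
assert pack_yuv_tensor(y, u, v).shape == (6, 320, 160)
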
selfdrive/modeld/models/dmonitoring.h
@@ -9,43 +9,44 @@
#define CALIB_LEN 3
#define OUTPUT_SIZE 84
#define OUTPUT_SIZE 45
#define REG_SCALE 0.25f
typedef struct DriverStateResult {
typedef struct DMonitoringResult {
float face_orientation[3];
float face_orientation_std[3];
float face_orientation_meta[3];
float face_position[2];
float face_position_std[2];
float face_position_meta[2];
float face_prob;
float left_eye_prob;
float right_eye_prob;
float left_blink_prob;
float right_blink_prob;
float sunglasses_prob;
float sg_prob;
float poor_vision;
float partial_face;
float distracted_pose;
float distracted_eyes;
float occluded_prob;
float ready_prob[4];
float not_ready_prob[2];
} DriverStateResult;
typedef struct DMonitoringModelResult {
DriverStateResult driver_state_lhd;
DriverStateResult driver_state_rhd;
float poor_vision_prob;
float wheel_on_right_prob;
float dsp_execution_time;
} DMonitoringModelResult;
} DMonitoringResult;
typedef struct DMonitoringModelState {
RunModel *m;
bool is_rhd;
float output[OUTPUT_SIZE];
std::vector<uint8_t> net_input_buf;
std::vector<uint8_t> resized_buf;
std::vector<uint8_t> cropped_buf;
std::vector<uint8_t> premirror_cropped_buf;
std::vector<float> net_input_buf;
float calib[CALIB_LEN];
float tensor[UINT8_MAX + 1];
} DMonitoringModelState;
void dmonitoring_init(DMonitoringModelState* s);
DMonitoringModelResult dmonitoring_eval_frame(DMonitoringModelState* s, void* stream_buf, int width, int height, int stride, int uv_offset, float *calib);
void dmonitoring_publish(PubMaster &pm, uint32_t frame_id, const DMonitoringModelResult &model_res, float execution_time, kj::ArrayPtr<const float> raw_pred);
DMonitoringResult dmonitoring_eval_frame(DMonitoringModelState* s, void* stream_buf, int width, int height, int stride, int uv_offset, float *calib);
void dmonitoring_publish(PubMaster &pm, uint32_t frame_id, const DMonitoringResult &res, float execution_time, kj::ArrayPtr<const float> raw_pred);
void dmonitoring_free(DMonitoringModelState* s);

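A hedged Python sketch of the restored output decoding, using the index layout from dmonitoring.cc above (parse_face is a hypothetical helper name): regression outputs are scaled by REG_SCALE, log-std outputs pass through exp, and classifier logits pass through a sigmoid:

from math import exp

REG_SCALE = 0.25

def sigmoid(x: float) -> float:
    return 1.0 / (1.0 + exp(-x))

def parse_face(output):  # output: 45 floats, per OUTPUT_SIZE above
    return {
        "face_orientation": [output[i] * REG_SCALE for i in range(3)],
        "face_position": [output[3 + i] * REG_SCALE for i in range(2)],
        "face_orientation_std": [exp(output[6 + i]) for i in range(3)],
        "face_prob": sigmoid(output[12]),
    }

print(parse_face([0.1] * 45))
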
selfdrive/modeld/models/dmonitoring_model.current
@@ -1,2 +1,2 @@
5b02cff5-2b29-431d-b186-372e9c6fd0c7
bf33cc569076984626ac7e027f927aa593261fa7
a8236e30-5bee-4689-8ea0-fc102e2770e5
d508c79bae1c1c451f3af3e2bc231ce33678cb43

selfdrive/modeld/models/dmonitoring_model.onnx
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:aca1dd411b5f488bea605dc360656e631fc4301968a589ea072e2220c8092600
size 9157561
oid sha256:00731ebd06fcff7e5837607b91bc56cad3bed5d7ee89052c911c981e8f665308
size 3679940

selfdrive/modeld/models/dmonitoring_model_q.dlc
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d146b1d1fd9d40d57d058e51d285f83676866e26d9e5aff9fa27623ce343b58a
size 2636941
oid sha256:667df5e925570a0f6a33dfb890e186a1f13f101885b46db47ec45305737dffb6
size 1145921

selfdrive/modeld/runners/onnx_runner.py
@@ -9,24 +9,20 @@ os.environ["OMP_WAIT_POLICY"] = "PASSIVE"
import onnxruntime as ort # pylint: disable=import-error
def read(sz, tf8=False):
def read(sz):
dd = []
gt = 0
szof = 1 if tf8 else 4
while gt < sz * szof:
st = os.read(0, sz * szof - gt)
while gt < sz * 4:
st = os.read(0, sz * 4 - gt)
assert(len(st) > 0)
dd.append(st)
gt += len(st)
r = np.frombuffer(b''.join(dd), dtype=np.uint8 if tf8 else np.float32).astype(np.float32)
if tf8:
r = r / 255.
return r
return np.frombuffer(b''.join(dd), dtype=np.float32)
def write(d):
os.write(1, d.tobytes())
def run_loop(m, tf8_input=False):
def run_loop(m):
ishapes = [[1]+ii.shape[1:] for ii in m.get_inputs()]
keys = [x.name for x in m.get_inputs()]
@@ -37,10 +33,10 @@ def run_loop(m, tf8_input=False):
print("ready to run onnx model", keys, ishapes, file=sys.stderr)
while 1:
inputs = []
for k, shp in zip(keys, ishapes):
for shp in ishapes:
ts = np.product(shp)
#print("reshaping %s with offset %d" % (str(shp), offset), file=sys.stderr)
inputs.append(read(ts, (k=='input_img' and tf8_input)).reshape(shp))
inputs.append(read(ts).reshape(shp))
ret = m.run(None, dict(zip(keys, inputs)))
#print(ret, file=sys.stderr)
for r in ret:
@@ -48,7 +44,6 @@
if __name__ == "__main__":
print(sys.argv, file=sys.stderr)
print("Onnx available providers: ", ort.get_available_providers(), file=sys.stderr)
options = ort.SessionOptions()
options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
@@ -68,6 +63,6 @@
print("Onnx selected provider: ", [provider], file=sys.stderr)
ort_session = ort.InferenceSession(sys.argv[1], options, providers=[provider])
print("Onnx using ", ort_session.get_providers(), file=sys.stderr)
run_loop(ort_session, tf8_input=("--use_tf8" in sys.argv))
run_loop(ort_session)
except KeyboardInterrupt:
pass

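The runner's stdin protocol depends on exact-size reads: os.read may return fewer bytes than requested, so the restored read() loops until a whole float32 tensor has arrived. A standalone sketch of that pattern (illustrative; read_exact_f32 is a hypothetical name, fd 0 is the model's input pipe):

import os
import numpy as np

def read_exact_f32(fd: int, count: int) -> np.ndarray:
    chunks, got = [], 0
    while got < count * 4:                 # 4 bytes per float32
        chunk = os.read(fd, count * 4 - got)
        assert len(chunk) > 0, "pipe closed before a full tensor arrived"
        chunks.append(chunk)
        got += len(chunk)
    return np.frombuffer(b"".join(chunks), dtype=np.float32)
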
selfdrive/modeld/runners/onnxmodel.cc
@@ -14,13 +14,12 @@
#include "common/swaglog.h"
#include "common/util.h"
ONNXModel::ONNXModel(const char *path, float *_output, size_t _output_size, int runtime, bool _use_extra, bool _use_tf8) {
ONNXModel::ONNXModel(const char *path, float *_output, size_t _output_size, int runtime, bool _use_extra) {
LOGD("loading model %s", path);
output = _output;
output_size = _output_size;
use_extra = _use_extra;
use_tf8 = _use_tf8;
int err = pipe(pipein);
assert(err == 0);
@@ -29,12 +28,11 @@ ONNXModel::ONNXModel(const char *path, float *_output, size_t _output_size, int
std::string exe_dir = util::dir_name(util::readlink("/proc/self/exe"));
std::string onnx_runner = exe_dir + "/runners/onnx_runner.py";
std::string tf8_arg = use_tf8 ? "--use_tf8" : "";
proc_pid = fork();
if (proc_pid == 0) {
LOGD("spawning onnx process %s", onnx_runner.c_str());
char *argv[] = {(char*)onnx_runner.c_str(), (char*)path, (char*)tf8_arg.c_str(), nullptr};
char *argv[] = {(char*)onnx_runner.c_str(), (char*)path, nullptr};
dup2(pipein[0], 0);
dup2(pipeout[1], 1);
close(pipein[0]);

selfdrive/modeld/runners/onnxmodel.h
@@ -6,7 +6,7 @@
class ONNXModel : public RunModel {
public:
ONNXModel(const char *path, float *output, size_t output_size, int runtime, bool use_extra = false, bool _use_tf8 = false);
ONNXModel(const char *path, float *output, size_t output_size, int runtime, bool use_extra = false);
~ONNXModel();
void addRecurrent(float *state, int state_size);
void addDesire(float *state, int state_size);
@@ -31,7 +31,6 @@ private:
int calib_size;
float *image_input_buf = NULL;
int image_buf_size;
bool use_tf8;
float *extra_input_buf = NULL;
int extra_buf_size;
bool use_extra;

selfdrive/modeld/runners/snpemodel.cc
@@ -14,11 +14,10 @@ void PrintErrorStringAndExit() {
std::exit(EXIT_FAILURE);
}
SNPEModel::SNPEModel(const char *path, float *loutput, size_t loutput_size, int runtime, bool luse_extra, bool luse_tf8) {
SNPEModel::SNPEModel(const char *path, float *loutput, size_t loutput_size, int runtime, bool luse_extra) {
output = loutput;
output_size = loutput_size;
use_extra = luse_extra;
use_tf8 = luse_tf8;
#ifdef QCOM2
if (runtime==USE_GPU_RUNTIME) {
Runtime = zdl::DlSystem::Runtime_t::GPU;
@@ -71,16 +70,14 @@ SNPEModel::SNPEModel(const char *path, float *loutput, size_t loutput_size, int
printf("model: %s -> %s\n", input_tensor_name, output_tensor_name);
zdl::DlSystem::UserBufferEncodingFloat userBufferEncodingFloat;
zdl::DlSystem::UserBufferEncodingTf8 userBufferEncodingTf8(0, 1./255); // network takes 0-1
zdl::DlSystem::IUserBufferFactory& ubFactory = zdl::SNPE::SNPEFactory::getUserBufferFactory();
size_t size_of_input = use_tf8 ? sizeof(uint8_t) : sizeof(float);
// create input buffer
{
const auto &inputDims_opt = snpe->getInputDimensions(input_tensor_name);
const zdl::DlSystem::TensorShape& bufferShape = *inputDims_opt;
std::vector<size_t> strides(bufferShape.rank());
strides[strides.size() - 1] = size_of_input;
strides[strides.size() - 1] = sizeof(float);
size_t product = 1;
for (size_t i = 0; i < bufferShape.rank(); i++) product *= bufferShape[i];
size_t stride = strides[strides.size() - 1];
@@ -89,10 +86,7 @@ SNPEModel::SNPEModel(const char *path, float *loutput, size_t loutput_size, int
strides[i-1] = stride;
}
printf("input product is %lu\n", product);
inputBuffer = ubFactory.createUserBuffer(NULL,
product*size_of_input,
strides,
use_tf8 ? (zdl::DlSystem::UserBufferEncoding*)&userBufferEncodingTf8 : (zdl::DlSystem::UserBufferEncoding*)&userBufferEncodingFloat);
inputBuffer = ubFactory.createUserBuffer(NULL, product*sizeof(float), strides, &userBufferEncodingFloat);
inputMap.add(input_tensor_name, inputBuffer.get());
}

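The stride loop in the hunk above builds row-major byte strides from the tensor shape, innermost out; with the tf8 path reverted, the element size is always sizeof(float). A hedged sketch of the same computation:

def row_major_strides(shape, elem_size=4):
    strides = [0] * len(shape)
    strides[-1] = elem_size                # innermost stride = element size
    for i in range(len(shape) - 1, 0, -1):
        strides[i - 1] = strides[i] * shape[i]
    return strides

# e.g. a (1, 6, 320, 160) float32 input buffer
assert row_major_strides([1, 6, 320, 160]) == [1228800, 204800, 640, 4]
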
selfdrive/modeld/runners/snpemodel.h
@@ -23,7 +23,7 @@
class SNPEModel : public RunModel {
public:
SNPEModel(const char *path, float *loutput, size_t loutput_size, int runtime, bool luse_extra = false, bool use_tf8 = false);
SNPEModel(const char *path, float *loutput, size_t loutput_size, int runtime, bool luse_extra = false);
void addRecurrent(float *state, int state_size);
void addTrafficConvention(float *state, int state_size);
void addCalib(float *state, int state_size);
@@ -52,7 +52,6 @@ private:
std::unique_ptr<zdl::DlSystem::IUserBuffer> inputBuffer;
float *input;
size_t input_size;
bool use_tf8;
// snpe output stuff
zdl::DlSystem::UserBufferMap outputMap;

selfdrive/monitoring/dmonitoringd.py
@@ -18,7 +18,7 @@ def dmonitoringd_thread(sm=None, pm=None):
pm = messaging.PubMaster(['driverMonitoringState'])
if sm is None:
sm = messaging.SubMaster(['driverStateV2', 'liveCalibration', 'carState', 'controlsState', 'modelV2'], poll=['driverStateV2'])
sm = messaging.SubMaster(['driverState', 'liveCalibration', 'carState', 'controlsState', 'modelV2'], poll=['driverState'])
driver_status = DriverStatus(rhd=Params().get_bool("IsRHD"))
@@ -34,7 +34,7 @@ def dmonitoringd_thread(sm=None, pm=None):
while True:
sm.update()
if not sm.updated['driverStateV2']:
if not sm.updated['driverState']:
continue
# Get interaction
@@ -51,7 +51,7 @@ def dmonitoringd_thread(sm=None, pm=None):
# Get data from dmonitoringmodeld
events = Events()
driver_status.update_states(sm['driverStateV2'], sm['liveCalibration'].rpyCalib, sm['carState'].vEgo, sm['controlsState'].enabled)
driver_status.update_states(sm['driverState'], sm['liveCalibration'].rpyCalib, sm['carState'].vEgo, sm['controlsState'].enabled)
# Block engaging after max number of distrations
if driver_status.terminal_alert_cnt >= driver_status.settings._MAX_TERMINAL_ALERTS or \
@@ -79,7 +79,6 @@ def dmonitoringd_thread(sm=None, pm=None):
"isLowStd": driver_status.pose.low_std,
"hiStdCount": driver_status.hi_stds,
"isActiveMode": driver_status.active_monitoring_mode,
"isRHD": driver_status.wheel_on_right,
}
pm.send('driverMonitoringState', dat)

selfdrive/monitoring/driver_monitor.py
@@ -5,7 +5,6 @@ from common.numpy_fast import interp
from common.realtime import DT_DMON
from common.filter_simple import FirstOrderFilter
from common.stat_live import RunningStatFilter
from common.transformations.camera import tici_d_frame_size
EventName = car.CarEvent.EventName
@@ -27,29 +26,32 @@ class DRIVER_MONITOR_SETTINGS():
self._DISTRACTED_PROMPT_TIME_TILL_TERMINAL = 6.
self._FACE_THRESHOLD = 0.5
self._PARTIAL_FACE_THRESHOLD = 0.8
self._EYE_THRESHOLD = 0.65
self._SG_THRESHOLD = 0.925
self._BLINK_THRESHOLD = 0.861
self._BLINK_THRESHOLD = 0.8
self._BLINK_THRESHOLD_SLACK = 0.9
self._BLINK_THRESHOLD_STRICT = self._BLINK_THRESHOLD
self._EE_THRESH11 = 0.75
self._EE_THRESH12 = 3.25
self._EE_THRESH21 = 0.01
self._EE_THRESH22 = 0.35
self._POSE_PITCH_THRESHOLD = 0.3133
self._POSE_PITCH_THRESHOLD_SLACK = 0.3237
self._POSE_PITCH_THRESHOLD = 0.3237
self._POSE_PITCH_THRESHOLD_SLACK = 0.3657
self._POSE_PITCH_THRESHOLD_STRICT = self._POSE_PITCH_THRESHOLD
self._POSE_YAW_THRESHOLD = 0.4020
self._POSE_YAW_THRESHOLD_SLACK = 0.5042
self._POSE_YAW_THRESHOLD = 0.3109
self._POSE_YAW_THRESHOLD_SLACK = 0.4294
self._POSE_YAW_THRESHOLD_STRICT = self._POSE_YAW_THRESHOLD
self._PITCH_NATURAL_OFFSET = 0.029 # initial value before offset is learned
self._YAW_NATURAL_OFFSET = 0.097 # initial value before offset is learned
self._PITCH_NATURAL_OFFSET = 0.057 # initial value before offset is learned
self._YAW_NATURAL_OFFSET = 0.11 # initial value before offset is learned
self._PITCH_MAX_OFFSET = 0.124
self._PITCH_MIN_OFFSET = -0.0881
self._YAW_MAX_OFFSET = 0.289
self._YAW_MIN_OFFSET = -0.0246
self._POSESTD_THRESHOLD = 0.3
self._POSESTD_THRESHOLD = 0.315
self._HI_STD_FALLBACK_TIME = int(10 / self._DT_DMON) # fall back to wheel touch if model is uncertain for 10s
self._DISTRACTED_FILTER_TS = 0.25 # 0.6Hz
@@ -57,9 +59,6 @@ class DRIVER_MONITOR_SETTINGS():
self._POSE_OFFSET_MIN_COUNT = int(60 / self._DT_DMON) # valid data counts before calibration completes, 1min cumulative
self._POSE_OFFSET_MAX_COUNT = int(360 / self._DT_DMON) # stop deweighting new data after 6 min, aka "short term memory"
self._WHEELPOS_THRESHOLD = 0.5
self._WHEELPOS_FILTER_MIN_COUNT = int(5 / self._DT_DMON)
self._RECOVERY_FACTOR_MAX = 5. # relative to minus step change
self._RECOVERY_FACTOR_MIN = 1.25 # relative to minus step change
@@ -67,9 +66,9 @@ class DRIVER_MONITOR_SETTINGS():
self._MAX_TERMINAL_DURATION = int(30 / self._DT_DMON) # not allowed to engage after 30s of terminal alerts
# model output refers to center of undistorted+leveled image
EFL = 598.0 # focal length in K
W, H = tici_d_frame_size # corrected image has same size as raw
# model output refers to center of cropped image, so need to apply the x displacement offset
RESIZED_FOCAL = 320.0
H, W, FULL_W = 320, 160, 426
class DistractedType:
NOT_DISTRACTED = 0
@@ -77,22 +76,22 @@
DISTRACTED_BLINK = 2
DISTRACTED_E2E = 4
def face_orientation_from_net(angles_desc, pos_desc, rpy_calib):
def face_orientation_from_net(angles_desc, pos_desc, rpy_calib, is_rhd):
# the output of these angles are in device frame
# so from driver's perspective, pitch is up and yaw is right
pitch_net, yaw_net, roll_net = angles_desc
face_pixel_position = ((pos_desc[0]+0.5)*W, (pos_desc[1]+0.5)*H)
yaw_focal_angle = atan2(face_pixel_position[0] - W//2, EFL)
pitch_focal_angle = atan2(face_pixel_position[1] - H//2, EFL)
face_pixel_position = ((pos_desc[0] + .5)*W - W + FULL_W, (pos_desc[1]+.5)*H)
yaw_focal_angle = atan2(face_pixel_position[0] - FULL_W//2, RESIZED_FOCAL)
pitch_focal_angle = atan2(face_pixel_position[1] - H//2, RESIZED_FOCAL)
pitch = pitch_net + pitch_focal_angle
yaw = -yaw_net + yaw_focal_angle
# no calib for roll
pitch -= rpy_calib[1]
yaw -= rpy_calib[2]
yaw -= rpy_calib[2] * (1 - 2 * int(is_rhd)) # lhd -> -=, rhd -> +=
return roll_net, pitch, yaw
class DriverPose():
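The restored focal-angle math above treats the driver camera as a pin-hole: the face's pixel offset from the full-frame center, divided by the resized focal length, gives the angle correction added to the network's pitch/yaw outputs. A standalone sketch using the constants above (focal_angles is a hypothetical helper name):

from math import atan2

RESIZED_FOCAL = 320.0
H, W, FULL_W = 320, 160, 426

def focal_angles(pos_desc):
    # shift the crop-relative position into the full frame (the LHD crop sits right of center)
    face_px = ((pos_desc[0] + .5) * W - W + FULL_W, (pos_desc[1] + .5) * H)
    yaw_fa = atan2(face_px[0] - FULL_W // 2, RESIZED_FOCAL)
    pitch_fa = atan2(face_px[1] - H // 2, RESIZED_FOCAL)
    return yaw_fa, pitch_fa

print(focal_angles((0.0, 0.0)))  # center of crop: ~0.39 rad yaw, 0 pitch
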
@@ -113,6 +112,7 @@ class DriverBlink():
def __init__(self):
self.left_blink = 0.
self.right_blink = 0.
self.cfactor = 1.
class DriverStatus():
def __init__(self, rhd=False, settings=DRIVER_MONITOR_SETTINGS()):
@@ -120,7 +120,7 @@ class DriverStatus():
self.settings = settings
# init driver status
# self.wheelpos_learner = RunningStatFilter()
self.is_rhd_region = rhd
self.pose = DriverPose(self.settings._POSE_OFFSET_MAX_COUNT)
self.pose_calibrated = False
self.blink = DriverBlink()
@@ -137,8 +137,8 @@ class DriverStatus():
self.distracted_types = []
self.driver_distracted = False
self.driver_distraction_filter = FirstOrderFilter(0., self.settings._DISTRACTED_FILTER_TS, self.settings._DT_DMON)
self.wheel_on_right = rhd
self.face_detected = False
self.face_partial = False
self.terminal_alert_cnt = 0
self.terminal_time = 0
self.step_change = 0.
@@ -197,7 +197,7 @@ class DriverStatus():
yaw_error > self.settings._POSE_YAW_THRESHOLD*self.pose.cfactor_yaw:
distracted_types.append(DistractedType.DISTRACTED_POSE)
if (self.blink.left_blink + self.blink.right_blink)*0.5 > self.settings._BLINK_THRESHOLD:
if (self.blink.left_blink + self.blink.right_blink)*0.5 > self.settings._BLINK_THRESHOLD*self.blink.cfactor:
distracted_types.append(DistractedType.DISTRACTED_BLINK)
if self.ee1_calibrated:
@@ -214,7 +214,13 @@
return distracted_types
def set_policy(self, model_data, car_speed):
ep = min(model_data.meta.engagedProb, 0.8) / 0.8 # engaged prob
bp = model_data.meta.disengagePredictions.brakeDisengageProbs[0] # brake disengage prob in next 2s
# TODO: retune adaptive blink
self.blink.cfactor = interp(ep, [0, 0.5, 1],
[self.settings._BLINK_THRESHOLD_STRICT,
self.settings._BLINK_THRESHOLD,
self.settings._BLINK_THRESHOLD_SLACK]) / self.settings._BLINK_THRESHOLD
k1 = max(-0.00156*((car_speed-16)**2)+0.6, 0.2)
bp_normal = max(min(bp / k1, 0.5),0)
self.pose.cfactor_pitch = interp(bp_normal, [0, 0.5],
@@ -225,36 +231,28 @@
self.settings._POSE_YAW_THRESHOLD_STRICT]) / self.settings._POSE_YAW_THRESHOLD
def update_states(self, driver_state, cal_rpy, car_speed, op_engaged):
# rhd_pred = driver_state.wheelOnRightProb
# if car_speed > 0.01:
# self.wheelpos_learner.push_and_update(rhd_pred)
# if self.wheelpos_learner.filtered_stat.n > self.settings._WHEELPOS_FILTER_MIN_COUNT:
# self.wheel_on_right = self.wheelpos_learner.filtered_stat.M > self.settings._WHEELPOS_THRESHOLD
# else:
# self.wheel_on_right = rhd_pred > self.settings._WHEELPOS_THRESHOLD
driver_data = driver_state.rightDriverData if self.wheel_on_right else driver_state.leftDriverData
if not all(len(x) > 0 for x in (driver_data.faceOrientation, driver_data.facePosition,
driver_data.faceOrientationStd, driver_data.facePositionStd,
driver_data.readyProb, driver_data.notReadyProb)):
if not all(len(x) > 0 for x in (driver_state.faceOrientation, driver_state.facePosition,
driver_state.faceOrientationStd, driver_state.facePositionStd,
driver_state.readyProb, driver_state.notReadyProb)):
return
self.face_detected = driver_data.faceProb > self.settings._FACE_THRESHOLD
self.pose.roll, self.pose.pitch, self.pose.yaw = face_orientation_from_net(driver_data.faceOrientation, driver_data.facePosition, cal_rpy)
if self.wheel_on_right:
self.pose.yaw *= -1
self.pose.pitch_std = driver_data.faceOrientationStd[0]
self.pose.yaw_std = driver_data.faceOrientationStd[1]
self.face_partial = driver_state.partialFace > self.settings._PARTIAL_FACE_THRESHOLD
self.face_detected = driver_state.faceProb > self.settings._FACE_THRESHOLD or self.face_partial
self.pose.roll, self.pose.pitch, self.pose.yaw = face_orientation_from_net(driver_state.faceOrientation, driver_state.facePosition, cal_rpy, self.is_rhd_region)
self.pose.pitch_std = driver_state.faceOrientationStd[0]
self.pose.yaw_std = driver_state.faceOrientationStd[1]
# self.pose.roll_std = driver_state.faceOrientationStd[2]
model_std_max = max(self.pose.pitch_std, self.pose.yaw_std)
self.pose.low_std = model_std_max < self.settings._POSESTD_THRESHOLD
self.blink.left_blink = driver_data.leftBlinkProb * (driver_data.leftEyeProb > self.settings._EYE_THRESHOLD) * (driver_data.sunglassesProb < self.settings._SG_THRESHOLD)
self.blink.right_blink = driver_data.rightBlinkProb * (driver_data.rightEyeProb > self.settings._EYE_THRESHOLD) * (driver_data.sunglassesProb < self.settings._SG_THRESHOLD)
self.eev1 = driver_data.notReadyProb[1]
self.eev2 = driver_data.readyProb[0]
self.pose.low_std = model_std_max < self.settings._POSESTD_THRESHOLD and not self.face_partial
self.blink.left_blink = driver_state.leftBlinkProb * (driver_state.leftEyeProb > self.settings._EYE_THRESHOLD) * (driver_state.sunglassesProb < self.settings._SG_THRESHOLD)
self.blink.right_blink = driver_state.rightBlinkProb * (driver_state.rightEyeProb > self.settings._EYE_THRESHOLD) * (driver_state.sunglassesProb < self.settings._SG_THRESHOLD)
self.eev1 = driver_state.notReadyProb[1]
self.eev2 = driver_state.readyProb[0]
self.distracted_types = self._get_distracted_types()
self.driver_distracted = (DistractedType.DISTRACTED_POSE in self.distracted_types or
DistractedType.DISTRACTED_BLINK in self.distracted_types) and \
driver_data.faceProb > self.settings._FACE_THRESHOLD and self.pose.low_std
driver_state.faceProb > self.settings._FACE_THRESHOLD and self.pose.low_std
self.driver_distraction_filter.update(self.driver_distracted)
# update offseter

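A hedged sketch of the adaptive blink threshold restored in set_policy above: as engaged probability rises, the effective threshold relaxes from strict toward slack, and cfactor expresses it relative to the base value (numpy's interp stands in for common.numpy_fast.interp here):

from numpy import interp

BLINK, BLINK_STRICT, BLINK_SLACK = 0.8, 0.8, 0.9  # settings values above

def blink_cfactor(engaged_prob: float) -> float:
    ep = min(engaged_prob, 0.8) / 0.8
    return interp(ep, [0, 0.5, 1], [BLINK_STRICT, BLINK, BLINK_SLACK]) / BLINK

assert blink_cfactor(0.0) == 1.0                # strict equals the base value here
assert abs(blink_cfactor(0.8) - 1.125) < 1e-9   # fully engaged: 0.9 / 0.8
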
selfdrive/monitoring/test_monitoring.py
@@ -17,19 +17,19 @@ INVISIBLE_SECONDS_TO_ORANGE = dm_settings._AWARENESS_TIME - dm_settings._AWARENE
INVISIBLE_SECONDS_TO_RED = dm_settings._AWARENESS_TIME + 1
def make_msg(face_detected, distracted=False, model_uncertain=False):
ds = log.DriverStateV2.new_message()
ds.leftDriverData.faceOrientation = [0., 0., 0.]
ds.leftDriverData.facePosition = [0., 0.]
ds.leftDriverData.faceProb = 1. * face_detected
ds.leftDriverData.leftEyeProb = 1.
ds.leftDriverData.rightEyeProb = 1.
ds.leftDriverData.leftBlinkProb = 1. * distracted
ds.leftDriverData.rightBlinkProb = 1. * distracted
ds.leftDriverData.faceOrientationStd = [1.*model_uncertain, 1.*model_uncertain, 1.*model_uncertain]
ds.leftDriverData.facePositionStd = [1.*model_uncertain, 1.*model_uncertain]
ds = log.DriverState.new_message()
ds.faceOrientation = [0., 0., 0.]
ds.facePosition = [0., 0.]
ds.faceProb = 1. * face_detected
ds.leftEyeProb = 1.
ds.rightEyeProb = 1.
ds.leftBlinkProb = 1. * distracted
ds.rightBlinkProb = 1. * distracted
ds.faceOrientationStd = [1.*model_uncertain, 1.*model_uncertain, 1.*model_uncertain]
ds.facePositionStd = [1.*model_uncertain, 1.*model_uncertain]
# TODO: test both separately when e2e is used
ds.leftDriverData.readyProb = [0., 0., 0., 0.]
ds.leftDriverData.notReadyProb = [0., 0.]
ds.readyProb = [0., 0., 0., 0.]
ds.notReadyProb = [0., 0.]
return ds

selfdrive/test/process_replay/model_replay.py
@@ -52,7 +52,7 @@ def model_replay(lr, frs):
vipc_server.create_buffers(VisionStreamType.VISION_STREAM_WIDE_ROAD, 40, False, *(tici_f_frame_size))
vipc_server.start_listener()
sm = messaging.SubMaster(['modelV2', 'driverStateV2'])
sm = messaging.SubMaster(['modelV2', 'driverState'])
pm = messaging.PubMaster(['roadCameraState', 'wideRoadCameraState', 'driverCameraState', 'liveCalibration', 'lateralPlan'])
try:
@@ -112,7 +112,7 @@ def model_replay(lr, frs):
if min(frame_idxs['roadCameraState'], frame_idxs['wideRoadCameraState']) > recv_cnt['modelV2']:
recv = "modelV2"
elif msg.which() == 'driverCameraState':
recv = "driverStateV2"
recv = "driverState"
# wait for a response
with Timeout(15, f"timed out waiting for {recv}"):
@@ -170,8 +170,8 @@ if __name__ == "__main__":
'logMonoTime',
'modelV2.frameDropPerc',
'modelV2.modelExecutionTime',
'driverStateV2.modelExecutionTime',
'driverStateV2.dspExecutionTime'
'driverState.modelExecutionTime',
'driverState.dspExecutionTime'
]
# TODO this tolerence is absurdly large
tolerance = 5e-1 if PC else None

selfdrive/test/process_replay/model_replay_ref_commit
@@ -1 +1 @@
1d0979ca59dbc89bc5890656e9501e83f0556d50
f74ab97371be93fdc28333e5ea12bbb78c3a32d0

selfdrive/test/process_replay/process_replay.py
@@ -293,7 +293,7 @@ CONFIGS = [
ProcessConfig(
proc_name="dmonitoringd",
pub_sub={
"driverStateV2": ["driverMonitoringState"],
"driverState": ["driverMonitoringState"],
"liveCalibration": [], "carState": [], "modelV2": [], "controlsState": [],
},
ignore=["logMonoTime", "valid"],

selfdrive/test/test_onroad.py
@@ -33,7 +33,7 @@ PROCS = {
"selfdrive.controls.radard": 4.5,
"./_modeld": 4.48,
"./boardd": 3.63,
"./_dmonitoringmodeld": 5.0,
"./_dmonitoringmodeld": 10.0,
"selfdrive.thermald.thermald": 3.87,
"selfdrive.locationd.calibrationd": 2.0,
"./_soundd": 1.0,
@@ -60,7 +60,7 @@ TIMINGS = {
"roadCameraState": [2.5, 0.35],
"driverCameraState": [2.5, 0.35],
"modelV2": [2.5, 0.35],
"driverStateV2": [2.5, 0.40],
"driverState": [2.5, 0.40],
"liveLocationKalman": [2.5, 0.35],
"wideRoadCameraState": [1.5, 0.35],
}
@@ -221,7 +221,7 @@ class TestOnroad(unittest.TestCase):
# TODO: this went up when plannerd cpu usage increased, why?
cfgs = [
("modelV2", 0.050, 0.036),
("driverStateV2", 0.050, 0.026),
("driverState", 0.050, 0.026),
]
for (s, instant_max, avg_max) in cfgs:
ts = [getattr(getattr(m, s), "modelExecutionTime") for m in self.lr if m.which() == s]

selfdrive/ui/qt/offroad/driverview.cc
@@ -25,7 +25,7 @@ void DriverViewWindow::mouseReleaseEvent(QMouseEvent* e) {
emit done();
}
DriverViewScene::DriverViewScene(QWidget* parent) : sm({"driverStateV2"}), QWidget(parent) {
DriverViewScene::DriverViewScene(QWidget* parent) : sm({"driverState"}), QWidget(parent) {
face_img = loadPixmap("../assets/img_driver_face.png", {FACE_IMG_SIZE, FACE_IMG_SIZE});
}
@@ -57,36 +57,43 @@ void DriverViewScene::paintEvent(QPaintEvent* event) {
return;
}
cereal::DriverStateV2::Reader driver_state = sm["driverStateV2"].getDriverStateV2();
cereal::DriverStateV2::DriverData::Reader driver_data;
const int width = 4 * height() / 3;
const QRect rect2 = {rect().center().x() - width / 2, rect().top(), width, rect().height()};
const QRect valid_rect = {is_rhd ? rect2.right() - rect2.height() / 2 : rect2.left(), rect2.top(), rect2.height() / 2, rect2.height()};
// is_rhd = driver_state.getWheelOnRightProb() > 0.5;
driver_data = is_rhd ? driver_state.getRightDriverData() : driver_state.getLeftDriverData();
// blackout
const QColor bg(0, 0, 0, 140);
const QRect& blackout_rect = rect();
p.fillRect(blackout_rect.adjusted(0, 0, valid_rect.left() - blackout_rect.right(), 0), bg);
p.fillRect(blackout_rect.adjusted(valid_rect.right() - blackout_rect.left(), 0, 0, 0), bg);
p.fillRect(blackout_rect.adjusted(valid_rect.left()-blackout_rect.left()+1, 0, valid_rect.right()-blackout_rect.right()-1, -valid_rect.height()*7/10), bg); // top dz
bool face_detected = driver_data.getFaceProb() > 0.5;
// face bounding box
cereal::DriverState::Reader driver_state = sm["driverState"].getDriverState();
bool face_detected = driver_state.getFaceProb() > 0.5;
if (face_detected) {
auto fxy_list = driver_data.getFacePosition();
auto std_list = driver_data.getFaceOrientationStd();
auto fxy_list = driver_state.getFacePosition();
auto std_list = driver_state.getFaceOrientationStd();
float face_x = fxy_list[0];
float face_y = fxy_list[1];
float face_std = std::max(std_list[0], std_list[1]);
float alpha = 0.7;
if (face_std > 0.15) {
alpha = std::max(0.7 - (face_std-0.15)*3.5, 0.0);
if (face_std > 0.08) {
alpha = std::max(0.7 - (face_std-0.08)*7, 0.0);
}
const int box_size = 220;
// use approx instead of distort_points
int fbox_x = 1080.0 - 1714.0 * face_x;
int fbox_y = -135.0 + (504.0 + std::abs(face_x)*112.0) + (1205.0 - std::abs(face_x)*724.0) * face_y;
const int box_size = 0.6 * rect2.height() / 2;
const float rhd_offset = 0.05; // lhd is shifted, so rhd is not mirrored
int fbox_x = valid_rect.center().x() + (is_rhd ? (face_x + rhd_offset) : -face_x) * valid_rect.width();
int fbox_y = valid_rect.center().y() + face_y * valid_rect.height();
p.setPen(QPen(QColor(255, 255, 255, alpha * 255), 10));
p.drawRoundedRect(fbox_x - box_size / 2, fbox_y - box_size / 2, box_size, box_size, 35.0, 35.0);
}
// icon
const int img_offset = 60;
const int img_x = rect().left() + img_offset;
const int img_y = rect().bottom() - FACE_IMG_SIZE - img_offset;
p.setOpacity(face_detected ? 1.0 : 0.2);
const int img_offset = 30;
const int img_x = is_rhd ? rect2.right() - FACE_IMG_SIZE - img_offset : rect2.left() + img_offset;
const int img_y = rect2.bottom() - FACE_IMG_SIZE - img_offset;
p.setOpacity(face_detected ? 1.0 : 0.3);
p.drawPixmap(img_x, img_y, face_img);
}

selfdrive/ui/qt/widgets/cameraview.cc
@@ -54,15 +54,16 @@ const mat4 device_transform = {{
}};
mat4 get_driver_view_transform(int screen_width, int screen_height, int stream_width, int stream_height) {
const float driver_view_ratio = 2.0;
const float yscale = stream_height * driver_view_ratio / stream_width;
const float driver_view_ratio = 1.333;
const float yscale = stream_height * driver_view_ratio / tici_dm_crop::width;
const float xscale = yscale*screen_height/screen_width*stream_width/stream_height;
mat4 transform = (mat4){{
xscale, 0.0, 0.0, 0.0,
0.0, yscale, 0.0, 0.0,
xscale, 0.0, 0.0, xscale*tici_dm_crop::x_offset/stream_width*2,
0.0, yscale, 0.0, yscale*tici_dm_crop::y_offset/stream_height*2,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0,
}};
return transform;
}

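A hedged NumPy sketch of the driver-view transform restored above: the stream is scaled so the 4:3 crop (tici_dm_crop, re-added in common/modeldata.h) fills the view, then translated by the crop offsets in normalized device coordinates. The screen/stream sizes in the usage line are example values, not from this commit:

import numpy as np

def driver_view_transform(screen_w, screen_h, stream_w, stream_h,
                          crop_w=954, x_off=-72, y_off=-144, ratio=1.333):
    yscale = stream_h * ratio / crop_w
    xscale = yscale * screen_h / screen_w * stream_w / stream_h
    return np.array([
        [xscale, 0.0,    0.0, xscale * x_off / stream_w * 2],
        [0.0,    yscale, 0.0, yscale * y_off / stream_h * 2],
        [0.0,    0.0,    1.0, 0.0],
        [0.0,    0.0,    0.0, 1.0],
    ], dtype=np.float32)

print(driver_view_transform(2160, 1080, 1928, 1208))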