camerad: remove SpectraCamera inheritance from CameraState and simplify initialization (#33611)

remove inheritance

Co-authored-by: Comma Device <device@comma.ai>
Dean Lee authored 7 months ago; committed by GitHub
parent 580388e209
commit 07f3f93bd9
  1. system/camerad/cameras/camera_qcom2.cc (146 changes)
  2. system/camerad/cameras/spectra.cc (6 changes)
  3. system/camerad/cameras/spectra.h (3 changes)
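
At a high level, the change swaps inheritance for composition: CameraState now owns a SpectraCamera member named `camera` and reaches the sensor, buffers, and config through it, and it also owns its frame-loop thread, started in init() and joined in the destructor. A minimal sketch of that pattern, using hypothetical stand-in types rather than the real camerad classes:

```cpp
#include <thread>

// Hypothetical, simplified stand-in for SpectraCamera (not the real class).
struct LowLevelCamera {
  bool enabled = true;
  void open() { /* open sensor, configure ISP/CSIPHY, init buffers */ }
};

// High-level state owns the low-level object instead of inheriting from it,
// and owns its frame-loop thread (started in init(), joined in the destructor).
class CameraState {
public:
  LowLevelCamera camera;   // composition: members are reached as camera.*
  std::thread thread;

  void init() {
    camera.open();
    if (camera.enabled) thread = std::thread(&CameraState::run, this);
  }
  ~CameraState() {
    if (thread.joinable()) thread.join();
  }

private:
  void run() { /* per-frame publish/exposure loop */ }
};

int main() {
  CameraState cs;
  cs.init();
  return 0;   // cs's destructor joins the frame-loop thread
}
```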

@@ -29,9 +29,10 @@ const bool env_log_raw_frames = getenv("LOG_RAW_FRAMES") != nullptr;
const bool env_ctrl_exp_from_params = getenv("CTRL_EXP_FROM_PARAMS") != nullptr;
// high level camera state
class CameraState : public SpectraCamera {
class CameraState {
public:
SpectraCamera camera;
std::thread thread;
int exposure_time = 5;
bool dc_gain_enabled = false;
int dc_gain_weight = 0;
@@ -49,27 +50,35 @@ public:
float fl_pix = 0;
CameraState(SpectraMaster *master, const CameraConfig &config) : SpectraCamera(master, config) {};
CameraState(SpectraMaster *master, const CameraConfig &config) : camera(master, config) {};
~CameraState();
void init(VisionIpcServer *v, cl_device_id device_id, cl_context ctx);
void update_exposure_score(float desired_ev, int exp_t, int exp_g_idx, float exp_gain);
void set_camera_exposure(float grey_frac);
void set_exposure_rect();
void run();
float get_gain_factor() const {
return (1 + dc_gain_weight * (sensor->dc_gain_factor-1) / sensor->dc_gain_max_weight);
return (1 + dc_gain_weight * (camera.sensor->dc_gain_factor-1) / camera.sensor->dc_gain_max_weight);
}
};
void init() {
fl_pix = cc.focal_len / sensor->pixel_size_mm;
set_exposure_rect();
void CameraState::init(VisionIpcServer *v, cl_device_id device_id, cl_context ctx) {
camera.camera_open(v, device_id, ctx);
dc_gain_weight = sensor->dc_gain_min_weight;
gain_idx = sensor->analog_gain_rec_idx;
cur_ev[0] = cur_ev[1] = cur_ev[2] = get_gain_factor() * sensor->sensor_analog_gains[gain_idx] * exposure_time;
};
};
fl_pix = camera.cc.focal_len / camera.sensor->pixel_size_mm;
set_exposure_rect();
dc_gain_weight = camera.sensor->dc_gain_min_weight;
gain_idx = camera.sensor->analog_gain_rec_idx;
cur_ev[0] = cur_ev[1] = cur_ev[2] = get_gain_factor() * camera.sensor->sensor_analog_gains[gain_idx] * exposure_time;
if (camera.enabled) thread = std::thread(&CameraState::run, this);
}
CameraState::~CameraState() {
if (thread.joinable()) thread.join();
}
void CameraState::set_exposure_rect() {
// set areas for each camera, shouldn't be changed
@@ -88,20 +97,20 @@ void CameraState::set_exposure_rect() {
[0, 0, 1]
]
*/
auto ae_target = ae_targets[cc.camera_num];
auto ae_target = ae_targets[camera.cc.camera_num];
Rect xywh_ref = ae_target.first;
float fl_ref = ae_target.second;
ae_xywh = (Rect){
std::max(0, buf.out_img_width / 2 - (int)(fl_pix / fl_ref * xywh_ref.w / 2)),
std::max(0, buf.out_img_height / 2 - (int)(fl_pix / fl_ref * (h_ref / 2 - xywh_ref.y))),
std::min((int)(fl_pix / fl_ref * xywh_ref.w), buf.out_img_width / 2 + (int)(fl_pix / fl_ref * xywh_ref.w / 2)),
std::min((int)(fl_pix / fl_ref * xywh_ref.h), buf.out_img_height / 2 + (int)(fl_pix / fl_ref * (h_ref / 2 - xywh_ref.y)))
std::max(0, camera.buf.out_img_width / 2 - (int)(fl_pix / fl_ref * xywh_ref.w / 2)),
std::max(0, camera.buf.out_img_height / 2 - (int)(fl_pix / fl_ref * (h_ref / 2 - xywh_ref.y))),
std::min((int)(fl_pix / fl_ref * xywh_ref.w), camera.buf.out_img_width / 2 + (int)(fl_pix / fl_ref * xywh_ref.w / 2)),
std::min((int)(fl_pix / fl_ref * xywh_ref.h), camera.buf.out_img_height / 2 + (int)(fl_pix / fl_ref * (h_ref / 2 - xywh_ref.y)))
};
}
void CameraState::update_exposure_score(float desired_ev, int exp_t, int exp_g_idx, float exp_gain) {
float score = sensor->getExposureScore(desired_ev, exp_t, exp_g_idx, exp_gain, gain_idx);
float score = camera.sensor->getExposureScore(desired_ev, exp_t, exp_g_idx, exp_gain, gain_idx);
if (score < best_ev_score) {
new_exp_t = exp_t;
new_exp_g = exp_g_idx;
@@ -110,7 +119,7 @@ void CameraState::update_exposure_score(float desired_ev, int exp_t, int exp_g_i
}
void CameraState::set_camera_exposure(float grey_frac) {
if (!enabled) return;
if (!camera.enabled) return;
const float dt = 0.05;
const float ts_grey = 10.0;
@@ -124,7 +133,8 @@ void CameraState::set_camera_exposure(float grey_frac) {
// Therefore we use the target EV from 3 frames ago, the grey fraction that was just measured was the result of that control action.
// TODO: Lower latency to 2 frames, by using the histogram outputted by the sensor we can do AE before the debayering is complete
const float cur_ev_ = cur_ev[buf.cur_frame_data.frame_id % 3];
const float cur_ev_ = cur_ev[camera.buf.cur_frame_data.frame_id % 3];
const auto &sensor = camera.sensor;
// Scale target grey between 0.1 and 0.4 depending on lighting conditions
float new_target_grey = std::clamp(0.4 - 0.3 * log2(1.0 + sensor->target_grey_factor*cur_ev_) / log2(6000.0), 0.1, 0.4);
@@ -194,72 +204,69 @@ void CameraState::set_camera_exposure(float grey_frac) {
dc_gain_enabled = enable_dc_gain;
float gain = analog_gain_frac * get_gain_factor();
cur_ev[buf.cur_frame_data.frame_id % 3] = exposure_time * gain;
cur_ev[camera.buf.cur_frame_data.frame_id % 3] = exposure_time * gain;
// Processing a frame takes right about 50ms, so we need to wait a few ms
// so we don't send i2c commands around the frame start.
int ms = (nanos_since_boot() - buf.cur_frame_data.timestamp_sof) / 1000000;
int ms = (nanos_since_boot() - camera.buf.cur_frame_data.timestamp_sof) / 1000000;
if (ms < 60) {
util::sleep_for(60 - ms);
}
// LOGE("ae - camera %d, cur_t %.5f, sof %.5f, dt %.5f", cc.camera_num, 1e-9 * nanos_since_boot(), 1e-9 * buf.cur_frame_data.timestamp_sof, 1e-9 * (nanos_since_boot() - buf.cur_frame_data.timestamp_sof));
// LOGE("ae - camera %d, cur_t %.5f, sof %.5f, dt %.5f", camera.cc.camera_num, 1e-9 * nanos_since_boot(), 1e-9 * camera.buf.cur_frame_data.timestamp_sof, 1e-9 * (nanos_since_boot() - camera.buf.cur_frame_data.timestamp_sof));
auto exp_reg_array = sensor->getExposureRegisters(exposure_time, new_exp_g, dc_gain_enabled);
sensors_i2c(exp_reg_array.data(), exp_reg_array.size(), CAM_SENSOR_PACKET_OPCODE_SENSOR_CONFIG, sensor->data_word);
camera.sensors_i2c(exp_reg_array.data(), exp_reg_array.size(), CAM_SENSOR_PACKET_OPCODE_SENSOR_CONFIG, camera.sensor->data_word);
}
void CameraState::run() {
util::set_thread_name(cc.publish_name);
util::set_thread_name(camera.cc.publish_name);
std::vector<const char*> pubs = {cc.publish_name};
if (cc.stream_type == VISION_STREAM_ROAD) pubs.push_back("thumbnail");
std::vector<const char*> pubs = {camera.cc.publish_name};
if (camera.cc.stream_type == VISION_STREAM_ROAD) pubs.push_back("thumbnail");
PubMaster pm(pubs);
for (uint32_t cnt = 0; !do_exit; ++cnt) {
// Acquire the buffer; continue if acquisition fails
if (!buf.acquire(exposure_time)) continue;
if (!camera.buf.acquire(exposure_time)) continue;
MessageBuilder msg;
auto framed = (msg.initEvent().*cc.init_camera_state)();
framed.setFrameId(buf.cur_frame_data.frame_id);
framed.setRequestId(buf.cur_frame_data.request_id);
framed.setTimestampEof(buf.cur_frame_data.timestamp_eof);
framed.setTimestampSof(buf.cur_frame_data.timestamp_sof);
auto framed = (msg.initEvent().*camera.cc.init_camera_state)();
const FrameMetadata &meta = camera.buf.cur_frame_data;
framed.setFrameId(meta.frame_id);
framed.setRequestId(meta.request_id);
framed.setTimestampEof(meta.timestamp_eof);
framed.setTimestampSof(meta.timestamp_sof);
framed.setIntegLines(exposure_time);
framed.setGain(analog_gain_frac * get_gain_factor());
framed.setHighConversionGain(dc_gain_enabled);
framed.setMeasuredGreyFraction(measured_grey_fraction);
framed.setTargetGreyFraction(target_grey_fraction);
framed.setProcessingTime(buf.cur_frame_data.processing_time);
framed.setProcessingTime(meta.processing_time);
const float ev = cur_ev[buf.cur_frame_data.frame_id % 3];
const float perc = util::map_val(ev, sensor->min_ev, sensor->max_ev, 0.0f, 100.0f);
const float ev = cur_ev[meta.frame_id % 3];
const float perc = util::map_val(ev, camera.sensor->min_ev, camera.sensor->max_ev, 0.0f, 100.0f);
framed.setExposureValPercent(perc);
framed.setSensor(sensor->image_sensor);
framed.setSensor(camera.sensor->image_sensor);
// Log raw frames for road camera
if (env_log_raw_frames && cc.stream_type == VISION_STREAM_ROAD && cnt % 100 == 5) { // no overlap with qlog decimation
framed.setImage(get_raw_frame_image(&buf));
if (env_log_raw_frames && camera.cc.stream_type == VISION_STREAM_ROAD && cnt % 100 == 5) { // no overlap with qlog decimation
framed.setImage(get_raw_frame_image(&camera.buf));
}
// Process camera registers and set camera exposure
sensor->processRegisters((uint8_t *)buf.cur_camera_buf->addr, framed);
set_camera_exposure(set_exposure_target(&buf, ae_xywh, 2, cc.stream_type != VISION_STREAM_DRIVER ? 2 : 4));
camera.sensor->processRegisters((uint8_t *)camera.buf.cur_camera_buf->addr, framed);
set_camera_exposure(set_exposure_target(&camera.buf, ae_xywh, 2, camera.cc.stream_type != VISION_STREAM_DRIVER ? 2 : 4));
// Send the message
pm.send(cc.publish_name, msg);
if (cc.stream_type == VISION_STREAM_ROAD && cnt % 100 == 3) {
publish_thumbnail(&pm, &buf); // this takes 10ms???
pm.send(camera.cc.publish_name, msg);
if (camera.cc.stream_type == VISION_STREAM_ROAD && cnt % 100 == 3) {
publish_thumbnail(&pm, &camera.buf); // this takes 10ms???
}
}
}
void camerad_thread() {
/*
TODO: future cleanups
- centralize enabled handling
- open/init/etc mess
*/
// TODO: centralize enabled handling
cl_device_id device_id = cl_get_device_id(CL_DEVICE_TYPE_DEFAULT);
const cl_context_properties props[] = {CL_CONTEXT_PRIORITY_HINT_QCOM, CL_PRIORITY_HINT_HIGH_QCOM, 0};
@@ -272,34 +279,23 @@ void camerad_thread() {
m.init();
// *** per-cam init ***
std::vector<CameraState*> cams = {
new CameraState(&m, ROAD_CAMERA_CONFIG),
new CameraState(&m, WIDE_ROAD_CAMERA_CONFIG),
new CameraState(&m, DRIVER_CAMERA_CONFIG),
};
for (auto cam : cams) cam->camera_open();
for (auto cam : cams) cam->camera_init(&v, device_id, ctx);
for (auto cam : cams) cam->init();
v.start_listener();
LOG("-- Starting threads");
std::vector<std::thread> threads;
for (auto cam : cams) {
if (cam->enabled) threads.emplace_back(&CameraState::run, cam);
std::vector<std::unique_ptr<CameraState>> cams;
for (const auto &config : {ROAD_CAMERA_CONFIG, WIDE_ROAD_CAMERA_CONFIG, DRIVER_CAMERA_CONFIG}) {
auto cam = std::make_unique<CameraState>(&m, config);
cam->init(&v, device_id, ctx);
cams.emplace_back(std::move(cam));
}
v.start_listener();
// start devices
LOG("-- Starting devices");
for (auto cam : cams) cam->sensors_start();
for (auto &cam : cams) cam->camera.sensors_start();
// poll events
LOG("-- Dequeueing Video events");
while (!do_exit) {
struct pollfd fds[1] = {{0}};
fds[0].fd = m.video0_fd;
fds[0].events = POLLPRI;
struct pollfd fds[1] = {{.fd = m.video0_fd, .events = POLLPRI}};
int ret = poll(fds, std::size(fds), 1000);
if (ret < 0) {
if (errno == EINTR || errno == EAGAIN) continue;
@@ -320,9 +316,9 @@ void camerad_thread() {
do_exit = do_exit || event_data->u.frame_msg.frame_id > (1*20);
}
for (auto cam : cams) {
if (event_data->session_hdl == cam->session_handle) {
cam->handle_camera_event(event_data);
for (auto &cam : cams) {
if (event_data->session_hdl == cam->camera.session_handle) {
cam->camera.handle_camera_event(event_data);
break;
}
}
@@ -333,8 +329,4 @@ void camerad_thread() {
LOGE("VIDIOC_DQEVENT failed, errno=%d", errno);
}
}
LOG(" ************** STOPPING **************");
for (auto &t : threads) t.join();
for (auto cam : cams) delete cam;
}
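
With each CameraState owning and joining its own thread, camerad_thread above can hold the cameras as std::unique_ptr and drop the separate threads vector, the explicit joins at shutdown, and the manual deletes. A rough sketch of that lifetime model, again with simplified stand-in types rather than the real ones:

```cpp
#include <memory>
#include <thread>
#include <vector>

// Simplified stand-in: the object owns its worker thread and cleans it up itself.
struct Cam {
  std::thread thread;
  void init() { thread = std::thread([] { /* frame loop until exit is requested */ }); }
  ~Cam() { if (thread.joinable()) thread.join(); }
};

int main() {
  std::vector<std::unique_ptr<Cam>> cams;
  for (int i = 0; i < 3; ++i) {
    auto cam = std::make_unique<Cam>();
    cam->init();
    cams.emplace_back(std::move(cam));
  }
  // ... event-polling loop would run here ...
  return 0;  // vector destruction deletes each Cam; destructors join the threads
}
```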

@@ -218,7 +218,7 @@ int SpectraCamera::clear_req_queue() {
return ret;
}
void SpectraCamera::camera_open() {
void SpectraCamera::camera_open(VisionIpcServer *v, cl_device_id device_id, cl_context ctx) {
if (!enabled) return;
if (!openSensor()) {
@@ -229,10 +229,6 @@ void SpectraCamera::camera_open() {
configISP();
configCSIPHY();
linkDevices();
}
void SpectraCamera::camera_init(VisionIpcServer *v, cl_device_id device_id, cl_context ctx) {
if (!enabled) return;
LOGD("camera init %d", cc.camera_num);
buf.init(device_id, ctx, this, v, FRAME_BUF_COUNT, cc.stream_type);
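
On the SpectraCamera side, the separate camera_init() step is folded into camera_open(), which now takes the VisionIpcServer, OpenCL device, and context so it can set up the buffers itself. A sketch of the shape of that merge; the helper names and parameter types here are illustrative, not the real private API:

```cpp
// Before (two calls per camera):
//   cam->camera_open();
//   cam->camera_init(&v, device_id, ctx);
// After (one call, early-out when the camera is disabled):
//   cam->camera_open(&v, device_id, ctx);

class SpectraCameraSketch {          // hypothetical stand-in, not the real class
public:
  bool enabled = true;

  void camera_open(void *vipc_server, void *device_id, void *cl_ctx) {
    if (!enabled) return;
    if (!open_sensor()) return;                    // former camera_open() body...
    configure_isp_and_csiphy();
    init_buffers(vipc_server, device_id, cl_ctx);  // ...plus the former camera_init() body
  }

private:
  bool open_sensor() { return true; }
  void configure_isp_and_csiphy() {}
  void init_buffers(void *, void *, void *) {}
};

int main() {
  SpectraCameraSketch cam;
  cam.camera_open(nullptr, nullptr, nullptr);
  return 0;
}
```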

@@ -63,11 +63,10 @@ public:
SpectraCamera(SpectraMaster *master, const CameraConfig &config);
~SpectraCamera();
void camera_open();
void camera_open(VisionIpcServer *v, cl_device_id device_id, cl_context ctx);
void handle_camera_event(const cam_req_mgr_message *event_data);
void camera_close();
void camera_map_bufs();
void camera_init(VisionIpcServer *v, cl_device_id device_id, cl_context ctx);
void config_isp(int io_mem_handle, int fence, int request_id, int buf0_idx);
int clear_req_queue();
