camerad: prep to unify IFE + BPS processing (#33770)

* unify

* fixup

* start porting over ife branch

* setup new bufs

---------

Co-authored-by: Comma Device <device@comma.ai>
pull/33773/head
Adeeb Shihadeh 7 months ago committed by GitHub
parent 7150c145ae
commit 747acaac71
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 29
      system/camerad/cameras/camera_common.cc
  2. 1
      system/camerad/cameras/camera_common.h
  3. 215
      system/camerad/cameras/spectra.cc
  4. 43
      system/camerad/cameras/spectra.h
  5. 3
      system/camerad/sensors/sensor.h
  6. 5
      system/camerad/test/debug.sh
  7. 2
      system/camerad/test/intercept.sh

@ -65,39 +65,38 @@ void CameraBuf::init(cl_device_id device_id, cl_context context, SpectraCamera *
const SensorInfo *sensor = cam->sensor.get();
// RAW frame
const int frame_size = (sensor->frame_height + sensor->extra_height) * sensor->frame_stride;
camera_bufs = std::make_unique<VisionBuf[]>(frame_buf_count);
camera_bufs_raw = std::make_unique<VisionBuf[]>(frame_buf_count);
camera_bufs_metadata = std::make_unique<FrameMetadata[]>(frame_buf_count);
// RAW + final frames from ISP
const int raw_frame_size = (sensor->frame_height + sensor->extra_height) * sensor->frame_stride;
for (int i = 0; i < frame_buf_count; i++) {
camera_bufs[i].allocate(frame_size);
camera_bufs[i].allocate(cam->yuv_size);
camera_bufs[i].init_cl(device_id, context);
camera_bufs_raw[i].allocate(raw_frame_size);
camera_bufs_raw[i].init_cl(device_id, context);
}
LOGD("allocated %d CL buffers", frame_buf_count);
out_img_width = sensor->frame_width;
out_img_height = sensor->hdr_offset > 0 ? (sensor->frame_height - sensor->hdr_offset) / 2 : sensor->frame_height;
int nv12_width = VENUS_Y_STRIDE(COLOR_FMT_NV12, out_img_width);
int nv12_height = VENUS_Y_SCANLINES(COLOR_FMT_NV12, out_img_height);
assert(nv12_width == VENUS_UV_STRIDE(COLOR_FMT_NV12, out_img_width));
assert(nv12_height/2 == VENUS_UV_SCANLINES(COLOR_FMT_NV12, out_img_height));
size_t nv12_uv_offset = nv12_width * nv12_height;
// the encoder HW tells us the size it wants after setting it up.
// TODO: VENUS_BUFFER_SIZE should give the size, but it's too small. dependent on encoder settings?
size_t nv12_size = (out_img_width <= 1344 ? 2900 : 2346)*nv12_width;
size_t nv12_size = (out_img_width <= 1344 ? 2900 : 2346)*cam->stride;
vipc_server->create_buffers_with_sizes(stream_type, YUV_BUFFER_COUNT, out_img_width, out_img_height, nv12_size, nv12_width, nv12_uv_offset);
LOGD("created %d YUV vipc buffers with size %dx%d", YUV_BUFFER_COUNT, nv12_width, nv12_height);
vipc_server->create_buffers_with_sizes(stream_type, YUV_BUFFER_COUNT, out_img_width, out_img_height, nv12_size, cam->stride, cam->uv_offset);
LOGD("created %d YUV vipc buffers with size %dx%d", YUV_BUFFER_COUNT, cam->stride, cam->y_height);
imgproc = new ImgProc(device_id, context, this, sensor, cam->cc.camera_num, nv12_width, nv12_uv_offset);
imgproc = new ImgProc(device_id, context, this, sensor, cam->cc.camera_num, cam->stride, cam->uv_offset);
}
// Releases the per-frame CL buffers (processed + RAW) and tears down the
// image processing pipeline.
CameraBuf::~CameraBuf() {
  for (int idx = 0; idx < frame_buf_count; ++idx) {
    camera_bufs[idx].free();
    camera_bufs_raw[idx].free();
  }
  if (imgproc != nullptr) {
    delete imgproc;
  }
}
@ -112,10 +111,10 @@ bool CameraBuf::acquire(int expo_time) {
cur_frame_data = camera_bufs_metadata[cur_buf_idx];
cur_yuv_buf = vipc_server->get_buffer(stream_type);
cur_camera_buf = &camera_bufs[cur_buf_idx];
cur_camera_buf = &camera_bufs_raw[cur_buf_idx];
double start_time = millis_since_boot();
imgproc->runKernel(camera_bufs[cur_buf_idx].buf_cl, cur_yuv_buf->buf_cl, out_img_width, out_img_height, expo_time);
imgproc->runKernel(camera_bufs_raw[cur_buf_idx].buf_cl, cur_yuv_buf->buf_cl, out_img_width, out_img_height, expo_time);
cur_frame_data.processing_time = (millis_since_boot() - start_time) / 1000.0;
VisionIpcBufExtra extra = {

@ -36,6 +36,7 @@ public:
VisionBuf *cur_yuv_buf;
VisionBuf *cur_camera_buf;
std::unique_ptr<VisionBuf[]> camera_bufs;
std::unique_ptr<VisionBuf[]> camera_bufs_raw;
std::unique_ptr<FrameMetadata[]> camera_bufs_metadata;
int out_img_width, out_img_height;

@ -1,3 +1,5 @@
#include "cdm.h"
#include <stdint.h>
#include <cassert>
#include <sys/ioctl.h>
@ -5,9 +7,11 @@
#include "media/cam_defs.h"
#include "media/cam_isp.h"
#include "media/cam_icp.h"
#include "media/cam_isp_ife.h"
#include "media/cam_sensor_cmn_header.h"
#include "media/cam_sync.h"
#include "third_party/linux/include/msm_media_info.h"
#include "common/util.h"
#include "common/swaglog.h"
@ -16,6 +20,23 @@
// For debugging:
// echo "4294967295" > /sys/module/cam_debug_util/parameters/debug_mdl
// Emits a CDM "register continuous" command into dst: the command header
// followed by `vals`, which the CDM writes to consecutive registers starting
// at offset `reg`.
// Returns the number of bytes written into dst so the caller can advance its
// write pointer. dst must have room for the header plus vals.size() words.
int write_cont(uint8_t *dst, uint32_t reg, std::vector<uint32_t> vals) {
  struct cdm_regcontinuous_cmd *cmd = (struct cdm_regcontinuous_cmd*)dst;
  cmd->cmd = CAM_CDM_CMD_REG_CONT;
  cmd->count = vals.size();
  cmd->offset = reg;
  cmd->reserved0 = 0;
  cmd->reserved1 = 0;

  // payload of register values immediately follows the command header
  uint32_t *vd = (uint32_t*)(dst + sizeof(struct cdm_regcontinuous_cmd));
  // range-for avoids the signed/unsigned comparison the original
  // `int i < vals.size()` loop had
  for (uint32_t v : vals) {
    *vd++ = v;
  }

  return sizeof(struct cdm_regcontinuous_cmd) + vals.size()*sizeof(uint32_t);
}
// ************** low level camera helpers ****************
int do_cam_control(int fd, int op_code, void *handle, int size) {
@ -190,8 +211,12 @@ void SpectraMaster::init() {
assert(isp_fd >= 0);
LOGD("opened isp");
// query icp for MMU handles
LOG("-- Query ICP for MMU handles");
//icp_fd = open_v4l_by_name_and_index("cam-icp");
//assert(icp_fd >= 0);
//LOGD("opened icp");
// query ISP for MMU handles
LOG("-- Query for MMU handles");
struct cam_isp_query_cap_cmd isp_query_cap_cmd = {0};
struct cam_query_cap_cmd query_cap_cmd = {0};
query_cap_cmd.handle_type = 1;
@ -204,6 +229,17 @@ void SpectraMaster::init() {
device_iommu = isp_query_cap_cmd.device_iommu.non_secure;
cdm_iommu = isp_query_cap_cmd.cdm_iommu.non_secure;
// query ICP for MMU handles
/*
struct cam_icp_query_cap_cmd icp_query_cap_cmd = {0};
query_cap_cmd.caps_handle = (uint64_t)&icp_query_cap_cmd;
query_cap_cmd.size = sizeof(icp_query_cap_cmd);
ret = do_cam_control(icp_fd, CAM_QUERY_CAP, &query_cap_cmd, sizeof(query_cap_cmd));
assert(ret == 0);
LOGD("using ICP MMU handle: %x", icp_query_cap_cmd.dev_iommu_handle.non_secure);
icp_device_iommu = icp_query_cap_cmd.dev_iommu_handle.non_secure;
*/
// subscribe
LOG("-- Subscribing");
struct v4l2_event_subscription sub = {0};
@ -245,8 +281,20 @@ void SpectraCamera::camera_open(VisionIpcServer *v, cl_device_id device_id, cl_c
return;
}
// size is driven by all the HW that handles frames,
// the video encoder has certain alignment requirements in this case
stride = VENUS_Y_STRIDE(COLOR_FMT_NV12, sensor->frame_width);
y_height = VENUS_Y_SCANLINES(COLOR_FMT_NV12, sensor->frame_height);
uv_height = VENUS_UV_SCANLINES(COLOR_FMT_NV12, sensor->frame_height);
uv_offset = stride*y_height;
yuv_size = uv_offset + stride*uv_height;
// TODO: for when the frames are coming out of the IFE directly
//uv_offset = ALIGNED_SIZE(stride*y_height, 0x1000);
//yuv_size = uv_offset + ALIGNED_SIZE(stride*uv_height, 0x1000);
open = true;
configISP();
//configICP(); // needs the new AGNOS kernel
configCSIPHY();
linkDevices();
@ -411,20 +459,29 @@ int SpectraCamera::sensors_init() {
return ret;
}
void SpectraCamera::config_ife(int io_mem_handle, int fence, int request_id, int buf0_idx) {
void SpectraCamera::config_bps(int idx, int request_id) {
  /*
    Handles per-frame BPS config.
    * BPS = Bayer Processing Segment
  */
  // NOTE(review): stub — per-frame BPS configuration is not implemented yet.
  // The casts below only suppress unused-parameter warnings until it is.
  (void)idx;
  (void)request_id;
}
void SpectraCamera::config_ife(int idx, int request_id, bool init) {
/*
Handles initial + per-frame IFE config.
IFE = Image Front End
* IFE = Image Front End
*/
int size = sizeof(struct cam_packet) + sizeof(struct cam_cmd_buf_desc)*2;
if (io_mem_handle != 0) {
if (!init) {
size += sizeof(struct cam_buf_io_cfg);
}
uint32_t cam_packet_handle = 0;
auto pkt = mm.alloc<struct cam_packet>(size, &cam_packet_handle);
if (io_mem_handle != 0) {
if (!init) {
pkt->header.op_code = CSLDeviceTypeIFE | OpcodesIFEUpdate; // 0xf000001
pkt->header.request_id = request_id;
} else {
@ -446,12 +503,12 @@ void SpectraCamera::config_ife(int io_mem_handle, int fence, int request_id, int
// *** first command ***
// TODO: support MMU
buf_desc[0].size = buf0_size;
buf_desc[0].size = ife_cmd.size;
buf_desc[0].length = 0;
buf_desc[0].type = CAM_CMD_BUF_DIRECT;
buf_desc[0].meta_data = 3;
buf_desc[0].mem_handle = buf0_handle;
buf_desc[0].offset = ALIGNED_SIZE(buf0_size, buf0_alignment)*buf0_idx;
buf_desc[0].meta_data = CAM_ISP_PACKET_META_COMMON;
buf_desc[0].mem_handle = ife_cmd.handle;
buf_desc[0].offset = ife_cmd.aligned_size()*idx;
// *** second command ***
// parsed by cam_isp_packet_generic_blob_handler
@ -514,7 +571,7 @@ void SpectraCamera::config_ife(int io_mem_handle, int fence, int request_id, int
static_assert(offsetof(struct isp_packet, type_2) == 0x60);
buf_desc[1].size = sizeof(tmp);
buf_desc[1].offset = io_mem_handle != 0 ? 0x60 : 0;
buf_desc[1].offset = !init ? 0x60 : 0;
buf_desc[1].length = buf_desc[1].size - buf_desc[1].offset;
buf_desc[1].type = CAM_CMD_BUF_GENERIC;
buf_desc[1].meta_data = CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON;
@ -523,37 +580,27 @@ void SpectraCamera::config_ife(int io_mem_handle, int fence, int request_id, int
}
// *** io config ***
if (io_mem_handle != 0) {
if (!init) {
// configure output frame
pkt->num_io_configs = 1;
pkt->io_configs_offset = sizeof(struct cam_cmd_buf_desc)*pkt->num_cmd_buf;
struct cam_buf_io_cfg *io_cfg = (struct cam_buf_io_cfg *)((char*)&pkt->payload + pkt->io_configs_offset);
io_cfg[0].offsets[0] = 0;
io_cfg[0].mem_handle[0] = io_mem_handle;
io_cfg[0].mem_handle[0] = buf_handle_raw[idx];
io_cfg[0].planes[0] = (struct cam_plane_cfg){
.width = sensor->frame_width,
.height = sensor->frame_height + sensor->extra_height,
.plane_stride = sensor->frame_stride,
.slice_height = sensor->frame_height + sensor->extra_height,
// these are for UBWC, we'll want that eventually
.meta_stride = 0x0,
.meta_size = 0x0,
.meta_offset = 0x0,
.packer_config = 0x0,
.mode_config = 0x0,
.tile_config = 0x0,
.h_init = 0x0,
.v_init = 0x0,
};
io_cfg[0].format = sensor->mipi_format; // CAM_FORMAT_UBWC_TP10 for YUV
io_cfg[0].color_space = CAM_COLOR_SPACE_BASE; // CAM_COLOR_SPACE_BT601_FULL for YUV
io_cfg[0].color_pattern = 0x5; // 0x0 for YUV
io_cfg[0].bpp = (sensor->mipi_format == CAM_FORMAT_MIPI_RAW_10 ? 0xa : 0xc); // bits per pixel
io_cfg[0].resource_type = CAM_ISP_IFE_OUT_RES_RDI_0; // CAM_ISP_IFE_OUT_RES_FULL for YUV
io_cfg[0].fence = fence;
io_cfg[0].format = sensor->mipi_format;
io_cfg[0].color_space = CAM_COLOR_SPACE_BASE;
io_cfg[0].color_pattern = 0x5;
io_cfg[0].bpp = (sensor->mipi_format == CAM_FORMAT_MIPI_RAW_10 ? 0xa : 0xc);
io_cfg[0].resource_type = CAM_ISP_IFE_OUT_RES_RDI_0;
io_cfg[0].fence = sync_objs[idx];
io_cfg[0].direction = CAM_BUF_OUTPUT;
io_cfg[0].subsample_pattern = 0x1;
io_cfg[0].framedrop_pattern = 0x1;
@ -573,7 +620,7 @@ void SpectraCamera::enqueue_buffer(int i, bool dp) {
int ret;
uint64_t request_id = request_ids[i];
if (buf_handle[i] && sync_objs[i]) {
if (buf_handle_raw[i] && sync_objs[i]) {
// wait
struct cam_sync_wait sync_wait = {0};
sync_wait.sync_obj = sync_objs[i];
@ -588,15 +635,18 @@ void SpectraCamera::enqueue_buffer(int i, bool dp) {
if (dp) buf.queue(i);
// destroy old output fence
struct cam_sync_info sync_destroy = {0};
sync_destroy.sync_obj = sync_objs[i];
ret = do_sync_control(m->cam_sync_fd, CAM_SYNC_DESTROY, &sync_destroy, sizeof(sync_destroy));
if (ret != 0) {
LOGE("failed to destroy sync object: %d %d", ret, sync_destroy.sync_obj);
for (auto so : {sync_objs, sync_objs_bps_out}) {
if (so[i] == 0) continue;
struct cam_sync_info sync_destroy = {0};
sync_destroy.sync_obj = so[i];
ret = do_sync_control(m->cam_sync_fd, CAM_SYNC_DESTROY, &sync_destroy, sizeof(sync_destroy));
if (ret != 0) {
LOGE("failed to destroy sync object: %d %d", ret, sync_destroy.sync_obj);
}
}
}
// create output fence
// create output fences
struct cam_sync_info sync_create = {0};
strcpy(sync_create.name, "NodeOutputPortFence");
ret = do_sync_control(m->cam_sync_fd, CAM_SYNC_CREATE, &sync_create, sizeof(sync_create));
@ -605,6 +655,14 @@ void SpectraCamera::enqueue_buffer(int i, bool dp) {
}
sync_objs[i] = sync_create.sync_obj;
/*
ret = do_cam_control(m->cam_sync_fd, CAM_SYNC_CREATE, &sync_create, sizeof(sync_create));
if (ret != 0) {
LOGE("failed to create fence: %d %d", ret, sync_create.sync_obj);
}
sync_objs_bps_out[i] = sync_create.sync_obj;
*/
// schedule request with camera request manager
struct cam_req_mgr_sched_request req_mgr_sched_request = {0};
req_mgr_sched_request.session_hdl = session_handle;
@ -618,20 +676,33 @@ void SpectraCamera::enqueue_buffer(int i, bool dp) {
// poke sensor, must happen after schedule
sensors_poke(request_id);
// submit request to the ife
config_ife(buf_handle[i], sync_objs[i], request_id, i);
// submit request to IFE and BPS
config_ife(i, request_id);
config_bps(i, request_id);
}
void SpectraCamera::camera_map_bufs() {
int ret;
for (int i = 0; i < FRAME_BUF_COUNT; i++) {
// configure ISP to put the image in place
struct cam_mem_mgr_map_cmd mem_mgr_map_cmd = {0};
mem_mgr_map_cmd.mmu_hdls[0] = m->device_iommu;
//mem_mgr_map_cmd.mmu_hdls[1] = m->icp_device_iommu;
//mem_mgr_map_cmd.num_hdl = 2;
mem_mgr_map_cmd.num_hdl = 1;
mem_mgr_map_cmd.flags = CAM_MEM_FLAG_HW_READ_WRITE;
// RAW bayer images
mem_mgr_map_cmd.fd = buf.camera_bufs_raw[i].fd;
ret = do_cam_control(m->video0_fd, CAM_REQ_MGR_MAP_BUF, &mem_mgr_map_cmd, sizeof(mem_mgr_map_cmd));
assert(ret == 0);
LOGD("map buf req: (fd: %d) 0x%x %d", buf.camera_bufs_raw[i].fd, mem_mgr_map_cmd.out.buf_handle, ret);
buf_handle_raw[i] = mem_mgr_map_cmd.out.buf_handle;
// final processed images
mem_mgr_map_cmd.fd = buf.camera_bufs[i].fd;
int ret = do_cam_control(m->video0_fd, CAM_REQ_MGR_MAP_BUF, &mem_mgr_map_cmd, sizeof(mem_mgr_map_cmd));
LOGD("map buf req: (fd: %d) 0x%x %d", buf.camera_bufs[i].fd, mem_mgr_map_cmd.out.buf_handle, ret);
ret = do_cam_control(m->video0_fd, CAM_REQ_MGR_MAP_BUF, &mem_mgr_map_cmd, sizeof(mem_mgr_map_cmd));
LOGD("map buf req: (fd: %d) 0x%x %d", buf.camera_bufs_raw[i].fd, mem_mgr_map_cmd.out.buf_handle, ret);
buf_handle[i] = mem_mgr_map_cmd.out.buf_handle;
}
enqueue_req_multi(1, FRAME_BUF_COUNT, 0);
@ -691,7 +762,7 @@ void SpectraCamera::configISP() {
.dt = sensor->frame_data_type,
.format = sensor->mipi_format,
.test_pattern = 0x2, // 0x3?
.test_pattern = sensor->bayer_pattern,
.usage_type = 0x0,
.left_start = 0,
@ -735,10 +806,64 @@ void SpectraCamera::configISP() {
LOGD("acquire isp dev");
// config IFE
alloc_w_mmu_hdl(m->video0_fd, FRAME_BUF_COUNT*ALIGNED_SIZE(buf0_size, buf0_alignment), (uint32_t*)&buf0_handle, buf0_alignment,
CAM_MEM_FLAG_HW_READ_WRITE | CAM_MEM_FLAG_KMD_ACCESS | CAM_MEM_FLAG_UMD_ACCESS | CAM_MEM_FLAG_CMD_BUF_TYPE,
m->device_iommu, m->cdm_iommu);
config_ife(0, 0, 1, 0);
ife_cmd.init(m, 67984, 0x20,
CAM_MEM_FLAG_HW_READ_WRITE | CAM_MEM_FLAG_KMD_ACCESS | CAM_MEM_FLAG_UMD_ACCESS | CAM_MEM_FLAG_CMD_BUF_TYPE,
m->device_iommu, m->cdm_iommu, FRAME_BUF_COUNT);
config_ife(0, 1, true);
}
void SpectraCamera::configICP() {
  // One-time setup of the ICP (and its BPS device) for this camera:
  // acquires a BPS device handle on the session, then allocates the
  // command/config buffers BPS processing will use.
  if (!enabled) return;

  /*
    Configures both the ICP and BPS.
  */

  // Acquire a BPS device: RAW MIPI in, NV12 out, sized to the sensor frame.
  struct cam_icp_acquire_dev_info icp_info = {
    .scratch_mem_size = 0x0,
    .dev_type = 0x1, // BPS
    .io_config_cmd_size = 0,
    .io_config_cmd_handle = 0,
    .secure_mode = 0,
    .num_out_res = 1,
    .in_res = (struct cam_icp_res_info){
      .format = 0x9, // RAW MIPI
      .width = sensor->frame_width,
      .height = sensor->frame_height,
      .fps = 20,
    },
    .out_res[0] = (struct cam_icp_res_info){
      .format = 0x3, // YUV420NV12
      .width = sensor->frame_width,
      .height = sensor->frame_height,
      .fps = 20,
    },
  };
  auto h = device_acquire(m->icp_fd, session_handle, &icp_info);
  assert(h);
  icp_dev_handle = *h;
  LOGD("acquire icp dev");

  // BPS CMD buffer
  // NOTE(review): buffer sizes below (464, 560, 0x40, 65216) look like they
  // were taken from ioctl dumps — confirm against the BPS firmware interface.
  unsigned char striping_out[] = "\x00";
  // per-frame command buffer: one aligned slot per frame buffer
  bps_cmd.init(m, FRAME_BUF_COUNT*ALIGNED_SIZE(464, 0x20), 0x20,
               CAM_MEM_FLAG_HW_READ_WRITE | CAM_MEM_FLAG_KMD_ACCESS | CAM_MEM_FLAG_UMD_ACCESS | CAM_MEM_FLAG_CMD_BUF_TYPE | CAM_MEM_FLAG_HW_SHARED_ACCESS,
               m->icp_device_iommu);
  // image quality settings buffer
  bps_iq.init(m, 560, 0x20,
              CAM_MEM_FLAG_HW_READ_WRITE | CAM_MEM_FLAG_KMD_ACCESS | CAM_MEM_FLAG_UMD_ACCESS | CAM_MEM_FLAG_CMD_BUF_TYPE | CAM_MEM_FLAG_HW_SHARED_ACCESS,
              m->icp_device_iommu);
  // CDM program array buffer
  bps_cdm_program_array.init(m, 0x40, 0x20,
                             CAM_MEM_FLAG_HW_READ_WRITE | CAM_MEM_FLAG_KMD_ACCESS | CAM_MEM_FLAG_UMD_ACCESS | CAM_MEM_FLAG_CMD_BUF_TYPE | CAM_MEM_FLAG_HW_SHARED_ACCESS,
                             m->icp_device_iommu);
  // striping output, seeded with a single zero byte
  bps_striping.init(m, sizeof(striping_out), 0x20,
                    CAM_MEM_FLAG_HW_READ_WRITE | CAM_MEM_FLAG_KMD_ACCESS | CAM_MEM_FLAG_UMD_ACCESS | CAM_MEM_FLAG_CMD_BUF_TYPE | CAM_MEM_FLAG_HW_SHARED_ACCESS,
                    m->icp_device_iommu);
  memcpy(bps_striping.ptr, striping_out, sizeof(striping_out));
  // CDM striping batch list buffer
  bps_cdm_striping_bl.init(m, 65216, 0x20,
                           CAM_MEM_FLAG_HW_READ_WRITE | CAM_MEM_FLAG_KMD_ACCESS | CAM_MEM_FLAG_UMD_ACCESS | CAM_MEM_FLAG_CMD_BUF_TYPE | CAM_MEM_FLAG_HW_SHARED_ACCESS,
                           m->icp_device_iommu);
}
void SpectraCamera::configCSIPHY() {

@ -63,8 +63,27 @@ public:
unique_fd video0_fd;
unique_fd cam_sync_fd;
unique_fd isp_fd;
unique_fd icp_fd;
int device_iommu = -1;
int cdm_iommu = -1;
int icp_device_iommu = -1;
};
// Wrapper around a buffer allocated through the camera memory manager
// (alloc_w_mmu_hdl), optionally mapped into one or two IOMMUs. Holds the
// mapped pointer, its mem handle, and the alignment used for per-frame
// slot offsets (see aligned_size()).
// NOTE(review): no release path — the underlying allocation's lifetime is
// managed elsewhere; confirm before adding a destructor.
class SpectraBuf {
public:
  // Allocate `count` aligned slots of `s` bytes each with the given flags.
  // Asserts on allocation failure.
  void init(SpectraMaster *m, int s, int a, int flags, int mmu_hdl = 0, int mmu_hdl2 = 0, int count=1) {
    size = s;
    alignment = a;
    ptr = alloc_w_mmu_hdl(m->video0_fd, ALIGNED_SIZE(size, alignment)*count, (uint32_t*)&handle, alignment, flags, mmu_hdl, mmu_hdl2);
    assert(ptr != nullptr);
  }

  // Size of a single slot rounded up to the alignment boundary; used to
  // compute per-frame offsets into the shared allocation.
  uint32_t aligned_size() const {
    return ALIGNED_SIZE(size, alignment);
  }

  void *ptr = nullptr;
  int size = 0, alignment = 0, handle = 0;
};
class SpectraCamera {
@ -76,7 +95,8 @@ public:
void handle_camera_event(const cam_req_mgr_message *event_data);
void camera_close();
void camera_map_bufs();
void config_ife(int io_mem_handle, int fence, int request_id, int buf0_idx);
void config_bps(int idx, int request_id);
void config_ife(int idx, int request_id, bool init=false);
int clear_req_queue();
void enqueue_buffer(int i, bool dp);
@ -89,6 +109,7 @@ public:
bool openSensor();
void configISP();
void configICP();
void configCSIPHY();
void linkDevices();
@ -99,22 +120,38 @@ public:
CameraConfig cc;
std::unique_ptr<const SensorInfo> sensor;
// YUV image size
uint32_t stride;
uint32_t y_height;
uint32_t uv_height;
uint32_t uv_offset;
uint32_t yuv_size;
unique_fd sensor_fd;
unique_fd csiphy_fd;
int32_t session_handle = -1;
int32_t sensor_dev_handle = -1;
int32_t isp_dev_handle = -1;
int32_t icp_dev_handle = -1;
int32_t csiphy_dev_handle = -1;
int32_t link_handle = -1;
const int buf0_size = 65624; // unclear what this is and how it's determined, for internal ISP use? it's just copied from an ioctl dump
const int buf0_alignment = 0x20;
SpectraBuf ife_cmd;
SpectraBuf bps_cmd;
SpectraBuf bps_cdm_buffer;
SpectraBuf bps_cdm_program_array;
SpectraBuf bps_cdm_striping_bl;
SpectraBuf bps_iq;
SpectraBuf bps_striping;
int buf0_handle = 0;
int buf_handle[FRAME_BUF_COUNT] = {};
int buf_handle_raw[FRAME_BUF_COUNT] = {};
int sync_objs[FRAME_BUF_COUNT] = {};
int sync_objs_bps_out[FRAME_BUF_COUNT] = {};
uint64_t request_ids[FRAME_BUF_COUNT] = {};
uint64_t request_id_last = 0;
uint64_t frame_id_last = 0;

@ -5,6 +5,8 @@
#include <map>
#include <utility>
#include <vector>
#include "media/cam_isp.h"
#include "media/cam_sensor.h"
#include "cereal/gen/cpp/log.capnp.h"
@ -58,6 +60,7 @@ public:
std::vector<i2c_random_wr_payload> start_reg_array;
std::vector<i2c_random_wr_payload> init_reg_array;
uint32_t bayer_pattern = CAM_ISP_PATTERN_BAYER_GRGRGR;
uint32_t mipi_format;
uint32_t mclk_frequency;
uint32_t frame_data_type;

@ -5,11 +5,12 @@ set -e
# no CCI and UTIL, very spammy
echo 0xfffdbfff | sudo tee /sys/module/cam_debug_util/parameters/debug_mdl
echo 0 | sudo tee /sys/module/cam_debug_util/parameters/debug_mdl
sudo dmesg -C
scons -u -j8 --minimal .
export DEBUG_FRAMES=1
#export DISABLE_ROAD=1 DISABLE_WIDE_ROAD=1
export DISABLE_DRIVER=1
export DISABLE_ROAD=1 DISABLE_WIDE_ROAD=1
#export DISABLE_DRIVER=1
#export LOGPRINT=debug
./camerad

@ -0,0 +1,2 @@
#!/usr/bin/env bash
# Run camerad under the ioctl interceptor (LD_PRELOAD shim) with the road
# cameras disabled and debug frame logging enabled.
DISABLE_ROAD=1 DISABLE_WIDE_ROAD=1 DEBUG_FRAMES=1 LOGPRINT=debug LD_PRELOAD=/data/tici_test_scripts/isp/interceptor/tmpioctl.so ./camerad
Loading…
Cancel
Save