diff --git a/.vscode/launch.json b/.vscode/launch.json
index a6b341d9ea..f090061c42 100644
--- a/.vscode/launch.json
+++ b/.vscode/launch.json
@@ -23,6 +23,11 @@
"id": "args",
"description": "Arguments to pass to the process",
"type": "promptString"
+ },
+ {
+ "id": "replayArg",
+ "type": "promptString",
+ "description": "Enter route or segment to replay."
}
],
"configurations": [
@@ -40,7 +45,41 @@
"type": "cppdbg",
"request": "launch",
"program": "${workspaceFolder}/${input:cpp_process}",
- "cwd": "${workspaceFolder}",
+ "cwd": "${workspaceFolder}"
+ },
+ {
+ "name": "Attach LLDB to Replay drive",
+ "type": "lldb",
+ "request": "attach",
+ "pid": "${command:pickMyProcess}",
+ "initCommands": [
+ "script import time; time.sleep(3)"
+ ]
+ },
+ {
+ "name": "Replay drive",
+ "type": "debugpy",
+ "request": "launch",
+ "program": "${workspaceFolder}/opendbc/safety/tests/safety_replay/replay_drive.py",
+ "args": [
+ "${input:replayArg}"
+ ],
+ "console": "integratedTerminal",
+ "justMyCode": false,
+ "env": {
+ "PYTHONPATH": "${workspaceFolder}"
+ },
+ "subProcess": true,
+ "stopOnEntry": false
+ }
+ ],
+ "compounds": [
+ {
+ "name": "Replay drive + Safety LLDB",
+ "configurations": [
+ "Replay drive",
+ "Attach LLDB to Replay drive"
+ ]
}
]
}
\ No newline at end of file
diff --git a/docs/DEBUGGING_SAFETY.md b/docs/DEBUGGING_SAFETY.md
new file mode 100644
index 0000000000..cd0a46b446
--- /dev/null
+++ b/docs/DEBUGGING_SAFETY.md
@@ -0,0 +1,30 @@
+# Debugging Panda Safety with Replay Drive + LLDB
+
+## 1. Start the debugger in VS Code
+
+* Select **Replay drive + Safety LLDB**.
+* Enter the route or segment when prompted.
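+* A route ID like openpilot's public demo route (`a2a0ccea32023010|2023-07-27--13-01-19`), or a single segment of it (e.g. append `/0`), should be accepted here.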
+[
](https://github.com/user-attachments/assets/b0cc320a-083e-46a7-a9f8-ca775bbe5604)
+
+## 2. Attach LLDB
+
+* When prompted, pick the running **`replay_drive` process**.
+* ⚠️ Attach quickly, or `replay_drive` will start consuming messages.
+
+> [!TIP]
+> Add a Python breakpoint at the start of `replay_drive.py` to pause execution and give yourself time to attach LLDB.
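+>
+> A minimal sketch of that idea (hypothetical placement; the actual structure of `replay_drive.py` may differ):
+>
+> ```python
+> # Hypothetical sketch, not the real replay_drive.py code: pausing at the entry
+> # point before any CAN messages are consumed leaves time to attach LLDB.
+> if __name__ == "__main__":
+>     breakpoint()  # built-in Python breakpoint; resume once LLDB is attached
+>     # ... the replay loop runs after you continue
+> ```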
+
+## 3. Set breakpoints in VS Code
+Breakpoints can be set directly in `modes/xxx.h` (or any C file).
+No extra LLDB commands are required — just place breakpoints in the editor.
+
+## 4. Resume execution
+Once attached, you can step through both the Python replay script and the C safety code as the CAN logs are replayed.
+
+> [!NOTE]
+> * Use short routes for quicker iteration.
+> * Pause `replay_drive` early to avoid wasting log messages.
+
+## Video
+
+View a demo of this workflow on the PR that added it: https://github.com/commaai/openpilot/pull/36055#issue-3352911578
\ No newline at end of file
diff --git a/selfdrive/car/tests/test_car_interfaces.py b/selfdrive/car/tests/test_car_interfaces.py
index 5c4729ee9a..c40443d7e7 100644
--- a/selfdrive/car/tests/test_car_interfaces.py
+++ b/selfdrive/car/tests/test_car_interfaces.py
@@ -1,15 +1,12 @@
import os
-import math
import hypothesis.strategies as st
from hypothesis import Phase, given, settings
from parameterized import parameterized
from cereal import car
from opendbc.car import DT_CTRL
-from opendbc.car.car_helpers import interfaces
from opendbc.car.structs import CarParams
-from opendbc.car.tests.test_car_interfaces import get_fuzzy_car_interface_args
-from opendbc.car.fw_versions import FW_VERSIONS, FW_QUERY_CONFIGS
+from opendbc.car.tests.test_car_interfaces import get_fuzzy_car_interface
from opendbc.car.mock.values import CAR as MOCK
from opendbc.car.values import PLATFORMS
from openpilot.selfdrive.controls.lib.latcontrol_angle import LatControlAngle
@@ -18,11 +15,6 @@ from openpilot.selfdrive.controls.lib.latcontrol_torque import LatControlTorque
from openpilot.selfdrive.controls.lib.longcontrol import LongControl
from openpilot.selfdrive.test.fuzzy_generation import FuzzyGenerator
-ALL_ECUS = {ecu for ecus in FW_VERSIONS.values() for ecu in ecus.keys()}
-ALL_ECUS |= {ecu for config in FW_QUERY_CONFIGS.values() for ecu in config.extra_ecus}
-
-ALL_REQUESTS = {tuple(r.request) for config in FW_QUERY_CONFIGS.values() for r in config.requests}
-
MAX_EXAMPLES = int(os.environ.get('MAX_EXAMPLES', '60'))
@@ -34,39 +26,8 @@ class TestCarInterfaces:
phases=(Phase.reuse, Phase.generate, Phase.shrink))
@given(data=st.data())
def test_car_interfaces(self, car_name, data):
- CarInterface = interfaces[car_name]
-
- args = get_fuzzy_car_interface_args(data.draw)
-
- car_params = CarInterface.get_params(car_name, args['fingerprints'], args['car_fw'],
- alpha_long=args['alpha_long'], is_release=False, docs=False)
- car_params = car_params.as_reader()
- car_interface = CarInterface(car_params)
- assert car_params
- assert car_interface
-
- assert car_params.mass > 1
- assert car_params.wheelbase > 0
- # centerToFront is center of gravity to front wheels, assert a reasonable range
- assert car_params.wheelbase * 0.3 < car_params.centerToFront < car_params.wheelbase * 0.7
- assert car_params.maxLateralAccel > 0
-
- # Longitudinal sanity checks
- assert len(car_params.longitudinalTuning.kpV) == len(car_params.longitudinalTuning.kpBP)
- assert len(car_params.longitudinalTuning.kiV) == len(car_params.longitudinalTuning.kiBP)
-
- # Lateral sanity checks
- if car_params.steerControlType != CarParams.SteerControlType.angle:
- tune = car_params.lateralTuning
- if tune.which() == 'pid':
- if car_name != MOCK.MOCK:
- assert not math.isnan(tune.pid.kf) and tune.pid.kf > 0
- assert len(tune.pid.kpV) > 0 and len(tune.pid.kpV) == len(tune.pid.kpBP)
- assert len(tune.pid.kiV) > 0 and len(tune.pid.kiV) == len(tune.pid.kiBP)
-
- elif tune.which() == 'torque':
- assert not math.isnan(tune.torque.kf) and tune.torque.kf > 0
- assert not math.isnan(tune.torque.friction) and tune.torque.friction > 0
+ car_interface = get_fuzzy_car_interface(car_name, data.draw)
+ car_params = car_interface.CP.as_reader()
cc_msg = FuzzyGenerator.get_random_msg(data.draw, car.CarControl, real_floats=True)
# Run car interface
diff --git a/selfdrive/controls/lib/desire_helper.py b/selfdrive/controls/lib/desire_helper.py
index 730aaeb8f1..ee4567f1e9 100644
--- a/selfdrive/controls/lib/desire_helper.py
+++ b/selfdrive/controls/lib/desire_helper.py
@@ -40,6 +40,10 @@ class DesireHelper:
self.prev_one_blinker = False
self.desire = log.Desire.none
+ @staticmethod
+ def get_lane_change_direction(CS):
+ return LaneChangeDirection.left if CS.leftBlinker else LaneChangeDirection.right
+
def update(self, carstate, lateral_active, lane_change_prob):
v_ego = carstate.vEgo
one_blinker = carstate.leftBlinker != carstate.rightBlinker
@@ -53,12 +57,13 @@ class DesireHelper:
if self.lane_change_state == LaneChangeState.off and one_blinker and not self.prev_one_blinker and not below_lane_change_speed:
self.lane_change_state = LaneChangeState.preLaneChange
self.lane_change_ll_prob = 1.0
+ # Initialize lane change direction to prevent UI alert flicker
+ self.lane_change_direction = self.get_lane_change_direction(carstate)
# LaneChangeState.preLaneChange
elif self.lane_change_state == LaneChangeState.preLaneChange:
- # Set lane change direction
- self.lane_change_direction = LaneChangeDirection.left if \
- carstate.leftBlinker else LaneChangeDirection.right
+ # Update lane change direction
+ self.lane_change_direction = self.get_lane_change_direction(carstate)
torque_applied = carstate.steeringPressed and \
((carstate.steeringTorque > 0 and self.lane_change_direction == LaneChangeDirection.left) or
diff --git a/selfdrive/modeld/models/driving_policy.onnx b/selfdrive/modeld/models/driving_policy.onnx
index eb6bd7b8a4..7b87846748 100644
--- a/selfdrive/modeld/models/driving_policy.onnx
+++ b/selfdrive/modeld/models/driving_policy.onnx
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:72e98a95541f200bd2faeae8d718997483696fd4801fc7d718c167b05854707d
+oid sha256:ebb38a934d6472c061cc6010f46d9720ca132d631a47e585a893bdd41ade2419
size 12343535
diff --git a/selfdrive/modeld/models/driving_vision.onnx b/selfdrive/modeld/models/driving_vision.onnx
index ce0dc927e7..4b4fa05df8 100644
--- a/selfdrive/modeld/models/driving_vision.onnx
+++ b/selfdrive/modeld/models/driving_vision.onnx
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:e66bb8d53eced3786ed71a59b55ffc6810944cb217f0518621cc76303260a1ef
+oid sha256:befac016a247b7ad5dc5b55d339d127774ed7bd2b848f1583f72aa4caee37781
size 46271991
diff --git a/selfdrive/test/process_replay/ref_commit b/selfdrive/test/process_replay/ref_commit
index e1133061f5..a833fadb94 100644
--- a/selfdrive/test/process_replay/ref_commit
+++ b/selfdrive/test/process_replay/ref_commit
@@ -1 +1 @@
-4c677a3ebcbd3d4faa3de98e3fb9c0bb83b47926
\ No newline at end of file
+afcab1abb62b9d5678342956cced4712f44e909e
\ No newline at end of file
diff --git a/tools/replay/SConscript b/tools/replay/SConscript
index 18849407cf..99c8263a8c 100644
--- a/tools/replay/SConscript
+++ b/tools/replay/SConscript
@@ -12,7 +12,7 @@ else:
base_libs.append('OpenCL')
replay_lib_src = ["replay.cc", "consoleui.cc", "camera.cc", "filereader.cc", "logreader.cc", "framereader.cc",
- "route.cc", "util.cc", "seg_mgr.cc", "timeline.cc", "api.cc"]
+ "route.cc", "util.cc", "seg_mgr.cc", "timeline.cc", "api.cc", "qcom_decoder.cc"]
replay_lib = replay_env.Library("replay", replay_lib_src, LIBS=base_libs, FRAMEWORKS=base_frameworks)
Export('replay_lib')
replay_libs = [replay_lib, 'avutil', 'avcodec', 'avformat', 'bz2', 'zstd', 'curl', 'yuv', 'ncurses'] + base_libs
diff --git a/tools/replay/framereader.cc b/tools/replay/framereader.cc
index ed88626be3..f2b1faf2c4 100644
--- a/tools/replay/framereader.cc
+++ b/tools/replay/framereader.cc
@@ -8,6 +8,7 @@
#include "common/util.h"
#include "third_party/libyuv/include/libyuv.h"
#include "tools/replay/util.h"
+#include "system/hardware/hw.h"
#ifdef __APPLE__
#define HW_DEVICE_TYPE AV_HWDEVICE_TYPE_VIDEOTOOLBOX
@@ -37,7 +38,13 @@ struct DecoderManager {
return it->second.get();
}
-  auto decoder = std::make_unique<VideoDecoder>();
+  std::unique_ptr<VideoDecoder> decoder;
+  if (Hardware::TICI() && hw_decoder) {
+    decoder = std::make_unique<QcomVideoDecoder>();
+  } else {
+    decoder = std::make_unique<FFmpegVideoDecoder>();
+  }
+
if (!decoder->open(codecpar, hw_decoder)) {
decoder.reset(nullptr);
}
@@ -114,19 +121,19 @@ bool FrameReader::get(int idx, VisionBuf *buf) {
// class VideoDecoder
-VideoDecoder::VideoDecoder() {
+FFmpegVideoDecoder::FFmpegVideoDecoder() {
av_frame_ = av_frame_alloc();
hw_frame_ = av_frame_alloc();
}
-VideoDecoder::~VideoDecoder() {
+FFmpegVideoDecoder::~FFmpegVideoDecoder() {
if (hw_device_ctx) av_buffer_unref(&hw_device_ctx);
if (decoder_ctx) avcodec_free_context(&decoder_ctx);
av_frame_free(&av_frame_);
av_frame_free(&hw_frame_);
}
-bool VideoDecoder::open(AVCodecParameters *codecpar, bool hw_decoder) {
+bool FFmpegVideoDecoder::open(AVCodecParameters *codecpar, bool hw_decoder) {
const AVCodec *decoder = avcodec_find_decoder(codecpar->codec_id);
if (!decoder) return false;
@@ -149,7 +156,7 @@ bool VideoDecoder::open(AVCodecParameters *codecpar, bool hw_decoder) {
return true;
}
-bool VideoDecoder::initHardwareDecoder(AVHWDeviceType hw_device_type) {
+bool FFmpegVideoDecoder::initHardwareDecoder(AVHWDeviceType hw_device_type) {
const AVCodecHWConfig *config = nullptr;
for (int i = 0; (config = avcodec_get_hw_config(decoder_ctx->codec, i)) != nullptr; i++) {
if (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX && config->device_type == hw_device_type) {
@@ -175,7 +182,7 @@ bool VideoDecoder::initHardwareDecoder(AVHWDeviceType hw_device_type) {
return true;
}
-bool VideoDecoder::decode(FrameReader *reader, int idx, VisionBuf *buf) {
+bool FFmpegVideoDecoder::decode(FrameReader *reader, int idx, VisionBuf *buf) {
int current_idx = idx;
if (idx != reader->prev_idx + 1) {
// seeking to the nearest key frame
@@ -219,7 +226,7 @@ bool VideoDecoder::decode(FrameReader *reader, int idx, VisionBuf *buf) {
return false;
}
-AVFrame *VideoDecoder::decodeFrame(AVPacket *pkt) {
+AVFrame *FFmpegVideoDecoder::decodeFrame(AVPacket *pkt) {
int ret = avcodec_send_packet(decoder_ctx, pkt);
if (ret < 0) {
rError("Error sending a packet for decoding: %d", ret);
@@ -239,7 +246,7 @@ AVFrame *VideoDecoder::decodeFrame(AVPacket *pkt) {
return (av_frame_->format == hw_pix_fmt) ? hw_frame_ : av_frame_;
}
-bool VideoDecoder::copyBuffer(AVFrame *f, VisionBuf *buf) {
+bool FFmpegVideoDecoder::copyBuffer(AVFrame *f, VisionBuf *buf) {
if (hw_pix_fmt == HW_PIX_FMT) {
for (int i = 0; i < height/2; i++) {
memcpy(buf->y + (i*2 + 0)*buf->stride, f->data[0] + (i*2 + 0)*f->linesize[0], width);
@@ -256,3 +263,45 @@ bool VideoDecoder::copyBuffer(AVFrame *f, VisionBuf *buf) {
}
return true;
}
+
+bool QcomVideoDecoder::open(AVCodecParameters *codecpar, bool hw_decoder) {
+ if (codecpar->codec_id != AV_CODEC_ID_HEVC) {
+ rError("Hardware decoder only supports HEVC codec");
+ return false;
+ }
+ width = codecpar->width;
+ height = codecpar->height;
+ msm_vidc.init(VIDEO_DEVICE, width, height, V4L2_PIX_FMT_HEVC);
+ return true;
+}
+
+bool QcomVideoDecoder::decode(FrameReader *reader, int idx, VisionBuf *buf) {
+ int from_idx = idx;
+ if (idx != reader->prev_idx + 1) {
+ // seeking to the nearest key frame
+ for (int i = idx; i >= 0; --i) {
+ if (reader->packets_info[i].flags & AV_PKT_FLAG_KEY) {
+ from_idx = i;
+ break;
+ }
+ }
+
+ auto pos = reader->packets_info[from_idx].pos;
+ int ret = avformat_seek_file(reader->input_ctx, 0, pos, pos, pos, AVSEEK_FLAG_BYTE);
+ if (ret < 0) {
+ rError("Failed to seek to byte position %lld: %d", pos, AVERROR(ret));
+ return false;
+ }
+ }
+ reader->prev_idx = idx;
+ bool result = false;
+ AVPacket pkt;
+ msm_vidc.avctx = reader->input_ctx;
+ for (int i = from_idx; i <= idx; ++i) {
+ if (av_read_frame(reader->input_ctx, &pkt) == 0) {
+ result = msm_vidc.decodeFrame(&pkt, buf) && (i == idx);
+ av_packet_unref(&pkt);
+ }
+ }
+ return result;
+}
diff --git a/tools/replay/framereader.h b/tools/replay/framereader.h
index a15847e311..1fb3cdfeb1 100644
--- a/tools/replay/framereader.h
+++ b/tools/replay/framereader.h
@@ -6,6 +6,7 @@
#include "msgq/visionipc/visionbuf.h"
#include "tools/replay/filereader.h"
#include "tools/replay/util.h"
+#include "tools/replay/qcom_decoder.h"
extern "C" {
#include <libavcodec/avcodec.h>
@@ -40,11 +41,18 @@ public:
class VideoDecoder {
public:
- VideoDecoder();
- ~VideoDecoder();
- bool open(AVCodecParameters *codecpar, bool hw_decoder);
- bool decode(FrameReader *reader, int idx, VisionBuf *buf);
+ virtual ~VideoDecoder() = default;
+ virtual bool open(AVCodecParameters *codecpar, bool hw_decoder) = 0;
+ virtual bool decode(FrameReader *reader, int idx, VisionBuf *buf) = 0;
int width = 0, height = 0;
+};
+
+class FFmpegVideoDecoder : public VideoDecoder {
+public:
+ FFmpegVideoDecoder();
+ ~FFmpegVideoDecoder() override;
+ bool open(AVCodecParameters *codecpar, bool hw_decoder) override;
+ bool decode(FrameReader *reader, int idx, VisionBuf *buf) override;
private:
bool initHardwareDecoder(AVHWDeviceType hw_device_type);
@@ -56,3 +64,14 @@ private:
AVPixelFormat hw_pix_fmt = AV_PIX_FMT_NONE;
AVBufferRef *hw_device_ctx = nullptr;
};
+
+class QcomVideoDecoder : public VideoDecoder {
+public:
+ QcomVideoDecoder() {};
+ ~QcomVideoDecoder() override {};
+ bool open(AVCodecParameters *codecpar, bool hw_decoder) override;
+ bool decode(FrameReader *reader, int idx, VisionBuf *buf) override;
+
+private:
+ MsmVidc msm_vidc = MsmVidc();
+};
diff --git a/tools/replay/qcom_decoder.cc b/tools/replay/qcom_decoder.cc
new file mode 100644
index 0000000000..eb5409daa3
--- /dev/null
+++ b/tools/replay/qcom_decoder.cc
@@ -0,0 +1,346 @@
+#include "qcom_decoder.h"
+
+#include <fcntl.h>
+#include "third_party/linux/include/v4l2-controls.h"
+#include <poll.h>
+
+
+#include "common/swaglog.h"
+#include "common/util.h"
+
+// echo "0xFFFF" > /sys/kernel/debug/msm_vidc/debug_level
+
+static void copyBuffer(VisionBuf *src_buf, VisionBuf *dst_buf) {
+ // Copy Y plane
+ memcpy(dst_buf->y, src_buf->y, src_buf->height * src_buf->stride);
+ // Copy UV plane
+ memcpy(dst_buf->uv, src_buf->uv, src_buf->height / 2 * src_buf->stride);
+}
+
+static void request_buffers(int fd, v4l2_buf_type buf_type, unsigned int count) {
+ struct v4l2_requestbuffers reqbuf = {
+ .count = count,
+ .type = buf_type,
+ .memory = V4L2_MEMORY_USERPTR
+ };
+ util::safe_ioctl(fd, VIDIOC_REQBUFS, &reqbuf, "VIDIOC_REQBUFS failed");
+}
+
+MsmVidc::~MsmVidc() {
+ if (fd > 0) {
+ close(fd);
+ }
+}
+
+bool MsmVidc::init(const char* dev, size_t width, size_t height, uint64_t codec) {
+ LOG("Initializing msm_vidc device %s", dev);
+ this->w = width;
+ this->h = height;
+ this->fd = open(dev, O_RDWR, 0);
+ if (fd < 0) {
+ LOGE("failed to open video device %s", dev);
+ return false;
+ }
+ subscribeEvents();
+ v4l2_buf_type out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ setPlaneFormat(out_type, V4L2_PIX_FMT_HEVC); // Also allocates the output buffer
+ setFPS(FPS);
+ request_buffers(fd, out_type, OUTPUT_BUFFER_COUNT);
+ util::safe_ioctl(fd, VIDIOC_STREAMON, &out_type, "VIDIOC_STREAMON OUTPUT failed");
+ restartCapture();
+ setupPolling();
+
+ this->initialized = true;
+ return true;
+}
+
+VisionBuf* MsmVidc::decodeFrame(AVPacket *pkt, VisionBuf *buf) {
+ assert(initialized && (pkt != nullptr) && (buf != nullptr));
+
+ this->frame_ready = false;
+ this->current_output_buf = buf;
+ bool sent_packet = false;
+
+ while (!this->frame_ready) {
+ if (!sent_packet) {
+ int buf_index = getBufferUnlocked();
+ if (buf_index >= 0) {
+ assert(buf_index < out_buf_cnt);
+ sendPacket(buf_index, pkt);
+ sent_packet = true;
+ }
+ }
+
+ if (poll(pfd, nfds, -1) < 0) {
+ LOGE("poll() error: %d", errno);
+ return nullptr;
+ }
+
+ if (VisionBuf* result = processEvents()) {
+ return result;
+ }
+ }
+
+ return buf;
+}
+
+VisionBuf* MsmVidc::processEvents() {
+ for (int idx = 0; idx < nfds; idx++) {
+ short revents = pfd[idx].revents;
+ if (!revents) continue;
+
+ if (idx == ev[EV_VIDEO]) {
+ if (revents & (POLLIN | POLLRDNORM)) {
+ VisionBuf *result = handleCapture();
+ if (result == this->current_output_buf) {
+ this->frame_ready = true;
+ }
+ }
+ if (revents & (POLLOUT | POLLWRNORM)) {
+ handleOutput();
+ }
+ if (revents & POLLPRI) {
+ handleEvent();
+ }
+ } else {
+ LOGE("Unexpected event on fd %d", pfd[idx].fd);
+ }
+ }
+ return nullptr;
+}
+
+VisionBuf* MsmVidc::handleCapture() {
+ struct v4l2_buffer buf = {0};
+ struct v4l2_plane planes[1] = {0};
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ buf.memory = V4L2_MEMORY_USERPTR;
+ buf.m.planes = planes;
+ buf.length = 1;
+ util::safe_ioctl(this->fd, VIDIOC_DQBUF, &buf, "VIDIOC_DQBUF CAPTURE failed");
+
+ if (this->reconfigure_pending || buf.m.planes[0].bytesused == 0) {
+ return nullptr;
+ }
+
+ copyBuffer(&cap_bufs[buf.index], this->current_output_buf);
+ queueCaptureBuffer(buf.index);
+ return this->current_output_buf;
+}
+
+bool MsmVidc::subscribeEvents() {
+ for (uint32_t event : subscriptions) {
+ struct v4l2_event_subscription sub = { .type = event};
+ util::safe_ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub, "VIDIOC_SUBSCRIBE_EVENT failed");
+ }
+ return true;
+}
+
+bool MsmVidc::setPlaneFormat(enum v4l2_buf_type type, uint32_t fourcc) {
+ struct v4l2_format fmt = {.type = type};
+ struct v4l2_pix_format_mplane *pix = &fmt.fmt.pix_mp;
+ *pix = {
+ .width = (__u32)this->w,
+ .height = (__u32)this->h,
+ .pixelformat = fourcc
+ };
+ util::safe_ioctl(fd, VIDIOC_S_FMT, &fmt, "VIDIOC_S_FMT failed");
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ this->out_buf_size = pix->plane_fmt[0].sizeimage;
+ int ion_size = this->out_buf_size * OUTPUT_BUFFER_COUNT; // Output (input) buffers are ION buffer.
+ this->out_buf.allocate(ion_size); // mmap rw
+ for (int i = 0; i < OUTPUT_BUFFER_COUNT; i++) {
+ this->out_buf_off[i] = i * this->out_buf_size;
+ this->out_buf_addr[i] = (char *)this->out_buf.addr + this->out_buf_off[i];
+ this->out_buf_flag[i] = false;
+ }
+ LOGD("Set output buffer size to %d, count %d, addr %p", this->out_buf_size, OUTPUT_BUFFER_COUNT, this->out_buf.addr);
+ } else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ request_buffers(this->fd, type, CAPTURE_BUFFER_COUNT);
+ util::safe_ioctl(fd, VIDIOC_G_FMT, &fmt, "VIDIOC_G_FMT failed");
+ const __u32 y_size = pix->plane_fmt[0].sizeimage;
+ const __u32 y_stride = pix->plane_fmt[0].bytesperline;
+ for (int i = 0; i < CAPTURE_BUFFER_COUNT; i++) {
+ size_t uv_offset = (size_t)y_stride * pix->height;
+ size_t required = uv_offset + (y_stride * pix->height / 2); // enough for Y + UV. For linear NV12, UV plane starts at y_stride * height.
+ size_t alloc_size = std::max(y_size, required);
+ this->cap_bufs[i].allocate(alloc_size);
+ this->cap_bufs[i].init_yuv(pix->width, pix->height, y_stride, uv_offset);
+ }
+ LOGD("Set capture buffer size to %d, count %d, addr %p, extradata size %d",
+ pix->plane_fmt[0].sizeimage, CAPTURE_BUFFER_COUNT, this->cap_bufs[0].addr, pix->plane_fmt[1].sizeimage);
+ }
+ return true;
+}
+
+bool MsmVidc::setFPS(uint32_t fps) {
+ struct v4l2_streamparm streamparam = {
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .parm.output.timeperframe = {1, fps}
+ };
+ util::safe_ioctl(fd, VIDIOC_S_PARM, &streamparam, "VIDIOC_S_PARM failed");
+ return true;
+}
+
+bool MsmVidc::restartCapture() {
+ // stop if already initialized
+ enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ if (this->initialized) {
+ LOGD("Restarting capture, flushing buffers...");
+ util::safe_ioctl(this->fd, VIDIOC_STREAMOFF, &type, "VIDIOC_STREAMOFF CAPTURE failed");
+ struct v4l2_requestbuffers reqbuf = {.type = type, .memory = V4L2_MEMORY_USERPTR};
+ util::safe_ioctl(this->fd, VIDIOC_REQBUFS, &reqbuf, "VIDIOC_REQBUFS failed");
+ for (size_t i = 0; i < CAPTURE_BUFFER_COUNT; ++i) {
+ this->cap_bufs[i].free();
+ this->cap_buf_flag[i] = false; // mark as not queued
+ cap_bufs[i].~VisionBuf();
+ new (&cap_bufs[i]) VisionBuf();
+ }
+ }
+ // setup, start and queue capture buffers
+ setDBP();
+ setPlaneFormat(type, V4L2_PIX_FMT_NV12);
+ util::safe_ioctl(this->fd, VIDIOC_STREAMON, &type, "VIDIOC_STREAMON CAPTURE failed");
+ for (size_t i = 0; i < CAPTURE_BUFFER_COUNT; ++i) {
+ queueCaptureBuffer(i);
+ }
+
+ return true;
+}
+
+bool MsmVidc::queueCaptureBuffer(int i) {
+ struct v4l2_buffer buf = {0};
+ struct v4l2_plane planes[1] = {0};
+
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ buf.memory = V4L2_MEMORY_USERPTR;
+ buf.index = i;
+ buf.m.planes = planes;
+ buf.length = 1;
+ // decoded frame plane
+ planes[0].m.userptr = (unsigned long)this->cap_bufs[i].addr; // no security
+ planes[0].length = this->cap_bufs[i].len;
+ planes[0].reserved[0] = this->cap_bufs[i].fd; // ION fd
+ planes[0].reserved[1] = 0;
+ planes[0].bytesused = this->cap_bufs[i].len;
+ planes[0].data_offset = 0;
+ util::safe_ioctl(this->fd, VIDIOC_QBUF, &buf, "VIDIOC_QBUF failed");
+ this->cap_buf_flag[i] = true; // mark as queued
+ return true;
+}
+
+bool MsmVidc::queueOutputBuffer(int i, size_t size) {
+ struct v4l2_buffer buf = {0};
+ struct v4l2_plane planes[1] = {0};
+
+ buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ buf.memory = V4L2_MEMORY_USERPTR;
+ buf.index = i;
+ buf.m.planes = planes;
+ buf.length = 1;
+ // decoded frame plane
+ planes[0].m.userptr = (unsigned long)this->out_buf_off[i]; // check this
+ planes[0].length = this->out_buf_size;
+ planes[0].reserved[0] = this->out_buf.fd; // ION fd
+ planes[0].reserved[1] = 0;
+ planes[0].bytesused = size;
+ planes[0].data_offset = 0;
+ assert((this->out_buf_off[i] & 0xfff) == 0); // must be 4 KiB aligned
+ assert(this->out_buf_size % 4096 == 0); // ditto for size
+
+ util::safe_ioctl(this->fd, VIDIOC_QBUF, &buf, "VIDIOC_QBUF failed");
+ this->out_buf_flag[i] = true; // mark as queued
+ return true;
+}
+
+bool MsmVidc::setDBP() {
+ struct v4l2_ext_control control[2] = {0};
+ struct v4l2_ext_controls controls = {0};
+ control[0].id = V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE;
+ control[0].value = 1; // V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_SECONDARY
+ control[1].id = V4L2_CID_MPEG_VIDC_VIDEO_DPB_COLOR_FORMAT;
+ control[1].value = 0; // V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_NONE
+ controls.count = 2;
+ controls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+ controls.controls = control;
+ util::safe_ioctl(fd, VIDIOC_S_EXT_CTRLS, &controls, "VIDIOC_S_EXT_CTRLS failed");
+ return true;
+}
+
+bool MsmVidc::setupPolling() {
+ // Initialize poll array
+ pfd[EV_VIDEO] = {fd, POLLIN | POLLOUT | POLLWRNORM | POLLRDNORM | POLLPRI, 0};
+ ev[EV_VIDEO] = EV_VIDEO;
+ nfds = 1;
+ return true;
+}
+
+bool MsmVidc::sendPacket(int buf_index, AVPacket *pkt) {
+ assert(buf_index >= 0 && buf_index < out_buf_cnt);
+ assert(pkt != nullptr && pkt->data != nullptr && pkt->size > 0);
+ // Prepare output buffer
+ memset(this->out_buf_addr[buf_index], 0, this->out_buf_size);
+ uint8_t * data = (uint8_t *)this->out_buf_addr[buf_index];
+ memcpy(data, pkt->data, pkt->size);
+ queueOutputBuffer(buf_index, pkt->size);
+ return true;
+}
+
+int MsmVidc::getBufferUnlocked() {
+ for (int i = 0; i < this->out_buf_cnt; i++) {
+ if (!out_buf_flag[i]) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+
+bool MsmVidc::handleOutput() {
+ struct v4l2_buffer buf = {0};
+ struct v4l2_plane planes[1];
+ buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ buf.memory = V4L2_MEMORY_USERPTR;
+ buf.m.planes = planes;
+ buf.length = 1;
+ util::safe_ioctl(this->fd, VIDIOC_DQBUF, &buf, "VIDIOC_DQBUF OUTPUT failed");
+ this->out_buf_flag[buf.index] = false; // mark as not queued
+ return true;
+}
+
+bool MsmVidc::handleEvent() {
+ // dequeue event
+ struct v4l2_event event = {0};
+ util::safe_ioctl(this->fd, VIDIOC_DQEVENT, &event, "VIDIOC_DQEVENT failed");
+ switch (event.type) {
+ case V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_INSUFFICIENT: {
+ unsigned int *ptr = (unsigned int *)event.u.data;
+ unsigned int height = ptr[0];
+ unsigned int width = ptr[1];
+ this->w = width;
+ this->h = height;
+ LOGD("Port Reconfig received insufficient, new size %ux%u, flushing capture bufs...", width, height); // This is normal
+ struct v4l2_decoder_cmd dec;
+ dec.flags = V4L2_QCOM_CMD_FLUSH_CAPTURE;
+ dec.cmd = V4L2_QCOM_CMD_FLUSH;
+ util::safe_ioctl(this->fd, VIDIOC_DECODER_CMD, &dec, "VIDIOC_DECODER_CMD FLUSH_CAPTURE failed");
+ this->reconfigure_pending = true;
+ LOGD("Waiting for flush done event to reconfigure capture queue");
+ break;
+ }
+
+ case V4L2_EVENT_MSM_VIDC_FLUSH_DONE: {
+ unsigned int *ptr = (unsigned int *)event.u.data;
+ unsigned int flags = ptr[0];
+ if (flags & V4L2_QCOM_CMD_FLUSH_CAPTURE) {
+ if (this->reconfigure_pending) {
+ this->restartCapture();
+ this->reconfigure_pending = false;
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return true;
+}
\ No newline at end of file
diff --git a/tools/replay/qcom_decoder.h b/tools/replay/qcom_decoder.h
new file mode 100644
index 0000000000..1d7522cc70
--- /dev/null
+++ b/tools/replay/qcom_decoder.h
@@ -0,0 +1,88 @@
+#pragma once
+
+#include <poll.h>
+#include <linux/videodev2.h>
+
+#include "msgq/visionipc/visionbuf.h"
+
+extern "C" {
+  #include <libavcodec/avcodec.h>
+  #include <libavformat/avformat.h>
+}
+
+#define V4L2_EVENT_MSM_VIDC_START (V4L2_EVENT_PRIVATE_START + 0x00001000)
+#define V4L2_EVENT_MSM_VIDC_FLUSH_DONE (V4L2_EVENT_MSM_VIDC_START + 1)
+#define V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_INSUFFICIENT (V4L2_EVENT_MSM_VIDC_START + 3)
+#define V4L2_CID_MPEG_MSM_VIDC_BASE 0x00992000
+#define V4L2_CID_MPEG_VIDC_VIDEO_DPB_COLOR_FORMAT (V4L2_CID_MPEG_MSM_VIDC_BASE + 44)
+#define V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE (V4L2_CID_MPEG_MSM_VIDC_BASE + 22)
+#define V4L2_QCOM_CMD_FLUSH_CAPTURE (1 << 1)
+#define V4L2_QCOM_CMD_FLUSH (4)
+
+#define VIDEO_DEVICE "/dev/video32"
+#define OUTPUT_BUFFER_COUNT 8
+#define CAPTURE_BUFFER_COUNT 8
+#define FPS 20
+
+
+class MsmVidc {
+public:
+ MsmVidc() = default;
+ ~MsmVidc();
+
+ bool init(const char* dev, size_t width, size_t height, uint64_t codec);
+ VisionBuf* decodeFrame(AVPacket* pkt, VisionBuf* buf);
+
+ AVFormatContext* avctx = nullptr;
+ int fd = 0;
+
+private:
+ bool initialized = false;
+ bool reconfigure_pending = false;
+ bool frame_ready = false;
+
+ VisionBuf* current_output_buf = nullptr;
+ VisionBuf out_buf; // Single input buffer
+ VisionBuf cap_bufs[CAPTURE_BUFFER_COUNT]; // Capture (output) buffers
+
+ size_t w = 1928, h = 1208;
+ size_t cap_height = 0, cap_width = 0;
+
+ int cap_buf_size = 0;
+ int out_buf_size = 0;
+
+ size_t cap_plane_off[CAPTURE_BUFFER_COUNT] = {0};
+ size_t cap_plane_stride[CAPTURE_BUFFER_COUNT] = {0};
+ bool cap_buf_flag[CAPTURE_BUFFER_COUNT] = {false};
+
+ size_t out_buf_off[OUTPUT_BUFFER_COUNT] = {0};
+ void* out_buf_addr[OUTPUT_BUFFER_COUNT] = {0};
+ bool out_buf_flag[OUTPUT_BUFFER_COUNT] = {false};
+ const int out_buf_cnt = OUTPUT_BUFFER_COUNT;
+
+ const int subscriptions[2] = {
+ V4L2_EVENT_MSM_VIDC_FLUSH_DONE,
+ V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_INSUFFICIENT
+ };
+
+ enum { EV_VIDEO, EV_COUNT };
+ struct pollfd pfd[EV_COUNT] = {0};
+ int ev[EV_COUNT] = {-1};
+ int nfds = 0;
+
+ VisionBuf* processEvents();
+ bool setupOutput();
+ bool subscribeEvents();
+ bool setPlaneFormat(v4l2_buf_type type, uint32_t fourcc);
+ bool setFPS(uint32_t fps);
+ bool restartCapture();
+ bool queueCaptureBuffer(int i);
+ bool queueOutputBuffer(int i, size_t size);
+ bool setDBP();
+ bool setupPolling();
+ bool sendPacket(int buf_index, AVPacket* pkt);
+ int getBufferUnlocked();
+ VisionBuf* handleCapture();
+ bool handleOutput();
+ bool handleEvent();
+};