diff --git a/selfdrive/ui/tests/test_ui/run.py b/selfdrive/ui/tests/test_ui/run.py
index 98a600eebc..0b1d842499 100644
--- a/selfdrive/ui/tests/test_ui/run.py
+++ b/selfdrive/ui/tests/test_ui/run.py
@@ -19,7 +19,6 @@ from openpilot.common.realtime import DT_MDL
 from openpilot.common.transformations.camera import DEVICE_CAMERAS
 from openpilot.selfdrive.test.helpers import with_processes
 from openpilot.selfdrive.test.process_replay.vision_meta import meta_from_camera_state
-from openpilot.tools.webcam.camera import Camera
 
 UI_DELAY = 0.5 # may be slower on CI?
 
@@ -76,7 +75,7 @@ def setup_onroad(click, pm: PubMaster):
 
   time.sleep(0.5) # give time for vipc server to start
 
-  IMG = Camera.bgr2nv12(np.random.randint(0, 255, (d.fcam.width, d.fcam.height, 3), dtype=np.uint8))
+  IMG = np.zeros((int(d.fcam.width*1.5), d.fcam.height), dtype=np.uint8)
   IMG_BYTES = IMG.flatten().tobytes()
 
   cams = ('roadCameraState', 'wideRoadCameraState')
diff --git a/tools/webcam/camera.py b/tools/webcam/camera.py
index ddff46c5d3..10900b60ff 100644
--- a/tools/webcam/camera.py
+++ b/tools/webcam/camera.py
@@ -1,6 +1,4 @@
-# TODO: remove the cv2 dependency, it's only used here
-import cv2 as cv
-import numpy as np
+import av
 
 class Camera:
   def __init__(self, cam_type_state, stream_type, camera_id):
@@ -12,23 +10,19 @@ class Camera:
     self.stream_type = stream_type
     self.cur_frame_id = 0
 
-    self.cap = cv.VideoCapture(camera_id)
-    self.W = self.cap.get(cv.CAP_PROP_FRAME_WIDTH)
-    self.H = self.cap.get(cv.CAP_PROP_FRAME_HEIGHT)
+    self.container = av.open(camera_id)
+    self.video_stream = self.container.streams.video[0]
+    self.W = self.video_stream.codec_context.width
+    self.H = self.video_stream.codec_context.height
 
   @classmethod
   def bgr2nv12(self, bgr):
-    yuv = cv.cvtColor(bgr, cv.COLOR_BGR2YUV_I420)
-    uv_row_cnt = yuv.shape[0] // 3
-    uv_plane = np.transpose(yuv[uv_row_cnt * 2:].reshape(2, -1), [1, 0])
-    yuv[uv_row_cnt * 2:] = uv_plane.reshape(uv_row_cnt, -1)
-    return yuv
+    frame = av.VideoFrame.from_ndarray(bgr, format='bgr24')
+    return frame.reformat(format='nv12').to_ndarray()
 
   def read_frames(self):
-    while True:
-      sts , frame = self.cap.read()
-      if not sts:
-        break
-      yuv = Camera.bgr2nv12(frame)
+    for frame in self.container.decode(self.video_stream):
+      img = frame.to_ndarray(format='bgr24')  # decode straight to packed BGR, no RGB round-trip
+      yuv = Camera.bgr2nv12(img)
       yield yuv.data.tobytes()
-    self.cap.release()
+    self.container.close()