UI: support multiple frame sizes (#2099)

* support multiple frame sizes

* Fix lookup

* Fix focal

* linter
pull/2144/head
Willem Melching 5 years ago committed by GitHub
parent 6a8cd6e054
commit 6b0ac6c8b7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 34
      tools/replay/lib/ui_helpers.py
  2. 18
      tools/replay/sensorium.py
  3. 40
      tools/replay/ui.py

@@ -22,14 +22,27 @@ WHITE = (255, 255, 255)
_PATH_X = np.arange(192.)
_PATH_XD = np.arange(192.)
_PATH_PINV = compute_path_pinv(50)
#_BB_OFFSET = 290, 332
_BB_OFFSET = 0, 0
_BB_SCALE = 1164/640.
_BB_TO_FULL_FRAME = np.asarray([
[_BB_SCALE, 0., _BB_OFFSET[0]],
[0., _BB_SCALE, _BB_OFFSET[1]],
_FULL_FRAME_SIZE = {
}
_BB_TO_FULL_FRAME = {}
_FULL_FRAME_TO_BB = {}
_INTRINSICS = {}
for width, height, focal in [(1164, 874, 910), (1928, 1208, 2648)]:
sz = width * height
_BB_SCALE = width / 640.
_BB_TO_FULL_FRAME[sz] = np.asarray([
[_BB_SCALE, 0., 0.],
[0., _BB_SCALE, 0.],
[0., 0., 1.]])
_FULL_FRAME_TO_BB[sz] = np.linalg.inv(_BB_TO_FULL_FRAME[sz])
_FULL_FRAME_SIZE[sz] = (width, height)
_INTRINSICS[sz] = np.array([
[focal, 0., width / 2.],
[0., focal, height / 2.],
[0., 0., 1.]])
_FULL_FRAME_TO_BB = np.linalg.inv(_BB_TO_FULL_FRAME)
METER_WIDTH = 20
@@ -188,14 +201,15 @@ def draw_mpc(liveMpc, top_down):
class CalibrationTransformsForWarpMatrix(object):
def __init__(self, model_to_full_frame, K, E):
def __init__(self, num_px, model_to_full_frame, K, E):
self._model_to_full_frame = model_to_full_frame
self._K = K
self._E = E
self.num_px = num_px
@property
def model_to_bb(self):
return _FULL_FRAME_TO_BB.dot(self._model_to_full_frame)
return _FULL_FRAME_TO_BB[self.num_px].dot(self._model_to_full_frame)
@lazy_property
def model_to_full_frame(self):
@@ -208,7 +222,7 @@ class CalibrationTransformsForWarpMatrix(object):
@lazy_property
def car_to_bb(self):
return _BB_TO_FULL_FRAME.dot(self._K).dot(self._E[:, [0, 1, 3]])
return _BB_TO_FULL_FRAME[self.num_px].dot(self._K).dot(self._E[:, [0, 1, 3]])
def pygame_modules_have_loaded():

@@ -14,15 +14,15 @@ else:
from common.transformations.model import MEDMODEL_INPUT_SIZE
from common.transformations.model import get_camera_frame_from_medmodel_frame
from common.transformations.camera import FULL_FRAME_SIZE, eon_intrinsics
from tools.replay.lib.ui_helpers import CalibrationTransformsForWarpMatrix
from tools.replay.lib.ui_helpers import CalibrationTransformsForWarpMatrix, _FULL_FRAME_SIZE, _INTRINSICS
if __name__ == "__main__":
sm = messaging.SubMaster(['liveCalibration'])
frame = messaging.sub_sock('frame', conflate=True)
win = Window(MEDMODEL_INPUT_SIZE[0], MEDMODEL_INPUT_SIZE[1], double=True)
num_px = 0
calibration = None
imgff = None
while 1:
fpkt = messaging.recv_one(frame)
@@ -30,19 +30,23 @@ if __name__ == "__main__":
continue
sm.update(timeout=1)
rgb_img_raw = fpkt.frame.image
num_px = len(rgb_img_raw) // 3
if rgb_img_raw and num_px in _FULL_FRAME_SIZE.keys():
FULL_FRAME_SIZE = _FULL_FRAME_SIZE[num_px]
imgff = np.frombuffer(rgb_img_raw, dtype=np.uint8).reshape((FULL_FRAME_SIZE[1], FULL_FRAME_SIZE[0], 3))
imgff = imgff[:, :, ::-1] # Convert BGR to RGB
if sm.updated['liveCalibration']:
intrinsic_matrix = eon_intrinsics
if sm.updated['liveCalibration'] and num_px:
intrinsic_matrix = _INTRINSICS[num_px]
img_transform = np.array(fpkt.frame.transform).reshape(3, 3)
extrinsic_matrix = np.asarray(sm['liveCalibration'].extrinsicMatrix).reshape(3, 4)
ke = intrinsic_matrix.dot(extrinsic_matrix)
warp_matrix = get_camera_frame_from_medmodel_frame(ke)
calibration = CalibrationTransformsForWarpMatrix(warp_matrix, intrinsic_matrix, extrinsic_matrix)
calibration = CalibrationTransformsForWarpMatrix(num_px, warp_matrix, intrinsic_matrix, extrinsic_matrix)
transform = np.dot(img_transform, calibration.model_to_full_frame)
if calibration is not None:
if calibration is not None and imgff is not None:
imgw = cv2.warpAffine(imgff, transform[:2],
(MEDMODEL_INPUT_SIZE[0], MEDMODEL_INPUT_SIZE[1]),
flags=cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC)

@@ -10,14 +10,14 @@ import numpy as np
import pygame # pylint: disable=import-error
from common.basedir import BASEDIR
from common.transformations.camera import FULL_FRAME_SIZE, eon_intrinsics
from common.transformations.model import (MODEL_CX, MODEL_CY, MODEL_INPUT_SIZE,
get_camera_frame_from_model_frame)
from selfdrive.car.toyota.interface import CarInterface as ToyotaInterface
from selfdrive.config import UIParams as UP
from selfdrive.controls.lib.vehicle_model import VehicleModel
import cereal.messaging as messaging
from tools.replay.lib.ui_helpers import (_BB_TO_FULL_FRAME, BLACK, BLUE, GREEN,
from tools.replay.lib.ui_helpers import (_BB_TO_FULL_FRAME, _FULL_FRAME_SIZE, _INTRINSICS,
BLACK, BLUE, GREEN,
YELLOW, RED,
CalibrationTransformsForWarpMatrix,
draw_lead_car, draw_lead_on, draw_mpc,
@@ -67,11 +67,14 @@ def ui_thread(addr, frame_address):
frame = messaging.sub_sock('frame', addr=addr, conflate=True)
sm = messaging.SubMaster(['carState', 'plan', 'carControl', 'radarState', 'liveCalibration', 'controlsState',
'liveTracks', 'model', 'liveMpc', 'liveParameters', 'pathPlan'], addr=addr)
'liveTracks', 'model', 'liveMpc', 'liveParameters', 'pathPlan', 'frame'], addr=addr)
calibration = None
img = np.zeros((480, 640, 3), dtype='uint8')
imgff = np.zeros((FULL_FRAME_SIZE[1], FULL_FRAME_SIZE[0], 3), dtype=np.uint8)
imgff = None
num_px = 0
img_transform = np.eye(3)
imgw = np.zeros((160, 320, 3), dtype=np.uint8) # warped image
lid_overlay_blank = get_blank_lid_overlay(UP)
@@ -124,26 +127,27 @@ def ui_thread(addr, frame_address):
if fpkt.frame.transform:
img_transform = np.array(fpkt.frame.transform).reshape(3, 3)
else:
# assume frame is flipped
img_transform = np.array([
[-1.0, 0.0, FULL_FRAME_SIZE[0]-1],
[ 0.0, -1.0, FULL_FRAME_SIZE[1]-1],
[ 0.0, 0.0, 1.0]
])
if rgb_img_raw and len(rgb_img_raw) == FULL_FRAME_SIZE[0] * FULL_FRAME_SIZE[1] * 3:
num_px = len(rgb_img_raw) // 3
if rgb_img_raw and num_px in _FULL_FRAME_SIZE.keys():
FULL_FRAME_SIZE = _FULL_FRAME_SIZE[num_px]
imgff_shape = (FULL_FRAME_SIZE[1], FULL_FRAME_SIZE[0], 3)
if imgff is None or imgff.shape != imgff_shape:
imgff = np.zeros(imgff_shape, dtype=np.uint8)
imgff = np.frombuffer(rgb_img_raw, dtype=np.uint8).reshape((FULL_FRAME_SIZE[1], FULL_FRAME_SIZE[0], 3))
imgff = imgff[:, :, ::-1] # Convert BGR to RGB
cv2.warpAffine(imgff, np.dot(img_transform, _BB_TO_FULL_FRAME)[:2],
cv2.warpAffine(imgff, np.dot(img_transform, _BB_TO_FULL_FRAME[num_px])[:2],
(img.shape[1], img.shape[0]), dst=img, flags=cv2.WARP_INVERSE_MAP)
intrinsic_matrix = eon_intrinsics
intrinsic_matrix = _INTRINSICS[num_px]
else:
img.fill(0)
intrinsic_matrix = np.eye(3)
if calibration is not None:
if calibration is not None and imgff is not None:
transform = np.dot(img_transform, calibration.model_to_full_frame)
imgw = cv2.warpAffine(imgff, transform[:2], (MODEL_INPUT_SIZE[0], MODEL_INPUT_SIZE[1]), flags=cv2.WARP_INVERSE_MAP)
else:
@@ -189,11 +193,11 @@ def ui_thread(addr, frame_address):
# draw all radar points
maybe_update_radar_points(sm['liveTracks'], top_down[1])
if sm.updated['liveCalibration']:
if sm.updated['liveCalibration'] and num_px:
extrinsic_matrix = np.asarray(sm['liveCalibration'].extrinsicMatrix).reshape(3, 4)
ke = intrinsic_matrix.dot(extrinsic_matrix)
warp_matrix = get_camera_frame_from_model_frame(ke)
calibration = CalibrationTransformsForWarpMatrix(warp_matrix, intrinsic_matrix, extrinsic_matrix)
calibration = CalibrationTransformsForWarpMatrix(num_px, warp_matrix, intrinsic_matrix, extrinsic_matrix)
# draw red pt for lead car in the main img
for lead in [sm['radarState'].leadOne, sm['radarState'].leadTwo]:

Loading…
Cancel
Save