parent
3a78eee2f9
commit
bb8a2ff65b
7 changed files with 0 additions and 488 deletions
@ -1,109 +0,0 @@ |
||||
import numpy as np |
||||
from openpilot.selfdrive.controls.radard import RADAR_TO_CAMERA |
||||
|
||||
# Color palette used for rerun AnnotationContext
# Each entry is (class_id, label, (r, g, b)). The class ids are the raw
# uint8 values written into the segmentation overlays below; rerun maps
# them back to these colors/labels when rendering.
rerunColorPalette = [(96, "red", (255, 0, 0)),
                     (100, "pink", (255, 36, 0)),
                     (124, "yellow", (255, 255, 0)),
                     (230, "vibrantpink", (255, 36, 170)),
                     (240, "orange", (255, 146, 0)),
                     (255, "white", (255, 255, 255)),
                     (110, "carColor", (255, 0, 127)),
                     (0, "background", (0, 0, 0))]
||||
|
||||
|
||||
class UIParams:
  # Geometry of the top-down ("lidar") view, in pixels; lidar_zoom scales
  # meters to pixels.
  lidar_x, lidar_y, lidar_zoom = 384, 960, 6
  # Ego-car anchor point within the view (horizontal center, near the bottom).
  lidar_car_x, lidar_car_y = lidar_x / 2., lidar_y / 1.1
  # Car outline dimensions converted from meters to pixels via lidar_zoom.
  car_hwidth = 1.7272 / 2 * lidar_zoom
  car_front = 2.6924 * lidar_zoom
  car_back = 1.8796 * lidar_zoom
  # Class id drawn for the car outline (the "carColor" palette entry, 110).
  car_color = rerunColorPalette[6][0]
# Short module-wide alias used by the drawing helpers below.
UP = UIParams
||||
|
||||
|
||||
def to_topdown_pt(y, x):
  """Project a road-frame point to integer pixel coords in the top-down view.

  Returns (px, py), or (-1, -1) when the point falls outside the image.
  Note the argument order: `y` is the longitudinal (forward) coordinate.
  """
  px = x * UP.lidar_zoom + UP.lidar_car_x
  # Forward distance grows upward in the image, hence the negation.
  py = -y * UP.lidar_zoom + UP.lidar_car_y
  if 0 < px < UP.lidar_x and 0 < py < UP.lidar_y:
    return int(px), int(py)
  return -1, -1
||||
|
||||
|
||||
def draw_path(path, lid_overlay, lid_color=None):
  """Rasterize a model path onto the top-down overlay, one pixel per point.

  No-op unless both a color and an overlay are provided.
  """
  xs, ys = np.asarray(path.x), np.asarray(path.y)
  if lid_color is None or lid_overlay is None:
    return
  # draw lidar path point on lidar
  for i, xv in enumerate(xs):
    px, py = to_topdown_pt(xv, ys[i])
    if px != -1:
      lid_overlay[px, py] = lid_color
||||
|
||||
|
||||
def plot_model(m, lid_overlay):
  """Draw model outputs (leads, lane lines, road edges, planned path) onto the overlay."""
  if lid_overlay is None:
    return

  # Leads: a bar spanning +/- one x standard deviation at the lead's position.
  for lead in m.leadsV3:
    if lead.prob < 0.5:
      continue
    lead_x, lead_y = lead.x[0], lead.y[0]
    lead_x_std = lead.xStd[0]
    # Model positions are camera-relative; shift to the radar frame.
    lead_x -= RADAR_TO_CAMERA
    _, py_top = to_topdown_pt(lead_x + lead_x_std, lead_y)
    px, py_bottom = to_topdown_pt(lead_x - lead_x_std, lead_y)
    lid_overlay[int(round(px - 4)):int(round(px + 4)), py_top:py_bottom] = rerunColorPalette[2][0]

  # Lane lines in yellow, road edges and the planned path in red.
  for path in m.laneLines:
    draw_path(path, lid_overlay, rerunColorPalette[2][0])
  for edge in m.roadEdges:
    draw_path(edge, lid_overlay, rerunColorPalette[0][0])
  draw_path(m.position, lid_overlay, rerunColorPalette[0][0])
||||
|
||||
|
||||
def plot_lead(rs, lid_overlay):
  """Draw a horizontal line across the overlay at each active radar lead's distance."""
  for lead in (rs.leadOne, rs.leadTwo):
    if not lead.status:
      continue
    d_rel = lead.dRel
    # A 20 m-wide bar (y from -10 to 10) at the lead's forward distance.
    px_left, py = to_topdown_pt(d_rel, -10)
    px_right, _ = to_topdown_pt(d_rel, 10)
    lid_overlay[px_left:px_right, py] = rerunColorPalette[0][0]
||||
|
||||
|
||||
def update_radar_points(lt, lid_overlay):
  """Draw radar track points onto the top-down overlay, in place.

  lt: iterable of radar tracks (may be None when no tracks are available).
  lid_overlay: 2D uint8 segmentation image to draw into.
  """
  # Map trackId -> [dRel, yRel, vRel, aRel, oncoming, stationary].
  # BUGFIX: this was initialized to a list ([]), so when `lt` was None the
  # `.items()` call below raised AttributeError. Start with a dict instead.
  ar_pts = {}
  if lt is not None:
    for track in lt:
      ar_pts[track.trackId] = [track.dRel, track.yRel, track.vRel, track.aRel, track.oncoming, track.stationary]
  for ids, pt in ar_pts.items():
    # negative here since radar is left positive
    px, py = to_topdown_pt(pt[0], -pt[1])
    if px != -1:
      if pt[-1]:  # stationary
        color = rerunColorPalette[4][0]
      elif pt[-2]:  # oncoming
        color = rerunColorPalette[3][0]
      else:
        color = rerunColorPalette[5][0]
      # Track id 1 gets a wider, distinct marker.
      if int(ids) == 1:
        lid_overlay[px - 2:px + 2, py - 10:py + 10] = rerunColorPalette[1][0]
      else:
        lid_overlay[px - 2:px + 2, py - 2:py + 2] = color
||||
|
||||
|
||||
def get_blank_lid_overlay(UP):
  """Return a blank top-down overlay with the ego-car rectangle outline drawn in.

  UP: params object providing lidar_x/lidar_y, lidar_car_x/lidar_car_y,
      car_hwidth/car_front/car_back, and car_color.
  """
  lid_overlay = np.zeros((UP.lidar_x, UP.lidar_y), 'uint8')
  # Draw the car.
  left = int(round(UP.lidar_car_x - UP.car_hwidth))
  right = int(round(UP.lidar_car_x + UP.car_hwidth))
  front = int(round(UP.lidar_car_y - UP.car_front))
  back = int(round(UP.lidar_car_y + UP.car_back))
  # Front and rear bumper lines.
  lid_overlay[left:right, front] = UP.car_color
  lid_overlay[left:right, back] = UP.car_color
  # Left and right sides.
  lid_overlay[left, front:back] = UP.car_color
  lid_overlay[right, front:back] = UP.car_color
  return lid_overlay
@ -1,60 +0,0 @@ |
||||
#!/usr/bin/env python3 |
||||
import argparse |
||||
import os |
||||
import sys |
||||
import numpy as np |
||||
import rerun as rr |
||||
import cereal.messaging as messaging |
||||
from openpilot.common.basedir import BASEDIR |
||||
from openpilot.tools.replay.lib.rp_helpers import (UP, rerunColorPalette, |
||||
get_blank_lid_overlay, |
||||
update_radar_points, plot_lead, |
||||
plot_model) |
||||
from msgq.visionipc import VisionIpcClient, VisionStreamType |
||||
|
||||
# Export BASEDIR so anything reading it from the environment sees this checkout.
os.environ['BASEDIR'] = BASEDIR

# Zoom level (meters -> pixels) for the top-down view; set explicitly here.
UP.lidar_zoom = 6
||||
|
||||
def visualize(addr):
  """Stream live radar/model/lead data into rerun as a top-down segmentation image.

  addr: address the SubMaster subscribes to for messages.
  Runs forever; requires a camerad road-camera stream to pace the loop.
  """
  sm = messaging.SubMaster(['radarState', 'liveTracks', 'modelV2'], addr=addr)
  vipc_client = VisionIpcClient("camerad", VisionStreamType.VISION_STREAM_ROAD, True)
  while True:
    if not vipc_client.is_connected():
      vipc_client.connect(True)
    # Only draw when a fresh camera frame arrives.
    frame = vipc_client.recv()
    if frame is None or not frame.data.any():
      continue

    sm.update(0)
    overlay = get_blank_lid_overlay(UP)
    if sm.recv_frame['modelV2']:
      plot_model(sm['modelV2'], overlay)
    if sm.recv_frame['radarState']:
      plot_lead(sm['radarState'], overlay)
    tracks_time = sm.logMonoTime['liveTracks']
    if sm.updated['liveTracks']:
      update_radar_points(sm['liveTracks'], overlay)
    rr.set_time_nanos("TIMELINE", tracks_time)
    # Rotate/flip so the forward direction renders upward in the viewer.
    rr.log("tracks", rr.SegmentationImage(np.flip(np.rot90(overlay, k=-1), axis=1)))
||||
|
||||
|
||||
def get_arg_parser():
  """Build the command-line parser for the radar-point visualizer."""
  p = argparse.ArgumentParser(
    description="Show replay data in a UI.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
  )
  # Optional positional address; defaults to localhost.
  p.add_argument("ip_address", nargs="?", default="127.0.0.1",
                 help="The ip address on which to receive zmq messages.")
  p.add_argument("--frame-address", default=None,
                 help="The frame address (fully qualified ZMQ endpoint for frames) on which to receive zmq messages.")
  return p
||||
|
||||
|
||||
if __name__ == "__main__":
  args = get_arg_parser().parse_args(sys.argv[1:])
  # A non-local address implies messages arrive over ZMQ rather than MSGQ.
  if args.ip_address != "127.0.0.1":
    os.environ["ZMQ"] = "1"
    messaging.reset_context()
  rr.init("RadarPoints", spawn=True)
  # Register the palette so segmentation class ids render with names/colors.
  rr.log("tracks", rr.AnnotationContext(rerunColorPalette), static=True)
  visualize(args.ip_address)
@ -1,37 +0,0 @@ |
||||
# Rerun |
||||
Rerun is a tool to quickly visualize time series data. It supports all openpilot logs, both the `logMessages` and video logs.
||||
|
||||
[Instructions](https://rerun.io/docs/reference/viewer/overview) for navigation within the Rerun Viewer. |
||||
|
||||
## Usage |
||||
``` |
||||
usage: run.py [-h] [--demo] [--qcam] [--fcam] [--ecam] [--dcam] [route_or_segment_name] |
||||
|
||||
A helper to run rerun on openpilot routes |
||||
|
||||
positional arguments: |
||||
route_or_segment_name |
||||
The route or segment name to plot (default: None) |
||||
|
||||
options: |
||||
-h, --help show this help message and exit |
||||
--demo Use the demo route instead of providing one (default: False) |
||||
--qcam Show low-res road camera (default: False) |
||||
--fcam Show driving camera (default: False) |
||||
--ecam Show wide camera (default: False) |
||||
--dcam Show driver monitoring camera (default: False) |
||||
``` |
||||
|
||||
Examples using route name to observe accelerometer and qcamera: |
||||
|
||||
`./run.py --qcam "a2a0ccea32023010/2023-07-27--13-01-19"` |
||||
|
||||
Examples using segment range (more on [SegmentRange](https://github.com/commaai/openpilot/tree/master/tools/lib)): |
||||
|
||||
`./run.py --qcam "a2a0ccea32023010/2023-07-27--13-01-19/2:4"` |
||||
|
||||
## Cautions: |
||||
- Showing hevc videos (`--fcam`, `--ecam`, and `--dcam`) is expensive, so it's recommended to use `--qcam` for optimized performance. If possible, limiting your route to a few segments using `SegmentRange` will speed up logging and reduce memory usage.
||||
|
||||
## Demo |
||||
`./run.py --qcam --demo` |
@ -1,93 +0,0 @@ |
||||
import tqdm |
||||
import subprocess |
||||
import multiprocessing |
||||
from enum import StrEnum |
||||
from functools import partial |
||||
from collections import namedtuple |
||||
|
||||
from openpilot.tools.lib.framereader import ffprobe |
||||
|
||||
# Which cameras to load: one bool per camera type, in CameraType order.
CameraConfig = namedtuple("CameraConfig", ["qcam", "fcam", "ecam", "dcam"])
||||
|
||||
class CameraType(StrEnum):
  """Camera stream identifiers; values match the camera file basenames in a route."""
  qcam = "qcamera"
  fcam = "fcamera"
  ecam = "ecamera"
  dcam = "dcamera"
||||
|
||||
|
||||
def probe_packet_info(camera_path):
  """Run ffprobe on a camera file and return its packet info.

  Returns the decoded ffprobe output split into whitespace-separated tokens
  (e.g. "pts_time=..." entries).
  """
  cmd = ["ffprobe", "-v", "quiet", "-show_packets", "-probesize", "10M", camera_path]
  out = subprocess.check_output(cmd)
  return out.decode().split()
||||
|
||||
|
||||
class _FrameReader:
  """Decodes one camera segment file to raw NV12 frames via ffmpeg, paired with per-frame timestamps."""

  def __init__(self, camera_path, segment, h, w, start_time):
    # camera_path: path/URL of this segment's video file
    # segment: segment index within the route (offsets inferred timestamps)
    # h, w: frame height/width in pixels
    # start_time: route start time in seconds, added when timestamps are inferred
    self.camera_path = camera_path
    self.segment = segment
    self.h = h
    self.w = w
    self.start_time = start_time

    self.ts = self._get_ts()

  def _read_stream_nv12(self):
    # NV12 is 1.5 bytes/pixel: full-res Y plane plus half-res interleaved UV.
    frame_sz = self.w * self.h * 3 // 2
    proc = subprocess.Popen(
      ["ffmpeg", "-v", "quiet", "-i", self.camera_path, "-f", "rawvideo", "-pix_fmt", "nv12", "-"],
      stdin=subprocess.PIPE,
      stdout=subprocess.PIPE,
      stderr=subprocess.DEVNULL
    )
    try:
      # Read fixed-size frames until ffmpeg's stdout is exhausted.
      while True:
        dat = proc.stdout.read(frame_sz)
        if len(dat) == 0:
          break
        yield dat
    finally:
      # Ensure ffmpeg is torn down even if the consumer stops early.
      proc.kill()

  def _get_ts(self):
    # Per-frame timestamps (seconds) from ffprobe packet info.
    dat = probe_packet_info(self.camera_path)
    try:
      ret = [float(d.split('=')[1]) for d in dat if d.startswith("pts_time=")]
    except ValueError:
      # pts_times aren't available. Infer timestamps from duration_times
      ret = [d for d in dat if d.startswith("duration_time")]
      # assumes 60 s segments when inferring — the segment index offsets each frame
      ret = [float(d.split('=')[1])*(i+1)+(self.segment*60)+self.start_time for i, d in enumerate(ret)]
    return ret

  def __iter__(self):
    # Yields (timestamp_seconds, nv12_bytes) per decoded frame.
    for i, frame in enumerate(self._read_stream_nv12()):
      yield self.ts[i], frame
||||
|
||||
|
||||
class CameraReader:
  """Creates per-segment _FrameReaders lazily and maps a function over them in a process pool."""

  def __init__(self, camera_paths, start_time, seg_idxs):
    # seg_idxs: segment indices to process; camera_paths is indexed by them.
    self.seg_idxs = seg_idxs
    self.camera_paths = camera_paths
    self.start_time = start_time

    # Probe the first segment for stream geometry.
    # NOTE(review): assumes all segments share the same resolution — confirm.
    probe = ffprobe(camera_paths[0])["streams"][0]
    self.h = probe["height"]
    self.w = probe["width"]

    # Cache of per-segment frame readers, created on first use.
    self.__frs = {}

  def _get_fr(self, i):
    # Return (creating if needed) the _FrameReader for segment i.
    if i not in self.__frs:
      self.__frs[i] = _FrameReader(self.camera_paths[i], segment=i, h=self.h, w=self.w, start_time=self.start_time)
    return self.__frs[i]

  def _run_on_segment(self, func, i):
    # Adapter so `func` can be partially applied and mapped over segment indices.
    return func(self._get_fr(i))

  def run_across_segments(self, num_processes, func, desc=None):
    # Apply `func` to every segment's frame reader across a multiprocessing
    # pool, driving a tqdm progress bar; results are discarded.
    with multiprocessing.Pool(num_processes) as pool:
      num_segs = len(self.seg_idxs)
      for _ in tqdm.tqdm(pool.imap_unordered(partial(self._run_on_segment, func), self.seg_idxs), total=num_segs, desc=desc):
        continue
||||
|
@ -1,180 +0,0 @@ |
||||
#!/usr/bin/env python3 |
||||
|
||||
import sys |
||||
import argparse |
||||
import multiprocessing |
||||
import rerun as rr |
||||
import rerun.blueprint as rrb |
||||
from functools import partial |
||||
from collections import defaultdict |
||||
|
||||
from cereal.services import SERVICE_LIST |
||||
from openpilot.tools.rerun.camera_reader import probe_packet_info, CameraReader, CameraConfig, CameraType |
||||
from openpilot.tools.lib.logreader import LogReader |
||||
from openpilot.tools.lib.route import Route, SegmentRange |
||||
|
||||
|
||||
# Worker count for parallel log/camera processing.
NUM_CPUS = multiprocessing.cpu_count()
# Public demo route used with --demo.
DEMO_ROUTE = "a2a0ccea32023010|2023-07-27--13-01-19"
# Name of the rerun timeline all data is logged against.
RR_TIMELINE_NAME = "Timeline"
# Rerun application/recording id shared by all worker processes.
RR_WIN = "openpilot logs"


"""
Relevant upstream Rerun issues:
- loading videos directly: https://github.com/rerun-io/rerun/issues/6532
"""
||||
|
||||
class Rerunner:
  """Streams an openpilot route (log messages + camera frames) into the Rerun viewer."""

  def __init__(self, route, segment_range, camera_config):
    """route: Route object; segment_range: SegmentRange selecting segments;
    camera_config: CameraConfig of booleans selecting camera streams."""
    # BUGFIX: this previously read the module-level `route_or_segment_name`
    # global (only defined when run as a script), which raised NameError when
    # the class was imported. Build the identifier from the constructor args.
    self.lr = LogReader(str(segment_range))

    # hevc files don't have start_time. We get it from qcamera.ts
    start_time = 0
    dat = probe_packet_info(route.qcamera_paths()[0])
    for d in dat:
      if d.startswith("pts_time="):
        start_time = float(d.split('=')[1])
        break

    qcam, fcam, ecam, dcam = camera_config
    self.camera_readers = {}
    if qcam:
      self.camera_readers[CameraType.qcam] = CameraReader(route.qcamera_paths(), start_time, segment_range.seg_idxs)
    if fcam:
      self.camera_readers[CameraType.fcam] = CameraReader(route.camera_paths(), start_time, segment_range.seg_idxs)
    if ecam:
      self.camera_readers[CameraType.ecam] = CameraReader(route.ecamera_paths(), start_time, segment_range.seg_idxs)
    if dcam:
      self.camera_readers[CameraType.dcam] = CameraReader(route.dcamera_paths(), start_time, segment_range.seg_idxs)

  def _create_blueprint(self):
    """Build the viewer layout: one (hidden) time-series view per service plus one view per requested camera."""
    service_views = []
    for topic in sorted(SERVICE_LIST.keys()):
      View = rrb.TimeSeriesView if topic != "thumbnail" else rrb.Spatial2DView
      service_views.append(View(name=topic, origin=f"/{topic}/", visible=False))
      # `static=True` replaces the deprecated `timeless=True`, matching the
      # other rerun usage in this repo.
      rr.log(topic, rr.SeriesLine(name=topic), static=True)

    center_view = [rrb.Vertical(*service_views, name="streams")]
    if len(self.camera_readers):
      center_view.append(rrb.Vertical(*[rrb.Spatial2DView(name=cam_type, origin=cam_type) for cam_type in self.camera_readers.keys()], name="cameras"))

    return rrb.Blueprint(
      rrb.Horizontal(*center_view),
      rrb.SelectionPanel(expanded=False),
      rrb.TimePanel(expanded=False),
    )

  @staticmethod
  def _parse_msg(msg, parent_key=''):
    """Yield (entity_path, value) for every numeric (int/float) leaf in a nested
    msg dict, walking it iteratively with an explicit stack."""
    stack = [(msg, parent_key)]
    while stack:
      current_msg, current_parent_key = stack.pop()
      if isinstance(current_msg, list):
        for index, item in enumerate(current_msg):
          new_key = f"{current_parent_key}/{index}"
          if isinstance(item, (int, float)):
            yield new_key, item
          elif isinstance(item, dict):
            stack.append((item, new_key))
      elif isinstance(current_msg, dict):
        for key, value in current_msg.items():
          new_key = f"{current_parent_key}/{key}"
          if isinstance(value, (int, float)):
            yield new_key, value
          elif isinstance(value, dict):
            stack.append((value, new_key))
          elif isinstance(value, list):
            # Only one level of numeric list elements is plotted here.
            for index, item in enumerate(value):
              if isinstance(item, (int, float)):
                yield f"{new_key}/{index}", item
      else:
        pass  # Not a plottable value

  @staticmethod
  @rr.shutdown_at_exit
  def _process_log_msgs(blueprint, lr):
    """Worker: batch one segment's scalar log leaves and send them to rerun as columns."""
    rr.init(RR_WIN)
    rr.connect()
    rr.send_blueprint(blueprint)

    log_msgs = defaultdict(lambda: defaultdict(list))
    for msg in lr:
      msg_type = msg.which()

      # Thumbnails are images, not scalar series; skip them here.
      if msg_type == "thumbnail":
        continue

      for entity_path, dat in Rerunner._parse_msg(msg.to_dict()[msg_type], msg_type):
        log_msgs[entity_path]["times"].append(msg.logMonoTime)
        log_msgs[entity_path]["data"].append(dat)

    # Columnar send: one batched upload per entity path.
    for entity_path, log_msg in log_msgs.items():
      rr.send_columns(
        entity_path,
        times=[rr.TimeNanosColumn(RR_TIMELINE_NAME, log_msg["times"])],
        components=[rr.components.ScalarBatch(log_msg["data"])]
      )

    return []

  @staticmethod
  @rr.shutdown_at_exit
  def _process_cam_readers(blueprint, cam_type, h, w, fr):
    """Worker: log one camera segment's NV12 frames at their capture timestamps."""
    rr.init(RR_WIN)
    rr.connect()
    rr.send_blueprint(blueprint)

    for ts, frame in fr:
      rr.set_time_nanos(RR_TIMELINE_NAME, int(ts * 1e9))
      rr.log(cam_type, rr.Image(bytes=frame, width=w, height=h, pixel_format=rr.PixelFormat.NV12))

  def load_data(self):
    """Spawn the viewer and fan log/camera processing out across NUM_CPUS processes."""
    rr.init(RR_WIN, spawn=True)

    startup_blueprint = self._create_blueprint()
    self.lr.run_across_segments(NUM_CPUS, partial(self._process_log_msgs, startup_blueprint), desc="Log messages")
    for cam_type, cr in self.camera_readers.items():
      cr.run_across_segments(NUM_CPUS, partial(self._process_cam_readers, startup_blueprint, cam_type, cr.h, cr.w), desc=cam_type)

    # Re-send the blueprint once all data is in.
    rr.send_blueprint(self._create_blueprint())
||||
|
||||
|
||||
if __name__ == '__main__':
  parser = argparse.ArgumentParser(description="A helper to run rerun on openpilot routes",
                                   formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  for flag, desc in [("--demo", "Use the demo route instead of providing one"),
                     ("--qcam", "Show low-res road camera"),
                     ("--fcam", "Show driving camera"),
                     ("--ecam", "Show wide camera"),
                     ("--dcam", "Show driver monitoring camera")]:
    parser.add_argument(flag, action="store_true", help=desc)
  parser.add_argument("route_or_segment_name", nargs='?', help="The route or segment name to plot")
  args = parser.parse_args()

  # A route is required unless the demo route was requested.
  if not args.demo and not args.route_or_segment_name:
    parser.print_help()
    sys.exit()

  camera_config = CameraConfig(args.qcam, args.fcam, args.ecam, args.dcam)
  route_or_segment_name = DEMO_ROUTE if args.demo else args.route_or_segment_name.strip()

  sr = SegmentRange(route_or_segment_name)
  r = Route(sr.route_name)

  # Everything but qcam is a full-resolution hevc stream.
  hevc_requested = any(camera_config[1:])
  if len(sr.seg_idxs) > 1 and hevc_requested:
    print("You're requesting more than 1 segment with hevc videos, "
          "please be aware that might take a lot of memory "
          "since rerun isn't yet well supported for high resolution video logging")
    # NOTE(review): despite "(Y/n)", anything other than "y" aborts.
    response = input("Do you wish to continue? (Y/n): ")
    if response.strip().lower() != "y":
      sys.exit()

  rerunner = Rerunner(r, sr, camera_config)
  rerunner.load_data()
||||
|
Loading…
Reference in new issue