pull/30273/head
Yassine 2 years ago
commit d26fd5b4da
  1. .github/pull_request_template.md (10 lines changed)
  2. .github/workflows/badges.yaml (2 lines changed)
  3. .github/workflows/docs.yaml (62 lines changed)
  4. .github/workflows/labeler.yaml (2 lines changed)
  5. .github/workflows/prebuilt.yaml (2 lines changed)
  6. .github/workflows/release.yaml (2 lines changed)
  7. .github/workflows/repo-maintenance.yaml (2 lines changed)
  8. .github/workflows/selfdrive_tests.yaml (26 lines changed)
  9. .github/workflows/tools_tests.yaml (24 lines changed)
  10. cereal (2 lines changed)
  11. docs/docker/Dockerfile (42 lines changed)
  12. docs/docker/nginx.conf (15 lines changed)
  13. release/files_common (13 lines changed)
  14. selfdrive/car/subaru/values.py (3 lines changed)
  15. selfdrive/car/toyota/values.py (2 lines changed)
  16. selfdrive/car/volkswagen/values.py (2 lines changed)
  17. selfdrive/controls/controlsd.py (36 lines changed)
  18. selfdrive/controls/lib/events.py (18 lines changed)
  19. selfdrive/controls/plannerd.py (15 lines changed)
  20. selfdrive/locationd/helpers.py (50 lines changed)
  21. selfdrive/locationd/laikad.py (5 lines changed)
  22. selfdrive/locationd/paramsd.py (8 lines changed)
  23. selfdrive/locationd/torqued.py (60 lines changed)
  24. selfdrive/modeld/SConscript (4 lines changed)
  25. selfdrive/monitoring/dmonitoringd.py (13 lines changed)
  26. selfdrive/navd/navd.py (8 lines changed)
  27. selfdrive/test/docker_common.sh (3 lines changed)
  28. selfdrive/test/process_replay/compare_logs.py (40 lines changed)
  29. selfdrive/test/process_replay/model_replay.py (8 lines changed)
  30. selfdrive/test/process_replay/model_replay_ref_commit (2 lines changed)
  31. selfdrive/test/process_replay/test_processes.py (8 lines changed)
  32. selfdrive/test/test_onroad.py (2 lines changed)
  33. selfdrive/ui/soundd/soundd (1 line changed)
  34. selfdrive/ui/spinner (1 line changed)
  35. selfdrive/ui/text (1 line changed)
  36. selfdrive/ui/ui (1 line changed)
  37. selfdrive/ui/ui.h (2 lines changed)
  38. system/loggerd/logger.cc (3 lines changed)
  39. system/micd.py (11 lines changed)
  40. system/sensord/.gitignore (2 lines changed)
  41. system/sensord/SConscript (2 lines changed)
  42. system/sensord/sensord (4 lines changed)
  43. system/sensord/tests/test_sensord.py (2 lines changed)
  44. tinygrad_repo (2 lines changed)
  45. tools/cabana/binaryview.cc (1 line changed)
  46. tools/cabana/chart/chartswidget.cc (7 lines changed)
  47. tools/cabana/chart/chartswidget.h (1 line changed)
  48. tools/cabana/detailwidget.cc (6 lines changed)
  49. tools/cabana/videowidget.cc (16 lines changed)
  50. tools/cabana/videowidget.h (6 lines changed)
  51. tools/replay/ui.py (4 lines changed)

@ -1,5 +1,15 @@
<!-- Please copy and paste the relevant template -->
<!--- ***** Template: Fingerprint *****
**Car**
Which car (make, model, year) this fingerprint is for
**Route**
A route with the fingerprint
-->
<!--- ***** Template: Car bug fix *****
**Description** [](A description of the bug and the fix. Also link any relevant issues.)

@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-20.04
if: github.repository == 'commaai/openpilot'
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry

@ -0,0 +1,62 @@
name: docs
on:
push:
branches:
- master
pull_request:
concurrency:
group: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' && github.run_id || github.head_ref || github.ref }}-${{ github.workflow }}-${{ github.event_name }}
cancel-in-progress: true
env:
BASE_IMAGE: openpilot-base
BUILD: selfdrive/test/docker_build.sh base
RUN: docker run --shm-size 1G -v $GITHUB_WORKSPACE:/tmp/openpilot -w /tmp/openpilot -e FILEREADER_CACHE=1 -e PYTHONPATH=/tmp/openpilot -e NUM_JOBS -e JOB_ID -e GITHUB_ACTION -e GITHUB_REF -e GITHUB_HEAD_REF -e GITHUB_SHA -e GITHUB_REPOSITORY -e GITHUB_RUN_ID -v $GITHUB_WORKSPACE/.ci_cache/scons_cache:/tmp/scons_cache -v $GITHUB_WORKSPACE/.ci_cache/comma_download_cache:/tmp/comma_download_cache -v $GITHUB_WORKSPACE/.ci_cache/openpilot_cache:/tmp/openpilot_cache $BASE_IMAGE /bin/sh -c
jobs:
docs:
name: build docs
runs-on: ubuntu-20.04
timeout-minutes: 45
steps:
- uses: actions/checkout@v4
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
- name: Build openpilot
run: |
${{ env.RUN }} "scons -j$(nproc)"
- name: Build docs
run: |
${{ env.RUN }} "apt update && apt install -y doxygen && cd docs && make html"
- uses: actions/checkout@v4
if: github.ref == 'refs/heads/master' && github.repository == 'commaai/openpilot'
with:
path: openpilot-docs
ssh-key: ${{ secrets.OPENPILOT_DOCS_KEY }}
repository: commaai/openpilot-docs
- name: Push
if: github.ref == 'refs/heads/master' && github.repository == 'commaai/openpilot'
run: |
set -x
source release/identity.sh
cd openpilot-docs
git checkout --orphan tmp
git rm -rf --cached .
cp -r ../build/docs/html/ docs/
touch docs/.nojekyll
git add -f .
git commit -m "build docs"
# docs live in different repo to not bloat openpilot's full clone size
git push -f origin gh-pages

@ -9,7 +9,7 @@ jobs:
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: false
- uses: actions/labeler@v5.0.0-alpha.1

@ -24,7 +24,7 @@ jobs:
wait-interval: 30
running-workflow-name: 'build prebuilt'
check-regexp: ^((?!.*(build master-ci).*).)*$
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: true
- run: git lfs pull

@ -27,7 +27,7 @@ jobs:
wait-interval: 30
running-workflow-name: 'build master-ci'
check-regexp: ^((?!.*(build prebuilt).*).)*$
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: true
fetch-depth: 0

@ -12,7 +12,7 @@ jobs:
container:
image: ghcr.io/commaai/openpilot-base:latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: poetry lock
run: |
pip install poetry

@ -35,7 +35,7 @@ jobs:
env:
STRIPPED_DIR: /tmp/releasepilot
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: true
- run: git lfs pull
@ -77,7 +77,7 @@ jobs:
(github.event.pull_request.head.repo.full_name == 'commaai/openpilot'))) && '["x86_64", "aarch64"]' || '["x86_64"]' ) }}
runs-on: ${{ (matrix.arch == 'aarch64') && 'buildjet-2vcpu-ubuntu-2204-arm' || 'ubuntu-20.04' }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
@ -97,7 +97,7 @@ jobs:
runs-on: ${{ (matrix.arch == 'aarch64') && 'buildjet-2vcpu-ubuntu-2204-arm' || 'ubuntu-20.04' }}
if: github.ref == 'refs/heads/master' && github.event_name != 'pull_request' && github.repository == 'commaai/openpilot'
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: true
- name: Setup to push to repo
@ -120,7 +120,7 @@ jobs:
if: github.ref == 'refs/heads/master' && github.event_name != 'pull_request' && github.repository == 'commaai/openpilot'
needs: [docker_push]
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: false
- name: Setup docker
@ -135,7 +135,7 @@ jobs:
name: static analysis
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
@ -147,7 +147,7 @@ jobs:
name: valgrind
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
@ -165,7 +165,7 @@ jobs:
name: unit tests
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
@ -190,7 +190,7 @@ jobs:
((github.event_name != 'pull_request') ||
(github.event.pull_request.head.repo.full_name == 'commaai/openpilot'))) && 'buildjet-8vcpu-ubuntu-2004' || 'ubuntu-20.04' }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
@ -232,7 +232,7 @@ jobs:
name: regen
runs-on: 'ubuntu-20.04'
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
@ -259,7 +259,7 @@ jobs:
name: model tests
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
@ -295,7 +295,7 @@ jobs:
matrix:
job: [0, 1, 2, 3, 4]
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
@ -323,7 +323,7 @@ jobs:
runs-on: ubuntu-20.04
if: github.event_name == 'pull_request'
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: true
ref: ${{ github.event.pull_request.base.ref }}
@ -333,7 +333,7 @@ jobs:
run: |
${{ env.RUN }} "scons -j$(nproc) && python selfdrive/debug/dump_car_info.py --path /tmp/openpilot_cache/base_car_info"
sudo chown -R $USER:$USER ${{ github.workspace }}
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: true
path: current

@ -30,7 +30,7 @@ jobs:
runs-on: ubuntu-20.04
timeout-minutes: 45
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
@ -48,7 +48,7 @@ jobs:
if: github.repository == 'commaai/openpilot'
timeout-minutes: 45
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
@ -63,29 +63,11 @@ jobs:
run: |
selfdrive/test/docker_build.sh sim
docs:
name: build docs
runs-on: ubuntu-20.04
timeout-minutes: 45
steps:
- uses: actions/checkout@v3
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
- name: Setup to push to repo
if: github.ref == 'refs/heads/master' && github.event_name != 'pull_request' && github.repository == 'commaai/openpilot'
run: |
echo "PUSH_IMAGE=true" >> "$GITHUB_ENV"
$DOCKER_LOGIN
- name: Build and push docs image
run: |
selfdrive/test/docker_build.sh docs
devcontainer:
name: devcontainer
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry

@ -1 +1 @@
Subproject commit b1a1afebb8533a96ff0e8efbba7c10eb47df2af0
Subproject commit 24a522f6ba47aba12a9baea53020a8323673c79c

@ -1,42 +0,0 @@
FROM ghcr.io/commaai/openpilot-base:latest
ENV PYTHONUNBUFFERED 1
ENV OPENPILOT_PATH /tmp/openpilot
ENV PYTHONPATH ${OPENPILOT_PATH}:${PYTHONPATH}
ENV POETRY_VIRUALENVS_CREATE false
RUN mkdir -p ${OPENPILOT_PATH}
WORKDIR ${OPENPILOT_PATH}
COPY SConstruct ${OPENPILOT_PATH}
COPY ./openpilot ${OPENPILOT_PATH}/openpilot
COPY ./body ${OPENPILOT_PATH}/body
COPY ./third_party ${OPENPILOT_PATH}/third_party
COPY ./site_scons ${OPENPILOT_PATH}/site_scons
COPY ./laika_repo ${OPENPILOT_PATH}/laika_repo
RUN ln -s ${OPENPILOT_PATH}/laika_repo/laika/ ${OPENPILOT_PATH}/laika
COPY ./rednose ${OPENPILOT_PATH}/rednose
COPY ./rednose_repo ${OPENPILOT_PATH}/rednose_repo
COPY ./tools ${OPENPILOT_PATH}/tools
COPY ./release ${OPENPILOT_PATH}/release
COPY ./common ${OPENPILOT_PATH}/common
COPY ./opendbc ${OPENPILOT_PATH}/opendbc
COPY ./cereal ${OPENPILOT_PATH}/cereal
COPY ./panda ${OPENPILOT_PATH}/panda
COPY ./selfdrive ${OPENPILOT_PATH}/selfdrive
COPY ./system ${OPENPILOT_PATH}/system
COPY ./*.md ${OPENPILOT_PATH}/
RUN --mount=type=bind,source=.ci_cache/scons_cache,target=/tmp/scons_cache,rw scons -j$(nproc) --cache-readonly
RUN apt update && apt install doxygen -y
COPY ./docs ${OPENPILOT_PATH}/docs
RUN git init .
WORKDIR ${OPENPILOT_PATH}/docs
RUN make html
FROM nginx:1.21
COPY --from=0 /tmp/openpilot/build/docs/html /usr/share/nginx/html
COPY ./docs/docker/nginx.conf /etc/nginx/conf.d/default.conf

@ -1,15 +0,0 @@
server {
listen 80;
listen [::]:80;
server_name localhost;
gzip on;
gzip_types text/html text/plain text/css text/xml text/javascript application/javascript application/x-javascript;
gzip_min_length 1024;
gzip_vary on;
root /usr/share/nginx/html;
location / {
try_files $uri $uri/ /index.html;
}
}

@ -246,6 +246,7 @@ selfdrive/locationd/models/gnss_helpers.py
selfdrive/locationd/torqued.py
selfdrive/locationd/calibrationd.py
selfdrive/locationd/helpers.py
system/logcatd/.gitignore
system/logcatd/SConscript
@ -284,7 +285,6 @@ system/sensord/SConscript
system/sensord/sensors_qcom2.cc
system/sensord/sensors/*.cc
system/sensord/sensors/*.h
system/sensord/sensord
system/sensord/pigeond.py
selfdrive/thermald/thermald.py
@ -592,9 +592,16 @@ tinygrad_repo/extra/onnx.py
tinygrad_repo/extra/onnx_ops.py
tinygrad_repo/extra/thneed.py
tinygrad_repo/extra/utils.py
tinygrad_repo/tinygrad/codegen/ast.py
tinygrad_repo/tinygrad/codegen/gpu.py
tinygrad_repo/tinygrad/codegen/kernel.py
tinygrad_repo/tinygrad/codegen/linearizer.py
tinygrad_repo/tinygrad/codegen/optimizer.py
tinygrad_repo/tinygrad/features/image.py
tinygrad_repo/tinygrad/nn/*
tinygrad_repo/tinygrad/renderer/cstyle.py
tinygrad_repo/tinygrad/renderer/opencl.py
tinygrad_repo/tinygrad/runtime/lib.py
tinygrad_repo/tinygrad/runtime/ops_cpu.py
tinygrad_repo/tinygrad/runtime/ops_disk.py
tinygrad_repo/tinygrad/runtime/ops_gpu.py
tinygrad_repo/tinygrad/shape/*
tinygrad_repo/tinygrad/*.py

@ -324,6 +324,7 @@ FW_VERSIONS = {
b'\x00\x00eq\x1f@ "',
b'\x00\x00eq\x00\x00\x00\x00',
b'\x00\x00e\x8f\x00\x00\x00\x00',
b'\x00\x00e\x92\x00\x00\x00\x00',
b'\x00\x00e\xa4\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
@ -332,6 +333,7 @@ FW_VERSIONS = {
b'\xca!`0\a',
b'\xcc\"f0\a',
b'\xcc!fp\a',
b'\xcc!`p\x07',
b'\xca!f@\x07',
b'\xca!fp\x07',
b'\xf3"f@\x07',
@ -344,6 +346,7 @@ FW_VERSIONS = {
(Ecu.transmission, 0x7e1, None): [
b'\xe6\xf5\004\000\000',
b'\xe6\xf5$\000\000',
b'\xe7\xf5\x04\x00\x00',
b'\xe7\xf6B0\000',
b'\xe7\xf5D0\000',
b'\xf1\x00\xd7\x10@',

@ -1005,6 +1005,7 @@ FW_VERSIONS = {
b'\x0230A10000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230A11000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZN4000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZN5000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x03312K7000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
b'\x03312M3000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
@ -1060,6 +1061,7 @@ FW_VERSIONS = {
b'\x01F152612B91\x00\x00\x00\x00\x00\x00',
b'\x01F15260A070\x00\x00\x00\x00\x00\x00',
b'\x01F152676250\x00\x00\x00\x00\x00\x00',
b'\x01F152602470\x00\x00\x00\x00\x00\x00',
b'F152612590\x00\x00\x00\x00\x00\x00',
b'F152612691\x00\x00\x00\x00\x00\x00',
b'F152612692\x00\x00\x00\x00\x00\x00',

@ -717,12 +717,14 @@ FW_VERSIONS = {
CAR.PASSAT_NMS: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8706K906016C \xf1\x899609',
b'\xf1\x8706K906016E \xf1\x899830',
b'\xf1\x8706K906016G \xf1\x891124',
b'\xf1\x8706K906071BJ\xf1\x894891',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x8709G927158AB\xf1\x893318',
b'\xf1\x8709G927158BD\xf1\x893121',
b'\xf1\x8709G927158DK\xf1\x893594',
b'\xf1\x8709G927158FQ\xf1\x893745',
],
(Ecu.srs, 0x715, None): [

@ -56,39 +56,33 @@ ENABLED_STATES = (State.preEnabled, *ACTIVE_STATES)
class Controls:
def __init__(self, sm=None, pm=None, can_sock=None, CI=None):
def __init__(self, CI=None):
config_realtime_process(4, Priority.CTRL_HIGH)
# Ensure the current branch is cached, otherwise the first iteration of controlsd lags
self.branch = get_short_branch("")
# Setup sockets
self.pm = pm
if self.pm is None:
self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
'carControl', 'carEvents', 'carParams'])
self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
'carControl', 'carEvents', 'carParams'])
self.sensor_packets = ["accelerometer", "gyroscope"]
self.camera_packets = ["roadCameraState", "driverCameraState", "wideRoadCameraState"]
self.can_sock = can_sock
if can_sock is None:
can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 20
self.can_sock = messaging.sub_sock('can', timeout=can_timeout)
can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 20
self.can_sock = messaging.sub_sock('can', timeout=can_timeout)
self.log_sock = messaging.sub_sock('androidLog')
self.params = Params()
self.sm = sm
if self.sm is None:
ignore = self.sensor_packets + ['testJoystick']
if SIMULATION:
ignore += ['driverCameraState', 'managerState']
self.sm = messaging.SubMaster(['deviceState', 'pandaStates', 'peripheralState', 'modelV2', 'liveCalibration',
'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
'managerState', 'liveParameters', 'radarState', 'liveTorqueParameters',
'testJoystick'] + self.camera_packets + self.sensor_packets,
ignore_alive=ignore, ignore_avg_freq=['radarState', 'testJoystick'])
ignore = self.sensor_packets + ['testJoystick']
if SIMULATION:
ignore += ['driverCameraState', 'managerState']
self.sm = messaging.SubMaster(['deviceState', 'pandaStates', 'peripheralState', 'modelV2', 'liveCalibration',
'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
'managerState', 'liveParameters', 'radarState', 'liveTorqueParameters',
'testJoystick'] + self.camera_packets + self.sensor_packets,
ignore_alive=ignore, ignore_avg_freq=['radarState', 'testJoystick'])
if CI is None:
# wait for one pandaState and one CAN packet
@ -879,8 +873,8 @@ class Controls:
self.prof.display()
def main(sm=None, pm=None, logcan=None):
controls = Controls(sm, pm, logcan)
def main():
controls = Controls()
controls.controlsd_thread()
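
Note: the same refactor repeats in the plannerd, paramsd, torqued, dmonitoringd, navd, micd, and laikad hunks below. The optional sm/pm arguments used for socket injection are dropped, and each daemon now builds its own PubMaster/SubMaster unconditionally. A minimal sketch of the before/after shape, simplified from the paramsd hunk further down (socket names are taken from that hunk, bodies are elided, and this is not the exact openpilot code):

import cereal.messaging as messaging

# before: sockets could be injected (e.g. by tests) and were only created as a fallback
def main_before(sm=None, pm=None):
  if pm is None:
    pm = messaging.PubMaster(['liveParameters'])
  if sm is None:
    sm = messaging.SubMaster(['liveLocationKalman', 'carState'], poll=['liveLocationKalman'])
  ...

# after: the daemon always constructs its own sockets
def main_after():
  pm = messaging.PubMaster(['liveParameters'])
  sm = messaging.SubMaster(['liveLocationKalman', 'carState'], poll=['liveLocationKalman'])
  ...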

@ -970,10 +970,10 @@ EVENTS: Dict[int, Dict[str, Union[Alert, AlertCallbackType]]] = {
if __name__ == '__main__':
# print all alerts by type and priority
from cereal.services import SERVICE_LIST
from collections import defaultdict, OrderedDict
from collections import defaultdict
event_names = {v: k for k, v in EventName.schema.enumerants.items()}
alerts_by_type: Dict[str, Dict[int, List[str]]] = defaultdict(lambda: defaultdict(list))
alerts_by_type: Dict[str, Dict[Priority, List[str]]] = defaultdict(lambda: defaultdict(list))
CP = car.CarParams.new_message()
CS = car.CarState.new_message()
@ -983,18 +983,14 @@ if __name__ == '__main__':
for et, alert in alerts.items():
if callable(alert):
alert = alert(CP, CS, sm, False, 1)
priority = alert.priority
alerts_by_type[et][priority].append(event_names[i])
alerts_by_type[et][alert.priority].append(event_names[i])
all_alerts = {}
all_alerts: Dict[str, List[tuple[Priority, List[str]]]] = {}
for et, priority_alerts in alerts_by_type.items():
all_alerts[et] = OrderedDict([
(str(priority), l)
for priority, l in sorted(priority_alerts.items(), key=lambda x: -int(x[0]))
])
all_alerts[et] = sorted(priority_alerts.items(), key=lambda x: x[0], reverse=True)
for status, evs in sorted(all_alerts.items(), key=lambda x: x[0]):
print(f"**** {status} ****")
for p, alert_list in evs.items():
print(f" {p}:")
for p, alert_list in evs:
print(f" {repr(p)}:")
print(" ", ', '.join(alert_list), "\n")
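
Note: the __main__ listing change above replaces the OrderedDict keyed by stringified priority with a plain reverse sort of the Priority keys, printed via repr(). A small self-contained sketch of that sorting behavior; the Priority members and alert names here are illustrative, not the real openpilot enum:

from enum import IntEnum
from collections import defaultdict
from typing import Dict, List

class Priority(IntEnum):  # illustrative subset, values assumed
  LOWEST = 0
  LOW = 2
  HIGH = 4

alerts: Dict[Priority, List[str]] = defaultdict(list)
alerts[Priority.LOW].append("exampleLowAlert")    # hypothetical alert names
alerts[Priority.HIGH].append("exampleHighAlert")

# highest priority first, matching the new sorted(..., reverse=True) call
for p, alert_list in sorted(alerts.items(), key=lambda x: x[0], reverse=True):
  print(f"  {repr(p)}:")
  print("   ", ', '.join(alert_list), "\n")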

@ -27,7 +27,7 @@ def publish_ui_plan(sm, pm, lateral_planner, longitudinal_planner):
uiPlan.accel = longitudinal_planner.a_desired_trajectory_full.tolist()
pm.send('uiPlan', ui_send)
def plannerd_thread(sm=None, pm=None):
def plannerd_thread():
config_realtime_process(5, Priority.CTRL_LOW)
cloudlog.info("plannerd is waiting for CarParams")
@ -41,12 +41,9 @@ def plannerd_thread(sm=None, pm=None):
longitudinal_planner = LongitudinalPlanner(CP)
lateral_planner = LateralPlanner(CP, debug=debug_mode)
if sm is None:
sm = messaging.SubMaster(['carControl', 'carState', 'controlsState', 'radarState', 'modelV2'],
poll=['radarState', 'modelV2'], ignore_avg_freq=['radarState'])
if pm is None:
pm = messaging.PubMaster(['longitudinalPlan', 'lateralPlan', 'uiPlan'])
pm = messaging.PubMaster(['longitudinalPlan', 'lateralPlan', 'uiPlan'])
sm = messaging.SubMaster(['carControl', 'carState', 'controlsState', 'radarState', 'modelV2'],
poll=['radarState', 'modelV2'], ignore_avg_freq=['radarState'])
while True:
sm.update()
@ -58,8 +55,8 @@ def plannerd_thread(sm=None, pm=None):
longitudinal_planner.publish(sm, pm)
publish_ui_plan(sm, pm, lateral_planner, longitudinal_planner)
def main(sm=None, pm=None):
plannerd_thread(sm, pm)
def main():
plannerd_thread()
if __name__ == "__main__":

@ -0,0 +1,50 @@
import numpy as np
from typing import List, Optional, Tuple, Any
class NPQueue:
def __init__(self, maxlen: int, rowsize: int) -> None:
self.maxlen = maxlen
self.arr = np.empty((0, rowsize))
def __len__(self) -> int:
return len(self.arr)
def append(self, pt: List[float]) -> None:
if len(self.arr) < self.maxlen:
self.arr = np.append(self.arr, [pt], axis=0)
else:
self.arr[:-1] = self.arr[1:]
self.arr[-1] = pt
class PointBuckets:
def __init__(self, x_bounds: List[Tuple[float, float]], min_points: List[float], min_points_total: int, points_per_bucket: int, rowsize: int) -> None:
self.x_bounds = x_bounds
self.buckets = {bounds: NPQueue(maxlen=points_per_bucket, rowsize=rowsize) for bounds in x_bounds}
self.buckets_min_points = dict(zip(x_bounds, min_points, strict=True))
self.min_points_total = min_points_total
def bucket_lengths(self) -> List[int]:
return [len(v) for v in self.buckets.values()]
def __len__(self) -> int:
return sum(self.bucket_lengths())
def is_valid(self) -> bool:
individual_buckets_valid = all(len(v) >= min_pts for v, min_pts in zip(self.buckets.values(), self.buckets_min_points.values(), strict=True))
total_points_valid = self.__len__() >= self.min_points_total
return individual_buckets_valid and total_points_valid
def add_point(self, x: float, y: float, bucket_val: float) -> None:
raise NotImplementedError
def get_points(self, num_points: Optional[int] = None) -> Any:
points = np.vstack([x.arr for x in self.buckets.values()])
if num_points is None:
return points
return points[np.random.choice(np.arange(len(points)), min(len(points), num_points), replace=False)]
def load_points(self, points: List[List[float]]) -> None:
for point in points:
self.add_point(*point)
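
Note: add_point is deliberately left unimplemented in the new helper; the torqued.py hunk further down provides the concrete TorqueBuckets subclass. A hedged sketch of how a subclass is expected to route samples into buckets (the class name, bounds, and row layout below are illustrative; only the PointBuckets interface and import path come from the diff):

from openpilot.selfdrive.locationd.helpers import PointBuckets  # import path as used in the torqued.py hunk

class ExampleBuckets(PointBuckets):
  def add_point(self, x: float, y: float, bucket_val: float) -> None:
    # drop the sample into the bucket whose range contains bucket_val
    for bound_min, bound_max in self.x_bounds:
      if bound_min <= bucket_val < bound_max:
        self.buckets[(bound_min, bound_max)].append([x, y, bucket_val])
        break

buckets = ExampleBuckets(x_bounds=[(-0.5, 0.0), (0.0, 0.5)],
                         min_points=[50, 50], min_points_total=100,
                         points_per_bucket=1500, rowsize=3)
buckets.add_point(0.1, 0.3, 0.2)
print(len(buckets), buckets.is_valid())  # 1 False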

@ -440,7 +440,7 @@ def clear_tmp_cache():
os.mkdir(Paths.download_cache_root())
def main(sm=None, pm=None):
def main():
#clear_tmp_cache()
use_qcom = not Params().get_bool("UbloxAvailable")
@ -449,8 +449,7 @@ def main(sm=None, pm=None):
else:
raw_name = "ubloxGnss"
raw_gnss_sock = messaging.sub_sock(raw_name, conflate=False)
if pm is None:
pm = messaging.PubMaster(['gnssMeasurements'])
pm = messaging.PubMaster(['gnssMeasurements'])
# disable until set as main gps source, to better analyze startup time
# TODO ensure low CPU usage before enabling

@ -117,16 +117,14 @@ def check_valid_with_hysteresis(current_valid: bool, val: float, threshold: floa
return current_valid
def main(sm=None, pm=None):
def main():
config_realtime_process([0, 1, 2, 3], 5)
DEBUG = bool(int(os.getenv("DEBUG", "0")))
REPLAY = bool(int(os.getenv("REPLAY", "0")))
if sm is None:
sm = messaging.SubMaster(['liveLocationKalman', 'carState'], poll=['liveLocationKalman'])
if pm is None:
pm = messaging.PubMaster(['liveParameters'])
pm = messaging.PubMaster(['liveParameters'])
sm = messaging.SubMaster(['liveLocationKalman', 'carState'], poll=['liveLocationKalman'])
params_reader = Params()
# wait for stats about the car to come in from controls

@ -12,6 +12,7 @@ from openpilot.common.realtime import config_realtime_process, DT_MDL
from openpilot.common.filter_simple import FirstOrderFilter
from openpilot.system.swaglog import cloudlog
from openpilot.selfdrive.controls.lib.vehicle_model import ACCELERATION_DUE_TO_GRAVITY
from openpilot.selfdrive.locationd.helpers import PointBuckets
HISTORY = 5 # secs
POINTS_PER_BUCKET = 1500
@ -43,55 +44,13 @@ def slope2rot(slope):
return np.array([[cos, -sin], [sin, cos]])
class NPQueue:
def __init__(self, maxlen, rowsize):
self.maxlen = maxlen
self.arr = np.empty((0, rowsize))
def __len__(self):
return len(self.arr)
def append(self, pt):
if len(self.arr) < self.maxlen:
self.arr = np.append(self.arr, [pt], axis=0)
else:
self.arr[:-1] = self.arr[1:]
self.arr[-1] = pt
class PointBuckets:
def __init__(self, x_bounds, min_points, min_points_total):
self.x_bounds = x_bounds
self.buckets = {bounds: NPQueue(maxlen=POINTS_PER_BUCKET, rowsize=3) for bounds in x_bounds}
self.buckets_min_points = dict(zip(x_bounds, min_points, strict=True))
self.min_points_total = min_points_total
def bucket_lengths(self):
return [len(v) for v in self.buckets.values()]
def __len__(self):
return sum(self.bucket_lengths())
def is_valid(self):
return all(len(v) >= min_pts for v, min_pts in zip(self.buckets.values(), self.buckets_min_points.values(), strict=True)) \
and (self.__len__() >= self.min_points_total)
class TorqueBuckets(PointBuckets):
def add_point(self, x, y):
for bound_min, bound_max in self.x_bounds:
if (x >= bound_min) and (x < bound_max):
self.buckets[(bound_min, bound_max)].append([x, 1.0, y])
break
def get_points(self, num_points=None):
points = np.vstack([x.arr for x in self.buckets.values()])
if num_points is None:
return points
return points[np.random.choice(np.arange(len(points)), min(len(points), num_points), replace=False)]
def load_points(self, points):
for x, y in points:
self.add_point(x, y)
class TorqueEstimator:
def __init__(self, CP, decimated=False):
@ -175,7 +134,11 @@ class TorqueEstimator:
self.resets += 1.0
self.decay = MIN_FILTER_DECAY
self.raw_points = defaultdict(lambda: deque(maxlen=self.hist_len))
self.filtered_points = PointBuckets(x_bounds=STEER_BUCKET_BOUNDS, min_points=self.min_bucket_points, min_points_total=self.min_points_total)
self.filtered_points = TorqueBuckets(x_bounds=STEER_BUCKET_BOUNDS,
min_points=self.min_bucket_points,
min_points_total=self.min_points_total,
points_per_bucket=POINTS_PER_BUCKET,
rowsize=3)
def estimate_params(self):
points = self.filtered_points.get_points(self.fit_points)
@ -255,14 +218,11 @@ class TorqueEstimator:
return msg
def main(sm=None, pm=None):
def main():
config_realtime_process([0, 1, 2, 3], 5)
if sm is None:
sm = messaging.SubMaster(['carControl', 'carState', 'liveLocationKalman'], poll=['liveLocationKalman'])
if pm is None:
pm = messaging.PubMaster(['liveTorqueParameters'])
pm = messaging.PubMaster(['liveTorqueParameters'])
sm = messaging.SubMaster(['carControl', 'carState', 'liveLocationKalman'], poll=['liveLocationKalman'])
params = Params()
with car.CarParams.from_bytes(params.get("CarParams", block=True)) as CP:

@ -52,13 +52,13 @@ lenvCython.Program('models/commonmodel_pyx.so', 'models/commonmodel_pyx.pyx', LI
# Get model metadata
fn = File("models/supercombo").abspath
cmd = f' python3 {Dir("#selfdrive/modeld").abspath}/get_model_metadata.py {fn}.onnx'
cmd = f'python3 {Dir("#selfdrive/modeld").abspath}/get_model_metadata.py {fn}.onnx'
files = sum([lenv.Glob("#"+x) for x in open(File("#release/files_common").abspath).read().split("\n") if x.endswith("get_model_metadata.py")], [])
lenv.Command(fn + "_metadata.pkl", [fn + ".onnx"]+files, cmd)
# Build thneed model
if arch == "larch64" or GetOption('pc_thneed'):
tinygrad_opts = ["NATIVE_EXPLOG=1", "VALIDHACKS=1", "OPTLOCAL=1", "IMAGE=2", "GPU=1", "ENABLE_METHOD_CACHE=1"]
tinygrad_opts = ["NOLOCALS=1", "IMAGE=2", "GPU=1"]
if not GetOption('pc_thneed'):
# use FLOAT16 on device for speed + don't cache the CL kernels for space
tinygrad_opts += ["FLOAT16=1", "PYOPENCL_NO_CACHE=1"]

@ -10,15 +10,12 @@ from openpilot.selfdrive.controls.lib.events import Events
from openpilot.selfdrive.monitoring.driver_monitor import DriverStatus
def dmonitoringd_thread(sm=None, pm=None):
def dmonitoringd_thread():
gc.disable()
set_realtime_priority(2)
if pm is None:
pm = messaging.PubMaster(['driverMonitoringState'])
if sm is None:
sm = messaging.SubMaster(['driverStateV2', 'liveCalibration', 'carState', 'controlsState', 'modelV2'], poll=['driverStateV2'])
pm = messaging.PubMaster(['driverMonitoringState'])
sm = messaging.SubMaster(['driverStateV2', 'liveCalibration', 'carState', 'controlsState', 'modelV2'], poll=['driverStateV2'])
driver_status = DriverStatus(rhd_saved=Params().get_bool("IsRhdDetected"))
@ -89,8 +86,8 @@ def dmonitoringd_thread(sm=None, pm=None):
driver_status.wheel_on_right == (driver_status.wheelpos_learner.filtered_stat.M > driver_status.settings._WHEELPOS_THRESHOLD)):
put_bool_nonblocking("IsRhdDetected", driver_status.wheel_on_right)
def main(sm=None, pm=None):
dmonitoringd_thread(sm, pm)
def main():
dmonitoringd_thread()
if __name__ == '__main__':

@ -344,11 +344,9 @@ class RouteEngine:
# TODO: Check for going wrong way in segment
def main(sm=None, pm=None):
if sm is None:
sm = messaging.SubMaster(['liveLocationKalman', 'managerState'])
if pm is None:
pm = messaging.PubMaster(['navInstruction', 'navRoute'])
def main():
pm = messaging.PubMaster(['navInstruction', 'navRoute'])
sm = messaging.SubMaster(['liveLocationKalman', 'managerState'])
rk = Ratekeeper(1.0)
route_engine = RouteEngine(sm, pm)

@ -1,9 +1,6 @@
if [ $1 = "base" ]; then
export DOCKER_IMAGE=openpilot-base
export DOCKER_FILE=Dockerfile.openpilot_base
elif [ $1 = "docs" ]; then
export DOCKER_IMAGE=openpilot-docs
export DOCKER_FILE=docs/docker/Dockerfile
elif [ $1 = "sim" ]; then
export DOCKER_IMAGE=openpilot-sim
export DOCKER_FILE=tools/sim/Dockerfile.sim

@ -91,42 +91,40 @@ def compare_logs(log1, log2, ignore_fields=None, ignore_msgs=None, tolerance=Non
def format_diff(results, log_paths, ref_commit):
diff1, diff2 = "", ""
diff2 += f"***** tested against commit {ref_commit} *****\n"
diff_short, diff_long = "", ""
diff_long += f"***** tested against commit {ref_commit} *****\n"
failed = False
for segment, result in list(results.items()):
diff1 += f"***** results for segment {segment} *****\n"
diff2 += f"***** differences for segment {segment} *****\n"
diff_short += f"***** results for segment {segment} *****\n"
diff_long += f"***** differences for segment {segment} *****\n"
for proc, diff in list(result.items()):
# long diff
diff2 += f"*** process: {proc} ***\n"
diff2 += f"\tref: {log_paths[segment][proc]['ref']}\n"
diff2 += f"\tnew: {log_paths[segment][proc]['new']}\n\n"
diff_long += f"*** process: {proc} ***\n"
diff_long += f"\tref: {log_paths[segment][proc]['ref']}\n"
diff_long += f"\tnew: {log_paths[segment][proc]['new']}\n\n"
# short diff
diff1 += f" {proc}\n"
diff_short += f" {proc}\n"
if isinstance(diff, str):
diff1 += f" ref: {log_paths[segment][proc]['ref']}\n"
diff1 += f" new: {log_paths[segment][proc]['new']}\n\n"
diff1 += f" {diff}\n"
diff_short += f" ref: {log_paths[segment][proc]['ref']}\n"
diff_short += f" new: {log_paths[segment][proc]['new']}\n\n"
diff_short += f" {diff}\n"
failed = True
elif len(diff):
diff1 += f" ref: {log_paths[segment][proc]['ref']}\n"
diff1 += f" new: {log_paths[segment][proc]['new']}\n\n"
diff_short += f" ref: {log_paths[segment][proc]['ref']}\n"
diff_short += f" new: {log_paths[segment][proc]['new']}\n\n"
cnt: Dict[str, int] = {}
for d in diff:
diff2 += f"\t{str(d)}\n"
diff_long += f"\t{str(d)}\n"
k = str(d[1])
cnt[k] = 1 if k not in cnt else cnt[k] + 1
for k, v in sorted(cnt.items()):
diff1 += f" {k}: {v}\n"
diff_short += f" {k}: {v}\n"
failed = True
return diff1, diff2, failed
return diff_short, diff_long, failed
if __name__ == "__main__":
@ -135,7 +133,7 @@ if __name__ == "__main__":
ignore_fields = sys.argv[3:] or ["logMonoTime", "controlsState.startMonoTime", "controlsState.cumLagMs"]
results = {"segment": {"proc": compare_logs(log1, log2, ignore_fields)}}
log_paths = {"segment": {"proc": {"ref": sys.argv[1], "new": sys.argv[2]}}}
diff1, diff2, failed = format_diff(results, log_paths, None)
diff_short, diff_long, failed = format_diff(results, log_paths, None)
print(diff2)
print(diff1)
print(diff_long)
print(diff_short)

@ -212,13 +212,13 @@ if __name__ == "__main__":
results: Any = {TEST_ROUTE: {}}
log_paths: Any = {TEST_ROUTE: {"models": {'ref': BASE_URL + log_fn, 'new': log_fn}}}
results[TEST_ROUTE]["models"] = compare_logs(cmp_log, log_msgs, tolerance=tolerance, ignore_fields=ignore)
diff1, diff2, failed = format_diff(results, log_paths, ref_commit)
diff_short, diff_long, failed = format_diff(results, log_paths, ref_commit)
print(diff2)
print(diff_long)
print('-------------\n'*5)
print(diff1)
print(diff_short)
with open("model_diff.txt", "w") as f:
f.write(diff2)
f.write(diff_long)
except Exception as e:
print(str(e))
failed = True

@ -1 +1 @@
ed2d58ec217fafb7b6b8f5e27ec622acd9e734f4
f851c7e7f90eff828a59444d20fac5df8cd7ae0c

@ -61,7 +61,7 @@ segments = [
]
# dashcamOnly makes don't need to be tested until a full port is done
excluded_interfaces = ["mock", "mazda", "tesla"]
excluded_interfaces = ["mock", "tesla"]
BASE_URL = "https://commadataci.blob.core.windows.net/openpilotci/"
REF_COMMIT_FN = os.path.join(PROC_REPLAY_DIR, "ref_commit")
@ -207,11 +207,11 @@ if __name__ == "__main__":
if not args.upload_only:
results[segment][proc] = result
diff1, diff2, failed = format_diff(results, log_paths, ref_commit)
diff_short, diff_long, failed = format_diff(results, log_paths, ref_commit)
if not upload:
with open(os.path.join(PROC_REPLAY_DIR, "diff.txt"), "w") as f:
f.write(diff2)
print(diff1)
f.write(diff_long)
print(diff_short)
if failed:
print("TEST FAILED")

@ -34,7 +34,7 @@ PROCS = {
"selfdrive.controls.plannerd": 16.5,
"./_ui": 18.0,
"selfdrive.locationd.paramsd": 9.0,
"./_sensord": 7.0,
"./sensord": 7.0,
"selfdrive.controls.radard": 4.5,
"selfdrive.modeld.modeld": 13.0,
"selfdrive.modeld.dmonitoringmodeld": 8.0,

@ -1,5 +1,4 @@
#!/bin/sh
cd "$(dirname "$0")"
export LD_LIBRARY_PATH="/system/lib64:$LD_LIBRARY_PATH"
export QT_QPA_PLATFORM="offscreen"
exec ./_soundd

@ -4,5 +4,4 @@ if [ -f /TICI ] && [ ! -f qt/spinner ]; then
cp qt/spinner_larch64 qt/spinner
fi
export LD_LIBRARY_PATH="/system/lib64:$LD_LIBRARY_PATH"
exec ./qt/spinner "$1"

@ -4,5 +4,4 @@ if [ -f /TICI ] && [ ! -f qt/text ]; then
cp qt/text_larch64 qt/text
fi
export LD_LIBRARY_PATH="/system/lib64:$LD_LIBRARY_PATH"
exec ./qt/text "$1"

@ -1,5 +1,4 @@
#!/bin/sh
cd "$(dirname "$0")"
export LD_LIBRARY_PATH="/system/lib64:$LD_LIBRARY_PATH"
export QT_DBL_CLICK_DIST=150
exec ./_ui

@ -26,7 +26,7 @@ typedef cereal::CarControl::HUDControl::AudibleAlert AudibleAlert;
const mat3 DEFAULT_CALIBRATION = {{ 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0 }};
const vec3 default_face_kpts_3d[] = {
constexpr vec3 default_face_kpts_3d[] = {
{-5.98, -51.20, 8.00}, {-17.64, -49.14, 8.00}, {-23.81, -46.40, 8.00}, {-29.98, -40.91, 8.00}, {-32.04, -37.49, 8.00},
{-34.10, -32.00, 8.00}, {-36.16, -21.03, 8.00}, {-36.16, 6.40, 8.00}, {-35.47, 10.51, 8.00}, {-32.73, 19.43, 8.00},
{-29.30, 26.29, 8.00}, {-24.50, 33.83, 8.00}, {-19.01, 41.37, 8.00}, {-14.21, 46.17, 8.00}, {-12.16, 47.54, 8.00},

@ -24,9 +24,12 @@
// ***** log metadata *****
kj::Array<capnp::word> logger_build_init_data() {
uint64_t wall_time = nanos_since_epoch();
MessageBuilder msg;
auto init = msg.initEvent().initInitData();
init.setWallTimeNanos(wall_time);
init.setVersion(COMMA_VERSION);
init.setDirty(!getenv("CLEAN"));
init.setDeviceType(Hardware::get_device_type());

@ -40,9 +40,9 @@ def apply_a_weighting(measurements: np.ndarray) -> np.ndarray:
class Mic:
def __init__(self, pm):
self.pm = pm
def __init__(self):
self.rk = Ratekeeper(RATE)
self.pm = messaging.PubMaster(['microphone'])
self.measurements = np.empty(0)
@ -93,11 +93,8 @@ class Mic:
self.update()
def main(pm=None):
if pm is None:
pm = messaging.PubMaster(['microphone'])
mic = Mic(pm)
def main():
mic = Mic()
mic.micd_thread()

@ -1 +1 @@
_sensord
sensord

@ -14,4 +14,4 @@ sensors = [
libs = [common, cereal, messaging, 'capnp', 'zmq', 'kj', 'pthread']
if arch == "larch64":
libs.append('i2c')
env.Program('_sensord', ['sensors_qcom2.cc'] + sensors, LIBS=libs)
env.Program('sensord', ['sensors_qcom2.cc'] + sensors, LIBS=libs)

@ -1,4 +0,0 @@
#!/bin/sh
cd "$(dirname "$0")"
export LD_LIBRARY_PATH="/system/lib64:$LD_LIBRARY_PATH"
exec ./_sensord

@ -108,7 +108,7 @@ class TestSensord(unittest.TestCase):
os.environ["LSM_SELF_TEST"] = "1"
# read initial sensor values every test case can use
os.system("pkill -f ./_sensord")
os.system("pkill -f ./sensord")
try:
managed_processes["sensord"].start()
cls.sample_secs = int(os.getenv("SAMPLE_SECS", "10"))

@ -1 +1 @@
Subproject commit d8dda2af3afcef0bb772fff580cfa8b3eabf7f69
Subproject commit 5a4a62ecaecb2bfd8bb0f77033aca46df4e668bd

@ -24,6 +24,7 @@ BinaryView::BinaryView(QWidget *parent) : QTableView(parent) {
delegate = new BinaryItemDelegate(this);
setItemDelegate(delegate);
horizontalHeader()->setSectionResizeMode(QHeaderView::Stretch);
horizontalHeader()->setFont(QFontDatabase::systemFont(QFontDatabase::FixedFont));
verticalHeader()->setSectionsClickable(false);
verticalHeader()->setSectionResizeMode(QHeaderView::Fixed);
verticalHeader()->setDefaultSectionSize(CELL_HEIGHT);

@ -370,6 +370,10 @@ void ChartsWidget::doAutoScroll() {
}
}
QSize ChartsWidget::minimumSizeHint() const {
return QSize(CHART_MIN_WIDTH, QWidget::minimumSizeHint().height());
}
void ChartsWidget::resizeEvent(QResizeEvent *event) {
QWidget::resizeEvent(event);
updateLayout();
@ -405,16 +409,15 @@ void ChartsWidget::removeAll() {
tabbar->removeTab(1);
}
tab_charts.clear();
zoomReset();
if (!charts.isEmpty()) {
for (auto c : charts) {
delete c;
}
charts.clear();
updateToolBar();
emit seriesChanged();
}
zoomReset();
}
void ChartsWidget::alignCharts() {

@ -54,6 +54,7 @@ signals:
void seriesChanged();
private:
QSize minimumSizeHint() const override;
void resizeEvent(QResizeEvent *event) override;
bool event(QEvent *event) override;
void alignCharts();

@ -134,11 +134,13 @@ void DetailWidget::refresh() {
for (auto s : binary_view->getOverlappingSignals()) {
warnings.push_back(tr("%1 has overlapping bits.").arg(s->name));
}
name_label->setText(QString("%1 (%2)").arg(msgName(msg_id), msg->transmitter));
} else {
warnings.push_back(tr("Drag-Select in binary view to create new signal."));
name_label->setText(msgName(msg_id));
}
QString msg_name = msg ? QString("%1 (%2)").arg(msg->name, msg->transmitter) : msgName(msg_id);
name_label->setText(msg_name);
name_label->setToolTip(msg_name);
remove_btn->setEnabled(msg != nullptr);
if (!warnings.isEmpty()) {

@ -39,13 +39,15 @@ VideoWidget::VideoWidget(QWidget *parent) : QFrame(parent) {
group->setExclusive(true);
QHBoxLayout *control_layout = new QHBoxLayout();
play_btn = new QPushButton();
play_btn = new QToolButton();
play_btn->setSizePolicy(QSizePolicy::Preferred, QSizePolicy::Preferred);
control_layout->addWidget(play_btn);
if (can->liveStreaming()) {
control_layout->addWidget(skip_to_end_btn = new QPushButton(utils::icon("skip-end-fill"), {}));
control_layout->addWidget(skip_to_end_btn = new QToolButton(this));
skip_to_end_btn->setSizePolicy(QSizePolicy::Preferred, QSizePolicy::Preferred);
skip_to_end_btn->setIcon(utils::icon("skip-end-fill"));
skip_to_end_btn->setToolTip(tr("Skip to the end"));
QObject::connect(skip_to_end_btn, &QPushButton::clicked, [group]() {
QObject::connect(skip_to_end_btn, &QToolButton::clicked, [group]() {
// set speed to 1.0
group->buttons()[2]->click();
can->pause(false);
@ -54,9 +56,11 @@ VideoWidget::VideoWidget(QWidget *parent) : QFrame(parent) {
}
for (float speed : {0.1, 0.5, 1., 2.}) {
QPushButton *btn = new QPushButton(QString("%1x").arg(speed), this);
QToolButton *btn = new QToolButton(this);
btn->setSizePolicy(QSizePolicy::Preferred, QSizePolicy::Preferred);
btn->setText(QString("%1x").arg(speed));
btn->setCheckable(true);
QObject::connect(btn, &QPushButton::clicked, [speed]() { can->setSpeed(speed); });
QObject::connect(btn, &QToolButton::clicked, [speed]() { can->setSpeed(speed); });
control_layout->addWidget(btn);
group->addButton(btn);
if (speed == 1.0) btn->setChecked(true);
@ -64,7 +68,7 @@ VideoWidget::VideoWidget(QWidget *parent) : QFrame(parent) {
main_layout->addLayout(control_layout);
setSizePolicy(QSizePolicy::Preferred, QSizePolicy::Maximum);
QObject::connect(play_btn, &QPushButton::clicked, []() { can->pause(!can->isPaused()); });
QObject::connect(play_btn, &QToolButton::clicked, []() { can->pause(!can->isPaused()); });
QObject::connect(can, &AbstractStream::paused, this, &VideoWidget::updatePlayBtnState);
QObject::connect(can, &AbstractStream::resume, this, &VideoWidget::updatePlayBtnState);
QObject::connect(&settings, &Settings::changed, this, &VideoWidget::updatePlayBtnState);

@ -9,8 +9,8 @@
#include <QFuture>
#include <QLabel>
#include <QPushButton>
#include <QSlider>
#include <QToolButton>
#include "selfdrive/ui/qt/widgets/cameraview.h"
#include "tools/cabana/streams/abstractstream.h"
@ -80,8 +80,8 @@ protected:
double maximum_time = 0;
QLabel *end_time_label;
QLabel *time_label;
QPushButton *play_btn;
QPushButton *skip_to_end_btn = nullptr;
QToolButton *play_btn;
QToolButton *skip_to_end_btn = nullptr;
InfoLabel *alert_label;
Slider *slider;
};

@ -116,9 +116,9 @@ def ui_thread(addr):
if yuv_img_raw is None or not yuv_img_raw.data.any():
continue
imgff = np.frombuffer(yuv_img_raw.data, dtype=np.uint8).reshape((vipc_client.height * 3 // 2, vipc_client.width))
imgff = np.frombuffer(yuv_img_raw.data, dtype=np.uint8).reshape((len(yuv_img_raw.data) // vipc_client.stride, vipc_client.stride))
num_px = vipc_client.width * vipc_client.height
bgr = cv2.cvtColor(imgff, cv2.COLOR_YUV2RGB_NV12)
bgr = cv2.cvtColor(imgff[:vipc_client.height * 3 // 2, :vipc_client.width], cv2.COLOR_YUV2RGB_NV12)
zoom_matrix = _BB_TO_FULL_FRAME[num_px]
cv2.warpAffine(bgr, zoom_matrix[:2], (img.shape[1], img.shape[0]), dst=img, flags=cv2.WARP_INVERSE_MAP)
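
Note: the change above stops assuming the YUV buffer is tightly packed. The frame is reshaped by the VisionIPC stride and then cropped to width/height before the NV12 conversion. A self-contained sketch of that indexing, with made-up dimensions standing in for the vipc_client fields:

import numpy as np
import cv2

# illustrative values; in the tool these come from vipc_client
width, height, stride = 1928, 1208, 2048

# NV12 buffer: `height` rows of Y plus `height // 2` rows of interleaved UV,
# each row padded out to `stride` bytes
raw = np.zeros(stride * height * 3 // 2, dtype=np.uint8)

imgff = raw.reshape((len(raw) // stride, stride))   # rows x stride
nv12 = imgff[:height * 3 // 2, :width]              # drop the row padding
rgb = cv2.cvtColor(nv12, cv2.COLOR_YUV2RGB_NV12)
print(rgb.shape)  # (1208, 1928, 3)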
