openpilot v0.10.1 release

date: 2025-08-23T03:07:58
master commit: ef2bb7f2fc
commit da3916ece3 (Vehicle Researcher)
Files changed:
1. .clang-tidy (19 lines)
2. .dockerignore (39 lines)
3. .editorconfig (11 lines)
4. .github/ISSUE_TEMPLATE/bug_report.yml (47 lines)
5. .github/ISSUE_TEMPLATE/config.yml (14 lines)
6. .github/ISSUE_TEMPLATE/enhancement.md (8 lines)
7. .github/ISSUE_TEMPLATE/pc_bug_report.yml (42 lines)
8. .github/labeler.yaml (27 lines)
9. .github/pull_request_template.md (68 lines)
10. .gitignore (104 lines)
11. .vscode/extensions.json (8 lines)
12. .vscode/launch.json (46 lines)
13. .vscode/settings.json (27 lines)
14. Dockerfile.openpilot (12 lines)
15. Dockerfile.openpilot_base (81 lines)
16. Jenkinsfile (283 lines)
17. LICENSE (7 lines)
18. README.md (107 lines)
19. RELEASES.md (1078 lines)
20. SConstruct (362 lines)
21. SECURITY.md (5 lines)
22. cereal/README.md (95 lines)
23. cereal/SConscript (20 lines)
24. cereal/__init__.py (11 lines)
25. cereal/car.capnp (1 line)
26. cereal/custom.capnp (71 lines)
27. cereal/include/c++.capnp (26 lines)
28. cereal/legacy.capnp (574 lines)
29. cereal/log.capnp (2691 lines)
30. cereal/messaging/__init__.py (257 lines)
31. cereal/messaging/bridge.cc (71 lines)
32. cereal/messaging/messaging.h (102 lines)
33. cereal/messaging/msgq_to_zmq.cc (144 lines)
34. cereal/messaging/msgq_to_zmq.h (37 lines)
35. cereal/messaging/socketmaster.cc (203 lines)
36. cereal/messaging/tests/__init__.py (0 lines)
37. cereal/messaging/tests/test_messaging.py (185 lines)
38. cereal/messaging/tests/test_pub_sub_master.py (160 lines)
39. cereal/messaging/tests/test_services.py (21 lines)
40. cereal/services.py (126 lines)
41. common/.gitignore (1 line)
42. common/SConscript (35 lines)
43. common/__init__.py (0 lines)
44. common/api.py (46 lines)
45. common/basedir.py (4 lines)
46. common/clutil.cc (98 lines)
47. common/clutil.h (28 lines)
48. common/constants.py (23 lines)
49. common/dict_helpers.py (9 lines)
50. common/file_helpers.py (58 lines)
51. common/filter_simple.py (17 lines)
52. common/git.py (42 lines)
53. common/gpio.py (89 lines)
54. common/gps.py (8 lines)
55. common/logging_extra.py (249 lines)
56. common/markdown.py (45 lines)
57. common/mat.h (85 lines)
58. common/mock/__init__.py (50 lines)
59. common/mock/generators.py (14 lines)
60. common/params.cc (242 lines)
61. common/params.h (90 lines)
62. common/params.py (19 lines)
63. common/params_keys.h (132 lines)
64. common/params_pyx.pyx (196 lines)
65. common/pid.py (64 lines)
66. common/prefix.h (39 lines)
67. common/prefix.py (59 lines)
68. common/queue.h (52 lines)
69. common/ratekeeper.cc (40 lines)
70. common/ratekeeper.h (23 lines)
71. common/realtime.py (96 lines)
72. common/retry.py (30 lines)
73. common/run.py (28 lines)
74. common/simple_kalman.py (54 lines)
75. common/spinner.py (52 lines)
76. common/stat_live.py (73 lines)
77. common/swaglog.cc (155 lines)
78. common/swaglog.h (76 lines)
79. common/swaglog.py (144 lines)
80. common/tests/.gitignore (1 line)
81. common/tests/__init__.py (0 lines)
82. common/tests/test_file_helpers.py (19 lines)
83. common/tests/test_markdown.py (15 lines)
84. common/tests/test_params.cc (27 lines)
85. common/tests/test_params.py (141 lines)
86. common/tests/test_runner.cc (2 lines)
87. common/tests/test_simple_kalman.py (29 lines)
88. common/tests/test_swaglog.cc (88 lines)
89. common/tests/test_util.cc (147 lines)
90. common/text_window.py (63 lines)
91. common/time_helpers.py (15 lines)
92. common/timeout.py (27 lines)
93. common/timing.h (51 lines)
94. common/transformations/.gitignore (2 lines)
95. common/transformations/README.md (70 lines)
96. common/transformations/SConscript (5 lines)
97. common/transformations/__init__.py (0 lines)
98. common/transformations/camera.py (179 lines)
99. common/transformations/coordinates.cc (100 lines)
100. common/transformations/coordinates.hpp (43 lines)
(Some files were not shown because too many files have changed in this diff.)

.clang-tidy
@@ -0,0 +1,19 @@
---
Checks: '
bugprone-*,
-bugprone-integer-division,
-bugprone-narrowing-conversions,
performance-*,
clang-analyzer-*,
misc-*,
-misc-unused-parameters,
modernize-*,
-modernize-avoid-c-arrays,
-modernize-deprecated-headers,
-modernize-use-auto,
-modernize-use-using,
-modernize-use-nullptr,
-modernize-use-trailing-return-type,
'
CheckOptions:
...

.dockerignore
@@ -0,0 +1,39 @@
**/.git
.DS_Store
*.dylib
*.DSYM
*.d
*.pyc
*.pyo
.*.swp
.*.swo
.*.un~
*.tmp
*.o
*.o-*
*.os
*.os-*
*.so
*.a
venv/
.venv/
notebooks
phone
massivemap
neos
installer
chffr/app2
chffr/backend/env
selfdrive/nav
selfdrive/baseui
selfdrive/test/simulator2
**/cache_data
xx/plus
xx/community
xx/projects
!xx/projects/eon_testing_master
!xx/projects/map3d
xx/ops
xx/junk

.editorconfig
@@ -0,0 +1,11 @@
root = true
[*]
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
[*.{py,pyx,pxd}]
charset = utf-8
indent_style = space
indent_size = 2

.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,47 @@
name: Bug report
description: For issues with running openpilot on your comma device
labels: ["bug"]
body:
  - type: markdown
    attributes:
      value: >
        Before creating a **bug report**, please check the following:

        * If the issue likely only affects your car model or make, go back and open a **car bug report** instead.

        * If the issue is related to the driving or driver monitoring models, you should open a [discussion](https://github.com/commaai/openpilot/discussions/categories/model-feedback) instead.

        * Ensure you're running the latest openpilot release.

        * Ensure you're using officially supported hardware. Issues running on PCs have a different issue template.

        * Ensure there isn't an existing issue for your bug. If there is, leave a comment on the existing issue.

        * Ensure you're running stock openpilot. We cannot look into bug reports from forks.


        If you're unsure whether you've hit a bug, check out the #installation-help channel in the [community Discord server](https://discord.comma.ai).
  - type: textarea
    attributes:
      label: Describe the bug
      description: Also include a description of how to reproduce the bug
    validations:
      required: true
  - type: input
    id: route
    attributes:
      label: Provide a route where the issue occurs
      description: Ensure the route is fully uploaded at https://useradmin.comma.ai. We cannot look into issues without routes, or at least a Dongle ID.
      placeholder: 77611a1fac303767|2020-05-11--16-37-07
    validations:
      required: true
  - type: input
    id: version
    attributes:
      label: openpilot version
      description: If you're not on release, provide the commit hash
      placeholder: 0.8.10
    validations:
      required: true
  - type: textarea
    attributes:
      label: Additional info
.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,14 @@
blank_issues_enabled: false
contact_links:
  - name: Car bug report
    url: https://github.com/commaai/opendbc/issues/new
    about: For issues with a particular car make or model
  - name: Join the Discord
    url: https://discord.comma.ai
    about: The community Discord is for both openpilot development and experience discussion
  - name: Report driving behavior feedback
    url: https://discord.com/channels/469524606043160576/1254834193066623017
    about: Feedback for the driving and driver monitoring models goes in the #driving-feedback channel in Discord
  - name: Community Wiki
    url: https://github.com/commaai/openpilot/wiki
    about: Check out our community wiki

.github/ISSUE_TEMPLATE/enhancement.md
@@ -0,0 +1,8 @@
---
name: Enhancement
about: For openpilot enhancement suggestions
title: ''
labels: 'enhancement'
assignees: ''
---

.github/ISSUE_TEMPLATE/pc_bug_report.yml
@@ -0,0 +1,42 @@
name: PC bug report
description: For issues with running openpilot on PC
labels: ["PC"]
body:
  - type: markdown
    attributes:
      value: >
        Before creating a **bug report**, please check the following:

        * Ensure you're running the latest openpilot release.

        * Ensure there isn't an existing issue for your bug. If there is, leave a comment on the existing issue.

        * Ensure you're running stock openpilot. We cannot look into bug reports from forks.


        If you're unsure whether you've hit a bug, check out the #installation-help channel in the [community Discord server](https://discord.comma.ai).
  - type: textarea
    attributes:
      label: Describe the bug
      description: Also include a description of how to reproduce the bug
    validations:
      required: true
  - type: input
    id: os-version
    attributes:
      label: OS Version
      placeholder: Ubuntu 24.04
    validations:
      required: true
  - type: input
    id: version
    attributes:
      label: openpilot version or commit
      placeholder: bd36f2ec8d3559909678eff2690c10a520938367
    validations:
      required: false
  - type: textarea
    attributes:
      label: Additional info

.github/labeler.yaml
@@ -0,0 +1,27 @@
CI / testing:
  - changed-files:
      - any-glob-to-all-files: "{.github/**,**/test_*,**/test/**,Jenkinsfile}"
car:
  - changed-files:
      - any-glob-to-all-files: '{selfdrive/car/**,opendbc_repo}'
simulation:
  - changed-files:
      - any-glob-to-all-files: 'tools/sim/**'
ui:
  - changed-files:
      - any-glob-to-all-files: '{selfdrive/ui/**,system/ui/**}'
tools:
  - changed-files:
      - any-glob-to-all-files: 'tools/**'
multilanguage:
  - changed-files:
      - any-glob-to-all-files: 'selfdrive/ui/translations/**'
autonomy:
  - changed-files:
      - any-glob-to-all-files: "{selfdrive/modeld/models/**,selfdrive/test/process_replay/model_replay_ref_commit}"

.github/pull_request_template.md
@@ -0,0 +1,68 @@
<!-- Please copy and paste the relevant template -->
<!--- ***** Template: Fingerprint *****
**Car**
Which car (make, model, year) this fingerprint is for
**Route**
A route with the fingerprint
-->
<!--- ***** Template: Car Bugfix *****
**Description**
A description of the bug and the fix. Also link the issue if it exists.
**Verification**
Explain how you tested this bug fix.
**Route**
Route: [a route with the bug fix]
-->
<!--- ***** Template: Bugfix *****
**Description**
A description of the bug and the fix. Also link the issue if it exists.
**Verification**
Explain how you tested this bug fix.
-->
<!--- ***** Template: Car Port *****
**Checklist**
- [ ] added entry to CAR in selfdrive/car/*/values.py and ran `selfdrive/car/docs.py` to generate new docs
- [ ] test route added to [routes.py](https://github.com/commaai/openpilot/blob/master/selfdrive/car/tests/routes.py)
- [ ] route with openpilot:
- [ ] route with stock system:
- [ ] car harness used (if comma doesn't sell it, put N/A):
-->
<!--- ***** Template: Refactor *****
**Description**
A description of the refactor, including the goals it accomplishes.
**Verification**
Explain how you tested the refactor for regressions.
-->

.gitignore
@@ -0,0 +1,104 @@
venv/
.venv/
.ci_cache
.env
.clang-format
.DS_Store
.tags
.ipynb_checkpoints
.idea
.overlay_init
.overlay_consistent
.sconsign.dblite
model2.png
a.out
.hypothesis
.cache/
/docs_site/
*.mp4
*.dylib
*.DSYM
*.d
*.pyc
*.pyo
.*.swp
.*.swo
.*.un~
*.tmp
*.o
*.o-*
*.os
*.os-*
*.so
*.a
*.clb
*.class
*.pyxbldc
*.vcd
*.qm
*_pyx.cpp
config.json
clcache
compile_commands.json
compare_runtime*.html
persist
selfdrive/pandad/pandad
cereal/services.h
cereal/gen
cereal/messaging/bridge
selfdrive/mapd/default_speeds_by_region.json
system/proclogd/proclogd
selfdrive/ui/translations/tmp
selfdrive/test/longitudinal_maneuvers/out
selfdrive/car/tests/cars_dump
system/camerad/camerad
system/camerad/test/ae_gray_test
notebooks
hyperthneed
provisioning
.coverage*
coverage.xml
htmlcov
pandaextra
.mypy_cache/
flycheck_*
cppcheck_report.txt
comma*.sh
selfdrive/modeld/models/*.pkl
*.bz2
*.zst
build/
!**/.gitkeep
poetry.toml
Pipfile
### VisualStudioCode ###
.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
!.vscode/*.code-snippets
# Local History for Visual Studio Code
.history/
# Built Visual Studio Code Extensions
*.vsix
### VisualStudioCode Patch ###
# Ignore all local history of files
.history
.ionide

.vscode/extensions.json
@@ -0,0 +1,8 @@
{
"recommendations": [
"ms-python.python",
"ms-vscode.cpptools",
"elagil.pre-commit-helper",
"charliermarsh.ruff",
]
}

.vscode/launch.json
@@ -0,0 +1,46 @@
{
"version": "0.2.0",
"inputs": [
{
"id": "python_process",
"type": "pickString",
"description": "Select the process to debug",
"options": [
"selfdrive/controls/controlsd.py",
"system/timed/timed.py",
"tools/sim/run_bridge.py"
]
},
{
"id": "cpp_process",
"type": "pickString",
"description": "Select the process to debug",
"options": [
"selfdrive/ui/ui"
]
},
{
"id": "args",
"description": "Arguments to pass to the process",
"type": "promptString"
}
],
"configurations": [
{
"name": "Python: openpilot Process",
"type": "debugpy",
"request": "launch",
"program": "${input:python_process}",
"console": "integratedTerminal",
"justMyCode": true,
"args": "${input:args}"
},
{
"name": "C++: openpilot Process",
"type": "cppdbg",
"request": "launch",
"program": "${workspaceFolder}/${input:cpp_process}",
"cwd": "${workspaceFolder}",
}
]
}

.vscode/settings.json
@@ -0,0 +1,27 @@
{
"editor.tabSize": 2,
"editor.insertSpaces": true,
"editor.renderWhitespace": "trailing",
"files.trimTrailingWhitespace": true,
"search.exclude": {
"**/.git": true,
"**/.venv": true,
"**/__pycache__": true
},
"files.exclude": {
"**/.git": true,
"**/.venv": true,
"**/__pycache__": true
},
"python.analysis.exclude": [
"**/.git",
"**/.venv",
"**/__pycache__",
// exclude directories that should be using the symlinked version
"common/**",
"selfdrive/**",
"system/**",
"third_party/**",
"tools/**",
]
}

Dockerfile.openpilot
@@ -0,0 +1,12 @@
FROM ghcr.io/commaai/openpilot-base:latest
ENV PYTHONUNBUFFERED=1
ENV OPENPILOT_PATH=/home/batman/openpilot
RUN mkdir -p ${OPENPILOT_PATH}
WORKDIR ${OPENPILOT_PATH}
COPY . ${OPENPILOT_PATH}/
RUN scons --cache-readonly -j$(nproc)

Dockerfile.openpilot_base
@@ -0,0 +1,81 @@
FROM ubuntu:24.04
ENV PYTHONUNBUFFERED=1
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y --no-install-recommends sudo tzdata locales ssh pulseaudio xvfb x11-xserver-utils gnome-screenshot python3-tk python3-dev && \
rm -rf /var/lib/apt/lists/*
RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && locale-gen
ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US:en
ENV LC_ALL=en_US.UTF-8
COPY tools/install_ubuntu_dependencies.sh /tmp/tools/
RUN /tmp/tools/install_ubuntu_dependencies.sh && \
rm -rf /var/lib/apt/lists/* /tmp/* && \
cd /usr/lib/gcc/arm-none-eabi/* && \
rm -rf arm/ thumb/nofp thumb/v6* thumb/v8* thumb/v7+fp thumb/v7-r+fp.sp
# Add OpenCL
RUN apt-get update && apt-get install -y --no-install-recommends \
apt-utils \
alien \
unzip \
tar \
curl \
xz-utils \
dbus \
gcc-arm-none-eabi \
tmux \
vim \
libx11-6 \
wget \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir -p /tmp/opencl-driver-intel && \
cd /tmp/opencl-driver-intel && \
wget https://github.com/intel/llvm/releases/download/2024-WW14/oclcpuexp-2024.17.3.0.09_rel.tar.gz && \
wget https://github.com/oneapi-src/oneTBB/releases/download/v2021.12.0/oneapi-tbb-2021.12.0-lin.tgz && \
mkdir -p /opt/intel/oclcpuexp_2024.17.3.0.09_rel && \
cd /opt/intel/oclcpuexp_2024.17.3.0.09_rel && \
tar -zxvf /tmp/opencl-driver-intel/oclcpuexp-2024.17.3.0.09_rel.tar.gz && \
mkdir -p /etc/OpenCL/vendors && \
echo /opt/intel/oclcpuexp_2024.17.3.0.09_rel/x64/libintelocl.so > /etc/OpenCL/vendors/intel_expcpu.icd && \
cd /opt/intel && \
tar -zxvf /tmp/opencl-driver-intel/oneapi-tbb-2021.12.0-lin.tgz && \
ln -s /opt/intel/oneapi-tbb-2021.12.0/lib/intel64/gcc4.8/libtbb.so /opt/intel/oclcpuexp_2024.17.3.0.09_rel/x64 && \
ln -s /opt/intel/oneapi-tbb-2021.12.0/lib/intel64/gcc4.8/libtbbmalloc.so /opt/intel/oclcpuexp_2024.17.3.0.09_rel/x64 && \
ln -s /opt/intel/oneapi-tbb-2021.12.0/lib/intel64/gcc4.8/libtbb.so.12 /opt/intel/oclcpuexp_2024.17.3.0.09_rel/x64 && \
ln -s /opt/intel/oneapi-tbb-2021.12.0/lib/intel64/gcc4.8/libtbbmalloc.so.2 /opt/intel/oclcpuexp_2024.17.3.0.09_rel/x64 && \
mkdir -p /etc/ld.so.conf.d && \
echo /opt/intel/oclcpuexp_2024.17.3.0.09_rel/x64 > /etc/ld.so.conf.d/libintelopenclexp.conf && \
ldconfig -f /etc/ld.so.conf.d/libintelopenclexp.conf && \
cd / && \
rm -rf /tmp/opencl-driver-intel
ENV NVIDIA_VISIBLE_DEVICES=all
ENV NVIDIA_DRIVER_CAPABILITIES=graphics,utility,compute
ENV QTWEBENGINE_DISABLE_SANDBOX=1
RUN dbus-uuidgen > /etc/machine-id
ARG USER=batman
ARG USER_UID=1001
RUN useradd -m -s /bin/bash -u $USER_UID $USER
RUN usermod -aG sudo $USER
RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
USER $USER
COPY --chown=$USER pyproject.toml uv.lock /home/$USER
COPY --chown=$USER tools/install_python_dependencies.sh /home/$USER/tools/
ENV VIRTUAL_ENV=/home/$USER/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
RUN cd /home/$USER && \
tools/install_python_dependencies.sh && \
rm -rf tools/ pyproject.toml uv.lock .cache
USER root
RUN sudo git config --global --add safe.directory /tmp/openpilot

Jenkinsfile
@@ -0,0 +1,283 @@
def retryWithDelay(int maxRetries, int delay, Closure body) {
for (int i = 0; i < maxRetries; i++) {
try {
return body()
} catch (Exception e) {
sleep(delay)
}
}
throw Exception("Failed after ${maxRetries} retries")
}
def device(String ip, String step_label, String cmd) {
withCredentials([file(credentialsId: 'id_rsa', variable: 'key_file')]) {
def ssh_cmd = """
ssh -o ConnectTimeout=5 -o ServerAliveInterval=5 -o ServerAliveCountMax=2 -o BatchMode=yes -o StrictHostKeyChecking=no -i ${key_file} 'comma@${ip}' exec /usr/bin/bash <<'END'
set -e
export TERM=xterm-256color
shopt -s huponexit # kill all child processes when the shell exits
export CI=1
export PYTHONWARNINGS=error
export LOGPRINT=debug
export TEST_DIR=${env.TEST_DIR}
export SOURCE_DIR=${env.SOURCE_DIR}
export GIT_BRANCH=${env.GIT_BRANCH}
export GIT_COMMIT=${env.GIT_COMMIT}
export CI_ARTIFACTS_TOKEN=${env.CI_ARTIFACTS_TOKEN}
export GITHUB_COMMENTS_TOKEN=${env.GITHUB_COMMENTS_TOKEN}
export AZURE_TOKEN='${env.AZURE_TOKEN}'
# only use 1 thread for tici tests since most require HIL
export PYTEST_ADDOPTS="-n0 -s"
export GIT_SSH_COMMAND="ssh -i /data/gitkey"
source ~/.bash_profile
if [ -f /TICI ]; then
source /etc/profile
rm -rf /tmp/tmp*
rm -rf ~/.commacache
rm -rf /dev/shm/*
rm -rf /dev/tmp/tmp*
if ! systemctl is-active --quiet systemd-resolved; then
echo "restarting resolved"
sudo systemctl start systemd-resolved
sleep 3
fi
# restart aux USB
if [ -e /sys/bus/usb/drivers/hub/3-0:1.0 ]; then
echo "restarting aux usb"
echo "3-0:1.0" | sudo tee /sys/bus/usb/drivers/hub/unbind
sleep 0.5
echo "3-0:1.0" | sudo tee /sys/bus/usb/drivers/hub/bind
fi
fi
if [ -f /data/openpilot/launch_env.sh ]; then
source /data/openpilot/launch_env.sh
fi
ln -snf ${env.TEST_DIR} /data/pythonpath
cd ${env.TEST_DIR} || true
time ${cmd}
END"""
sh script: ssh_cmd, label: step_label
}
}
def deviceStage(String stageName, String deviceType, List extra_env, def steps) {
stage(stageName) {
if (currentBuild.result != null) {
return
}
if (isReplay()) {
error("REPLAYING TESTS IS NOT ALLOWED. FIX THEM INSTEAD.")
}
def extra = extra_env.collect { "export ${it}" }.join('\n');
def branch = env.BRANCH_NAME ?: 'master';
def gitDiff = sh returnStdout: true, script: 'curl -s -H "Authorization: Bearer ${GITHUB_COMMENTS_TOKEN}" https://api.github.com/repos/commaai/openpilot/compare/master...${GIT_BRANCH} | jq .files[].filename || echo "/"', label: 'Getting changes'
lock(resource: "", label: deviceType, inversePrecedence: true, variable: 'device_ip', quantity: 1, resourceSelectStrategy: 'random') {
docker.image('ghcr.io/commaai/alpine-ssh').inside('--user=root') {
timeout(time: 35, unit: 'MINUTES') {
retry (3) {
def date = sh(script: 'date', returnStdout: true).trim();
device(device_ip, "set time", "date -s '" + date + "'")
device(device_ip, "git checkout", extra + "\n" + readFile("selfdrive/test/setup_device_ci.sh"))
}
steps.each { item ->
def name = item[0]
def cmd = item[1]
def args = item[2]
def diffPaths = args.diffPaths ?: []
def cmdTimeout = args.timeout ?: 9999
if (branch != "master" && !branch.contains("__jenkins_loop_") && diffPaths && !hasPathChanged(gitDiff, diffPaths)) {
println "Skipping ${name}: no changes in ${diffPaths}."
return
} else {
timeout(time: cmdTimeout, unit: 'SECONDS') {
device(device_ip, name, cmd)
}
}
}
}
}
}
}
}
def hasPathChanged(String gitDiff, List<String> paths) {
for (path in paths) {
if (gitDiff.contains(path)) {
return true
}
}
return false
}
def isReplay() {
def replayClass = "org.jenkinsci.plugins.workflow.cps.replay.ReplayCause"
return currentBuild.rawBuild.getCauses().any{ cause -> cause.toString().contains(replayClass) }
}
def setupCredentials() {
withCredentials([
string(credentialsId: 'azure_token', variable: 'AZURE_TOKEN'),
]) {
env.AZURE_TOKEN = "${AZURE_TOKEN}"
}
withCredentials([
string(credentialsId: 'ci_artifacts_pat', variable: 'CI_ARTIFACTS_TOKEN'),
]) {
env.CI_ARTIFACTS_TOKEN = "${CI_ARTIFACTS_TOKEN}"
}
withCredentials([
string(credentialsId: 'post_comments_github_pat', variable: 'GITHUB_COMMENTS_TOKEN'),
]) {
env.GITHUB_COMMENTS_TOKEN = "${GITHUB_COMMENTS_TOKEN}"
}
}
def step(String name, String cmd, Map args = [:]) {
return [name, cmd, args]
}
node {
env.CI = "1"
env.PYTHONWARNINGS = "error"
env.TEST_DIR = "/data/openpilot"
env.SOURCE_DIR = "/data/openpilot_source/"
setupCredentials()
env.GIT_BRANCH = checkout(scm).GIT_BRANCH
env.GIT_COMMIT = checkout(scm).GIT_COMMIT
def excludeBranches = ['__nightly', 'devel', 'devel-staging', 'release3', 'release3-staging',
'release-tici', 'testing-closet*', 'hotfix-*']
def excludeRegex = excludeBranches.join('|').replaceAll('\\*', '.*')
if (env.BRANCH_NAME != 'master' && !env.BRANCH_NAME.contains('__jenkins_loop_')) {
properties([
disableConcurrentBuilds(abortPrevious: true)
])
}
try {
if (env.BRANCH_NAME == 'devel-staging') {
deviceStage("build release3-staging", "tici-needs-can", [], [
step("build release3-staging", "RELEASE_BRANCH=release3-staging $SOURCE_DIR/release/build_release.sh"),
])
}
if (env.BRANCH_NAME == '__nightly') {
parallel (
'nightly': {
deviceStage("build nightly", "tici-needs-can", [], [
step("build nightly", "RELEASE_BRANCH=nightly $SOURCE_DIR/release/build_release.sh"),
])
},
'nightly-dev': {
deviceStage("build nightly-dev", "tici-needs-can", [], [
step("build nightly-dev", "PANDA_DEBUG_BUILD=1 RELEASE_BRANCH=nightly-dev $SOURCE_DIR/release/build_release.sh"),
])
},
)
}
if (!env.BRANCH_NAME.matches(excludeRegex)) {
parallel (
// tici tests
'onroad tests': {
deviceStage("onroad", "tici-needs-can", ["UNSAFE=1"], [
step("build openpilot", "cd system/manager && ./build.py"),
step("check dirty", "release/check-dirty.sh"),
step("onroad tests", "pytest selfdrive/test/test_onroad.py -s", [timeout: 60]),
])
},
'HW + Unit Tests': {
deviceStage("tici-hardware", "tici-common", ["UNSAFE=1"], [
step("build", "cd system/manager && ./build.py"),
step("test pandad", "pytest selfdrive/pandad/tests/test_pandad.py", [diffPaths: ["panda", "selfdrive/pandad/"]]),
step("test power draw", "pytest -s system/hardware/tici/tests/test_power_draw.py"),
step("test encoder", "LD_LIBRARY_PATH=/usr/local/lib pytest system/loggerd/tests/test_encoder.py", [diffPaths: ["system/loggerd/"]]),
step("test pigeond", "pytest system/ubloxd/tests/test_pigeond.py", [diffPaths: ["system/ubloxd/"]]),
step("test manager", "pytest system/manager/test/test_manager.py"),
])
},
'loopback': {
deviceStage("loopback", "tici-loopback", ["UNSAFE=1"], [
step("build openpilot", "cd system/manager && ./build.py"),
step("test pandad loopback", "pytest selfdrive/pandad/tests/test_pandad_loopback.py"),
])
},
'camerad AR0231': {
deviceStage("AR0231", "tici-ar0231", ["UNSAFE=1"], [
step("build", "cd system/manager && ./build.py"),
step("test camerad", "pytest system/camerad/test/test_camerad.py", [timeout: 60]),
step("test exposure", "pytest system/camerad/test/test_exposure.py"),
])
},
'camerad OX03C10': {
deviceStage("OX03C10", "tici-ox03c10", ["UNSAFE=1"], [
step("build", "cd system/manager && ./build.py"),
step("test camerad", "pytest system/camerad/test/test_camerad.py", [timeout: 60]),
step("test exposure", "pytest system/camerad/test/test_exposure.py"),
])
},
'camerad OS04C10': {
deviceStage("OS04C10", "tici-os04c10", ["UNSAFE=1"], [
step("build", "cd system/manager && ./build.py"),
step("test camerad", "pytest system/camerad/test/test_camerad.py", [timeout: 60]),
step("test exposure", "pytest system/camerad/test/test_exposure.py"),
])
},
'sensord': {
deviceStage("LSM + MMC", "tici-lsmc", ["UNSAFE=1"], [
step("build", "cd system/manager && ./build.py"),
step("test sensord", "pytest system/sensord/tests/test_sensord.py"),
])
deviceStage("BMX + LSM", "tici-bmx-lsm", ["UNSAFE=1"], [
step("build", "cd system/manager && ./build.py"),
step("test sensord", "pytest system/sensord/tests/test_sensord.py"),
])
},
'replay': {
deviceStage("model-replay", "tici-replay", ["UNSAFE=1"], [
step("build", "cd system/manager && ./build.py", [diffPaths: ["selfdrive/modeld/", "tinygrad_repo", "selfdrive/test/process_replay/model_replay.py"]]),
step("model replay", "selfdrive/test/process_replay/model_replay.py", [diffPaths: ["selfdrive/modeld/", "tinygrad_repo", "selfdrive/test/process_replay/model_replay.py"]]),
])
},
'tizi': {
deviceStage("tizi", "tizi", ["UNSAFE=1"], [
step("build openpilot", "cd system/manager && ./build.py"),
step("test pandad loopback", "SINGLE_PANDA=1 pytest selfdrive/pandad/tests/test_pandad_loopback.py"),
step("test pandad spi", "pytest selfdrive/pandad/tests/test_pandad_spi.py"),
step("test pandad", "pytest selfdrive/pandad/tests/test_pandad.py", [diffPaths: ["panda", "selfdrive/pandad/"]]),
step("test amp", "pytest system/hardware/tici/tests/test_amplifier.py"),
// TODO: enable once new AGNOS is available
// step("test esim", "pytest system/hardware/tici/tests/test_esim.py"),
step("test qcomgpsd", "pytest system/qcomgpsd/tests/test_qcomgpsd.py", [diffPaths: ["system/qcomgpsd/"]]),
])
},
)
}
} catch (Exception e) {
currentBuild.result = 'FAILED'
throw e
}
}

LICENSE
@@ -0,0 +1,7 @@
Copyright (c) 2018, Comma.ai, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

README.md
@@ -0,0 +1,107 @@
<div align="center" style="text-align: center;">
<h1>openpilot</h1>
<p>
<b>openpilot is an operating system for robotics.</b>
<br>
Currently, it upgrades the driver assistance system in 300+ supported cars.
</p>
<h3>
<a href="https://docs.comma.ai">Docs</a>
<span> · </span>
<a href="https://docs.comma.ai/contributing/roadmap/">Roadmap</a>
<span> · </span>
<a href="https://github.com/commaai/openpilot/blob/master/docs/CONTRIBUTING.md">Contribute</a>
<span> · </span>
<a href="https://discord.comma.ai">Community</a>
<span> · </span>
<a href="https://comma.ai/shop">Try it on a comma 3X</a>
</h3>
Quick start: `bash <(curl -fsSL openpilot.comma.ai)`
[![openpilot tests](https://github.com/commaai/openpilot/actions/workflows/selfdrive_tests.yaml/badge.svg)](https://github.com/commaai/openpilot/actions/workflows/selfdrive_tests.yaml)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](LICENSE)
[![X Follow](https://img.shields.io/twitter/follow/comma_ai)](https://x.com/comma_ai)
[![Discord](https://img.shields.io/discord/469524606043160576)](https://discord.comma.ai)
</div>
<table>
<tr>
<td><a href="https://youtu.be/NmBfgOanCyk" title="Video By Greer Viau"><img src="https://github.com/commaai/openpilot/assets/8762862/2f7112ae-f748-4f39-b617-fabd689c3772"></a></td>
<td><a href="https://youtu.be/VHKyqZ7t8Gw" title="Video By Logan LeGrand"><img src="https://github.com/commaai/openpilot/assets/8762862/92351544-2833-40d7-9e0b-7ef7ae37ec4c"></a></td>
<td><a href="https://youtu.be/SUIZYzxtMQs" title="A drive to Taco Bell"><img src="https://github.com/commaai/openpilot/assets/8762862/05ceefc5-2628-439c-a9b2-89ce77dc6f63"></a></td>
</tr>
</table>
Using openpilot in a car
------
To use openpilot in a car, you need four things:
1. **Supported Device:** a comma 3/3X, available at [comma.ai/shop](https://comma.ai/shop/comma-3x).
2. **Software:** The setup procedure for the comma 3/3X allows users to enter a URL for custom software. Use the URL `openpilot.comma.ai` to install the release version.
3. **Supported Car:** Ensure that you have one of [the 275+ supported cars](docs/CARS.md).
4. **Car Harness:** You will also need a [car harness](https://comma.ai/shop/car-harness) to connect your comma 3/3X to your car.
We have detailed instructions for [how to install the harness and device in a car](https://comma.ai/setup). Note that it's possible to run openpilot on [other hardware](https://blog.comma.ai/self-driving-car-for-free/), although it's not plug-and-play.
### Branches
| branch | URL | description |
|------------------|----------------------------------------|-------------------------------------------------------------------------------------|
| `release3` | openpilot.comma.ai | This is openpilot's release branch. |
| `release3-staging` | openpilot-test.comma.ai | This is the staging branch for releases. Use it to get new releases slightly early. |
| `nightly` | openpilot-nightly.comma.ai | This is the bleeding edge development branch. Do not expect this to be stable. |
| `nightly-dev` | installer.comma.ai/commaai/nightly-dev | Same as nightly, but includes experimental development features for some cars. |
To start developing openpilot
------
openpilot is developed by [comma](https://comma.ai/) and by users like you. We welcome both pull requests and issues on [GitHub](http://github.com/commaai/openpilot).
* Join the [community Discord](https://discord.comma.ai)
* Check out [the contributing docs](docs/CONTRIBUTING.md)
* Check out the [openpilot tools](tools/)
* Code documentation lives at https://docs.comma.ai
* Information about running openpilot lives on the [community wiki](https://github.com/commaai/openpilot/wiki)
Want to get paid to work on openpilot? [comma is hiring](https://comma.ai/jobs#open-positions) and offers lots of [bounties](https://comma.ai/bounties) for external contributors.
Safety and Testing
----
* openpilot observes [ISO26262](https://en.wikipedia.org/wiki/ISO_26262) guidelines, see [SAFETY.md](docs/SAFETY.md) for more details.
* openpilot has software-in-the-loop [tests](.github/workflows/selfdrive_tests.yaml) that run on every commit.
* The code enforcing the safety model lives in panda and is written in C, see [code rigor](https://github.com/commaai/panda#code-rigor) for more details.
* panda has software-in-the-loop [safety tests](https://github.com/commaai/panda/tree/master/tests/safety).
* Internally, we have a hardware-in-the-loop Jenkins test suite that builds and unit tests the various processes.
* panda has additional hardware-in-the-loop [tests](https://github.com/commaai/panda/blob/master/Jenkinsfile).
* We run the latest openpilot in a testing closet containing 10 comma devices continuously replaying routes.
<details>
<summary>MIT Licensed</summary>
openpilot is released under the MIT license. Some parts of the software are released under other licenses as specified.
Any user of this software shall indemnify and hold harmless Comma.ai, Inc. and its directors, officers, employees, agents, stockholders, affiliates, subcontractors and customers from and against all allegations, claims, actions, suits, demands, damages, liabilities, obligations, losses, settlements, judgments, costs and expenses (including without limitation attorneys’ fees and costs) which arise out of, relate to or result from any use of this software by user.
**THIS IS ALPHA QUALITY SOFTWARE FOR RESEARCH PURPOSES ONLY. THIS IS NOT A PRODUCT.
YOU ARE RESPONSIBLE FOR COMPLYING WITH LOCAL LAWS AND REGULATIONS.
NO WARRANTY EXPRESSED OR IMPLIED.**
</details>
<details>
<summary>User Data and comma Account</summary>
By default, openpilot uploads the driving data to our servers. You can also access your data through [comma connect](https://connect.comma.ai/). We use your data to train better models and improve openpilot for everyone.
openpilot is open source software: the user is free to disable data collection if they wish to do so.
openpilot logs the road-facing cameras, CAN, GPS, IMU, magnetometer, thermal sensors, crashes, and operating system logs.
The driver-facing camera and microphone are only logged if you explicitly opt-in in settings.
By using openpilot, you agree to [our Privacy Policy](https://comma.ai/privacy). You understand that use of this software or its related services will generate certain types of user data, which may be logged and stored at the sole discretion of comma. By accepting this agreement, you grant an irrevocable, perpetual, worldwide right to comma for the use of this data.
</details>

RELEASES.md
File diff suppressed because it is too large.

SConstruct
@@ -0,0 +1,362 @@
import os
import subprocess
import sys
import sysconfig
import platform
import numpy as np
import SCons.Errors
SCons.Warnings.warningAsException(True)
# pending upstream fix - https://github.com/SCons/scons/issues/4461
#SetOption('warn', 'all')
TICI = os.path.isfile('/TICI')
AGNOS = TICI
Decider('MD5-timestamp')
SetOption('num_jobs', max(1, int(os.cpu_count()/2)))
AddOption('--kaitai',
action='store_true',
help='Regenerate kaitai struct parsers')
AddOption('--asan',
action='store_true',
help='turn on ASAN')
AddOption('--ubsan',
action='store_true',
help='turn on UBSan')
AddOption('--coverage',
action='store_true',
help='build with test coverage options')
AddOption('--clazy',
action='store_true',
help='build with clazy')
AddOption('--ccflags',
action='store',
type='string',
default='',
help='pass arbitrary flags over the command line')
AddOption('--external-sconscript',
action='store',
metavar='FILE',
dest='external_sconscript',
help='add an external SConscript to the build')
AddOption('--mutation',
action='store_true',
help='generate mutation-ready code')
AddOption('--minimal',
action='store_false',
dest='extras',
default=os.path.exists(File('#.lfsconfig').abspath), # minimal by default on release branch (where there's no LFS)
help='the minimum build to run openpilot. no tests, tools, etc.')
## Architecture name breakdown (arch)
## - larch64: linux tici aarch64
## - aarch64: linux pc aarch64
## - x86_64: linux pc x64
## - Darwin: mac x64 or arm64
real_arch = arch = subprocess.check_output(["uname", "-m"], encoding='utf8').rstrip()
if platform.system() == "Darwin":
arch = "Darwin"
brew_prefix = subprocess.check_output(['brew', '--prefix'], encoding='utf8').strip()
elif arch == "aarch64" and AGNOS:
arch = "larch64"
assert arch in ["larch64", "aarch64", "x86_64", "Darwin"]
lenv = {
"PATH": os.environ['PATH'],
"PYTHONPATH": Dir("#").abspath + ':' + Dir(f"#third_party/acados").abspath,
"ACADOS_SOURCE_DIR": Dir("#third_party/acados").abspath,
"ACADOS_PYTHON_INTERFACE_PATH": Dir("#third_party/acados/acados_template").abspath,
"TERA_PATH": Dir("#").abspath + f"/third_party/acados/{arch}/t_renderer"
}
rpath = []
if arch == "larch64":
cpppath = [
"#third_party/opencl/include",
]
libpath = [
"/usr/local/lib",
"/system/vendor/lib64",
f"#third_party/acados/{arch}/lib",
]
libpath += [
"#third_party/libyuv/larch64/lib",
"/usr/lib/aarch64-linux-gnu"
]
cflags = ["-DQCOM2", "-mcpu=cortex-a57"]
cxxflags = ["-DQCOM2", "-mcpu=cortex-a57"]
rpath += ["/usr/local/lib"]
else:
cflags = []
cxxflags = []
cpppath = []
rpath += []
# MacOS
if arch == "Darwin":
libpath = [
f"#third_party/libyuv/{arch}/lib",
f"#third_party/acados/{arch}/lib",
f"{brew_prefix}/lib",
f"{brew_prefix}/opt/openssl@3.0/lib",
"/System/Library/Frameworks/OpenGL.framework/Libraries",
]
cflags += ["-DGL_SILENCE_DEPRECATION"]
cxxflags += ["-DGL_SILENCE_DEPRECATION"]
cpppath += [
f"{brew_prefix}/include",
f"{brew_prefix}/opt/openssl@3.0/include",
]
# Linux
else:
libpath = [
f"#third_party/acados/{arch}/lib",
f"#third_party/libyuv/{arch}/lib",
"/usr/lib",
"/usr/local/lib",
]
if GetOption('asan'):
ccflags = ["-fsanitize=address", "-fno-omit-frame-pointer"]
ldflags = ["-fsanitize=address"]
elif GetOption('ubsan'):
ccflags = ["-fsanitize=undefined"]
ldflags = ["-fsanitize=undefined"]
else:
ccflags = []
ldflags = []
# no --as-needed on mac linker
if arch != "Darwin":
ldflags += ["-Wl,--as-needed", "-Wl,--no-undefined"]
ccflags_option = GetOption('ccflags')
if ccflags_option:
ccflags += ccflags_option.split(' ')
env = Environment(
ENV=lenv,
CCFLAGS=[
"-g",
"-fPIC",
"-O2",
"-Wunused",
"-Werror",
"-Wshadow",
"-Wno-unknown-warning-option",
"-Wno-inconsistent-missing-override",
"-Wno-c99-designator",
"-Wno-reorder-init-list",
"-Wno-vla-cxx-extension",
] + cflags + ccflags,
CPPPATH=cpppath + [
"#",
"#third_party/acados/include",
"#third_party/acados/include/blasfeo/include",
"#third_party/acados/include/hpipm/include",
"#third_party/catch2/include",
"#third_party/libyuv/include",
"#third_party/json11",
"#third_party/linux/include",
"#third_party",
"#msgq",
],
CC='clang',
CXX='clang++',
LINKFLAGS=ldflags,
RPATH=rpath,
CFLAGS=["-std=gnu11"] + cflags,
CXXFLAGS=["-std=c++1z"] + cxxflags,
LIBPATH=libpath + [
"#msgq_repo",
"#third_party",
"#selfdrive/pandad",
"#common",
"#rednose/helpers",
],
CYTHONCFILESUFFIX=".cpp",
COMPILATIONDB_USE_ABSPATH=True,
REDNOSE_ROOT="#",
tools=["default", "cython", "compilation_db", "rednose_filter"],
toolpath=["#site_scons/site_tools", "#rednose_repo/site_scons/site_tools"],
)
if arch == "Darwin":
# RPATH is not supported on macOS, instead use the linker flags
darwin_rpath_link_flags = [f"-Wl,-rpath,{path}" for path in env["RPATH"]]
env["LINKFLAGS"] += darwin_rpath_link_flags
env.CompilationDatabase('compile_commands.json')
# Setup cache dir
cache_dir = '/data/scons_cache' if AGNOS else '/tmp/scons_cache'
CacheDir(cache_dir)
Clean(["."], cache_dir)
node_interval = 5
node_count = 0
def progress_function(node):
global node_count
node_count += node_interval
sys.stderr.write("progress: %d\n" % node_count)
if os.environ.get('SCONS_PROGRESS'):
Progress(progress_function, interval=node_interval)
# Cython build environment
py_include = sysconfig.get_paths()['include']
envCython = env.Clone()
envCython["CPPPATH"] += [py_include, np.get_include()]
envCython["CCFLAGS"] += ["-Wno-#warnings", "-Wno-shadow", "-Wno-deprecated-declarations"]
envCython["CCFLAGS"].remove("-Werror")
envCython["LIBS"] = []
if arch == "Darwin":
envCython["LINKFLAGS"] = ["-bundle", "-undefined", "dynamic_lookup"] + darwin_rpath_link_flags
else:
envCython["LINKFLAGS"] = ["-pthread", "-shared"]
np_version = SCons.Script.Value(np.__version__)
Export('envCython', 'np_version')
# Qt build environment
qt_env = env.Clone()
qt_modules = ["Widgets", "Gui", "Core", "Network", "Concurrent", "DBus", "Xml"]
qt_libs = []
if arch == "Darwin":
qt_env['QTDIR'] = f"{brew_prefix}/opt/qt@5"
qt_dirs = [
os.path.join(qt_env['QTDIR'], "include"),
]
qt_dirs += [f"{qt_env['QTDIR']}/include/Qt{m}" for m in qt_modules]
qt_env["LINKFLAGS"] += ["-F" + os.path.join(qt_env['QTDIR'], "lib")]
qt_env["FRAMEWORKS"] += [f"Qt{m}" for m in qt_modules] + ["OpenGL"]
qt_env.AppendENVPath('PATH', os.path.join(qt_env['QTDIR'], "bin"))
else:
qt_install_prefix = subprocess.check_output(['qmake', '-query', 'QT_INSTALL_PREFIX'], encoding='utf8').strip()
qt_install_headers = subprocess.check_output(['qmake', '-query', 'QT_INSTALL_HEADERS'], encoding='utf8').strip()
qt_env['QTDIR'] = qt_install_prefix
qt_dirs = [
f"{qt_install_headers}",
]
qt_gui_path = os.path.join(qt_install_headers, "QtGui")
qt_gui_dirs = [d for d in os.listdir(qt_gui_path) if os.path.isdir(os.path.join(qt_gui_path, d))]
qt_dirs += [f"{qt_install_headers}/QtGui/{qt_gui_dirs[0]}/QtGui", ] if qt_gui_dirs else []
qt_dirs += [f"{qt_install_headers}/Qt{m}" for m in qt_modules]
qt_libs = [f"Qt5{m}" for m in qt_modules]
if arch == "larch64":
qt_libs += ["GLESv2", "wayland-client"]
qt_env.PrependENVPath('PATH', Dir("#third_party/qt5/larch64/bin/").abspath)
elif arch != "Darwin":
qt_libs += ["GL"]
qt_env['QT3DIR'] = qt_env['QTDIR']
qt_env.Tool('qt3')
qt_env['CPPPATH'] += qt_dirs + ["#third_party/qrcode"]
qt_flags = [
"-D_REENTRANT",
"-DQT_NO_DEBUG",
"-DQT_WIDGETS_LIB",
"-DQT_GUI_LIB",
"-DQT_CORE_LIB",
"-DQT_MESSAGELOGCONTEXT",
]
qt_env['CXXFLAGS'] += qt_flags
qt_env['LIBPATH'] += ['#selfdrive/ui', ]
qt_env['LIBS'] = qt_libs
if GetOption("clazy"):
checks = [
"level0",
"level1",
"no-range-loop",
"no-non-pod-global-static",
]
qt_env['CXX'] = 'clazy'
qt_env['ENV']['CLAZY_IGNORE_DIRS'] = qt_dirs[0]
qt_env['ENV']['CLAZY_CHECKS'] = ','.join(checks)
Export('env', 'qt_env', 'arch', 'real_arch')
# Build common module
SConscript(['common/SConscript'])
Import('_common', '_gpucommon')
common = [_common, 'json11', 'zmq']
gpucommon = [_gpucommon]
Export('common', 'gpucommon')
# Build messaging (cereal + msgq + socketmaster + their dependencies)
# Enable swaglog include in submodules
env_swaglog = env.Clone()
env_swaglog['CXXFLAGS'].append('-DSWAGLOG="\\"common/swaglog.h\\""')
SConscript(['msgq_repo/SConscript'], exports={'env': env_swaglog})
SConscript(['opendbc_repo/SConscript'], exports={'env': env_swaglog})
SConscript(['cereal/SConscript'])
Import('socketmaster', 'msgq')
messaging = [socketmaster, msgq, 'capnp', 'kj',]
Export('messaging')
# Build other submodules
SConscript(['panda/SConscript'])
# Build rednose library
SConscript(['rednose/SConscript'])
# Build system services
SConscript([
'system/ubloxd/SConscript',
'system/loggerd/SConscript',
])
if arch != "Darwin":
SConscript([
'system/logcatd/SConscript',
'system/proclogd/SConscript',
])
if arch == "larch64":
SConscript(['system/camerad/SConscript'])
# Build openpilot
SConscript(['third_party/SConscript'])
SConscript(['selfdrive/SConscript'])
if Dir('#tools/cabana/').exists() and GetOption('extras'):
SConscript(['tools/replay/SConscript'])
if arch != "larch64":
SConscript(['tools/cabana/SConscript'])
external_sconscript = GetOption('external_sconscript')
if external_sconscript:
SConscript([external_sconscript])
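The `--external-sconscript` hook above is how a fork can graft extra build targets onto this SConstruct; a minimal sketch of such a file, assuming it is passed via `scons --external-sconscript=path/to/SConscript` (the tool name and source path are hypothetical):

```python
# external SConscript sketch: reuse the environments and libs exported by SConstruct
Import('env', 'common', 'messaging')

# hypothetical helper tool linked against openpilot's common + messaging libraries
env.Program('tools/hello/hello',
            ['tools/hello/hello.cc'],
            LIBS=common + messaging)
```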

SECURITY.md
@@ -0,0 +1,5 @@
# Security Policy
## Reporting a Vulnerability
Suspected vulnerabilities can be reported to both `adeeb@comma.ai` and `security@comma.ai`.

cereal/README.md
@@ -0,0 +1,95 @@
# What is cereal?
cereal is the messaging system for openpilot. It uses [msgq](https://github.com/commaai/msgq) as a pub/sub backend, and [Cap'n proto](https://capnproto.org/capnp-tool.html) for serialization of the structs.
## Messaging Spec
You'll find the message types in [log.capnp](log.capnp). It uses [Cap'n proto](https://capnproto.org/capnp-tool.html) and defines one struct called `Event`.
All `Events` have a `logMonoTime` and a `valid`. Then a big union defines the packet type.
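For instance, here is a minimal sketch of inspecting those common fields through `SubMaster` (the `carState` service name is just an example; any service listed in services.py works):

```python
import cereal.messaging as messaging

# subscribe to one service and look at the Event-level fields
sm = messaging.SubMaster(['carState'])
sm.update(100)                     # wait up to 100 ms for fresh messages
print(sm.logMonoTime['carState'])  # logMonoTime of the last received Event
print(sm.valid['carState'])        # valid flag of the last received Event
print(sm['carState'])              # the union member itself (a CarState reader)
```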
### Best Practices
- **All fields must describe quantities in SI units**, unless otherwise specified in the field name.
- In the context of the message they are in, field names should be completely unambiguous.
- All values should be easy to plot and be human-readable with minimal parsing.
### Maintaining backwards-compatibility
When making changes to the messaging spec, you want to maintain backwards-compatibility, so that old logs can
still be parsed with a new version of cereal. Adding structs and adding members to structs is generally safe; most other
changes are not. Read more details [here](https://capnproto.org/language.html).
### Custom forks
Forks of [openpilot](https://github.com/commaai/openpilot) might want to add things to the messaging
spec, however this could conflict with future changes made in mainline cereal/openpilot. Rebasing against mainline openpilot
then means breaking backwards-compatibility with all old logs of your fork. So we added reserved events in
[custom.capnp](custom.capnp) that we will leave empty in mainline cereal/openpilot. **If you only modify those, you can ensure your
fork will remain backwards-compatible with all versions of mainline openpilot and your fork.**
An example of compatible changes:
```diff
diff --git a/cereal/custom.capnp b/cereal/custom.capnp
index 3348e859e..3365c7b98 100644
--- a/cereal/custom.capnp
+++ b/cereal/custom.capnp
@@ -10,7 +10,11 @@ $Cxx.namespace("cereal");
# DO rename the structs
# DON'T change the identifier (e.g. @0x81c2f05a394cf4af)
-struct CustomReserved0 @0x81c2f05a394cf4af {
+struct SteeringInfo @0x81c2f05a394cf4af {
+ active @0 :Bool;
+ steeringAngleDeg @1 :Float32;
+ steeringRateDeg @2 :Float32;
+ steeringAccelDeg @3 :Float32;
}
struct CustomReserved1 @0xaedffd8f31e7b55d {
diff --git a/cereal/log.capnp b/cereal/log.capnp
index 1209f3fd9..b189f58b6 100644
--- a/cereal/log.capnp
+++ b/cereal/log.capnp
@@ -2558,14 +2558,14 @@ struct Event {
# DO change the name of the field
# DON'T change anything after the "@"
- customReservedRawData0 @124 :Data;
+ rawCanData @124 :Data;
customReservedRawData1 @125 :Data;
customReservedRawData2 @126 :Data;
# DO change the name of the field and struct
# DON'T change the ID (e.g. @107)
# DON'T change which struct it points to
- customReserved0 @107 :Custom.CustomReserved0;
+ steeringInfo @107 :Custom.SteeringInfo;
customReserved1 @108 :Custom.CustomReserved1;
customReserved2 @109 :Custom.CustomReserved2;
customReserved3 @110 :Custom.CustomReserved3;
```
---
Example
---
```python
import cereal.messaging as messaging
# in subscriber
sm = messaging.SubMaster(['sensorEvents'])
while 1:
  sm.update()
  print(sm['sensorEvents'])
```
```python
# in publisher
pm = messaging.PubMaster(['sensorEvents'])
dat = messaging.new_message('sensorEvents', size=1)
dat.sensorEvents[0] = {"gyro": {"v": [0.1, -0.1, 0.1]}}
pm.send('sensorEvents', dat)
```
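Beyond `SubMaster`/`PubMaster`, the same module exposes raw per-service sockets. A minimal sketch, assuming the `carState` service and that publisher and subscriber run as separate processes on the same machine:

```python
import cereal.messaging as messaging

# publisher side: one raw socket bound to a single service
pub = messaging.pub_sock('carState')
msg = messaging.new_message('carState')
msg.carState.vEgo = 1.0                  # any field defined by the struct
pub.send(msg.to_bytes())

# subscriber side: wait briefly for one message; None on timeout
sub = messaging.sub_sock('carState', timeout=100)
evt = messaging.recv_one_or_none(sub)
if evt is not None:
  print(evt.logMonoTime, evt.carState.vEgo)
```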

cereal/SConscript
@@ -0,0 +1,20 @@
Import('env', 'common', 'msgq')
cereal_dir = Dir('.')
gen_dir = Dir('gen')
# Build cereal
schema_files = ['log.capnp', 'car.capnp', 'legacy.capnp', 'custom.capnp']
env.Command([f'gen/cpp/{s}.c++' for s in schema_files] + [f'gen/cpp/{s}.h' for s in schema_files],
schema_files,
f"capnpc --src-prefix={cereal_dir.path} $SOURCES -o c++:{gen_dir.path}/cpp/")
cereal = env.Library('cereal', [f'gen/cpp/{s}.c++' for s in schema_files])
# Build messaging
services_h = env.Command(['services.h'], ['services.py'], 'python3 ' + cereal_dir.path + '/services.py > $TARGET')
env.Program('messaging/bridge', ['messaging/bridge.cc', 'messaging/msgq_to_zmq.cc'], LIBS=[msgq, common, 'pthread'])
socketmaster = env.Library('socketmaster', ['messaging/socketmaster.cc'])
Export('cereal', 'socketmaster')

cereal/__init__.py
@@ -0,0 +1,11 @@
import os
import capnp
from importlib.resources import as_file, files
capnp.remove_import_hook()
with as_file(files("cereal")) as fspath:
  CEREAL_PATH = fspath.as_posix()
log = capnp.load(os.path.join(CEREAL_PATH, "log.capnp"))
car = capnp.load(os.path.join(CEREAL_PATH, "car.capnp"))
custom = capnp.load(os.path.join(CEREAL_PATH, "custom.capnp"))
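As a rough illustration of what these module-level schema objects provide (a sketch only; `logMonoTime` is a real field of `Event`, but treat the round-trip pattern as an assumption about the pycapnp API rather than canonical usage):

```python
from cereal import log

# build an Event with the schema loaded above and serialize it
evt = log.Event.new_message()
evt.logMonoTime = 1_000_000_000     # nanoseconds, per the messaging spec
raw = evt.to_bytes()                # Cap'n Proto wire format

# read it back and inspect which union member is set
with log.Event.from_bytes(raw) as reader:
  print(reader.logMonoTime, reader.which())
```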

cereal/car.capnp
@@ -0,0 +1 @@
../opendbc_repo/opendbc/car/car.capnp

cereal/custom.capnp
@@ -0,0 +1,71 @@
using Cxx = import "./include/c++.capnp";
$Cxx.namespace("cereal");
@0xb526ba661d550a59;
# custom.capnp: a home for empty structs reserved for custom forks
# These structs are guaranteed to remain reserved and empty in mainline
# cereal, so use these if you want custom events in your fork.
# DO rename the structs
# DON'T change the identifier (e.g. @0x81c2f05a394cf4af)
struct CustomReserved0 @0x81c2f05a394cf4af {
}
struct CustomReserved1 @0xaedffd8f31e7b55d {
}
struct CustomReserved2 @0xf35cc4560bbf6ec2 {
}
struct CustomReserved3 @0xda96579883444c35 {
}
struct CustomReserved4 @0x80ae746ee2596b11 {
}
struct CustomReserved5 @0xa5cd762cd951a455 {
}
struct CustomReserved6 @0xf98d843bfd7004a3 {
}
struct CustomReserved7 @0xb86e6369214c01c8 {
}
struct CustomReserved8 @0xf416ec09499d9d19 {
}
struct CustomReserved9 @0xa1680744031fdb2d {
}
struct CustomReserved10 @0xcb9fd56c7057593a {
}
struct CustomReserved11 @0xc2243c65e0340384 {
}
struct CustomReserved12 @0x9ccdc8676701b412 {
}
struct CustomReserved13 @0xcd96dafb67a082d0 {
}
struct CustomReserved14 @0xb057204d7deadf3f {
}
struct CustomReserved15 @0xbd443b539493bc68 {
}
struct CustomReserved16 @0xfc6241ed8877b611 {
}
struct CustomReserved17 @0xa30662f84033036c {
}
struct CustomReserved18 @0xc86a3d38d13eb3ef {
}
struct CustomReserved19 @0xa4f1eb3323f5f582 {
}
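Tying this back to the fork example in cereal/README.md above: once a fork renames a reserved struct and its event field, the renamed member is published like any other service. A sketch reusing the hypothetical `steeringInfo` rename from that example, and assuming the fork also registers a `steeringInfo` entry in services.py:

```python
import cereal.messaging as messaging

# publish the fork's renamed custom event (names follow the README example)
pm = messaging.PubMaster(['steeringInfo'])
msg = messaging.new_message('steeringInfo')
msg.steeringInfo.active = True
msg.steeringInfo.steeringAngleDeg = 12.5
pm.send('steeringInfo', msg)
```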

cereal/include/c++.capnp
@@ -0,0 +1,26 @@
# Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors
# Licensed under the MIT License:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
@0xbdf87d7bb8304e81;
$namespace("capnp::annotations");
annotation namespace(file): Text;
annotation name(field, enumerant, struct, enum, interface, method, param, group, union): Text;

cereal/legacy.capnp
@@ -0,0 +1,574 @@
using Cxx = import "./include/c++.capnp";
$Cxx.namespace("cereal");
@0x80ef1ec4889c2a63;
# legacy.capnp: a home for deprecated structs
struct LogRotate @0x9811e1f38f62f2d1 {
segmentNum @0 :Int32;
path @1 :Text;
}
struct LiveUI @0xc08240f996aefced {
rearViewCam @0 :Bool;
alertText1 @1 :Text;
alertText2 @2 :Text;
awarenessStatus @3 :Float32;
}
struct UiLayoutState @0x88dcce08ad29dda0 {
activeApp @0 :App;
sidebarCollapsed @1 :Bool;
mapEnabled @2 :Bool;
mockEngaged @3 :Bool;
enum App @0x9917470acf94d285 {
home @0;
music @1;
nav @2;
settings @3;
none @4;
}
}
struct OrbslamCorrection @0x8afd33dc9b35e1aa {
correctionMonoTime @0 :UInt64;
prePositionECEF @1 :List(Float64);
postPositionECEF @2 :List(Float64);
prePoseQuatECEF @3 :List(Float32);
postPoseQuatECEF @4 :List(Float32);
numInliers @5 :UInt32;
}
struct EthernetPacket @0xa99a9d5b33cf5859 {
pkt @0 :Data;
ts @1 :Float32;
}
struct CellInfo @0xcff7566681c277ce {
timestamp @0 :UInt64;
repr @1 :Text; # android toString() for now
}
struct WifiScan @0xd4df5a192382ba0b {
bssid @0 :Text;
ssid @1 :Text;
capabilities @2 :Text;
frequency @3 :Int32;
level @4 :Int32;
timestamp @5 :Int64;
centerFreq0 @6 :Int32;
centerFreq1 @7 :Int32;
channelWidth @8 :ChannelWidth;
operatorFriendlyName @9 :Text;
venueName @10 :Text;
is80211mcResponder @11 :Bool;
passpoint @12 :Bool;
distanceCm @13 :Int32;
distanceSdCm @14 :Int32;
enum ChannelWidth @0xcb6a279f015f6b51 {
w20Mhz @0;
w40Mhz @1;
w80Mhz @2;
w160Mhz @3;
w80Plus80Mhz @4;
}
}
struct LiveEventData @0x94b7baa90c5c321e {
name @0 :Text;
value @1 :Int32;
}
struct ModelData @0xb8aad62cffef28a9 {
frameId @0 :UInt32;
frameAge @12 :UInt32;
frameDropPerc @13 :Float32;
timestampEof @9 :UInt64;
modelExecutionTime @14 :Float32;
gpuExecutionTime @16 :Float32;
rawPred @15 :Data;
path @1 :PathData;
leftLane @2 :PathData;
rightLane @3 :PathData;
lead @4 :LeadData;
freePath @6 :List(Float32);
settings @5 :ModelSettings;
leadFuture @7 :LeadData;
speed @8 :List(Float32);
meta @10 :MetaData;
longitudinal @11 :LongitudinalData;
struct PathData @0x8817eeea389e9f08 {
points @0 :List(Float32);
prob @1 :Float32;
std @2 :Float32;
stds @3 :List(Float32);
poly @4 :List(Float32);
validLen @5 :Float32;
}
struct LeadData @0xd1c9bef96d26fa91 {
dist @0 :Float32;
prob @1 :Float32;
std @2 :Float32;
relVel @3 :Float32;
relVelStd @4 :Float32;
relY @5 :Float32;
relYStd @6 :Float32;
relA @7 :Float32;
relAStd @8 :Float32;
}
struct ModelSettings @0xa26e3710efd3e914 {
bigBoxX @0 :UInt16;
bigBoxY @1 :UInt16;
bigBoxWidth @2 :UInt16;
bigBoxHeight @3 :UInt16;
boxProjection @4 :List(Float32);
yuvCorrection @5 :List(Float32);
inputTransform @6 :List(Float32);
}
struct MetaData @0x9744f25fb60f2bf8 {
engagedProb @0 :Float32;
desirePrediction @1 :List(Float32);
brakeDisengageProb @2 :Float32;
gasDisengageProb @3 :Float32;
steerOverrideProb @4 :Float32;
desireState @5 :List(Float32);
}
struct LongitudinalData @0xf98f999c6a071122 {
distances @2 :List(Float32);
speeds @0 :List(Float32);
accelerations @1 :List(Float32);
}
}
struct ECEFPoint @0xc25bbbd524983447 {
x @0 :Float64;
y @1 :Float64;
z @2 :Float64;
}
struct ECEFPointDEPRECATED @0xe10e21168db0c7f7 {
x @0 :Float32;
y @1 :Float32;
z @2 :Float32;
}
struct GPSPlannerPoints @0xab54c59699f8f9f3 {
curPosDEPRECATED @0 :ECEFPointDEPRECATED;
pointsDEPRECATED @1 :List(ECEFPointDEPRECATED);
curPos @6 :ECEFPoint;
points @7 :List(ECEFPoint);
valid @2 :Bool;
trackName @3 :Text;
speedLimit @4 :Float32;
accelTarget @5 :Float32;
}
struct GPSPlannerPlan @0xf5ad1d90cdc1dd6b {
valid @0 :Bool;
poly @1 :List(Float32);
trackName @2 :Text;
speed @3 :Float32;
acceleration @4 :Float32;
pointsDEPRECATED @5 :List(ECEFPointDEPRECATED);
points @6 :List(ECEFPoint);
xLookahead @7 :Float32;
}
struct UiNavigationEvent @0x90c8426c3eaddd3b {
type @0: Type;
status @1: Status;
distanceTo @2: Float32;
endRoadPointDEPRECATED @3: ECEFPointDEPRECATED;
endRoadPoint @4: ECEFPoint;
enum Type @0xe8db07dcf8fcea05 {
none @0;
laneChangeLeft @1;
laneChangeRight @2;
mergeLeft @3;
mergeRight @4;
turnLeft @5;
turnRight @6;
}
enum Status @0xb9aa88c75ef99a1f {
none @0;
passive @1;
approaching @2;
active @3;
}
}
struct LiveLocationData @0xb99b2bc7a57e8128 {
status @0 :UInt8;
# 3D fix
lat @1 :Float64;
lon @2 :Float64;
alt @3 :Float32; # m
# speed
speed @4 :Float32; # m/s
# NED velocity components
vNED @5 :List(Float32);
# roll, pitch, heading (x,y,z)
roll @6 :Float32; # WRT to center of earth?
pitch @7 :Float32; # WRT to center of earth?
heading @8 :Float32; # WRT to north?
# what are these?
wanderAngle @9 :Float32;
trackAngle @10 :Float32;
# car frame -- https://upload.wikimedia.org/wikipedia/commons/f/f5/RPY_angles_of_cars.png
# gyro, in car frame, deg/s
gyro @11 :List(Float32);
# accel, in car frame, m/s^2
accel @12 :List(Float32);
accuracy @13 :Accuracy;
source @14 :SensorSource;
# if we are fixing a location in the past
fixMonoTime @15 :UInt64;
gpsWeek @16 :Int32;
timeOfWeek @17 :Float64;
positionECEF @18 :List(Float64);
poseQuatECEF @19 :List(Float32);
pitchCalibration @20 :Float32;
yawCalibration @21 :Float32;
imuFrame @22 :List(Float32);
struct Accuracy @0x943dc4625473b03f {
pNEDError @0 :List(Float32);
vNEDError @1 :List(Float32);
rollError @2 :Float32;
pitchError @3 :Float32;
headingError @4 :Float32;
ellipsoidSemiMajorError @5 :Float32;
ellipsoidSemiMinorError @6 :Float32;
ellipsoidOrientationError @7 :Float32;
}
enum SensorSource @0xc871d3cc252af657 {
applanix @0;
kalman @1;
orbslam @2;
timing @3;
dummy @4;
}
}
struct OrbOdometry @0xd7700859ed1f5b76 {
# timing first
startMonoTime @0 :UInt64;
endMonoTime @1 :UInt64;
# fundamental matrix and error
f @2: List(Float64);
err @3: Float64;
# number of inlier points
inliers @4: Int32;
# for debug only
# indexed by endMonoTime features
# value is startMonoTime feature match
# -1 if no match
matches @5: List(Int16);
}
struct OrbFeatures @0xcd60164a8a0159ef {
timestampEof @0 :UInt64;
# transposed arrays of normalized image coordinates
# len(xs) == len(ys) == len(descriptors) * 32
xs @1 :List(Float32);
ys @2 :List(Float32);
descriptors @3 :Data;
octaves @4 :List(Int8);
# match index to last OrbFeatures
# -1 if no match
timestampLastEof @5 :UInt64;
matches @6: List(Int16);
}
struct OrbFeaturesSummary @0xd500d30c5803fa4f {
timestampEof @0 :UInt64;
timestampLastEof @1 :UInt64;
featureCount @2 :UInt16;
matchCount @3 :UInt16;
computeNs @4 :UInt64;
}
struct OrbKeyFrame @0xc8233c0345e27e24 {
# this is a globally unique id for the KeyFrame
id @0: UInt64;
# this is the location of the KeyFrame
pos @1: ECEFPoint;
# these are the features in the world
# len(dpos) == len(descriptors) * 32
dpos @2 :List(ECEFPoint);
descriptors @3 :Data;
}
struct KalmanOdometry @0x92e21bb7ea38793a {
trans @0 :List(Float32); # m/s in device frame
rot @1 :List(Float32); # rad/s in device frame
transStd @2 :List(Float32); # std m/s in device frame
rotStd @3 :List(Float32); # std rad/s in device frame
}
struct OrbObservation @0x9b326d4e436afec7 {
observationMonoTime @0 :UInt64;
normalizedCoordinates @1 :List(Float32);
locationECEF @2 :List(Float64);
matchDistance @3: UInt32;
}
struct CalibrationFeatures @0x8fdfadb254ea867a {
frameId @0 :UInt32;
p0 @1 :List(Float32);
p1 @2 :List(Float32);
status @3 :List(Int8);
}
struct NavStatus @0xbd8822120928120c {
isNavigating @0 :Bool;
currentAddress @1 :Address;
struct Address @0xce7cd672cacc7814 {
title @0 :Text;
lat @1 :Float64;
lng @2 :Float64;
house @3 :Text;
address @4 :Text;
street @5 :Text;
city @6 :Text;
state @7 :Text;
country @8 :Text;
}
}
struct NavUpdate @0xdb98be6565516acb {
isNavigating @0 :Bool;
curSegment @1 :Int32;
segments @2 :List(Segment);
struct LatLng @0x9eaef9187cadbb9b {
lat @0 :Float64;
lng @1 :Float64;
}
struct Segment @0xa5b39b4fc4d7da3f {
from @0 :LatLng;
to @1 :LatLng;
updateTime @2 :Int32;
distance @3 :Int32;
crossTime @4 :Int32;
exitNo @5 :Int32;
instruction @6 :Instruction;
parts @7 :List(LatLng);
enum Instruction @0xc5417a637451246f {
turnLeft @0;
turnRight @1;
keepLeft @2;
keepRight @3;
straight @4;
roundaboutExitNumber @5;
roundaboutExit @6;
roundaboutTurnLeft @7;
unkn8 @8;
roundaboutStraight @9;
unkn10 @10;
roundaboutTurnRight @11;
unkn12 @12;
roundaboutUturn @13;
unkn14 @14;
arrive @15;
exitLeft @16;
exitRight @17;
unkn18 @18;
uturn @19;
# ...
}
}
}
struct TrafficEvent @0xacfa74a094e62626 {
type @0 :Type;
distance @1 :Float32;
action @2 :Action;
resuming @3 :Bool;
enum Type @0xd85d75253435bf4b {
stopSign @0;
lightRed @1;
lightYellow @2;
lightGreen @3;
stopLight @4;
}
enum Action @0xa6f6ce72165ccb49 {
none @0;
yield @1;
stop @2;
resumeReady @3;
}
}
struct AndroidGnss @0xdfdf30d03fc485bd {
union {
measurements @0 :Measurements;
navigationMessage @1 :NavigationMessage;
}
struct Measurements @0xa20710d4f428d6cd {
clock @0 :Clock;
measurements @1 :List(Measurement);
struct Clock @0xa0e27b453a38f450 {
timeNanos @0 :Int64;
hardwareClockDiscontinuityCount @1 :Int32;
hasTimeUncertaintyNanos @2 :Bool;
timeUncertaintyNanos @3 :Float64;
hasLeapSecond @4 :Bool;
leapSecond @5 :Int32;
hasFullBiasNanos @6 :Bool;
fullBiasNanos @7 :Int64;
hasBiasNanos @8 :Bool;
biasNanos @9 :Float64;
hasBiasUncertaintyNanos @10 :Bool;
biasUncertaintyNanos @11 :Float64;
hasDriftNanosPerSecond @12 :Bool;
driftNanosPerSecond @13 :Float64;
hasDriftUncertaintyNanosPerSecond @14 :Bool;
driftUncertaintyNanosPerSecond @15 :Float64;
}
struct Measurement @0xd949bf717d77614d {
svId @0 :Int32;
constellation @1 :Constellation;
timeOffsetNanos @2 :Float64;
state @3 :Int32;
receivedSvTimeNanos @4 :Int64;
receivedSvTimeUncertaintyNanos @5 :Int64;
cn0DbHz @6 :Float64;
pseudorangeRateMetersPerSecond @7 :Float64;
pseudorangeRateUncertaintyMetersPerSecond @8 :Float64;
accumulatedDeltaRangeState @9 :Int32;
accumulatedDeltaRangeMeters @10 :Float64;
accumulatedDeltaRangeUncertaintyMeters @11 :Float64;
hasCarrierFrequencyHz @12 :Bool;
carrierFrequencyHz @13 :Float32;
hasCarrierCycles @14 :Bool;
carrierCycles @15 :Int64;
hasCarrierPhase @16 :Bool;
carrierPhase @17 :Float64;
hasCarrierPhaseUncertainty @18 :Bool;
carrierPhaseUncertainty @19 :Float64;
hasSnrInDb @20 :Bool;
snrInDb @21 :Float64;
multipathIndicator @22 :MultipathIndicator;
enum Constellation @0x9ef1f3ff0deb5ffb {
unknown @0;
gps @1;
sbas @2;
glonass @3;
qzss @4;
beidou @5;
galileo @6;
}
enum State @0xcbb9490adce12d72 {
unknown @0;
codeLock @1;
bitSync @2;
subframeSync @3;
towDecoded @4;
msecAmbiguous @5;
symbolSync @6;
gloStringSync @7;
gloTodDecoded @8;
bdsD2BitSync @9;
bdsD2SubframeSync @10;
galE1bcCodeLock @11;
galE1c2ndCodeLock @12;
galE1bPageSync @13;
sbasSync @14;
}
enum MultipathIndicator @0xc04e7b6231d4caa8 {
unknown @0;
detected @1;
notDetected @2;
}
}
}
struct NavigationMessage @0xe2517b083095fd4e {
type @0 :Int32;
svId @1 :Int32;
messageId @2 :Int32;
submessageId @3 :Int32;
data @4 :Data;
status @5 :Status;
enum Status @0xec1ff7996b35366f {
unknown @0;
parityPassed @1;
parityRebuilt @2;
}
}
}
struct LidarPts @0xe3d6685d4e9d8f7a {
r @0 :List(UInt16); # uint16 m*500.0
theta @1 :List(UInt16); # uint16 deg*100.0
reflect @2 :List(UInt8); # uint8 0-255
# For storing out of file.
idx @3 :UInt64;
# For storing in file
pkt @4 :Data;
}
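These legacy structs are plain Cap'n Proto definitions, so they can be exercised directly with pycapnp. Below is a minimal sketch of building a NavStatus and reading it back; the schema path and import setup are assumptions for illustration only.

import capnp  # pycapnp

legacy = capnp.load("cereal/legacy.capnp")  # path is an assumption

nav = legacy.NavStatus.new_message(isNavigating=True)
addr = nav.init("currentAddress")
addr.title = "work"
addr.lat, addr.lng = 37.7749, -122.4194

raw = nav.to_bytes()
with legacy.NavStatus.from_bytes(raw) as reader:
  print(reader.isNavigating, reader.currentAddress.title)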

File diff suppressed because it is too large

@ -0,0 +1,257 @@
# must be built with scons
from msgq.ipc_pyx import Context, Poller, SubSocket, PubSocket, SocketEventHandle, toggle_fake_events, \
set_fake_prefix, get_fake_prefix, delete_fake_prefix, wait_for_one_event
from msgq.ipc_pyx import MultiplePublishersError, IpcError
from msgq import fake_event_handle, pub_sock, sub_sock, drain_sock_raw
import msgq
import os
import capnp
import time
from typing import Optional, List, Union, Dict
from cereal import log
from cereal.services import SERVICE_LIST
from openpilot.common.util import MovingAverage
NO_TRAVERSAL_LIMIT = 2**64-1
def reset_context():
msgq.context = Context()
def log_from_bytes(dat: bytes, struct: capnp.lib.capnp._StructModule = log.Event) -> capnp.lib.capnp._DynamicStructReader:
with struct.from_bytes(dat, traversal_limit_in_words=NO_TRAVERSAL_LIMIT) as msg:
return msg
def new_message(service: Optional[str], size: Optional[int] = None, **kwargs) -> capnp.lib.capnp._DynamicStructBuilder:
args = {
'valid': False,
'logMonoTime': int(time.monotonic() * 1e9),
**kwargs
}
dat = log.Event.new_message(**args)
if service is not None:
if size is None:
dat.init(service)
else:
dat.init(service, size)
return dat
def drain_sock(sock: SubSocket, wait_for_one: bool = False) -> List[capnp.lib.capnp._DynamicStructReader]:
"""Receive all message currently available on the queue"""
msgs = drain_sock_raw(sock, wait_for_one=wait_for_one)
return [log_from_bytes(m) for m in msgs]
# TODO: print when we drop packets?
def recv_sock(sock: SubSocket, wait: bool = False) -> Optional[capnp.lib.capnp._DynamicStructReader]:
"""Same as drain sock, but only returns latest message. Consider using conflate instead."""
dat = None
while 1:
if wait and dat is None:
recv = sock.receive()
else:
recv = sock.receive(non_blocking=True)
if recv is None: # Timeout hit
break
dat = recv
if dat is not None:
dat = log_from_bytes(dat)
return dat
def recv_one(sock: SubSocket) -> Optional[capnp.lib.capnp._DynamicStructReader]:
dat = sock.receive()
if dat is not None:
dat = log_from_bytes(dat)
return dat
def recv_one_or_none(sock: SubSocket) -> Optional[capnp.lib.capnp._DynamicStructReader]:
dat = sock.receive(non_blocking=True)
if dat is not None:
dat = log_from_bytes(dat)
return dat
def recv_one_retry(sock: SubSocket) -> capnp.lib.capnp._DynamicStructReader:
"""Keep receiving until we get a message"""
while True:
dat = sock.receive()
if dat is not None:
return log_from_bytes(dat)
class FrequencyTracker:
def __init__(self, service_freq: float, update_freq: float, is_poll: bool):
freq = max(min(service_freq, update_freq), 1.)
if is_poll:
min_freq = max_freq = freq
else:
max_freq = min(freq, update_freq)
if service_freq >= 2 * update_freq:
min_freq = update_freq
elif update_freq >= 2 * service_freq:
min_freq = freq
else:
min_freq = min(freq, freq / 2.)
self.min_freq = min_freq * 0.8
self.max_freq = max_freq * 1.2
self.avg_dt = MovingAverage(int(10 * freq))
self.recent_avg_dt = MovingAverage(int(freq))
self.prev_time = 0.0
def record_recv_time(self, cur_time: float) -> None:
# TODO: Handle case where cur_time is less than prev_time
if self.prev_time > 1e-5:
dt = cur_time - self.prev_time
self.avg_dt.add_value(dt)
self.recent_avg_dt.add_value(dt)
self.prev_time = cur_time
@property
def valid(self) -> bool:
if self.avg_dt.count == 0:
return False
avg_freq = 1.0 / self.avg_dt.get_average()
if self.min_freq <= avg_freq <= self.max_freq:
return True
avg_freq_recent = 1.0 / self.recent_avg_dt.get_average()
return self.min_freq <= avg_freq_recent <= self.max_freq
class SubMaster:
def __init__(self, services: List[str], poll: Optional[str] = None,
ignore_alive: Optional[List[str]] = None, ignore_avg_freq: Optional[List[str]] = None,
ignore_valid: Optional[List[str]] = None, addr: str = "127.0.0.1", frequency: Optional[float] = None):
self.frame = -1
self.services = services
self.seen = {s: False for s in services}
self.updated = {s: False for s in services}
self.recv_time = {s: 0. for s in services}
self.recv_frame = {s: 0 for s in services}
self.sock = {}
self.data = {}
self.logMonoTime = {s: 0 for s in services}
# zero-frequency / on-demand services are always alive and presumed valid; all others must pass checks
on_demand = {s: SERVICE_LIST[s].frequency <= 1e-5 for s in services}
self.static_freq_services = set(s for s in services if not on_demand[s])
self.alive = {s: on_demand[s] for s in services}
self.freq_ok = {s: on_demand[s] for s in services}
self.valid = {s: on_demand[s] for s in services}
self.freq_tracker: Dict[str, FrequencyTracker] = {}
self.poller = Poller()
polled_services = set([poll, ] if poll is not None else services)
self.non_polled_services = set(services) - polled_services
self.ignore_average_freq = [] if ignore_avg_freq is None else ignore_avg_freq
self.ignore_alive = [] if ignore_alive is None else ignore_alive
self.ignore_valid = [] if ignore_valid is None else ignore_valid
self.simulation = bool(int(os.getenv("SIMULATION", "0")))
# if freq and poll aren't specified, assume the max to be conservative
assert frequency is None or poll is None, "Do not specify 'frequency' - frequency of the polled service will be used."
self.update_freq = frequency or max([SERVICE_LIST[s].frequency for s in polled_services])
for s in services:
p = self.poller if s not in self.non_polled_services else None
self.sock[s] = sub_sock(s, poller=p, addr=addr, conflate=True)
try:
data = new_message(s)
except capnp.lib.capnp.KjException:
data = new_message(s, 0) # lists
self.data[s] = getattr(data.as_reader(), s)
self.freq_tracker[s] = FrequencyTracker(SERVICE_LIST[s].frequency, self.update_freq, s == poll)
def __getitem__(self, s: str) -> capnp.lib.capnp._DynamicStructReader:
return self.data[s]
def _check_avg_freq(self, s: str) -> bool:
return SERVICE_LIST[s].frequency > 0.99 and (s not in self.ignore_average_freq) and (s not in self.ignore_alive)
def update(self, timeout: int = 100) -> None:
msgs = []
for sock in self.poller.poll(timeout):
msgs.append(recv_one_or_none(sock))
# non-blocking receive for non-polled sockets
for s in self.non_polled_services:
msgs.append(recv_one_or_none(self.sock[s]))
self.update_msgs(time.monotonic(), msgs)
def update_msgs(self, cur_time: float, msgs: List[capnp.lib.capnp._DynamicStructReader]) -> None:
self.frame += 1
self.updated = dict.fromkeys(self.services, False)
for msg in msgs:
if msg is None:
continue
s = msg.which()
self.seen[s] = True
self.updated[s] = True
self.freq_tracker[s].record_recv_time(cur_time)
self.recv_time[s] = cur_time
self.recv_frame[s] = self.frame
self.data[s] = getattr(msg, s)
self.logMonoTime[s] = msg.logMonoTime
self.valid[s] = msg.valid
for s in self.static_freq_services:
# alive if the last message arrived within 10x the expected period; checks relaxed in simulator
self.alive[s] = (cur_time - self.recv_time[s]) < (10. / SERVICE_LIST[s].frequency) or (self.seen[s] and self.simulation)
self.freq_ok[s] = self.freq_tracker[s].valid or self.simulation
def all_alive(self, service_list: Optional[List[str]] = None) -> bool:
return all(self.alive[s] for s in (service_list or self.services) if s not in self.ignore_alive)
def all_freq_ok(self, service_list: Optional[List[str]] = None) -> bool:
return all(self.freq_ok[s] for s in (service_list or self.services) if self._check_avg_freq(s))
def all_valid(self, service_list: Optional[List[str]] = None) -> bool:
return all(self.valid[s] for s in (service_list or self.services) if s not in self.ignore_valid)
def all_checks(self, service_list: Optional[List[str]] = None) -> bool:
return self.all_alive(service_list) and self.all_freq_ok(service_list) and self.all_valid(service_list)
class PubMaster:
def __init__(self, services: List[str]):
self.sock = {}
for s in services:
self.sock[s] = pub_sock(s)
def send(self, s: str, dat: Union[bytes, capnp.lib.capnp._DynamicStructBuilder]) -> None:
if not isinstance(dat, bytes):
dat = dat.to_bytes()
self.sock[s].send(dat)
def wait_for_readers_to_update(self, s: str, timeout: int, dt: float = 0.05) -> bool:
for _ in range(int(timeout*(1./dt))):
if self.sock[s].all_readers_updated():
return True
time.sleep(dt)
return False
def all_readers_updated(self, s: str) -> bool:
return self.sock[s].all_readers_updated() # type: ignore
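A minimal sketch of the publish/subscribe round trip these classes provide, mirroring the usage in the tests further down; "carState" is used here only because it is a service in SERVICE_LIST.

import time
import cereal.messaging as messaging

pm = messaging.PubMaster(["carState"])
sm = messaging.SubMaster(["carState"])

msg = messaging.new_message("carState")
msg.valid = True
msg.carState.vEgo = 1.0
pm.send("carState", msg)

time.sleep(0.05)  # give the queue a moment before polling
sm.update(100)    # timeout in milliseconds
if sm.updated["carState"]:
  print(sm["carState"].vEgo, sm.valid["carState"])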

@ -0,0 +1,71 @@
#include <cassert>
#include "cereal/messaging/msgq_to_zmq.h"
#include "cereal/services.h"
#include "common/util.h"
ExitHandler do_exit;
static std::vector<std::string> get_services(const std::string &whitelist_str, bool zmq_to_msgq) {
std::vector<std::string> service_list;
for (const auto& it : services) {
std::string name = it.second.name;
bool in_whitelist = whitelist_str.find(name) != std::string::npos;
if (zmq_to_msgq && !in_whitelist) {
continue;
}
service_list.push_back(name);
}
return service_list;
}
void msgq_to_zmq(const std::vector<std::string> &endpoints, const std::string &ip) {
MsgqToZmq bridge;
bridge.run(endpoints, ip);
}
void zmq_to_msgq(const std::vector<std::string> &endpoints, const std::string &ip) {
auto poller = std::make_unique<ZMQPoller>();
auto pub_context = std::make_unique<MSGQContext>();
auto sub_context = std::make_unique<ZMQContext>();
std::map<SubSocket *, PubSocket *> sub2pub;
for (auto endpoint : endpoints) {
auto pub_sock = new MSGQPubSocket();
auto sub_sock = new ZMQSubSocket();
pub_sock->connect(pub_context.get(), endpoint);
sub_sock->connect(sub_context.get(), endpoint, ip, false);
poller->registerSocket(sub_sock);
sub2pub[sub_sock] = pub_sock;
}
while (!do_exit) {
for (auto sub_sock : poller->poll(100)) {
std::unique_ptr<Message> msg(sub_sock->receive(true));
if (msg) {
sub2pub[sub_sock]->sendMessage(msg.get());
}
}
}
// Clean up allocated sockets
for (auto &[sub_sock, pub_sock] : sub2pub) {
delete sub_sock;
delete pub_sock;
}
}
int main(int argc, char **argv) {
bool is_zmq_to_msgq = argc > 2;
std::string ip = is_zmq_to_msgq ? argv[1] : "127.0.0.1";
std::string whitelist_str = is_zmq_to_msgq ? std::string(argv[2]) : "";
std::vector<std::string> endpoints = get_services(whitelist_str, is_zmq_to_msgq);
if (is_zmq_to_msgq) {
zmq_to_msgq(endpoints, ip);
} else {
msgq_to_zmq(endpoints, ip);
}
return 0;
}

@ -0,0 +1,102 @@
#pragma once
#include <cstddef>
#include <map>
#include <string>
#include <vector>
#include <utility>
#include <capnp/serialize.h>
#include "cereal/gen/cpp/log.capnp.h"
#include "common/timing.h"
#include "msgq/ipc.h"
class SubMaster {
public:
SubMaster(const std::vector<const char *> &service_list, const std::vector<const char *> &poll = {},
const char *address = nullptr, const std::vector<const char *> &ignore_alive = {});
void update(int timeout = 1000);
void update_msgs(uint64_t current_time, const std::vector<std::pair<std::string, cereal::Event::Reader>> &messages);
inline bool allAlive(const std::vector<const char *> &service_list = {}) { return all_(service_list, false, true); }
inline bool allValid(const std::vector<const char *> &service_list = {}) { return all_(service_list, true, false); }
inline bool allAliveAndValid(const std::vector<const char *> &service_list = {}) { return all_(service_list, true, true); }
void drain();
~SubMaster();
uint64_t frame = 0;
bool updated(const char *name) const;
bool alive(const char *name) const;
bool valid(const char *name) const;
uint64_t rcv_frame(const char *name) const;
uint64_t rcv_time(const char *name) const;
cereal::Event::Reader &operator[](const char *name) const;
private:
bool all_(const std::vector<const char *> &service_list, bool valid, bool alive);
Poller *poller_ = nullptr;
struct SubMessage;
std::map<SubSocket *, SubMessage *> messages_;
std::map<std::string, SubMessage *> services_;
};
class MessageBuilder : public capnp::MallocMessageBuilder {
public:
MessageBuilder() = default;
cereal::Event::Builder initEvent(bool valid = true) {
cereal::Event::Builder event = initRoot<cereal::Event>();
event.setLogMonoTime(nanos_since_boot());
event.setValid(valid);
return event;
}
kj::ArrayPtr<capnp::byte> toBytes() {
heapArray_ = capnp::messageToFlatArray(*this);
return heapArray_.asBytes();
}
size_t getSerializedSize() {
return capnp::computeSerializedSizeInWords(*this) * sizeof(capnp::word);
}
int serializeToBuffer(unsigned char *buffer, size_t buffer_size) {
size_t serialized_size = getSerializedSize();
if (serialized_size > buffer_size) { return -1; }
kj::ArrayOutputStream out(kj::ArrayPtr<capnp::byte>(buffer, buffer_size));
capnp::writeMessage(out, *this);
return serialized_size;
}
private:
kj::Array<capnp::word> heapArray_;
};
class PubMaster {
public:
PubMaster(const std::vector<const char *> &service_list);
inline int send(const char *name, capnp::byte *data, size_t size) { return sockets_.at(name)->send((char *)data, size); }
int send(const char *name, MessageBuilder &msg);
~PubMaster();
private:
std::map<std::string, PubSocket *> sockets_;
};
class AlignedBuffer {
public:
kj::ArrayPtr<const capnp::word> align(const char *data, const size_t size) {
words_size = size / sizeof(capnp::word) + 1;
if (aligned_buf.size() < words_size) {
aligned_buf = kj::heapArray<capnp::word>(words_size < 512 ? 512 : words_size);
}
memcpy(aligned_buf.begin(), data, size);
return aligned_buf.slice(0, words_size);
}
inline kj::ArrayPtr<const capnp::word> align(Message *m) {
return align(m->getData(), m->getSize());
}
private:
kj::Array<capnp::word> aligned_buf;
size_t words_size;
};

@ -0,0 +1,144 @@
#include "cereal/messaging/msgq_to_zmq.h"
#include <cassert>
#include "common/util.h"
extern ExitHandler do_exit;
// Max messages to process per socket per poll
constexpr int MAX_MESSAGES_PER_SOCKET = 50;
static std::string recv_zmq_msg(void *sock) {
zmq_msg_t msg;
zmq_msg_init(&msg);
std::string ret;
if (zmq_msg_recv(&msg, sock, 0) > 0) {
ret.assign((char *)zmq_msg_data(&msg), zmq_msg_size(&msg));
}
zmq_msg_close(&msg);
return ret;
}
void MsgqToZmq::run(const std::vector<std::string> &endpoints, const std::string &ip) {
zmq_context = std::make_unique<ZMQContext>();
msgq_context = std::make_unique<MSGQContext>();
// Create ZMQPubSockets for each endpoint
for (const auto &endpoint : endpoints) {
auto &socket_pair = socket_pairs.emplace_back();
socket_pair.endpoint = endpoint;
socket_pair.pub_sock = std::make_unique<ZMQPubSocket>();
int ret = socket_pair.pub_sock->connect(zmq_context.get(), endpoint);
if (ret != 0) {
printf("Failed to create ZMQ publisher for [%s]: %s\n", endpoint.c_str(), zmq_strerror(zmq_errno()));
return;
}
}
// Start ZMQ monitoring thread to monitor socket events
std::thread thread(&MsgqToZmq::zmqMonitorThread, this);
// Main loop for processing messages
while (!do_exit) {
{
std::unique_lock lk(mutex);
cv.wait(lk, [this]() { return do_exit || !sub2pub.empty(); });
if (do_exit) break;
for (auto sub_sock : msgq_poller->poll(100)) {
// Process messages for each socket
ZMQPubSocket *pub_sock = sub2pub.at(sub_sock);
for (int i = 0; i < MAX_MESSAGES_PER_SOCKET; ++i) {
auto msg = std::unique_ptr<Message>(sub_sock->receive(true));
if (!msg) break;
while (pub_sock->sendMessage(msg.get()) == -1) {
if (errno != EINTR) break;
}
}
}
}
util::sleep_for(1); // Give zmqMonitorThread a chance to acquire the mutex
}
thread.join();
}
void MsgqToZmq::zmqMonitorThread() {
std::vector<zmq_pollitem_t> pollitems;
// Set up ZMQ monitor for each pub socket
for (int i = 0; i < socket_pairs.size(); ++i) {
std::string addr = "inproc://op-bridge-monitor-" + std::to_string(i);
zmq_socket_monitor(socket_pairs[i].pub_sock->sock, addr.c_str(), ZMQ_EVENT_ACCEPTED | ZMQ_EVENT_DISCONNECTED);
void *monitor_socket = zmq_socket(zmq_context->getRawContext(), ZMQ_PAIR);
zmq_connect(monitor_socket, addr.c_str());
pollitems.emplace_back(zmq_pollitem_t{.socket = monitor_socket, .events = ZMQ_POLLIN});
}
while (!do_exit) {
int ret = zmq_poll(pollitems.data(), pollitems.size(), 1000);
if (ret < 0) {
if (errno == EINTR) {
// Due to frequent EINTR signals from msgq, introduce a brief delay (200 ms)
// to reduce CPU usage during retry attempts.
util::sleep_for(200);
}
continue;
}
for (int i = 0; i < pollitems.size(); ++i) {
if (pollitems[i].revents & ZMQ_POLLIN) {
// First frame in message contains event number and value
std::string frame = recv_zmq_msg(pollitems[i].socket);
if (frame.empty()) continue;
uint16_t event_type = *(uint16_t *)(frame.data());
// Second frame in message contains event address
frame = recv_zmq_msg(pollitems[i].socket);
if (frame.empty()) continue;
std::unique_lock lk(mutex);
auto &pair = socket_pairs[i];
if (event_type & ZMQ_EVENT_ACCEPTED) {
printf("socket [%s] connected\n", pair.endpoint.c_str());
if (++pair.connected_clients == 1) {
// Create new MSGQ subscriber socket and map to ZMQ publisher
pair.sub_sock = std::make_unique<MSGQSubSocket>();
pair.sub_sock->connect(msgq_context.get(), pair.endpoint, "127.0.0.1");
sub2pub[pair.sub_sock.get()] = pair.pub_sock.get();
registerSockets();
}
} else if (event_type & ZMQ_EVENT_DISCONNECTED) {
printf("socket [%s] disconnected\n", pair.endpoint.c_str());
if (pair.connected_clients == 0 || --pair.connected_clients == 0) {
// Remove MSGQ subscriber socket from mapping and reset it
sub2pub.erase(pair.sub_sock.get());
pair.sub_sock.reset(nullptr);
registerSockets();
}
}
cv.notify_one();
}
}
}
// Clean up monitor sockets
for (int i = 0; i < pollitems.size(); ++i) {
zmq_socket_monitor(socket_pairs[i].pub_sock->sock, nullptr, 0);
zmq_close(pollitems[i].socket);
}
cv.notify_one();
}
void MsgqToZmq::registerSockets() {
msgq_poller = std::make_unique<MSGQPoller>();
for (const auto &socket_pair : socket_pairs) {
if (socket_pair.sub_sock) {
msgq_poller->registerSocket(socket_pair.sub_sock.get());
}
}
}

@ -0,0 +1,37 @@
#pragma once
#include <condition_variable>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <vector>
#define private public
#include "msgq/impl_msgq.h"
#include "msgq/impl_zmq.h"
class MsgqToZmq {
public:
MsgqToZmq() {}
void run(const std::vector<std::string> &endpoints, const std::string &ip);
protected:
void registerSockets();
void zmqMonitorThread();
struct SocketPair {
std::string endpoint;
std::unique_ptr<ZMQPubSocket> pub_sock;
std::unique_ptr<MSGQSubSocket> sub_sock;
int connected_clients = 0;
};
std::unique_ptr<MSGQContext> msgq_context;
std::unique_ptr<ZMQContext> zmq_context;
std::mutex mutex;
std::condition_variable cv;
std::unique_ptr<MSGQPoller> msgq_poller;
std::map<SubSocket *, ZMQPubSocket *> sub2pub;
std::vector<SocketPair> socket_pairs;
};

@ -0,0 +1,203 @@
#include <assert.h>
#include <stdlib.h>
#include <string>
#include <mutex>
#include "cereal/services.h"
#include "cereal/messaging/messaging.h"
const bool SIMULATION = (getenv("SIMULATION") != nullptr) && (std::string(getenv("SIMULATION")) == "1");
static inline bool inList(const std::vector<const char *> &list, const char *value) {
for (auto &v : list) {
if (strcmp(value, v) == 0) return true;
}
return false;
}
class MessageContext {
public:
MessageContext() : ctx_(nullptr) {}
~MessageContext() { delete ctx_; }
inline Context *context() {
std::call_once(init_flag, [=]() { ctx_ = Context::create(); });
return ctx_;
}
private:
Context *ctx_;
std::once_flag init_flag;
};
MessageContext message_context;
struct SubMaster::SubMessage {
std::string name;
SubSocket *socket = nullptr;
int freq = 0;
bool updated = false, alive = false, valid = false, ignore_alive;
uint64_t rcv_time = 0, rcv_frame = 0;
void *allocated_msg_reader = nullptr;
bool is_polled = false;
capnp::FlatArrayMessageReader *msg_reader = nullptr;
AlignedBuffer aligned_buf;
cereal::Event::Reader event;
};
SubMaster::SubMaster(const std::vector<const char *> &service_list, const std::vector<const char *> &poll,
const char *address, const std::vector<const char *> &ignore_alive) {
poller_ = Poller::create();
for (auto name : service_list) {
assert(services.count(std::string(name)) > 0);
service serv = services.at(std::string(name));
SubSocket *socket = SubSocket::create(message_context.context(), name, address ? address : "127.0.0.1", true);
assert(socket != 0);
bool is_polled = inList(poll, name) || poll.empty();
if (is_polled) poller_->registerSocket(socket);
SubMessage *m = new SubMessage{
.name = name,
.socket = socket,
.freq = serv.frequency,
.ignore_alive = inList(ignore_alive, name),
.allocated_msg_reader = malloc(sizeof(capnp::FlatArrayMessageReader)),
.is_polled = is_polled};
m->msg_reader = new (m->allocated_msg_reader) capnp::FlatArrayMessageReader({});
messages_[socket] = m;
services_[name] = m;
}
}
void SubMaster::update(int timeout) {
for (auto &kv : messages_) kv.second->updated = false;
auto sockets = poller_->poll(timeout);
// add non-polled sockets for non-blocking receive
for (auto &kv : messages_) {
SubMessage *m = kv.second;
SubSocket *s = kv.first;
if (!m->is_polled) sockets.push_back(s);
}
uint64_t current_time = nanos_since_boot();
std::vector<std::pair<std::string, cereal::Event::Reader>> messages;
for (auto s : sockets) {
Message *msg = s->receive(true);
if (msg == nullptr) continue;
SubMessage *m = messages_.at(s);
m->msg_reader->~FlatArrayMessageReader();
capnp::ReaderOptions options;
options.traversalLimitInWords = kj::maxValue; // Don't limit
m->msg_reader = new (m->allocated_msg_reader) capnp::FlatArrayMessageReader(m->aligned_buf.align(msg), options);
delete msg;
messages.push_back({m->name, m->msg_reader->getRoot<cereal::Event>()});
}
update_msgs(current_time, messages);
}
void SubMaster::update_msgs(uint64_t current_time, const std::vector<std::pair<std::string, cereal::Event::Reader>> &messages){
if (++frame == UINT64_MAX) frame = 1;
for (auto &kv : messages) {
auto m_find = services_.find(kv.first);
if (m_find == services_.end()){
continue;
}
SubMessage *m = m_find->second;
m->event = kv.second;
m->updated = true;
m->rcv_time = current_time;
m->rcv_frame = frame;
m->valid = m->event.getValid();
if (SIMULATION) m->alive = true;
}
if (!SIMULATION) {
for (auto &kv : messages_) {
SubMessage *m = kv.second;
m->alive = (m->freq <= (1e-5) || ((current_time - m->rcv_time) * (1e-9)) < (10.0 / m->freq));
}
}
}
bool SubMaster::all_(const std::vector<const char *> &service_list, bool valid, bool alive) {
int found = 0;
for (auto &kv : messages_) {
SubMessage *m = kv.second;
if (service_list.size() == 0 || inList(service_list, m->name.c_str())) {
found += (!valid || m->valid) && (!alive || (m->alive || m->ignore_alive));
}
}
return service_list.size() == 0 ? found == messages_.size() : found == service_list.size();
}
void SubMaster::drain() {
while (true) {
auto polls = poller_->poll(0);
if (polls.size() == 0)
break;
for (auto sock : polls) {
Message *msg = sock->receive(true);
delete msg;
}
}
}
bool SubMaster::updated(const char *name) const {
return services_.at(name)->updated;
}
bool SubMaster::alive(const char *name) const {
return services_.at(name)->alive;
}
bool SubMaster::valid(const char *name) const {
return services_.at(name)->valid;
}
uint64_t SubMaster::rcv_frame(const char *name) const {
return services_.at(name)->rcv_frame;
}
uint64_t SubMaster::rcv_time(const char *name) const {
return services_.at(name)->rcv_time;
}
cereal::Event::Reader &SubMaster::operator[](const char *name) const {
return services_.at(name)->event;
}
SubMaster::~SubMaster() {
delete poller_;
for (auto &kv : messages_) {
SubMessage *m = kv.second;
m->msg_reader->~FlatArrayMessageReader();
free(m->allocated_msg_reader);
delete m->socket;
delete m;
}
}
PubMaster::PubMaster(const std::vector<const char *> &service_list) {
for (auto name : service_list) {
assert(services.count(name) > 0);
PubSocket *socket = PubSocket::create(message_context.context(), name);
assert(socket);
sockets_[name] = socket;
}
}
int PubMaster::send(const char *name, MessageBuilder &msg) {
auto bytes = msg.toBytes();
return send(name, bytes.begin(), bytes.size());
}
PubMaster::~PubMaster() {
for (auto s : sockets_) delete s.second;
}

@ -0,0 +1,185 @@
import os
import capnp
import multiprocessing
import numbers
import random
import threading
import time
from parameterized import parameterized
import pytest
from cereal import log, car
import cereal.messaging as messaging
from cereal.services import SERVICE_LIST
events = [evt for evt in log.Event.schema.union_fields if evt in SERVICE_LIST.keys()]
def random_sock():
return random.choice(events)
def random_socks(num_socks=10):
return list({random_sock() for _ in range(num_socks)})
def random_bytes(length=1000):
return bytes([random.randrange(0xFF) for _ in range(length)])
def zmq_sleep(t=1):
if "ZMQ" in os.environ:
time.sleep(t)
# TODO: this should take any capnp struct and return a msg with randomly populated data
def random_carstate():
fields = ["vEgo", "aEgo", "brake", "steeringAngleDeg"]
msg = messaging.new_message("carState")
cs = msg.carState
for f in fields:
setattr(cs, f, random.random() * 10)
return msg
# TODO: this should compare any capnp structs
def assert_carstate(cs1, cs2):
for f in car.CarState.schema.non_union_fields:
# TODO: check all types
val1, val2 = getattr(cs1, f), getattr(cs2, f)
if isinstance(val1, numbers.Number):
assert val1 == val2, f"{f}: sent '{val1}' vs recvd '{val2}'"
def delayed_send(delay, sock, dat):
def send_func():
sock.send(dat)
threading.Timer(delay, send_func).start()
class TestMessaging:
def setUp(self):
# TODO: ZMQ tests are too slow; all sleeps will need to be
# replaced with logic to block on the necessary condition
if "ZMQ" in os.environ:
pytest.skip()
# ZMQ pub socket takes too long to die
# sleep to prevent multiple publishers error between tests
zmq_sleep()
@parameterized.expand(events)
def test_new_message(self, evt):
try:
msg = messaging.new_message(evt)
except capnp.lib.capnp.KjException:
msg = messaging.new_message(evt, random.randrange(200))
assert (time.monotonic() - msg.logMonoTime) < 0.1
assert not msg.valid
assert evt == msg.which()
@parameterized.expand(events)
def test_pub_sock(self, evt):
messaging.pub_sock(evt)
@parameterized.expand(events)
def test_sub_sock(self, evt):
messaging.sub_sock(evt)
@parameterized.expand([
(messaging.drain_sock, capnp._DynamicStructReader),
(messaging.drain_sock_raw, bytes),
])
def test_drain_sock(self, func, expected_type):
sock = "carState"
pub_sock = messaging.pub_sock(sock)
sub_sock = messaging.sub_sock(sock, timeout=1000)
zmq_sleep()
# no wait and no msgs in queue
msgs = func(sub_sock)
assert isinstance(msgs, list)
assert len(msgs) == 0
# no wait but msgs are queued up
num_msgs = random.randrange(3, 10)
for _ in range(num_msgs):
pub_sock.send(messaging.new_message(sock).to_bytes())
time.sleep(0.1)
msgs = func(sub_sock)
assert isinstance(msgs, list)
assert all(isinstance(msg, expected_type) for msg in msgs)
assert len(msgs) == num_msgs
def test_recv_sock(self):
sock = "carState"
pub_sock = messaging.pub_sock(sock)
sub_sock = messaging.sub_sock(sock, timeout=100)
zmq_sleep()
# no wait and no msg in queue, socket should timeout
recvd = messaging.recv_sock(sub_sock)
assert recvd is None
# no wait and one msg in queue
msg = random_carstate()
pub_sock.send(msg.to_bytes())
time.sleep(0.01)
recvd = messaging.recv_sock(sub_sock)
assert isinstance(recvd, capnp._DynamicStructReader)
# https://github.com/python/mypy/issues/13038
assert_carstate(msg.carState, recvd.carState)
def test_recv_one(self):
sock = "carState"
pub_sock = messaging.pub_sock(sock)
sub_sock = messaging.sub_sock(sock, timeout=1000)
zmq_sleep()
# no msg in queue, socket should timeout
recvd = messaging.recv_one(sub_sock)
assert recvd is None
# one msg in queue
msg = random_carstate()
pub_sock.send(msg.to_bytes())
recvd = messaging.recv_one(sub_sock)
assert isinstance(recvd, capnp._DynamicStructReader)
assert_carstate(msg.carState, recvd.carState)
@pytest.mark.xfail(condition="ZMQ" in os.environ, reason='ZMQ detected')
def test_recv_one_or_none(self):
sock = "carState"
pub_sock = messaging.pub_sock(sock)
sub_sock = messaging.sub_sock(sock)
zmq_sleep()
# no msg in queue, socket shouldn't block
recvd = messaging.recv_one_or_none(sub_sock)
assert recvd is None
# one msg in queue
msg = random_carstate()
pub_sock.send(msg.to_bytes())
recvd = messaging.recv_one_or_none(sub_sock)
assert isinstance(recvd, capnp._DynamicStructReader)
assert_carstate(msg.carState, recvd.carState)
def test_recv_one_retry(self):
sock = "carState"
sock_timeout = 0.1
pub_sock = messaging.pub_sock(sock)
sub_sock = messaging.sub_sock(sock, timeout=round(sock_timeout*1000))
zmq_sleep()
# this test doesn't work with ZMQ since multiprocessing interrupts it
if "ZMQ" not in os.environ:
# wait 5 socket timeouts and make sure it's still retrying
p = multiprocessing.Process(target=messaging.recv_one_retry, args=(sub_sock,))
p.start()
time.sleep(sock_timeout*5)
assert p.is_alive()
p.terminate()
# wait 5 socket timeouts before sending
msg = random_carstate()
start_time = time.monotonic()
delayed_send(sock_timeout*5, pub_sock, msg.to_bytes())
recvd = messaging.recv_one_retry(sub_sock)
assert (time.monotonic() - start_time) >= sock_timeout*5
assert isinstance(recvd, capnp._DynamicStructReader)
assert_carstate(msg.carState, recvd.carState)

@ -0,0 +1,160 @@
import random
import time
from typing import Sized, cast
import cereal.messaging as messaging
from cereal.messaging.tests.test_messaging import events, random_sock, random_socks, \
random_bytes, random_carstate, assert_carstate, \
zmq_sleep
from cereal.services import SERVICE_LIST
class TestSubMaster:
def setup_method(self):
# ZMQ pub socket takes too long to die
# sleep to prevent multiple publishers error between tests
zmq_sleep(3)
def test_init(self):
sm = messaging.SubMaster(events)
for p in [sm.updated, sm.recv_time, sm.recv_frame, sm.alive,
sm.sock, sm.data, sm.logMonoTime, sm.valid]:
assert len(cast(Sized, p)) == len(events)
def test_init_state(self):
socks = random_socks()
sm = messaging.SubMaster(socks)
assert sm.frame == -1
assert not any(sm.updated.values())
assert not any(sm.seen.values())
on_demand = {s: SERVICE_LIST[s].frequency <= 1e-5 for s in sm.services}
assert all(sm.alive[s] == sm.valid[s] == sm.freq_ok[s] == on_demand[s] for s in sm.services)
assert all(t == 0. for t in sm.recv_time.values())
assert all(f == 0 for f in sm.recv_frame.values())
assert all(t == 0 for t in sm.logMonoTime.values())
for p in [sm.updated, sm.recv_time, sm.recv_frame, sm.alive,
sm.sock, sm.data, sm.logMonoTime, sm.valid]:
assert len(cast(Sized, p)) == len(socks)
def test_getitem(self):
sock = "carState"
pub_sock = messaging.pub_sock(sock)
sm = messaging.SubMaster([sock,])
zmq_sleep()
msg = random_carstate()
pub_sock.send(msg.to_bytes())
sm.update(1000)
assert_carstate(msg.carState, sm[sock])
# TODO: break this test up to individually test SubMaster.update and SubMaster.update_msgs
def test_update(self):
sock = "carState"
pub_sock = messaging.pub_sock(sock)
sm = messaging.SubMaster([sock,])
zmq_sleep()
for i in range(10):
msg = messaging.new_message(sock)
pub_sock.send(msg.to_bytes())
sm.update(1000)
assert sm.frame == i
assert all(sm.updated.values())
def test_update_timeout(self):
sock = random_sock()
sm = messaging.SubMaster([sock,])
timeout = random.randrange(1000, 3000)
start_time = time.monotonic()
sm.update(timeout)
t = time.monotonic() - start_time
assert t >= timeout/1000.
assert t < 3
assert not any(sm.updated.values())
def test_avg_frequency_checks(self):
for poll in (True, False):
sm = messaging.SubMaster(["modelV2", "carParams", "carState", "cameraOdometry", "liveCalibration"],
poll=("modelV2" if poll else None),
frequency=(20. if not poll else None))
checks = {
"carState": (20, 20),
"modelV2": (20, 20 if poll else 10),
"cameraOdometry": (20, 10),
"liveCalibration": (4, 4),
"carParams": (None, None),
"userBookmark": (None, None),
}
for service, (max_freq, min_freq) in checks.items():
if max_freq is not None:
assert sm._check_avg_freq(service)
assert sm.freq_tracker[service].max_freq == max_freq*1.2
assert sm.freq_tracker[service].min_freq == min_freq*0.8
else:
assert not sm._check_avg_freq(service)
def test_alive(self):
pass
def test_ignore_alive(self):
pass
def test_valid(self):
pass
# SubMaster should always conflate
def test_conflate(self):
sock = "carState"
pub_sock = messaging.pub_sock(sock)
sm = messaging.SubMaster([sock,])
n = 10
for i in range(n+1):
msg = messaging.new_message(sock)
msg.carState.vEgo = i
pub_sock.send(msg.to_bytes())
time.sleep(0.01)
sm.update(1000)
assert sm[sock].vEgo == n
class TestPubMaster:
def setup_method(self):
# ZMQ pub socket takes too long to die
# sleep to prevent multiple publishers error between tests
zmq_sleep(3)
def test_init(self):
messaging.PubMaster(events)
def test_send(self):
socks = random_socks()
pm = messaging.PubMaster(socks)
sub_socks = {s: messaging.sub_sock(s, conflate=True, timeout=1000) for s in socks}
zmq_sleep()
# PubMaster accepts either a capnp msg builder or bytes
for capnp in [True, False]:
for i in range(100):
sock = socks[i % len(socks)]
if capnp:
try:
msg = messaging.new_message(sock)
except Exception:
msg = messaging.new_message(sock, random.randrange(50))
else:
msg = random_bytes()
pm.send(sock, msg)
recvd = sub_socks[sock].receive()
if capnp:
msg.clear_write_flag()
msg = msg.to_bytes()
assert msg == recvd, i

@ -0,0 +1,21 @@
import os
import tempfile
from typing import Dict
from parameterized import parameterized
import cereal.services as services
from cereal.services import SERVICE_LIST
class TestServices:
@parameterized.expand(SERVICE_LIST.keys())
def test_services(self, s):
service = SERVICE_LIST[s]
assert service.frequency <= 104
assert service.decimation != 0
def test_generated_header(self):
with tempfile.NamedTemporaryFile(suffix=".h") as f:
ret = os.system(f"python3 {services.__file__} > {f.name} && clang++ {f.name} -std=c++11")
assert ret == 0, "generated services header is not valid C++"

@ -0,0 +1,126 @@
#!/usr/bin/env python3
from typing import Optional
class Service:
def __init__(self, should_log: bool, frequency: float, decimation: Optional[int] = None):
self.should_log = should_log
self.frequency = frequency
self.decimation = decimation
_services: dict[str, tuple] = {
# service: (should_log, frequency, qlog decimation (optional))
# note: the "EncodeIdx" packets will still be in the log
"gyroscope": (True, 104., 104),
"gyroscope2": (True, 100., 100),
"accelerometer": (True, 104., 104),
"accelerometer2": (True, 100., 100),
"magnetometer": (True, 25.),
"lightSensor": (True, 100., 100),
"temperatureSensor": (True, 2., 200),
"temperatureSensor2": (True, 2., 200),
"gpsNMEA": (True, 9.),
"deviceState": (True, 2., 1),
"touch": (True, 20., 1),
"can": (True, 100., 2053), # decimation gives ~3 msgs in a full segment
"controlsState": (True, 100., 10),
"selfdriveState": (True, 100., 10),
"pandaStates": (True, 10., 1),
"peripheralState": (True, 2., 1),
"radarState": (True, 20., 5),
"roadEncodeIdx": (False, 20., 1),
"liveTracks": (True, 20.),
"sendcan": (True, 100., 139),
"logMessage": (True, 0.),
"errorLogMessage": (True, 0., 1),
"liveCalibration": (True, 4., 4),
"liveTorqueParameters": (True, 4., 1),
"liveDelay": (True, 4., 1),
"androidLog": (True, 0.),
"carState": (True, 100., 10),
"carControl": (True, 100., 10),
"carOutput": (True, 100., 10),
"longitudinalPlan": (True, 20., 10),
"driverAssistance": (True, 20., 20),
"procLog": (True, 0.5, 15),
"gpsLocationExternal": (True, 10., 10),
"gpsLocation": (True, 1., 1),
"ubloxGnss": (True, 10.),
"qcomGnss": (True, 2.),
"gnssMeasurements": (True, 10., 10),
"clocks": (True, 0.1, 1),
"ubloxRaw": (True, 20.),
"livePose": (True, 20., 4),
"liveParameters": (True, 20., 5),
"cameraOdometry": (True, 20., 10),
"thumbnail": (True, 1 / 60., 1),
"onroadEvents": (True, 1., 1),
"carParams": (True, 0.02, 1),
"roadCameraState": (True, 20., 20),
"driverCameraState": (True, 20., 20),
"driverEncodeIdx": (False, 20., 1),
"driverStateV2": (True, 20., 10),
"driverMonitoringState": (True, 20., 10),
"wideRoadEncodeIdx": (False, 20., 1),
"wideRoadCameraState": (True, 20., 20),
"drivingModelData": (True, 20., 10),
"modelV2": (True, 20.),
"managerState": (True, 2., 1),
"uploaderState": (True, 0., 1),
"navInstruction": (True, 1., 10),
"navRoute": (True, 0.),
"navThumbnail": (True, 0.),
"qRoadEncodeIdx": (False, 20.),
"userBookmark": (True, 0., 1),
"soundPressure": (True, 10., 10),
"rawAudioData": (False, 20.),
"bookmarkButton": (True, 0., 1),
"audioFeedback": (True, 0., 1),
# debug
"uiDebug": (True, 0., 1),
"testJoystick": (True, 0.),
"alertDebug": (True, 20., 5),
"roadEncodeData": (False, 20.),
"driverEncodeData": (False, 20.),
"wideRoadEncodeData": (False, 20.),
"qRoadEncodeData": (False, 20.),
"livestreamWideRoadEncodeIdx": (False, 20.),
"livestreamRoadEncodeIdx": (False, 20.),
"livestreamDriverEncodeIdx": (False, 20.),
"livestreamWideRoadEncodeData": (False, 20.),
"livestreamRoadEncodeData": (False, 20.),
"livestreamDriverEncodeData": (False, 20.),
"customReservedRawData0": (True, 0.),
"customReservedRawData1": (True, 0.),
"customReservedRawData2": (True, 0.),
}
SERVICE_LIST = {name: Service(*vals) for
idx, (name, vals) in enumerate(_services.items())}
def build_header():
h = ""
h += "/* THIS IS AN AUTOGENERATED FILE, PLEASE EDIT services.py */\n"
h += "#ifndef __SERVICES_H\n"
h += "#define __SERVICES_H\n"
h += "#include <map>\n"
h += "#include <string>\n"
h += "struct service { std::string name; bool should_log; int frequency; int decimation; };\n"
h += "static std::map<std::string, service> services = {\n"
for k, v in SERVICE_LIST.items():
should_log = "true" if v.should_log else "false"
decimation = -1 if v.decimation is None else v.decimation
h += ' { "%s", {"%s", %s, %d, %d}},\n' % \
(k, k, should_log, v.frequency, decimation)
h += "};\n"
h += "#endif\n"
return h
if __name__ == "__main__":
print(build_header())
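SERVICE_LIST maps each service name to a Service(should_log, frequency, decimation), and build_header() emits the C++ table used by the generated services header. A small sketch of both, assuming the module imports as cereal.services:

from cereal.services import SERVICE_LIST, build_header

svc = SERVICE_LIST["carState"]
print(svc.should_log, svc.frequency, svc.decimation)  # True 100.0 10

# regenerate the C++ header the same way the __main__ block above does
with open("services.h", "w") as f:
  f.write(build_header())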

common/.gitignore

@ -0,0 +1 @@
*.cpp

@ -0,0 +1,35 @@
Import('env', 'envCython', 'arch')
common_libs = [
'params.cc',
'swaglog.cc',
'util.cc',
'watchdog.cc',
'ratekeeper.cc'
]
_common = env.Library('common', common_libs, LIBS="json11")
files = [
'clutil.cc',
]
_gpucommon = env.Library('gpucommon', files)
Export('_common', '_gpucommon')
if GetOption('extras'):
env.Program('tests/test_common',
['tests/test_runner.cc', 'tests/test_params.cc', 'tests/test_util.cc', 'tests/test_swaglog.cc'],
LIBS=[_common, 'json11', 'zmq', 'pthread'])
# Cython bindings
params_python = envCython.Program('params_pyx.so', 'params_pyx.pyx', LIBS=envCython['LIBS'] + [_common, 'zmq', 'json11'])
SConscript([
'transformations/SConscript',
])
Import('transformations_python')
common_python = [params_python, transformations_python]
Export('common_python')

@ -0,0 +1,46 @@
import jwt
import os
import requests
from datetime import datetime, timedelta, UTC
from openpilot.system.hardware.hw import Paths
from openpilot.system.version import get_version
API_HOST = os.getenv('API_HOST', 'https://api.commadotai.com')
class Api:
def __init__(self, dongle_id):
self.dongle_id = dongle_id
with open(Paths.persist_root()+'/comma/id_rsa') as f:
self.private_key = f.read()
def get(self, *args, **kwargs):
return self.request('GET', *args, **kwargs)
def post(self, *args, **kwargs):
return self.request('POST', *args, **kwargs)
def request(self, method, endpoint, timeout=None, access_token=None, **params):
return api_get(endpoint, method=method, timeout=timeout, access_token=access_token, **params)
def get_token(self, expiry_hours=1):
now = datetime.now(UTC).replace(tzinfo=None)
payload = {
'identity': self.dongle_id,
'nbf': now,
'iat': now,
'exp': now + timedelta(hours=expiry_hours)
}
token = jwt.encode(payload, self.private_key, algorithm='RS256')
if isinstance(token, bytes):
token = token.decode('utf8')
return token
def api_get(endpoint, method='GET', timeout=None, access_token=None, **params):
headers = {}
if access_token is not None:
headers['Authorization'] = "JWT " + access_token
headers['User-Agent'] = "openpilot-" + get_version()
return requests.request(method, API_HOST + "/" + endpoint, timeout=timeout, headers=headers, params=params)
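A hedged sketch of how this class is typically used: sign a short-lived RS256 JWT with the device key and pass it as the access token. The dongle id and the "v1/me" endpoint below are placeholders, and the constructor only works where the persist RSA key exists.

from openpilot.common.api import Api

api = Api("0123456789abcdef")  # hypothetical dongle id
token = api.get_token()        # RS256 JWT, one hour expiry by default
resp = api.get("v1/me", access_token=token, timeout=10)  # endpoint is a placeholder
print(resp.status_code)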

@ -0,0 +1,4 @@
import os
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../"))

@ -0,0 +1,98 @@
#include "common/clutil.h"
#include <cassert>
#include <iostream>
#include <memory>
#include "common/util.h"
#include "common/swaglog.h"
namespace { // helper functions
template <typename Func, typename Id, typename Name>
std::string get_info(Func get_info_func, Id id, Name param_name) {
size_t size = 0;
CL_CHECK(get_info_func(id, param_name, 0, NULL, &size));
std::string info(size, '\0');
CL_CHECK(get_info_func(id, param_name, size, info.data(), NULL));
return info;
}
inline std::string get_platform_info(cl_platform_id id, cl_platform_info name) { return get_info(&clGetPlatformInfo, id, name); }
inline std::string get_device_info(cl_device_id id, cl_device_info name) { return get_info(&clGetDeviceInfo, id, name); }
void cl_print_info(cl_platform_id platform, cl_device_id device) {
size_t work_group_size = 0;
cl_device_type device_type = 0;
clGetDeviceInfo(device, CL_DEVICE_MAX_WORK_GROUP_SIZE, sizeof(work_group_size), &work_group_size, NULL);
clGetDeviceInfo(device, CL_DEVICE_TYPE, sizeof(device_type), &device_type, NULL);
const char *type_str = "Other...";
switch (device_type) {
case CL_DEVICE_TYPE_CPU: type_str ="CL_DEVICE_TYPE_CPU"; break;
case CL_DEVICE_TYPE_GPU: type_str = "CL_DEVICE_TYPE_GPU"; break;
case CL_DEVICE_TYPE_ACCELERATOR: type_str = "CL_DEVICE_TYPE_ACCELERATOR"; break;
}
LOGD("vendor: %s", get_platform_info(platform, CL_PLATFORM_VENDOR).c_str());
LOGD("platform version: %s", get_platform_info(platform, CL_PLATFORM_VERSION).c_str());
LOGD("profile: %s", get_platform_info(platform, CL_PLATFORM_PROFILE).c_str());
LOGD("extensions: %s", get_platform_info(platform, CL_PLATFORM_EXTENSIONS).c_str());
LOGD("name: %s", get_device_info(device, CL_DEVICE_NAME).c_str());
LOGD("device version: %s", get_device_info(device, CL_DEVICE_VERSION).c_str());
LOGD("max work group size: %zu", work_group_size);
LOGD("type = %d, %s", (int)device_type, type_str);
}
void cl_print_build_errors(cl_program program, cl_device_id device) {
cl_build_status status;
clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_STATUS, sizeof(status), &status, NULL);
size_t log_size;
clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, 0, NULL, &log_size);
std::string log(log_size, '\0');
clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, log_size, &log[0], NULL);
LOGE("build failed; status=%d, log: %s", status, log.c_str());
}
} // namespace
cl_device_id cl_get_device_id(cl_device_type device_type) {
cl_uint num_platforms = 0;
CL_CHECK(clGetPlatformIDs(0, NULL, &num_platforms));
std::unique_ptr<cl_platform_id[]> platform_ids = std::make_unique<cl_platform_id[]>(num_platforms);
CL_CHECK(clGetPlatformIDs(num_platforms, &platform_ids[0], NULL));
for (size_t i = 0; i < num_platforms; ++i) {
LOGD("platform[%zu] CL_PLATFORM_NAME: %s", i, get_platform_info(platform_ids[i], CL_PLATFORM_NAME).c_str());
// Get first device
if (cl_device_id device_id = NULL; clGetDeviceIDs(platform_ids[i], device_type, 1, &device_id, NULL) == 0 && device_id) {
cl_print_info(platform_ids[i], device_id);
return device_id;
}
}
LOGE("No valid openCL platform found");
assert(0);
return nullptr;
}
cl_context cl_create_context(cl_device_id device_id) {
return CL_CHECK_ERR(clCreateContext(NULL, 1, &device_id, NULL, NULL, &err));
}
void cl_release_context(cl_context context) {
clReleaseContext(context);
}
cl_program cl_program_from_file(cl_context ctx, cl_device_id device_id, const char* path, const char* args) {
return cl_program_from_source(ctx, device_id, util::read_file(path), args);
}
cl_program cl_program_from_source(cl_context ctx, cl_device_id device_id, const std::string& src, const char* args) {
const char *csrc = src.c_str();
cl_program prg = CL_CHECK_ERR(clCreateProgramWithSource(ctx, 1, &csrc, NULL, &err));
if (int err = clBuildProgram(prg, 1, &device_id, args, NULL, NULL); err != 0) {
cl_print_build_errors(prg, device_id);
assert(0);
}
return prg;
}

@ -0,0 +1,28 @@
#pragma once
#ifdef __APPLE__
#include <OpenCL/cl.h>
#else
#include <CL/cl.h>
#endif
#include <string>
#define CL_CHECK(_expr) \
do { \
assert(CL_SUCCESS == (_expr)); \
} while (0)
#define CL_CHECK_ERR(_expr) \
({ \
cl_int err = CL_INVALID_VALUE; \
__typeof__(_expr) _ret = _expr; \
assert(_ret && err == CL_SUCCESS); \
_ret; \
})
cl_device_id cl_get_device_id(cl_device_type device_type);
cl_context cl_create_context(cl_device_id device_id);
void cl_release_context(cl_context context);
cl_program cl_program_from_source(cl_context ctx, cl_device_id device_id, const std::string& src, const char* args = nullptr);
cl_program cl_program_from_file(cl_context ctx, cl_device_id device_id, const char* path, const char* args);

@ -0,0 +1,23 @@
import numpy as np
# conversions
class CV:
# Speed
MPH_TO_KPH = 1.609344
KPH_TO_MPH = 1. / MPH_TO_KPH
MS_TO_KPH = 3.6
KPH_TO_MS = 1. / MS_TO_KPH
MS_TO_MPH = MS_TO_KPH * KPH_TO_MPH
MPH_TO_MS = MPH_TO_KPH * KPH_TO_MS
MS_TO_KNOTS = 1.9438
KNOTS_TO_MS = 1. / MS_TO_KNOTS
# Angle
DEG_TO_RAD = np.pi / 180.
RAD_TO_DEG = 1. / DEG_TO_RAD
# Mass
LB_TO_KG = 0.453592
ACCELERATION_DUE_TO_GRAVITY = 9.81 # m/s^2
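These are plain multiplicative conversion factors; for example (assuming the module imports as openpilot.common.constants):

from openpilot.common.constants import CV

v_ego = 25.0                 # m/s
print(v_ego * CV.MS_TO_MPH)  # ~55.9 mph
print(100. * CV.KPH_TO_MS)   # 100 km/h is ~27.8 m/s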

@ -0,0 +1,9 @@
# remove all keys that end in DEPRECATED
def strip_deprecated_keys(d):
for k in list(d.keys()):
if isinstance(k, str):
if k.endswith('DEPRECATED'):
d.pop(k)
elif isinstance(d[k], dict):
strip_deprecated_keys(d[k])
return d
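strip_deprecated_keys mutates the dict in place (recursing into nested dicts) and returns it; a quick example, assuming the module imports as openpilot.common.dict_helpers:

from openpilot.common.dict_helpers import strip_deprecated_keys

d = {"speed": 3.0, "speedDEPRECATED": 1.0, "nested": {"altDEPRECATED": 0.0}}
print(strip_deprecated_keys(d))  # {'speed': 3.0, 'nested': {}}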

@ -0,0 +1,58 @@
import io
import os
import tempfile
import contextlib
import zstandard as zstd
LOG_COMPRESSION_LEVEL = 10 # little benefit up to level 15. level ~17 is a small step change
class CallbackReader:
"""Wraps a file, but overrides the read method to also
call a callback function with the number of bytes read so far."""
def __init__(self, f, callback, *args):
self.f = f
self.callback = callback
self.cb_args = args
self.total_read = 0
def __getattr__(self, attr):
return getattr(self.f, attr)
def read(self, *args, **kwargs):
chunk = self.f.read(*args, **kwargs)
self.total_read += len(chunk)
self.callback(*self.cb_args, self.total_read)
return chunk
@contextlib.contextmanager
def atomic_write_in_dir(path: str, mode: str = 'w', buffering: int = -1, encoding: str = None, newline: str = None,
overwrite: bool = False):
"""Write to a file atomically using a temporary file in the same directory as the destination file."""
dir_name = os.path.dirname(path)
if not overwrite and os.path.exists(path):
raise FileExistsError(f"File '{path}' already exists. To overwrite it, set 'overwrite' to True.")
with tempfile.NamedTemporaryFile(mode=mode, buffering=buffering, encoding=encoding, newline=newline, dir=dir_name, delete=False) as tmp_file:
yield tmp_file
tmp_file_name = tmp_file.name
os.replace(tmp_file_name, path)
def get_upload_stream(filepath: str, should_compress: bool) -> tuple[io.BufferedIOBase, int]:
if not should_compress:
file_size = os.path.getsize(filepath)
file_stream = open(filepath, "rb")
return file_stream, file_size
# Compress the file on the fly
compressed_stream = io.BytesIO()
compressor = zstd.ZstdCompressor(level=LOG_COMPRESSION_LEVEL)
with open(filepath, "rb") as f:
compressor.copy_stream(f, compressed_stream)
compressed_size = compressed_stream.tell()
compressed_stream.seek(0)
return compressed_stream, compressed_size
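A short sketch of the atomic-write helper: content goes to a temporary file in the destination directory and is only renamed over the target once the with-block exits, so readers never see a partial file (import path assumed):

from openpilot.common.file_helpers import atomic_write_in_dir

with atomic_write_in_dir("/tmp/example.json", overwrite=True) as f:
  f.write('{"ok": true}')
# /tmp/example.json is renamed into place only after the block exits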

@ -0,0 +1,17 @@
class FirstOrderFilter:
def __init__(self, x0, rc, dt, initialized=True):
self.x = x0
self.dt = dt
self.update_alpha(rc)
self.initialized = initialized
def update_alpha(self, rc):
self.alpha = self.dt / (rc + self.dt)
def update(self, x):
if self.initialized:
self.x = (1. - self.alpha) * self.x + self.alpha * x
else:
self.initialized = True
self.x = x
return self.x
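FirstOrderFilter is a standard exponential (first-order low-pass) filter with alpha = dt / (rc + dt). A small sketch of smoothing a noisy constant at 100 Hz:

import random

DT = 0.01  # 100 Hz update rate
lpf = FirstOrderFilter(x0=0.0, rc=0.5, dt=DT)
for _ in range(200):
  noisy = 1.0 + random.uniform(-0.2, 0.2)
  smoothed = lpf.update(noisy)
print(round(smoothed, 2))  # settles close to 1.0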

@ -0,0 +1,42 @@
from functools import cache
import subprocess
from openpilot.common.run import run_cmd, run_cmd_default
@cache
def get_commit(cwd: str = None, branch: str = "HEAD") -> str:
return run_cmd_default(["git", "rev-parse", branch], cwd=cwd)
@cache
def get_commit_date(cwd: str = None, commit: str = "HEAD") -> str:
return run_cmd_default(["git", "show", "--no-patch", "--format='%ct %ci'", commit], cwd=cwd)
@cache
def get_short_branch(cwd: str = None) -> str:
return run_cmd_default(["git", "rev-parse", "--abbrev-ref", "HEAD"], cwd=cwd)
@cache
def get_branch(cwd: str = None) -> str:
return run_cmd_default(["git", "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{u}"], cwd=cwd)
@cache
def get_origin(cwd: str = None) -> str:
try:
local_branch = run_cmd(["git", "name-rev", "--name-only", "HEAD"], cwd=cwd)
tracking_remote = run_cmd(["git", "config", "branch." + local_branch + ".remote"], cwd=cwd)
return run_cmd(["git", "config", "remote." + tracking_remote + ".url"], cwd=cwd)
except subprocess.CalledProcessError: # Not on a branch, fallback
return run_cmd_default(["git", "config", "--get", "remote.origin.url"], cwd=cwd)
@cache
def get_normalized_origin(cwd: str = None) -> str:
return get_origin(cwd) \
.replace("git@", "", 1) \
.replace(".git", "", 1) \
.replace("https://", "", 1) \
.replace(":", "/", 1)

@ -0,0 +1,89 @@
import os
import fcntl
import ctypes
from functools import cache
def gpio_init(pin: int, output: bool) -> None:
try:
with open(f"/sys/class/gpio/gpio{pin}/direction", 'wb') as f:
f.write(b"out" if output else b"in")
except Exception as e:
print(f"Failed to set gpio {pin} direction: {e}")
def gpio_set(pin: int, high: bool) -> None:
try:
with open(f"/sys/class/gpio/gpio{pin}/value", 'wb') as f:
f.write(b"1" if high else b"0")
except Exception as e:
print(f"Failed to set gpio {pin} value: {e}")
def gpio_read(pin: int) -> bool | None:
val = None
try:
with open(f"/sys/class/gpio/gpio{pin}/value", 'rb') as f:
val = bool(int(f.read().strip()))
except Exception as e:
print(f"Failed to set gpio {pin} value: {e}")
return val
def gpio_export(pin: int) -> None:
if os.path.isdir(f"/sys/class/gpio/gpio{pin}"):
return
try:
with open("/sys/class/gpio/export", 'w') as f:
f.write(str(pin))
except Exception:
print(f"Failed to export gpio {pin}")
@cache
def get_irq_action(irq: int) -> list[str]:
try:
with open(f"/sys/kernel/irq/{irq}/actions") as f:
actions = f.read().strip().split(',')
return actions
except FileNotFoundError:
return []
def get_irqs_for_action(action: str) -> list[str]:
ret = []
with open("/proc/interrupts") as f:
for l in f.readlines():
irq = l.split(':')[0].strip()
if irq.isdigit() and action in get_irq_action(irq):
ret.append(irq)
return ret
# *** gpiochip ***
class gpioevent_data(ctypes.Structure):
_fields_ = [
("timestamp", ctypes.c_uint64),
("id", ctypes.c_uint32),
]
class gpioevent_request(ctypes.Structure):
_fields_ = [
("lineoffset", ctypes.c_uint32),
("handleflags", ctypes.c_uint32),
("eventflags", ctypes.c_uint32),
("label", ctypes.c_char * 32),
("fd", ctypes.c_int)
]
def gpiochip_get_ro_value_fd(label: str, gpiochip_id: int, pin: int) -> int:
GPIOEVENT_REQUEST_BOTH_EDGES = 0x3
GPIOHANDLE_REQUEST_INPUT = 0x1
GPIO_GET_LINEEVENT_IOCTL = 0xc030b404
rq = gpioevent_request()
rq.lineoffset = pin
rq.handleflags = GPIOHANDLE_REQUEST_INPUT
rq.eventflags = GPIOEVENT_REQUEST_BOTH_EDGES
rq.label = label.encode('utf-8')[:31] + b'\0'
fd = os.open(f"/dev/gpiochip{gpiochip_id}", os.O_RDONLY)
fcntl.ioctl(fd, GPIO_GET_LINEEVENT_IOCTL, rq)
os.close(fd)
return int(rq.fd)
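The sysfs helpers above follow the usual export, set direction, then read/write pattern. A hedged sketch using a hypothetical pin number; this requires hardware with the legacy sysfs GPIO interface and appropriate permissions:

PIN = 42  # hypothetical GPIO number; real pins are board-specific

gpio_export(PIN)              # no-op if already exported
gpio_init(PIN, output=True)   # configure as an output...
gpio_set(PIN, True)           # ...and drive it high

gpio_init(PIN, output=False)  # switch to input and read it back
print(gpio_read(PIN))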

@ -0,0 +1,8 @@
from openpilot.common.params import Params
def get_gps_location_service(params: Params) -> str:
if params.get_bool("UbloxAvailable"):
return "gpsLocationExternal"
else:
return "gpsLocation"

@ -0,0 +1,249 @@
import io
import os
import sys
import copy
import json
import time
import uuid
import socket
import logging
import traceback
import numpy as np
from threading import local
from collections import OrderedDict
from contextlib import contextmanager
LOG_TIMESTAMPS = "LOG_TIMESTAMPS" in os.environ
def json_handler(obj):
if isinstance(obj, np.bool_):
return bool(obj)
# if isinstance(obj, (datetime.date, datetime.time)):
# return obj.isoformat()
return repr(obj)
def json_robust_dumps(obj):
return json.dumps(obj, default=json_handler)
class NiceOrderedDict(OrderedDict):
def __str__(self):
return json_robust_dumps(self)
class SwagFormatter(logging.Formatter):
def __init__(self, swaglogger):
logging.Formatter.__init__(self, None, '%a %b %d %H:%M:%S %Z %Y')
self.swaglogger = swaglogger
self.host = socket.gethostname()
def format_dict(self, record):
record_dict = NiceOrderedDict()
if isinstance(record.msg, dict):
record_dict['msg'] = record.msg
else:
try:
record_dict['msg'] = record.getMessage()
except (ValueError, TypeError):
record_dict['msg'] = [record.msg]+record.args
record_dict['ctx'] = self.swaglogger.get_ctx()
if record.exc_info:
record_dict['exc_info'] = self.formatException(record.exc_info)
record_dict['level'] = record.levelname
record_dict['levelnum'] = record.levelno
record_dict['name'] = record.name
record_dict['filename'] = record.filename
record_dict['lineno'] = record.lineno
record_dict['pathname'] = record.pathname
record_dict['module'] = record.module
record_dict['funcName'] = record.funcName
record_dict['host'] = self.host
record_dict['process'] = record.process
record_dict['thread'] = record.thread
record_dict['threadName'] = record.threadName
record_dict['created'] = record.created
return record_dict
def format(self, record):
if self.swaglogger is None:
raise Exception("must set swaglogger before calling format()")
return json_robust_dumps(self.format_dict(record))
class SwagLogFileFormatter(SwagFormatter):
def fix_kv(self, k, v):
# append type to names to preserve legacy naming in logs
# avoids overlapping key namespaces with different types
# e.g. log.info() creates 'msg' -> 'msg$s'
# log.event() creates 'msg.health.logMonoTime' -> 'msg.health.logMonoTime$i'
# because overlapping namespace 'msg' caused problems
if isinstance(v, (str, bytes)):
k += "$s"
elif isinstance(v, float):
k += "$f"
elif isinstance(v, bool):
k += "$b"
elif isinstance(v, int):
k += "$i"
elif isinstance(v, dict):
nv = {}
for ik, iv in v.items():
ik, iv = self.fix_kv(ik, iv)
nv[ik] = iv
v = nv
elif isinstance(v, list):
k += "$a"
return k, v
def format(self, record):
if isinstance(record, str):
v = json.loads(record)
else:
v = self.format_dict(record)
mk, mv = self.fix_kv('msg', v['msg'])
del v['msg']
v[mk] = mv
v['id'] = uuid.uuid4().hex
return json_robust_dumps(v)
class SwagErrorFilter(logging.Filter):
def filter(self, record):
return record.levelno < logging.ERROR
def _tmpfunc():
return 0
def _srcfile():
return os.path.normcase(_tmpfunc.__code__.co_filename)
class SwagLogger(logging.Logger):
def __init__(self):
logging.Logger.__init__(self, "swaglog")
self.global_ctx = {}
self.log_local = local()
self.log_local.ctx = {}
def local_ctx(self):
try:
return self.log_local.ctx
except AttributeError:
self.log_local.ctx = {}
return self.log_local.ctx
def get_ctx(self):
return dict(self.local_ctx(), **self.global_ctx)
@contextmanager
def ctx(self, **kwargs):
old_ctx = self.local_ctx()
self.log_local.ctx = copy.copy(old_ctx) or {}
self.log_local.ctx.update(kwargs)
try:
yield
finally:
self.log_local.ctx = old_ctx
def bind(self, **kwargs):
self.local_ctx().update(kwargs)
def bind_global(self, **kwargs):
self.global_ctx.update(kwargs)
def event(self, event, *args, **kwargs):
evt = NiceOrderedDict()
evt['event'] = event
if args:
evt['args'] = args
evt.update(kwargs)
if 'error' in kwargs:
self.error(evt)
elif 'debug' in kwargs:
self.debug(evt)
else:
self.info(evt)
def timestamp(self, event_name):
if LOG_TIMESTAMPS:
t = time.monotonic()
tstp = NiceOrderedDict()
tstp['timestamp'] = NiceOrderedDict()
tstp['timestamp']["event"] = event_name
tstp['timestamp']["time"] = t*1e9
self.debug(tstp)
def findCaller(self, stack_info=False, stacklevel=1):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = sys._getframe(3)
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
orig_f = f
while f and stacklevel > 1:
f = f.f_back
stacklevel -= 1
if not f:
f = orig_f
rv = "(unknown file)", 0, "(unknown function)", None
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
      if filename == _srcfile():
f = f.f_back
continue
sinfo = None
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
break
return rv
if __name__ == "__main__":
log = SwagLogger()
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.INFO)
stdout_handler.addFilter(SwagErrorFilter())
log.addHandler(stdout_handler)
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setLevel(logging.ERROR)
log.addHandler(stderr_handler)
log.info("asdasd %s", "a")
log.info({'wut': 1})
log.warning("warning")
log.error("error")
log.critical("critical")
log.event("test", x="y")
with log.ctx():
stdout_handler.setFormatter(SwagFormatter(log))
stderr_handler.setFormatter(SwagFormatter(log))
log.bind(user="some user")
log.info("in req")
print("")
log.warning("warning")
print("")
log.error("error")
print("")
log.critical("critical")
print("")
log.event("do_req", a=1, b="c")

@ -0,0 +1,45 @@
HTML_REPLACEMENTS = [
(r'&', r'&amp;'),
(r'"', r'&quot;'),
]
def parse_markdown(text: str, tab_length: int = 2) -> str:
lines = text.split("\n")
output: list[str] = []
list_level = 0
def end_outstanding_lists(level: int, end_level: int) -> int:
while level > end_level:
level -= 1
output.append("</ul>")
if level > 0:
output.append("</li>")
return end_level
for i, line in enumerate(lines):
if i + 1 < len(lines) and lines[i + 1].startswith("==="): # heading
output.append(f"<h1>{line}</h1>")
elif line.startswith("==="):
pass
elif line.lstrip().startswith("* "): # list
line_level = 1 + line.count(" " * tab_length, 0, line.index("*"))
if list_level >= line_level:
list_level = end_outstanding_lists(list_level, line_level)
else:
list_level += 1
if list_level > 1:
output[-1] = output[-1].replace("</li>", "")
output.append("<ul>")
output.append(f"<li>{line.replace('*', '', 1).lstrip()}</li>")
else:
list_level = end_outstanding_lists(list_level, 0)
if len(line) > 0:
output.append(line)
end_outstanding_lists(list_level, 0)
output_str = "\n".join(output) + "\n"
for (fr, to) in HTML_REPLACEMENTS:
output_str = output_str.replace(fr, to)
return output_str
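
Usage sketch (illustrative, not part of this commit): parse_markdown only understands "===" headings and "*" bullet lists, so a small input is enough to see the shape of the output.

from openpilot.common.markdown import parse_markdown

print(parse_markdown("Title\n===\n* item"))
# <h1>Title</h1>
# <ul>
# <li>item</li>
# </ul>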

@ -0,0 +1,85 @@
#pragma once
typedef struct vec3 {
float v[3];
} vec3;
typedef struct vec4 {
float v[4];
} vec4;
typedef struct mat3 {
float v[3*3];
} mat3;
typedef struct mat4 {
float v[4*4];
} mat4;
static inline mat3 matmul3(const mat3 &a, const mat3 &b) {
mat3 ret = {{0.0}};
for (int r=0; r<3; r++) {
for (int c=0; c<3; c++) {
float v = 0.0;
for (int k=0; k<3; k++) {
v += a.v[r*3+k] * b.v[k*3+c];
}
ret.v[r*3+c] = v;
}
}
return ret;
}
static inline vec3 matvecmul3(const mat3 &a, const vec3 &b) {
vec3 ret = {{0.0}};
for (int r=0; r<3; r++) {
for (int c=0; c<3; c++) {
ret.v[r] += a.v[r*3+c] * b.v[c];
}
}
return ret;
}
static inline mat4 matmul(const mat4 &a, const mat4 &b) {
mat4 ret = {{0.0}};
for (int r=0; r<4; r++) {
for (int c=0; c<4; c++) {
float v = 0.0;
for (int k=0; k<4; k++) {
v += a.v[r*4+k] * b.v[k*4+c];
}
ret.v[r*4+c] = v;
}
}
return ret;
}
static inline vec4 matvecmul(const mat4 &a, const vec4 &b) {
vec4 ret = {{0.0}};
for (int r=0; r<4; r++) {
for (int c=0; c<4; c++) {
ret.v[r] += a.v[r*4+c] * b.v[c];
}
}
return ret;
}
// scales the input and output space of a transformation matrix
// that assumes pixel-center origin.
static inline mat3 transform_scale_buffer(const mat3 &in, float s) {
// in_pt = ( transform(out_pt/s + 0.5) - 0.5) * s
mat3 transform_out = {{
1.0f/s, 0.0f, 0.5f,
0.0f, 1.0f/s, 0.5f,
0.0f, 0.0f, 1.0f,
}};
mat3 transform_in = {{
s, 0.0f, -0.5f*s,
0.0f, s, -0.5f*s,
0.0f, 0.0f, 1.0f,
}};
return matmul3(transform_in, matmul3(in, transform_out));
}

@ -0,0 +1,50 @@
"""
Utilities for generating mock messages for testing.
example in common/tests/test_mock.py
"""
import functools
import threading
from cereal.messaging import PubMaster
from cereal.services import SERVICE_LIST
from openpilot.common.mock.generators import generate_livePose
from openpilot.common.realtime import Ratekeeper
MOCK_GENERATOR = {
"livePose": generate_livePose
}
def generate_messages_loop(services: list[str], done: threading.Event):
pm = PubMaster(services)
rk = Ratekeeper(100)
i = 0
while not done.is_set():
for s in services:
should_send = i % (100/SERVICE_LIST[s].frequency) == 0
if should_send:
message = MOCK_GENERATOR[s]()
pm.send(s, message)
i += 1
rk.keep_time()
def mock_messages(services: list[str] | str):
if isinstance(services, str):
services = [services]
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
done = threading.Event()
t = threading.Thread(target=generate_messages_loop, args=(services, done))
t.start()
try:
return func(*args, **kwargs)
finally:
done.set()
t.join()
return wrapper
return decorator
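
Usage sketch (illustrative, not part of this commit; assumes a working messaging setup and that cereal.messaging.SubMaster is available): the decorator publishes mocked messages for the listed services while the wrapped test runs.

from cereal import messaging
from openpilot.common.mock import mock_messages

@mock_messages(["livePose"])
def test_receives_live_pose():
  sm = messaging.SubMaster(["livePose"])
  seen = False
  for _ in range(20):
    sm.update(100)
    seen = seen or sm.updated["livePose"]
  assert seen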

@ -0,0 +1,14 @@
from cereal import messaging
def generate_livePose():
msg = messaging.new_message('livePose')
meas = {'x': 0.0, 'y': 0.0, 'z': 0.0, 'xStd': 0.0, 'yStd': 0.0, 'zStd': 0.0, 'valid': True}
msg.livePose.orientationNED = meas
msg.livePose.velocityDevice = meas
msg.livePose.angularVelocityDevice = meas
msg.livePose.accelerationDevice = meas
msg.livePose.inputsOK = True
msg.livePose.posenetOK = True
msg.livePose.sensorsOK = True
return msg

@ -0,0 +1,242 @@
#include "common/params.h"
#include <dirent.h>
#include <sys/file.h>
#include <algorithm>
#include <cassert>
#include <csignal>
#include <unordered_map>
#include "common/params_keys.h"
#include "common/queue.h"
#include "common/swaglog.h"
#include "common/util.h"
#include "system/hardware/hw.h"
namespace {
volatile sig_atomic_t params_do_exit = 0;
void params_sig_handler(int signal) {
params_do_exit = 1;
}
int fsync_dir(const std::string &path) {
int result = -1;
int fd = HANDLE_EINTR(open(path.c_str(), O_RDONLY, 0755));
if (fd >= 0) {
result = HANDLE_EINTR(fsync(fd));
HANDLE_EINTR(close(fd));
}
return result;
}
bool create_params_path(const std::string &param_path, const std::string &key_path) {
// Make sure params path exists
if (!util::file_exists(param_path) && !util::create_directories(param_path, 0775)) {
return false;
}
// See if the symlink exists, otherwise create it
if (!util::file_exists(key_path)) {
// 1) Create temp folder
// 2) Symlink it to temp link
// 3) Move symlink to <params>/d
std::string tmp_path = param_path + "/.tmp_XXXXXX";
// this should be OK since mkdtemp just replaces characters in place
char *tmp_dir = mkdtemp((char *)tmp_path.c_str());
if (tmp_dir == NULL) {
return false;
}
std::string link_path = std::string(tmp_dir) + ".link";
if (symlink(tmp_dir, link_path.c_str()) != 0) {
return false;
}
    // don't return false if it has already been created by another process
if (rename(link_path.c_str(), key_path.c_str()) != 0 && errno != EEXIST) {
return false;
}
}
return true;
}
std::string ensure_params_path(const std::string &prefix, const std::string &path = {}) {
std::string params_path = path.empty() ? Path::params() : path;
if (!create_params_path(params_path, params_path + prefix)) {
throw std::runtime_error(util::string_format(
"Failed to ensure params path, errno=%d, path=%s, param_prefix=%s",
errno, params_path.c_str(), prefix.c_str()));
}
return params_path;
}
class FileLock {
public:
FileLock(const std::string &fn) {
fd_ = HANDLE_EINTR(open(fn.c_str(), O_CREAT, 0775));
if (fd_ < 0 || HANDLE_EINTR(flock(fd_, LOCK_EX)) < 0) {
LOGE("Failed to lock file %s, errno=%d", fn.c_str(), errno);
}
}
~FileLock() { close(fd_); }
private:
int fd_ = -1;
};
} // namespace
Params::Params(const std::string &path) {
params_prefix = "/" + util::getenv("OPENPILOT_PREFIX", "d");
params_path = ensure_params_path(params_prefix, path);
}
Params::~Params() {
if (future.valid()) {
future.wait();
}
assert(queue.empty());
}
std::vector<std::string> Params::allKeys() const {
std::vector<std::string> ret;
for (auto &p : keys) {
ret.push_back(p.first);
}
return ret;
}
bool Params::checkKey(const std::string &key) {
return keys.find(key) != keys.end();
}
ParamKeyFlag Params::getKeyFlag(const std::string &key) {
return static_cast<ParamKeyFlag>(keys[key].flags);
}
ParamKeyType Params::getKeyType(const std::string &key) {
return keys[key].type;
}
std::optional<std::string> Params::getKeyDefaultValue(const std::string &key) {
return keys[key].default_value;
}
int Params::put(const char* key, const char* value, size_t value_size) {
// Information about safely and atomically writing a file: https://lwn.net/Articles/457667/
// 1) Create temp file
// 2) Write data to temp file
// 3) fsync() the temp file
// 4) rename the temp file to the real name
// 5) fsync() the containing directory
std::string tmp_path = params_path + "/.tmp_value_XXXXXX";
int tmp_fd = mkstemp((char*)tmp_path.c_str());
if (tmp_fd < 0) return -1;
int result = -1;
do {
// Write value to temp.
ssize_t bytes_written = HANDLE_EINTR(write(tmp_fd, value, value_size));
if (bytes_written < 0 || (size_t)bytes_written != value_size) {
result = -20;
break;
}
// fsync to force persist the changes.
if ((result = HANDLE_EINTR(fsync(tmp_fd))) < 0) break;
FileLock file_lock(params_path + "/.lock");
// Move temp into place.
if ((result = rename(tmp_path.c_str(), getParamPath(key).c_str())) < 0) break;
// fsync parent directory
result = fsync_dir(getParamPath());
} while (false);
close(tmp_fd);
if (result != 0) {
::unlink(tmp_path.c_str());
}
return result;
}
int Params::remove(const std::string &key) {
FileLock file_lock(params_path + "/.lock");
int result = unlink(getParamPath(key).c_str());
if (result != 0) {
return result;
}
return fsync_dir(getParamPath());
}
std::string Params::get(const std::string &key, bool block) {
if (!block) {
return util::read_file(getParamPath(key));
} else {
// blocking read until successful
params_do_exit = 0;
void (*prev_handler_sigint)(int) = std::signal(SIGINT, params_sig_handler);
void (*prev_handler_sigterm)(int) = std::signal(SIGTERM, params_sig_handler);
std::string value;
while (!params_do_exit) {
if (value = util::read_file(getParamPath(key)); !value.empty()) {
break;
}
util::sleep_for(100); // 0.1 s
}
std::signal(SIGINT, prev_handler_sigint);
std::signal(SIGTERM, prev_handler_sigterm);
return value;
}
}
std::map<std::string, std::string> Params::readAll() {
FileLock file_lock(params_path + "/.lock");
return util::read_files_in_dir(getParamPath());
}
void Params::clearAll(ParamKeyFlag key_flag) {
FileLock file_lock(params_path + "/.lock");
  // 1) delete params that have key_flag set
  // 2) delete files that are not defined in keys
if (DIR *d = opendir(getParamPath().c_str())) {
struct dirent *de = NULL;
while ((de = readdir(d))) {
if (de->d_type != DT_DIR) {
auto it = keys.find(de->d_name);
if (it == keys.end() || (it->second.flags & key_flag)) {
unlink(getParamPath(de->d_name).c_str());
}
}
}
closedir(d);
}
fsync_dir(getParamPath());
}
void Params::putNonBlocking(const std::string &key, const std::string &val) {
queue.push(std::make_pair(key, val));
// start thread on demand
if (!future.valid() || future.wait_for(std::chrono::milliseconds(0)) == std::future_status::ready) {
future = std::async(std::launch::async, &Params::asyncWriteThread, this);
}
}
void Params::asyncWriteThread() {
// TODO: write the latest one if a key has multiple values in the queue.
std::pair<std::string, std::string> p;
while (queue.try_pop(p, 0)) {
// Params::put is Thread-Safe
put(p.first, p.second);
}
}
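
For reference (an illustrative Python sketch, not part of this commit), the atomic-write sequence described in the comments of Params::put looks roughly like this; atomic_put and its arguments are hypothetical names, and the file lock used by the C++ code is omitted.

import os
import tempfile

def atomic_put(params_dir: str, key: str, value: bytes) -> None:
  # 1) create a temp file in the same directory, 2) write, 3) fsync the file,
  # 4) rename it over the real key, 5) fsync the containing directory
  fd, tmp_path = tempfile.mkstemp(dir=params_dir, prefix=".tmp_value_")
  try:
    os.write(fd, value)
    os.fsync(fd)
  finally:
    os.close(fd)
  os.rename(tmp_path, os.path.join(params_dir, key))
  dir_fd = os.open(params_dir, os.O_RDONLY)
  try:
    os.fsync(dir_fd)
  finally:
    os.close(dir_fd)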

@ -0,0 +1,90 @@
#pragma once
#include <future>
#include <map>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "common/queue.h"
enum ParamKeyFlag {
PERSISTENT = 0x02,
CLEAR_ON_MANAGER_START = 0x04,
CLEAR_ON_ONROAD_TRANSITION = 0x08,
CLEAR_ON_OFFROAD_TRANSITION = 0x10,
DONT_LOG = 0x20,
DEVELOPMENT_ONLY = 0x40,
CLEAR_ON_IGNITION_ON = 0x80,
ALL = 0xFFFFFFFF
};
enum ParamKeyType {
STRING = 0, // must be utf-8 decodable
BOOL = 1,
INT = 2,
FLOAT = 3,
TIME = 4, // ISO 8601
JSON = 5,
BYTES = 6
};
struct ParamKeyAttributes {
uint32_t flags;
ParamKeyType type;
std::optional<std::string> default_value = std::nullopt;
};
class Params {
public:
explicit Params(const std::string &path = {});
~Params();
// Not copyable.
Params(const Params&) = delete;
Params& operator=(const Params&) = delete;
std::vector<std::string> allKeys() const;
bool checkKey(const std::string &key);
ParamKeyFlag getKeyFlag(const std::string &key);
ParamKeyType getKeyType(const std::string &key);
std::optional<std::string> getKeyDefaultValue(const std::string &key);
inline std::string getParamPath(const std::string &key = {}) {
return params_path + params_prefix + (key.empty() ? "" : "/" + key);
}
// Delete a value
int remove(const std::string &key);
void clearAll(ParamKeyFlag flag);
// helpers for reading values
std::string get(const std::string &key, bool block = false);
inline bool getBool(const std::string &key, bool block = false) {
return get(key, block) == "1";
}
std::map<std::string, std::string> readAll();
// helpers for writing values
int put(const char *key, const char *val, size_t value_size);
inline int put(const std::string &key, const std::string &val) {
return put(key.c_str(), val.data(), val.size());
}
inline int putBool(const std::string &key, bool val) {
return put(key.c_str(), val ? "1" : "0", 1);
}
void putNonBlocking(const std::string &key, const std::string &val);
inline void putBoolNonBlocking(const std::string &key, bool val) {
putNonBlocking(key, val ? "1" : "0");
}
private:
void asyncWriteThread();
std::string params_path;
std::string params_prefix;
// for nonblocking write
std::future<void> future;
SafeQueue<std::pair<std::string, std::string>> queue;
};

@ -0,0 +1,19 @@
from openpilot.common.params_pyx import Params, ParamKeyFlag, ParamKeyType, UnknownKeyName
assert Params
assert ParamKeyFlag
assert ParamKeyType
assert UnknownKeyName
if __name__ == "__main__":
import sys
params = Params()
key = sys.argv[1]
assert params.check_key(key), f"unknown param: {key}"
if len(sys.argv) == 3:
val = sys.argv[2]
print(f"SET: {key} = {val}")
params.put(key, val)
elif len(sys.argv) == 2:
print(f"GET: {key} = {params.get(key)}")

@ -0,0 +1,132 @@
#pragma once
#include <string>
#include <unordered_map>
#include "cereal/gen/cpp/log.capnp.h"
inline static std::unordered_map<std::string, ParamKeyAttributes> keys = {
{"AccessToken", {CLEAR_ON_MANAGER_START | DONT_LOG, STRING}},
{"AdbEnabled", {PERSISTENT, BOOL}},
{"AlwaysOnDM", {PERSISTENT, BOOL}},
{"ApiCache_Device", {PERSISTENT, STRING}},
{"ApiCache_FirehoseStats", {PERSISTENT, JSON}},
{"AssistNowToken", {PERSISTENT, STRING}},
{"AthenadPid", {PERSISTENT, INT}},
{"AthenadUploadQueue", {PERSISTENT, JSON}},
{"AthenadRecentlyViewedRoutes", {PERSISTENT, STRING}},
{"BootCount", {PERSISTENT, INT}},
{"CalibrationParams", {PERSISTENT, BYTES}},
{"CameraDebugExpGain", {CLEAR_ON_MANAGER_START, STRING}},
{"CameraDebugExpTime", {CLEAR_ON_MANAGER_START, STRING}},
{"CarBatteryCapacity", {PERSISTENT, INT}},
{"CarParams", {CLEAR_ON_MANAGER_START | CLEAR_ON_ONROAD_TRANSITION, BYTES}},
{"CarParamsCache", {CLEAR_ON_MANAGER_START, BYTES}},
{"CarParamsPersistent", {PERSISTENT, BYTES}},
{"CarParamsPrevRoute", {PERSISTENT, BYTES}},
{"CompletedTrainingVersion", {PERSISTENT, STRING, "0"}},
{"ControlsReady", {CLEAR_ON_MANAGER_START | CLEAR_ON_ONROAD_TRANSITION, BOOL}},
{"CurrentBootlog", {PERSISTENT, STRING}},
{"CurrentRoute", {CLEAR_ON_MANAGER_START | CLEAR_ON_ONROAD_TRANSITION, STRING}},
{"DisableLogging", {CLEAR_ON_MANAGER_START | CLEAR_ON_ONROAD_TRANSITION, BOOL}},
{"DisablePowerDown", {PERSISTENT, BOOL}},
{"DisableUpdates", {PERSISTENT, BOOL}},
{"DisengageOnAccelerator", {PERSISTENT, BOOL, "0"}},
{"DongleId", {PERSISTENT, STRING}},
{"DoReboot", {CLEAR_ON_MANAGER_START, BOOL}},
{"DoShutdown", {CLEAR_ON_MANAGER_START, BOOL}},
{"DoUninstall", {CLEAR_ON_MANAGER_START, BOOL}},
{"DriverTooDistracted", {CLEAR_ON_MANAGER_START | CLEAR_ON_IGNITION_ON, BOOL}},
{"AlphaLongitudinalEnabled", {PERSISTENT | DEVELOPMENT_ONLY, BOOL}},
{"ExperimentalMode", {PERSISTENT, BOOL}},
{"ExperimentalModeConfirmed", {PERSISTENT, BOOL}},
{"FirmwareQueryDone", {CLEAR_ON_MANAGER_START | CLEAR_ON_ONROAD_TRANSITION, BOOL}},
{"ForcePowerDown", {PERSISTENT, BOOL}},
{"GitBranch", {PERSISTENT, STRING}},
{"GitCommit", {PERSISTENT, STRING}},
{"GitCommitDate", {PERSISTENT, STRING}},
{"GitDiff", {PERSISTENT, STRING}},
{"GithubSshKeys", {PERSISTENT, STRING}},
{"GithubUsername", {PERSISTENT, STRING}},
{"GitRemote", {PERSISTENT, STRING}},
{"GsmApn", {PERSISTENT, STRING}},
{"GsmMetered", {PERSISTENT, BOOL, "1"}},
{"GsmRoaming", {PERSISTENT, BOOL}},
{"HardwareSerial", {PERSISTENT, STRING}},
{"HasAcceptedTerms", {PERSISTENT, STRING, "0"}},
{"InstallDate", {PERSISTENT, TIME}},
{"IsDriverViewEnabled", {CLEAR_ON_MANAGER_START, BOOL}},
{"IsEngaged", {PERSISTENT, BOOL}},
{"IsLdwEnabled", {PERSISTENT, BOOL}},
{"IsMetric", {PERSISTENT, BOOL}},
{"IsOffroad", {CLEAR_ON_MANAGER_START, BOOL}},
{"IsOnroad", {PERSISTENT, BOOL}},
{"IsRhdDetected", {PERSISTENT, BOOL}},
{"IsReleaseBranch", {CLEAR_ON_MANAGER_START, BOOL}},
{"IsTakingSnapshot", {CLEAR_ON_MANAGER_START, BOOL}},
{"IsTestedBranch", {CLEAR_ON_MANAGER_START, BOOL}},
{"JoystickDebugMode", {CLEAR_ON_MANAGER_START | CLEAR_ON_OFFROAD_TRANSITION, BOOL}},
{"LanguageSetting", {PERSISTENT, STRING, "main_en"}},
{"LastAthenaPingTime", {CLEAR_ON_MANAGER_START, INT}},
{"LastGPSPosition", {PERSISTENT, STRING}},
{"LastManagerExitReason", {CLEAR_ON_MANAGER_START, STRING}},
{"LastOffroadStatusPacket", {CLEAR_ON_MANAGER_START | CLEAR_ON_OFFROAD_TRANSITION, JSON}},
{"LastPowerDropDetected", {CLEAR_ON_MANAGER_START, STRING}},
{"LastUpdateException", {CLEAR_ON_MANAGER_START, STRING}},
{"LastUpdateRouteCount", {PERSISTENT, INT, "0"}},
{"LastUpdateTime", {PERSISTENT, TIME}},
{"LastUpdateUptimeOnroad", {PERSISTENT, FLOAT, "0.0"}},
{"LiveDelay", {PERSISTENT, BYTES}},
{"LiveParameters", {PERSISTENT, JSON}},
{"LiveParametersV2", {PERSISTENT, BYTES}},
{"LiveTorqueParameters", {PERSISTENT | DONT_LOG, BYTES}},
{"LocationFilterInitialState", {PERSISTENT, BYTES}},
{"LongitudinalManeuverMode", {CLEAR_ON_MANAGER_START | CLEAR_ON_OFFROAD_TRANSITION, BOOL}},
{"LongitudinalPersonality", {PERSISTENT, INT, std::to_string(static_cast<int>(cereal::LongitudinalPersonality::STANDARD))}},
{"NetworkMetered", {PERSISTENT, BOOL}},
{"ObdMultiplexingChanged", {CLEAR_ON_MANAGER_START | CLEAR_ON_ONROAD_TRANSITION, BOOL}},
{"ObdMultiplexingEnabled", {CLEAR_ON_MANAGER_START | CLEAR_ON_ONROAD_TRANSITION, BOOL}},
{"Offroad_CarUnrecognized", {CLEAR_ON_MANAGER_START | CLEAR_ON_ONROAD_TRANSITION, JSON}},
{"Offroad_ConnectivityNeeded", {CLEAR_ON_MANAGER_START, JSON}},
{"Offroad_ConnectivityNeededPrompt", {CLEAR_ON_MANAGER_START, JSON}},
{"Offroad_ExcessiveActuation", {PERSISTENT, JSON}},
{"Offroad_IsTakingSnapshot", {CLEAR_ON_MANAGER_START, JSON}},
{"Offroad_NeosUpdate", {CLEAR_ON_MANAGER_START, JSON}},
{"Offroad_NoFirmware", {CLEAR_ON_MANAGER_START | CLEAR_ON_ONROAD_TRANSITION, JSON}},
{"Offroad_Recalibration", {CLEAR_ON_MANAGER_START | CLEAR_ON_ONROAD_TRANSITION, JSON}},
{"Offroad_StorageMissing", {CLEAR_ON_MANAGER_START, JSON}},
{"Offroad_TemperatureTooHigh", {CLEAR_ON_MANAGER_START, JSON}},
{"Offroad_UnregisteredHardware", {CLEAR_ON_MANAGER_START, JSON}},
{"Offroad_UpdateFailed", {CLEAR_ON_MANAGER_START, JSON}},
{"OnroadCycleRequested", {CLEAR_ON_MANAGER_START, BOOL}},
{"OpenpilotEnabledToggle", {PERSISTENT, BOOL, "1"}},
{"PandaHeartbeatLost", {CLEAR_ON_MANAGER_START | CLEAR_ON_OFFROAD_TRANSITION, BOOL}},
{"PandaSomResetTriggered", {CLEAR_ON_MANAGER_START | CLEAR_ON_OFFROAD_TRANSITION, BOOL}},
{"PandaSignatures", {CLEAR_ON_MANAGER_START, BYTES}},
{"PrimeType", {PERSISTENT, INT}},
{"RecordAudio", {PERSISTENT, BOOL}},
{"RecordAudioFeedback", {PERSISTENT, BOOL, "0"}},
{"RecordFront", {PERSISTENT, BOOL}},
{"RecordFrontLock", {PERSISTENT, BOOL}}, // for the internal fleet
{"SecOCKey", {PERSISTENT | DONT_LOG, STRING}},
{"RouteCount", {PERSISTENT, INT, "0"}},
{"SnoozeUpdate", {CLEAR_ON_MANAGER_START | CLEAR_ON_OFFROAD_TRANSITION, BOOL}},
{"SshEnabled", {PERSISTENT, BOOL}},
{"TermsVersion", {PERSISTENT, STRING}},
{"TrainingVersion", {PERSISTENT, STRING}},
{"UbloxAvailable", {PERSISTENT, BOOL}},
{"UpdateAvailable", {CLEAR_ON_MANAGER_START | CLEAR_ON_ONROAD_TRANSITION, BOOL}},
{"UpdateFailedCount", {CLEAR_ON_MANAGER_START, INT}},
{"UpdaterAvailableBranches", {PERSISTENT, STRING}},
{"UpdaterCurrentDescription", {CLEAR_ON_MANAGER_START, STRING}},
{"UpdaterCurrentReleaseNotes", {CLEAR_ON_MANAGER_START, BYTES}},
{"UpdaterFetchAvailable", {CLEAR_ON_MANAGER_START, BOOL}},
{"UpdaterNewDescription", {CLEAR_ON_MANAGER_START, STRING}},
{"UpdaterNewReleaseNotes", {CLEAR_ON_MANAGER_START, BYTES}},
{"UpdaterState", {CLEAR_ON_MANAGER_START, STRING}},
{"UpdaterTargetBranch", {CLEAR_ON_MANAGER_START, STRING}},
{"UpdaterLastFetchTime", {PERSISTENT, TIME}},
{"UptimeOffroad", {PERSISTENT, FLOAT, "0.0"}},
{"UptimeOnroad", {PERSISTENT, FLOAT, "0.0"}},
{"Version", {PERSISTENT, STRING}},
};

@ -0,0 +1,196 @@
# distutils: language = c++
# cython: language_level = 3
import builtins
import datetime
import json
from libcpp cimport bool
from libcpp.string cimport string
from libcpp.vector cimport vector
from libcpp.optional cimport optional
from openpilot.common.swaglog import cloudlog
cdef extern from "common/params.h":
cpdef enum ParamKeyFlag:
PERSISTENT
CLEAR_ON_MANAGER_START
CLEAR_ON_ONROAD_TRANSITION
CLEAR_ON_OFFROAD_TRANSITION
DEVELOPMENT_ONLY
CLEAR_ON_IGNITION_ON
ALL
cpdef enum ParamKeyType:
STRING
BOOL
INT
FLOAT
TIME
JSON
BYTES
cdef cppclass c_Params "Params":
c_Params(string) except + nogil
string get(string, bool) nogil
bool getBool(string, bool) nogil
int remove(string) nogil
int put(string, string) nogil
void putNonBlocking(string, string) nogil
void putBoolNonBlocking(string, bool) nogil
int putBool(string, bool) nogil
bool checkKey(string) nogil
ParamKeyType getKeyType(string) nogil
optional[string] getKeyDefaultValue(string) nogil
string getParamPath(string) nogil
void clearAll(ParamKeyFlag)
vector[string] allKeys()
PYTHON_2_CPP = {
(str, STRING): lambda v: v,
(builtins.bool, BOOL): lambda v: "1" if v else "0",
(int, INT): str,
(float, FLOAT): str,
(datetime.datetime, TIME): lambda v: v.isoformat(),
(dict, JSON): json.dumps,
(list, JSON): json.dumps,
(bytes, BYTES): lambda v: v,
}
CPP_2_PYTHON = {
STRING: lambda v: v.decode("utf-8"),
BOOL: lambda v: v == b"1",
INT: int,
FLOAT: float,
TIME: lambda v: datetime.datetime.fromisoformat(v.decode("utf-8")),
JSON: json.loads,
BYTES: lambda v: v,
}
def ensure_bytes(v):
return v.encode() if isinstance(v, str) else v
class UnknownKeyName(Exception):
pass
cdef class Params:
cdef c_Params* p
cdef str d
def __cinit__(self, d=""):
cdef string path = <string>d.encode()
with nogil:
self.p = new c_Params(path)
self.d = d
def __reduce__(self):
return (type(self), (self.d,))
def __dealloc__(self):
del self.p
def clear_all(self, tx_flag=ParamKeyFlag.ALL):
self.p.clearAll(tx_flag)
def check_key(self, key):
key = ensure_bytes(key)
if not self.p.checkKey(key):
raise UnknownKeyName(key)
return key
def python2cpp(self, proposed_type, expected_type, value, key):
cast = PYTHON_2_CPP.get((proposed_type, expected_type))
if cast:
return cast(value)
raise TypeError(f"Type mismatch while writing param {key}: {proposed_type=} {expected_type=} {value=}")
def _cpp2python(self, t, value, default, key):
if value is None:
return None
try:
return CPP_2_PYTHON[t](value)
except (KeyError, TypeError, ValueError):
cloudlog.warning(f"Failed to cast param {key} with {value=} from type {t=}")
return self._cpp2python(t, default, None, key)
def get(self, key, bool block=False, bool return_default=False):
cdef string k = self.check_key(key)
cdef ParamKeyType t = self.p.getKeyType(k)
cdef optional[string] default = self.p.getKeyDefaultValue(k)
cdef string val
with nogil:
val = self.p.get(k, block)
default_val = (default.value() if default.has_value() else None) if return_default else None
if val == b"":
if block:
# If we got no value while running in blocked mode
# it means we got an interrupt while waiting
raise KeyboardInterrupt
else:
return self._cpp2python(t, default_val, None, key)
return self._cpp2python(t, val, default_val, key)
def get_bool(self, key, bool block=False):
cdef string k = self.check_key(key)
cdef bool r
with nogil:
r = self.p.getBool(k, block)
return r
def _put_cast(self, key, dat):
cdef string k = self.check_key(key)
cdef ParamKeyType t = self.p.getKeyType(k)
return ensure_bytes(self.python2cpp(type(dat), t, dat, key))
def put(self, key, dat):
"""
Warning: This function blocks until the param is written to disk!
In very rare cases this can take over a second, and your code will hang.
    Use put_nonblocking or put_bool_nonblocking in time-sensitive code, but
in general try to avoid writing params as much as possible.
"""
cdef string k = self.check_key(key)
cdef string dat_bytes = self._put_cast(key, dat)
with nogil:
self.p.put(k, dat_bytes)
def put_bool(self, key, bool val):
cdef string k = self.check_key(key)
with nogil:
self.p.putBool(k, val)
def put_nonblocking(self, key, dat):
cdef string k = self.check_key(key)
cdef string dat_bytes = self._put_cast(key, dat)
with nogil:
self.p.putNonBlocking(k, dat_bytes)
def put_bool_nonblocking(self, key, bool val):
cdef string k = self.check_key(key)
with nogil:
self.p.putBoolNonBlocking(k, val)
def remove(self, key):
cdef string k = self.check_key(key)
with nogil:
self.p.remove(k)
def get_param_path(self, key=""):
cdef string key_bytes = ensure_bytes(key)
return self.p.getParamPath(key_bytes).decode("utf-8")
def get_type(self, key):
return self.p.getKeyType(self.check_key(key))
def all_keys(self):
return self.p.allKeys()
def get_default_value(self, key):
cdef string k = self.check_key(key)
cdef ParamKeyType t = self.p.getKeyType(k)
cdef optional[string] default = self.p.getKeyDefaultValue(k)
return self._cpp2python(t, default.value(), None, key) if default.has_value() else None
def cpp2python(self, key, value):
cdef string k = self.check_key(key)
cdef ParamKeyType t = self.p.getKeyType(k)
return self._cpp2python(t, value, None, key)
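
Usage sketch (illustrative, not part of this commit; writes to the local params directory): values round-trip as Python objects according to each key's declared type, e.g. BootCount is an INT key and IsMetric a BOOL key.

from openpilot.common.params import Params

p = Params()
p.put("BootCount", 7)          # stored on disk as "7"
p.put_bool("IsMetric", True)   # stored on disk as "1"
assert p.get("BootCount") == 7
assert p.get_bool("IsMetric")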

@ -0,0 +1,64 @@
import numpy as np
from numbers import Number
class PIDController:
def __init__(self, k_p, k_i, k_f=0., k_d=0., pos_limit=1e308, neg_limit=-1e308, rate=100):
self._k_p = k_p
self._k_i = k_i
self._k_d = k_d
self.k_f = k_f # feedforward gain
if isinstance(self._k_p, Number):
self._k_p = [[0], [self._k_p]]
if isinstance(self._k_i, Number):
self._k_i = [[0], [self._k_i]]
if isinstance(self._k_d, Number):
self._k_d = [[0], [self._k_d]]
self.set_limits(pos_limit, neg_limit)
self.i_rate = 1.0 / rate
self.speed = 0.0
self.reset()
@property
def k_p(self):
return np.interp(self.speed, self._k_p[0], self._k_p[1])
@property
def k_i(self):
return np.interp(self.speed, self._k_i[0], self._k_i[1])
@property
def k_d(self):
return np.interp(self.speed, self._k_d[0], self._k_d[1])
def reset(self):
self.p = 0.0
self.i = 0.0
self.d = 0.0
self.f = 0.0
self.control = 0
def set_limits(self, pos_limit, neg_limit):
self.pos_limit = pos_limit
self.neg_limit = neg_limit
def update(self, error, error_rate=0.0, speed=0.0, feedforward=0., freeze_integrator=False):
self.speed = speed
self.p = float(error) * self.k_p
self.f = feedforward * self.k_f
self.d = error_rate * self.k_d
if not freeze_integrator:
i = self.i + error * self.k_i * self.i_rate
# Don't allow windup if already clipping
test_control = self.p + i + self.d + self.f
i_upperbound = self.i if test_control > self.pos_limit else self.pos_limit
i_lowerbound = self.i if test_control < self.neg_limit else self.neg_limit
self.i = np.clip(i, i_lowerbound, i_upperbound)
control = self.p + self.i + self.d + self.f
self.control = np.clip(control, self.neg_limit, self.pos_limit)
return self.control
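
Usage sketch (illustrative, not part of this commit; the openpilot.common.pid module path is assumed and the gains and limits below are arbitrary): gains can be plain numbers or speed-indexed breakpoint tables, and update() returns the clipped control output.

from openpilot.common.pid import PIDController

pid = PIDController(k_p=0.2, k_i=0.05, k_d=0.0, pos_limit=1.0, neg_limit=-1.0, rate=100)
target, measured, speed = 1.0, 0.8, 20.0
u = pid.update(error=target - measured, speed=speed)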

@ -0,0 +1,39 @@
#pragma once
#include <cassert>
#include <string>
#include "common/params.h"
#include "common/util.h"
#include "system/hardware/hw.h"
class OpenpilotPrefix {
public:
OpenpilotPrefix(std::string prefix = {}) {
if (prefix.empty()) {
prefix = util::random_string(15);
}
msgq_path = Path::shm_path() + "/" + prefix;
bool ret = util::create_directories(msgq_path, 0777);
assert(ret);
setenv("OPENPILOT_PREFIX", prefix.c_str(), 1);
}
~OpenpilotPrefix() {
auto param_path = Params().getParamPath();
if (util::file_exists(param_path)) {
std::string real_path = util::readlink(param_path);
system(util::string_format("rm %s -rf", real_path.c_str()).c_str());
unlink(param_path.c_str());
}
if (getenv("COMMA_CACHE") == nullptr) {
system(util::string_format("rm %s -rf", Path::download_cache_root().c_str()).c_str());
}
system(util::string_format("rm %s -rf", Path::comma_home().c_str()).c_str());
system(util::string_format("rm %s -rf", msgq_path.c_str()).c_str());
unsetenv("OPENPILOT_PREFIX");
}
private:
std::string msgq_path;
};

@ -0,0 +1,59 @@
import os
import shutil
import uuid
from openpilot.common.params import Params
from openpilot.system.hardware import PC
from openpilot.system.hardware.hw import Paths
from openpilot.system.hardware.hw import DEFAULT_DOWNLOAD_CACHE_ROOT
class OpenpilotPrefix:
  def __init__(self, prefix: str | None = None, create_dirs_on_enter: bool = True, clean_dirs_on_exit: bool = True, shared_download_cache: bool = False):
self.prefix = prefix if prefix else str(uuid.uuid4().hex[0:15])
self.msgq_path = os.path.join(Paths.shm_path(), self.prefix)
self.create_dirs_on_enter = create_dirs_on_enter
self.clean_dirs_on_exit = clean_dirs_on_exit
self.shared_download_cache = shared_download_cache
def __enter__(self):
self.original_prefix = os.environ.get('OPENPILOT_PREFIX', None)
os.environ['OPENPILOT_PREFIX'] = self.prefix
if self.create_dirs_on_enter:
self.create_dirs()
if self.shared_download_cache:
os.environ["COMMA_CACHE"] = DEFAULT_DOWNLOAD_CACHE_ROOT
return self
def __exit__(self, exc_type, exc_obj, exc_tb):
if self.clean_dirs_on_exit:
self.clean_dirs()
try:
del os.environ['OPENPILOT_PREFIX']
if self.original_prefix is not None:
os.environ['OPENPILOT_PREFIX'] = self.original_prefix
except KeyError:
pass
return False
def create_dirs(self):
try:
os.mkdir(self.msgq_path)
except FileExistsError:
pass
os.makedirs(Paths.log_root(), exist_ok=True)
def clean_dirs(self):
symlink_path = Params().get_param_path()
if os.path.exists(symlink_path):
shutil.rmtree(os.path.realpath(symlink_path), ignore_errors=True)
os.remove(symlink_path)
shutil.rmtree(self.msgq_path, ignore_errors=True)
if PC:
shutil.rmtree(Paths.log_root(), ignore_errors=True)
if not os.environ.get("COMMA_CACHE", False):
shutil.rmtree(Paths.download_cache_root(), ignore_errors=True)
shutil.rmtree(Paths.comma_home(), ignore_errors=True)
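
Usage sketch (illustrative, not part of this commit; the openpilot.common.prefix module path is assumed from the file layout): the context manager namespaces msgq and params under a temporary prefix so tests do not touch real state, and cleans up on exit.

from openpilot.common.params import Params
from openpilot.common.prefix import OpenpilotPrefix

with OpenpilotPrefix():
  Params().put_bool("IsMetric", True)  # written under the temporary prefix
# prefix directories are removed on exit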

@ -0,0 +1,52 @@
#pragma once
#include <condition_variable>
#include <mutex>
#include <queue>
template <class T>
class SafeQueue {
public:
SafeQueue() = default;
void push(const T& v) {
{
std::unique_lock lk(m);
q.push(v);
}
cv.notify_one();
}
T pop() {
std::unique_lock lk(m);
cv.wait(lk, [this] { return !q.empty(); });
T v = q.front();
q.pop();
return v;
}
bool try_pop(T& v, int timeout_ms = 0) {
std::unique_lock lk(m);
if (!cv.wait_for(lk, std::chrono::milliseconds(timeout_ms), [this] { return !q.empty(); })) {
return false;
}
v = q.front();
q.pop();
return true;
}
bool empty() const {
std::scoped_lock lk(m);
return q.empty();
}
size_t size() const {
std::scoped_lock lk(m);
return q.size();
}
private:
mutable std::mutex m;
std::condition_variable cv;
std::queue<T> q;
};

@ -0,0 +1,40 @@
#include "common/ratekeeper.h"
#include <algorithm>
#include "common/swaglog.h"
#include "common/timing.h"
#include "common/util.h"
RateKeeper::RateKeeper(const std::string &name, float rate, float print_delay_threshold)
: name(name),
print_delay_threshold(std::max(0.f, print_delay_threshold)) {
interval = 1 / rate;
last_monitor_time = seconds_since_boot();
next_frame_time = last_monitor_time + interval;
}
bool RateKeeper::keepTime() {
bool lagged = monitorTime();
if (remaining_ > 0) {
util::sleep_for(remaining_ * 1000);
}
return lagged;
}
bool RateKeeper::monitorTime() {
++frame_;
last_monitor_time = seconds_since_boot();
remaining_ = next_frame_time - last_monitor_time;
bool lagged = remaining_ < 0;
if (lagged) {
if (print_delay_threshold > 0 && remaining_ < -print_delay_threshold) {
LOGW("%s lagging by %.2f ms", name.c_str(), -remaining_ * 1000);
}
next_frame_time = last_monitor_time + interval;
} else {
next_frame_time += interval;
}
return lagged;
}

@ -0,0 +1,23 @@
#pragma once
#include <cstdint>
#include <string>
class RateKeeper {
public:
RateKeeper(const std::string &name, float rate, float print_delay_threshold = 0);
~RateKeeper() {}
bool keepTime();
bool monitorTime();
inline uint64_t frame() const { return frame_; }
inline double remaining() const { return remaining_; }
private:
double interval;
double next_frame_time;
double last_monitor_time;
double remaining_ = 0;
float print_delay_threshold = 0;
uint64_t frame_ = 0;
std::string name;
};

@ -0,0 +1,96 @@
"""Utilities for reading real time clocks and keeping soft real time constraints."""
import gc
import os
import sys
import time
from setproctitle import getproctitle
from openpilot.common.util import MovingAverage
from openpilot.system.hardware import PC
# time step for each process
DT_CTRL = 0.01 # controlsd
DT_MDL = 0.05 # model
DT_HW = 0.5 # hardwared and manager
DT_DMON = 0.05 # driver monitoring
class Priority:
# CORE 2
# - modeld = 55
# - camerad = 54
CTRL_LOW = 51 # plannerd & radard
# CORE 3
# - pandad = 55
CTRL_HIGH = 53
def set_core_affinity(cores: list[int]) -> None:
if sys.platform == 'linux' and not PC:
os.sched_setaffinity(0, cores)
def config_realtime_process(cores: int | list[int], priority: int) -> None:
gc.disable()
if sys.platform == 'linux' and not PC:
os.sched_setscheduler(0, os.SCHED_FIFO, os.sched_param(priority))
c = cores if isinstance(cores, list) else [cores, ]
set_core_affinity(c)
class Ratekeeper:
def __init__(self, rate: float, print_delay_threshold: float | None = 0.0) -> None:
"""Rate in Hz for ratekeeping. print_delay_threshold must be nonnegative."""
self._interval = 1. / rate
self._print_delay_threshold = print_delay_threshold
self._frame = 0
self._remaining = 0.0
self._process_name = getproctitle()
self._last_monitor_time = -1.
self._next_frame_time = -1.
self.avg_dt = MovingAverage(100)
self.avg_dt.add_value(self._interval)
@property
def frame(self) -> int:
return self._frame
@property
def remaining(self) -> float:
return self._remaining
@property
def lagging(self) -> bool:
expected_dt = self._interval * (1 / 0.9)
return self.avg_dt.get_average() > expected_dt
# Maintain loop rate by calling this at the end of each loop
def keep_time(self) -> bool:
lagged = self.monitor_time()
if self._remaining > 0:
time.sleep(self._remaining)
return lagged
# Monitors the cumulative lag, but does not enforce a rate
def monitor_time(self) -> bool:
if self._last_monitor_time < 0:
self._next_frame_time = time.monotonic() + self._interval
self._last_monitor_time = time.monotonic()
prev = self._last_monitor_time
self._last_monitor_time = time.monotonic()
self.avg_dt.add_value(self._last_monitor_time - prev)
lagged = False
remaining = self._next_frame_time - time.monotonic()
self._next_frame_time += self._interval
if self._print_delay_threshold is not None and remaining < -self._print_delay_threshold:
print(f"{self._process_name} lagging by {-remaining * 1000:.2f} ms")
lagged = True
self._frame += 1
self._remaining = remaining
return lagged
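
Usage sketch (illustrative, not part of this commit): a loop pinned to 100 Hz; keep_time() sleeps for whatever remains of the frame and reports whether the loop lagged.

from openpilot.common.realtime import Ratekeeper

rk = Ratekeeper(100, print_delay_threshold=0.005)
for _ in range(1000):
  # do the per-frame work here
  lagged = rk.keep_time()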

@ -0,0 +1,30 @@
import time
import functools
from openpilot.common.swaglog import cloudlog
def retry(attempts=3, delay=1.0, ignore_failure=False):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
for _ in range(attempts):
try:
return func(*args, **kwargs)
except Exception:
cloudlog.exception(f"{func.__name__} failed, trying again")
time.sleep(delay)
if ignore_failure:
cloudlog.error(f"{func.__name__} failed after retry")
else:
raise Exception(f"{func.__name__} failed after retry")
return wrapper
return decorator
if __name__ == "__main__":
@retry(attempts=10)
def abc():
raise ValueError("abc failed :(")
abc()

@ -0,0 +1,28 @@
import subprocess
from contextlib import contextmanager
from subprocess import Popen, PIPE, TimeoutExpired
def run_cmd(cmd: list[str], cwd=None, env=None) -> str:
return subprocess.check_output(cmd, encoding='utf8', cwd=cwd, env=env).strip()
def run_cmd_default(cmd: list[str], default: str = "", cwd=None, env=None) -> str:
try:
return run_cmd(cmd, cwd=cwd, env=env)
except subprocess.CalledProcessError:
return default
@contextmanager
def managed_proc(cmd: list[str], env: dict[str, str]):
proc = Popen(cmd, env=env, stdout=PIPE, stderr=PIPE)
try:
yield proc
finally:
if proc.poll() is None:
proc.terminate()
try:
proc.wait(timeout=5)
except TimeoutExpired:
proc.kill()
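
Usage sketch (illustrative, not part of this commit; the openpilot.common.run module path is assumed from the file layout): managed_proc terminates the child on exit, killing it after a 5 s grace period, and run_cmd_default swallows command failures.

import os
from openpilot.common.run import managed_proc, run_cmd_default

with managed_proc(["sleep", "30"], env=dict(os.environ)) as proc:
  pass  # interact with proc.stdout / proc.stderr here

branch = run_cmd_default(["git", "rev-parse", "--abbrev-ref", "HEAD"], default="unknown")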

@ -0,0 +1,54 @@
import numpy as np
def get_kalman_gain(dt, A, C, Q, R, iterations=100):
P = np.zeros_like(Q)
for _ in range(iterations):
P = A.dot(P).dot(A.T) + dt * Q
S = C.dot(P).dot(C.T) + R
K = P.dot(C.T).dot(np.linalg.inv(S))
P = (np.eye(len(P)) - K.dot(C)).dot(P)
return K
class KF1D:
  # this Kalman filter assumes a constant (steady-state) covariance matrix, so calculations are much simpler
# the Kalman gain also needs to be precomputed using the control module
def __init__(self, x0, A, C, K):
self.x0_0 = x0[0][0]
self.x1_0 = x0[1][0]
self.A0_0 = A[0][0]
self.A0_1 = A[0][1]
self.A1_0 = A[1][0]
self.A1_1 = A[1][1]
self.C0_0 = C[0]
self.C0_1 = C[1]
self.K0_0 = K[0][0]
self.K1_0 = K[1][0]
self.A_K_0 = self.A0_0 - self.K0_0 * self.C0_0
self.A_K_1 = self.A0_1 - self.K0_0 * self.C0_1
self.A_K_2 = self.A1_0 - self.K1_0 * self.C0_0
self.A_K_3 = self.A1_1 - self.K1_0 * self.C0_1
    # K matrix needs to be pre-computed as follows:
# import control
# (x, l, K) = control.dare(np.transpose(self.A), np.transpose(self.C), Q, R)
# self.K = np.transpose(K)
def update(self, meas):
#self.x = np.dot(self.A_K, self.x) + np.dot(self.K, meas)
x0_0 = self.A_K_0 * self.x0_0 + self.A_K_1 * self.x1_0 + self.K0_0 * meas
x1_0 = self.A_K_2 * self.x0_0 + self.A_K_3 * self.x1_0 + self.K1_0 * meas
self.x0_0 = x0_0
self.x1_0 = x1_0
return [self.x0_0, self.x1_0]
@property
def x(self):
return [[self.x0_0], [self.x1_0]]
def set_x(self, x):
self.x0_0 = x[0][0]
self.x1_0 = x[1][0]
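
Usage sketch (illustrative, not part of this commit; the noise matrices are arbitrary): a constant-velocity model whose steady-state gain is precomputed with get_kalman_gain and then frozen inside KF1D.

import numpy as np
from openpilot.common.simple_kalman import KF1D, get_kalman_gain

dt = 0.05
A = np.array([[1.0, dt], [0.0, 1.0]])   # position/velocity transition
C = np.array([[1.0, 0.0]])              # only position is measured
Q = np.diag([0.1, 1.0])                 # process noise (arbitrary)
R = np.array([[1.0]])                   # measurement noise (arbitrary)
K = get_kalman_gain(dt, A, C, Q, R)

kf = KF1D(x0=[[0.0], [0.0]], A=A.tolist(), C=[1.0, 0.0], K=K.tolist())
for meas in (1.0, 1.2, 0.9):
  pos, vel = kf.update(meas)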

@ -0,0 +1,52 @@
import os
import subprocess
from openpilot.common.basedir import BASEDIR
class Spinner:
def __init__(self):
try:
self.spinner_proc = subprocess.Popen(["./spinner.py"],
stdin=subprocess.PIPE,
cwd=os.path.join(BASEDIR, "system", "ui"),
close_fds=True)
except OSError:
self.spinner_proc = None
def __enter__(self):
return self
def update(self, spinner_text: str):
if self.spinner_proc is not None:
self.spinner_proc.stdin.write(spinner_text.encode('utf8') + b"\n")
try:
self.spinner_proc.stdin.flush()
except BrokenPipeError:
pass
def update_progress(self, cur: float, total: float):
self.update(str(round(100 * cur / total)))
def close(self):
if self.spinner_proc is not None:
self.spinner_proc.kill()
try:
self.spinner_proc.communicate(timeout=2.)
except subprocess.TimeoutExpired:
print("WARNING: failed to kill spinner")
self.spinner_proc = None
def __del__(self):
self.close()
def __exit__(self, exc_type, exc_value, traceback):
self.close()
if __name__ == "__main__":
import time
with Spinner() as s:
s.update("Spinner text")
time.sleep(5.0)
print("gone")
time.sleep(5.0)

@ -0,0 +1,73 @@
import numpy as np
class RunningStat:
# tracks realtime mean and standard deviation without storing any data
def __init__(self, priors=None, max_trackable=-1):
self.max_trackable = max_trackable
if priors is not None:
# initialize from history
self.M = priors[0]
self.S = priors[1]
self.n = priors[2]
self.M_last = self.M
self.S_last = self.S
else:
self.reset()
def reset(self):
self.M = 0.
self.S = 0.
self.M_last = 0.
self.S_last = 0.
self.n = 0
def push_data(self, new_data):
# short term memory hack
if self.max_trackable < 0 or self.n < self.max_trackable:
self.n += 1
if self.n == 0:
self.M_last = new_data
self.M = self.M_last
self.S_last = 0.
else:
self.M = self.M_last + (new_data - self.M_last) / self.n
self.S = self.S_last + (new_data - self.M_last) * (new_data - self.M)
self.M_last = self.M
self.S_last = self.S
def mean(self):
return self.M
def variance(self):
if self.n >= 2:
return self.S / (self.n - 1.)
else:
return 0
def std(self):
return np.sqrt(self.variance())
def params_to_save(self):
return [self.M, self.S, self.n]
class RunningStatFilter:
def __init__(self, raw_priors=None, filtered_priors=None, max_trackable=-1):
self.raw_stat = RunningStat(raw_priors, -1)
self.filtered_stat = RunningStat(filtered_priors, max_trackable)
def reset(self):
self.raw_stat.reset()
self.filtered_stat.reset()
def push_and_update(self, new_data):
_std_last = self.raw_stat.std()
self.raw_stat.push_data(new_data)
_delta_std = self.raw_stat.std() - _std_last
if _delta_std <= 0:
self.filtered_stat.push_data(new_data)
else:
pass
# self.filtered_stat.push_data(self.filtered_stat.mean())
# class SequentialBayesian():
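
Usage sketch (illustrative, not part of this commit; the openpilot.common.stat_live module path is assumed from the file layout): RunningStat keeps a Welford-style running mean and sample variance without storing the samples.

from openpilot.common.stat_live import RunningStat

stat = RunningStat()
for x in (1.0, 2.0, 3.0, 4.0):
  stat.push_data(x)
print(stat.mean(), stat.std())  # 2.5 and ~1.29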

@ -0,0 +1,155 @@
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include "common/swaglog.h"
#include <cassert>
#include <limits>
#include <mutex>
#include <string>
#include <zmq.h>
#include <stdarg.h>
#include "third_party/json11/json11.hpp"
#include "common/version.h"
#include "system/hardware/hw.h"
class SwaglogState {
public:
SwaglogState() {
zctx = zmq_ctx_new();
sock = zmq_socket(zctx, ZMQ_PUSH);
// Timeout on shutdown for messages to be received by the logging process
int timeout = 100;
zmq_setsockopt(sock, ZMQ_LINGER, &timeout, sizeof(timeout));
zmq_connect(sock, Path::swaglog_ipc().c_str());
// workaround for https://github.com/dropbox/json11/issues/38
setlocale(LC_NUMERIC, "C");
print_level = CLOUDLOG_WARNING;
if (const char* print_lvl = getenv("LOGPRINT")) {
if (strcmp(print_lvl, "debug") == 0) {
print_level = CLOUDLOG_DEBUG;
} else if (strcmp(print_lvl, "info") == 0) {
print_level = CLOUDLOG_INFO;
} else if (strcmp(print_lvl, "warning") == 0) {
print_level = CLOUDLOG_WARNING;
}
}
ctx_j = json11::Json::object{};
if (char* dongle_id = getenv("DONGLE_ID")) {
ctx_j["dongle_id"] = dongle_id;
}
if (char* git_origin = getenv("GIT_ORIGIN")) {
ctx_j["origin"] = git_origin;
}
if (char* git_branch = getenv("GIT_BRANCH")) {
ctx_j["branch"] = git_branch;
}
if (char* git_commit = getenv("GIT_COMMIT")) {
ctx_j["commit"] = git_commit;
}
if (char* daemon_name = getenv("MANAGER_DAEMON")) {
ctx_j["daemon"] = daemon_name;
}
ctx_j["version"] = COMMA_VERSION;
ctx_j["dirty"] = !getenv("CLEAN");
ctx_j["device"] = Hardware::get_name();
}
~SwaglogState() {
zmq_close(sock);
zmq_ctx_destroy(zctx);
}
void log(int levelnum, const char* filename, int lineno, const char* func, const char* msg, const std::string& log_s) {
std::lock_guard lk(lock);
if (levelnum >= print_level) {
printf("%s: %s\n", filename, msg);
}
zmq_send(sock, log_s.data(), log_s.length(), ZMQ_NOBLOCK);
}
std::mutex lock;
void* zctx = nullptr;
void* sock = nullptr;
int print_level;
json11::Json::object ctx_j;
};
bool LOG_TIMESTAMPS = getenv("LOG_TIMESTAMPS");
uint32_t NO_FRAME_ID = std::numeric_limits<uint32_t>::max();
static void cloudlog_common(int levelnum, const char* filename, int lineno, const char* func,
char* msg_buf, const json11::Json::object &msg_j={}) {
static SwaglogState s;
json11::Json::object log_j = json11::Json::object {
{"ctx", s.ctx_j},
{"levelnum", levelnum},
{"filename", filename},
{"lineno", lineno},
{"funcname", func},
{"created", seconds_since_epoch()}
};
if (msg_j.empty()) {
log_j["msg"] = msg_buf;
} else {
log_j["msg"] = msg_j;
}
std::string log_s;
log_s += (char)levelnum;
((json11::Json)log_j).dump(log_s);
s.log(levelnum, filename, lineno, func, msg_buf, log_s);
free(msg_buf);
}
void cloudlog_e(int levelnum, const char* filename, int lineno, const char* func,
const char* fmt, ...) {
va_list args;
va_start(args, fmt);
char* msg_buf = nullptr;
int ret = vasprintf(&msg_buf, fmt, args);
va_end(args);
if (ret <= 0 || !msg_buf) return;
cloudlog_common(levelnum, filename, lineno, func, msg_buf);
}
void cloudlog_t_common(int levelnum, const char* filename, int lineno, const char* func,
uint32_t frame_id, const char* fmt, va_list args) {
if (!LOG_TIMESTAMPS) return;
char* msg_buf = nullptr;
int ret = vasprintf(&msg_buf, fmt, args);
if (ret <= 0 || !msg_buf) return;
json11::Json::object tspt_j = json11::Json::object{
{"event", msg_buf},
{"time", std::to_string(nanos_since_boot())}
};
if (frame_id < NO_FRAME_ID) {
tspt_j["frame_id"] = std::to_string(frame_id);
}
tspt_j = json11::Json::object{{"timestamp", tspt_j}};
cloudlog_common(levelnum, filename, lineno, func, msg_buf, tspt_j);
}
void cloudlog_te(int levelnum, const char* filename, int lineno, const char* func,
const char* fmt, ...) {
va_list args;
va_start(args, fmt);
cloudlog_t_common(levelnum, filename, lineno, func, NO_FRAME_ID, fmt, args);
va_end(args);
}
void cloudlog_te(int levelnum, const char* filename, int lineno, const char* func,
uint32_t frame_id, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
cloudlog_t_common(levelnum, filename, lineno, func, frame_id, fmt, args);
va_end(args);
}

@ -0,0 +1,76 @@
#pragma once
#include "common/timing.h"
#define CLOUDLOG_DEBUG 10
#define CLOUDLOG_INFO 20
#define CLOUDLOG_WARNING 30
#define CLOUDLOG_ERROR 40
#define CLOUDLOG_CRITICAL 50
#ifdef __GNUC__
#define SWAG_LOG_CHECK_FMT(a, b) __attribute__ ((format (printf, a, b)))
#else
#define SWAG_LOG_CHECK_FMT(a, b)
#endif
void cloudlog_e(int levelnum, const char* filename, int lineno, const char* func,
const char* fmt, ...) SWAG_LOG_CHECK_FMT(5, 6);
void cloudlog_te(int levelnum, const char* filename, int lineno, const char* func,
const char* fmt, ...) SWAG_LOG_CHECK_FMT(5, 6);
void cloudlog_te(int levelnum, const char* filename, int lineno, const char* func,
uint32_t frame_id, const char* fmt, ...) SWAG_LOG_CHECK_FMT(6, 7);
#define cloudlog(lvl, fmt, ...) cloudlog_e(lvl, __FILE__, __LINE__, \
__func__, \
fmt, ## __VA_ARGS__)
#define cloudlog_t(lvl, ...) cloudlog_te(lvl, __FILE__, __LINE__, \
__func__, \
__VA_ARGS__)
#define cloudlog_rl(burst, millis, lvl, fmt, ...) \
{ \
static uint64_t __begin = 0; \
static int __printed = 0; \
static int __missed = 0; \
\
int __burst = (burst); \
int __millis = (millis); \
uint64_t __ts = nanos_since_boot(); \
\
if (!__begin) { __begin = __ts; } \
\
if (__begin + __millis*1000000ULL < __ts) { \
if (__missed) { \
cloudlog(CLOUDLOG_WARNING, "cloudlog: %d messages suppressed", __missed); \
} \
__begin = 0; \
__printed = 0; \
__missed = 0; \
} \
\
if (__printed < __burst) { \
cloudlog(lvl, fmt, ## __VA_ARGS__); \
__printed++; \
} else { \
__missed++; \
} \
}
#define LOGT(...) cloudlog_t(CLOUDLOG_DEBUG, __VA_ARGS__)
#define LOGD(fmt, ...) cloudlog(CLOUDLOG_DEBUG, fmt, ## __VA_ARGS__)
#define LOG(fmt, ...) cloudlog(CLOUDLOG_INFO, fmt, ## __VA_ARGS__)
#define LOGW(fmt, ...) cloudlog(CLOUDLOG_WARNING, fmt, ## __VA_ARGS__)
#define LOGE(fmt, ...) cloudlog(CLOUDLOG_ERROR, fmt, ## __VA_ARGS__)
#define LOGD_100(fmt, ...) cloudlog_rl(2, 100, CLOUDLOG_DEBUG, fmt, ## __VA_ARGS__)
#define LOG_100(fmt, ...) cloudlog_rl(2, 100, CLOUDLOG_INFO, fmt, ## __VA_ARGS__)
#define LOGW_100(fmt, ...) cloudlog_rl(2, 100, CLOUDLOG_WARNING, fmt, ## __VA_ARGS__)
#define LOGE_100(fmt, ...) cloudlog_rl(2, 100, CLOUDLOG_ERROR, fmt, ## __VA_ARGS__)

@ -0,0 +1,144 @@
import logging
import os
import time
import warnings
from pathlib import Path
from logging.handlers import BaseRotatingHandler
import zmq
from openpilot.common.logging_extra import SwagLogger, SwagFormatter, SwagLogFileFormatter
from openpilot.system.hardware.hw import Paths
def get_file_handler():
Path(Paths.swaglog_root()).mkdir(parents=True, exist_ok=True)
base_filename = os.path.join(Paths.swaglog_root(), "swaglog")
handler = SwaglogRotatingFileHandler(base_filename)
return handler
class SwaglogRotatingFileHandler(BaseRotatingHandler):
def __init__(self, base_filename, interval=60, max_bytes=1024*256, backup_count=2500, encoding=None):
super().__init__(base_filename, mode="a", encoding=encoding, delay=True)
self.base_filename = base_filename
self.interval = interval # seconds
self.max_bytes = max_bytes
self.backup_count = backup_count
self.log_files = self.get_existing_logfiles()
log_indexes = [f.split(".")[-1] for f in self.log_files]
self.last_file_idx = max([int(i) for i in log_indexes if i.isdigit()] or [-1])
self.last_rollover = None
self.doRollover()
def _open(self):
self.last_rollover = time.monotonic()
self.last_file_idx += 1
next_filename = f"{self.base_filename}.{self.last_file_idx:010}"
stream = open(next_filename, self.mode, encoding=self.encoding)
self.log_files.insert(0, next_filename)
return stream
def get_existing_logfiles(self):
log_files = list()
base_dir = os.path.dirname(self.base_filename)
for fn in os.listdir(base_dir):
fp = os.path.join(base_dir, fn)
if fp.startswith(self.base_filename) and os.path.isfile(fp):
log_files.append(fp)
return sorted(log_files)
def shouldRollover(self, record):
size_exceeded = self.max_bytes > 0 and self.stream.tell() >= self.max_bytes
time_exceeded = self.interval > 0 and self.last_rollover + self.interval <= time.monotonic()
return size_exceeded or time_exceeded
def doRollover(self):
if self.stream:
self.stream.close()
self.stream = self._open()
if self.backup_count > 0:
while len(self.log_files) > self.backup_count:
to_delete = self.log_files.pop()
if os.path.exists(to_delete): # just being safe, should always exist
os.remove(to_delete)
class UnixDomainSocketHandler(logging.Handler):
def __init__(self, formatter):
logging.Handler.__init__(self)
self.setFormatter(formatter)
self.pid = None
self.zctx = None
self.sock = None
def __del__(self):
self.close()
def close(self):
if self.sock is not None:
self.sock.close()
if self.zctx is not None:
self.zctx.term()
def connect(self):
self.zctx = zmq.Context()
self.sock = self.zctx.socket(zmq.PUSH)
self.sock.setsockopt(zmq.LINGER, 10)
self.sock.connect(Paths.swaglog_ipc())
self.pid = os.getpid()
def emit(self, record):
if os.getpid() != self.pid:
# TODO suppresses warning about forking proc with zmq socket, fix root cause
warnings.filterwarnings("ignore", category=ResourceWarning, message="unclosed.*<zmq.*>")
self.connect()
msg = self.format(record).rstrip('\n')
# print("SEND".format(repr(msg)))
try:
s = chr(record.levelno)+msg
self.sock.send(s.encode('utf8'), zmq.NOBLOCK)
except zmq.error.Again:
# drop :/
pass
class ForwardingHandler(logging.Handler):
def __init__(self, target_logger):
super().__init__()
self.target_logger = target_logger
def emit(self, record):
self.target_logger.handle(record)
def add_file_handler(log):
"""
Function to add the file log handler to swaglog.
This can be used to store logs when logmessaged is not running.
"""
handler = get_file_handler()
handler.setFormatter(SwagLogFileFormatter(log))
log.addHandler(handler)
cloudlog = log = SwagLogger()
log.setLevel(logging.DEBUG)
outhandler = logging.StreamHandler()
print_level = os.environ.get('LOGPRINT', 'warning')
if print_level == 'debug':
outhandler.setLevel(logging.DEBUG)
elif print_level == 'info':
outhandler.setLevel(logging.INFO)
elif print_level == 'warning':
outhandler.setLevel(logging.WARNING)
ipchandler = UnixDomainSocketHandler(SwagFormatter(log))
log.addHandler(outhandler)
# logs are sent through IPC before writing to disk to prevent disk I/O blocking
log.addHandler(ipchandler)
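
Usage sketch (illustrative, not part of this commit): cloudlog behaves like a standard logging.Logger, with the extra bind()/event() helpers from SwagLogger; records go to stderr and, over IPC, to the logging daemon.

from openpilot.common.swaglog import cloudlog

cloudlog.bind(module="example")
cloudlog.info("starting up")
cloudlog.event("camera_init", frame_id=0, debug=True)
try:
  raise ValueError("oops")
except ValueError:
  cloudlog.exception("init failed")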

@ -0,0 +1 @@
test_common

@ -0,0 +1,19 @@
import os
from uuid import uuid4
from openpilot.common.file_helpers import atomic_write_in_dir
class TestFileHelpers:
def run_atomic_write_func(self, atomic_write_func):
path = f"/tmp/tmp{uuid4()}"
with atomic_write_func(path) as f:
f.write("test")
assert not os.path.exists(path)
with open(path) as f:
assert f.read() == "test"
os.remove(path)
def test_atomic_write_in_dir(self):
self.run_atomic_write_func(atomic_write_in_dir)

@ -0,0 +1,15 @@
import os
from openpilot.common.basedir import BASEDIR
from openpilot.common.markdown import parse_markdown
class TestMarkdown:
def test_all_release_notes(self):
with open(os.path.join(BASEDIR, "RELEASES.md")) as f:
release_notes = f.read().split("\n\n")
assert len(release_notes) > 10
for rn in release_notes:
md = parse_markdown(rn)
assert len(md) > 0

@ -0,0 +1,27 @@
#include "catch2/catch.hpp"
#define private public
#include "common/params.h"
#include "common/util.h"
TEST_CASE("params_nonblocking_put") {
char tmp_path[] = "/tmp/asyncWriter_XXXXXX";
const std::string param_path = mkdtemp(tmp_path);
auto param_names = {"CarParams", "IsMetric"};
{
Params params(param_path);
for (const auto &name : param_names) {
params.putNonBlocking(name, "1");
      // value is still empty because the actual write happens asynchronously
REQUIRE(params.get(name).empty());
}
// check if thread is running
REQUIRE(params.future.valid());
REQUIRE(params.future.wait_for(std::chrono::milliseconds(0)) == std::future_status::timeout);
}
// check results
Params p(param_path);
for (const auto &name : param_names) {
REQUIRE(p.get(name) == "1");
}
}

@ -0,0 +1,141 @@
import pytest
import datetime
import os
import threading
import time
import uuid
from openpilot.common.params import Params, ParamKeyFlag, UnknownKeyName
class TestParams:
def setup_method(self):
self.params = Params()
def test_params_put_and_get(self):
self.params.put("DongleId", "cb38263377b873ee")
assert self.params.get("DongleId") == "cb38263377b873ee"
def test_params_non_ascii(self):
st = b"\xe1\x90\xff"
self.params.put("CarParams", st)
assert self.params.get("CarParams") == st
def test_params_get_cleared_manager_start(self):
self.params.put("CarParams", b"test")
self.params.put("DongleId", "cb38263377b873ee")
assert self.params.get("CarParams") == b"test"
undefined_param = self.params.get_param_path(uuid.uuid4().hex)
with open(undefined_param, "w") as f:
f.write("test")
assert os.path.isfile(undefined_param)
self.params.clear_all(ParamKeyFlag.CLEAR_ON_MANAGER_START)
assert self.params.get("CarParams") is None
assert self.params.get("DongleId") is not None
assert not os.path.isfile(undefined_param)
def test_params_two_things(self):
self.params.put("DongleId", "bob")
self.params.put("AthenadPid", 123)
assert self.params.get("DongleId") == "bob"
assert self.params.get("AthenadPid") == 123
def test_params_get_block(self):
def _delayed_writer():
time.sleep(0.1)
self.params.put("CarParams", b"test")
threading.Thread(target=_delayed_writer).start()
assert self.params.get("CarParams") is None
assert self.params.get("CarParams", block=True) == b"test"
def test_params_unknown_key_fails(self):
with pytest.raises(UnknownKeyName):
self.params.get("swag")
with pytest.raises(UnknownKeyName):
self.params.get_bool("swag")
with pytest.raises(UnknownKeyName):
self.params.put("swag", "abc")
with pytest.raises(UnknownKeyName):
self.params.put_bool("swag", True)
def test_remove_not_there(self):
assert self.params.get("CarParams") is None
self.params.remove("CarParams")
assert self.params.get("CarParams") is None
def test_get_bool(self):
self.params.remove("IsMetric")
assert not self.params.get_bool("IsMetric")
self.params.put_bool("IsMetric", True)
assert self.params.get_bool("IsMetric")
self.params.put_bool("IsMetric", False)
assert not self.params.get_bool("IsMetric")
self.params.put("IsMetric", True)
assert self.params.get_bool("IsMetric")
self.params.put("IsMetric", False)
assert not self.params.get_bool("IsMetric")
def test_put_non_blocking_with_get_block(self):
q = Params()
def _delayed_writer():
time.sleep(0.1)
Params().put_nonblocking("CarParams", b"test")
threading.Thread(target=_delayed_writer).start()
assert q.get("CarParams") is None
assert q.get("CarParams", True) == b"test"
def test_put_bool_non_blocking_with_get_block(self):
q = Params()
def _delayed_writer():
time.sleep(0.1)
Params().put_bool_nonblocking("CarParams", True)
threading.Thread(target=_delayed_writer).start()
assert q.get("CarParams") is None
assert q.get("CarParams", True) == b"1"
def test_params_all_keys(self):
keys = Params().all_keys()
# sanity checks
assert len(keys) > 20
assert len(keys) == len(set(keys))
assert b"CarParams" in keys
def test_params_default_value(self):
self.params.remove("LanguageSetting")
self.params.remove("LongitudinalPersonality")
self.params.remove("LiveParameters")
assert self.params.get("LanguageSetting") is None
assert self.params.get("LanguageSetting", return_default=False) is None
assert isinstance(self.params.get("LanguageSetting", return_default=True), str)
assert isinstance(self.params.get("LongitudinalPersonality", return_default=True), int)
assert self.params.get("LiveParameters") is None
assert self.params.get("LiveParameters", return_default=True) is None
def test_params_get_type(self):
# json
self.params.put("ApiCache_FirehoseStats", {"a": 0})
assert self.params.get("ApiCache_FirehoseStats") == {"a": 0}
# int
self.params.put("BootCount", 1441)
assert self.params.get("BootCount") == 1441
# bool
self.params.put("AdbEnabled", True)
assert self.params.get("AdbEnabled")
assert isinstance(self.params.get("AdbEnabled"), bool)
# time
now = datetime.datetime.now(datetime.UTC)
self.params.put("InstallDate", now)
assert self.params.get("InstallDate") == now

@ -0,0 +1,2 @@
#define CATCH_CONFIG_MAIN
#include "catch2/catch.hpp"

@ -0,0 +1,29 @@
from openpilot.common.simple_kalman import KF1D
class TestSimpleKalman:
def setup_method(self):
dt = 0.01
x0_0 = 0.0
x1_0 = 0.0
A0_0 = 1.0
A0_1 = dt
A1_0 = 0.0
A1_1 = 1.0
C0_0 = 1.0
C0_1 = 0.0
K0_0 = 0.12287673
K1_0 = 0.29666309
self.kf = KF1D(x0=[[x0_0], [x1_0]],
A=[[A0_0, A0_1], [A1_0, A1_1]],
C=[C0_0, C0_1],
K=[[K0_0], [K1_0]])
def test_getter_setter(self):
self.kf.set_x([[1.0], [1.0]])
assert self.kf.x == [[1.0], [1.0]]
def test_update_returns_state(self):
x = self.kf.update(100)
assert x == [i[0] for i in self.kf.x]

@ -0,0 +1,88 @@
#include <zmq.h>
#include <iostream>
#include "catch2/catch.hpp"
#include "common/swaglog.h"
#include "common/util.h"
#include "common/version.h"
#include "system/hardware/hw.h"
#include "third_party/json11/json11.hpp"
std::string daemon_name = "testy";
std::string dongle_id = "test_dongle_id";
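// LINE_NO records the source line of the LOGD() call in log_thread() so recv_log() can verify msg["lineno"]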
int LINE_NO = 0;
void log_thread(int thread_id, int msg_cnt) {
for (int i = 0; i < msg_cnt; ++i) {
LOGD("%d", thread_id);
LINE_NO = __LINE__ - 1;
usleep(1);
}
}
void recv_log(int thread_cnt, int thread_msg_cnt) {
void *zctx = zmq_ctx_new();
void *sock = zmq_socket(zctx, ZMQ_PULL);
zmq_bind(sock, Path::swaglog_ipc().c_str());
std::vector<int> thread_msgs(thread_cnt);
int total_count = 0;
for (auto start = std::chrono::steady_clock::now(), now = start;
now < start + std::chrono::seconds{1} && total_count < (thread_cnt * thread_msg_cnt);
now = std::chrono::steady_clock::now()) {
char buf[4096] = {};
if (zmq_recv(sock, buf, sizeof(buf), ZMQ_DONTWAIT) <= 0) {
if (errno == EAGAIN || errno == EINTR || errno == EFSM) continue;
break;
}
REQUIRE(buf[0] == CLOUDLOG_DEBUG);
std::string err;
auto msg = json11::Json::parse(buf + 1, err);
REQUIRE(!msg.is_null());
REQUIRE(msg["levelnum"].int_value() == CLOUDLOG_DEBUG);
REQUIRE_THAT(msg["filename"].string_value(), Catch::Contains("test_swaglog.cc"));
REQUIRE(msg["funcname"].string_value() == "log_thread");
REQUIRE(msg["lineno"].int_value() == LINE_NO);
auto ctx = msg["ctx"];
REQUIRE(ctx["daemon"].string_value() == daemon_name);
REQUIRE(ctx["dongle_id"].string_value() == dongle_id);
REQUIRE(ctx["dirty"].bool_value() == true);
REQUIRE(ctx["version"].string_value() == COMMA_VERSION);
std::string device = Hardware::get_name();
REQUIRE(ctx["device"].string_value() == device);
int thread_id = atoi(msg["msg"].string_value().c_str());
REQUIRE((thread_id >= 0 && thread_id < thread_cnt));
thread_msgs[thread_id]++;
total_count++;
}
for (int i = 0; i < thread_cnt; ++i) {
INFO("thread :" << i);
REQUIRE(thread_msgs[i] == thread_msg_cnt);
}
zmq_close(sock);
zmq_ctx_destroy(zctx);
}
TEST_CASE("swaglog") {
setenv("MANAGER_DAEMON", daemon_name.c_str(), 1);
setenv("DONGLE_ID", dongle_id.c_str(), 1);
setenv("dirty", "1", 1);
const int thread_cnt = 5;
const int thread_msg_cnt = 100;
std::vector<std::thread> log_threads;
for (int i = 0; i < thread_cnt; ++i) {
log_threads.push_back(std::thread(log_thread, i, thread_msg_cnt));
}
for (auto &t : log_threads) t.join();
recv_log(thread_cnt, thread_msg_cnt);
}

@ -0,0 +1,147 @@
#include <dirent.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <algorithm>
#include <climits>
#include <fstream>
#include <random>
#include <string>
#include "catch2/catch.hpp"
#include "common/util.h"
std::string random_bytes(int size) {
std::random_device rd;
std::independent_bits_engine<std::default_random_engine, CHAR_BIT, unsigned char> rbe(rd());
std::string bytes(size + 1, '\0');
std::generate(bytes.begin(), bytes.end(), std::ref(rbe));
return bytes;
}
TEST_CASE("util::read_file") {
SECTION("read /proc/version") {
std::string ret = util::read_file("/proc/version");
REQUIRE(ret.find("Linux version") != std::string::npos);
}
SECTION("read from sysfs") {
std::string ret = util::read_file("/sys/power/wakeup_count");
REQUIRE(!ret.empty());
}
SECTION("read file") {
char filename[] = "/tmp/test_read_XXXXXX";
int fd = mkstemp(filename);
REQUIRE(util::read_file(filename).empty());
std::string content = random_bytes(64 * 1024);
write(fd, content.c_str(), content.size());
std::string ret = util::read_file(filename);
bool equal = (ret == content);
REQUIRE(equal);
close(fd);
}
SECTION("read directory") {
REQUIRE(util::read_file(".").empty());
}
SECTION("read non-existent file") {
std::string ret = util::read_file("does_not_exist");
REQUIRE(ret.empty());
}
SECTION("read non-permission") {
REQUIRE(util::read_file("/proc/kmsg").empty());
}
}
TEST_CASE("util::file_exists") {
char filename[] = "/tmp/test_file_exists_XXXXXX";
int fd = mkstemp(filename);
REQUIRE(fd != -1);
close(fd);
SECTION("existent file") {
REQUIRE(util::file_exists(filename));
REQUIRE(util::file_exists("/tmp"));
}
SECTION("nonexistent file") {
std::string fn = filename;
REQUIRE(!util::file_exists(fn + "/nonexistent"));
}
SECTION("file has no access permissions") {
std::string fn = "/proc/kmsg";
std::ifstream f(fn);
REQUIRE(f.good() == false);
REQUIRE(util::file_exists(fn));
}
::remove(filename);
}
TEST_CASE("util::read_files_in_dir") {
char tmp_path[] = "/tmp/test_XXXXXX";
const std::string test_path = mkdtemp(tmp_path);
const std::string files[] = {".test1", "'test2'", "test3"};
for (auto fn : files) {
std::ofstream{test_path + "/" + fn} << fn;
}
mkdir((test_path + "/dir").c_str(), 0777);
std::map<std::string, std::string> result = util::read_files_in_dir(test_path);
REQUIRE(result.find("dir") == result.end());
REQUIRE(result.size() == std::size(files));
for (auto& [k, v] : result) {
REQUIRE(k == v);
}
}
TEST_CASE("util::safe_fwrite") {
char filename[] = "/tmp/XXXXXX";
int fd = mkstemp(filename);
close(fd);
std::string dat = random_bytes(1024 * 1024);
FILE *f = util::safe_fopen(filename, "wb");
REQUIRE(f != nullptr);
size_t size = util::safe_fwrite(dat.data(), 1, dat.size(), f);
REQUIRE(size == dat.size());
int ret = util::safe_fflush(f);
REQUIRE(ret == 0);
ret = fclose(f);
REQUIRE(ret == 0);
bool equal = (dat == util::read_file(filename));
REQUIRE(equal);
}
TEST_CASE("util::create_directories") {
system("rm /tmp/test_create_directories -rf");
std::string dir = "/tmp/test_create_directories/a/b/c/d/e/f";
auto check_dir_permissions = [](const std::string &dir, mode_t mode) -> bool {
struct stat st = {};
return stat(dir.c_str(), &st) == 0 && (st.st_mode & S_IFMT) == S_IFDIR && (st.st_mode & (S_IRWXU | S_IRWXG | S_IRWXO)) == mode;
};
SECTION("create_directories") {
REQUIRE(util::create_directories(dir, 0755));
REQUIRE(check_dir_permissions(dir, 0755));
}
SECTION("dir already exists") {
REQUIRE(util::create_directories(dir, 0755));
REQUIRE(util::create_directories(dir, 0755));
}
SECTION("a file exists with the same name") {
REQUIRE(util::create_directories(dir, 0755));
int f = open((dir + "/file").c_str(), O_RDWR | O_CREAT, 0644);
REQUIRE(f != -1);
close(f);
REQUIRE(util::create_directories(dir + "/file", 0755) == false);
REQUIRE(util::create_directories(dir + "/file/1/2/3", 0755) == false);
}
SECTION("end with slashes") {
REQUIRE(util::create_directories(dir + "/", 0755));
}
SECTION("empty") {
REQUIRE(util::create_directories("", 0755) == false);
}
}

@ -0,0 +1,63 @@
#!/usr/bin/env python3
import os
import time
import subprocess
from openpilot.common.basedir import BASEDIR
class TextWindow:
def __init__(self, text):
try:
self.text_proc = subprocess.Popen(["./text.py", text],
stdin=subprocess.PIPE,
cwd=os.path.join(BASEDIR, "system", "ui"),
close_fds=True)
except OSError:
self.text_proc = None
def get_status(self):
if self.text_proc is not None:
self.text_proc.poll()
return self.text_proc.returncode
return None
def __enter__(self):
return self
def close(self):
if self.text_proc is not None:
self.text_proc.terminate()
self.text_proc = None
def wait_for_exit(self):
if self.text_proc is not None:
while True:
if self.get_status() == 1:
return
time.sleep(0.1)
def __del__(self):
self.close()
def __exit__(self, exc_type, exc_value, traceback):
self.close()
if __name__ == "__main__":
text = """Traceback (most recent call last):
File "./controlsd.py", line 608, in <module>
main()
File "./controlsd.py", line 604, in main
controlsd_thread(sm, pm, logcan)
File "./controlsd.py", line 455, in controlsd_thread
1/0
ZeroDivisionError: division by zero"""
print(text)
with TextWindow(text) as s:
for _ in range(100):
if s.get_status() == 1:
print("Got exit button")
break
time.sleep(0.1)
print("gone")

@ -0,0 +1,15 @@
import datetime
from pathlib import Path
MIN_DATE = datetime.datetime(year=2025, month=2, day=21)
def min_date():
# on systemd systems, the default time is the systemd build time
systemd_path = Path("/lib/systemd/systemd")
if systemd_path.exists():
d = datetime.datetime.fromtimestamp(systemd_path.stat().st_mtime)
return max(MIN_DATE, d + datetime.timedelta(days=1))
return MIN_DATE
def system_time_valid():
return datetime.datetime.now() > min_date()

@ -0,0 +1,27 @@
import signal
class TimeoutException(Exception):
pass
class Timeout:
"""
Timeout context manager.
For example this code will raise a TimeoutException:
with Timeout(seconds=5, error_msg="Sleep was too long"):
time.sleep(10)
"""
def __init__(self, seconds, error_msg=None):
if error_msg is None:
error_msg = f'Timed out after {seconds} seconds'
self.seconds = seconds
self.error_msg = error_msg
def handle_timeout(self, signum, frame):
raise TimeoutException(self.error_msg)
def __enter__(self):
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, exc_type, exc_val, exc_tb):
signal.alarm(0)

@ -0,0 +1,51 @@
#pragma once
#include <cstdint>
#include <ctime>
#ifdef __APPLE__
#define CLOCK_BOOTTIME CLOCK_MONOTONIC
#endif
static inline uint64_t nanos_since_boot() {
struct timespec t;
clock_gettime(CLOCK_BOOTTIME, &t);
return t.tv_sec * 1000000000ULL + t.tv_nsec;
}
static inline double millis_since_boot() {
struct timespec t;
clock_gettime(CLOCK_BOOTTIME, &t);
return t.tv_sec * 1000.0 + t.tv_nsec * 1e-6;
}
static inline double seconds_since_boot() {
struct timespec t;
clock_gettime(CLOCK_BOOTTIME, &t);
return (double)t.tv_sec + t.tv_nsec * 1e-9;
}
static inline uint64_t nanos_since_epoch() {
struct timespec t;
clock_gettime(CLOCK_REALTIME, &t);
return t.tv_sec * 1000000000ULL + t.tv_nsec;
}
static inline double seconds_since_epoch() {
struct timespec t;
clock_gettime(CLOCK_REALTIME, &t);
return (double)t.tv_sec + t.tv_nsec * 1e-9;
}
// you probably should use nanos_since_boot instead
static inline uint64_t nanos_monotonic() {
struct timespec t;
clock_gettime(CLOCK_MONOTONIC, &t);
return t.tv_sec * 1000000000ULL + t.tv_nsec;
}
static inline uint64_t nanos_monotonic_raw() {
struct timespec t;
clock_gettime(CLOCK_MONOTONIC_RAW, &t);
return t.tv_sec * 1000000000ULL + t.tv_nsec;
}

@ -0,0 +1,2 @@
transformations
transformations.cpp

@ -0,0 +1,70 @@
Reference Frames
------
Many reference frames are used throughout openpilot. This
folder contains all the helper functions needed to
transform between them. Generally this is done by
generating a rotation matrix and multiplying by it;
a short usage sketch follows the table below.
| Name | [x, y, z] | Units | Notes |
| :-------------: |:-------------:| :-----:| :----: |
| Geodetic | [Latitude, Longitude, Altitude] | geodetic coordinates | Sometimes used as [lon, lat, alt], avoid this frame. |
| ECEF | [x, y, z] | meters | We use **ITRF14 (IGS14)**, NOT NAD83. <br> This is the global Mesh3D frame. |
| NED | [North, East, Down] | meters | Relative to earth's surface, useful for visualizing. |
| Device | [Forward, Right, Down] | meters | This is the Mesh3D local frame. <br> Relative to camera, **not imu.** <br> ![img](http://upload.wikimedia.org/wikipedia/commons/thumb/2/2f/RPY_angles_of_airplanes.png/440px-RPY_angles_of_airplanes.png)|
| Calibrated | [Forward, Right, Down] | meters | This is the frame the model outputs are in. <br> More details below. <br>|
| Car | [Forward, Right, Down] | meters | This is useful for estimating position of points on the road. <br> More details below. <br>|
| View | [Right, Down, Forward] | meters | Like device frame, but according to camera conventions. |
| Camera | [u, v, focal] | pixels | Like view frame, but 2d on the camera image.|
| Normalized Camera | [u / focal, v / focal, 1] | / | |
| Model | [u, v, focal] | pixels | The sampled rectangle of the full camera frame the model uses. |
| Normalized Model | [u / focal, v / focal, 1] | / | |
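For example, chaining the helpers in this folder takes a point from Device
frame to pixel coordinates in Camera frame. A minimal sketch, assuming these
helpers are importable as `openpilot.common.transformations.camera` (the
camera key and the point are illustrative):
```
import numpy as np
from openpilot.common.transformations.camera import DEVICE_CAMERAS, img_from_device, denormalize

cam = DEVICE_CAMERAS[("tici", "ar0231")].fcam
# one point 10 m ahead of and 1.2 m below the camera, in Device frame [Forward, Right, Down]
pts_device = np.array([[10.0, 0.0, 1.2]])
# Device -> Normalized Camera ([u / focal, v / focal]), then -> Camera (pixels)
pts_norm = img_from_device(pts_device)
pts_img = denormalize(pts_norm, cam.intrinsics)
```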
Orientation Conventions
------
Quaternions, rotation matrices and euler angles are three
equivalent representations of orientation and all three are
used throughout the code base.
For euler angles the preferred convention is [roll, pitch, yaw]
which corresponds to rotations around the [x, y, z] axes. All
euler angles should always be in radians or radians/s, except
for plotting or display purposes. For quaternions the Hamilton
notation is preferred, which is [q<sub>w</sub>, q<sub>x</sub>, q<sub>y</sub>, q<sub>z</sub>]. All quaternions
should always be normalized with a strictly positive q<sub>w</sub>. **These
quaternions are a unique representation of orientation whereas euler angles
or rotation matrices are not.**
To rotate from one frame into another with euler angles the
convention is to rotate around roll, then pitch and then yaw,
while rotating around the rotated axes, not the original axes.
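A minimal sketch of these conventions, assuming the `rot_from_euler` and
`euler_from_rot` helpers from this folder's orientation module (also used in
the example at the end of this document):
```
import numpy as np
from openpilot.common.transformations.orientation import rot_from_euler, euler_from_rot

# [roll, pitch, yaw] in radians
eulers = np.array([0.1, -0.05, 0.7])
rot = rot_from_euler(eulers)                   # 3x3 rotation matrix
assert np.allclose(euler_from_rot(rot), eulers)
```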
Car frame
------
Device frame is aligned with the road-facing camera used by openpilot. However, when controlling the vehicle it is helpful to think in a reference frame aligned with the vehicle. These two reference frames can be different.
The orientation of car frame is defined to be aligned with the car's direction of travel and the road plane when the vehicle is driving on a flat road and not turning. The origin of car frame is defined to be directly below device frame (in car frame), such that it is on the road plane. The position and orientation of this frame are not necessarily always aligned with the direction of travel or the road plane, due to suspension movements and other effects.
Calibrated frame
------
It is helpful for openpilot's driving model to take in images that look similar even when the device is mounted differently in different cars. To achieve this we "calibrate" the images by transforming them into calibrated frame. Calibrated frame is defined to be aligned with car frame in pitch and yaw, and aligned with device frame in roll. It also has the same origin as device frame.
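A minimal sketch of that relationship, using illustrative calibration angles
and the same `rot_from_euler` construction used for the calibrated-frame
extrinsics elsewhere in this folder:
```
import numpy as np
from openpilot.common.transformations.orientation import rot_from_euler

# calibrated frame shares roll with device frame, so only pitch and yaw enter the rotation
calib_pitch, calib_yaw = 0.02, -0.01           # illustrative values, in radians
device_from_calib = rot_from_euler([0.0, calib_pitch, calib_yaw])
calib_from_device = device_from_calib.T
pt_device = np.array([10.0, 0.0, 1.2])         # a point in device frame
pt_calib = calib_from_device.dot(pt_device)    # the same point in calibrated frame
```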
Example
------
To transform global Mesh3D positions and orientations (positions_ecef, quats_ecef) into the local frame described by the
first position and orientation from Mesh3D, one would do:
```
import numpy as np
from openpilot.common.transformations.orientation import rot_from_quat, euler_from_rot

ecef_from_local = rot_from_quat(quats_ecef[0])
local_from_ecef = ecef_from_local.T
positions_local = np.einsum('ij,kj->ki', local_from_ecef, positions_ecef - positions_ecef[0])
rotations_global = rot_from_quat(quats_ecef)
rotations_local = np.einsum('ij,kjl->kil', local_from_ecef, rotations_global)
eulers_local = euler_from_rot(rotations_local)
```

@ -0,0 +1,5 @@
Import('env', 'envCython')
transformations = env.Library('transformations', ['orientation.cc', 'coordinates.cc'])
transformations_python = envCython.Program('transformations.so', 'transformations.pyx')
Export('transformations', 'transformations_python')

@ -0,0 +1,179 @@
import itertools
import numpy as np
from dataclasses import dataclass
import openpilot.common.transformations.orientation as orient
## -- hardcoded hardware params --
@dataclass(frozen=True)
class CameraConfig:
width: int
height: int
focal_length: float
@property
def size(self):
return (self.width, self.height)
@property
def intrinsics(self):
# aka 'K' aka camera_frame_from_view_frame
return np.array([
[self.focal_length, 0.0, float(self.width)/2],
[0.0, self.focal_length, float(self.height)/2],
[0.0, 0.0, 1.0]
])
@property
def intrinsics_inv(self):
# aka 'K_inv' aka view_frame_from_camera_frame
return np.linalg.inv(self.intrinsics)
@dataclass(frozen=True)
class _NoneCameraConfig(CameraConfig):
width: int = 0
height: int = 0
focal_length: float = 0
@dataclass(frozen=True)
class DeviceCameraConfig:
fcam: CameraConfig
dcam: CameraConfig
ecam: CameraConfig
def all_cams(self):
for cam in ['fcam', 'dcam', 'ecam']:
if not isinstance(getattr(self, cam), _NoneCameraConfig):
yield cam, getattr(self, cam)
_ar_ox_fisheye = CameraConfig(1928, 1208, 567.0) # focal length probably wrong? magnification is not consistent across frame
_os_fisheye = CameraConfig(2688 // 2, 1520 // 2, 567.0 / 4 * 3)
_ar_ox_config = DeviceCameraConfig(CameraConfig(1928, 1208, 2648.0), _ar_ox_fisheye, _ar_ox_fisheye)
_os_config = DeviceCameraConfig(CameraConfig(2688 // 2, 1520 // 2, 1522.0 * 3 / 4), _os_fisheye, _os_fisheye)
_neo_config = DeviceCameraConfig(CameraConfig(1164, 874, 910.0), CameraConfig(816, 612, 650.0), _NoneCameraConfig())
DEVICE_CAMERAS = {
# A "device camera" is defined by a device type and sensor
# sensor type was never set on eon/neo/two
("neo", "unknown"): _neo_config,
# unknown here is AR0231, field was added with OX03C10 support
("tici", "unknown"): _ar_ox_config,
# before deviceState.deviceType was set, assume tici AR config
("unknown", "ar0231"): _ar_ox_config,
("unknown", "ox03c10"): _ar_ox_config,
# simulator (emulates a tici)
("pc", "unknown"): _ar_ox_config,
}
prods = itertools.product(('tici', 'tizi', 'mici'), (('ar0231', _ar_ox_config), ('ox03c10', _ar_ox_config), ('os04c10', _os_config)))
DEVICE_CAMERAS.update({(d, c[0]): c[1] for d, c in prods})
# device/mesh : x->forward, y-> right, z->down
# view : x->right, y->down, z->forward
device_frame_from_view_frame = np.array([
[ 0., 0., 1.],
[ 1., 0., 0.],
[ 0., 1., 0.]
])
view_frame_from_device_frame = device_frame_from_view_frame.T
# aka 'extrinsic_matrix'
# road : x->forward, y -> left, z->up
def get_view_frame_from_road_frame(roll, pitch, yaw, height):
device_from_road = orient.rot_from_euler([roll, pitch, yaw]).dot(np.diag([1, -1, -1]))
view_from_road = view_frame_from_device_frame.dot(device_from_road)
return np.hstack((view_from_road, [[0], [height], [0]]))
# aka 'extrinsic_matrix'
def get_view_frame_from_calib_frame(roll, pitch, yaw, height):
device_from_calib = orient.rot_from_euler([roll, pitch, yaw])
view_from_calib = view_frame_from_device_frame.dot(device_from_calib)
return np.hstack((view_from_calib, [[0], [height], [0]]))
def vp_from_ke(m):
"""
Computes the vanishing point from the product of the intrinsic and extrinsic
matrices C = KE.
The vanishing point is defined as lim x->infinity C (x, 0, 0, 1).T
"""
return (m[0, 0]/m[2, 0], m[1, 0]/m[2, 0])
def roll_from_ke(m):
# note: different from calibration.h/RollAnglefromKE: i think that one's just wrong
return np.arctan2(-(m[1, 0] - m[1, 1] * m[2, 0] / m[2, 1]),
-(m[0, 0] - m[0, 1] * m[2, 0] / m[2, 1]))
def normalize(img_pts, intrinsics):
# normalizes image coordinates
# accepts single pt or array of pts
intrinsics_inv = np.linalg.inv(intrinsics)
img_pts = np.array(img_pts)
input_shape = img_pts.shape
img_pts = np.atleast_2d(img_pts)
img_pts = np.hstack((img_pts, np.ones((img_pts.shape[0], 1))))
img_pts_normalized = img_pts.dot(intrinsics_inv.T)
img_pts_normalized[(img_pts < 0).any(axis=1)] = np.nan
return img_pts_normalized[:, :2].reshape(input_shape)
def denormalize(img_pts, intrinsics, width=np.inf, height=np.inf):
# denormalizes image coordinates
# accepts single pt or array of pts
img_pts = np.array(img_pts)
input_shape = img_pts.shape
img_pts = np.atleast_2d(img_pts)
img_pts = np.hstack((img_pts, np.ones((img_pts.shape[0], 1), dtype=img_pts.dtype)))
img_pts_denormalized = img_pts.dot(intrinsics.T)
if np.isfinite(width):
img_pts_denormalized[img_pts_denormalized[:, 0] > width] = np.nan
img_pts_denormalized[img_pts_denormalized[:, 0] < 0] = np.nan
if np.isfinite(height):
img_pts_denormalized[img_pts_denormalized[:, 1] > height] = np.nan
img_pts_denormalized[img_pts_denormalized[:, 1] < 0] = np.nan
return img_pts_denormalized[:, :2].reshape(input_shape)
def get_calib_from_vp(vp, intrinsics):
vp_norm = normalize(vp, intrinsics)
yaw_calib = np.arctan(vp_norm[0])
pitch_calib = -np.arctan(vp_norm[1]*np.cos(yaw_calib))
roll_calib = 0
return roll_calib, pitch_calib, yaw_calib
def device_from_ecef(pos_ecef, orientation_ecef, pt_ecef):
# device from ecef frame
# device frame is x -> forward, y-> right, z -> down
# accepts single pt or array of pts
input_shape = pt_ecef.shape
pt_ecef = np.atleast_2d(pt_ecef)
ecef_from_device_rot = orient.rotations_from_quats(orientation_ecef)
device_from_ecef_rot = ecef_from_device_rot.T
pt_ecef_rel = pt_ecef - pos_ecef
pt_device = np.einsum('jk,ik->ij', device_from_ecef_rot, pt_ecef_rel)
return pt_device.reshape(input_shape)
def img_from_device(pt_device):
# img coordinates from pts in device frame
# first transforms to view frame, then to img coords
# accepts single pt or array of pts
input_shape = pt_device.shape
pt_device = np.atleast_2d(pt_device)
pt_view = np.einsum('jk,ik->ij', view_frame_from_device_frame, pt_device)
# This function should never return negative depths
pt_view[pt_view[:, 2] < 0] = np.nan
pt_img = pt_view/pt_view[:, 2:3]
return pt_img.reshape(input_shape)[:, :2]

@ -0,0 +1,100 @@
#define _USE_MATH_DEFINES
#include "common/transformations/coordinates.hpp"
#include <iostream>
#include <cmath>
#include <eigen3/Eigen/Dense>
double a = 6378137; // lgtm [cpp/short-global-name]
double b = 6356752.3142; // lgtm [cpp/short-global-name]
double esq = 6.69437999014 * 0.001; // lgtm [cpp/short-global-name]
double e1sq = 6.73949674228 * 0.001;
static Geodetic to_degrees(Geodetic geodetic){
geodetic.lat = RAD2DEG(geodetic.lat);
geodetic.lon = RAD2DEG(geodetic.lon);
return geodetic;
}
static Geodetic to_radians(Geodetic geodetic){
geodetic.lat = DEG2RAD(geodetic.lat);
geodetic.lon = DEG2RAD(geodetic.lon);
return geodetic;
}
ECEF geodetic2ecef(const Geodetic &geodetic) {
auto g = to_radians(geodetic);
double xi = sqrt(1.0 - esq * pow(sin(g.lat), 2));
double x = (a / xi + g.alt) * cos(g.lat) * cos(g.lon);
double y = (a / xi + g.alt) * cos(g.lat) * sin(g.lon);
double z = (a / xi * (1.0 - esq) + g.alt) * sin(g.lat);
return {x, y, z};
}
Geodetic ecef2geodetic(const ECEF &e) {
// Convert from ECEF to geodetic using Ferrari's methods
// https://en.wikipedia.org/wiki/Geographic_coordinate_conversion#Ferrari.27s_solution
double x = e.x;
double y = e.y;
double z = e.z;
double r = sqrt(x * x + y * y);
double Esq = a * a - b * b;
double F = 54 * b * b * z * z;
double G = r * r + (1 - esq) * z * z - esq * Esq;
double C = (esq * esq * F * r * r) / (pow(G, 3));
double S = cbrt(1 + C + sqrt(C * C + 2 * C));
double P = F / (3 * pow((S + 1 / S + 1), 2) * G * G);
double Q = sqrt(1 + 2 * esq * esq * P);
double r_0 = -(P * esq * r) / (1 + Q) + sqrt(0.5 * a * a*(1 + 1.0 / Q) - P * (1 - esq) * z * z / (Q * (1 + Q)) - 0.5 * P * r * r);
double U = sqrt(pow((r - esq * r_0), 2) + z * z);
double V = sqrt(pow((r - esq * r_0), 2) + (1 - esq) * z * z);
double Z_0 = b * b * z / (a * V);
double h = U * (1 - b * b / (a * V));
double lat = atan((z + e1sq * Z_0) / r);
double lon = atan2(y, x);
return to_degrees({lat, lon, h});
}
LocalCoord::LocalCoord(const Geodetic &geodetic, const ECEF &e) {
init_ecef << e.x, e.y, e.z;
auto g = to_radians(geodetic);
ned2ecef_matrix <<
-sin(g.lat)*cos(g.lon), -sin(g.lon), -cos(g.lat)*cos(g.lon),
-sin(g.lat)*sin(g.lon), cos(g.lon), -cos(g.lat)*sin(g.lon),
cos(g.lat), 0, -sin(g.lat);
ecef2ned_matrix = ned2ecef_matrix.transpose();
}
NED LocalCoord::ecef2ned(const ECEF &e) {
Eigen::Vector3d ecef;
ecef << e.x, e.y, e.z;
Eigen::Vector3d ned = (ecef2ned_matrix * (ecef - init_ecef));
return {ned[0], ned[1], ned[2]};
}
ECEF LocalCoord::ned2ecef(const NED &n) {
Eigen::Vector3d ned;
ned << n.n, n.e, n.d;
Eigen::Vector3d ecef = (ned2ecef_matrix * ned) + init_ecef;
return {ecef[0], ecef[1], ecef[2]};
}
NED LocalCoord::geodetic2ned(const Geodetic &g) {
ECEF e = ::geodetic2ecef(g);
return ecef2ned(e);
}
Geodetic LocalCoord::ned2geodetic(const NED &n) {
ECEF e = ned2ecef(n);
return ::ecef2geodetic(e);
}

@ -0,0 +1,43 @@
#pragma once
#include <eigen3/Eigen/Dense>
#define DEG2RAD(x) ((x) * M_PI / 180.0)
#define RAD2DEG(x) ((x) * 180.0 / M_PI)
struct ECEF {
double x, y, z;
Eigen::Vector3d to_vector() const {
return Eigen::Vector3d(x, y, z);
}
};
struct NED {
double n, e, d;
Eigen::Vector3d to_vector() const {
return Eigen::Vector3d(n, e, d);
}
};
struct Geodetic {
double lat, lon, alt;
bool radians=false;
};
ECEF geodetic2ecef(const Geodetic &g);
Geodetic ecef2geodetic(const ECEF &e);
class LocalCoord {
public:
Eigen::Matrix3d ned2ecef_matrix;
Eigen::Matrix3d ecef2ned_matrix;
Eigen::Vector3d init_ecef;
LocalCoord(const Geodetic &g, const ECEF &e);
LocalCoord(const Geodetic &g) : LocalCoord(g, ::geodetic2ecef(g)) {}
LocalCoord(const ECEF &e) : LocalCoord(::ecef2geodetic(e), e) {}
NED ecef2ned(const ECEF &e);
ECEF ned2ecef(const NED &n);
NED geodetic2ned(const Geodetic &g);
Geodetic ned2geodetic(const NED &n);
};
