commit 743259bbb1
255 changed files with 1839 additions and 2971 deletions
@@ -0,0 +1,28 @@
name: release
on:
  schedule:
    - cron: '0 * * * *'
  workflow_dispatch:

jobs:
  build_masterci:
    name: build master-ci
    runs-on: ubuntu-20.04
    timeout-minutes: 60
    if: github.repository == 'commaai/openpilot'
    steps:
    - name: Wait for green check mark
      uses: lewagon/wait-on-check-action@v0.2
      with:
        ref: master
        wait-interval: 30
        running-workflow-name: 'build master-ci'
    - uses: actions/checkout@v3
      with:
        submodules: true
        fetch-depth: 0
    - name: Pull LFS
      run: git lfs pull
    - name: Build master-ci
      run: |
        BRANCH=master-ci release/build_devel.sh
@@ -1 +1 @@
Subproject commit 7fc82bbc06900ff723992af4d7add10dc3afc0f5
Subproject commit d61736e301cf27d10b74beac278214f974c55fe4
@@ -1,4 +1,39 @@
Import('envCython', 'common')
Import('env', 'envCython', 'arch', 'SHARED')

if SHARED:
  fxn = env.SharedLibrary
else:
  fxn = env.Library

common_libs = [
  'params.cc',
  'statlog.cc',
  'swaglog.cc',
  'util.cc',
  'gpio.cc',
  'i2c.cc',
  'watchdog.cc',
]

_common = fxn('common', common_libs, LIBS="json11")

files = [
  'clutil.cc',
  'visionimg.cc',
]

if arch == "larch64":
  _gpu_libs = ["GLESv2"]
else:
  _gpu_libs = ["GL"]

_gpucommon = fxn('gpucommon', files, LIBS=_gpu_libs)
Export('_common', '_gpucommon', '_gpu_libs')

if GetOption('test'):
  env.Program('tests/test_util', ['tests/test_util.cc'], LIBS=[_common])
  env.Program('tests/test_swaglog', ['tests/test_swaglog.cc'], LIBS=[_common, 'json11', 'zmq', 'pthread'])

# Cython
envCython.Program('clock.so', 'clock.pyx')
envCython.Program('params_pyx.so', 'params_pyx.pyx', LIBS=envCython['LIBS'] + [common, 'zmq'])
envCython.Program('params_pyx.so', 'params_pyx.pyx', LIBS=envCython['LIBS'] + [_common, 'zmq', 'json11'])

@@ -1,10 +1,10 @@
#include "selfdrive/common/clutil.h"
#include "common/clutil.h"

#include <cassert>
#include <iostream>
#include <memory>

#include "selfdrive/common/util.h"
#include "common/util.h"

namespace {  // helper functions

@@ -1,11 +1,11 @@
#include "selfdrive/common/gpio.h"
#include "common/gpio.h"

#include <fcntl.h>
#include <unistd.h>

#include <cstring>

#include "selfdrive/common/util.h"
#include "common/util.h"

// We assume that all pins have already been exported on boot,
// and that we have permission to write to them.
@@ -1,48 +0,0 @@
from typing import List

HTML_REPLACEMENTS = [
  (r'&', r'&amp;'),
  (r'"', r'&quot;'),
]


def parse_markdown(text: str, tab_length: int = 2) -> str:
  lines = text.split("\n")
  output: List[str] = []
  list_level = 0

  def end_outstanding_lists(level: int, end_level: int) -> int:
    while level > end_level:
      level -= 1
      output.append("</ul>")
      if level > 0:
        output.append("</li>")
    return end_level

  for i, line in enumerate(lines):
    if i + 1 < len(lines) and lines[i + 1].startswith("==="):  # heading
      output.append(f"<h1>{line}</h1>")
    elif line.startswith("==="):
      pass
    elif line.lstrip().startswith("* "):  # list
      line_level = 1 + line.count(" " * tab_length, 0, line.index("*"))
      if list_level >= line_level:
        list_level = end_outstanding_lists(list_level, line_level)
      else:
        list_level += 1
        if list_level > 1:
          output[-1] = output[-1].replace("</li>", "")
        output.append("<ul>")
      output.append(f"<li>{line.replace('*', '', 1).lstrip()}</li>")
    else:
      list_level = end_outstanding_lists(list_level, 0)
      if len(line) > 0:
        output.append(line)

  end_outstanding_lists(list_level, 0)
  output_str = "\n".join(output) + "\n"

  for (fr, to) in HTML_REPLACEMENTS:
    output_str = output_str.replace(fr, to)

  return output_str
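The parse_markdown() helper deleted above was only ever validated against markdown-it-py (see the removed test further down in this diff). A minimal sketch of rendering a release-notes snippet with that library instead; the sample text here is made up:

# Minimal sketch, not part of the commit: render release notes with
# markdown-it-py, which the removed parser was checked against.
from markdown_it import MarkdownIt

sample = "0.8.13\n===\n* new feature\n  * nested detail\n"  # hypothetical snippet
print(MarkdownIt().render(sample))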
@@ -1,7 +1,7 @@
#pragma once

#include <array>
#include "selfdrive/common/mat.h"
#include "common/mat.h"
#include "selfdrive/hardware/hw.h"

const int TRAJECTORY_SIZE = 33;
@@ -1,6 +0,0 @@
def replace_right(s, old, new, occurrence):
  # replace_right('1232425', '2', ' ', 1) -> '12324 5'
  # replace_right('1232425', '2', ' ', 2) -> '123 4 5'

  split = s.rsplit(old, occurrence)
  return new.join(split)
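The removed replace_right() helper is a thin wrapper around str.rsplit; a quick standard-library check of its documented examples (not from the commit):

# Quick check of the docstring examples above.
def replace_right(s, old, new, occurrence):
  return new.join(s.rsplit(old, occurrence))

assert replace_right('1232425', '2', ' ', 1) == '12324 5'
assert replace_right('1232425', '2', ' ', 2) == '123 4 5'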
@@ -1,6 +1,6 @@
#pragma once

#include "selfdrive/common/timing.h"
#include "common/timing.h"

#define CLOUDLOG_DEBUG 10
#define CLOUDLOG_INFO 20
@@ -0,0 +1,2 @@
test_util
test_swaglog
@@ -1,26 +0,0 @@
#!/usr/bin/env python3
from markdown_it import MarkdownIt
import os
import unittest

from common.basedir import BASEDIR
from common.markdown import parse_markdown


class TestMarkdown(unittest.TestCase):
  # validate that our simple markdown parser produces the same output as `markdown_it` from pip
  def test_current_release_notes(self):
    self.maxDiff = None

    with open(os.path.join(BASEDIR, "RELEASES.md")) as f:
      for r in f.read().split("\n\n"):

        # No hyperlink support is ok
        if '[' in r:
          continue

        self.assertEqual(MarkdownIt().render(r), parse_markdown(r))


if __name__ == "__main__":
  unittest.main()
@@ -1,47 +0,0 @@
import os
import tempfile
import shutil
import unittest

from common.xattr import getxattr, setxattr, listxattr, removexattr

class TestParams(unittest.TestCase):
  USER_TEST='user.test'
  def setUp(self):
    self.tmpdir = tempfile.mkdtemp()
    self.tmpfn = os.path.join(self.tmpdir, 'test.txt')
    open(self.tmpfn, 'w').close()
    #print("using", self.tmpfn)

  def tearDown(self):
    shutil.rmtree(self.tmpdir)

  def test_getxattr_none(self):
    a = getxattr(self.tmpfn, TestParams.USER_TEST)
    assert a is None

  def test_listxattr_none(self):
    l = listxattr(self.tmpfn)
    assert l == []

  def test_setxattr(self):
    setxattr(self.tmpfn, TestParams.USER_TEST, b'123')
    a = getxattr(self.tmpfn, TestParams.USER_TEST)
    assert a == b'123'

  def test_listxattr(self):
    setxattr(self.tmpfn, 'user.test1', b'123')
    setxattr(self.tmpfn, 'user.test2', b'123')
    l = listxattr(self.tmpfn)
    assert l == ['user.test1', 'user.test2']

  def test_removexattr(self):
    setxattr(self.tmpfn, TestParams.USER_TEST, b'123')
    a = getxattr(self.tmpfn, TestParams.USER_TEST)
    assert a == b'123'
    removexattr(self.tmpfn, TestParams.USER_TEST)
    a = getxattr(self.tmpfn, TestParams.USER_TEST)
    assert a is None

if __name__ == "__main__":
  unittest.main()
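For reference, the wrappers exercised by the deleted test mirror the extended-attribute helpers in Python's os module (Linux-only). A minimal standalone sketch, not taken from this commit:

# Minimal sketch using the standard library's xattr helpers (Linux-only).
import os
import tempfile

with tempfile.NamedTemporaryFile() as f:
  os.setxattr(f.name, 'user.test', b'123')
  assert os.getxattr(f.name, 'user.test') == b'123'
  assert 'user.test' in os.listxattr(f.name)
  os.removexattr(f.name, 'user.test')
  assert 'user.test' not in os.listxattr(f.name)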
@@ -1,4 +1,4 @@
#include "selfdrive/common/util.h"
#include "common/util.h"

#include <sys/stat.h>
#include <dirent.h>
@@ -1,4 +1,4 @@
#include "selfdrive/common/visionimg.h"
#include "common/visionimg.h"

#include <cassert>

@@ -1,6 +1,6 @@
#include "selfdrive/common/watchdog.h"
#include "selfdrive/common/timing.h"
#include "selfdrive/common/util.h"
#include "common/watchdog.h"
#include "common/timing.h"
#include "common/util.h"

const std::string watchdog_fn_prefix = "/dev/shm/wd_";  // + <pid>

@@ -1 +1 @@
Subproject commit be1a213a5ffa3cafe2b4f2d53f6df5d2452ad910
Subproject commit f5f76d28b4827c3fb706d542729651ceef6c06bd
@@ -1 +1 @@
Subproject commit 9564b74d80525c9f289b730febbb2348c529c9cc
Subproject commit 210237fa635eeb76ad855c2031d2cad3bde3a2c0
@@ -1 +1 @@
Subproject commit d5bd81e5b517c79e164d87b96355e6bc75915da0
Subproject commit 36c62afa0c170aa1b7a39bcae3316ffb844499e8
@@ -1,5 +1,9 @@
selfdrive/ui/replay/*
selfdrive/modeld/runners/onnx*

third_party/mapbox-gl-native-qt/x86_64/**
third_party/mapbox-gl-native-qt/x86_64/*.so

third_party/qt-plugins/x86_64/**
third_party/qt-plugins/x86_64/geoservices/*.so

third_party/libyuv/x64/**
third_party/snpe/x86_64/**
third_party/snpe/x86_64-linux-clang/**

Binary file not shown.
@@ -1,125 +0,0 @@
#include "selfdrive/camerad/cameras/camera_replay.h"

#include <cassert>
#include <thread>

#include "selfdrive/common/clutil.h"
#include "selfdrive/common/util.h"

extern ExitHandler do_exit;

void camera_autoexposure(CameraState *s, float grey_frac) {}

namespace {

const char *BASE_URL = "https://commadataci.blob.core.windows.net/openpilotci/";

const std::string road_camera_route = "0c94aa1e1296d7c6|2021-05-05--19-48-37";
// const std::string driver_camera_route = "534ccd8a0950a00c|2021-06-08--12-15-37";

std::string get_url(std::string route_name, const std::string &camera, int segment_num) {
  std::replace(route_name.begin(), route_name.end(), '|', '/');
  return util::string_format("%s%s/%d/%s.hevc", BASE_URL, route_name.c_str(), segment_num, camera.c_str());
}

void camera_init(VisionIpcServer *v, CameraState *s, int camera_id, unsigned int fps, cl_device_id device_id, cl_context ctx, VisionStreamType rgb_type, VisionStreamType yuv_type, const std::string &url) {
  s->frame = new FrameReader();
  if (!s->frame->load(url)) {
    printf("failed to load stream from %s", url.c_str());
    assert(0);
  }

  CameraInfo ci = {
    .frame_width = (uint32_t)s->frame->width,
    .frame_height = (uint32_t)s->frame->height,
    .frame_stride = (uint32_t)s->frame->width * 3,
  };
  s->ci = ci;
  s->camera_num = camera_id;
  s->fps = fps;
  s->buf.init(device_id, ctx, s, v, FRAME_BUF_COUNT, rgb_type, yuv_type);
}

void camera_close(CameraState *s) {
  delete s->frame;
}

void run_camera(CameraState *s) {
  uint32_t stream_frame_id = 0, frame_id = 0;
  size_t buf_idx = 0;
  std::unique_ptr<uint8_t[]> rgb_buf = std::make_unique<uint8_t[]>(s->frame->getRGBSize());
  std::unique_ptr<uint8_t[]> yuv_buf = std::make_unique<uint8_t[]>(s->frame->getYUVSize());
  while (!do_exit) {
    if (stream_frame_id == s->frame->getFrameCount()) {
      // loop stream
      stream_frame_id = 0;
    }
    if (s->frame->get(stream_frame_id++, rgb_buf.get(), yuv_buf.get())) {
      s->buf.camera_bufs_metadata[buf_idx] = {.frame_id = frame_id};
      auto &buf = s->buf.camera_bufs[buf_idx];
      CL_CHECK(clEnqueueWriteBuffer(buf.copy_q, buf.buf_cl, CL_TRUE, 0, s->frame->getRGBSize(), rgb_buf.get(), 0, NULL, NULL));
      s->buf.queue(buf_idx);
      ++frame_id;
      buf_idx = (buf_idx + 1) % FRAME_BUF_COUNT;
    }
    util::sleep_for(1000 / s->fps);
  }
}

void road_camera_thread(CameraState *s) {
  util::set_thread_name("replay_road_camera_thread");
  run_camera(s);
}

// void driver_camera_thread(CameraState *s) {
//   util::set_thread_name("replay_driver_camera_thread");
//   run_camera(s);
// }

void process_road_camera(MultiCameraState *s, CameraState *c, int cnt) {
  const CameraBuf *b = &c->buf;
  MessageBuilder msg;
  auto framed = msg.initEvent().initRoadCameraState();
  fill_frame_data(framed, b->cur_frame_data);
  framed.setImage(kj::arrayPtr((const uint8_t *)b->cur_yuv_buf->addr, b->cur_yuv_buf->len));
  framed.setTransform(b->yuv_transform.v);
  s->pm->send("roadCameraState", msg);
}

// void process_driver_camera(MultiCameraState *s, CameraState *c, int cnt) {
//   MessageBuilder msg;
//   auto framed = msg.initEvent().initDriverCameraState();
//   framed.setFrameType(cereal::FrameData::FrameType::FRONT);
//   fill_frame_data(framed, c->buf.cur_frame_data);
//   s->pm->send("driverCameraState", msg);
// }

}  // namespace

void cameras_init(VisionIpcServer *v, MultiCameraState *s, cl_device_id device_id, cl_context ctx) {
  camera_init(v, &s->road_cam, CAMERA_ID_LGC920, 20, device_id, ctx,
              VISION_STREAM_RGB_ROAD, VISION_STREAM_ROAD, get_url(road_camera_route, "fcamera", 0));
  // camera_init(v, &s->driver_cam, CAMERA_ID_LGC615, 10, device_id, ctx,
  //             VISION_STREAM_RGB_DRIVER, VISION_STREAM_DRIVER, get_url(driver_camera_route, "dcamera", 0));
  s->pm = new PubMaster({"roadCameraState", "driverCameraState", "thumbnail"});
}

void cameras_open(MultiCameraState *s) {}

void cameras_close(MultiCameraState *s) {
  camera_close(&s->road_cam);
  camera_close(&s->driver_cam);
  delete s->pm;
}

void cameras_run(MultiCameraState *s) {
  std::vector<std::thread> threads;
  threads.push_back(start_process_thread(s, &s->road_cam, process_road_camera));
  // threads.push_back(start_process_thread(s, &s->driver_cam, process_driver_camera));
  // threads.push_back(std::thread(driver_camera_thread, &s->driver_cam));
  road_camera_thread(&s->road_cam);

  for (auto &t : threads) t.join();

  cameras_close(s);
}
@@ -1,25 +0,0 @@
#pragma once

#include "selfdrive/camerad/cameras/camera_common.h"
#include "selfdrive/ui/replay/framereader.h"

#define FRAME_BUF_COUNT 16

typedef struct CameraState {
  int camera_num;
  CameraInfo ci;

  int fps;
  float digital_gain = 0;

  CameraBuf buf;
  FrameReader *frame = nullptr;
} CameraState;

typedef struct MultiCameraState {
  CameraState road_cam;
  CameraState driver_cam;

  SubMaster *sm = nullptr;
  PubMaster *pm = nullptr;
} MultiCameraState;
@@ -1,197 +0,0 @@
#include "selfdrive/camerad/cameras/camera_webcam.h"

#include <unistd.h>

#include <cassert>
#include <cstring>

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wundefined-inline"
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/videoio.hpp>
#pragma clang diagnostic pop

#include "selfdrive/common/clutil.h"
#include "selfdrive/common/swaglog.h"
#include "selfdrive/common/timing.h"
#include "selfdrive/common/util.h"

// id of the video capturing device
const int ROAD_CAMERA_ID = util::getenv("ROADCAM_ID", 1);
const int DRIVER_CAMERA_ID = util::getenv("DRIVERCAM_ID", 2);

#define FRAME_WIDTH 1164
#define FRAME_HEIGHT 874
#define FRAME_WIDTH_FRONT 1152
#define FRAME_HEIGHT_FRONT 864

extern ExitHandler do_exit;

namespace {

CameraInfo cameras_supported[CAMERA_ID_MAX] = {
  // road facing
  [CAMERA_ID_LGC920] = {
    .frame_width = FRAME_WIDTH,
    .frame_height = FRAME_HEIGHT,
    .frame_stride = FRAME_WIDTH*3,
    .bayer = false,
    .bayer_flip = false,
  },
  // driver facing
  [CAMERA_ID_LGC615] = {
    .frame_width = FRAME_WIDTH_FRONT,
    .frame_height = FRAME_HEIGHT_FRONT,
    .frame_stride = FRAME_WIDTH_FRONT*3,
    .bayer = false,
    .bayer_flip = false,
  },
};

void camera_open(CameraState *s, bool rear) {
  // empty
}

void camera_close(CameraState *s) {
  // empty
}

void camera_init(VisionIpcServer * v, CameraState *s, int camera_id, unsigned int fps, cl_device_id device_id, cl_context ctx, VisionStreamType rgb_type, VisionStreamType yuv_type) {
  assert(camera_id < std::size(cameras_supported));
  s->ci = cameras_supported[camera_id];
  assert(s->ci.frame_width != 0);

  s->camera_num = camera_id;
  s->fps = fps;
  s->buf.init(device_id, ctx, s, v, FRAME_BUF_COUNT, rgb_type, yuv_type);
}

void run_camera(CameraState *s, cv::VideoCapture &video_cap, float *ts) {
  assert(video_cap.isOpened());

  cv::Size size(s->ci.frame_width, s->ci.frame_height);
  const cv::Mat transform = cv::Mat(3, 3, CV_32F, ts);
  uint32_t frame_id = 0;
  size_t buf_idx = 0;

  while (!do_exit) {
    cv::Mat frame_mat, transformed_mat;
    video_cap >> frame_mat;
    if (frame_mat.empty()) continue;

    cv::warpPerspective(frame_mat, transformed_mat, transform, size, cv::INTER_LINEAR, cv::BORDER_CONSTANT, 0);

    s->buf.camera_bufs_metadata[buf_idx] = {.frame_id = frame_id};

    auto &buf = s->buf.camera_bufs[buf_idx];
    int transformed_size = transformed_mat.total() * transformed_mat.elemSize();
    CL_CHECK(clEnqueueWriteBuffer(buf.copy_q, buf.buf_cl, CL_TRUE, 0, transformed_size, transformed_mat.data, 0, NULL, NULL));

    s->buf.queue(buf_idx);

    ++frame_id;
    buf_idx = (buf_idx + 1) % FRAME_BUF_COUNT;
  }
}

static void road_camera_thread(CameraState *s) {
  util::set_thread_name("webcam_road_camera_thread");

  cv::VideoCapture cap_road(ROAD_CAMERA_ID, cv::CAP_V4L2);  // road
  cap_road.set(cv::CAP_PROP_FRAME_WIDTH, 853);
  cap_road.set(cv::CAP_PROP_FRAME_HEIGHT, 480);
  cap_road.set(cv::CAP_PROP_FPS, s->fps);
  cap_road.set(cv::CAP_PROP_AUTOFOCUS, 0);  // off
  cap_road.set(cv::CAP_PROP_FOCUS, 0);  // 0 - 255?
  // cv::Rect roi_rear(160, 0, 960, 720);

  // transforms calculation see tools/webcam/warp_vis.py
  float ts[9] = {1.50330396, 0.0, -59.40969163,
                 0.0, 1.50330396, 76.20704846,
                 0.0, 0.0, 1.0};
  // if camera upside down:
  // float ts[9] = {-1.50330396, 0.0, 1223.4,
  //                0.0, -1.50330396, 797.8,
  //                0.0, 0.0, 1.0};

  run_camera(s, cap_road, ts);
}

void driver_camera_thread(CameraState *s) {
  cv::VideoCapture cap_driver(DRIVER_CAMERA_ID, cv::CAP_V4L2);  // driver
  cap_driver.set(cv::CAP_PROP_FRAME_WIDTH, 853);
  cap_driver.set(cv::CAP_PROP_FRAME_HEIGHT, 480);
  cap_driver.set(cv::CAP_PROP_FPS, s->fps);
  // cv::Rect roi_front(320, 0, 960, 720);

  // transforms calculation see tools/webcam/warp_vis.py
  float ts[9] = {1.42070485, 0.0, -30.16740088,
                 0.0, 1.42070485, 91.030837,
                 0.0, 0.0, 1.0};
  // if camera upside down:
  // float ts[9] = {-1.42070485, 0.0, 1182.2,
  //                0.0, -1.42070485, 773.0,
  //                0.0, 0.0, 1.0};
  run_camera(s, cap_driver, ts);
}

}  // namespace

void cameras_init(VisionIpcServer *v, MultiCameraState *s, cl_device_id device_id, cl_context ctx) {
  camera_init(v, &s->road_cam, CAMERA_ID_LGC920, 20, device_id, ctx,
              VISION_STREAM_RGB_ROAD, VISION_STREAM_ROAD);
  camera_init(v, &s->driver_cam, CAMERA_ID_LGC615, 10, device_id, ctx,
              VISION_STREAM_RGB_DRIVER, VISION_STREAM_DRIVER);
  s->pm = new PubMaster({"roadCameraState", "driverCameraState", "thumbnail"});
}

void camera_autoexposure(CameraState *s, float grey_frac) {}

void cameras_open(MultiCameraState *s) {
  // LOG("*** open driver camera ***");
  camera_open(&s->driver_cam, false);
  // LOG("*** open road camera ***");
  camera_open(&s->road_cam, true);
}

void cameras_close(MultiCameraState *s) {
  camera_close(&s->road_cam);
  camera_close(&s->driver_cam);
  delete s->pm;
}

void process_driver_camera(MultiCameraState *s, CameraState *c, int cnt) {
  MessageBuilder msg;
  auto framed = msg.initEvent().initDriverCameraState();
  framed.setFrameType(cereal::FrameData::FrameType::FRONT);
  fill_frame_data(framed, c->buf.cur_frame_data);
  s->pm->send("driverCameraState", msg);
}

void process_road_camera(MultiCameraState *s, CameraState *c, int cnt) {
  const CameraBuf *b = &c->buf;
  MessageBuilder msg;
  auto framed = msg.initEvent().initRoadCameraState();
  fill_frame_data(framed, b->cur_frame_data);
  framed.setImage(kj::arrayPtr((const uint8_t *)b->cur_yuv_buf->addr, b->cur_yuv_buf->len));
  framed.setTransform(b->yuv_transform.v);
  s->pm->send("roadCameraState", msg);
}

void cameras_run(MultiCameraState *s) {
  std::vector<std::thread> threads;
  threads.push_back(start_process_thread(s, &s->road_cam, process_road_camera));
  threads.push_back(start_process_thread(s, &s->driver_cam, process_driver_camera));

  std::thread t_rear = std::thread(road_camera_thread, &s->road_cam);
  util::set_thread_name("webcam_thread");
  driver_camera_thread(&s->driver_cam);

  t_rear.join();

  for (auto &t : threads) t.join();

  cameras_close(s);
}
@@ -1,28 +0,0 @@
#pragma once

#ifdef __APPLE__
#include <OpenCL/cl.h>
#else
#include <CL/cl.h>
#endif

#include "selfdrive/camerad/cameras/camera_common.h"

#define FRAME_BUF_COUNT 16

typedef struct CameraState {
  CameraInfo ci;
  int camera_num;
  int fps;
  float digital_gain;
  CameraBuf buf;
} CameraState;


typedef struct MultiCameraState {
  CameraState road_cam;
  CameraState driver_cam;

  SubMaster *sm;
  PubMaster *pm;
} MultiCameraState;
@@ -1,189 +1,157 @@
#ifdef HALF_AS_FLOAT
#define half float
#define half3 float3
#else
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
#endif

// post wb CCM
const __constant half3 color_correction_0 = (half3)(1.82717181, -0.31231438, 0.07307673);
const __constant half3 color_correction_1 = (half3)(-0.5743977, 1.36858544, -0.53183455);
const __constant half3 color_correction_2 = (half3)(-0.25277411, -0.05627105, 1.45875782);

// tone mapping params
const half gamma_k = 0.75;
const half gamma_b = 0.125;
const half mp = 0.01; // ideally midpoint should be adaptive
const half rk = 9 - 100*mp;

inline half3 gamma_apply(half3 x) {
#define UV_WIDTH RGB_WIDTH / 2
#define UV_HEIGHT RGB_HEIGHT / 2
#define U_OFFSET RGB_WIDTH * RGB_HEIGHT
#define V_OFFSET RGB_WIDTH * RGB_HEIGHT + UV_WIDTH * UV_HEIGHT

#define RGB_TO_Y(r, g, b) ((((mul24(b, 13) + mul24(g, 65) + mul24(r, 33)) + 64) >> 7) + 16)
#define RGB_TO_U(r, g, b) ((mul24(b, 56) - mul24(g, 37) - mul24(r, 19) + 0x8080) >> 8)
#define RGB_TO_V(r, g, b) ((mul24(r, 56) - mul24(g, 47) - mul24(b, 9) + 0x8080) >> 8)
#define AVERAGE(x, y, z, w) ((convert_ushort(x) + convert_ushort(y) + convert_ushort(z) + convert_ushort(w) + 1) >> 1)

float3 color_correct(float3 rgb) {
  // color correction
  float3 x = rgb.x * (float3)(1.82717181, -0.31231438, 0.07307673);
  x += rgb.y * (float3)(-0.5743977, 1.36858544, -0.53183455);
  x += rgb.z * (float3)(-0.25277411, -0.05627105, 1.45875782);

  // tone mapping params
  const float gamma_k = 0.75;
  const float gamma_b = 0.125;
  const float mp = 0.01; // ideally midpoint should be adaptive
  const float rk = 9 - 100*mp;

  // poly approximation for s curve
  return (x > mp) ?
    ((rk * (x-mp) * (1-(gamma_k*mp+gamma_b)) * (1+1/(rk*(1-mp))) / (1+rk*(x-mp))) + gamma_k*mp + gamma_b) :
    ((rk * (x-mp) * (gamma_k*mp+gamma_b) * (1+1/(rk*mp)) / (1-rk*(x-mp))) + gamma_k*mp + gamma_b);
}

inline half3 color_correct(half3 rgb) {
  half3 ret = (half)rgb.x * color_correction_0;
  ret += (half)rgb.y * color_correction_1;
  ret += (half)rgb.z * color_correction_2;
  return gamma_apply(ret);
}

inline half get_vignetting_s(float r) {
float get_vignetting_s(float r) {
  if (r < 62500) {
    return (half)(1.0f + 0.0000008f*r);
    return (1.0f + 0.0000008f*r);
  } else if (r < 490000) {
    return (half)(0.9625f + 0.0000014f*r);
    return (0.9625f + 0.0000014f*r);
  } else if (r < 1102500) {
    return (half)(1.26434f + 0.0000000000016f*r*r);
    return (1.26434f + 0.0000000000016f*r*r);
  } else {
    return (half)(0.53503625f + 0.0000000000022f*r*r);
    return (0.53503625f + 0.0000000000022f*r*r);
  }
}

inline half val_from_10(const uchar * source, int gx, int gy, half black_level) {
  // parse 12bit
  int start = gy * FRAME_STRIDE + (3 * (gx / 2)) + (FRAME_STRIDE * FRAME_OFFSET);
  int offset = gx % 2;
  uint major = (uint)source[start + offset] << 4;
  uint minor = (source[start + 2] >> (4 * offset)) & 0xf;
  half pv = (half)((major + minor)/4);

  // normalize
  pv = max((half)0.0, pv - black_level);
  pv /= (1024.0 - black_level);

  // correct vignetting
  if (CAM_NUM == 1) { // fcamera
    gx = (gx - RGB_WIDTH/2);
    gy = (gy - RGB_HEIGHT/2);
    pv *= get_vignetting_s(gx*gx + gy*gy);
  }

  pv = clamp(pv, (half)0.0, (half)1.0);
  return pv;
float4 val4_from_12(uchar8 pvs, float gain) {
  uint4 parsed = (uint4)(((uint)pvs.s0<<4) + (pvs.s1>>4),  // is from the previous 10 bit
                         ((uint)pvs.s2<<4) + (pvs.s4&0xF),
                         ((uint)pvs.s3<<4) + (pvs.s4>>4),
                         ((uint)pvs.s5<<4) + (pvs.s7&0xF));
  // normalize and scale
  float4 pv = (convert_float4(parsed) - 168.0) / (4096.0 - 168.0);
  return clamp(pv*gain, 0.0, 1.0);
}

inline half get_k(half a, half b, half c, half d) {
float get_k(float a, float b, float c, float d) {
  return 2.0 - (fabs(a - b) + fabs(c - d));
}

__kernel void debayer10(const __global uchar * in,
                        __global uchar * out,
                        __local half * cached,
                        float black_level
                        )
__kernel void debayer10(const __global uchar * in, __global uchar * out)
{
  const int x_global = get_global_id(0);
  const int y_global = get_global_id(1);

  const int x_local = get_local_id(0);
  const int y_local = get_local_id(1);

  const int localRowLen = 2 + get_local_size(0); // 2 padding
  const int localColLen = 2 + get_local_size(1);
  const int gid_x = get_global_id(0);
  const int gid_y = get_global_id(1);

  const int localOffset = (y_local + 1) * localRowLen + x_local + 1;
  const int y_top_mod = (gid_y == 0) ? 2: 0;
  const int y_bot_mod = (gid_y == (RGB_HEIGHT/2 - 1)) ? 1: 3;

  int out_idx = 3 * x_global + 3 * y_global * RGB_WIDTH;
  float3 rgb;
  uchar3 rgb_out[4];

  // cache padding
  int localColOffset = -1;
  int globalColOffset;
  int start = (2 * gid_y - 1) * FRAME_STRIDE + (3 * gid_x - 2) + (FRAME_STRIDE * FRAME_OFFSET);

  const int x_global_mod = (x_global == 0 || x_global == RGB_WIDTH - 1) ? -1: 1;
  const int y_global_mod = (y_global == 0 || y_global == RGB_HEIGHT - 1) ? -1: 1;
  // read in 8x4 chars
  uchar8 dat[4];
  dat[0] = vload8(0, in + start + FRAME_STRIDE*y_top_mod);
  dat[1] = vload8(0, in + start + FRAME_STRIDE*1);
  dat[2] = vload8(0, in + start + FRAME_STRIDE*2);
  dat[3] = vload8(0, in + start + FRAME_STRIDE*y_bot_mod);

  half pv = val_from_10(in, x_global, y_global, black_level);
  cached[localOffset] = pv;

  // cache padding
  if (x_local < 1) {
    localColOffset = x_local;
    globalColOffset = -1;
    cached[(y_local + 1) * localRowLen + x_local] = val_from_10(in, x_global-x_global_mod, y_global, black_level);
  } else if (x_local >= get_local_size(0) - 1) {
    localColOffset = x_local + 2;
    globalColOffset = 1;
    cached[localOffset + 1] = val_from_10(in, x_global+x_global_mod, y_global, black_level);
  }

  if (y_local < 1) {
    cached[y_local * localRowLen + x_local + 1] = val_from_10(in, x_global, y_global-y_global_mod, black_level);
    if (localColOffset != -1) {
      cached[y_local * localRowLen + localColOffset] = val_from_10(in, x_global+(x_global_mod*globalColOffset), y_global-y_global_mod, black_level);
    }
  } else if (y_local >= get_local_size(1) - 1) {
    cached[(y_local + 2) * localRowLen + x_local + 1] = val_from_10(in, x_global, y_global+y_global_mod, black_level);
    if (localColOffset != -1) {
      cached[(y_local + 2) * localRowLen + localColOffset] = val_from_10(in, x_global+(x_global_mod*globalColOffset), y_global+y_global_mod, black_level);
    }
  // correct vignetting
#if VIGNETTING
  int gx = (gid_x*2 - RGB_WIDTH/2);
  int gy = (gid_y*2 - RGB_HEIGHT/2);
  const float gain = get_vignetting_s(gx*gx + gy*gy);
#else
  const float gain = 1.0;
#endif

  // process them to floats
  float4 va = val4_from_12(dat[0], gain);
  float4 vb = val4_from_12(dat[1], gain);
  float4 vc = val4_from_12(dat[2], gain);
  float4 vd = val4_from_12(dat[3], gain);

  if (gid_x == 0) {
    va.s0 = va.s2;
    vb.s0 = vb.s2;
    vc.s0 = vc.s2;
    vd.s0 = vd.s2;
  } else if (gid_x == RGB_WIDTH/2 - 1) {
    va.s3 = va.s1;
    vb.s3 = vb.s1;
    vc.s3 = vc.s1;
    vd.s3 = vd.s1;
  }

  // sync
  barrier(CLK_LOCAL_MEM_FENCE);

  half d1 = cached[localOffset - localRowLen - 1];
  half d2 = cached[localOffset - localRowLen + 1];
  half d3 = cached[localOffset + localRowLen - 1];
  half d4 = cached[localOffset + localRowLen + 1];
  half n1 = cached[localOffset - localRowLen];
  half n2 = cached[localOffset + 1];
  half n3 = cached[localOffset + localRowLen];
  half n4 = cached[localOffset - 1];

  half3 rgb;

  // a simplified version of https://opensignalprocessingjournal.com/contents/volumes/V6/TOSIGPJ-6-1/TOSIGPJ-6-1.pdf
  if (x_global % 2 == 0) {
    if (y_global % 2 == 0) {
      rgb.y = pv; // G1(R)
      half k1 = get_k(d1, pv, d2, pv);
      half k2 = get_k(d2, pv, d4, pv);
      half k3 = get_k(d3, pv, d4, pv);
      half k4 = get_k(d1, pv, d3, pv);
      // R_G1
      rgb.x = (k2*n2+k4*n4)/(k2+k4);
      // B_G1
      rgb.z = (k1*n1+k3*n3)/(k1+k3);
    } else {
      rgb.z = pv; // B
      half k1 = get_k(d1, d3, d2, d4);
      half k2 = get_k(n1, n4, n2, n3);
      half k3 = get_k(d1, d2, d3, d4);
      half k4 = get_k(n1, n2, n3, n4);
      // G_B
      rgb.y = (k1*(n1+n3)*0.5+k3*(n2+n4)*0.5)/(k1+k3);
      // R_B
      rgb.x = (k2*(d2+d3)*0.5+k4*(d1+d4)*0.5)/(k2+k4);
    }
  } else {
    if (y_global % 2 == 0) {
      rgb.x = pv; // R
      half k1 = get_k(d1, d3, d2, d4);
      half k2 = get_k(n1, n4, n2, n3);
      half k3 = get_k(d1, d2, d3, d4);
      half k4 = get_k(n1, n2, n3, n4);
      // G_R
      rgb.y = (k1*(n1+n3)*0.5+k3*(n2+n4)*0.5)/(k1+k3);
      // B_R
      rgb.z = (k2*(d2+d3)*0.5+k4*(d1+d4)*0.5)/(k2+k4);
    } else {
      rgb.y = pv; // G2(B)
      half k1 = get_k(d1, pv, d2, pv);
      half k2 = get_k(d2, pv, d4, pv);
      half k3 = get_k(d3, pv, d4, pv);
      half k4 = get_k(d1, pv, d3, pv);
      // R_G2
      rgb.x = (k1*n1+k3*n3)/(k1+k3);
      // B_G2
      rgb.z = (k2*n2+k4*n4)/(k2+k4);
    }
  }

  uchar3 rgbc = convert_uchar3_sat(color_correct(clamp(rgb, (half)0.0, (half)1.0)) * 255.0);
  out[out_idx + 0] = rgbc.z;
  out[out_idx + 1] = rgbc.y;
  out[out_idx + 2] = rgbc.x;
  const float k01 = get_k(va.s0, vb.s1, va.s2, vb.s1);
  const float k02 = get_k(va.s2, vb.s1, vc.s2, vb.s1);
  const float k03 = get_k(vc.s0, vb.s1, vc.s2, vb.s1);
  const float k04 = get_k(va.s0, vb.s1, vc.s0, vb.s1);
  rgb.x = (k02*vb.s2+k04*vb.s0)/(k02+k04); // R_G1
  rgb.y = vb.s1; // G1(R)
  rgb.z = (k01*va.s1+k03*vc.s1)/(k01+k03); // B_G1
  rgb_out[0] = convert_uchar3_sat(color_correct(clamp(rgb, 0.0, 1.0)) * 255.0);

  const float k11 = get_k(va.s1, vc.s1, va.s3, vc.s3);
  const float k12 = get_k(va.s2, vb.s1, vb.s3, vc.s2);
  const float k13 = get_k(va.s1, va.s3, vc.s1, vc.s3);
  const float k14 = get_k(va.s2, vb.s3, vc.s2, vb.s1);
  rgb.x = vb.s2; // R
  rgb.y = (k11*(va.s2+vc.s2)*0.5+k13*(vb.s3+vb.s1)*0.5)/(k11+k13); // G_R
  rgb.z = (k12*(va.s3+vc.s1)*0.5+k14*(va.s1+vc.s3)*0.5)/(k12+k14); // B_R
  rgb_out[1] = convert_uchar3_sat(color_correct(clamp(rgb, 0.0, 1.0)) * 255.0);

  const float k21 = get_k(vb.s0, vd.s0, vb.s2, vd.s2);
  const float k22 = get_k(vb.s1, vc.s0, vc.s2, vd.s1);
  const float k23 = get_k(vb.s0, vb.s2, vd.s0, vd.s2);
  const float k24 = get_k(vb.s1, vc.s2, vd.s1, vc.s0);
  rgb.x = (k22*(vb.s2+vd.s0)*0.5+k24*(vb.s0+vd.s2)*0.5)/(k22+k24); // R_B
  rgb.y = (k21*(vb.s1+vd.s1)*0.5+k23*(vc.s2+vc.s0)*0.5)/(k21+k23); // G_B
  rgb.z = vc.s1; // B
  rgb_out[2] = convert_uchar3_sat(color_correct(clamp(rgb, 0.0, 1.0)) * 255.0);

  const float k31 = get_k(vb.s1, vc.s2, vb.s3, vc.s2);
  const float k32 = get_k(vb.s3, vc.s2, vd.s3, vc.s2);
  const float k33 = get_k(vd.s1, vc.s2, vd.s3, vc.s2);
  const float k34 = get_k(vb.s1, vc.s2, vd.s1, vc.s2);
  rgb.x = (k31*vb.s2+k33*vd.s2)/(k31+k33); // R_G2
  rgb.y = vc.s2; // G2(B)
  rgb.z = (k32*vc.s3+k34*vc.s1)/(k32+k34); // B_G2
  rgb_out[3] = convert_uchar3_sat(color_correct(clamp(rgb, 0.0, 1.0)) * 255.0);

  // write ys
  uchar2 yy = (uchar2)(
    RGB_TO_Y(rgb_out[0].s0, rgb_out[0].s1, rgb_out[0].s2),
    RGB_TO_Y(rgb_out[1].s0, rgb_out[1].s1, rgb_out[1].s2)
  );
  vstore2(yy, 0, out + mad24(gid_y * 2, RGB_WIDTH, gid_x * 2));
  yy = (uchar2)(
    RGB_TO_Y(rgb_out[2].s0, rgb_out[2].s1, rgb_out[2].s2),
    RGB_TO_Y(rgb_out[3].s0, rgb_out[3].s1, rgb_out[3].s2)
  );
  vstore2(yy, 0, out + mad24(gid_y * 2 + 1, RGB_WIDTH, gid_x * 2));

  // write uvs
  const short ar = AVERAGE(rgb_out[0].s0, rgb_out[1].s0, rgb_out[2].s0, rgb_out[3].s0);
  const short ag = AVERAGE(rgb_out[0].s1, rgb_out[1].s1, rgb_out[2].s1, rgb_out[3].s1);
  const short ab = AVERAGE(rgb_out[0].s2, rgb_out[1].s2, rgb_out[2].s2, rgb_out[3].s2);
  out[U_OFFSET + mad24(gid_y, UV_WIDTH, gid_x)] = RGB_TO_U(ar, ag, ab);
  out[V_OFFSET + mad24(gid_y, UV_WIDTH, gid_x)] = RGB_TO_V(ar, ag, ab);
}

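The fixed-point RGB_TO_Y macro in the new kernel above is an integer approximation of BT.601 limited-range luma (mul24 is plain multiplication for these small values). A small sanity check in Python, not part of the commit; the tolerance is chosen here for illustration:

# Sanity check: integer RGB_TO_Y from the kernel vs. floating-point BT.601 luma.
def rgb_to_y_fixed(r, g, b):
  return (((13 * b + 65 * g + 33 * r) + 64) >> 7) + 16

def rgb_to_y_float(r, g, b):
  return 16.0 + 0.257 * r + 0.504 * g + 0.098 * b

for r, g, b in [(0, 0, 0), (255, 255, 255), (200, 30, 90)]:
  assert abs(rgb_to_y_fixed(r, g, b) - rgb_to_y_float(r, g, b)) < 3.0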
File diff suppressed because one or more lines are too long
@@ -0,0 +1,26 @@
// TODO: cleanup AE tests
// needed by camera_common.cc
void camera_autoexposure(CameraState *s, float grey_frac) {}
void cameras_init(VisionIpcServer *v, MultiCameraState *s, cl_device_id device_id, cl_context ctx) {}
void cameras_open(MultiCameraState *s) {}
void cameras_run(MultiCameraState *s) {}

typedef struct CameraState {
  int camera_num;
  CameraInfo ci;

  int fps;
  float digital_gain = 0;

  CameraBuf buf;
} CameraState;

typedef struct MultiCameraState {
  CameraState road_cam;
  CameraState driver_cam;

  SubMaster *sm = nullptr;
  PubMaster *pm = nullptr;
} MultiCameraState;


@@ -1,35 +0,0 @@
Import('env', 'arch', 'SHARED')

if SHARED:
  fxn = env.SharedLibrary
else:
  fxn = env.Library

common_libs = [
  'params.cc',
  'statlog.cc',
  'swaglog.cc',
  'util.cc',
  'gpio.cc',
  'i2c.cc',
  'watchdog.cc',
]

_common = fxn('common', common_libs, LIBS="json11")

files = [
  'clutil.cc',
  'visionimg.cc',
]

if arch == "larch64":
  _gpu_libs = ["GLESv2"]
else:
  _gpu_libs = ["GL"]

_gpucommon = fxn('gpucommon', files, LIBS=_gpu_libs)
Export('_common', '_gpucommon', '_gpu_libs')

if GetOption('test'):
  env.Program('tests/test_util', ['tests/test_util.cc'], LIBS=[_common])
  env.Program('tests/test_swaglog', ['tests/test_swaglog.cc'], LIBS=[_common, 'json11', 'zmq', 'pthread'])
@@ -0,0 +1,84 @@
import math
import numpy as np

from common.numpy_fast import clip
from common.realtime import DT_CTRL
from cereal import log
from selfdrive.controls.lib.latcontrol import LatControl, MIN_STEER_SPEED


class LatControlLQR(LatControl):
  def __init__(self, CP, CI):
    super().__init__(CP, CI)
    self.scale = CP.lateralTuning.lqr.scale
    self.ki = CP.lateralTuning.lqr.ki

    self.A = np.array(CP.lateralTuning.lqr.a).reshape((2, 2))
    self.B = np.array(CP.lateralTuning.lqr.b).reshape((2, 1))
    self.C = np.array(CP.lateralTuning.lqr.c).reshape((1, 2))
    self.K = np.array(CP.lateralTuning.lqr.k).reshape((1, 2))
    self.L = np.array(CP.lateralTuning.lqr.l).reshape((2, 1))
    self.dc_gain = CP.lateralTuning.lqr.dcGain

    self.x_hat = np.array([[0], [0]])
    self.i_unwind_rate = 0.3 * DT_CTRL
    self.i_rate = 1.0 * DT_CTRL

    self.reset()

  def reset(self):
    super().reset()
    self.i_lqr = 0.0

  def update(self, active, CS, VM, params, last_actuators, desired_curvature, desired_curvature_rate, llk):
    lqr_log = log.ControlsState.LateralLQRState.new_message()

    torque_scale = (0.45 + CS.vEgo / 60.0)**2  # Scale actuator model with speed

    # Subtract offset. Zero angle should correspond to zero torque
    steering_angle_no_offset = CS.steeringAngleDeg - params.angleOffsetAverageDeg

    desired_angle = math.degrees(VM.get_steer_from_curvature(-desired_curvature, CS.vEgo, params.roll))

    instant_offset = params.angleOffsetDeg - params.angleOffsetAverageDeg
    desired_angle += instant_offset  # Only add offset that originates from vehicle model errors
    lqr_log.steeringAngleDesiredDeg = desired_angle

    # Update Kalman filter
    angle_steers_k = float(self.C.dot(self.x_hat))
    e = steering_angle_no_offset - angle_steers_k
    self.x_hat = self.A.dot(self.x_hat) + self.B.dot(CS.steeringTorqueEps / torque_scale) + self.L.dot(e)

    if CS.vEgo < MIN_STEER_SPEED or not active:
      lqr_log.active = False
      lqr_output = 0.
      output_steer = 0.
      self.reset()
    else:
      lqr_log.active = True

      # LQR
      u_lqr = float(desired_angle / self.dc_gain - self.K.dot(self.x_hat))
      lqr_output = torque_scale * u_lqr / self.scale

      # Integrator
      if CS.steeringPressed:
        self.i_lqr -= self.i_unwind_rate * float(np.sign(self.i_lqr))
      else:
        error = desired_angle - angle_steers_k
        i = self.i_lqr + self.ki * self.i_rate * error
        control = lqr_output + i

        if (error >= 0 and (control <= self.steer_max or i < 0.0)) or \
           (error <= 0 and (control >= -self.steer_max or i > 0.0)):
          self.i_lqr = i

      output_steer = lqr_output + self.i_lqr
      output_steer = clip(output_steer, -self.steer_max, self.steer_max)

    lqr_log.steeringAngleDeg = angle_steers_k
    lqr_log.i = self.i_lqr
    lqr_log.output = output_steer
    lqr_log.lqrOutput = lqr_output
    lqr_log.saturated = self._check_saturation(self.steer_max - abs(output_steer) < 1e-3, CS)
    return output_steer, desired_angle, lqr_log
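The x_hat update inside LatControlLQR.update() above is a standard observer step, x̂ ← A·x̂ + B·u + L·(y − C·x̂). A minimal numeric sketch with made-up matrices; the real A, B, C, K, L and dcGain come from the car-specific lateral tuning and are not reproduced here:

# Minimal sketch, not the car-specific tuning: one observer step as used above.
import numpy as np

A = np.array([[1.0, 0.01], [0.0, 1.0]])   # hypothetical state transition
B = np.array([[0.0], [0.01]])             # hypothetical input matrix
C = np.array([[1.0, 0.0]])                # measurement picks the first state
L = np.array([[0.1], [0.05]])             # hypothetical observer gain

x_hat = np.zeros((2, 1))
u = 0.5   # scaled steering torque input
y = 2.0   # measured steering angle with offset removed

e = y - C.dot(x_hat)[0, 0]                # innovation
x_hat = A.dot(x_hat) + B.dot([[u]]) + L.dot([[e]])
print(x_hat.ravel())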
Some files were not shown because too many files have changed in this diff.