loggerd: Separate writer from encoder (#24257)

* separate writer from encoder

* video writer in release

* confirm loggerd on PC works, add YUV option to compressed_vipc

* make raw_logger use video_writer

* put this back to master

* close codec

* put this back

* use unique_ptr

Co-authored-by: Comma Device <device@comma.ai>
old-commit-hash: 8d24655787
Authored by George Hotz 3 years ago, committed by GitHub
parent 5a55e1bb79
commit b872fe92ab
9 changed files (lines changed per file):

  1. release/files_common (2)
  2. selfdrive/loggerd/SConscript (2)
  3. selfdrive/loggerd/omx_encoder.cc (129)
  4. selfdrive/loggerd/omx_encoder.h (17)
  5. selfdrive/loggerd/raw_logger.cc (119)
  6. selfdrive/loggerd/raw_logger.h (10)
  7. selfdrive/loggerd/video_writer.cc (118, new file)
  8. selfdrive/loggerd/video_writer.h (25, new file)
  9. tools/camerastream/compressed_vipc.py (28)

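In short, the encoders no longer touch libavformat or file handles themselves: they produce compressed buffers and hand them to the new VideoWriter, which owns the lock file, the container/raw output and the teardown. A minimal sketch of the resulting flow against the interface added in selfdrive/loggerd/video_writer.h below (the wrapper type, filename and dimensions here are placeholders, not code from this commit):

#include <memory>
#include "selfdrive/loggerd/video_writer.h"

// Hypothetical encoder shell, just to show the division of responsibilities.
struct EncoderSketch {
  std::unique_ptr<VideoWriter> writer;

  void open(const char *segment_path) {
    // remuxing=true -> mux into a container; h265/raw pick the codec handling
    // (see the real call sites further down in this diff).
    writer.reset(new VideoWriter(segment_path, "example.mp4", true, 1280, 720, 20, false, false));
  }

  // Called for every compressed buffer the hardware/software encoder emits.
  void on_output(uint8_t *data, int len, long long ts_us, bool codec_config, bool keyframe) {
    if (writer) writer->write(data, len, ts_us, codec_config, keyframe);
  }

  void close() {
    writer.reset();  // VideoWriter's destructor writes the trailer, closes the file and removes the .lock
  }
};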
release/files_common:
@@ -301,6 +301,8 @@ selfdrive/loggerd/SConscript
selfdrive/loggerd/encoder.h
selfdrive/loggerd/omx_encoder.cc
selfdrive/loggerd/omx_encoder.h
selfdrive/loggerd/video_writer.cc
selfdrive/loggerd/video_writer.h
selfdrive/loggerd/logger.cc
selfdrive/loggerd/logger.h
selfdrive/loggerd/loggerd.cc

selfdrive/loggerd/SConscript:
@@ -6,7 +6,7 @@ libs = [common, cereal, messaging, visionipc,
'avformat', 'avcodec', 'swscale', 'avutil',
'yuv', 'bz2', 'OpenCL', 'pthread']
src = ['logger.cc', 'loggerd.cc']
src = ['logger.cc', 'loggerd.cc', 'video_writer.cc']
if arch == "larch64":
src += ['omx_encoder.cc']
libs += ['OmxCore', 'gsl', 'CB'] + gpucommon

selfdrive/loggerd/omx_encoder.cc:
@@ -324,7 +324,7 @@ OmxEncoder::OmxEncoder(const char* filename, CameraType type, int in_width, int
service_name = this->type == DriverCam ? "driverEncodeData" :
(this->type == WideRoadCam ? "wideRoadEncodeData" :
(this->remuxing ? "qRoadEncodeData" : "roadEncodeData"));
pm = new PubMaster({service_name});
pm.reset(new PubMaster({service_name}));
}
void OmxEncoder::callback_handler(OmxEncoder *e) {
@@ -393,66 +393,12 @@ void OmxEncoder::write_and_broadcast_handler(OmxEncoder *e){
void OmxEncoder::handle_out_buf(OmxEncoder *e, OmxBuffer *out_buf) {
int err;
if (out_buf->header.nFlags & OMX_BUFFERFLAG_CODECCONFIG) {
if (e->codec_config_len < out_buf->header.nFilledLen) {
e->codec_config = (uint8_t *)realloc(e->codec_config, out_buf->header.nFilledLen);
}
e->codec_config_len = out_buf->header.nFilledLen;
memcpy(e->codec_config, out_buf->data, out_buf->header.nFilledLen);
// TODO: is still needed?
#ifdef QCOM2
out_buf->header.nTimeStamp = 0;
#endif
}
if (e->of) {
//printf("write %d flags 0x%x\n", out_buf->nFilledLen, out_buf->nFlags);
size_t written = util::safe_fwrite(out_buf->data, 1, out_buf->header.nFilledLen, e->of);
if (written != out_buf->header.nFilledLen) {
LOGE("failed to write file.errno=%d", errno);
}
}
if (e->remuxing) {
if (!e->wrote_codec_config && e->codec_config_len > 0) {
// extradata will be freed by av_free() in avcodec_free_context()
e->codec_ctx->extradata = (uint8_t*)av_mallocz(e->codec_config_len + AV_INPUT_BUFFER_PADDING_SIZE);
e->codec_ctx->extradata_size = e->codec_config_len;
memcpy(e->codec_ctx->extradata, e->codec_config, e->codec_config_len);
err = avcodec_parameters_from_context(e->out_stream->codecpar, e->codec_ctx);
assert(err >= 0);
err = avformat_write_header(e->ofmt_ctx, NULL);
assert(err >= 0);
e->wrote_codec_config = true;
}
if (out_buf->header.nTimeStamp > 0) {
// input timestamps are in microseconds
AVRational in_timebase = {1, 1000000};
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = out_buf->data;
pkt.size = out_buf->header.nFilledLen;
enum AVRounding rnd = static_cast<enum AVRounding>(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
pkt.pts = pkt.dts = av_rescale_q_rnd(out_buf->header.nTimeStamp, in_timebase, e->ofmt_ctx->streams[0]->time_base, rnd);
pkt.duration = av_rescale_q(50*1000, in_timebase, e->ofmt_ctx->streams[0]->time_base);
if (out_buf->header.nFlags & OMX_BUFFERFLAG_SYNCFRAME) {
pkt.flags |= AV_PKT_FLAG_KEY;
}
err = av_write_frame(e->ofmt_ctx, &pkt);
if (err < 0) { LOGW("ts encoder write issue"); }
av_free_packet(&pkt);
}
if (!(out_buf->header.nFlags & OMX_BUFFERFLAG_EOS) && e->writer) {
e->writer->write(out_buf->data,
out_buf->header.nFilledLen,
out_buf->header.nTimeStamp,
out_buf->header.nFlags & OMX_BUFFERFLAG_CODECCONFIG,
out_buf->header.nFlags & OMX_BUFFERFLAG_SYNCFRAME);
}
}
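The whole removed block above collapses into that single guarded call; the OMX buffer flags map one-to-one onto write()'s boolean arguments. Restated with comments (same statement as in the hunk, annotations are editorial):

// Forward every buffer except the end-of-stream marker to the writer.
if (!(out_buf->header.nFlags & OMX_BUFFERFLAG_EOS) && e->writer) {
  e->writer->write(out_buf->data,                                        // compressed bitstream
                   out_buf->header.nFilledLen,                           // payload length in bytes
                   out_buf->header.nTimeStamp,                           // timestamp in microseconds
                   out_buf->header.nFlags & OMX_BUFFERFLAG_CODECCONFIG,  // SPS/PPS/VPS -> extradata + container header
                   out_buf->header.nFlags & OMX_BUFFERFLAG_SYNCFRAME);   // keyframe -> AV_PKT_FLAG_KEY
}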
@@ -527,49 +473,10 @@ int OmxEncoder::encode_frame(const uint8_t *y_ptr, const uint8_t *u_ptr, const u
}
void OmxEncoder::encoder_open(const char* path) {
int err;
snprintf(this->vid_path, sizeof(this->vid_path), "%s/%s", path, this->filename);
LOGD("encoder_open %s remuxing:%d", this->vid_path, this->remuxing);
if (this->remuxing) {
avformat_alloc_output_context2(&this->ofmt_ctx, NULL, NULL, this->vid_path);
assert(this->ofmt_ctx);
this->out_stream = avformat_new_stream(this->ofmt_ctx, NULL);
assert(this->out_stream);
// set codec correctly
av_register_all();
AVCodec *codec = NULL;
codec = avcodec_find_encoder(AV_CODEC_ID_H264);
assert(codec);
this->codec_ctx = avcodec_alloc_context3(codec);
assert(this->codec_ctx);
this->codec_ctx->width = this->width;
this->codec_ctx->height = this->height;
this->codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
this->codec_ctx->time_base = (AVRational){ 1, this->fps };
err = avio_open(&this->ofmt_ctx->pb, this->vid_path, AVIO_FLAG_WRITE);
assert(err >= 0);
this->wrote_codec_config = false;
} else {
if (this->write) {
this->of = util::safe_fopen(this->vid_path, "wb");
assert(this->of);
}
if (this->write) {
writer.reset(new VideoWriter(path, this->filename, this->remuxing, this->width, this->height, this->fps, !this->remuxing, false));
}
// create camera lock file
snprintf(this->lock_path, sizeof(this->lock_path), "%s/%s.lock", path, this->filename);
int lock_fd = HANDLE_EINTR(open(this->lock_path, O_RDWR | O_CREAT, 0664));
assert(lock_fd >= 0);
close(lock_fd);
// start writer threads
callback_handler_thread = std::thread(OmxEncoder::callback_handler, this);
write_handler_thread = std::thread(OmxEncoder::write_and_broadcast_handler, this);
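For reference, the two call sites in this commit configure VideoWriter quite differently; the calls below are copied from this diff with the flag meanings spelled out:

// OmxEncoder (above): the qRoad stream remuxes H.264 into a container picked from the file
// extension; the full-resolution camera streams set remuxing=false and fwrite the raw bitstream.
writer.reset(new VideoWriter(path, this->filename, this->remuxing,
                             this->width, this->height, this->fps,
                             /*h265=*/!this->remuxing, /*raw=*/false));

// RawLogger (later in this diff): always remuxes, and raw=true makes VideoWriter open the
// FFVHUFF encoder itself and mux into matroska.
writer = new VideoWriter(path, this->filename, /*remuxing=*/true,
                         frame->width, frame->height, this->fps,
                         /*h265=*/false, /*raw=*/true);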
@@ -597,19 +504,7 @@ void OmxEncoder::encoder_close() {
callback_handler_thread.join();
write_handler_thread.join();
if (this->remuxing) {
av_write_trailer(this->ofmt_ctx);
avcodec_free_context(&this->codec_ctx);
avio_closep(&this->ofmt_ctx->pb);
avformat_free_context(this->ofmt_ctx);
} else {
if (this->of) {
util::safe_fflush(this->of);
fclose(this->of);
this->of = nullptr;
}
}
unlink(this->lock_path);
writer.reset();
}
this->is_open = false;
}
@@ -644,10 +539,6 @@ OmxEncoder::~OmxEncoder() {
free(write_buf);
};
if (this->codec_config) {
free(this->codec_config);
}
if (this->downscale) {
free(this->y_ptr2);
free(this->u_ptr2);

selfdrive/loggerd/omx_encoder.h:
@@ -6,12 +6,10 @@
#include <thread>
#include <OMX_Component.h>
extern "C" {
#include <libavformat/avformat.h>
}
#include "selfdrive/common/queue.h"
#include "selfdrive/loggerd/encoder.h"
#include "selfdrive/loggerd/video_writer.h"
struct OmxBuffer {
OMX_BUFFERHEADERTYPE header;
@@ -45,8 +43,6 @@ private:
int in_width_, in_height_;
int width, height, fps;
char vid_path[1024];
char lock_path[1024];
bool is_open = false;
bool dirty = false;
bool write = false;
@@ -54,17 +50,12 @@ private:
std::thread callback_handler_thread;
std::thread write_handler_thread;
int segment_num = -1;
PubMaster *pm;
std::unique_ptr<PubMaster> pm;
const char *service_name;
const char* filename;
FILE *of = nullptr;
CameraType type;
size_t codec_config_len;
uint8_t *codec_config = NULL;
bool wrote_codec_config;
std::mutex state_lock;
std::condition_variable state_cv;
OMX_STATETYPE state = OMX_StateLoaded;
@@ -80,10 +71,8 @@ private:
SafeQueue<OMX_BUFFERHEADERTYPE *> done_out;
SafeQueue<OmxBuffer *> to_write;
AVFormatContext *ofmt_ctx;
AVCodecContext *codec_ctx;
AVStream *out_stream;
bool remuxing;
std::unique_ptr<VideoWriter> writer;
bool downscale;
uint8_t *y_ptr2, *u_ptr2, *v_ptr2;

selfdrive/loggerd/raw_logger.cc:
@@ -25,32 +25,10 @@ extern "C" {
RawLogger::RawLogger(const char* filename, CameraType type, int in_width, int in_height, int fps,
int bitrate, bool h265, int out_width, int out_height, bool write)
: in_width_(in_width), in_height_(in_height), filename(filename), fps(fps) {
// TODO: respect write arg
codec = avcodec_find_encoder(AV_CODEC_ID_FFVHUFF);
// codec = avcodec_find_encoder(AV_CODEC_ID_FFV1);
assert(codec);
codec_ctx = avcodec_alloc_context3(codec);
assert(codec_ctx);
codec_ctx->width = out_width;
codec_ctx->height = out_height;
codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
// codec_ctx->thread_count = 2;
// ffv1enc doesn't respect AV_PICTURE_TYPE_I. make every frame a key frame for now.
// codec_ctx->gop_size = 0;
codec_ctx->time_base = (AVRational){ 1, fps };
int err = avcodec_open2(codec_ctx, codec, NULL);
assert(err >= 0);
frame = av_frame_alloc();
assert(frame);
frame->format = codec_ctx->pix_fmt;
frame->format = AV_PIX_FMT_YUV420P;
frame->width = out_width;
frame->height = out_height;
frame->linesize[0] = out_width;
@@ -63,60 +41,20 @@ RawLogger::RawLogger(const char* filename, CameraType type, int in_width, int in
}
RawLogger::~RawLogger() {
encoder_close();
av_frame_free(&frame);
avcodec_close(codec_ctx);
av_free(codec_ctx);
}
void RawLogger::encoder_open(const char* path) {
vid_path = util::string_format("%s/%s", path, filename);
// create camera lock file
lock_path = util::string_format("%s/%s.lock", path, filename);
LOG("open %s\n", lock_path.c_str());
int lock_fd = HANDLE_EINTR(open(lock_path.c_str(), O_RDWR | O_CREAT, 0664));
assert(lock_fd >= 0);
close(lock_fd);
format_ctx = NULL;
avformat_alloc_output_context2(&format_ctx, NULL, "matroska", vid_path.c_str());
assert(format_ctx);
stream = avformat_new_stream(format_ctx, codec);
// AVStream *stream = avformat_new_stream(format_ctx, NULL);
assert(stream);
stream->id = 0;
stream->time_base = (AVRational){ 1, fps };
// codec_ctx->time_base = stream->time_base;
int err = avcodec_parameters_from_context(stream->codecpar, codec_ctx);
assert(err >= 0);
err = avio_open(&format_ctx->pb, vid_path.c_str(), AVIO_FLAG_WRITE);
assert(err >= 0);
err = avformat_write_header(format_ctx, NULL);
assert(err >= 0);
writer = new VideoWriter(path, this->filename, true, frame->width, frame->height, this->fps, false, true);
// write the header
writer->write(NULL, 0, 0, true, false);
is_open = true;
counter = 0;
}
void RawLogger::encoder_close() {
if (!is_open) return;
int err = av_write_trailer(format_ctx);
assert(err == 0);
err = avio_closep(&format_ctx->pb);
assert(err == 0);
avformat_free_context(format_ctx);
format_ctx = NULL;
unlink(lock_path.c_str());
delete writer;
is_open = false;
}
@@ -124,23 +62,19 @@ int RawLogger::encode_frame(const uint8_t *y_ptr, const uint8_t *u_ptr, const ui
int in_width, int in_height, uint64_t ts) {
assert(in_width == this->in_width_);
assert(in_height == this->in_height_);
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
if (downscale_buf.size() > 0) {
uint8_t *out_y = downscale_buf.data();
uint8_t *out_u = out_y + codec_ctx->width * codec_ctx->height;
uint8_t *out_v = out_u + (codec_ctx->width / 2) * (codec_ctx->height / 2);
uint8_t *out_u = out_y + frame->width * frame->height;
uint8_t *out_v = out_u + (frame->width / 2) * (frame->height / 2);
libyuv::I420Scale(y_ptr, in_width,
u_ptr, in_width/2,
v_ptr, in_width/2,
in_width, in_height,
out_y, codec_ctx->width,
out_u, codec_ctx->width/2,
out_v, codec_ctx->width/2,
codec_ctx->width, codec_ctx->height,
out_y, frame->width,
out_u, frame->width/2,
out_v, frame->width/2,
frame->width, frame->height,
libyuv::kFilterNone);
frame->data[0] = out_y;
frame->data[1] = out_u;
@@ -150,18 +84,22 @@ int RawLogger::encode_frame(const uint8_t *y_ptr, const uint8_t *u_ptr, const ui
frame->data[1] = (uint8_t*)u_ptr;
frame->data[2] = (uint8_t*)v_ptr;
}
frame->pts = counter;
frame->pts = counter*50*1000; // 50ms per frame
int ret = counter;
int err = avcodec_send_frame(codec_ctx, frame);
if (ret < 0) {
LOGE("avcode_send_frame error %d", err);
int err = avcodec_send_frame(writer->codec_ctx, frame);
if (err < 0) {
LOGE("avcodec_send_frame error %d", err);
ret = -1;
}
while (ret >= 0){
err = avcodec_receive_packet(codec_ctx, &pkt);
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
while (ret >= 0) {
err = avcodec_receive_packet(writer->codec_ctx, &pkt);
if (err == AVERROR_EOF) {
break;
} else if (err == AVERROR(EAGAIN)) {
@@ -174,18 +112,9 @@ int RawLogger::encode_frame(const uint8_t *y_ptr, const uint8_t *u_ptr, const ui
break;
}
av_packet_rescale_ts(&pkt, codec_ctx->time_base, stream->time_base);
pkt.stream_index = 0;
err = av_interleaved_write_frame(format_ctx, &pkt);
if (err < 0) {
LOGE("av_interleaved_write_frame %d", err);
ret = -1;
} else {
counter++;
}
writer->write(pkt.data, pkt.size, pkt.pts, false, pkt.flags & AV_PKT_FLAG_KEY);
counter++;
}
av_packet_unref(&pkt);
return ret;
}
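RawLogger now encodes through writer->codec_ctx and pushes every packet to the writer, which rescales the microsecond pts into the stream time base. A condensed sketch of that loop (same libavcodec calls as above; packet cleanup moved inside the loop, error handling trimmed):

// 20 fps stream, so each frame advances the clock by 50 ms = 50*1000 microseconds.
frame->pts = counter * 50 * 1000;

if (avcodec_send_frame(writer->codec_ctx, frame) == 0) {
  AVPacket pkt;
  av_init_packet(&pkt);
  pkt.data = NULL;
  pkt.size = 0;
  while (avcodec_receive_packet(writer->codec_ctx, &pkt) == 0) {
    // pkt.pts carries the microsecond value set on the frame; VideoWriter::write() rescales it.
    writer->write(pkt.data, pkt.size, pkt.pts, false, pkt.flags & AV_PKT_FLAG_KEY);
    counter++;
    av_packet_unref(&pkt);
  }
}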

selfdrive/loggerd/raw_logger.h:
@@ -12,6 +12,7 @@ extern "C" {
}
#include "selfdrive/loggerd/encoder.h"
#include "selfdrive/loggerd/video_writer.h"
class RawLogger : public VideoEncoder {
public:
@@ -31,14 +32,9 @@ private:
bool is_open = false;
int in_width_, in_height_;
std::string vid_path, lock_path;
const AVCodec *codec = NULL;
AVCodecContext *codec_ctx = NULL;
AVStream *stream = NULL;
AVFormatContext *format_ctx = NULL;
AVFrame *frame = NULL;
std::vector<uint8_t> downscale_buf;
VideoWriter *writer = NULL;
};

selfdrive/loggerd/video_writer.cc (new file):
@@ -0,0 +1,118 @@
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#include <cassert>
#include <cstdlib>
#include "selfdrive/loggerd/video_writer.h"
#include "selfdrive/common/swaglog.h"
#include "selfdrive/common/util.h"
VideoWriter::VideoWriter(const char *path, const char *filename, bool remuxing, int width, int height, int fps, bool h265, bool raw)
: remuxing(remuxing), raw(raw) {
vid_path = util::string_format("%s/%s", path, filename);
lock_path = util::string_format("%s/%s.lock", path, filename);
int lock_fd = HANDLE_EINTR(open(lock_path.c_str(), O_RDWR | O_CREAT, 0664));
assert(lock_fd >= 0);
close(lock_fd);
LOGD("encoder_open %s remuxing:%d", this->vid_path.c_str(), this->remuxing);
if (this->remuxing) {
avformat_alloc_output_context2(&this->ofmt_ctx, NULL, raw ? "matroska" : NULL, this->vid_path.c_str());
assert(this->ofmt_ctx);
// set codec correctly. needed?
av_register_all();
AVCodec *codec = NULL;
assert(!h265);
codec = avcodec_find_encoder(raw ? AV_CODEC_ID_FFVHUFF : AV_CODEC_ID_H264);
assert(codec);
this->codec_ctx = avcodec_alloc_context3(codec);
assert(this->codec_ctx);
this->codec_ctx->width = width;
this->codec_ctx->height = height;
this->codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
this->codec_ctx->time_base = (AVRational){ 1, fps };
if (raw) {
// since the codec is actually used, we open it
int err = avcodec_open2(this->codec_ctx, codec, NULL);
assert(err >= 0);
}
this->out_stream = avformat_new_stream(this->ofmt_ctx, raw ? codec : NULL);
assert(this->out_stream);
int err = avio_open(&this->ofmt_ctx->pb, this->vid_path.c_str(), AVIO_FLAG_WRITE);
assert(err >= 0);
this->wrote_codec_config = false;
} else {
this->of = util::safe_fopen(this->vid_path.c_str(), "wb");
assert(this->of);
}
}
void VideoWriter::write(uint8_t *data, int len, long long timestamp, bool codecconfig, bool keyframe) {
if (of && data) {
size_t written = util::safe_fwrite(data, 1, len, of);
if (written != len) {
LOGE("failed to write file.errno=%d", errno);
}
}
if (remuxing) {
if (codecconfig) {
if (data) {
codec_ctx->extradata = (uint8_t*)av_mallocz(len + AV_INPUT_BUFFER_PADDING_SIZE);
codec_ctx->extradata_size = len;
memcpy(codec_ctx->extradata, data, len);
}
int err = avcodec_parameters_from_context(out_stream->codecpar, codec_ctx);
assert(err >= 0);
err = avformat_write_header(ofmt_ctx, NULL);
assert(err >= 0);
} else {
// input timestamps are in microseconds
AVRational in_timebase = {1, 1000000};
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = data;
pkt.size = len;
enum AVRounding rnd = static_cast<enum AVRounding>(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
pkt.pts = pkt.dts = av_rescale_q_rnd(timestamp, in_timebase, ofmt_ctx->streams[0]->time_base, rnd);
pkt.duration = av_rescale_q(50*1000, in_timebase, ofmt_ctx->streams[0]->time_base);
if (keyframe) {
pkt.flags |= AV_PKT_FLAG_KEY;
}
// TODO: can use av_write_frame for non raw?
int err = av_interleaved_write_frame(ofmt_ctx, &pkt);
if (err < 0) { LOGW("ts encoder write issue"); }
av_free_packet(&pkt);
}
}
}
VideoWriter::~VideoWriter() {
if (this->remuxing) {
if (this->raw) { avcodec_close(this->codec_ctx); }
int err = av_write_trailer(this->ofmt_ctx);
if (err != 0) LOGE("av_write_trailer failed %d", err);
avcodec_free_context(&this->codec_ctx);
err = avio_closep(&this->ofmt_ctx->pb);
if (err != 0) LOGE("avio_closep failed %d", err);
avformat_free_context(this->ofmt_ctx);
} else {
util::safe_fflush(this->of);
fclose(this->of);
this->of = nullptr;
}
unlink(this->lock_path.c_str());
}
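The only arithmetic in write() is the time-base conversion: timestamps come in as microseconds and av_rescale_q_rnd maps them onto the output stream's time base, with a fixed 50 ms duration per packet. A small self-contained illustration (the 90 kHz time base is just an example, typical of MPEG-TS):

extern "C" {
#include <libavutil/mathematics.h>
}
#include <cstdio>

int main() {
  AVRational in_timebase = {1, 1000000};  // input timestamps are microseconds
  AVRational stream_tb = {1, 90000};      // example output stream time base
  enum AVRounding rnd = static_cast<enum AVRounding>(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);

  long long ts_us = 250 * 1000;  // e.g. the 5th frame of a 20 fps stream: 250 ms
  long long pts = av_rescale_q_rnd(ts_us, in_timebase, stream_tb, rnd);   // -> 22500 ticks
  long long duration = av_rescale_q(50 * 1000, in_timebase, stream_tb);   // 50 ms -> 4500 ticks
  printf("pts=%lld duration=%lld\n", pts, duration);
  return 0;
}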

selfdrive/loggerd/video_writer.h (new file):
@@ -0,0 +1,25 @@
#pragma once
#include <string>
extern "C" {
#include <libavformat/avformat.h>
}
class VideoWriter {
public:
VideoWriter(const char *path, const char *filename, bool remuxing, int width, int height, int fps, bool h265, bool raw);
void write(uint8_t *data, int len, long long timestamp, bool codecconfig, bool keyframe);
~VideoWriter();
AVCodecContext *codec_ctx;
private:
std::string vid_path, lock_path;
FILE *of = nullptr;
AVFormatContext *ofmt_ctx;
AVStream *out_stream;
bool remuxing, raw;
bool wrote_codec_config;
};
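Usage mirrors the call sites above; a minimal standalone sketch of the raw/matroska path (directory, filename and dimensions are placeholders):

#include "selfdrive/loggerd/video_writer.h"

int main() {
  // raw=true: VideoWriter opens the FFVHUFF encoder itself and muxes into matroska.
  VideoWriter w("/tmp", "example--fcamera.mkv", true, 1928, 1208, 20, false, true);

  // As in RawLogger::encoder_open(): a codec-config write with no data just emits
  // the container header built from w.codec_ctx.
  w.write(NULL, 0, 0, true, false);

  // ... encode frames with w.codec_ctx (avcodec_send_frame / avcodec_receive_packet)
  // and hand each packet back:
  //   w.write(pkt.data, pkt.size, pkt.pts, false, pkt.flags & AV_PKT_FLAG_KEY);

  return 0;  // destructor writes the trailer, closes the file and removes the .lock
}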

tools/camerastream/compressed_vipc.py:
@@ -37,15 +37,21 @@ def writer(fn, addr, sock_name):
fifo_file.write(evta.data)
fifo_file.flush()
def decoder_nvidia(fn, vipc_server, vst):
def decoder_nvidia(fn, vipc_server, vst, yuv=False):
sys.path.append("/raid.dell2/PyNvCodec")
import PyNvCodec as nvc # pylint: disable=import-error
decoder = nvc.PyNvDecoder(fn, 0, {"probesize": "32"})
conv = nvc.PySurfaceConverter(W, H, nvc.PixelFormat.NV12, nvc.PixelFormat.BGR, 0)
cc1 = nvc.ColorspaceConversionContext(nvc.ColorSpace.BT_709, nvc.ColorRange.JPEG)
nvDwn = nvc.PySurfaceDownloader(W, H, nvc.PixelFormat.BGR, 0)
conv = nvc.PySurfaceConverter(W, H, nvc.PixelFormat.NV12, nvc.PixelFormat.BGR, 0)
nvDwn = nvc.PySurfaceDownloader(W, H, nvc.PixelFormat.BGR, 0)
img = np.ndarray((H,W,3), dtype=np.uint8)
if yuv:
conv_yuv = nvc.PySurfaceConverter(W, H, nvc.PixelFormat.NV12, nvc.PixelFormat.YUV420, 0)
nvDwn_yuv = nvc.PySurfaceDownloader(W, H, nvc.PixelFormat.YUV420, 0)
img_yuv = np.ndarray((H*W//2*3), dtype=np.uint8)
cnt = 0
while 1:
rawSurface = decoder.DecodeSingleSurface()
@@ -54,15 +60,22 @@ def decoder_nvidia(fn, vipc_server, vst):
convSurface = conv.Execute(rawSurface, cc1)
nvDwn.DownloadSingleSurface(convSurface, img)
vipc_server.send(vst, img.flatten().data, cnt, 0, 0)
if yuv:
convSurface = conv_yuv.Execute(rawSurface, cc1)
nvDwn_yuv.DownloadSingleSurface(convSurface, img_yuv)
vipc_server.send(vst+3, img_yuv.flatten().data, cnt, 0, 0)
cnt += 1
def decoder_ffmpeg(fn, vipc_server, vst):
def decoder_ffmpeg(fn, vipc_server, vst, yuv=False):
import av # pylint: disable=import-error
container = av.open(fn, options={"probesize": "32"})
cnt = 0
for frame in container.decode(video=0):
img = frame.to_ndarray(format=av.video.format.VideoFormat('bgr24'))
vipc_server.send(vst, img.flatten().data, cnt, 0, 0)
if yuv:
img_yuv = frame.to_ndarray(format=av.video.format.VideoFormat('yuv420p'))
vipc_server.send(vst+3, img_yuv.flatten().data, cnt, 0, 0)
cnt += 1
import argparse
@@ -70,6 +83,7 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Decode video streams and broacast on VisionIPC')
parser.add_argument("addr", help="Address of comma 3")
parser.add_argument('--nvidia', action='store_true', help='Use nvidia instead of ffmpeg')
parser.add_argument('--yuv', action='store_true', help='Also broadcast YUV')
parser.add_argument("--cams", default="0,1,2", help="Cameras to decode")
args = parser.parse_args()
@@ -83,6 +97,8 @@ if __name__ == "__main__":
vipc_server = VisionIpcServer("camerad")
for vst in cams.values():
vipc_server.create_buffers(vst, 4, True, W, H)
if args.yuv:
vipc_server.create_buffers(vst+3, 4, False, W, H)
vipc_server.start_listener()
for k,v in cams.items():
@@ -92,6 +108,6 @@ if __name__ == "__main__":
os.mkfifo(FIFO_NAME)
multiprocessing.Process(target=writer, args=(FIFO_NAME, sys.argv[1], k)).start()
if args.nvidia:
multiprocessing.Process(target=decoder_nvidia, args=(FIFO_NAME, vipc_server, v)).start()
multiprocessing.Process(target=decoder_nvidia, args=(FIFO_NAME, vipc_server, v, args.yuv)).start()
else:
multiprocessing.Process(target=decoder_ffmpeg, args=(FIFO_NAME, vipc_server, v)).start()
multiprocessing.Process(target=decoder_ffmpeg, args=(FIFO_NAME, vipc_server, v, args.yuv)).start()
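With --yuv, each camera is additionally broadcast as a YUV420 buffer on stream id vst+3. A rough sketch of a C++ consumer using openpilot's VisionIpcClient; the include path, signatures and the +3 stream-id arithmetic are recalled/assumed from the openpilot tree of that era, not part of this diff:

#include <cstdio>
#include "cereal/visionipc/visionipc_client.h"

int main() {
  // Assumption: the broadcaster registered a camera on RGB stream id N, so its YUV
  // counterpart (when --yuv is passed) lives on N + 3, mirroring vst+3 above.
  int rgb_stream = 0;  // placeholder
  VisionIpcClient client("camerad", static_cast<VisionStreamType>(rgb_stream + 3), false);
  client.connect(true);  // blocking connect

  while (true) {
    VisionIpcBufExtra extra = {};
    VisionBuf *buf = client.recv(&extra);
    if (buf == nullptr) continue;
    printf("YUV frame %u: %zu bytes\n", extra.frame_id, buf->len);
  }
  return 0;
}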
