pull/34892/head
Trey Moen 6 months ago
parent 196eb50d23
commit 9395bd5399
  1. 3
      selfdrive/ui/SConscript
  2. 79
      selfdrive/ui/main.cc
  3. 4
      selfdrive/ui/qt/onroad/annotated_camera.cc
  4. 185
      selfdrive/ui/qt/recorder/recorder.cc
  5. 31
      selfdrive/ui/qt/recorder/recorder.h
  6. 2
      selfdrive/ui/qt/widgets/cameraview.cc

@ -3,7 +3,7 @@ import json
Import('qt_env', 'arch', 'common', 'messaging', 'visionipc', 'transformations')
base_libs = [common, messaging, visionipc, transformations,
'm', 'OpenCL', 'ssl', 'crypto', 'pthread'] + qt_env["LIBS"]
'm', 'OpenCL', 'ssl', 'crypto', 'pthread', 'avformat', 'avcodec', 'avutil', 'swscale'] + qt_env["LIBS"]
if arch == 'larch64':
base_libs.append('EGL')
@ -27,6 +27,7 @@ Export('widgets')
qt_libs = [widgets, qt_util] + base_libs
qt_src = ["main.cc", "ui.cc", "qt/sidebar.cc", "qt/body.cc",
"qt/recorder/recorder.cc",
"qt/window.cc", "qt/home.cc", "qt/offroad/settings.cc",
"qt/offroad/software_settings.cc", "qt/offroad/developer_panel.cc", "qt/offroad/onboarding.cc",
"qt/offroad/driverview.cc", "qt/offroad/experimental_mode.cc", "qt/offroad/firehose.cc",

@ -2,11 +2,25 @@
#include <QApplication>
#include <QTranslator>
#include <QImage>
#include <QTimer>
#include <QPainter>
#include <QBuffer>
#include <QDebug>
#include <QPixmap>
#include <QScreen>
#include <QEventLoop>
#include <QOffscreenSurface>
#include <QOpenGLContext>
#include <QCommandLineParser>
#include <QCommandLineOption>
#include <QScopedPointer>
#include "system/hardware/hw.h"
#include "selfdrive/ui/qt/qt_window.h"
#include "selfdrive/ui/qt/util.h"
#include "selfdrive/ui/qt/window.h"
#include "selfdrive/ui/qt/recorder/recorder.h"
// Entry point: runs the openpilot UI headlessly (WA_DontShowOnScreen) and
// records its frames to an H.264 file given via --output/-o.
// NOTE(review): this span is a rendered diff — the hunk marker below and the
// duplicate `QApplication a(argc, argv);` declarations are diff artifacts;
// in the actual file only one declaration exists.
int main(int argc, char *argv[]) {
// Highest scheduling priority so frame capture keeps up with the UI.
setpriority(PRIO_PROCESS, 0, -20);
@ -14,17 +28,80 @@ int main(int argc, char *argv[]) {
qInstallMessageHandler(swagLogMessageHandler);
initApp(argc, argv);
QApplication a(argc, argv);
// --output/-o is mandatory: destination path for the recorded video.
QCommandLineParser parser;
parser.setApplicationDescription("OpenPilot UI with screen recording");
parser.addHelpOption();
parser.addVersionOption();
QCommandLineOption outputOption(QStringList() << "o" << "output",
"Output file path for the recorded video", "file");
parser.addOption(outputOption);
parser.process(a);
if (!parser.isSet(outputOption)) {
qCritical() << "Error: Output file path is required. Use --output or -o to specify it.";
return -1;
}
QString outputFile = parser.value(outputOption);
// Load the user's language; empty setting is not an error.
QTranslator translator;
QString translation_file = QString::fromStdString(Params().get("LanguageSetting"));
if (!translator.load(QString(":/%1").arg(translation_file)) && translation_file.length()) {
qCritical() << "Failed to load translation file:" << translation_file;
}
QApplication a(argc, argv);
a.installTranslator(&translator);
// Render the main window off-screen at the device's native resolution;
// WA_Mapped makes Qt treat it as visible so paint events still fire.
MainWindow w;
w.setAttribute(Qt::WA_DontShowOnScreen);
w.setAttribute(Qt::WA_Mapped);
w.setAttribute(Qt::WA_NoSystemBackground);
w.resize(DEVICE_SCREEN_SIZE);
setMainWindow(&w);
a.installEventFilter(&w);
// GLES 3.0 context on an offscreen surface for the camera/GL widgets.
QSurfaceFormat format;
format.setRenderableType(QSurfaceFormat::OpenGLES);
format.setVersion(3, 0);
format.setProfile(QSurfaceFormat::CoreProfile);
QSurfaceFormat::setDefaultFormat(format);
QOffscreenSurface surface;
surface.create();
QOpenGLContext context;
context.create();
context.makeCurrent(&surface);
// 30 fps encoder writing to the requested output file.
QScopedPointer<FFmpegEncoder> encoder(new FFmpegEncoder(outputFile, DEVICE_SCREEN_SIZE.width(), DEVICE_SCREEN_SIZE.height(), 30));
encoder->startRecording();
// Capture loop: every tick, pump events, grab the window, encode one frame.
QScopedPointer<QTimer> captureTimer(new QTimer);
QObject::connect(captureTimer.data(), &QTimer::timeout, [&]() {
context.makeCurrent(&surface);
QCoreApplication::processEvents();
QImage image = w.grab().toImage();
// Skip frames that failed to render or came out the wrong size.
if (image.isNull() || image.size() != DEVICE_SCREEN_SIZE) {
qWarning() << "Invalid image captured";
context.doneCurrent();
return;
}
// Encoder expects BGRA byte order (ARGB32 premultiplied on little-endian).
image = image.convertToFormat(QImage::Format_ARGB32_Premultiplied);
if (!encoder->writeFrame(image)) {
qWarning() << "Failed to write frame";
}
context.doneCurrent();
});
captureTimer->start(1000/30);
return a.exec();
}

@ -2,6 +2,7 @@
#include "selfdrive/ui/qt/onroad/annotated_camera.h"
#include <QPainter>
#include <QOpenGLPaintDevice>
#include <algorithm>
#include <cmath>
@ -125,7 +126,8 @@ void AnnotatedCameraWidget::paintGL() {
CameraWidget::paintGL();
}
QPainter painter(this);
QOpenGLPaintDevice fboPaintDevice(width(), height());
QPainter painter(&fboPaintDevice);
painter.setRenderHint(QPainter::Antialiasing);
painter.setPen(Qt::NoPen);

@ -0,0 +1,185 @@
#include "selfdrive/ui/qt/recorder/recorder.h"
#include <QDebug>
// Sets up an H.264 (libx264) encoder muxing into the container implied by
// `outputFile`'s extension. On success `initialized` becomes true; on any
// failure everything allocated so far is released immediately and the object
// stays inert (startRecording()/writeFrame() return false).
//
// Fixes vs. previous version:
//  - sws_getContext() result is now checked (a null context crashed later
//    in writeFrame()).
//  - every failure path now frees what was already allocated; previously the
//    destructor skipped cleanup when `initialized` was false, leaking the
//    format/codec contexts, frame buffer, etc.
FFmpegEncoder::FFmpegEncoder(const QString& outputFile, int width, int height, int fps) {
  // Route FFmpeg's own error messages to stderr.
  av_log_set_level(AV_LOG_ERROR);
  av_log_set_callback([](void* ptr, int level, const char* fmt, va_list vargs) {
    if (level <= av_log_get_level()) {
      vfprintf(stderr, fmt, vargs);
    }
  });

  // Releases every resource acquired so far (reverse acquisition order) and
  // nulls the members so a later destructor call cannot double-free.
  auto cleanup = [this]() {
    if (packet) av_packet_free(&packet);
    if (sws_ctx) {
      sws_freeContext(sws_ctx);
      sws_ctx = nullptr;
    }
    if (frame) {
      av_freep(&frame->data[0]);  // av_image_alloc buffer; av_frame_free() won't release it
      av_frame_free(&frame);
    }
    if (codec_ctx) avcodec_free_context(&codec_ctx);
    if (format_ctx) {
      if (format_ctx->pb) avio_closep(&format_ctx->pb);
      avformat_free_context(format_ctx);
      format_ctx = nullptr;
      stream = nullptr;  // stream is owned by format_ctx
    }
  };

  // Output container, guessed from the file extension.
  avformat_alloc_output_context2(&format_ctx, nullptr, nullptr, outputFile.toStdString().c_str());
  if (!format_ctx) {
    return;
  }

  const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
  if (!codec) {
    cleanup();
    return;
  }

  // Video stream; owned and freed by format_ctx.
  stream = avformat_new_stream(format_ctx, codec);
  if (!stream) {
    cleanup();
    return;
  }

  codec_ctx = avcodec_alloc_context3(codec);
  if (!codec_ctx) {
    cleanup();
    return;
  }

  // Constant frame rate, 4:2:0 chroma, no B-frames (so pts == dts downstream).
  codec_ctx->codec_id = AV_CODEC_ID_H264;
  codec_ctx->width = width;
  codec_ctx->height = height;
  codec_ctx->time_base = (AVRational){1, fps};
  codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
  codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
  codec_ctx->gop_size = 24;
  codec_ctx->max_b_frames = 0;
  codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

  // libx264 options: fastest preset, widely-decodable profile, CRF rate control.
  AVDictionary* opts = nullptr;
  av_dict_set(&opts, "preset", "ultrafast", 0);
  av_dict_set(&opts, "profile", "baseline", 0);
  av_dict_set(&opts, "crf", "28", 0);
  const int open_ret = avcodec_open2(codec_ctx, codec, &opts);
  av_dict_free(&opts);
  if (open_ret < 0) {
    cleanup();
    return;
  }

  // Expose the encoder parameters on the stream for the muxer.
  if (avcodec_parameters_from_context(stream->codecpar, codec_ctx) < 0) {
    cleanup();
    return;
  }
  stream->time_base = codec_ctx->time_base;

  // Open the output file unless the muxer needs no file (AVFMT_NOFILE).
  if (!(format_ctx->oformat->flags & AVFMT_NOFILE)) {
    if (avio_open(&format_ctx->pb, outputFile.toStdString().c_str(), AVIO_FLAG_WRITE) < 0) {
      cleanup();
      return;
    }
  }

  // Reusable frame holding the converted YUV420P pixels.
  frame = av_frame_alloc();
  if (!frame) {
    cleanup();
    return;
  }
  frame->format = codec_ctx->pix_fmt;
  frame->width = width;
  frame->height = height;
  if (av_image_alloc(frame->data, frame->linesize,
                     width, height, codec_ctx->pix_fmt, 1) < 0) {
    cleanup();
    return;
  }

  // BGRA (QImage ARGB32-premultiplied on little-endian) -> YUV420P converter.
  sws_ctx = sws_getContext(width, height, AV_PIX_FMT_BGRA,
                           width, height, codec_ctx->pix_fmt,
                           SWS_BILINEAR, nullptr, nullptr, nullptr);
  if (!sws_ctx) {
    cleanup();
    return;
  }

  packet = av_packet_alloc();
  if (!packet) {
    cleanup();
    return;
  }

  initialized = true;
}
// Tears down the encoder. Unlike the previous version, resources are freed
// even when construction only partially succeeded (`initialized == false`),
// and the av_image_alloc()'d pixel buffer is released explicitly —
// av_frame_free() does not free externally allocated frame->data[0].
FFmpegEncoder::~FFmpegEncoder() {
  // Finalize the container only for a fully-initialized encoder; a trailer
  // without a matching header is invalid.
  // NOTE(review): if startRecording() was never called, no header was written
  // and this av_write_trailer() is still questionable — pre-existing behavior.
  if (initialized) {
    av_write_trailer(format_ctx);
  }

  // Close the output file if one was opened.
  if (format_ctx && !(format_ctx->oformat->flags & AVFMT_NOFILE) && format_ctx->pb) {
    avio_closep(&format_ctx->pb);
  }

  // Free whatever exists; the FFmpeg *_free helpers tolerate null arguments.
  if (frame) {
    av_freep(&frame->data[0]);  // buffer from av_image_alloc
  }
  av_frame_free(&frame);
  av_packet_free(&packet);
  avcodec_free_context(&codec_ctx);
  sws_freeContext(sws_ctx);
  if (format_ctx) {
    avformat_free_context(format_ctx);  // also frees `stream`
  }
}
// Writes the container header, which marks the start of the recording.
// Returns false if construction failed or the header could not be written.
bool FFmpegEncoder::startRecording() {
  if (!initialized) {
    return false;
  }
  const int err = avformat_write_header(format_ctx, nullptr);
  return err >= 0;
}
bool FFmpegEncoder::writeFrame(const QImage& image) {
if (!initialized) return false;
// Convert BGRA to YUV420P
uint8_t* inData[4] = { (uint8_t*)image.bits(), nullptr, nullptr, nullptr };
int inLinesize[4] = { image.bytesPerLine(), 0, 0, 0 };
sws_scale(sws_ctx, inData, inLinesize, 0, image.height(),
frame->data, frame->linesize);
// Set frame timestamp and duration
frame->pts = frame_count;
frame->duration = 1; // Each frame has duration of 1 in the time base units
// Send frame to encoder
int ret = avcodec_send_frame(codec_ctx, frame);
if (ret < 0) {
return false;
}
// Read encoded packets
while (ret >= 0) {
ret = avcodec_receive_packet(codec_ctx, packet);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
break;
} else if (ret < 0) {
return false;
}
// Set packet timestamp and duration
packet->pts = av_rescale_q(frame_count, codec_ctx->time_base, stream->time_base);
packet->dts = packet->pts;
packet->duration = av_rescale_q(1, codec_ctx->time_base, stream->time_base);
// Write packet to output file
packet->stream_index = stream->index;
ret = av_interleaved_write_frame(format_ctx, packet);
if (ret < 0) {
return false;
}
av_packet_unref(packet);
}
frame_count++;
return true;
}

@ -0,0 +1,31 @@
#pragma once
#include <QImage>
#include <QString>
// FFmpeg is a C library; prevent C++ name mangling of its declarations.
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libavutil/dict.h>
#include <libswscale/swscale.h>
}
// Minimal H.264 screen-capture encoder: converts BGRA QImages to YUV420P and
// muxes them into the container chosen from the output file's extension.
// No internal synchronization — use from a single thread. Usage: construct,
// call startRecording() once, then writeFrame() per frame; the destructor
// finalizes the file.
class FFmpegEncoder {
public:
// Prepares the codec, stream, frame buffer and pixel converter. On failure
// the object stays inert and all methods return false.
FFmpegEncoder(const QString& outputFile, int width, int height, int fps);
~FFmpegEncoder();
// Encodes and muxes one BGRA frame; false on any FFmpeg error.
bool writeFrame(const QImage& image);
// Writes the container header; call before the first writeFrame().
bool startRecording();
private:
bool initialized = false;  // true only if the constructor fully succeeded
AVFormatContext* format_ctx = nullptr;  // muxer / output container
AVCodecContext* codec_ctx = nullptr;  // H.264 encoder state
AVStream* stream = nullptr;  // video stream, owned by format_ctx
AVFrame* frame = nullptr;  // reusable YUV420P frame
AVPacket* packet = nullptr;  // reusable encoded-packet buffer
SwsContext* sws_ctx = nullptr;  // BGRA -> YUV420P converter
int64_t frame_count = 0;  // frame pts counter in the codec time base
};

@ -8,6 +8,8 @@
#include <cmath>
#include <QApplication>
#include <QPainter>
#include <QOpenGLPaintDevice>
namespace {

Loading…
Cancel
Save