From 36aa81cdc34d3702db8256c4f4d02602025e36f9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Harald=20Sch=C3=A4fer?=
Date: Mon, 5 Jun 2023 22:17:41 -0700
Subject: [PATCH] Driving Personality setting (also changes follow distance)
 (#24742)

* Skeleton
* Adjustable follow parameter
* fix rebase
* long planner
* typo
* Add ui toggle
* Fix icon
* Improve text
* Better toggle position
* Im a UX engineer now
* add param reader
* Change jerk to have same crash test performance
* Try reading param
* Unused comment
* translate ui text
* std stoi
* Parametrized buttons
* Empty strings are ints
* Move to generic
* Update translations
* Fix translation diffs
* Release notes
* update refs
* tweaks
* Misc fixes
* No param is standard

old-commit-hash: 205c900742a18ca4662f4535a1a4008624c05995
---
 RELEASES.md                                   |  5 ++
 common/params.cc                              |  1 +
 .../lib/longitudinal_mpc_lib/long_mpc.py      | 49 ++++++++----
 .../controls/lib/longitudinal_planner.py      | 21 +++++-
 selfdrive/manager/manager.py                  |  2 +
 selfdrive/test/process_replay/ref_commit      |  2 +-
 selfdrive/ui/qt/offroad/settings.cc           | 12 +++
 selfdrive/ui/qt/widgets/controls.h            | 74 +++++++++++++++++++
 selfdrive/ui/translations/main_de.ts          | 20 +++++
 selfdrive/ui/translations/main_ja.ts          | 20 +++++
 selfdrive/ui/translations/main_ko.ts          | 20 +++++
 selfdrive/ui/translations/main_pt-BR.ts       | 20 +++++
 selfdrive/ui/translations/main_zh-CHS.ts      | 20 +++++
 selfdrive/ui/translations/main_zh-CHT.ts      | 20 +++++
 14 files changed, 270 insertions(+), 16 deletions(-)

diff --git a/RELEASES.md b/RELEASES.md
index 547add8a80..482e959d9b 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -1,6 +1,11 @@
 Version 0.9.3 (2023-06-XX)
 ========================
 * New driving model
+* New driving personality setting
+  * Three settings: aggressive, standard, and relaxed
+  * Standard is recommended and the default
+  * In aggressive mode, lead follow distance is shorter and gas/brake response is quicker
+  * In relaxed mode, lead follow distance is longer
 
 Version 0.9.2 (2023-05-22)
 ========================
diff --git a/common/params.cc b/common/params.cc
index 530ffe3051..843c57c564 100644
--- a/common/params.cc
+++ b/common/params.cc
@@ -104,6 +104,7 @@ std::unordered_map<std::string, uint32_t> keys = {
     {"DisablePowerDown", PERSISTENT},
     {"ExperimentalMode", PERSISTENT},
     {"ExperimentalModeConfirmed", PERSISTENT},
+    {"LongitudinalPersonality", PERSISTENT},
     {"ExperimentalLongitudinalEnabled", PERSISTENT},
     {"DisableUpdates", PERSISTENT},
     {"DisengageOnAccelerator", PERSISTENT},
diff --git a/selfdrive/controls/lib/longitudinal_mpc_lib/long_mpc.py b/selfdrive/controls/lib/longitudinal_mpc_lib/long_mpc.py
index 660002691a..23fb1b7790 100644
--- a/selfdrive/controls/lib/longitudinal_mpc_lib/long_mpc.py
+++ b/selfdrive/controls/lib/longitudinal_mpc_lib/long_mpc.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 import os
 import numpy as np
-
+from cereal import log
 from common.realtime import sec_since_boot
 from common.numpy_fast import clip
 from system.swaglog import cloudlog
@@ -54,18 +54,38 @@ FCW_IDXS = T_IDXS < 5.0
 T_DIFFS = np.diff(T_IDXS, prepend=[0.])
 MIN_ACCEL = -3.5
 MAX_ACCEL = 2.0
-T_FOLLOW = 1.45
 COMFORT_BRAKE = 2.5
 STOP_DISTANCE = 6.0
 
+def get_jerk_factor(personality=log.LongitudinalPersonality.standard):
+  if personality==log.LongitudinalPersonality.relaxed:
+    return 1.0
+  elif personality==log.LongitudinalPersonality.standard:
+    return 1.0
+  elif personality==log.LongitudinalPersonality.aggressive:
+    return 0.5
+  else:
+    raise NotImplementedError("Longitudinal personality not supported")
+
+
+def get_T_FOLLOW(personality=log.LongitudinalPersonality.standard):
+  if personality==log.LongitudinalPersonality.relaxed:
+    return 1.75
+  elif personality==log.LongitudinalPersonality.standard:
+    return 1.45
+  elif personality==log.LongitudinalPersonality.aggressive:
+    return 1.25
+  else:
+    raise NotImplementedError("Longitudinal personality not supported")
+
 def get_stopped_equivalence_factor(v_lead):
   return (v_lead**2) / (2 * COMFORT_BRAKE)
 
-def get_safe_obstacle_distance(v_ego, t_follow=T_FOLLOW):
+def get_safe_obstacle_distance(v_ego, t_follow):
   return (v_ego**2) / (2 * COMFORT_BRAKE) + t_follow * v_ego + STOP_DISTANCE
 
-def desired_follow_distance(v_ego, v_lead):
-  return get_safe_obstacle_distance(v_ego) - get_stopped_equivalence_factor(v_lead)
+def desired_follow_distance(v_ego, v_lead, t_follow=get_T_FOLLOW()):
+  return get_safe_obstacle_distance(v_ego, t_follow) - get_stopped_equivalence_factor(v_lead)
 
 
 def gen_long_model():
@@ -161,7 +181,8 @@ def gen_long_ocp():
 
   x0 = np.zeros(X_DIM)
   ocp.constraints.x0 = x0
-  ocp.parameter_values = np.array([-1.2, 1.2, 0.0, 0.0, T_FOLLOW, LEAD_DANGER_FACTOR])
+  ocp.parameter_values = np.array([-1.2, 1.2, 0.0, 0.0, get_T_FOLLOW(), LEAD_DANGER_FACTOR])
+
 
   # We put all constraint cost weights to 0 and only set them at runtime
   cost_weights = np.zeros(CONSTR_DIM)
@@ -249,10 +270,11 @@ class LongitudinalMpc:
       for i in range(N):
         self.solver.cost_set(i, 'Zl', Zl)
 
-  def set_weights(self, prev_accel_constraint=True):
+  def set_weights(self, prev_accel_constraint=True, personality=log.LongitudinalPersonality.standard):
+    jerk_factor = get_jerk_factor(personality)
     if self.mode == 'acc':
       a_change_cost = A_CHANGE_COST if prev_accel_constraint else 0
-      cost_weights = [X_EGO_OBSTACLE_COST, X_EGO_COST, V_EGO_COST, A_EGO_COST, a_change_cost, J_EGO_COST]
+      cost_weights = [X_EGO_OBSTACLE_COST, X_EGO_COST, V_EGO_COST, A_EGO_COST, jerk_factor * a_change_cost, jerk_factor * J_EGO_COST]
       constraint_cost_weights = [LIMIT_COST, LIMIT_COST, LIMIT_COST, DANGER_ZONE_COST]
     elif self.mode == 'blended':
       a_change_cost = 40.0 if prev_accel_constraint else 0
@@ -307,7 +329,8 @@ class LongitudinalMpc:
     self.cruise_min_a = min_a
     self.max_a = max_a
 
-  def update(self, radarstate, v_cruise, x, v, a, j):
+  def update(self, radarstate, v_cruise, x, v, a, j, personality=log.LongitudinalPersonality.standard):
+    t_follow = get_T_FOLLOW(personality)
     v_ego = self.x0[1]
     self.status = radarstate.leadOne.status or radarstate.leadTwo.status
 
@@ -334,7 +357,7 @@ class LongitudinalMpc:
 
       v_cruise_clipped = np.clip(v_cruise * np.ones(N+1), v_lower, v_upper)
 
-      cruise_obstacle = np.cumsum(T_DIFFS * v_cruise_clipped) + get_safe_obstacle_distance(v_cruise_clipped)
+      cruise_obstacle = np.cumsum(T_DIFFS * v_cruise_clipped) + get_safe_obstacle_distance(v_cruise_clipped, get_T_FOLLOW())
 
       x_obstacles = np.column_stack([lead_0_obstacle, lead_1_obstacle, cruise_obstacle])
       self.source = SOURCES[np.argmin(x_obstacles[0])]
@@ -368,7 +391,7 @@ class LongitudinalMpc:
 
     self.params[:,2] = np.min(x_obstacles, axis=1)
     self.params[:,3] = np.copy(self.prev_a)
-    self.params[:,4] = T_FOLLOW
+    self.params[:,4] = t_follow
 
     self.run()
     if (np.any(lead_xv_0[FCW_IDXS,0] - self.x_sol[FCW_IDXS,0] < CRASH_DISTANCE) and
@@ -380,9 +403,9 @@ class LongitudinalMpc:
     # Check if it got within lead comfort range
     # TODO This should be done cleaner
     if self.mode == 'blended':
-      if any((lead_0_obstacle - get_safe_obstacle_distance(self.x_sol[:,1], T_FOLLOW))- self.x_sol[:,0] < 0.0):
+      if any((lead_0_obstacle - get_safe_obstacle_distance(self.x_sol[:,1], t_follow))- self.x_sol[:,0] < 0.0):
         self.source = 'lead0'
-      if any((lead_1_obstacle - get_safe_obstacle_distance(self.x_sol[:,1], T_FOLLOW))- self.x_sol[:,0] < 0.0) and \
+      if any((lead_1_obstacle - get_safe_obstacle_distance(self.x_sol[:,1], t_follow))- self.x_sol[:,0] < 0.0) and \
         (lead_1_obstacle[0] - lead_0_obstacle[0]):
         self.source = 'lead1'
 
diff --git a/selfdrive/controls/lib/longitudinal_planner.py b/selfdrive/controls/lib/longitudinal_planner.py
index 3089499687..8ad10818fa 100755
--- a/selfdrive/controls/lib/longitudinal_planner.py
+++ b/selfdrive/controls/lib/longitudinal_planner.py
@@ -2,6 +2,8 @@
 import math
 import numpy as np
 from common.numpy_fast import clip, interp
+from common.params import Params
+from cereal import log
 
 import cereal.messaging as messaging
 from common.conversions import Conversions as CV
@@ -57,6 +59,17 @@ class LongitudinalPlanner:
     self.a_desired_trajectory = np.zeros(CONTROL_N)
     self.j_desired_trajectory = np.zeros(CONTROL_N)
     self.solverExecutionTime = 0.0
+    self.params = Params()
+    self.param_read_counter = 0
+    self.personality = log.LongitudinalPersonality.standard
+    self.read_param()
+
+  def read_param(self):
+    param_value = self.params.get('LongitudinalPersonality')
+    if param_value is not None:
+      self.personality = int(param_value)
+    else:
+      self.personality = log.LongitudinalPersonality.standard
 
   @staticmethod
   def parse_model(model_msg, model_error):
@@ -75,6 +88,9 @@ class LongitudinalPlanner:
     return x, v, a, j
 
   def update(self, sm):
+    if self.param_read_counter % 50 == 0:
+      self.read_param()
+    self.param_read_counter += 1
     self.mpc.mode = 'blended' if sm['controlsState'].experimentalMode else 'acc'
 
     v_ego = sm['carState'].vEgo
@@ -114,11 +130,11 @@ class LongitudinalPlanner:
     accel_limits_turns[0] = min(accel_limits_turns[0], self.a_desired + 0.05)
     accel_limits_turns[1] = max(accel_limits_turns[1], self.a_desired - 0.05)
 
-    self.mpc.set_weights(prev_accel_constraint)
+    self.mpc.set_weights(prev_accel_constraint, personality=self.personality)
     self.mpc.set_accel_limits(accel_limits_turns[0], accel_limits_turns[1])
     self.mpc.set_cur_state(self.v_desired_filter.x, self.a_desired)
     x, v, a, j = self.parse_model(sm['modelV2'], self.v_model_error)
-    self.mpc.update(sm['radarState'], v_cruise, x, v, a, j)
+    self.mpc.update(sm['radarState'], v_cruise, x, v, a, j, personality=self.personality)
 
     self.v_desired_trajectory_full = np.interp(T_IDXS, T_IDXS_MPC, self.mpc.v_solution)
     self.a_desired_trajectory_full = np.interp(T_IDXS, T_IDXS_MPC, self.mpc.a_solution)
@@ -154,5 +170,6 @@ class LongitudinalPlanner:
     longitudinalPlan.fcw = self.fcw
 
     longitudinalPlan.solverExecutionTime = self.mpc.solve_time
+    longitudinalPlan.personality = self.personality
 
     pm.send('longitudinalPlan', plan_send)
diff --git a/selfdrive/manager/manager.py b/selfdrive/manager/manager.py
index c6b135935f..e9a1b2cb5b 100755
--- a/selfdrive/manager/manager.py
+++ b/selfdrive/manager/manager.py
@@ -7,6 +7,7 @@ import sys
 import traceback
 from typing import List, Tuple, Union
 
+from cereal import log
 import cereal.messaging as messaging
 import selfdrive.sentry as sentry
 from common.basedir import BASEDIR
@@ -44,6 +45,7 @@ def manager_init() -> None:
     ("HasAcceptedTerms", "0"),
     ("LanguageSetting", "main_en"),
     ("OpenpilotEnabledToggle", "1"),
+    ("LongitudinalPersonality", str(log.LongitudinalPersonality.standard)),
   ]
   if not PC:
     default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')))
diff --git a/selfdrive/test/process_replay/ref_commit b/selfdrive/test/process_replay/ref_commit
index c794a0434a..2332054b69 100644
--- a/selfdrive/test/process_replay/ref_commit
+++ b/selfdrive/test/process_replay/ref_commit
@@ -1 +1 @@
-dfe1e4a5fd7a464c91c0d2e70592b4b1470fda8d
\ No newline at end of file
+3e684aef4483b8d311d71bab3bb543d7bad26563
diff --git a/selfdrive/ui/qt/offroad/settings.cc b/selfdrive/ui/qt/offroad/settings.cc
index 0c8132343c..c68e94b323 100644
--- a/selfdrive/ui/qt/offroad/settings.cc
+++ b/selfdrive/ui/qt/offroad/settings.cc
@@ -89,6 +89,13 @@ TogglesPanel::TogglesPanel(SettingsWindow *parent) : ListWidget(parent) {
 #endif
   };
 
+
+  std::vector<QString> longi_button_texts{tr("Aggressive"), tr("Standard"), tr("Relaxed")};
+  std::vector<int> longi_button_widths{300, 250, 225};
+  ButtonParamControl* long_personality_setting = new ButtonParamControl("LongitudinalPersonality", tr("Driving Personality"),
+                                          tr("Standard is recommended. In aggressive mode, openpilot will follow lead cars closer and be more aggressive with the gas and brake."),
+                                          "../assets/offroad/icon_speed_limit.png",
+                                          longi_button_texts, longi_button_widths);
 
   for (auto &[param, title, desc, icon] : toggle_defs) {
     auto toggle = new ParamControl(param, title, desc, icon, this);
@@ -97,6 +104,11 @@ TogglesPanel::TogglesPanel(SettingsWindow *parent) : ListWidget(parent) {
 
     addItem(toggle);
     toggles[param.toStdString()] = toggle;
+
+    // insert longitudinal personality after NDOG toggle
+    if (param == "DisengageOnAccelerator") {
+      addItem(long_personality_setting);
+    }
   }
 
   // Toggles with confirmation dialogs
diff --git a/selfdrive/ui/qt/widgets/controls.h b/selfdrive/ui/qt/widgets/controls.h
index 770b9b92dd..a1b50c1bd4 100644
--- a/selfdrive/ui/qt/widgets/controls.h
+++ b/selfdrive/ui/qt/widgets/controls.h
@@ -196,6 +196,80 @@ private:
   bool confirm = false;
   bool store_confirm = false;
 };
+
+class ButtonParamControl : public AbstractControl {
+  Q_OBJECT
+public:
+  ButtonParamControl(const QString &param, const QString &title, const QString &desc, const QString &icon,
+                     std::vector<QString> button_texts, std::vector<int> button_widths) : AbstractControl(title, desc, icon) {
+    select_style = (R"(
+      padding: 0;
+      border-radius: 50px;
+      font-size: 45px;
+      font-weight: 500;
+      color: #E4E4E4;
+      background-color: #33Ab4C;
+    )");
+    unselect_style = (R"(
+      padding: 0;
+      border-radius: 50px;
+      font-size: 45px;
+      font-weight: 350;
+      color: #E4E4E4;
+      background-color: #393939;
+    )");
+    key = param.toStdString();
+    for (int i = 0; i < button_texts.size(); i++) {
+      QPushButton *button = new QPushButton();
+      button->setText(button_texts[i]);
+      hlayout->addWidget(button);
+      button->setFixedSize(button_widths[i], 100);
+      button->setStyleSheet(unselect_style);
+      buttons.push_back(button);
+      QObject::connect(button, &QPushButton::clicked, [=]() {
+        params.put(key, (QString::number(i)).toStdString());
+        refresh();
+      });
+    }
+    refresh();
+  }
+
+  void read_param() {
+    auto value = params.get(key);
+    if (!value.empty()) {
+      param_value = std::stoi(value);
+    }
+  };
+
+  void set_param(int new_value) {
+    QString values = QString::number(new_value);
+    params.put(key, values.toStdString());
+    refresh();
+  };
+
+  void refresh() {
+    read_param();
+    for (int i = 0; i < buttons.size(); i++) {
+      buttons[i]->setStyleSheet(unselect_style);
+      if (param_value == i) {
+        buttons[i]->setStyleSheet(select_style);
+      }
+    }
+  };
+
+  void showEvent(QShowEvent *event) override {
+    refresh();
+  };
+
+private:
+  std::string key;
+  std::vector<QPushButton *> buttons;
+  QString unselect_style;
+  QString select_style;
+  Params params;
+  int param_value = 0;
+};
+
+
+
 
 class ListWidget : public QWidget {
   Q_OBJECT
diff --git a/selfdrive/ui/translations/main_de.ts b/selfdrive/ui/translations/main_de.ts
index 7fda980e92..24b3d6f299 100644
--- a/selfdrive/ui/translations/main_de.ts
+++ b/selfdrive/ui/translations/main_de.ts
@@ -1056,6 +1056,26 @@ This may take up to a minute.
         <source>On this car, openpilot defaults to the car's built-in ACC instead of openpilot's longitudinal control. Enable this to switch to openpilot longitudinal control. Enabling Experimental mode is recommended when enabling openpilot longitudinal control alpha.</source>
         <translation type="unfinished"></translation>
     </message>
+    <message>
+        <source>Aggressive</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Relaxed</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Driving Personality</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard is recommended. In aggressive mode, openpilot will follow lead cars closer and be more aggressive with the gas and brake.</source>
+        <translation type="unfinished"></translation>
+    </message>
 </context>
 <context>
     <name>Updater</name>
diff --git a/selfdrive/ui/translations/main_ja.ts b/selfdrive/ui/translations/main_ja.ts
index 10da629cf9..b6b039eedd 100644
--- a/selfdrive/ui/translations/main_ja.ts
+++ b/selfdrive/ui/translations/main_ja.ts
@@ -1050,6 +1050,26 @@ This may take up to a minute.
         <source>On this car, openpilot defaults to the car's built-in ACC instead of openpilot's longitudinal control. Enable this to switch to openpilot longitudinal control. Enabling Experimental mode is recommended when enabling openpilot longitudinal control alpha.</source>
         <translation type="unfinished"></translation>
     </message>
+    <message>
+        <source>Aggressive</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Relaxed</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Driving Personality</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard is recommended. In aggressive mode, openpilot will follow lead cars closer and be more aggressive with the gas and brake.</source>
+        <translation type="unfinished"></translation>
+    </message>
 </context>
 <context>
     <name>Updater</name>
diff --git a/selfdrive/ui/translations/main_ko.ts b/selfdrive/ui/translations/main_ko.ts
index ad9a4e05d9..26e20a6b15 100644
--- a/selfdrive/ui/translations/main_ko.ts
+++ b/selfdrive/ui/translations/main_ko.ts
@@ -1051,6 +1051,26 @@ This may take up to a minute.
         <source>On this car, openpilot defaults to the car's built-in ACC instead of openpilot's longitudinal control. Enable this to switch to openpilot longitudinal control. Enabling Experimental mode is recommended when enabling openpilot longitudinal control alpha.</source>
         <translation>이 차량은 openpilot 롱컨트롤 대신 차량의 내장 ACC로 기본 설정됩니다. openpilot 롱컨트롤으로 전환하려면 이 기능을 활성화하세요. openpilot 롱컨트롤 알파를 활성화하는경우 실험적 모드 활성화를 권장합니다.</translation>
     </message>
+    <message>
+        <source>Aggressive</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Relaxed</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Driving Personality</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard is recommended. In aggressive mode, openpilot will follow lead cars closer and be more aggressive with the gas and brake.</source>
+        <translation type="unfinished"></translation>
+    </message>
 </context>
 <context>
     <name>Updater</name>
diff --git a/selfdrive/ui/translations/main_pt-BR.ts b/selfdrive/ui/translations/main_pt-BR.ts
index 5aa4245fda..794b0c025e 100644
--- a/selfdrive/ui/translations/main_pt-BR.ts
+++ b/selfdrive/ui/translations/main_pt-BR.ts
@@ -1055,6 +1055,26 @@ Isso pode levar até um minuto.
         <source>On this car, openpilot defaults to the car's built-in ACC instead of openpilot's longitudinal control. Enable this to switch to openpilot longitudinal control. Enabling Experimental mode is recommended when enabling openpilot longitudinal control alpha.</source>
         <translation>Neste carro, o openpilot tem como padrão o ACC embutido do carro em vez do controle longitudinal do openpilot. Habilite isso para alternar para o controle longitudinal openpilot. Recomenda-se ativar o modo Experimental ao ativar o alfa de controle longitudinal openpilot.</translation>
     </message>
+    <message>
+        <source>Aggressive</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Relaxed</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Driving Personality</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard is recommended. In aggressive mode, openpilot will follow lead cars closer and be more aggressive with the gas and brake.</source>
+        <translation type="unfinished"></translation>
+    </message>
 </context>
 <context>
     <name>Updater</name>
diff --git a/selfdrive/ui/translations/main_zh-CHS.ts b/selfdrive/ui/translations/main_zh-CHS.ts
index e7382075fb..65ff1a4632 100644
--- a/selfdrive/ui/translations/main_zh-CHS.ts
+++ b/selfdrive/ui/translations/main_zh-CHS.ts
@@ -1048,6 +1048,26 @@ This may take up to a minute.
         <source>On this car, openpilot defaults to the car's built-in ACC instead of openpilot's longitudinal control. Enable this to switch to openpilot longitudinal control. Enabling Experimental mode is recommended when enabling openpilot longitudinal control alpha.</source>
         <translation type="unfinished"></translation>
     </message>
+    <message>
+        <source>Aggressive</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Relaxed</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Driving Personality</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard is recommended. In aggressive mode, openpilot will follow lead cars closer and be more aggressive with the gas and brake.</source>
+        <translation type="unfinished"></translation>
+    </message>
 </context>
 <context>
     <name>Updater</name>
diff --git a/selfdrive/ui/translations/main_zh-CHT.ts b/selfdrive/ui/translations/main_zh-CHT.ts
index d112d86b08..de40c0f9f8 100644
--- a/selfdrive/ui/translations/main_zh-CHT.ts
+++ b/selfdrive/ui/translations/main_zh-CHT.ts
@@ -1050,6 +1050,26 @@ This may take up to a minute.
         <source>On this car, openpilot defaults to the car's built-in ACC instead of openpilot's longitudinal control. Enable this to switch to openpilot longitudinal control. Enabling Experimental mode is recommended when enabling openpilot longitudinal control alpha.</source>
         <translation type="unfinished"></translation>
     </message>
+    <message>
+        <source>Aggressive</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Relaxed</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Driving Personality</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard is recommended. In aggressive mode, openpilot will follow lead cars closer and be more aggressive with the gas and brake.</source>
+        <translation type="unfinished"></translation>
+    </message>
 </context>
 <context>
     <name>Updater</name>
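
For reference, the follow-distance math this patch adds to long_mpc.py can be exercised on its own. The sketch below copies the constants and formulas from the diff above; the cereal log.LongitudinalPersonality enum is stood in for by plain strings purely for illustration, and the script is not part of the patch.

#!/usr/bin/env python3
# Standalone sketch of the personality-dependent follow-distance math
# introduced in long_mpc.py (constants and formulas copied from the patch).
COMFORT_BRAKE = 2.5   # m/s^2, assumed comfortable deceleration for ego and lead
STOP_DISTANCE = 6.0   # m, gap kept at a standstill

# values returned by get_T_FOLLOW() in the patch, keyed by plain strings here
T_FOLLOW = {"relaxed": 1.75, "standard": 1.45, "aggressive": 1.25}

def get_stopped_equivalence_factor(v_lead):
  # distance credit for the lead's own stopping distance
  return (v_lead**2) / (2 * COMFORT_BRAKE)

def get_safe_obstacle_distance(v_ego, t_follow):
  # ego stopping distance + time headway + standstill gap
  return (v_ego**2) / (2 * COMFORT_BRAKE) + t_follow * v_ego + STOP_DISTANCE

def desired_follow_distance(v_ego, v_lead, t_follow):
  return get_safe_obstacle_distance(v_ego, t_follow) - get_stopped_equivalence_factor(v_lead)

if __name__ == "__main__":
  v = 30.0  # ego and lead both at 30 m/s (about 108 km/h)
  for personality, t_follow in T_FOLLOW.items():
    d = desired_follow_distance(v, v, t_follow)
    print(f"{personality:10s} t_follow={t_follow:.2f} s  gap={d:.1f} m")

At matched ego and lead speeds the two quadratic terms cancel, so the steady-state gap reduces to t_follow * v_ego + STOP_DISTANCE: roughly 43.5 m, 49.5 m, and 58.5 m at 30 m/s for aggressive, standard, and relaxed, which is how each personality changes following distance.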