Driving Personality setting (also changes follow distance) (#24742)

* Skeleton

* Adjustable follow parameter

* fix rebase

* long planner

* typo

* Add ui toggle

* Fix icon

* Improve text

* Better toggle position

* I'm a UX engineer now

* add param reader

* Change jerk to have same crash test performance

* Try reading param

* Unused comment

* translate ui text

* std stoi

* Parametrized buttons

* Empty strings are ints

* Move to generic

* Update translations

* Fix translation diffs

* Release notes

* update refs

* tweaks

* Misc fixes

* No param is standard
old-commit-hash: 205c900742
Author: Harald Schäfer (committed by GitHub)
parent 6d32cb8dc4
commit 36aa81cdc3
14 changed files (lines changed in parentheses):
  1. RELEASES.md (5)
  2. common/params.cc (1)
  3. selfdrive/controls/lib/longitudinal_mpc_lib/long_mpc.py (49)
  4. selfdrive/controls/lib/longitudinal_planner.py (21)
  5. selfdrive/manager/manager.py (2)
  6. selfdrive/test/process_replay/ref_commit (2)
  7. selfdrive/ui/qt/offroad/settings.cc (12)
  8. selfdrive/ui/qt/widgets/controls.h (74)
  9. selfdrive/ui/translations/main_de.ts (20)
  10. selfdrive/ui/translations/main_ja.ts (20)
  11. selfdrive/ui/translations/main_ko.ts (20)
  12. selfdrive/ui/translations/main_pt-BR.ts (20)
  13. selfdrive/ui/translations/main_zh-CHS.ts (20)
  14. selfdrive/ui/translations/main_zh-CHT.ts (20)

@@ -1,6 +1,11 @@
 Version 0.9.3 (2023-06-XX)
 ========================
 * New driving model
+* New driving personality setting
+  * Three settings: aggressive, standard, and relaxed
+  * Standard is recommended and the default
+  * In aggressive mode lead follow distance is shorter and quicker gas/brake response
+  * In relaxed mode lead follow distance is longer
 Version 0.9.2 (2023-05-22)
 ========================
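Note on what the three settings mean in practice: with the helpers added to long_mpc.py below, the steady-state gap (ego and lead at the same speed) reduces to t_follow * v_ego + STOP_DISTANCE, because the two comfort-braking terms cancel. A minimal illustration using the T_FOLLOW values from this diff (the 30 m/s speed is just an example):

```python
# Illustrative only: steady-state following gap when ego and lead speeds match.
# Constants and t_follow values are the ones introduced in this commit.
STOP_DISTANCE = 6.0  # m
T_FOLLOW = {"aggressive": 1.25, "standard": 1.45, "relaxed": 1.75}  # s

v_ego = 30.0  # m/s, an example highway speed
for name, t_follow in T_FOLLOW.items():
  # the v**2 / (2 * COMFORT_BRAKE) terms for ego and lead cancel at equal speeds
  gap = t_follow * v_ego + STOP_DISTANCE
  print(f"{name:>10}: {gap:.1f} m")  # aggressive 43.5, standard 49.5, relaxed 58.5
```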

@@ -104,6 +104,7 @@ std::unordered_map<std::string, uint32_t> keys = {
     {"DisablePowerDown", PERSISTENT},
     {"ExperimentalMode", PERSISTENT},
     {"ExperimentalModeConfirmed", PERSISTENT},
+    {"LongitudinalPersonality", PERSISTENT},
     {"ExperimentalLongitudinalEnabled", PERSISTENT},
     {"DisableUpdates", PERSISTENT},
     {"DisengageOnAccelerator", PERSISTENT},

@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 import os
 import numpy as np
+from cereal import log
 from common.realtime import sec_since_boot
 from common.numpy_fast import clip
 from system.swaglog import cloudlog
@@ -54,18 +54,38 @@ FCW_IDXS = T_IDXS < 5.0
 T_DIFFS = np.diff(T_IDXS, prepend=[0.])
 MIN_ACCEL = -3.5
 MAX_ACCEL = 2.0
-T_FOLLOW = 1.45
 COMFORT_BRAKE = 2.5
 STOP_DISTANCE = 6.0
+
+def get_jerk_factor(personality=log.LongitudinalPersonality.standard):
+  if personality==log.LongitudinalPersonality.relaxed:
+    return 1.0
+  elif personality==log.LongitudinalPersonality.standard:
+    return 1.0
+  elif personality==log.LongitudinalPersonality.aggressive:
+    return 0.5
+  else:
+    raise NotImplementedError("Longitudinal personality not supported")
+
+def get_T_FOLLOW(personality=log.LongitudinalPersonality.standard):
+  if personality==log.LongitudinalPersonality.relaxed:
+    return 1.75
+  elif personality==log.LongitudinalPersonality.standard:
+    return 1.45
+  elif personality==log.LongitudinalPersonality.aggressive:
+    return 1.25
+  else:
+    raise NotImplementedError("Longitudinal personality not supported")

 def get_stopped_equivalence_factor(v_lead):
   return (v_lead**2) / (2 * COMFORT_BRAKE)

-def get_safe_obstacle_distance(v_ego, t_follow=T_FOLLOW):
+def get_safe_obstacle_distance(v_ego, t_follow):
   return (v_ego**2) / (2 * COMFORT_BRAKE) + t_follow * v_ego + STOP_DISTANCE

-def desired_follow_distance(v_ego, v_lead):
-  return get_safe_obstacle_distance(v_ego) - get_stopped_equivalence_factor(v_lead)
+def desired_follow_distance(v_ego, v_lead, t_follow=get_T_FOLLOW()):
+  return get_safe_obstacle_distance(v_ego, t_follow) - get_stopped_equivalence_factor(v_lead)

 def gen_long_model():
@@ -161,7 +181,8 @@ def gen_long_ocp():
   x0 = np.zeros(X_DIM)
   ocp.constraints.x0 = x0
-  ocp.parameter_values = np.array([-1.2, 1.2, 0.0, 0.0, T_FOLLOW, LEAD_DANGER_FACTOR])
+  ocp.parameter_values = np.array([-1.2, 1.2, 0.0, 0.0, get_T_FOLLOW(), LEAD_DANGER_FACTOR])

   # We put all constraint cost weights to 0 and only set them at runtime
   cost_weights = np.zeros(CONSTR_DIM)
@@ -249,10 +270,11 @@ class LongitudinalMpc:
     for i in range(N):
       self.solver.cost_set(i, 'Zl', Zl)

-  def set_weights(self, prev_accel_constraint=True):
+  def set_weights(self, prev_accel_constraint=True, personality=log.LongitudinalPersonality.standard):
+    jerk_factor = get_jerk_factor(personality)
     if self.mode == 'acc':
       a_change_cost = A_CHANGE_COST if prev_accel_constraint else 0
-      cost_weights = [X_EGO_OBSTACLE_COST, X_EGO_COST, V_EGO_COST, A_EGO_COST, a_change_cost, J_EGO_COST]
+      cost_weights = [X_EGO_OBSTACLE_COST, X_EGO_COST, V_EGO_COST, A_EGO_COST, jerk_factor * a_change_cost, jerk_factor * J_EGO_COST]
       constraint_cost_weights = [LIMIT_COST, LIMIT_COST, LIMIT_COST, DANGER_ZONE_COST]
     elif self.mode == 'blended':
       a_change_cost = 40.0 if prev_accel_constraint else 0
@@ -307,7 +329,8 @@ class LongitudinalMpc:
     self.cruise_min_a = min_a
     self.max_a = max_a

-  def update(self, radarstate, v_cruise, x, v, a, j):
+  def update(self, radarstate, v_cruise, x, v, a, j, personality=log.LongitudinalPersonality.standard):
+    t_follow = get_T_FOLLOW(personality)
     v_ego = self.x0[1]
     self.status = radarstate.leadOne.status or radarstate.leadTwo.status
@@ -334,7 +357,7 @@ class LongitudinalMpc:
       v_cruise_clipped = np.clip(v_cruise * np.ones(N+1),
                                  v_lower,
                                  v_upper)
-      cruise_obstacle = np.cumsum(T_DIFFS * v_cruise_clipped) + get_safe_obstacle_distance(v_cruise_clipped)
+      cruise_obstacle = np.cumsum(T_DIFFS * v_cruise_clipped) + get_safe_obstacle_distance(v_cruise_clipped, get_T_FOLLOW())
       x_obstacles = np.column_stack([lead_0_obstacle, lead_1_obstacle, cruise_obstacle])
       self.source = SOURCES[np.argmin(x_obstacles[0])]
@@ -368,7 +391,7 @@ class LongitudinalMpc:
     self.params[:,2] = np.min(x_obstacles, axis=1)
     self.params[:,3] = np.copy(self.prev_a)
-    self.params[:,4] = T_FOLLOW
+    self.params[:,4] = t_follow

     self.run()
     if (np.any(lead_xv_0[FCW_IDXS,0] - self.x_sol[FCW_IDXS,0] < CRASH_DISTANCE) and
@@ -380,9 +403,9 @@ class LongitudinalMpc:
     # Check if it got within lead comfort range
     # TODO This should be done cleaner
     if self.mode == 'blended':
-      if any((lead_0_obstacle - get_safe_obstacle_distance(self.x_sol[:,1], T_FOLLOW))- self.x_sol[:,0] < 0.0):
+      if any((lead_0_obstacle - get_safe_obstacle_distance(self.x_sol[:,1], t_follow))- self.x_sol[:,0] < 0.0):
         self.source = 'lead0'
-      if any((lead_1_obstacle - get_safe_obstacle_distance(self.x_sol[:,1], T_FOLLOW))- self.x_sol[:,0] < 0.0) and \
+      if any((lead_1_obstacle - get_safe_obstacle_distance(self.x_sol[:,1], t_follow))- self.x_sol[:,0] < 0.0) and \
          (lead_1_obstacle[0] - lead_0_obstacle[0]):
         self.source = 'lead1'
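For reference, the new helpers can be exercised on their own; a small sketch assuming an openpilot checkout where this module and its solver import cleanly (the 30 m/s speed is illustrative):

```python
# Sketch: how personality feeds follow distance and jerk weighting.
from cereal import log
from selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import (
  desired_follow_distance, get_T_FOLLOW, get_jerk_factor)

for p in (log.LongitudinalPersonality.aggressive,
          log.LongitudinalPersonality.standard,
          log.LongitudinalPersonality.relaxed):
  t_follow = get_T_FOLLOW(p)  # 1.25 / 1.45 / 1.75 s
  jerk = get_jerk_factor(p)   # 0.5 / 1.0 / 1.0, scales a_change and jerk costs in set_weights
  gap = desired_follow_distance(30.0, 30.0, t_follow)  # steady-state gap at 30 m/s
  print(p, t_follow, jerk, round(gap, 1))
```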

@@ -2,6 +2,8 @@
 import math
 import numpy as np
 from common.numpy_fast import clip, interp
+from common.params import Params
+from cereal import log

 import cereal.messaging as messaging
 from common.conversions import Conversions as CV
@@ -57,6 +59,17 @@ class LongitudinalPlanner:
     self.a_desired_trajectory = np.zeros(CONTROL_N)
     self.j_desired_trajectory = np.zeros(CONTROL_N)
     self.solverExecutionTime = 0.0
+    self.params = Params()
+    self.param_read_counter = 0
+    self.read_param()
+    self.personality = log.LongitudinalPersonality.standard
+
+  def read_param(self):
+    param_value = self.params.get('LongitudinalPersonality')
+    if param_value is not None:
+      self.personality = int(param_value)
+    else:
+      self.personality = log.LongitudinalPersonality.standard

   @staticmethod
   def parse_model(model_msg, model_error):
@@ -75,6 +88,9 @@ class LongitudinalPlanner:
     return x, v, a, j

   def update(self, sm):
+    if self.param_read_counter % 50 == 0:
+      self.read_param()
+    self.param_read_counter += 1
     self.mpc.mode = 'blended' if sm['controlsState'].experimentalMode else 'acc'
     v_ego = sm['carState'].vEgo
@@ -114,11 +130,11 @@ class LongitudinalPlanner:
     accel_limits_turns[0] = min(accel_limits_turns[0], self.a_desired + 0.05)
     accel_limits_turns[1] = max(accel_limits_turns[1], self.a_desired - 0.05)

-    self.mpc.set_weights(prev_accel_constraint)
+    self.mpc.set_weights(prev_accel_constraint, personality=self.personality)
     self.mpc.set_accel_limits(accel_limits_turns[0], accel_limits_turns[1])
     self.mpc.set_cur_state(self.v_desired_filter.x, self.a_desired)
     x, v, a, j = self.parse_model(sm['modelV2'], self.v_model_error)
-    self.mpc.update(sm['radarState'], v_cruise, x, v, a, j)
+    self.mpc.update(sm['radarState'], v_cruise, x, v, a, j, personality=self.personality)

     self.v_desired_trajectory_full = np.interp(T_IDXS, T_IDXS_MPC, self.mpc.v_solution)
     self.a_desired_trajectory_full = np.interp(T_IDXS, T_IDXS_MPC, self.mpc.a_solution)
@@ -154,5 +170,6 @@ class LongitudinalPlanner:
     longitudinalPlan.fcw = self.fcw
     longitudinalPlan.solverExecutionTime = self.mpc.solve_time
+    longitudinalPlan.personality = self.personality

     pm.send('longitudinalPlan', plan_send)
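The planner only polls the param on every 50th update() call; at the planner's roughly 20 Hz rate a UI change is therefore picked up within a few seconds without putting a Params read on every iteration. A stripped-down sketch of the same pattern (the class name and period here are illustrative, not part of this diff):

```python
# Illustrative pattern only: periodic param refresh inside a fixed-rate loop
# (mirrors LongitudinalPlanner.read_param / update in this diff).
from cereal import log
from common.params import Params


class PersonalityReader:
  def __init__(self, period=50):  # 50 cycles is ~2.5 s at the planner's ~20 Hz
    self.params = Params()
    self.period = period
    self.counter = 0
    self.personality = log.LongitudinalPersonality.standard

  def update(self):
    if self.counter % self.period == 0:
      value = self.params.get("LongitudinalPersonality")
      self.personality = int(value) if value is not None else log.LongitudinalPersonality.standard
    self.counter += 1
    return self.personality
```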

@ -7,6 +7,7 @@ import sys
import traceback import traceback
from typing import List, Tuple, Union from typing import List, Tuple, Union
from cereal import log
import cereal.messaging as messaging import cereal.messaging as messaging
import selfdrive.sentry as sentry import selfdrive.sentry as sentry
from common.basedir import BASEDIR from common.basedir import BASEDIR
@ -44,6 +45,7 @@ def manager_init() -> None:
("HasAcceptedTerms", "0"), ("HasAcceptedTerms", "0"),
("LanguageSetting", "main_en"), ("LanguageSetting", "main_en"),
("OpenpilotEnabledToggle", "1"), ("OpenpilotEnabledToggle", "1"),
("LongitudinalPersonality", str(log.LongitudinalPersonality.standard)),
] ]
if not PC: if not PC:
default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8'))) default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')))

@@ -1 +1 @@
-dfe1e4a5fd7a464c91c0d2e70592b4b1470fda8d
+3e684aef4483b8d311d71bab3bb543d7bad26563

@@ -89,6 +89,13 @@ TogglesPanel::TogglesPanel(SettingsWindow *parent) : ListWidget(parent) {
 #endif
   };

+  std::vector<QString> longi_button_texts{tr("Aggressive"), tr("Standard"), tr("Relaxed")};
+  std::vector<int> longi_button_widths{300, 250, 225};
+  ButtonParamControl* long_personality_setting = new ButtonParamControl("LongitudinalPersonality", tr("Driving Personality"),
+                                          tr("Standard is recommended. In aggressive mode, openpilot will follow lead cars closer and be more aggressive with the gas and brake."),
+                                          "../assets/offroad/icon_speed_limit.png",
+                                          longi_button_texts, longi_button_widths);
+
   for (auto &[param, title, desc, icon] : toggle_defs) {
     auto toggle = new ParamControl(param, title, desc, icon, this);
@@ -97,6 +104,11 @@ TogglesPanel::TogglesPanel(SettingsWindow *parent) : ListWidget(parent) {
     addItem(toggle);
     toggles[param.toStdString()] = toggle;
+
+    // insert longitudinal personality after NDOG toggle
+    if (param == "DisengageOnAccelerator") {
+      addItem(long_personality_setting);
+    }
   }

   // Toggles with confirmation dialogs

@@ -197,6 +197,80 @@ private:
   bool store_confirm = false;
 };

+class ButtonParamControl : public AbstractControl {
+  Q_OBJECT
+public:
+  ButtonParamControl(const QString &param, const QString &title, const QString &desc, const QString &icon,
+                     std::vector<QString> button_texts, std::vector<int> button_widths) : AbstractControl(title, desc, icon) {
+    select_style = (R"(
+      padding: 0;
+      border-radius: 50px;
+      font-size: 45px;
+      font-weight: 500;
+      color: #E4E4E4;
+      background-color: #33Ab4C;
+    )");
+    unselect_style = (R"(
+      padding: 0;
+      border-radius: 50px;
+      font-size: 45px;
+      font-weight: 350;
+      color: #E4E4E4;
+      background-color: #393939;
+    )");
+    key = param.toStdString();
+    for (int i = 0; i < button_texts.size(); i++) {
+      QPushButton *button = new QPushButton();
+      button->setText(button_texts[i]);
+      hlayout->addWidget(button);
+      button->setFixedSize(button_widths[i], 100);
+      button->setStyleSheet(unselect_style);
+      buttons.push_back(button);
+      QObject::connect(button, &QPushButton::clicked, [=]() {
+        params.put(key, (QString::number(i)).toStdString());
+        refresh();
+      });
+    }
+    refresh();
+  }
+
+  void read_param() {
+    auto value = params.get(key);
+    if (!value.empty()) {
+      param_value = std::stoi(value);
+    }
+  };
+
+  void set_param(int new_value) {
+    QString values = QString::number(new_value);
+    params.put(key, values.toStdString());
+    refresh();
+  };
+
+  void refresh() {
+    read_param();
+    for (int i = 0; i < buttons.size(); i++) {
+      buttons[i]->setStyleSheet(unselect_style);
+      if (param_value == i) {
+        buttons[i]->setStyleSheet(select_style);
+      }
+    }
+  };
+
+  void showEvent(QShowEvent *event) override {
+    refresh();
+  };
+
+private:
+  std::string key;
+  std::vector<QPushButton*> buttons;
+  QString unselect_style;
+  QString select_style;
+  Params params;
+  int param_value = 0;
+};
+
 class ListWidget : public QWidget {
   Q_OBJECT
  public:
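One coupling to be aware of: ButtonParamControl writes the clicked button's index ("0", "1", "2") to the param, so the button order in settings.cc (Aggressive, Standard, Relaxed) has to line up with the LongitudinalPersonality enum order in cereal for the planner's int() decode to land on the right personality. A small sanity check, assuming cereal is importable; the expected 0/1/2 alignment is an assumption documented by the assert, not something defined in this diff:

```python
# Sanity check (assumption): UI button order should match the cereal enum values.
from cereal import log

button_order = ["aggressive", "standard", "relaxed"]  # mirrors longi_button_texts in settings.cc
for idx, name in enumerate(button_order):
  assert int(getattr(log.LongitudinalPersonality, name)) == idx, f"{name} != {idx}"
```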

@@ -1056,6 +1056,26 @@ This may take up to a minute.</source>
         <source>On this car, openpilot defaults to the car&apos;s built-in ACC instead of openpilot&apos;s longitudinal control. Enable this to switch to openpilot longitudinal control. Enabling Experimental mode is recommended when enabling openpilot longitudinal control alpha.</source>
         <translation type="unfinished"></translation>
     </message>
+    <message>
+        <source>Aggressive</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Relaxed</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Driving Personality</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard is recommended. In aggressive mode, openpilot will follow lead cars closer and be more aggressive with the gas and brake.</source>
+        <translation type="unfinished"></translation>
+    </message>
 </context>
 <context>
     <name>Updater</name>

@@ -1050,6 +1050,26 @@ This may take up to a minute.</source>
         <source>On this car, openpilot defaults to the car&apos;s built-in ACC instead of openpilot&apos;s longitudinal control. Enable this to switch to openpilot longitudinal control. Enabling Experimental mode is recommended when enabling openpilot longitudinal control alpha.</source>
         <translation type="unfinished"></translation>
     </message>
+    <message>
+        <source>Aggressive</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Relaxed</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Driving Personality</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard is recommended. In aggressive mode, openpilot will follow lead cars closer and be more aggressive with the gas and brake.</source>
+        <translation type="unfinished"></translation>
+    </message>
 </context>
 <context>
     <name>Updater</name>

@@ -1051,6 +1051,26 @@ This may take up to a minute.</source>
         <source>On this car, openpilot defaults to the car&apos;s built-in ACC instead of openpilot&apos;s longitudinal control. Enable this to switch to openpilot longitudinal control. Enabling Experimental mode is recommended when enabling openpilot longitudinal control alpha.</source>
         <translation> openpilot ACC로 . openpilot . openpilot .</translation>
     </message>
+    <message>
+        <source>Aggressive</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Relaxed</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Driving Personality</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard is recommended. In aggressive mode, openpilot will follow lead cars closer and be more aggressive with the gas and brake.</source>
+        <translation type="unfinished"></translation>
+    </message>
 </context>
 <context>
     <name>Updater</name>

@@ -1055,6 +1055,26 @@ Isso pode levar até um minuto.</translation>
         <source>On this car, openpilot defaults to the car&apos;s built-in ACC instead of openpilot&apos;s longitudinal control. Enable this to switch to openpilot longitudinal control. Enabling Experimental mode is recommended when enabling openpilot longitudinal control alpha.</source>
         <translation>Neste carro, o openpilot tem como padrão o ACC embutido do carro em vez do controle longitudinal do openpilot. Habilite isso para alternar para o controle longitudinal openpilot. Recomenda-se ativar o modo Experimental ao ativar o alfa de controle longitudinal openpilot.</translation>
     </message>
+    <message>
+        <source>Aggressive</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Relaxed</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Driving Personality</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard is recommended. In aggressive mode, openpilot will follow lead cars closer and be more aggressive with the gas and brake.</source>
+        <translation type="unfinished"></translation>
+    </message>
 </context>
 <context>
     <name>Updater</name>

@@ -1048,6 +1048,26 @@ This may take up to a minute.</source>
         <source>On this car, openpilot defaults to the car&apos;s built-in ACC instead of openpilot&apos;s longitudinal control. Enable this to switch to openpilot longitudinal control. Enabling Experimental mode is recommended when enabling openpilot longitudinal control alpha.</source>
         <translation type="unfinished"></translation>
     </message>
+    <message>
+        <source>Aggressive</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Relaxed</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Driving Personality</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard is recommended. In aggressive mode, openpilot will follow lead cars closer and be more aggressive with the gas and brake.</source>
+        <translation type="unfinished"></translation>
+    </message>
 </context>
 <context>
     <name>Updater</name>

@@ -1050,6 +1050,26 @@ This may take up to a minute.</source>
         <source>On this car, openpilot defaults to the car&apos;s built-in ACC instead of openpilot&apos;s longitudinal control. Enable this to switch to openpilot longitudinal control. Enabling Experimental mode is recommended when enabling openpilot longitudinal control alpha.</source>
         <translation type="unfinished"></translation>
     </message>
+    <message>
+        <source>Aggressive</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Relaxed</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Driving Personality</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Standard is recommended. In aggressive mode, openpilot will follow lead cars closer and be more aggressive with the gas and brake.</source>
+        <translation type="unfinished"></translation>
+    </message>
 </context>
 <context>
     <name>Updater</name>
