More conservative lead policy in e2e long mode (#25684)

* Add params for lead and danger

* fix long params

* E2e passes simple maneuver tests

* Make tests run with e2e long mode

* Slightly more error allowed in e2e mode

* FCW back and populate long source field

* Fix planner name

* FCW still doesn't work

* Slightly less aggressive

* Doesn't need to simulate from stop
old-commit-hash: 7899fb79c1
Author: HaraldSchafer, committed by GitHub (3 years ago)
Parent: 1c2c840a8a
Commit: 0b87a4ddb1
8 changed files:
  selfdrive/controls/lib/longitudinal_mpc_lib/long_mpc.py (40 lines changed)
  selfdrive/controls/lib/longitudinal_planner.py (2 lines changed)
  selfdrive/controls/plannerd.py (4 lines changed)
  selfdrive/controls/tests/test_cruise_speed.py (20 lines changed)
  selfdrive/controls/tests/test_following_distance.py (23 lines changed)
  selfdrive/test/longitudinal_maneuvers/maneuver.py (4 lines changed)
  selfdrive/test/longitudinal_maneuvers/plant.py (21 lines changed)
  selfdrive/test/longitudinal_maneuvers/test_longitudinal.py (11 lines changed)

@@ -24,7 +24,7 @@ SOURCES = ['lead0', 'lead1', 'cruise', 'e2e']
X_DIM = 3
U_DIM = 1
-PARAM_DIM = 4
+PARAM_DIM = 6
COST_E_DIM = 5
COST_DIM = COST_E_DIM + 1
CONSTR_DIM = 4
@@ -37,6 +37,7 @@ J_EGO_COST = 5.0
A_CHANGE_COST = 200.
DANGER_ZONE_COST = 100.
CRASH_DISTANCE = .5
+LEAD_DANGER_FACTOR = 0.75
LIMIT_COST = 1e6
ACADOS_SOLVER_TYPE = 'SQP_RTI'
@@ -58,8 +59,8 @@ STOP_DISTANCE = 6.0
def get_stopped_equivalence_factor(v_lead):
  return (v_lead**2) / (2 * COMFORT_BRAKE)
-def get_safe_obstacle_distance(v_ego):
-  return (v_ego**2) / (2 * COMFORT_BRAKE) + T_FOLLOW * v_ego + STOP_DISTANCE
+def get_safe_obstacle_distance(v_ego, t_follow=T_FOLLOW):
+  return (v_ego**2) / (2 * COMFORT_BRAKE) + t_follow * v_ego + STOP_DISTANCE
def desired_follow_distance(v_ego, v_lead):
return get_safe_obstacle_distance(v_ego) - get_stopped_equivalence_factor(v_lead)
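For intuition, here is a minimal standalone sketch of the follow-distance math above (not part of the diff). It assumes COMFORT_BRAKE = 2.5 m/s^2 and a default T_FOLLOW = 1.45 s, which match openpilot around this time but should be treated as assumptions:

# Standalone sketch of the helpers above, with assumed constants.
COMFORT_BRAKE = 2.5   # m/s^2, assumed
T_FOLLOW = 1.45       # s, assumed default time gap
STOP_DISTANCE = 6.0   # m, from the diff context above

def get_stopped_equivalence_factor(v_lead):
  return (v_lead**2) / (2 * COMFORT_BRAKE)

def get_safe_obstacle_distance(v_ego, t_follow=T_FOLLOW):
  return (v_ego**2) / (2 * COMFORT_BRAKE) + t_follow * v_ego + STOP_DISTANCE

def desired_follow_distance(v_ego, v_lead):
  return get_safe_obstacle_distance(v_ego) - get_stopped_equivalence_factor(v_lead)

# At matched speeds the braking terms cancel, leaving t_follow * v + STOP_DISTANCE.
# At 20 m/s: 1.45 * 20 + 6.0 = 35 m.
print(desired_follow_distance(20.0, 20.0))  # 35.0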
@@ -90,7 +91,9 @@ def gen_long_model():
  a_max = SX.sym('a_max')
  x_obstacle = SX.sym('x_obstacle')
  prev_a = SX.sym('prev_a')
-  model.p = vertcat(a_min, a_max, x_obstacle, prev_a)
+  lead_t_follow = SX.sym('lead_t_follow')
+  lead_danger_factor = SX.sym('lead_danger_factor')
+  model.p = vertcat(a_min, a_max, x_obstacle, prev_a, lead_t_follow, lead_danger_factor)
# dynamics model
f_expl = vertcat(v_ego, a_ego, j_ego)
@@ -124,11 +127,13 @@ def gen_long_ocp():
  a_min, a_max = ocp.model.p[0], ocp.model.p[1]
  x_obstacle = ocp.model.p[2]
  prev_a = ocp.model.p[3]
+  lead_t_follow = ocp.model.p[4]
+  lead_danger_factor = ocp.model.p[5]
  ocp.cost.yref = np.zeros((COST_DIM, ))
  ocp.cost.yref_e = np.zeros((COST_E_DIM, ))
-  desired_dist_comfort = get_safe_obstacle_distance(v_ego)
+  desired_dist_comfort = get_safe_obstacle_distance(v_ego, lead_t_follow)
# The main cost in normal operation is how close you are to the "desired" distance
# from an obstacle at every timestep. This obstacle can be a lead car
@@ -149,12 +154,12 @@ def gen_long_ocp():
  constraints = vertcat(v_ego,
                        (a_ego - a_min),
                        (a_max - a_ego),
-                        ((x_obstacle - x_ego) - (3/4) * (desired_dist_comfort)) / (v_ego + 10.))
+                        ((x_obstacle - x_ego) - lead_danger_factor * (desired_dist_comfort)) / (v_ego + 10.))
  ocp.model.con_h_expr = constraints
  x0 = np.zeros(X_DIM)
  ocp.constraints.x0 = x0
-  ocp.parameter_values = np.array([-1.2, 1.2, 0.0, 0.0])
+  ocp.parameter_values = np.array([-1.2, 1.2, 0.0, 0.0, T_FOLLOW, LEAD_DANGER_FACTOR])
# We put all constraint cost weights to 0 and only set them at runtime
cost_weights = np.zeros(CONSTR_DIM)
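The last entry of constraints above is the danger-zone term: it goes negative once the gap to the obstacle drops below lead_danger_factor times the comfort distance, and the DANGER_ZONE_COST weight then pushes the solution away. A rough illustration of what parameterizing that factor changes (my own sketch, not part of the diff, reusing the assumed constants from the previous example):

# Hypothetical sketch: where the danger-zone soft constraint starts to bind.
def danger_zone_margin(gap, v_ego, lead_danger_factor, t_follow=1.45, comfort_brake=2.5, stop_distance=6.0):
  desired = (v_ego**2) / (2 * comfort_brake) + t_follow * v_ego + stop_distance
  # Same form as the fourth constraint in the diff; negative values get penalized.
  return (gap - lead_danger_factor * desired) / (v_ego + 10.)

# At 20 m/s the comfort distance to the obstacle is 115 m.
print(danger_zone_margin(100., 20., 0.75) > 0)  # True: with the old hardcoded 3/4 factor, no penalty yet
print(danger_zone_margin(100., 20., 1.0) > 0)   # False: with factor 1.0, the penalty is already active

In this commit acc mode keeps the 0.75 factor while blended (e2e) mode sets it to 1.0, and the next hunk also raises blended mode's danger-zone weight from 5 to 50; together that is the more conservative lead policy the title refers to.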
@@ -249,7 +254,7 @@ class LongitudinalMpc:
      constraint_cost_weights = [LIMIT_COST, LIMIT_COST, LIMIT_COST, DANGER_ZONE_COST]
    elif self.mode == 'blended':
      cost_weights = [0., 0.2, 0.25, 1.0, 0.0, 1.0]
-      constraint_cost_weights = [LIMIT_COST, LIMIT_COST, LIMIT_COST, 5.0]
+      constraint_cost_weights = [LIMIT_COST, LIMIT_COST, LIMIT_COST, 50.0]
elif self.mode == 'e2e':
cost_weights = [0., 0.2, 0.25, 1., 0.0, .1]
constraint_cost_weights = [LIMIT_COST, LIMIT_COST, LIMIT_COST, 0.0]
@@ -317,6 +322,7 @@ class LongitudinalMpc:
    if self.mode == 'acc':
      self.params[:,0] = MIN_ACCEL if self.status else self.cruise_min_a
      self.params[:,1] = self.cruise_max_a
+      self.params[:,5] = LEAD_DANGER_FACTOR
# Fake an obstacle for cruise, this ensures smooth acceleration to set speed
# when the leads are no factor.
@@ -335,6 +341,7 @@
    elif self.mode == 'blended':
      self.params[:,0] = MIN_ACCEL
      self.params[:,1] = MAX_ACCEL
+      self.params[:,5] = 1.0
x_obstacles = np.column_stack([lead_0_obstacle,
lead_1_obstacle])
@@ -344,7 +351,8 @@
      x_and_cruise = np.column_stack([x, cruise_target])
      x = np.min(x_and_cruise, axis=1)
-      self.source = 'e2e'
+      self.source = 'e2e' if x_and_cruise[0,0] < x_and_cruise[0,1] else 'cruise'
else:
raise NotImplementedError(f'Planner mode {self.mode} not recognized in planner update')
@@ -359,6 +367,7 @@
    self.params[:,2] = np.min(x_obstacles, axis=1)
    self.params[:,3] = np.copy(self.prev_a)
+    self.params[:,4] = T_FOLLOW
self.run()
if (np.any(lead_xv_0[:,0] - self.x_sol[:,0] < CRASH_DISTANCE) and
@@ -367,10 +376,23 @@
    else:
      self.crash_cnt = 0
+    # Check if it got within lead comfort range
+    # TODO This should be done cleaner
+    if self.mode == 'blended':
+      if any((lead_0_obstacle - get_safe_obstacle_distance(self.x_sol[:,1], T_FOLLOW)) - self.x_sol[:,0] < 0.0):
+        self.source = 'lead0'
+      if any((lead_1_obstacle - get_safe_obstacle_distance(self.x_sol[:,1], T_FOLLOW)) - self.x_sol[:,0] < 0.0) and \
+         (lead_1_obstacle[0] - lead_0_obstacle[0]):
+        self.source = 'lead1'
def update_with_xva(self, x, v, a):
self.params[:,0] = -10.
self.params[:,1] = 10.
self.params[:,2] = 1e5
+    self.params[:,4] = T_FOLLOW
+    self.params[:,5] = LEAD_DANGER_FACTOR
# v, and a are in local frame, but x is wrt the x[0] position
# In >90degree turns, x goes to 0 (and may even be -ve)
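Taken together, the long_mpc.py changes widen the per-step parameter vector from 4 to 6 entries. A condensed sketch of the resulting layout (my own summary with illustrative values, not code from the repo; N is just a placeholder for the horizon length):

import numpy as np

N = 12                                      # illustrative horizon length
T_FOLLOW, LEAD_DANGER_FACTOR = 1.45, 0.75   # assumed defaults, matching the constants above

params = np.zeros((N + 1, 6))
params[:, 0] = -1.2                # a_min
params[:, 1] = 1.2                 # a_max
params[:, 2] = 1e5                 # x_obstacle (effectively no obstacle)
params[:, 3] = 0.0                 # prev_a
params[:, 4] = T_FOLLOW            # lead_t_follow, new in this commit
params[:, 5] = LEAD_DANGER_FACTOR  # lead_danger_factor: 0.75 in acc mode, 1.0 in blended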

@@ -45,7 +45,7 @@ def limit_accel_in_turns(v_ego, angle_steers, a_target, CP):
  return [a_target[0], min(a_target[1], a_x_allowed)]
-class Planner:
+class LongitudinalPlanner:
def __init__(self, CP, init_v=0.0, init_a=0.0):
self.CP = CP
params = Params()

@@ -3,7 +3,7 @@ from cereal import car
from common.params import Params
from common.realtime import Priority, config_realtime_process
from system.swaglog import cloudlog
-from selfdrive.controls.lib.longitudinal_planner import Planner
+from selfdrive.controls.lib.longitudinal_planner import LongitudinalPlanner
from selfdrive.controls.lib.lateral_planner import LateralPlanner
import cereal.messaging as messaging
@@ -16,7 +16,7 @@ def plannerd_thread(sm=None, pm=None):
  CP = car.CarParams.from_bytes(params.get("CarParams", block=True))
  cloudlog.info("plannerd got CarParams: %s", CP.carName)
-  longitudinal_planner = Planner(CP)
+  longitudinal_planner = LongitudinalPlanner(CP)
lateral_planner = LateralPlanner(CP)
if sm is None:

@@ -1,13 +1,16 @@
#!/usr/bin/env python3
import unittest
import numpy as np
+from common.params import Params
from selfdrive.test.longitudinal_maneuvers.maneuver import Maneuver
-def run_cruise_simulation(cruise, t_end=100.):
+def run_cruise_simulation(cruise, t_end=20.):
  man = Maneuver(
    '',
    duration=t_end,
-    initial_speed=float(0.),
+    initial_speed=max(cruise - 1., 0.0),
    lead_relevancy=True,
    initial_distance_lead=100,
    cruise_values=[cruise],
@@ -21,12 +24,15 @@ def run_cruise_simulation(cruise, t_end=100.):
class TestCruiseSpeed(unittest.TestCase):
  def test_cruise_speed(self):
-    for speed in np.arange(5, 40, 5):
-      print(f'Testing {speed} m/s')
-      cruise_speed = float(speed)
+    params = Params()
+    for e2e in [False, True]:
+      params.put_bool("EndToEndLong", e2e)
+      for speed in np.arange(5, 40, 5):
+        print(f'Testing {speed} m/s')
+        cruise_speed = float(speed)
-      simulation_steady_state = run_cruise_simulation(cruise_speed)
-      self.assertAlmostEqual(simulation_steady_state, cruise_speed, delta=.01, msg=f'Did not reach {speed} m/s')
+        simulation_steady_state = run_cruise_simulation(cruise_speed)
+        self.assertAlmostEqual(simulation_steady_state, cruise_speed, delta=.01, msg=f'Did not reach {speed} m/s')
if __name__ == "__main__":

@@ -1,11 +1,13 @@
#!/usr/bin/env python3
import unittest
import numpy as np
+from common.params import Params
from selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import desired_follow_distance
from selfdrive.test.longitudinal_maneuvers.maneuver import Maneuver
-def run_following_distance_simulation(v_lead, t_end=100.0):
+def run_following_distance_simulation(v_lead, t_end=100.0, e2e=False):
man = Maneuver(
'',
duration=t_end,
@@ -14,6 +16,7 @@ def run_following_distance_simulation(v_lead, t_end=100.0):
    initial_distance_lead=100,
    speed_lead_values=[v_lead],
    breakpoints=[0.],
+    e2e=e2e
)
valid, output = man.evaluate()
assert valid
@@ -22,14 +25,16 @@
class TestFollowingDistance(unittest.TestCase):
  def test_following_distance(self):
-    for speed in np.arange(0, 40, 5):
-      print(f'Testing {speed} m/s')
-      v_lead = float(speed)
-      simulation_steady_state = run_following_distance_simulation(v_lead)
-      correct_steady_state = desired_follow_distance(v_lead, v_lead)
-      self.assertAlmostEqual(simulation_steady_state, correct_steady_state, delta=(correct_steady_state*.1 + .5))
+    params = Params()
+    for e2e in [False, True]:
+      params.put_bool("EndToEndLong", e2e)
+      for speed in np.arange(0, 40, 5):
+        print(f'Testing {speed} m/s')
+        v_lead = float(speed)
+        simulation_steady_state = run_following_distance_simulation(v_lead)
+        correct_steady_state = desired_follow_distance(v_lead, v_lead)
+        err_ratio = 0.2 if e2e else 0.1
+        self.assertAlmostEqual(simulation_steady_state, correct_steady_state, delta=(err_ratio * correct_steady_state + .5))
if __name__ == "__main__":
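To make the relaxed e2e tolerance concrete, a quick back-of-the-envelope check (my own sketch, not part of the diff, reusing the assumed T_FOLLOW = 1.45 s and STOP_DISTANCE = 6.0 m):

v_lead = 20.0
correct_steady_state = 1.45 * v_lead + 6.0           # 35.0 m; braking terms cancel at matched speeds
for e2e in (False, True):
  err_ratio = 0.2 if e2e else 0.1
  print(e2e, err_ratio * correct_steady_state + .5)  # acc mode allows 4.0 m, e2e mode allows 7.5 m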

@@ -27,7 +27,7 @@ class Maneuver():
      speed=self.speed,
      distance_lead=self.distance_lead,
      only_lead2=self.only_lead2,
-      only_radar=self.only_radar
+      only_radar=self.only_radar,
)
valid = True
@@ -54,7 +54,7 @@ class Maneuver():
        valid = False
      if self.ensure_start and log['v_rel'] > 0 and log['speeds'][-1] <= 0.1:
-        print('Planner not starting!')
+        print('LongitudinalPlanner not starting!')
valid = False
print("maneuver end", valid)

@@ -6,7 +6,8 @@ from cereal import log
import cereal.messaging as messaging
from common.realtime import Ratekeeper, DT_MDL
from selfdrive.controls.lib.longcontrol import LongCtrlState
-from selfdrive.controls.lib.longitudinal_planner import Planner
+from selfdrive.modeld.constants import T_IDXS
+from selfdrive.controls.lib.longitudinal_planner import LongitudinalPlanner
class Plant():
@@ -43,7 +44,8 @@ class Plant():
    from selfdrive.car.honda.values import CAR
    from selfdrive.car.honda.interface import CarInterface
-    self.planner = Planner(CarInterface.get_params(CAR.CIVIC), init_v=self.speed)
+    self.planner = LongitudinalPlanner(CarInterface.get_params(CAR.CIVIC), init_v=self.speed)
def current_time(self):
return float(self.rk.frame) / self.rate
@@ -88,6 +90,21 @@ class Plant():
    radar.radarState.leadOne = lead
    radar.radarState.leadTwo = lead
+    # Simulate model predicting slightly faster speed
+    # this is to ensure lead policy is effective when model
+    # does not predict slowdown in e2e mode
+    position = log.ModelDataV2.XYZTData.new_message()
+    position.x = [float(x) for x in (self.speed + 0.5) * np.array(T_IDXS)]
+    model.modelV2.position = position
+    velocity = log.ModelDataV2.XYZTData.new_message()
+    velocity.x = [float(x) for x in (self.speed + 0.5) * np.ones_like(T_IDXS)]
+    model.modelV2.velocity = velocity
+    acceleration = log.ModelDataV2.XYZTData.new_message()
+    acceleration.x = [float(x) for x in np.zeros_like(T_IDXS)]
+    model.modelV2.acceleration = acceleration
control.controlsState.longControlState = LongCtrlState.pid
control.controlsState.vCruise = float(v_cruise * 3.6)
car_state.carState.vEgo = float(self.speed)
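The fabricated model messages above make the simulated model always predict the ego car moving 0.5 m/s faster than it really is, so in e2e mode the model plan on its own never asks to slow down and any braking in these maneuvers has to come from the lead policy under test. A tiny illustration of the fabricated plan (my own sketch; the time grid here is a made-up subset of openpilot's T_IDXS):

import numpy as np

T_IDXS = np.array([0.0, 0.1, 0.3, 0.6, 1.0])   # illustrative subset of the real grid
speed = 10.0
fake_position_x = (speed + 0.5) * T_IDXS        # constant-speed, slightly optimistic path
fake_velocity_x = (speed + 0.5) * np.ones_like(T_IDXS)
fake_accel_x = np.zeros_like(T_IDXS)
print(fake_position_x)   # approximately [0. 1.05 3.15 6.3 10.5]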

@@ -140,16 +140,23 @@ class LongitudinalControl(unittest.TestCase):
def run_maneuver_worker(k):
def run(self):
+    params = Params()
man = maneuvers[k]
print(man.title)
+    params.put_bool("EndToEndLong", True)
+    print(man.title, ' in e2e mode')
valid, _ = man.evaluate()
self.assertTrue(valid, msg=man.title)
+    params.put_bool("EndToEndLong", False)
+    print(man.title, ' in acc mode')
+    valid, _ = man.evaluate()
+    self.assertTrue(valid, msg=man.title)
return run
for k in range(len(maneuvers)):
setattr(LongitudinalControl, f"test_longitudinal_maneuvers_{k+1}",
run_maneuver_worker(k))
if __name__ == "__main__":
unittest.main(failfast=True)
