@@ -57,14 +57,11 @@ class ModelState:
    self.prev_desire = np.zeros(ModelConstants.DESIRE_LEN, dtype=np.float32)
    self.full_features_20Hz = np.zeros((ModelConstants.FULL_HISTORY_BUFFER_LEN, ModelConstants.FEATURE_LEN), dtype=np.float32)
    self.desire_20Hz = np.zeros((ModelConstants.FULL_HISTORY_BUFFER_LEN + 1, ModelConstants.DESIRE_LEN), dtype=np.float32)
    self.prev_desired_curv_20hz = np.zeros((ModelConstants.FULL_HISTORY_BUFFER_LEN + 1, ModelConstants.PREV_DESIRED_CURV_LEN), dtype=np.float32)
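    # NOTE: the *_20Hz buffers above keep the raw full-rate history (FULL_HISTORY_BUFFER_LEN
    # frames at the 20 Hz model rate, roughly 5 s assuming ~100 frames); the lower-rate model
    # inputs below are resampled from them on every run.
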
    # img buffers are managed in openCL transform code
    self.inputs = {
      'desire': np.zeros(ModelConstants.DESIRE_LEN * (ModelConstants.HISTORY_BUFFER_LEN+1), dtype=np.float32),
      'traffic_convention': np.zeros(ModelConstants.TRAFFIC_CONVENTION_LEN, dtype=np.float32),
      'lateral_control_params': np.zeros(ModelConstants.LATERAL_CONTROL_PARAMS_LEN, dtype=np.float32),
      'prev_desired_curv': np.zeros(ModelConstants.PREV_DESIRED_CURV_LEN * (ModelConstants.HISTORY_BUFFER_LEN+1), dtype=np.float32),
      'features_buffer': np.zeros(ModelConstants.HISTORY_BUFFER_LEN * ModelConstants.FEATURE_LEN, dtype=np.float32),
    }
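
    # Sizing note: 'desire' and 'prev_desired_curv' hold HISTORY_BUFFER_LEN+1 temporal steps
    # (presumably the strided history plus the current step), while 'features_buffer' holds
    # HISTORY_BUFFER_LEN past feature vectors of length FEATURE_LEN.
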
@@ -100,7 +97,6 @@ class ModelState:
    self.inputs['desire'][:] = self.desire_20Hz.reshape((25,4,-1)).max(axis=1).flatten()
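    # The reshape/max above pools each group of 4 consecutive 20 Hz desire frames down to a 5 Hz
    # history (the (25, 4, -1) shape assumes FULL_HISTORY_BUFFER_LEN + 1 == 100 and
    # HISTORY_BUFFER_LEN + 1 == 25), presumably so a desire pulse lasting only a few frames
    # survives the downsampling.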
    self.inputs['traffic_convention'][:] = inputs['traffic_convention']
    self.inputs['lateral_control_params'][:] = inputs['lateral_control_params']

self.model.setInputBuffer("input_imgs", self.frame.prepare(buf, transform.flatten(), self.model.getCLBuffer("input_imgs"))) |
|
|
|
|
self.model.setInputBuffer("big_input_imgs", self.wide_frame.prepare(wbuf, transform_wide.flatten(), self.model.getCLBuffer("big_input_imgs"))) |
|
|
|
@@ -114,13 +110,8 @@ class ModelState:
    self.full_features_20Hz[:-1] = self.full_features_20Hz[1:]
    self.full_features_20Hz[-1] = outputs['hidden_state'][0, :]
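    # Rolling FIFO: shift the 20 Hz feature history one slot toward the past and append the newest
    # hidden_state feature vector; prev_desired_curv_20hz below is updated the same way.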
    self.prev_desired_curv_20hz[:-1] = self.prev_desired_curv_20hz[1:]
    self.prev_desired_curv_20hz[-1] = outputs['desired_curvature'][0, :]

    idxs = np.arange(-4,-100,-4)[::-1]
    self.inputs['features_buffer'][:] = self.full_features_20Hz[idxs].flatten()
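    # Strided sampling sketch: np.arange(-4,-100,-4)[::-1] == [-96, -92, ..., -8, -4], i.e. every
    # 4th entry of the 20 Hz history (a 5 Hz stride), 24 indices in total, which assumes
    # HISTORY_BUFFER_LEN == 24 so that full_features_20Hz[idxs] exactly fills 'features_buffer'.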
    # TODO model only uses last value now, once that changes we need to input strided action history buffer
    self.inputs['prev_desired_curv'][-ModelConstants.PREV_DESIRED_CURV_LEN:] = 0. * self.prev_desired_curv_20hz[-4, :]
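    # Note: the 0. multiplier means the previous-curvature input is currently fed as all zeros
    # (the rest of the buffer also keeps its np.zeros init); see the TODO above.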
    return outputs

@@ -231,7 +222,6 @@ def main(demo=False):
    is_rhd = sm["driverMonitoringState"].isRHD
    frame_id = sm["roadCameraState"].frameId
    v_ego = max(sm["carState"].vEgo, 0.)
    lateral_control_params = np.array([v_ego, steer_delay], dtype=np.float32)
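    # lateral_control_params packs the non-negative ego speed and steer_delay, matching
    # LATERAL_CONTROL_PARAMS_LEN (assumed to be 2) in ModelState.inputs.
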
if sm.updated["liveCalibration"] and sm.seen['roadCameraState'] and sm.seen['deviceState']: |
|
|
|
|
device_from_calib_euler = np.array(sm["liveCalibration"].rpyCalib, dtype=np.float32) |
|
|
|
|
dc = DEVICE_CAMERAS[(str(sm['deviceState'].deviceType), str(sm['roadCameraState'].sensor))] |
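      # DEVICE_CAMERAS is keyed by (device type, road-camera sensor); together with the rpyCalib
      # euler angles this presumably feeds the camera warp matrices computed further down.
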
|
|
|
@@ -262,7 +252,6 @@ def main(demo=False):
    inputs:dict[str, np.ndarray] = {
      'desire': vec_desire,
      'traffic_convention': traffic_convention,
      'lateral_control_params': lateral_control_params,
    }
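
    # Only the per-frame inputs are passed here; the temporal buffers ('features_buffer',
    # 'prev_desired_curv' and the pooled 'desire' history) are maintained inside ModelState.run(),
    # as shown in the hunks above.
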
    mt1 = time.perf_counter()