onnxruntime: initialize CUDA provider before predictions (#24330)

* initialize CUDA runtime before predictions

* dmonitoringd pauses modeld execution...

* done in other PR
pull/24336/head
Shane Smiskol 3 years ago committed by GitHub
parent 59134c05d6
commit 60b7114024
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 5
      selfdrive/modeld/runners/onnx_runner.py
  2. 5
      selfdrive/test/process_replay/regen.py

@ -25,6 +25,11 @@ def write(d):
def run_loop(m):
ishapes = [[1]+ii.shape[1:] for ii in m.get_inputs()]
keys = [x.name for x in m.get_inputs()]
# run once to initialize CUDA provider
if "CUDAExecutionProvider" in m.get_providers():
m.run(None, dict(zip(keys, [np.zeros(shp, dtype=np.float32) for shp in ishapes])))
print("ready to run onnx model", keys, ishapes, file=sys.stderr)
while 1:
inputs = []

@ -224,6 +224,11 @@ def regen_segment(lr, frs=None, outdir=FAKEDATA):
}
try:
# TODO: make first run of onnxruntime CUDA provider fast
managed_processes["modeld"].start()
managed_processes["dmonitoringmodeld"].start()
time.sleep(5)
# start procs up
ignore = list(fake_daemons.keys()) + ['ui', 'manage_athenad', 'uploader']
ensure_running(managed_processes.values(), started=True, not_run=ignore)

Loading…
Cancel
Save