Revert Tinygrad (#34243)

* Revert "dmonitoringmodeld: use cl transform (#34235)". This reverts commit 684b0b9d4d.
* Revert "load model before calling convert_fp16_to_fp32". This reverts commit 31606a7d15.
* Revert "bump tinygrad". This reverts commit 44f58ff758.
* Revert "Tinygrad runner (#34171)". This reverts commit 7b5a4fbb03.
* Allow init buffer
* typo

parent 4c27878f67
commit f6885dcbec

39 changed files with 1547 additions and 368 deletions
@@ -1,4 +1,10 @@
 #!/usr/bin/env bash

 DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null && pwd)"
+cd "$DIR/../../"
+
+if [ -f "$DIR/libthneed.so" ]; then
+  export LD_PRELOAD="$DIR/libthneed.so"
+fi
+
 exec "$DIR/dmonitoringmodeld.py" "$@"
@@ -1,61 +1,58 @@
 #include "selfdrive/modeld/models/commonmodel.h"

+#include <cassert>
 #include <cmath>
 #include <cstring>

 #include "common/clutil.h"

-DrivingModelFrame::DrivingModelFrame(cl_device_id device_id, cl_context context) : ModelFrame(device_id, context) {
+ModelFrame::ModelFrame(cl_device_id device_id, cl_context context) {
   input_frames = std::make_unique<uint8_t[]>(buf_size);
-  input_frames_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, buf_size, NULL, &err));
+
+  q = CL_CHECK_ERR(clCreateCommandQueue(context, device_id, 0, &err));
+  y_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, MODEL_WIDTH * MODEL_HEIGHT, NULL, &err));
+  u_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, (MODEL_WIDTH / 2) * (MODEL_HEIGHT / 2), NULL, &err));
+  v_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, (MODEL_WIDTH / 2) * (MODEL_HEIGHT / 2), NULL, &err));
   img_buffer_20hz_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, 5*frame_size_bytes, NULL, &err));
   region.origin = 4 * frame_size_bytes;
   region.size = frame_size_bytes;
   last_img_cl = CL_CHECK_ERR(clCreateSubBuffer(img_buffer_20hz_cl, CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, &region, &err));

+  transform_init(&transform, context, device_id);
   loadyuv_init(&loadyuv, context, device_id, MODEL_WIDTH, MODEL_HEIGHT);
-  init_transform(device_id, context, MODEL_WIDTH, MODEL_HEIGHT);
 }

-cl_mem* DrivingModelFrame::prepare(cl_mem yuv_cl, int frame_width, int frame_height, int frame_stride, int frame_uv_offset, const mat3& projection) {
-  run_transform(yuv_cl, MODEL_WIDTH, MODEL_HEIGHT, frame_width, frame_height, frame_stride, frame_uv_offset, projection);
+uint8_t* ModelFrame::prepare(cl_mem yuv_cl, int frame_width, int frame_height, int frame_stride, int frame_uv_offset, const mat3 &projection, cl_mem *output) {
+  transform_queue(&this->transform, q,
+                  yuv_cl, frame_width, frame_height, frame_stride, frame_uv_offset,
+                  y_cl, u_cl, v_cl, MODEL_WIDTH, MODEL_HEIGHT, projection);

   for (int i = 0; i < 4; i++) {
     CL_CHECK(clEnqueueCopyBuffer(q, img_buffer_20hz_cl, img_buffer_20hz_cl, (i+1)*frame_size_bytes, i*frame_size_bytes, frame_size_bytes, 0, nullptr, nullptr));
   }
   loadyuv_queue(&loadyuv, q, y_cl, u_cl, v_cl, last_img_cl);
-  copy_queue(&loadyuv, q, img_buffer_20hz_cl, input_frames_cl, 0, 0, frame_size_bytes);
-  copy_queue(&loadyuv, q, last_img_cl, input_frames_cl, 0, frame_size_bytes, frame_size_bytes);
-
-  // NOTE: Since thneed is using a different command queue, this clFinish is needed to ensure the image is ready.
-  clFinish(q);
-  return &input_frames_cl;
+  if (output == NULL) {
+    CL_CHECK(clEnqueueReadBuffer(q, img_buffer_20hz_cl, CL_TRUE, 0, frame_size_bytes, &input_frames[0], 0, nullptr, nullptr));
+    CL_CHECK(clEnqueueReadBuffer(q, last_img_cl, CL_TRUE, 0, frame_size_bytes, &input_frames[MODEL_FRAME_SIZE], 0, nullptr, nullptr));
+    clFinish(q);
+    return &input_frames[0];
+  } else {
+    copy_queue(&loadyuv, q, img_buffer_20hz_cl, *output, 0, 0, frame_size_bytes);
+    copy_queue(&loadyuv, q, last_img_cl, *output, 0, frame_size_bytes, frame_size_bytes);
+
+    // NOTE: Since thneed is using a different command queue, this clFinish is needed to ensure the image is ready.
+    clFinish(q);
+    return NULL;
+  }
 }

-DrivingModelFrame::~DrivingModelFrame() {
-  deinit_transform();
+ModelFrame::~ModelFrame() {
+  transform_destroy(&transform);
   loadyuv_destroy(&loadyuv);
   CL_CHECK(clReleaseMemObject(img_buffer_20hz_cl));
   CL_CHECK(clReleaseMemObject(last_img_cl));
+  CL_CHECK(clReleaseMemObject(v_cl));
+  CL_CHECK(clReleaseMemObject(u_cl));
+  CL_CHECK(clReleaseMemObject(y_cl));
   CL_CHECK(clReleaseCommandQueue(q));
 }
-
-
-MonitoringModelFrame::MonitoringModelFrame(cl_device_id device_id, cl_context context) : ModelFrame(device_id, context) {
-  input_frames = std::make_unique<uint8_t[]>(buf_size);
-  input_frame_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, buf_size, NULL, &err));
-
-  init_transform(device_id, context, MODEL_WIDTH, MODEL_HEIGHT);
-}
-
-cl_mem* MonitoringModelFrame::prepare(cl_mem yuv_cl, int frame_width, int frame_height, int frame_stride, int frame_uv_offset, const mat3& projection) {
-  run_transform(yuv_cl, MODEL_WIDTH, MODEL_HEIGHT, frame_width, frame_height, frame_stride, frame_uv_offset, projection);
-  clFinish(q);
-  return &y_cl;
-}
-
-MonitoringModelFrame::~MonitoringModelFrame() {
-  deinit_transform();
-  CL_CHECK(clReleaseCommandQueue(q));
-}
@@ -0,0 +1,27 @@
import os
from openpilot.system.hardware import TICI
from openpilot.selfdrive.modeld.runners.runmodel_pyx import RunModel, Runtime
assert Runtime

USE_THNEED = int(os.getenv('USE_THNEED', str(int(TICI))))
USE_SNPE = int(os.getenv('USE_SNPE', str(int(TICI))))

class ModelRunner(RunModel):
  THNEED = 'THNEED'
  SNPE = 'SNPE'
  ONNX = 'ONNX'

  def __new__(cls, paths, *args, **kwargs):
    if ModelRunner.THNEED in paths and USE_THNEED:
      from openpilot.selfdrive.modeld.runners.thneedmodel_pyx import ThneedModel as Runner
      runner_type = ModelRunner.THNEED
    elif ModelRunner.SNPE in paths and USE_SNPE:
      from openpilot.selfdrive.modeld.runners.snpemodel_pyx import SNPEModel as Runner
      runner_type = ModelRunner.SNPE
    elif ModelRunner.ONNX in paths:
      from openpilot.selfdrive.modeld.runners.onnxmodel import ONNXModel as Runner
      runner_type = ModelRunner.ONNX
    else:
      raise Exception("Couldn't select a model runner, make sure to pass at least one valid model path")

    return Runner(str(paths[runner_type]), *args, **kwargs)
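Example (not part of this diff, a minimal sketch with assumed model paths and output size): on a PC, TICI is False, so USE_THNEED and USE_SNPE default to 0 and a paths dict that contains both THNEED and ONNX entries falls through to the ONNX runner.

import numpy as np
from openpilot.selfdrive.modeld.runners import ModelRunner, Runtime

# illustrative values; the real output size and paths come from the model in use
output = np.zeros(6504, dtype=np.float32)
paths = {ModelRunner.THNEED: "models/supercombo.thneed",
         ModelRunner.ONNX: "models/supercombo.onnx"}
# off-device USE_THNEED == 0, so this resolves to ONNXModel("models/supercombo.onnx", ...)
runner = ModelRunner(paths, output, Runtime.CPU, False, None)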
@@ -0,0 +1,98 @@
import onnx
import itertools
import os
import sys
import numpy as np
from typing import Any

from openpilot.selfdrive.modeld.runners.runmodel_pyx import RunModel

ORT_TYPES_TO_NP_TYPES = {'tensor(float16)': np.float16, 'tensor(float)': np.float32, 'tensor(uint8)': np.uint8}

def attributeproto_fp16_to_fp32(attr):
  float32_list = np.frombuffer(attr.raw_data, dtype=np.float16)
  attr.data_type = 1
  attr.raw_data = float32_list.astype(np.float32).tobytes()

def convert_fp16_to_fp32(onnx_path_or_bytes):
  if isinstance(onnx_path_or_bytes, bytes):
    model = onnx.load_from_string(onnx_path_or_bytes)
  elif isinstance(onnx_path_or_bytes, str):
    model = onnx.load(onnx_path_or_bytes)

  for i in model.graph.initializer:
    if i.data_type == 10:
      attributeproto_fp16_to_fp32(i)
  for i in itertools.chain(model.graph.input, model.graph.output):
    if i.type.tensor_type.elem_type == 10:
      i.type.tensor_type.elem_type = 1
  for i in model.graph.node:
    if i.op_type == 'Cast' and i.attribute[0].i == 10:
      i.attribute[0].i = 1
    for a in i.attribute:
      if hasattr(a, 't'):
        if a.t.data_type == 10:
          attributeproto_fp16_to_fp32(a.t)
  return model.SerializeToString()

def create_ort_session(path, fp16_to_fp32):
  os.environ["OMP_NUM_THREADS"] = "4"
  os.environ["OMP_WAIT_POLICY"] = "PASSIVE"

  import onnxruntime as ort
  print("Onnx available providers: ", ort.get_available_providers(), file=sys.stderr)
  options = ort.SessionOptions()
  options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL

  provider: str | tuple[str, dict[Any, Any]]
  if 'OpenVINOExecutionProvider' in ort.get_available_providers() and 'ONNXCPU' not in os.environ:
    provider = 'OpenVINOExecutionProvider'
  elif 'CUDAExecutionProvider' in ort.get_available_providers() and 'ONNXCPU' not in os.environ:
    options.intra_op_num_threads = 2
    provider = ('CUDAExecutionProvider', {'cudnn_conv_algo_search': 'EXHAUSTIVE'})
  else:
    options.intra_op_num_threads = 2
    options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
    options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
    provider = 'CPUExecutionProvider'

  model_data = convert_fp16_to_fp32(path) if fp16_to_fp32 else path
  print("Onnx selected provider: ", [provider], file=sys.stderr)
  ort_session = ort.InferenceSession(model_data, options, providers=[provider])
  print("Onnx using ", ort_session.get_providers(), file=sys.stderr)
  return ort_session


class ONNXModel(RunModel):
  def __init__(self, path, output, runtime, use_tf8, cl_context):
    self.inputs = {}
    self.output = output

    self.session = create_ort_session(path, fp16_to_fp32=True)
    self.input_names = [x.name for x in self.session.get_inputs()]
    self.input_shapes = {x.name: [1, *x.shape[1:]] for x in self.session.get_inputs()}
    self.input_dtypes = {x.name: ORT_TYPES_TO_NP_TYPES[x.type] for x in self.session.get_inputs()}

    # run once to initialize CUDA provider
    if "CUDAExecutionProvider" in self.session.get_providers():
      self.session.run(None, {k: np.zeros(self.input_shapes[k], dtype=self.input_dtypes[k]) for k in self.input_names})
    print("ready to run onnx model", self.input_shapes, file=sys.stderr)

  def addInput(self, name, buffer):
    assert name in self.input_names
    self.inputs[name] = buffer

  def setInputBuffer(self, name, buffer):
    assert name in self.inputs
    self.inputs[name] = buffer

  def getCLBuffer(self, name):
    return None

  def execute(self):
    inputs = {k: v.view(self.input_dtypes[k]) for k,v in self.inputs.items()}
    inputs = {k: v.reshape(self.input_shapes[k]).astype(self.input_dtypes[k]) for k,v in inputs.items()}
    outputs = self.session.run(None, inputs)
    assert len(outputs) == 1, "Only single model outputs are supported"
    self.output[:] = outputs[0]
    return self.output
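Example (not part of this diff): a quick way to see which execution provider the selection logic above picks on a given machine. The model path is illustrative.

from openpilot.selfdrive.modeld.runners.onnxmodel import create_ort_session

session = create_ort_session("models/supercombo.onnx", fp16_to_fp32=True)  # assumed path
print(session.get_providers())    # e.g. ['CPUExecutionProvider'] without CUDA or OpenVINO
for inp in session.get_inputs():  # input names, shapes and dtypes as reported by onnxruntime
  print(inp.name, inp.shape, inp.type)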
@@ -1,37 +0,0 @@
import onnx
import onnxruntime as ort
import numpy as np
import itertools

ORT_TYPES_TO_NP_TYPES = {'tensor(float16)': np.float16, 'tensor(float)': np.float32, 'tensor(uint8)': np.uint8}

def attributeproto_fp16_to_fp32(attr):
  float32_list = np.frombuffer(attr.raw_data, dtype=np.float16)
  attr.data_type = 1
  attr.raw_data = float32_list.astype(np.float32).tobytes()

def convert_fp16_to_fp32(model):
  for i in model.graph.initializer:
    if i.data_type == 10:
      attributeproto_fp16_to_fp32(i)
  for i in itertools.chain(model.graph.input, model.graph.output):
    if i.type.tensor_type.elem_type == 10:
      i.type.tensor_type.elem_type = 1
  for i in model.graph.node:
    if i.op_type == 'Cast' and i.attribute[0].i == 10:
      i.attribute[0].i = 1
    for a in i.attribute:
      if hasattr(a, 't'):
        if a.t.data_type == 10:
          attributeproto_fp16_to_fp32(a.t)
  return model.SerializeToString()


def make_onnx_cpu_runner(model_path):
  options = ort.SessionOptions()
  options.intra_op_num_threads = 4
  options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
  options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
  model_data = convert_fp16_to_fp32(onnx.load(model_path))
  return ort.InferenceSession(model_data, options, providers=['CPUExecutionProvider'])
@@ -0,0 +1,4 @@
#pragma once

#include "selfdrive/modeld/runners/runmodel.h"
#include "selfdrive/modeld/runners/snpemodel.h"
@@ -0,0 +1,49 @@
#pragma once

#include <string>
#include <vector>
#include <memory>
#include <cassert>

#include "common/clutil.h"
#include "common/swaglog.h"

#define USE_CPU_RUNTIME 0
#define USE_GPU_RUNTIME 1
#define USE_DSP_RUNTIME 2

struct ModelInput {
  const std::string name;
  float *buffer;
  int size;

  ModelInput(const std::string _name, float *_buffer, int _size) : name(_name), buffer(_buffer), size(_size) {}
  virtual void setBuffer(float *_buffer, int _size) {
    assert(size == _size || size == 0);
    buffer = _buffer;
    size = _size;
  }
};

class RunModel {
public:
  std::vector<std::unique_ptr<ModelInput>> inputs;

  virtual ~RunModel() {}
  virtual void execute() {}
  virtual void* getCLBuffer(const std::string name) { return nullptr; }

  virtual void addInput(const std::string name, float *buffer, int size) {
    inputs.push_back(std::unique_ptr<ModelInput>(new ModelInput(name, buffer, size)));
  }
  virtual void setInputBuffer(const std::string name, float *buffer, int size) {
    for (auto &input : inputs) {
      if (name == input->name) {
        input->setBuffer(buffer, size);
        return;
      }
    }
    LOGE("Tried to update input `%s` but no input with this name exists", name.c_str());
    assert(false);
  }
};
@@ -0,0 +1,14 @@
# distutils: language = c++

from libcpp.string cimport string

cdef extern from "selfdrive/modeld/runners/runmodel.h":
  cdef int USE_CPU_RUNTIME
  cdef int USE_GPU_RUNTIME
  cdef int USE_DSP_RUNTIME

  cdef cppclass RunModel:
    void addInput(string, float*, int)
    void setInputBuffer(string, float*, int)
    void * getCLBuffer(string)
    void execute()
@@ -0,0 +1,6 @@
# distutils: language = c++

from .runmodel cimport RunModel as cppRunModel

cdef class RunModel:
  cdef cppRunModel * model
@@ -0,0 +1,37 @@
# distutils: language = c++
# cython: c_string_encoding=ascii, language_level=3

from libcpp.string cimport string

from .runmodel cimport USE_CPU_RUNTIME, USE_GPU_RUNTIME, USE_DSP_RUNTIME
from selfdrive.modeld.models.commonmodel_pyx cimport CLMem

class Runtime:
  CPU = USE_CPU_RUNTIME
  GPU = USE_GPU_RUNTIME
  DSP = USE_DSP_RUNTIME

cdef class RunModel:
  def __dealloc__(self):
    del self.model

  def addInput(self, string name, float[:] buffer):
    if buffer is not None:
      self.model.addInput(name, &buffer[0], len(buffer))
    else:
      self.model.addInput(name, NULL, 0)

  def setInputBuffer(self, string name, float[:] buffer):
    if buffer is not None:
      self.model.setInputBuffer(name, &buffer[0], len(buffer))
    else:
      self.model.setInputBuffer(name, NULL, 0)

  def getCLBuffer(self, string name):
    cdef void * cl_buf = self.model.getCLBuffer(name)
    if not cl_buf:
      return None
    return CLMem.create(cl_buf)

  def execute(self):
    self.model.execute()
@@ -0,0 +1,116 @@
#pragma clang diagnostic ignored "-Wexceptions"

#include "selfdrive/modeld/runners/snpemodel.h"

#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "common/util.h"
#include "common/timing.h"

void PrintErrorStringAndExit() {
  std::cerr << zdl::DlSystem::getLastErrorString() << std::endl;
  std::exit(EXIT_FAILURE);
}

SNPEModel::SNPEModel(const std::string path, float *_output, size_t _output_size, int runtime, bool _use_tf8, cl_context context) {
  output = _output;
  output_size = _output_size;
  use_tf8 = _use_tf8;

#ifdef QCOM2
  if (runtime == USE_GPU_RUNTIME) {
    snpe_runtime = zdl::DlSystem::Runtime_t::GPU;
  } else if (runtime == USE_DSP_RUNTIME) {
    snpe_runtime = zdl::DlSystem::Runtime_t::DSP;
  } else {
    snpe_runtime = zdl::DlSystem::Runtime_t::CPU;
  }
  assert(zdl::SNPE::SNPEFactory::isRuntimeAvailable(snpe_runtime));
#endif
  model_data = util::read_file(path);
  assert(model_data.size() > 0);

  // load model
  std::unique_ptr<zdl::DlContainer::IDlContainer> container = zdl::DlContainer::IDlContainer::open((uint8_t*)model_data.data(), model_data.size());
  if (!container) { PrintErrorStringAndExit(); }
  LOGW("loaded model with size: %lu", model_data.size());

  // create model runner
  zdl::SNPE::SNPEBuilder snpe_builder(container.get());
  while (!snpe) {
#ifdef QCOM2
    snpe = snpe_builder.setOutputLayers({})
                       .setRuntimeProcessor(snpe_runtime)
                       .setUseUserSuppliedBuffers(true)
                       .setPerformanceProfile(zdl::DlSystem::PerformanceProfile_t::HIGH_PERFORMANCE)
                       .build();
#else
    snpe = snpe_builder.setOutputLayers({})
                       .setUseUserSuppliedBuffers(true)
                       .setPerformanceProfile(zdl::DlSystem::PerformanceProfile_t::HIGH_PERFORMANCE)
                       .build();
#endif
    if (!snpe) std::cerr << zdl::DlSystem::getLastErrorString() << std::endl;
  }

  // create output buffer
  zdl::DlSystem::UserBufferEncodingFloat ub_encoding_float;
  zdl::DlSystem::IUserBufferFactory &ub_factory = zdl::SNPE::SNPEFactory::getUserBufferFactory();

  const auto &output_tensor_names_opt = snpe->getOutputTensorNames();
  if (!output_tensor_names_opt) throw std::runtime_error("Error obtaining output tensor names");
  const auto &output_tensor_names = *output_tensor_names_opt;
  assert(output_tensor_names.size() == 1);
  const char *output_tensor_name = output_tensor_names.at(0);
  const zdl::DlSystem::TensorShape &buffer_shape = snpe->getInputOutputBufferAttributes(output_tensor_name)->getDims();
  if (output_size != 0) {
    assert(output_size == buffer_shape[1]);
  } else {
    output_size = buffer_shape[1];
  }
  std::vector<size_t> output_strides = {output_size * sizeof(float), sizeof(float)};
  output_buffer = ub_factory.createUserBuffer(output, output_size * sizeof(float), output_strides, &ub_encoding_float);
  output_map.add(output_tensor_name, output_buffer.get());
}

void SNPEModel::addInput(const std::string name, float *buffer, int size) {
  const int idx = inputs.size();
  const auto &input_tensor_names_opt = snpe->getInputTensorNames();
  if (!input_tensor_names_opt) throw std::runtime_error("Error obtaining input tensor names");
  const auto &input_tensor_names = *input_tensor_names_opt;
  const char *input_tensor_name = input_tensor_names.at(idx);
  const bool input_tf8 = use_tf8 && strcmp(input_tensor_name, "input_img") == 0;  // TODO: This is a terrible hack, get rid of this name check both here and in onnx_runner.py
  LOGW("adding index %d: %s", idx, input_tensor_name);

  zdl::DlSystem::UserBufferEncodingFloat ub_encoding_float;
  zdl::DlSystem::UserBufferEncodingTf8 ub_encoding_tf8(0, 1./255);  // network takes 0-1
  zdl::DlSystem::IUserBufferFactory &ub_factory = zdl::SNPE::SNPEFactory::getUserBufferFactory();
  zdl::DlSystem::UserBufferEncoding *input_encoding = input_tf8 ? (zdl::DlSystem::UserBufferEncoding*)&ub_encoding_tf8 : (zdl::DlSystem::UserBufferEncoding*)&ub_encoding_float;

  const auto &buffer_shape_opt = snpe->getInputDimensions(input_tensor_name);
  const zdl::DlSystem::TensorShape &buffer_shape = *buffer_shape_opt;
  size_t size_of_input = input_tf8 ? sizeof(uint8_t) : sizeof(float);
  std::vector<size_t> strides(buffer_shape.rank());
  strides[strides.size() - 1] = size_of_input;
  size_t product = 1;
  for (size_t i = 0; i < buffer_shape.rank(); i++) product *= buffer_shape[i];
  size_t stride = strides[strides.size() - 1];
  for (size_t i = buffer_shape.rank() - 1; i > 0; i--) {
    stride *= buffer_shape[i];
    strides[i-1] = stride;
  }

  auto input_buffer = ub_factory.createUserBuffer(buffer, product*size_of_input, strides, input_encoding);
  input_map.add(input_tensor_name, input_buffer.get());
  inputs.push_back(std::unique_ptr<SNPEModelInput>(new SNPEModelInput(name, buffer, size, std::move(input_buffer))));
}

void SNPEModel::execute() {
  if (!snpe->execute(input_map, output_map)) {
    PrintErrorStringAndExit();
  }
}
@@ -0,0 +1,52 @@
#pragma once
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

#include <memory>
#include <string>
#include <utility>

#include <DlContainer/IDlContainer.hpp>
#include <DlSystem/DlError.hpp>
#include <DlSystem/ITensor.hpp>
#include <DlSystem/ITensorFactory.hpp>
#include <DlSystem/IUserBuffer.hpp>
#include <DlSystem/IUserBufferFactory.hpp>
#include <SNPE/SNPE.hpp>
#include <SNPE/SNPEBuilder.hpp>
#include <SNPE/SNPEFactory.hpp>

#include "selfdrive/modeld/runners/runmodel.h"

struct SNPEModelInput : public ModelInput {
  std::unique_ptr<zdl::DlSystem::IUserBuffer> snpe_buffer;

  SNPEModelInput(const std::string _name, float *_buffer, int _size, std::unique_ptr<zdl::DlSystem::IUserBuffer> _snpe_buffer) : ModelInput(_name, _buffer, _size), snpe_buffer(std::move(_snpe_buffer)) {}
  void setBuffer(float *_buffer, int _size) {
    ModelInput::setBuffer(_buffer, _size);
    assert(snpe_buffer->setBufferAddress(_buffer) == true);
  }
};

class SNPEModel : public RunModel {
public:
  SNPEModel(const std::string path, float *_output, size_t _output_size, int runtime, bool use_tf8 = false, cl_context context = NULL);
  void addInput(const std::string name, float *buffer, int size);
  void execute();

private:
  std::string model_data;

#ifdef QCOM2
  zdl::DlSystem::Runtime_t snpe_runtime;
#endif

  // snpe model stuff
  std::unique_ptr<zdl::SNPE::SNPE> snpe;
  zdl::DlSystem::UserBufferMap input_map;
  zdl::DlSystem::UserBufferMap output_map;
  std::unique_ptr<zdl::DlSystem::IUserBuffer> output_buffer;

  bool use_tf8;
  float *output;
  size_t output_size;
};
@@ -0,0 +1,9 @@
# distutils: language = c++

from libcpp.string cimport string

from msgq.visionipc.visionipc cimport cl_context

cdef extern from "selfdrive/modeld/runners/snpemodel.h":
  cdef cppclass SNPEModel:
    SNPEModel(string, float*, size_t, int, bool, cl_context)
@@ -0,0 +1,17 @@
# distutils: language = c++
# cython: c_string_encoding=ascii, language_level=3

import os
from libcpp cimport bool
from libcpp.string cimport string

from .snpemodel cimport SNPEModel as cppSNPEModel
from selfdrive.modeld.models.commonmodel_pyx cimport CLContext
from selfdrive.modeld.runners.runmodel_pyx cimport RunModel
from selfdrive.modeld.runners.runmodel cimport RunModel as cppRunModel

os.environ['ADSP_LIBRARY_PATH'] = "/data/pythonpath/third_party/snpe/dsp/"

cdef class SNPEModel(RunModel):
  def __cinit__(self, string path, float[:] output, int runtime, bool use_tf8, CLContext context):
    self.model = <cppRunModel *> new cppSNPEModel(path, &output[0], len(output), runtime, use_tf8, context.context)
@@ -0,0 +1,58 @@
#include "selfdrive/modeld/runners/thneedmodel.h"

#include <string>

#include "common/swaglog.h"

ThneedModel::ThneedModel(const std::string path, float *_output, size_t _output_size, int runtime, bool luse_tf8, cl_context context) {
  thneed = new Thneed(true, context);
  thneed->load(path.c_str());
  thneed->clexec();

  recorded = false;
  output = _output;
}

void* ThneedModel::getCLBuffer(const std::string name) {
  int index = -1;
  for (int i = 0; i < inputs.size(); i++) {
    if (name == inputs[i]->name) {
      index = i;
      break;
    }
  }

  if (index == -1) {
    LOGE("Tried to get CL buffer for input `%s` but no input with this name exists", name.c_str());
    assert(false);
  }

  if (thneed->input_clmem.size() >= inputs.size()) {
    return &thneed->input_clmem[inputs.size() - index - 1];
  } else {
    return nullptr;
  }
}

void ThneedModel::execute() {
  if (!recorded) {
    thneed->record = true;
    float *input_buffers[inputs.size()];
    for (int i = 0; i < inputs.size(); i++) {
      input_buffers[inputs.size() - i - 1] = inputs[i]->buffer;
    }

    thneed->copy_inputs(input_buffers);
    thneed->clexec();
    thneed->copy_output(output);
    thneed->stop();

    recorded = true;
  } else {
    float *input_buffers[inputs.size()];
    for (int i = 0; i < inputs.size(); i++) {
      input_buffers[inputs.size() - i - 1] = inputs[i]->buffer;
    }
    thneed->execute(input_buffers, output);
  }
}
@@ -0,0 +1,17 @@
#pragma once

#include <string>

#include "selfdrive/modeld/runners/runmodel.h"
#include "selfdrive/modeld/thneed/thneed.h"

class ThneedModel : public RunModel {
public:
  ThneedModel(const std::string path, float *_output, size_t _output_size, int runtime, bool use_tf8 = false, cl_context context = NULL);
  void *getCLBuffer(const std::string name);
  void execute();
private:
  Thneed *thneed = NULL;
  bool recorded;
  float *output;
};
@@ -0,0 +1,9 @@
# distutils: language = c++

from libcpp.string cimport string

from msgq.visionipc.visionipc cimport cl_context

cdef extern from "selfdrive/modeld/runners/thneedmodel.h":
  cdef cppclass ThneedModel:
    ThneedModel(string, float*, size_t, int, bool, cl_context)
@@ -0,0 +1,14 @@
# distutils: language = c++
# cython: c_string_encoding=ascii, language_level=3

from libcpp cimport bool
from libcpp.string cimport string

from .thneedmodel cimport ThneedModel as cppThneedModel
from selfdrive.modeld.models.commonmodel_pyx cimport CLContext
from selfdrive.modeld.runners.runmodel_pyx cimport RunModel
from selfdrive.modeld.runners.runmodel cimport RunModel as cppRunModel

cdef class ThneedModel(RunModel):
  def __cinit__(self, string path, float[:] output, int runtime, bool use_tf8, CLContext context):
    self.model = <cppRunModel *> new cppThneedModel(path, &output[0], len(output), runtime, use_tf8, context.context)
@@ -1,8 +0,0 @@
from tinygrad.tensor import Tensor
from tinygrad.helpers import to_mv

def qcom_tensor_from_opencl_address(opencl_address, shape, dtype):
  cl_buf_desc_ptr = to_mv(opencl_address, 8).cast('Q')[0]
  rawbuf_ptr = to_mv(cl_buf_desc_ptr, 0x100).cast('Q')[20]  # offset 0xA0 is a raw gpu pointer.
  return Tensor.from_blob(rawbuf_ptr, shape, dtype=dtype, device='QCOM')
@@ -0,0 +1,8 @@
thneed is an SNPE accelerator. I know SNPE is already an accelerator, but sometimes things need to go even faster..

It runs on the local device, and caches a single model run. Then it replays it, but fast.

thneed slices through abstraction layers like a fish.

You need a thneed.
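Example (not part of this diff): a sketch of the record-and-replay flow described above, driven through the ThneedModel wrapper this revert restores. The model path, input name, and sizes are assumed, and this only makes sense on a comma device where the .thneed file and the kgsl driver exist.

import numpy as np
from openpilot.selfdrive.modeld.models.commonmodel_pyx import CLContext
from openpilot.selfdrive.modeld.runners.thneedmodel_pyx import ThneedModel
from openpilot.selfdrive.modeld.runners.runmodel_pyx import Runtime

output = np.zeros(6504, dtype=np.float32)                # assumed output size
model = ThneedModel("models/supercombo.thneed", output,  # assumed model path
                    Runtime.GPU, False, CLContext())
img = np.zeros(128 * 256 * 12, dtype=np.float32)         # assumed input name and shape
model.addInput("input_imgs", img)
model.execute()  # first run records the CL kernel queue and kgsl ioctls
model.execute()  # later runs replay the cached GPU commands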
@@ -0,0 +1,154 @@
#include <cassert>
#include <set>

#include "third_party/json11/json11.hpp"
#include "common/util.h"
#include "common/clutil.h"
#include "common/swaglog.h"
#include "selfdrive/modeld/thneed/thneed.h"
using namespace json11;

extern map<cl_program, string> g_program_source;

void Thneed::load(const char *filename) {
  LOGD("Thneed::load: loading from %s\n", filename);

  string buf = util::read_file(filename);
  int jsz = *(int *)buf.data();
  string jsonerr;
  string jj(buf.data() + sizeof(int), jsz);
  Json jdat = Json::parse(jj, jsonerr);

  map<cl_mem, cl_mem> real_mem;
  real_mem[NULL] = NULL;

  int ptr = sizeof(int)+jsz;
  for (auto &obj : jdat["objects"].array_items()) {
    auto mobj = obj.object_items();
    int sz = mobj["size"].int_value();
    cl_mem clbuf = NULL;

    if (mobj["buffer_id"].string_value().size() > 0) {
      // image buffer must already be allocated
      clbuf = real_mem[*(cl_mem*)(mobj["buffer_id"].string_value().data())];
      assert(mobj["needs_load"].bool_value() == false);
    } else {
      if (mobj["needs_load"].bool_value()) {
        clbuf = clCreateBuffer(context, CL_MEM_COPY_HOST_PTR | CL_MEM_READ_WRITE, sz, &buf[ptr], NULL);
        if (debug >= 1) printf("loading %p %d @ 0x%X\n", clbuf, sz, ptr);
        ptr += sz;
      } else {
        // TODO: is there a faster way to init zeroed out buffers?
        void *host_zeros = calloc(sz, 1);
        clbuf = clCreateBuffer(context, CL_MEM_COPY_HOST_PTR | CL_MEM_READ_WRITE, sz, host_zeros, NULL);
        free(host_zeros);
      }
    }
    assert(clbuf != NULL);

    if (mobj["arg_type"] == "image2d_t" || mobj["arg_type"] == "image1d_t") {
      cl_image_desc desc = {0};
      desc.image_type = (mobj["arg_type"] == "image2d_t") ? CL_MEM_OBJECT_IMAGE2D : CL_MEM_OBJECT_IMAGE1D_BUFFER;
      desc.image_width = mobj["width"].int_value();
      desc.image_height = mobj["height"].int_value();
      desc.image_row_pitch = mobj["row_pitch"].int_value();
      assert(sz == desc.image_height*desc.image_row_pitch);
#ifdef QCOM2
      desc.buffer = clbuf;
#else
      // TODO: we are creating unused buffers on PC
      clReleaseMemObject(clbuf);
#endif
      cl_image_format format = {0};
      format.image_channel_order = CL_RGBA;
      format.image_channel_data_type = mobj["float32"].bool_value() ? CL_FLOAT : CL_HALF_FLOAT;

      cl_int errcode;

#ifndef QCOM2
      if (mobj["needs_load"].bool_value()) {
        clbuf = clCreateImage(context, CL_MEM_COPY_HOST_PTR | CL_MEM_READ_WRITE, &format, &desc, &buf[ptr-sz], &errcode);
      } else {
        clbuf = clCreateImage(context, CL_MEM_READ_WRITE, &format, &desc, NULL, &errcode);
      }
#else
      clbuf = clCreateImage(context, CL_MEM_READ_WRITE, &format, &desc, NULL, &errcode);
#endif
      if (clbuf == NULL) {
        LOGE("clError: %s create image %zux%zu rp %zu with buffer %p\n", cl_get_error_string(errcode),
             desc.image_width, desc.image_height, desc.image_row_pitch, desc.buffer);
      }
      assert(clbuf != NULL);
    }

    real_mem[*(cl_mem*)(mobj["id"].string_value().data())] = clbuf;
  }

  map<string, cl_program> g_programs;
  for (const auto &[name, source] : jdat["programs"].object_items()) {
    if (debug >= 1) printf("building %s with size %zu\n", name.c_str(), source.string_value().size());
    g_programs[name] = cl_program_from_source(context, device_id, source.string_value());
  }

  for (auto &obj : jdat["inputs"].array_items()) {
    auto mobj = obj.object_items();
    int sz = mobj["size"].int_value();
    cl_mem aa = real_mem[*(cl_mem*)(mobj["buffer_id"].string_value().data())];
    input_clmem.push_back(aa);
    input_sizes.push_back(sz);
    LOGD("Thneed::load: adding input %s with size %d\n", mobj["name"].string_value().data(), sz);

    cl_int cl_err;
    void *ret = clEnqueueMapBuffer(command_queue, aa, CL_TRUE, CL_MAP_WRITE, 0, sz, 0, NULL, NULL, &cl_err);
    if (cl_err != CL_SUCCESS) LOGE("clError: %s map %p %d\n", cl_get_error_string(cl_err), aa, sz);
    assert(cl_err == CL_SUCCESS);
    inputs.push_back(ret);
  }

  for (auto &obj : jdat["outputs"].array_items()) {
    auto mobj = obj.object_items();
    int sz = mobj["size"].int_value();
    LOGD("Thneed::save: adding output with size %d\n", sz);
    // TODO: support multiple outputs
    output = real_mem[*(cl_mem*)(mobj["buffer_id"].string_value().data())];
    assert(output != NULL);
  }

  for (auto &obj : jdat["binaries"].array_items()) {
    string name = obj["name"].string_value();
    size_t length = obj["length"].int_value();
    if (debug >= 1) printf("binary %s with size %zu\n", name.c_str(), length);
    g_programs[name] = cl_program_from_binary(context, device_id, (const uint8_t*)&buf[ptr], length);
    ptr += length;
  }

  for (auto &obj : jdat["kernels"].array_items()) {
    auto gws = obj["global_work_size"];
    auto lws = obj["local_work_size"];
    auto kk = shared_ptr<CLQueuedKernel>(new CLQueuedKernel(this));

    kk->name = obj["name"].string_value();
    kk->program = g_programs[kk->name];
    kk->work_dim = obj["work_dim"].int_value();
    for (int i = 0; i < kk->work_dim; i++) {
      kk->global_work_size[i] = gws[i].int_value();
      kk->local_work_size[i] = lws[i].int_value();
    }
    kk->num_args = obj["num_args"].int_value();
    for (int i = 0; i < kk->num_args; i++) {
      string arg = obj["args"].array_items()[i].string_value();
      int arg_size = obj["args_size"].array_items()[i].int_value();
      kk->args_size.push_back(arg_size);
      if (arg_size == 8) {
        cl_mem val = *(cl_mem*)(arg.data());
        val = real_mem[val];
        kk->args.push_back(string((char*)&val, sizeof(val)));
      } else {
        kk->args.push_back(arg);
      }
    }
    kq.push_back(kk);
  }

  clFinish(command_queue);
}
@@ -0,0 +1,133 @@
#pragma once

#ifndef __user
#define __user __attribute__(())
#endif

#include <cstdint>
#include <cstdlib>
#include <memory>
#include <string>
#include <vector>

#include <CL/cl.h>

#include "third_party/linux/include/msm_kgsl.h"

using namespace std;

cl_int thneed_clSetKernelArg(cl_kernel kernel, cl_uint arg_index, size_t arg_size, const void *arg_value);

namespace json11 {
  class Json;
}
class Thneed;

class GPUMalloc {
public:
  GPUMalloc(int size, int fd);
  ~GPUMalloc();
  void *alloc(int size);
private:
  uint64_t base;
  int remaining;
};

class CLQueuedKernel {
public:
  CLQueuedKernel(Thneed *lthneed) { thneed = lthneed; }
  CLQueuedKernel(Thneed *lthneed,
                 cl_kernel _kernel,
                 cl_uint _work_dim,
                 const size_t *_global_work_size,
                 const size_t *_local_work_size);
  cl_int exec();
  void debug_print(bool verbose);
  int get_arg_num(const char *search_arg_name);
  cl_program program;
  string name;
  cl_uint num_args;
  vector<string> arg_names;
  vector<string> arg_types;
  vector<string> args;
  vector<int> args_size;
  cl_kernel kernel = NULL;
  json11::Json to_json() const;

  cl_uint work_dim;
  size_t global_work_size[3] = {0};
  size_t local_work_size[3] = {0};
private:
  Thneed *thneed;
};

class CachedIoctl {
public:
  virtual void exec() {}
};

class CachedSync: public CachedIoctl {
public:
  CachedSync(Thneed *lthneed, string ldata) { thneed = lthneed; data = ldata; }
  void exec();
private:
  Thneed *thneed;
  string data;
};

class CachedCommand: public CachedIoctl {
public:
  CachedCommand(Thneed *lthneed, struct kgsl_gpu_command *cmd);
  void exec();
private:
  void disassemble(int cmd_index);
  struct kgsl_gpu_command cache;
  unique_ptr<kgsl_command_object[]> cmds;
  unique_ptr<kgsl_command_object[]> objs;
  Thneed *thneed;
  vector<shared_ptr<CLQueuedKernel> > kq;
};

class Thneed {
public:
  Thneed(bool do_clinit=false, cl_context _context = NULL);
  void stop();
  void execute(float **finputs, float *foutput, bool slow=false);
  void wait();

  vector<cl_mem> input_clmem;
  vector<void *> inputs;
  vector<size_t> input_sizes;
  cl_mem output = NULL;

  cl_context context = NULL;
  cl_command_queue command_queue;
  cl_device_id device_id;
  int context_id;

  // protected?
  bool record = false;
  int debug;
  int timestamp;

#ifdef QCOM2
  unique_ptr<GPUMalloc> ram;
  vector<unique_ptr<CachedIoctl> > cmds;
  int fd;
#endif

  // all CL kernels
  void copy_inputs(float **finputs, bool internal=false);
  void copy_output(float *foutput);
  cl_int clexec();
  vector<shared_ptr<CLQueuedKernel> > kq;

  // pending CL kernels
  vector<shared_ptr<CLQueuedKernel> > ckq;

  // loading
  void load(const char *filename);
private:
  void clinit();
};
@@ -0,0 +1,216 @@
#include "selfdrive/modeld/thneed/thneed.h"

#include <cassert>
#include <cstring>
#include <map>

#include "common/clutil.h"
#include "common/timing.h"

map<pair<cl_kernel, int>, string> g_args;
map<pair<cl_kernel, int>, int> g_args_size;
map<cl_program, string> g_program_source;

void Thneed::stop() {
  //printf("Thneed::stop: recorded %lu commands\n", cmds.size());
  record = false;
}

void Thneed::clinit() {
  device_id = cl_get_device_id(CL_DEVICE_TYPE_DEFAULT);
  if (context == NULL) context = CL_CHECK_ERR(clCreateContext(NULL, 1, &device_id, NULL, NULL, &err));
  //cl_command_queue_properties props[3] = {CL_QUEUE_PROPERTIES, CL_QUEUE_PROFILING_ENABLE, 0};
  cl_command_queue_properties props[3] = {CL_QUEUE_PROPERTIES, 0, 0};
  command_queue = CL_CHECK_ERR(clCreateCommandQueueWithProperties(context, device_id, props, &err));
  printf("Thneed::clinit done\n");
}

cl_int Thneed::clexec() {
  if (debug >= 1) printf("Thneed::clexec: running %lu queued kernels\n", kq.size());
  for (auto &k : kq) {
    if (record) ckq.push_back(k);
    cl_int ret = k->exec();
    assert(ret == CL_SUCCESS);
  }
  return clFinish(command_queue);
}

void Thneed::copy_inputs(float **finputs, bool internal) {
  for (int idx = 0; idx < inputs.size(); ++idx) {
    if (debug >= 1) printf("copying %lu -- %p -> %p (cl %p)\n", input_sizes[idx], finputs[idx], inputs[idx], input_clmem[idx]);

    if (internal) {
      // if it's internal, using memcpy is fine since the buffer sync is cached in the ioctl layer
      if (finputs[idx] != NULL) memcpy(inputs[idx], finputs[idx], input_sizes[idx]);
    } else {
      if (finputs[idx] != NULL) CL_CHECK(clEnqueueWriteBuffer(command_queue, input_clmem[idx], CL_TRUE, 0, input_sizes[idx], finputs[idx], 0, NULL, NULL));
    }
  }
}

void Thneed::copy_output(float *foutput) {
  if (output != NULL) {
    size_t sz;
    clGetMemObjectInfo(output, CL_MEM_SIZE, sizeof(sz), &sz, NULL);
    if (debug >= 1) printf("copying %lu for output %p -> %p\n", sz, output, foutput);
    CL_CHECK(clEnqueueReadBuffer(command_queue, output, CL_TRUE, 0, sz, foutput, 0, NULL, NULL));
  } else {
    printf("CAUTION: model output is NULL, does it have no outputs?\n");
  }
}

// *********** CLQueuedKernel ***********

CLQueuedKernel::CLQueuedKernel(Thneed *lthneed,
                               cl_kernel _kernel,
                               cl_uint _work_dim,
                               const size_t *_global_work_size,
                               const size_t *_local_work_size) {
  thneed = lthneed;
  kernel = _kernel;
  work_dim = _work_dim;
  assert(work_dim <= 3);
  for (int i = 0; i < work_dim; i++) {
    global_work_size[i] = _global_work_size[i];
    local_work_size[i] = _local_work_size[i];
  }

  char _name[0x100];
  clGetKernelInfo(kernel, CL_KERNEL_FUNCTION_NAME, sizeof(_name), _name, NULL);
  name = string(_name);
  clGetKernelInfo(kernel, CL_KERNEL_NUM_ARGS, sizeof(num_args), &num_args, NULL);

  // get args
  for (int i = 0; i < num_args; i++) {
    char arg_name[0x100] = {0};
    clGetKernelArgInfo(kernel, i, CL_KERNEL_ARG_NAME, sizeof(arg_name), arg_name, NULL);
    arg_names.push_back(string(arg_name));
    clGetKernelArgInfo(kernel, i, CL_KERNEL_ARG_TYPE_NAME, sizeof(arg_name), arg_name, NULL);
    arg_types.push_back(string(arg_name));

    args.push_back(g_args[make_pair(kernel, i)]);
    args_size.push_back(g_args_size[make_pair(kernel, i)]);
  }

  // get program
  clGetKernelInfo(kernel, CL_KERNEL_PROGRAM, sizeof(program), &program, NULL);
}

int CLQueuedKernel::get_arg_num(const char *search_arg_name) {
  for (int i = 0; i < num_args; i++) {
    if (arg_names[i] == search_arg_name) return i;
  }
  printf("failed to find %s in %s\n", search_arg_name, name.c_str());
  assert(false);
}

cl_int CLQueuedKernel::exec() {
  if (kernel == NULL) {
    kernel = clCreateKernel(program, name.c_str(), NULL);
    arg_names.clear();
    arg_types.clear();

    for (int j = 0; j < num_args; j++) {
      char arg_name[0x100] = {0};
      clGetKernelArgInfo(kernel, j, CL_KERNEL_ARG_NAME, sizeof(arg_name), arg_name, NULL);
      arg_names.push_back(string(arg_name));
      clGetKernelArgInfo(kernel, j, CL_KERNEL_ARG_TYPE_NAME, sizeof(arg_name), arg_name, NULL);
      arg_types.push_back(string(arg_name));

      cl_int ret;
      if (args[j].size() != 0) {
        assert(args[j].size() == args_size[j]);
        ret = thneed_clSetKernelArg(kernel, j, args[j].size(), args[j].data());
      } else {
        ret = thneed_clSetKernelArg(kernel, j, args_size[j], NULL);
      }
      assert(ret == CL_SUCCESS);
    }
  }

  if (thneed->debug >= 1) {
    debug_print(thneed->debug >= 2);
  }

  return clEnqueueNDRangeKernel(thneed->command_queue,
      kernel, work_dim, NULL, global_work_size, local_work_size, 0, NULL, NULL);
}

void CLQueuedKernel::debug_print(bool verbose) {
  printf("%p %56s -- ", kernel, name.c_str());
  for (int i = 0; i < work_dim; i++) {
    printf("%4zu ", global_work_size[i]);
  }
  printf(" -- ");
  for (int i = 0; i < work_dim; i++) {
    printf("%4zu ", local_work_size[i]);
  }
  printf("\n");

  if (verbose) {
    for (int i = 0; i < num_args; i++) {
      string arg = args[i];
      printf("  %s %s", arg_types[i].c_str(), arg_names[i].c_str());
      void *arg_value = (void*)arg.data();
      int arg_size = arg.size();
      if (arg_size == 0) {
        printf(" (size) %d", args_size[i]);
      } else if (arg_size == 1) {
        printf(" = %d", *((char*)arg_value));
      } else if (arg_size == 2) {
        printf(" = %d", *((short*)arg_value));
      } else if (arg_size == 4) {
        if (arg_types[i] == "float") {
          printf(" = %f", *((float*)arg_value));
        } else {
          printf(" = %d", *((int*)arg_value));
        }
      } else if (arg_size == 8) {
        cl_mem val = (cl_mem)(*((uintptr_t*)arg_value));
        printf(" = %p", val);
        if (val != NULL) {
          cl_mem_object_type obj_type;
          clGetMemObjectInfo(val, CL_MEM_TYPE, sizeof(obj_type), &obj_type, NULL);
          if (arg_types[i] == "image2d_t" || arg_types[i] == "image1d_t" || obj_type == CL_MEM_OBJECT_IMAGE2D) {
            cl_image_format format;
            size_t width, height, depth, array_size, row_pitch, slice_pitch;
            cl_mem buf;
            clGetImageInfo(val, CL_IMAGE_FORMAT, sizeof(format), &format, NULL);
            assert(format.image_channel_order == CL_RGBA);
            assert(format.image_channel_data_type == CL_HALF_FLOAT || format.image_channel_data_type == CL_FLOAT);
            clGetImageInfo(val, CL_IMAGE_WIDTH, sizeof(width), &width, NULL);
            clGetImageInfo(val, CL_IMAGE_HEIGHT, sizeof(height), &height, NULL);
            clGetImageInfo(val, CL_IMAGE_ROW_PITCH, sizeof(row_pitch), &row_pitch, NULL);
            clGetImageInfo(val, CL_IMAGE_DEPTH, sizeof(depth), &depth, NULL);
            clGetImageInfo(val, CL_IMAGE_ARRAY_SIZE, sizeof(array_size), &array_size, NULL);
            clGetImageInfo(val, CL_IMAGE_SLICE_PITCH, sizeof(slice_pitch), &slice_pitch, NULL);
            assert(depth == 0);
            assert(array_size == 0);
            assert(slice_pitch == 0);

            clGetImageInfo(val, CL_IMAGE_BUFFER, sizeof(buf), &buf, NULL);
            size_t sz = 0;
            if (buf != NULL) clGetMemObjectInfo(buf, CL_MEM_SIZE, sizeof(sz), &sz, NULL);
            printf(" image %zu x %zu rp %zu @ %p buffer %zu", width, height, row_pitch, buf, sz);
          } else {
            size_t sz;
            clGetMemObjectInfo(val, CL_MEM_SIZE, sizeof(sz), &sz, NULL);
            printf(" buffer %zu", sz);
          }
        }
      }
      printf("\n");
    }
  }
}

cl_int thneed_clSetKernelArg(cl_kernel kernel, cl_uint arg_index, size_t arg_size, const void *arg_value) {
  g_args_size[make_pair(kernel, arg_index)] = arg_size;
  if (arg_value != NULL) {
    g_args[make_pair(kernel, arg_index)] = string((char*)arg_value, arg_size);
  } else {
    g_args[make_pair(kernel, arg_index)] = string("");
  }
  cl_int ret = clSetKernelArg(kernel, arg_index, arg_size, arg_value);
  return ret;
}
@@ -0,0 +1,32 @@
#include "selfdrive/modeld/thneed/thneed.h"

#include <cassert>

#include "common/clutil.h"
#include "common/timing.h"

Thneed::Thneed(bool do_clinit, cl_context _context) {
  context = _context;
  if (do_clinit) clinit();
  char *thneed_debug_env = getenv("THNEED_DEBUG");
  debug = (thneed_debug_env != NULL) ? atoi(thneed_debug_env) : 0;
}

void Thneed::execute(float **finputs, float *foutput, bool slow) {
  uint64_t tb, te;
  if (debug >= 1) tb = nanos_since_boot();

  // ****** copy inputs
  copy_inputs(finputs);

  // ****** run commands
  clexec();

  // ****** copy outputs
  copy_output(foutput);

  if (debug >= 1) {
    te = nanos_since_boot();
    printf("model exec in %lu us\n", (te-tb)/1000);
  }
}
@ -0,0 +1,258 @@ |
|||||||
|
#include "selfdrive/modeld/thneed/thneed.h" |
||||||
|
|
||||||
|
#include <dlfcn.h> |
||||||
|
#include <sys/mman.h> |
||||||
|
|
||||||
|
#include <cassert> |
||||||
|
#include <cerrno> |
||||||
|
#include <cstring> |
||||||
|
#include <map> |
||||||
|
#include <string> |
||||||
|
|
||||||
|
#include "common/clutil.h" |
||||||
|
#include "common/timing.h" |
||||||
|
|
||||||
|
Thneed *g_thneed = NULL; |
||||||
|
int g_fd = -1; |
||||||
|
|
||||||
|
void hexdump(uint8_t *d, int len) { |
||||||
|
assert((len%4) == 0); |
||||||
|
printf(" dumping %p len 0x%x\n", d, len); |
||||||
|
for (int i = 0; i < len/4; i++) { |
||||||
|
if (i != 0 && (i%0x10) == 0) printf("\n"); |
||||||
|
printf("%8x ", d[i]); |
||||||
|
} |
||||||
|
printf("\n"); |
||||||
|
} |

// *********** ioctl interceptor ***********

extern "C" {

int (*my_ioctl)(int filedes, unsigned long request, void *argp) = NULL;
#undef ioctl
int ioctl(int filedes, unsigned long request, void *argp) {
  request &= 0xFFFFFFFF; // needed on QCOM2
  if (my_ioctl == NULL) my_ioctl = reinterpret_cast<decltype(my_ioctl)>(dlsym(RTLD_NEXT, "ioctl"));
  Thneed *thneed = g_thneed;

  // save the fd
  if (request == IOCTL_KGSL_GPUOBJ_ALLOC) g_fd = filedes;

  // note that this runs always, even without a thneed object
  if (request == IOCTL_KGSL_DRAWCTXT_CREATE) {
    struct kgsl_drawctxt_create *create = (struct kgsl_drawctxt_create *)argp;
    create->flags &= ~KGSL_CONTEXT_PRIORITY_MASK;
    create->flags |= 6 << KGSL_CONTEXT_PRIORITY_SHIFT; // priority from 1-15, 1 is max priority
    printf("IOCTL_KGSL_DRAWCTXT_CREATE: creating context with flags 0x%x\n", create->flags);
  }

  if (thneed != NULL) {
    if (request == IOCTL_KGSL_GPU_COMMAND) {
      struct kgsl_gpu_command *cmd = (struct kgsl_gpu_command *)argp;
      if (thneed->record) {
        thneed->timestamp = cmd->timestamp;
        thneed->context_id = cmd->context_id;
        thneed->cmds.push_back(unique_ptr<CachedCommand>(new CachedCommand(thneed, cmd)));
      }
      if (thneed->debug >= 1) {
        printf("IOCTL_KGSL_GPU_COMMAND(%2zu): flags: 0x%lx context_id: %u timestamp: %u numcmds: %d numobjs: %d\n",
               thneed->cmds.size(), cmd->flags,
               cmd->context_id, cmd->timestamp, cmd->numcmds, cmd->numobjs);
      }
    } else if (request == IOCTL_KGSL_GPUOBJ_SYNC) {
      struct kgsl_gpuobj_sync *cmd = (struct kgsl_gpuobj_sync *)argp;
      struct kgsl_gpuobj_sync_obj *objs = (struct kgsl_gpuobj_sync_obj *)(cmd->objs);

      if (thneed->debug >= 2) {
        printf("IOCTL_KGSL_GPUOBJ_SYNC count:%d ", cmd->count);
        for (int i = 0; i < cmd->count; i++) {
          printf(" -- offset:0x%lx len:0x%lx id:%d op:%d ", objs[i].offset, objs[i].length, objs[i].id, objs[i].op);
        }
        printf("\n");
      }

      if (thneed->record) {
        thneed->cmds.push_back(unique_ptr<CachedSync>(
          new CachedSync(thneed, string((char *)objs, sizeof(struct kgsl_gpuobj_sync_obj)*cmd->count))));
      }
    } else if (request == IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID) {
      struct kgsl_device_waittimestamp_ctxtid *cmd = (struct kgsl_device_waittimestamp_ctxtid *)argp;
      if (thneed->debug >= 1) {
        printf("IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID: context_id: %d timestamp: %d timeout: %d\n",
               cmd->context_id, cmd->timestamp, cmd->timeout);
      }
    } else if (request == IOCTL_KGSL_SETPROPERTY) {
      if (thneed->debug >= 1) {
        struct kgsl_device_getproperty *prop = (struct kgsl_device_getproperty *)argp;
        printf("IOCTL_KGSL_SETPROPERTY: 0x%x sizebytes:%zu\n", prop->type, prop->sizebytes);
        if (thneed->debug >= 2) {
          hexdump((uint8_t *)prop->value, prop->sizebytes);
          if (prop->type == KGSL_PROP_PWR_CONSTRAINT) {
            struct kgsl_device_constraint *constraint = (struct kgsl_device_constraint *)prop->value;
            hexdump((uint8_t *)constraint->data, constraint->size);
          }
        }
      }
    } else if (request == IOCTL_KGSL_DRAWCTXT_CREATE || request == IOCTL_KGSL_DRAWCTXT_DESTROY) {
      // this happens
    } else if (request == IOCTL_KGSL_GPUOBJ_ALLOC || request == IOCTL_KGSL_GPUOBJ_FREE) {
      // this happens
    } else {
      if (thneed->debug >= 1) {
        printf("other ioctl %lx\n", request);
      }
    }
  }

  int ret = my_ioctl(filedes, request, argp);
  // NOTE: This error message goes into stdout and messes up pyenv
  // if (ret != 0) printf("ioctl returned %d with errno %d\n", ret, errno);
  return ret;
}

}
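
For readers unfamiliar with the interposition trick above (shadowing ioctl and forwarding to the real symbol via dlsym(RTLD_NEXT, ...)), here is a minimal, self-contained sketch of the same pattern applied to a different libc call. It is illustrative only and not part of openpilot:

// interpose_sketch.cc -- build with g++ into a shared object (add -ldl on older
// glibc) and load it with LD_PRELOAD so this definition of open() shadows libc's;
// dlsym(RTLD_NEXT, "open") then resolves the real implementation to forward to.
#include <dlfcn.h>
#include <fcntl.h>
#include <cstdarg>
#include <cstdio>

extern "C" int open(const char *path, int flags, ...) {
  static auto real_open =
      reinterpret_cast<int (*)(const char *, int, ...)>(dlsym(RTLD_NEXT, "open"));
  fprintf(stderr, "open(%s)\n", path);  // observe the call, then pass it through
  mode_t mode = 0;
  if (flags & O_CREAT) {  // the mode argument is only present when O_CREAT is set
    va_list ap;
    va_start(ap, flags);
    mode = (mode_t)va_arg(ap, int);
    va_end(ap);
  }
  return real_open(path, flags, mode);
}
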

// *********** GPUMalloc ***********

GPUMalloc::GPUMalloc(int size, int fd) {
  struct kgsl_gpuobj_alloc alloc;
  memset(&alloc, 0, sizeof(alloc));
  alloc.size = size;
  alloc.flags = 0x10000a00;
  ioctl(fd, IOCTL_KGSL_GPUOBJ_ALLOC, &alloc);
  // prot 0x3 = PROT_READ|PROT_WRITE, flags 0x1 = MAP_SHARED
  void *addr = mmap64(NULL, alloc.mmapsize, 0x3, 0x1, fd, alloc.id*0x1000);
  assert(addr != MAP_FAILED);

  base = (uint64_t)addr;
  remaining = size;
}

GPUMalloc::~GPUMalloc() {
  // TODO: free the GPU malloced area
}

void *GPUMalloc::alloc(int size) {
  void *ret = (void*)base;
  size = (size+0xff) & (~0xFF);
  assert(size <= remaining);
  remaining -= size;
  base += size;
  return ret;
}
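
GPUMalloc::alloc above hands out bump-pointer allocations rounded up to 256-byte multiples. A small standalone check of that rounding (illustrative only, not part of the diff):

#include <cassert>
#include <cstdint>

// Same rounding as (size+0xff) & (~0xFF) in GPUMalloc::alloc.
static uint32_t round_up_256(uint32_t size) {
  return (size + 0xff) & ~uint32_t(0xff);
}

int main() {
  assert(round_up_256(1)   == 0x100);
  assert(round_up_256(256) == 0x100);
  assert(round_up_256(257) == 0x200);
  assert(round_up_256(0)   == 0);
  return 0;
}
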

// *********** CachedSync, at the ioctl layer ***********

void CachedSync::exec() {
  struct kgsl_gpuobj_sync cmd;

  cmd.objs = (uint64_t)data.data();
  cmd.obj_len = data.length();
  cmd.count = data.length() / sizeof(struct kgsl_gpuobj_sync_obj);

  int ret = ioctl(thneed->fd, IOCTL_KGSL_GPUOBJ_SYNC, &cmd);
  assert(ret == 0);
}

// *********** CachedCommand, at the ioctl layer ***********

CachedCommand::CachedCommand(Thneed *lthneed, struct kgsl_gpu_command *cmd) {
  thneed = lthneed;
  assert(cmd->numsyncs == 0);

  memcpy(&cache, cmd, sizeof(cache));

  if (cmd->numcmds > 0) {
    // copy the command objects and the indirect buffers they point at into the
    // preallocated GPU arena so the command stream can be resubmitted later
    cmds = make_unique<struct kgsl_command_object[]>(cmd->numcmds);
    memcpy(cmds.get(), (void *)cmd->cmdlist, sizeof(struct kgsl_command_object)*cmd->numcmds);
    cache.cmdlist = (uint64_t)cmds.get();
    for (int i = 0; i < cmd->numcmds; i++) {
      void *nn = thneed->ram->alloc(cmds[i].size);
      memcpy(nn, (void*)cmds[i].gpuaddr, cmds[i].size);
      cmds[i].gpuaddr = (uint64_t)nn;
    }
  }

  if (cmd->numobjs > 0) {
    // memory objects are re-pointed at zeroed space in the same arena
    objs = make_unique<struct kgsl_command_object[]>(cmd->numobjs);
    memcpy(objs.get(), (void *)cmd->objlist, sizeof(struct kgsl_command_object)*cmd->numobjs);
    cache.objlist = (uint64_t)objs.get();
    for (int i = 0; i < cmd->numobjs; i++) {
      void *nn = thneed->ram->alloc(objs[i].size);
      memset(nn, 0, objs[i].size);
      objs[i].gpuaddr = (uint64_t)nn;
    }
  }

  kq = thneed->ckq;
  thneed->ckq.clear();
}

void CachedCommand::exec() {
  cache.timestamp = ++thneed->timestamp;
  int ret = ioctl(thneed->fd, IOCTL_KGSL_GPU_COMMAND, &cache);

  if (thneed->debug >= 1) printf("CachedCommand::exec got %d\n", ret);

  if (thneed->debug >= 2) {
    for (auto &it : kq) {
      it->debug_print(false);
    }
  }

  assert(ret == 0);
}

// *********** Thneed ***********

Thneed::Thneed(bool do_clinit, cl_context _context) {
  // TODO: QCOM2 actually requires a different context
  //context = _context;
  if (do_clinit) clinit();
  assert(g_fd != -1);
  fd = g_fd;
  ram = make_unique<GPUMalloc>(0x80000, fd);
  timestamp = -1;
  g_thneed = this;
  char *thneed_debug_env = getenv("THNEED_DEBUG");
  debug = (thneed_debug_env != NULL) ? atoi(thneed_debug_env) : 0;
}

void Thneed::wait() {
  struct kgsl_device_waittimestamp_ctxtid wait;
  wait.context_id = context_id;
  wait.timestamp = timestamp;
  wait.timeout = -1;

  uint64_t tb = nanos_since_boot();
  int wret = ioctl(fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID, &wait);
  uint64_t te = nanos_since_boot();

  if (debug >= 1) printf("wait %d after %lu us\n", wret, (te-tb)/1000);
}

void Thneed::execute(float **finputs, float *foutput, bool slow) {
  uint64_t tb, te;
  if (debug >= 1) tb = nanos_since_boot();

  // ****** copy inputs
  copy_inputs(finputs, true);

  // ****** run commands
  int i = 0;
  for (auto &it : cmds) {
    ++i;
    if (debug >= 1) printf("run %2d @ %7lu us: ", i, (nanos_since_boot()-tb)/1000);
    it->exec();
    if ((i == cmds.size()) || slow) wait();  // always wait on the last command; in slow mode, after every one
  }

  // ****** copy outputs
  copy_output(foutput);

  if (debug >= 1) {
    te = nanos_since_boot();
    printf("model exec in %lu us\n", (te-tb)/1000);
  }
}
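
Putting the pieces in this file together, a rough caller-side sketch of the record-then-replay flow. The recording toggle, run_model_once, and the buffers are assumptions for illustration; the real setup lives elsewhere in modeld and is not part of this hunk:

// Rough sketch only; not part of the diff. Ordering matters: the ioctl
// interceptor must have seen an IOCTL_KGSL_GPUOBJ_ALLOC (so g_fd is set)
// before Thneed is constructed -- hence the assert(g_fd != -1) above.
Thneed *t = new Thneed(true, nullptr);  // context arg unused on QCOM2 per the TODO above
t->record = true;                       // assumed accessible flag: cache kgsl commands
run_model_once();                       // hypothetical helper: run the CL graph once so the
                                        // interceptor records every IOCTL_KGSL_GPU_COMMAND
t->record = false;

float *inputs[1] = {input_buf};          // input_buf / output_buf: assumed model-sized buffers
t->execute(inputs, output_buf, false);   // replay the cached commands; wait on the last one
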
@ -1 +1 @@
Subproject commit 270bbd36a925d9c612f1eeb7ea0ea4ad83fec41e
Subproject commit 9dda6d260db0255750bacff61e3cee1e580567e1