solve the onnxruntime inference issue ()

pull/13105/head^2
CaiRan 2024-06-21 17:20:40 +08:00 committed by GitHub
parent f8ca01dc01
commit 6d5f998fe1
1 changed file with 10 additions and 2 deletions
tools/infer

@@ -197,10 +197,18 @@ def create_predictor(args, mode, logger):
             raise ValueError("not find model file path {}".format(model_file_path))
         if args.use_gpu:
             sess = ort.InferenceSession(
-                model_file_path, providers=["CUDAExecutionProvider"]
+                model_file_path,
+                providers=[
+                    (
+                        "CUDAExecutionProvider",
+                        {"device_id": args.gpu_id, "cudnn_conv_algo_search": "DEFAULT"},
+                    )
+                ],
             )
         else:
-            sess = ort.InferenceSession(model_file_path)
+            sess = ort.InferenceSession(
+                model_file_path, providers=["CPUExecutionProvider"]
+            )

         return sess, sess.get_inputs()[0], None, None
     else:
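
The change itself is small: on the GPU path the provider is passed as a (name, options) tuple so the session is pinned to args.gpu_id and uses the "DEFAULT" cuDNN convolution algorithm search, and on the CPU path "CPUExecutionProvider" is now named explicitly, since recent onnxruntime releases expect an explicit provider list. The sketch below shows the same provider setup in isolation; the build_session helper, the "model.onnx" path, and the input shape are illustrative placeholders, not taken from the repository.

# Minimal sketch of the provider configuration applied by this commit.
# build_session, the "model.onnx" path, and the input shape are placeholders.
import numpy as np
import onnxruntime as ort


def build_session(model_file_path, use_gpu=False, gpu_id=0):
    if use_gpu:
        # The (name, options) tuple form lets CUDAExecutionProvider receive
        # per-session settings such as the target device and cuDNN search mode.
        providers = [
            (
                "CUDAExecutionProvider",
                {"device_id": gpu_id, "cudnn_conv_algo_search": "DEFAULT"},
            )
        ]
    else:
        # Naming CPUExecutionProvider explicitly avoids the missing-providers
        # warning that newer onnxruntime builds emit.
        providers = ["CPUExecutionProvider"]
    return ort.InferenceSession(model_file_path, providers=providers)


sess = build_session("model.onnx", use_gpu=False)     # placeholder model path
input_name = sess.get_inputs()[0].name
dummy = np.zeros((1, 3, 48, 320), dtype=np.float32)   # placeholder NCHW input
outputs = sess.run(None, {input_name: dummy})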