[Feature] Refactor v1 (#56)

* [Refactor] add enum class and use functions to get configuration (#40)
  * add task and codebase enum class
  * use functions to get config
* Refactor wrappers of mmcls and mmseg (#41)
  * move wrappers of cls & det to apis
  * remove get_classes_from_config
  * rename onnx_helper to onnx_utils
  * move import to outside of class
  * refactor ortwrappers
* Refactor build dataset and dataloader for mmseg (#44)
  * refactor build_dataset and build_dataloader for mmcls and mmseg
  * remove repeated classes
  * set build_dataloader with shuffle=False
* [Refactor] pplwrapper and mmocr refactor (#46)
  * add
  * add pplwrapper and refactor mmocr
  * fix lint
  * remove unused arguments
  * apply dict input for pplwrapper and ortwrapper
  * add condition before import ppl and ort stuff
* update ppl (#51)
* Refactor return value and extract_model (#54)
  * remove ret_value
  * refactor extract_model
  * fix typo
  * resolve comments
* [Refactor] Refactor model inference pipeline (#52)
  * move attribute_to_dict to extract_model
  * simplify the inference and visualization
  * remove unused import
* [Feature] Support SRCNN in mmedit with ONNXRuntime and TensorRT (#45)
  * finish mmedit-ort
  * edit __init__ files
  * add noqa
  * add tensorrt support
  * 1. Rename "base.py" 2. Move srcnn.py to correct directory
  * fix bugs
  * remove figures
  * align to refactor-v1
  * update comment in srcnn
  * fix lint
  * newfunc -> new_func
  * Add visualize.py, split visualize() in each codebase
  * remove unnecessary code in ORTRestorer
  * remove .api
  * edit super(), remove dataset
* [Refactor]: Change name of split to partition (#57)
  * refactor mmcls configs
  * refactor mmdet configs and split params
  * rename rest split to partition from master
  * remove base.py
  * fix init of inference class
  * fix mmocr init, add show_result alias

Co-authored-by: AllentDan <41138331+AllentDan@users.noreply.github.com>
Co-authored-by: RunningLeon <maningsheng@sensetime.com>
Co-authored-by: Yifan Zhou <singlezombie@163.com>
parent 823ca38646
commit 2b98040b26
@@ -1,2 +1,2 @@
 [settings]
-known_third_party = h5py,mmcls,mmcv,mmdet,mmocr,mmseg,ncnn,numpy,onnx,onnxruntime,packaging,pyppl,pytest,setuptools,tensorrt,torch
+known_third_party = h5py,mmcls,mmcv,mmdet,mmedit,mmocr,mmseg,ncnn,numpy,onnx,onnxruntime,packaging,pyppl,pytest,setuptools,tensorrt,torch

@@ -0,0 +1,3 @@
+_base_ = ['../_base_/torch2onnx.py']
+codebase = 'mmcls'
+pytorch2onnx = dict(input_names=['input'], output_names=['output'])
@@ -0,0 +1,11 @@
+_base_ = ['./base.py']
+pytorch2onnx = dict(dynamic_axes={
+    'input': {
+        0: 'batch',
+        2: 'height',
+        3: 'width'
+    },
+    'output': {
+        0: 'batch'
+    }
+})

@@ -1,8 +0,0 @@
-_base_ = ['../_base_/torch2onnx.py']
-codebase = 'mmcls'
-pytorch2onnx = dict(
-    input_names=['input'],
-    output_names=['output'],
-    dynamic_axes={'input': {
-        0: 'batch'
-    }})

@@ -1 +0,0 @@
-_base_ = ['./mmcls_base.py', '../_base_/backends/ncnn.py']

@@ -1 +0,0 @@
-_base_ = ['./mmcls_base.py', '../_base_/backends/onnxruntime.py']

@@ -1 +0,0 @@
-_base_ = ['./mmcls_base.py', '../_base_/backends/ppl.py']

@@ -0,0 +1 @@
+_base_ = ['./base_dynamic.py', '../_base_/backends/ncnn.py']

@@ -0,0 +1 @@
+_base_ = ['./base_dynamic.py', '../_base_/backends/onnxruntime.py']

@@ -0,0 +1 @@
+_base_ = ['./base_dynamic.py', '../_base_/backends/ppl.py']

@@ -1,4 +1,4 @@
-_base_ = ['./mmcls_base.py', '../_base_/backends/tensorrt.py']
+_base_ = ['./base_dynamic.py', '../_base_/backends/tensorrt.py']
 tensorrt_params = dict(model_params=[
     dict(
         save_file='end2end.engine',

@@ -1,4 +1,4 @@
-_base_ = ['./mmcls_base.py', '../_base_/backends/tensorrt_int8.py']
+_base_ = ['./base_dynamic.py', '../_base_/backends/tensorrt_int8.py']
 tensorrt_params = dict(model_params=[
     dict(
         save_file='end2end.engine',
@@ -1,30 +0,0 @@
-_base_ = ['../_base_/torch2onnx.py']
-codebase = 'mmdet'
-pytorch2onnx = dict(
-    input_names=['input'],
-    output_names=['dets', 'labels'],
-    dynamic_axes={
-        'input': {
-            0: 'batch',
-            2: 'height',
-            3: 'width'
-        },
-        'dets': {
-            0: 'batch',
-            1: 'num_dets',
-        },
-        'labels': {
-            0: 'batch',
-            1: 'num_dets',
-        },
-    },
-)
-
-post_processing = dict(
-    score_threshold=0.05,
-    iou_threshold=0.5,
-    max_output_boxes_per_class=200,
-    pre_top_k=-1,
-    keep_top_k=100,
-    background_label_id=-1,
-)

@@ -0,0 +1,17 @@
+_base_ = ['./base_static.py']
+pytorch2onnx = dict(
+    dynamic_axes={
+        'input': {
+            0: 'batch',
+            2: 'height',
+            3: 'width'
+        },
+        'dets': {
+            0: 'batch',
+            1: 'num_dets',
+        },
+        'labels': {
+            0: 'batch',
+            1: 'num_dets',
+        },
+    }, )

@@ -1,6 +1,9 @@
 _base_ = ['../_base_/torch2onnx.py']
 codebase = 'mmdet'
-pytorch2onnx = dict(input_names=['input'], output_names=['dets', 'labels'])
+pytorch2onnx = dict(
+    input_names=['input'],
+    output_names=['dets', 'labels'],
+)
 
 post_processing = dict(
     score_threshold=0.05,
@@ -1,4 +1,4 @@
-_base_ = ['./base.py']
+_base_ = ['./base_dynamic.py']
 pytorch2onnx = dict(
     output_names=['dets', 'labels', 'masks'],
     dynamic_axes={

@@ -1 +1 @@
-_base_ = ['./base.py', '../_base_/backends/onnxruntime.py']
+_base_ = ['./base_dynamic.py', '../_base_/backends/onnxruntime.py']

@@ -0,0 +1,5 @@
+_base_ = ['./base_dynamic.py']
+
+apply_marks = True
+
+partition_params = dict(partition_type='single_stage_base')

@@ -0,0 +1 @@
+_base_ = ['./partition_single_stage.py', '../_base_/backends/onnxruntime.py']

@@ -0,0 +1,5 @@
+_base_ = ['./base_static.py']
+
+apply_marks = True
+
+partition_params = dict(partition_type='single_stage_base')

@@ -0,0 +1 @@
+_base_ = ['./partition_single_stage_static.py', '../_base_/backends/ncnn.py']

@@ -0,0 +1,5 @@
+_base_ = ['./base_dynamic.py']
+
+apply_marks = True
+
+partition_params = dict(partition_type='two_stage_base')

@@ -0,0 +1 @@
+_base_ = ['./partition_two_stage.py', '../_base_/backends/onnxruntime.py']

@@ -0,0 +1,5 @@
+_base_ = ['./base_static.py']
+
+apply_marks = True
+
+partition_params = dict(partition_type='two_stage_base')

@@ -0,0 +1 @@
+_base_ = ['./partition_two_stage_static.py', '../_base_/backends/ncnn.py']
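Note: the partition configs added above compose through mmcv's `_base_` inheritance, so loading any leaf file yields the merged keys from its base files plus the backend settings. A minimal sketch of reading one back (the file path below is hypothetical):

```python
from mmcv import Config

# Hypothetical path; any of the partition_*.py files added above would do.
cfg = Config.fromfile('configs/mmdet/partition_two_stage_onnxruntime.py')
assert cfg.apply_marks is True
assert cfg.partition_params.partition_type == 'two_stage_base'
```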
@@ -1,4 +1,4 @@
-_base_ = ['./split_two_stage.py', '../_base_/backends/tensorrt_int8.py']
+_base_ = ['./partition_two_stage.py', '../_base_/backends/tensorrt_int8.py']
 
 tensorrt_params = dict(model_params=[
     dict(

@@ -1 +1 @@
-_base_ = ['./base.py', '../_base_/backends/ppl.py']
+_base_ = ['./base_dynamic.py', '../_base_/backends/ppl.py']

@@ -1,5 +0,0 @@
-_base_ = ['./base.py']
-
-apply_marks = True
-
-split_params = dict(split_type='single_stage_base')

@@ -1 +0,0 @@
-_base_ = ['./split_single_stage.py', '../_base_/backends/onnxruntime.py']

@@ -1,5 +0,0 @@
-_base_ = ['./base_static.py']
-
-apply_marks = True
-
-split_params = dict(split_type='single_stage_base')

@@ -1 +0,0 @@
-_base_ = ['./split_single_stage_static.py', '../_base_/backends/ncnn.py']

@@ -1,5 +0,0 @@
-_base_ = ['./base.py']
-
-apply_marks = True
-
-split_params = dict(split_type='two_stage_base')

@@ -1 +0,0 @@
-_base_ = ['./split_two_stage.py', '../_base_/backends/onnxruntime.py']

@@ -1,5 +0,0 @@
-_base_ = ['./base_static.py']
-
-apply_marks = True
-
-split_params = dict(split_type='two_stage_base')

@@ -1 +0,0 @@
-_base_ = ['./split_two_stage_static.py', '../_base_/backends/ncnn.py']

@@ -1 +1 @@
-_base_ = ['./base.py', './tensorrt_base.py']
+_base_ = ['./base_dynamic.py', './tensorrt_base.py']

@@ -1 +1 @@
-_base_ = ['./base.py', './tensorrt_int8_base.py']
+_base_ = ['./base_dynamic.py', './tensorrt_int8_base.py']
@@ -0,0 +1,18 @@
+_base_ = ['../_base_/torch2onnx.py']
+codebase = 'mmedit'
+pytorch2onnx = dict(
+    input_names=['input'],
+    output_names=['output'],
+    dynamic_axes={
+        'input': {
+            0: 'batch',
+            2: 'height',
+            3: 'width'
+        },
+        'output': {
+            0: 'batch',
+            2: 'height',
+            3: 'width'
+        }
+    },
+)

@@ -0,0 +1 @@
+_base_ = ['./base_dynamic.py', '../_base_/backends/onnxruntime.py']

@@ -0,0 +1,7 @@
+_base_ = ['./base_dynamic.py', '../_base_/backends/tensorrt.py']
+tensorrt_params = dict(model_params=[
+    dict(
+        opt_shape_dict=dict(
+            input=[[1, 3, 32, 32], [1, 3, 32, 32], [1, 3, 512, 512]]),
+        max_workspace_size=1 << 30)
+])

@@ -1,8 +1,9 @@
 _base_ = ['../_base_/torch2onnx.py']
 codebase = 'mmocr'
 
-# 'det' for text detection and 'recog' for text recognition
-algorithm_type = 'det'
+# 'TextDetection' or 'TextRecognition'
+task = 'TextDetection'
+
 pytorch2onnx = dict(
     input_names=['input'],
     output_names=['output'],

@@ -12,5 +13,5 @@ pytorch2onnx = dict(
         3: 'width'
     }})
 
-if algorithm_type == 'recog':
+if task == 'TextRecognition':
     pytorch2onnx['dynamic_axes'] = {'input': {0: 'batch', 3: 'width'}}
@@ -38,7 +38,7 @@ Before starting this tutorial, you should make sure that the prerequisites menti
     cd build
     cmake -DBUILD_NCNN_OPS=ON -DNCNN_DIR=${PATH_TO_NCNN}/ncnn ..
     ```
-    The `${PATH_TO_NCNN}` refers as the root direction of ncnn source code.
+    The `${PATH_TO_NCNN}` refers as the root directory of ncnn source code.
 - Install mmdeploy module
     ```bash
     cd deploy_prototype

@@ -33,3 +33,8 @@ if importlib.util.find_spec('mmocr'):
     importlib.import_module('mmdeploy.mmocr')
 else:
     logging.debug('mmocr is not installed.')
+
+if importlib.util.find_spec('mmedit'):
+    importlib.import_module('mmdeploy.mmedit')
+else:
+    logging.debug('mmedit is not installed.')
@@ -2,15 +2,13 @@ from .calibration import create_calib_table
 from .extract_model import extract_model
 from .inference import inference_model
 from .pytorch2onnx import torch2onnx, torch2onnx_impl
-from .test import post_process_outputs, prepare_data_loader, single_gpu_test
-from .utils import (assert_cfg_valid, assert_module_exist, build_dataset,
-                    get_classes_from_config, get_tensor_from_input,
-                    init_backend_model, load_config)
+from .test import post_process_outputs, single_gpu_test
+from .utils import (build_dataloader, build_dataset, get_tensor_from_input,
+                    init_backend_model)
 
 __all__ = [
     'create_calib_table', 'torch2onnx_impl', 'torch2onnx', 'extract_model',
-    'inference_model', 'prepare_data_loader', 'assert_module_exist',
-    'assert_cfg_valid', 'init_backend_model', 'get_classes_from_config',
-    'single_gpu_test', 'post_process_outputs', 'build_dataset',
-    'get_tensor_from_input', 'load_config'
+    'inference_model', 'init_backend_model', 'single_gpu_test',
+    'post_process_outputs', 'build_dataset', 'get_tensor_from_input',
+    'build_dataloader'
 ]
@@ -3,13 +3,13 @@ from typing import Optional, Union
 import h5py
 import mmcv
 import torch
-import torch.multiprocessing as mp
 from mmcv.parallel import MMDataParallel
 
 from mmdeploy.core import (RewriterContext, patch_model,
                            reset_mark_function_count)
-from .utils import (_inference, build_dataloader, build_dataset,
-                    get_tensor_from_input, init_model)
+from mmdeploy.utils import get_codebase, load_config
+from .utils import (build_dataloader, build_dataset, get_tensor_from_input,
+                    init_pytorch_model, run_inference)
 
 
 def create_calib_table(calib_file: str,

@@ -19,45 +19,28 @@ def create_calib_table(calib_file: str,
                        dataset_cfg: Optional[Union[str, mmcv.Config]] = None,
                        dataset_type: str = 'val',
                        device: str = 'cuda:0',
-                       ret_value: Optional[mp.Value] = None,
                        **kwargs) -> None:
 
-    if ret_value is not None:
-        ret_value.value = -1
 
     if dataset_cfg is None:
         dataset_cfg = model_cfg
 
-    # load deploy_cfg if necessary
-    if isinstance(deploy_cfg, str):
-        deploy_cfg = mmcv.Config.fromfile(deploy_cfg)
-    if not isinstance(deploy_cfg, mmcv.Config):
-        raise TypeError('deploy_cfg must be a filename or Config object, '
-                        f'but got {type(deploy_cfg)}')
-    # load model_cfg if needed
-    if isinstance(model_cfg, str):
-        model_cfg = mmcv.Config.fromfile(model_cfg)
-    if not isinstance(model_cfg, mmcv.Config):
-        raise TypeError('config must be a filename or Config object, '
-                        f'but got {type(model_cfg)}')
 
+    # load cfg if necessary
+    deploy_cfg = load_config(deploy_cfg)[0]
+    model_cfg = load_config(model_cfg)[0]
     device_id = torch.device(device).index
     if device_id is None:
         device_id = 0
 
-    # load dataset_cfg if needed
-    if dataset_cfg is None:
-        dataset_cfg = model_cfg
-    elif isinstance(dataset_cfg, str):
-        dataset_cfg = mmcv.Config.fromfile(dataset_cfg)
-    if not isinstance(dataset_cfg, mmcv.Config):
-        raise TypeError('config must be a filename or Config object, '
-                        f'but got {type(dataset_cfg)}')
+    # load dataset_cfg if necessary
+    dataset_cfg = load_config(dataset_cfg)[0]
 
-    codebase = deploy_cfg.codebase
+    codebase = get_codebase(deploy_cfg)
     apply_marks = deploy_cfg.get('apply_marks', False)
     backend = 'default'
-    model = init_model(codebase, model_cfg, model_checkpoint, device=device)
+    model = init_pytorch_model(
+        codebase, model_cfg, model_checkpoint, device=device)
     dataset = build_dataset(codebase, dataset_cfg, dataset_type)
 
     # patch model

@@ -94,10 +77,7 @@ def create_calib_table(calib_file: str,
                     calib_file=calib_file,
                     data_id=data_id):
                 reset_mark_function_count()
-                _ = _inference(codebase, input_data, patched_model)
+                _ = run_inference(codebase, input_data, patched_model)
             calib_file.flush()
 
             prog_bar.update()
-
-    if ret_value is not None:
-        ret_value.value = 0
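Note: after this refactor, create_calib_table no longer reports status through a multiprocessing ret_value; configs are normalized through load_config. A minimal sketch of invoking it (the file paths are hypothetical; the keyword names match the parameters visible in the hunks above):

```python
from mmdeploy.apis import create_calib_table

# Writes an h5py calibration table by running the patched PyTorch model
# over the dataset described by dataset_cfg (falls back to model_cfg).
create_calib_table(
    'work_dir/calib_file.h5',
    deploy_cfg='deploy_cfg.py',
    model_cfg='model_cfg.py',
    model_checkpoint='checkpoint.pth',
    dataset_type='val',
    device='cuda:0')
```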
@@ -4,170 +4,10 @@
 import onnx
 import onnx.helper
 import onnx.utils
-import torch.multiprocessing as mp
 
-from mmdeploy.utils import parse_extractor_io_string
-from .utils import attribute_to_dict
-
-
-def _dfs_search_reacable_nodes_fast(self, node_output_name, graph_input_nodes,
-                                    reachable_nodes):
-    outputs = {}
-    for index, node in enumerate(self.graph.node):
-        for name in node.output:
-            if name not in outputs:
-                outputs[name] = set()
-            outputs[name].add(index)
-
-    def impl(node_output_name, graph_input_nodes, reachable_nodes):
-        if node_output_name in graph_input_nodes:
-            return
-        if node_output_name not in outputs:
-            return
-        for index in outputs[node_output_name]:
-            node = self.graph.node[index]
-            if node in reachable_nodes:
-                continue
-            reachable_nodes.append(node)
-            for name in node.input:
-                impl(name, graph_input_nodes, reachable_nodes)
-
-    impl(node_output_name, graph_input_nodes, reachable_nodes)
-
-
-def remove_nodes(model, predicate):
-    # ! this doesn't handle inputs/outputs
-    while True:
-        connect = None
-        for i, node in enumerate(model.graph.node):
-            if predicate(node):
-                assert len(node.input) == 1
-                assert len(node.output) == 1
-                connect = (node.input[0], node.output[0])
-                logging.info(f'remove node {node.name}')
-                del model.graph.node[i]
-                break
-        if not connect:
-            break
-        src, dst = connect
-        for node in model.graph.node:
-            for i, input in enumerate(node.input):
-                if input == dst:
-                    node.input[i] = src
-    return model
-
-
-def is_unused_mark(marks):
-
-    def f(node):
-        if node.op_type == 'Mark':
-            attr = attribute_to_dict(node.attribute)
-            name = attr['func'] + ':' + attr['type']
-            if name not in marks:
-                return True
-        return False
-
-    return f
-
-
-def is_identity(node):
-    return node.op_type == 'Identity'
-
-
-def get_new_name(attrs, mark_name='', name_map=None):
-    if 'name' in attrs:
-        new_name = attrs['name']
-    else:
-        new_name = '_'.join((attrs['func'], attrs['type'], str(attrs['id'])))
-
-    if name_map is not None:
-        if new_name in name_map:
-            return name_map[new_name]
-
-        if f'{mark_name}:{new_name}' in name_map:
-            return name_map[f'{mark_name}:{new_name}']
-
-    return new_name
-
-
-def rename_value(model, old_name, new_name):
-    if old_name == new_name:
-        return
-    logging.info(f'rename {old_name} -> {new_name}')
-    for n in model.graph.node:
-        for i, output in enumerate(n.output):
-            if output == old_name:
-                n.output[i] = new_name
-        for i, input in enumerate(n.input):
-            if input == old_name:
-                n.input[i] = new_name
-    for v in model.graph.value_info:
-        if v.name == old_name:
-            v.name = new_name
-    for i, input in enumerate(model.graph.input):
-        if input.name == old_name:
-            input.name = new_name
-    for i, output in enumerate(model.graph.output):
-        if output.name == old_name:
-            output.name = new_name
-
-
-def optimize(model):
-    graph = model.graph
-
-    def simplify_inputs():
-        connect = None
-        for input in graph.input:
-            for i, node in enumerate(graph.node):
-                if node.op_type == 'Identity' and node.input[0] == input.name:
-                    connect = (node.input[0], node.output[0])
-                    logging.info(f'remove node {node.name}')
-                    del graph.node[i]
-                    break
-            if connect:
-                break
-        if not connect:
-            return False
-        src, dst = connect
-        for node in graph.node:
-            for i, input_name in enumerate(node.input):
-                if input_name == dst:
-                    node.input[i] = src
-        # the input just changed won't be an output
-        return True
-
-    def simplify_outputs():
-        connect = None
-        for output in graph.output:
-            for i, node in enumerate(graph.node):
-                if node.op_type == 'Identity' and \
-                        node.output[0] == output.name:
-                    connect = (node.input[0], node.output[0])
-                    logging.info(f'remove node {node.name}')
-                    del graph.node[i]
-                    break
-            if connect:
-                break
-        if not connect:
-            return False
-        src, dst = connect
-        for node in graph.node:
-            for i, output_name in enumerate(node.output):
-                if output_name == src:
-                    node.output[i] = dst
-            # the output just renamed may be someone's input
-            for i, input_name in enumerate(node.input):
-                if input_name == src:
-                    node.input[i] = dst
-        return True
-
-    while simplify_inputs():
-        pass
-
-    while simplify_outputs():
-        pass
-
-    remove_nodes(model, is_identity)
+from mmdeploy.core.optimizers import (attribute_to_dict, create_extractor,
+                                      get_new_name, optimize,
+                                      parse_extractor_io_string, rename_value)
 
 
 def extract_model(model: Union[str, onnx.ModelProto],

@@ -176,12 +16,7 @@ def extract_model(model: Union[str, onnx.ModelProto],
                   start_name_map: Optional[Dict[str, str]] = None,
                   end_name_map: Optional[Dict[str, str]] = None,
                   dynamic_axes: Optional[Dict[str, Dict[int, str]]] = None,
-                  save_file: Optional[str] = None,
-                  ret_value: Optional[mp.Value] = None):
-
-    # set init flag for multiprocessor
-    if ret_value is not None:
-        ret_value.value = -1
+                  save_file: Optional[str] = None):
 
     if isinstance(model, str):
        model = onnx.load(model)

@@ -247,11 +82,7 @@ def extract_model(model: Union[str, onnx.ModelProto],
             node.domain = ''
             node.op_type = 'Identity'
 
-    # patch extractor
-    onnx.utils.Extractor._dfs_search_reachable_nodes = \
-        _dfs_search_reacable_nodes_fast
-
-    extractor = onnx.utils.Extractor(model)
+    extractor = create_extractor(model)
     extracted_model = extractor.extract_model(inputs, outputs)
 
     # remove all Identity, this may be done by onnx simplifier

@@ -322,8 +153,4 @@ def extract_model(model: Union[str, onnx.ModelProto],
     if save_file is not None:
         onnx.save(extracted_model, save_file)
 
-    # set success flag for multiprocessor
-    if ret_value is not None:
-        ret_value.value = 0
-
     return extracted_model
@@ -1,9 +1,10 @@
 from typing import Optional
 
-import torch.multiprocessing as mp
+import torch
 
-from .utils import (assert_cfg_valid, check_model_outputs, create_input,
-                    init_backend_model, init_model)
+from mmdeploy.utils import Backend, get_backend, get_codebase, load_config
+from .utils import (create_input, init_backend_model, init_pytorch_model,
+                    run_inference, visualize)
 
 
 def inference_model(model_cfg,

@@ -11,25 +12,21 @@ def inference_model(model_cfg,
                     model,
                     img,
                     device: str,
-                    backend: Optional[str] = None,
+                    backend: Optional[Backend] = None,
                     output_file: Optional[str] = None,
-                    show_result=False,
-                    ret_value: Optional[mp.Value] = None):
+                    show_result=False):
 
-    if ret_value is not None:
-        ret_value.value = -1
+    deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
 
-    deploy_cfg, model_cfg = assert_cfg_valid(deploy_cfg, model_cfg)
-
-    codebase = deploy_cfg['codebase']
+    codebase = get_codebase(deploy_cfg)
     if backend is None:
-        backend = deploy_cfg['backend']
+        backend = get_backend(deploy_cfg)
 
     if isinstance(model, str):
         model = [model]
     if isinstance(model, (list, tuple)):
-        if backend == 'pytorch':
-            model = init_model(codebase, model_cfg, model[0], device)
+        if backend == Backend.PYTORCH:
+            model = init_pytorch_model(codebase, model_cfg, model[0], device)
         else:
             device_id = -1 if device == 'cpu' else 0
             model = init_backend_model(

@@ -40,14 +37,14 @@ def inference_model(model_cfg,
 
     model_inputs, _ = create_input(codebase, model_cfg, img, device)
 
-    check_model_outputs(
+    with torch.no_grad():
+        result = run_inference(codebase, model_inputs, model)
+
+    visualize(
         codebase,
         img,
-        model_inputs=model_inputs,
+        result=result,
         model=model,
         output_file=output_file,
         backend=backend,
         show_result=show_result)
-
-    if ret_value is not None:
-        ret_value.value = 0
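Note: with ret_value gone, inference_model is now a plain synchronous call. A minimal sketch under the refactored signature (file paths are hypothetical; the backend is resolved from the deploy config when omitted):

```python
from mmdeploy.apis import inference_model

# Runs the backend (or PyTorch) model once and visualizes the result.
inference_model(
    model_cfg='model_cfg.py',          # hypothetical path
    deploy_cfg='deploy_cfg.py',        # hypothetical path
    model=['work_dir/end2end.onnx'],   # backend files; a plain str also works
    img='demo.jpg',
    device='cpu',
    output_file='output.jpg',
    show_result=False)
```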
@@ -2,9 +2,6 @@ import importlib
 import os.path as osp
 
 from .init_plugins import get_ops_path
-from .onnxruntime_utils import ORTWrapper
-
-__all__ = ['get_ops_path', 'ORTWrapper']
 
 
 def is_available():

@@ -12,3 +9,8 @@ def is_available():
     if not osp.exists(onnxruntime_op_path):
         return False
     return importlib.util.find_spec('onnxruntime') is not None
+
+
+if is_available():
+    from .onnxruntime_utils import ORTWrapper
+    __all__ = ['get_ops_path', 'ORTWrapper']
@@ -1,9 +1,12 @@
 import os.path as osp
+from typing import Sequence
 
 import numpy as np
 import onnxruntime as ort
 import torch
 
+from .init_plugins import get_ops_path
+
 
 class ORTWrapper(torch.nn.Module):
     """ONNXRuntime Wrapper.

@@ -13,10 +16,12 @@ class ORTWrapper(torch.nn.Module):
         device_id (int): The device id to put model
     """
 
-    def __init__(self, onnx_file: str, device_id: int):
+    def __init__(self,
+                 onnx_file: str,
+                 device_id: int,
+                 output_names: Sequence[str] = None):
         super(ORTWrapper, self).__init__()
         # get the custom op path
-        from mmdeploy.apis.onnxruntime import get_ops_path
         ort_custom_op_path = get_ops_path()
         session_options = ort.SessionOptions()
         # register custom op for onnxruntime

@@ -31,10 +36,11 @@ class ORTWrapper(torch.nn.Module):
             providers.insert(0, 'CUDAExecutionProvider')
             options.insert(0, {'device_id': device_id})
             sess.set_providers(providers, options)
 
+        if output_names is None:
+            output_names = [_.name for _ in sess.get_outputs()]
         self.sess = sess
         self.io_binding = sess.io_binding()
-        self.output_names = [_.name for _ in sess.get_outputs()]
+        self.output_names = output_names
         self.device_id = device_id
         self.is_cuda_available = is_cuda_available
         self.device_type = 'cuda' if is_cuda_available else 'cpu'

@@ -42,21 +48,22 @@ class ORTWrapper(torch.nn.Module):
     def forward(self, inputs):
         """
         Arguments:
-            inputs (tensor): the input tensor
-            input_names: list of input name
+            inputs (dict): the input name and tensor pairs
         Return:
-            dict: dict of output name-tensors pair
+            list[np.ndarray]: list of output numpy array
         """
-        # set io binding for inputs/outputs
-        if not self.is_cuda_available:
-            inputs = inputs.cpu()
-        self.io_binding.bind_input(
-            name='input',
-            device_type=self.device_type,
-            device_id=self.device_id,
-            element_type=np.float32,
-            shape=inputs.shape,
-            buffer_ptr=inputs.data_ptr())
+        for name, input_tensor in inputs.items():
+            # set io binding for inputs/outputs
+            if not self.is_cuda_available:
+                input_tensor = input_tensor.cpu()
+            self.io_binding.bind_input(
+                name=name,
+                device_type=self.device_type,
+                device_id=self.device_id,
+                element_type=np.float32,
+                shape=input_tensor.shape,
+                buffer_ptr=input_tensor.data_ptr())
+
         for name in self.output_names:
             self.io_binding.bind_output(name)
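Note: ORTWrapper.forward now takes name/tensor pairs instead of a single tensor. A minimal usage sketch, assuming an exported model whose graph input is named 'input' as in the configs above (the file name is hypothetical):

```python
import torch
from mmdeploy.apis.onnxruntime import ORTWrapper

# Each dict key is bound by name through ONNX Runtime's io_binding, so the
# keys must match the graph inputs; outputs follow self.output_names.
model = ORTWrapper('end2end.onnx', device_id=0)
outputs = model({'input': torch.rand(1, 3, 224, 224)})
```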
@@ -1,3 +1,11 @@
-from .ppl_utils import register_engines
+import importlib
 
-__all__ = ['register_engines']
+
+def is_available():
+    """Check whether ppl is installed."""
+    return importlib.util.find_spec('pyppl') is not None
+
+
+if is_available():
+    from .ppl_utils import PPLWrapper, register_engines
+    __all__ = ['register_engines', 'PPLWrapper']
@@ -1,8 +1,10 @@
 import logging
 import sys
 
+import numpy as np
 import pyppl.common as pplcommon
 import pyppl.nn as pplnn
+import torch
 
 
 def register_engines(device_id: int,

@@ -12,12 +14,13 @@ def register_engines(device_id: int,
 
     Args:
         device_id (int): -1 for cpu.
-        disable_avx512 (bool): Wheather to disable avx512 for x86.
-        quick_select (bool): Wheather to use default algorithms.
+        disable_avx512 (bool): Whether to disable avx512 for x86.
+        quick_select (bool): Whether to use default algorithms.
     """
     engines = []
     if device_id == -1:
-        x86_engine = pplnn.X86EngineFactory.Create()
+        x86_options = pplnn.X86EngineOptions()
+        x86_engine = pplnn.X86EngineFactory.Create(x86_options)
         if not x86_engine:
             logging.error('Failed to create x86 engine')
             sys.exit(-1)

@@ -51,3 +54,55 @@ def register_engines(device_id: int,
         engines.append(pplnn.Engine(cuda_engine))
 
     return engines
+
+
+class PPLWrapper(torch.nn.Module):
+    """PPLWrapper Wrapper.
+
+    Arguments:
+        model_file (str): Input onnx model file
+        device_id (int): The device id to put model
+    """
+
+    def __init__(self, model_file: str, device_id: int):
+        super(PPLWrapper, self).__init__()
+        # enable quick select by default to speed up pipeline
+        # TODO: open it to users after ppl supports saving serialized models
+        # TODO: disable_avx512 will be removed or open to users in config
+        engines = register_engines(
+            device_id, disable_avx512=False, quick_select=True)
+        runtime_builder = pplnn.OnnxRuntimeBuilderFactory.CreateFromFile(
+            model_file, engines)
+        assert runtime_builder is not None, 'Failed to create '\
+            'OnnxRuntimeBuilder.'
+
+        runtime = runtime_builder.CreateRuntime()
+        assert runtime is not None, 'Failed to create the instance of Runtime.'
+
+        self.runtime = runtime
+        self.inputs = {
+            runtime.GetInputTensor(i).GetName(): runtime.GetInputTensor(i)
+            for i in range(runtime.GetInputCount())
+        }
+
+    def forward(self, input_data):
+        """
+        Arguments:
+            input_data (dict): the input name and tensor pairs
+        Return:
+            list[np.ndarray]: list of output numpy array
+        """
+        for name, input_tensor in input_data.items():
+            input_tensor = input_tensor.contiguous()
+            self.inputs[name].ConvertFromHost(input_tensor.cpu().numpy())
+        status = self.runtime.Run()
+        assert status == pplcommon.RC_SUCCESS, 'Run() '\
+            'failed: ' + pplcommon.GetRetCodeStr(status)
+        status = self.runtime.Sync()
+        assert status == pplcommon.RC_SUCCESS, 'Sync() '\
+            'failed: ' + pplcommon.GetRetCodeStr(status)
+        outputs = []
+        for i in range(self.runtime.GetOutputCount()):
+            out_tensor = self.runtime.GetOutputTensor(i).ConvertToHost()
+            outputs.append(np.array(out_tensor, copy=False))
+        return outputs
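Note: PPLWrapper mirrors the dict-based interface of ORTWrapper. A minimal sketch, assuming pyppl is installed (which is_available() in the __init__ above checks) and assuming the package path mmdeploy.apis.ppl and the same hypothetical model file:

```python
import torch
from mmdeploy.apis.ppl import PPLWrapper

# forward() copies each named tensor into the matching pyppl input tensor,
# runs the runtime, and returns the outputs as a list of numpy arrays.
model = PPLWrapper('end2end.onnx', device_id=0)
outputs = model({'input': torch.rand(1, 3, 224, 224)})
```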
@@ -3,11 +3,11 @@ from typing import Any, Optional, Union
 
 import mmcv
 import torch
-import torch.multiprocessing as mp
 
 from mmdeploy.core import (RewriterContext, patch_model,
                            register_extra_symbolics)
-from .utils import create_input, init_model
+from mmdeploy.utils import get_backend, get_codebase, load_config
+from .utils import create_input, init_pytorch_model
 
 
 def torch2onnx_impl(model: torch.nn.Module, input: torch.Tensor,

@@ -20,7 +20,7 @@ def torch2onnx_impl(model: torch.nn.Module, input: torch.Tensor,
                         f'but got {type(deploy_cfg)}')
 
     pytorch2onnx_cfg = deploy_cfg['pytorch2onnx']
-    backend = deploy_cfg['backend']
+    backend = get_backend(deploy_cfg).value
     opset_version = pytorch2onnx_cfg.get('opset_version', 11)
 
     # load registed symbolic

@@ -29,7 +29,7 @@ def torch2onnx_impl(model: torch.nn.Module, input: torch.Tensor,
     # patch model
     patched_model = patch_model(model, cfg=deploy_cfg, backend=backend)
 
-    with RewriterContext(cfg=deploy_cfg, backend=backend):
+    with RewriterContext(cfg=deploy_cfg, backend=backend), torch.no_grad():
         torch.onnx.export(
             patched_model,
             input,

@@ -49,31 +49,18 @@ def torch2onnx(img: Any,
               deploy_cfg: Union[str, mmcv.Config],
               model_cfg: Union[str, mmcv.Config],
               model_checkpoint: Optional[str] = None,
-              device: str = 'cuda:0',
-              ret_value: Optional[mp.Value] = None):
+              device: str = 'cuda:0'):
 
-    if ret_value is not None:
-        ret_value.value = -1
-
-    # load deploy_cfg if necessary
-    if isinstance(deploy_cfg, str):
-        deploy_cfg = mmcv.Config.fromfile(deploy_cfg)
-    if not isinstance(deploy_cfg, mmcv.Config):
-        raise TypeError('deploy_cfg must be a filename or Config object, '
-                        f'but got {type(deploy_cfg)}')
-    # load model_cfg if needed
-    if isinstance(model_cfg, str):
-        model_cfg = mmcv.Config.fromfile(model_cfg)
-    if not isinstance(model_cfg, mmcv.Config):
-        raise TypeError('config must be a filename or Config object, '
-                        f'but got {type(model_cfg)}')
+    deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
 
     mmcv.mkdir_or_exist(osp.abspath(work_dir))
     output_file = osp.join(work_dir, save_file)
 
-    codebase = deploy_cfg['codebase']
+    codebase = get_codebase(deploy_cfg)
 
-    torch_model = init_model(codebase, model_cfg, model_checkpoint, device)
+    torch_model = init_pytorch_model(codebase, model_cfg, model_checkpoint,
+                                     device)
     data, model_inputs = create_input(codebase, model_cfg, img, device)
     if not isinstance(model_inputs, torch.Tensor):
         model_inputs = model_inputs[0]

@@ -83,6 +70,3 @@ def torch2onnx(img: Any,
         model_inputs,
         deploy_cfg=deploy_cfg,
         output_file=output_file)
-
-    if ret_value is not None:
-        ret_value.value = 0
@@ -1,10 +1,9 @@
 import os.path as osp
-from typing import Optional, Union
+from typing import Union
 
 import mmcv
 import onnx
 import tensorrt as trt
-import torch.multiprocessing as mp
 
 from .tensorrt_utils import create_trt_engine, save_trt_engine
 

@@ -22,10 +21,8 @@ def onnx2tensorrt(work_dir: str,
                   deploy_cfg: Union[str, mmcv.Config],
                   onnx_model: Union[str, onnx.ModelProto],
                   device: str = 'cuda:0',
-                  split_type: str = 'end2end',
-                  ret_value: Optional[mp.Value] = None,
+                  partition_type: str = 'end2end',
                   **kwargs):
-    ret_value.value = -1
 
     # load deploy_cfg if necessary
     if isinstance(deploy_cfg, str):

@@ -50,7 +47,7 @@ def onnx2tensorrt(work_dir: str,
         calib_params = deploy_cfg.get('calib_params', dict())
         calib_file = calib_params.get('calib_file', 'calib_file.h5')
         int8_param['calib_file'] = osp.join(work_dir, calib_file)
-        int8_param['model_type'] = split_type
+        int8_param['model_type'] = partition_type
 
     assert device.startswith('cuda')
     device_id = parse_device_id(device)

@@ -65,5 +62,3 @@ def onnx2tensorrt(work_dir: str,
         device_id=device_id)
 
     save_trt_engine(engine, osp.join(work_dir, save_file))
-
-    ret_value.value = 0
@@ -189,7 +189,7 @@ class TRTWrapper(torch.nn.Module):
         self.input_names = input_names
         self.output_names = output_names
 
-    def _on_state_dict(self, state_dict, prefix, local_metadata):
+    def _on_state_dict(self, state_dict, prefix):
         state_dict[prefix + 'engine'] = bytearray(self.engine.serialize())
         state_dict[prefix + 'input_names'] = self.input_names
         state_dict[prefix + 'output_names'] = self.output_names
@@ -1,90 +1,12 @@
 import warnings
-from typing import Any, Union
+from typing import Any
 
 import mmcv
 import numpy as np
 from torch import nn
 from torch.utils.data import DataLoader
 
-from mmdeploy.apis.utils import assert_module_exist
-
-
-def prepare_data_loader(codebase: str, model_cfg: Union[str, mmcv.Config]):
-    assert_module_exist(codebase)
-    # load model_cfg if necessary
-    if isinstance(model_cfg, str):
-        model_cfg = mmcv.Config.fromfile(model_cfg)
-
-    if codebase == 'mmcls':
-        from mmcls.datasets import (build_dataloader, build_dataset)
-        # build dataset and dataloader
-        dataset = build_dataset(model_cfg.data.test)
-        data_loader = build_dataloader(
-            dataset,
-            samples_per_gpu=model_cfg.data.samples_per_gpu,
-            workers_per_gpu=model_cfg.data.workers_per_gpu,
-            shuffle=False,
-            round_up=False)
-
-    elif codebase == 'mmdet':
-        from mmdet.datasets import (build_dataloader, build_dataset,
-                                    replace_ImageToTensor)
-        # in case the test dataset is concatenated
-        samples_per_gpu = 1
-        if isinstance(model_cfg.data.test, dict):
-            model_cfg.data.test.test_mode = True
-            samples_per_gpu = model_cfg.data.test.pop('samples_per_gpu', 1)
-            if samples_per_gpu > 1:
-                # Replace 'ImageToTensor' to 'DefaultFormatBundle'
-                model_cfg.data.test.pipeline = replace_ImageToTensor(
-                    model_cfg.data.test.pipeline)
-        elif isinstance(model_cfg.data.test, list):
-            for ds_cfg in model_cfg.data.test:
-                ds_cfg.test_mode = True
-            samples_per_gpu = max([
-                ds_cfg.pop('samples_per_gpu', 1)
-                for ds_cfg in model_cfg.data.test
-            ])
-            if samples_per_gpu > 1:
-                for ds_cfg in model_cfg.data.test:
-                    ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
-
-        # build the dataloader
-        dataset = build_dataset(model_cfg.data.test)
-        data_loader = build_dataloader(
-            dataset,
-            samples_per_gpu=samples_per_gpu,
-            workers_per_gpu=model_cfg.data.workers_per_gpu,
-            dist=False,
-            shuffle=False)
-
-    elif codebase == 'mmseg':
-        from mmseg.datasets import build_dataset, build_dataloader
-        model_cfg.data.test.test_mode = True
-        dataset = build_dataset(model_cfg.data.test)
-        samples_per_gpu = 1
-        data_loader = build_dataloader(
-            dataset,
-            samples_per_gpu=samples_per_gpu,
-            workers_per_gpu=model_cfg.data.workers_per_gpu,
-            dist=False,
-            shuffle=False)
-
-    elif codebase == 'mmocr':
-        from mmocr.datasets import build_dataset, build_dataloader
-        model_cfg.data.test.test_mode = True
-        dataset = build_dataset(model_cfg.data.test)
-        samples_per_gpu = 1
-        data_loader = build_dataloader(
-            dataset,
-            samples_per_gpu=samples_per_gpu,
-            workers_per_gpu=model_cfg.data.workers_per_gpu,
-            dist=False,
-            shuffle=False)
-    else:
-        raise NotImplementedError(f'Unknown codebase type: {codebase}')
-
-    return dataset, data_loader
+from mmdeploy.utils import Codebase
 
 
 def single_gpu_test(codebase: str,

@@ -93,23 +15,24 @@ def single_gpu_test(codebase: str,
                     show: bool = False,
                     out_dir: Any = None,
                     show_score_thr: float = 0.3):
-    assert_module_exist(codebase)
-
-    if codebase == 'mmcls':
+    if codebase == Codebase.MMCLS:
         from mmcls.apis import single_gpu_test
         outputs = single_gpu_test(model, data_loader, show, out_dir)
-    elif codebase == 'mmdet':
+    elif codebase == Codebase.MMDET:
         from mmdet.apis import single_gpu_test
         outputs = single_gpu_test(model, data_loader, show, out_dir,
                                   show_score_thr)
-    elif codebase == 'mmseg':
+    elif codebase == Codebase.MMSEG:
        from mmseg.apis import single_gpu_test
         outputs = single_gpu_test(model, data_loader, show, out_dir)
-    elif codebase == 'mmocr':
+    elif codebase == Codebase.MMOCR:
         from mmdet.apis import single_gpu_test
         outputs = single_gpu_test(model, data_loader, show, out_dir)
+    elif codebase == Codebase.MMEDIT:
+        from mmedit.apis import single_gpu_test
+        outputs = single_gpu_test(model, data_loader, show, out_dir)
     else:
-        raise NotImplementedError(f'Unknown codebase type: {codebase}')
+        raise NotImplementedError(f'Unknown codebase type: {codebase.value}')
     return outputs

@@ -121,7 +44,7 @@ def post_process_outputs(outputs,
                          out: str = None,
                          metric_options: dict = None,
                          format_only: bool = False):
-    if codebase == 'mmcls':
+    if codebase == Codebase.MMCLS:
         if metrics:
             results = dataset.evaluate(outputs, metrics, metric_options)
             for k, v in results.items():

@@ -147,7 +70,7 @@ def post_process_outputs(outputs,
             print(f'\nwriting results to {out}')
             mmcv.dump(results, out)
 
-    elif codebase == 'mmdet':
+    elif codebase == Codebase.MMDET:
         if out:
             print(f'\nwriting results to {out}')
             mmcv.dump(outputs, out)

@@ -165,7 +88,7 @@ def post_process_outputs(outputs,
             eval_kwargs.update(dict(metric=metrics, **kwargs))
             print(dataset.evaluate(outputs, **eval_kwargs))
 
-    elif codebase == 'mmseg':
+    elif codebase == Codebase.MMSEG:
         if out:
             print(f'\nwriting results to {out}')
             mmcv.dump(outputs, out)

@@ -175,7 +98,7 @@ def post_process_outputs(outputs,
         if metrics:
             dataset.evaluate(outputs, metrics, **kwargs)
 
-    elif codebase == 'mmocr':
+    elif codebase == Codebase.MMOCR:
         if out:
             print(f'\nwriting results to {out}')
             mmcv.dump(outputs, out)

@@ -193,5 +116,16 @@ def post_process_outputs(outputs,
             eval_kwargs.update(dict(metric=metrics, **kwargs))
             print(dataset.evaluate(outputs, **eval_kwargs))
 
+    elif codebase == Codebase.MMEDIT:
+        if out:
+            print(f'\nwriting results to {out}')
+            mmcv.dump(outputs, out)
+        # The Dataset doesn't need metrics
+        print('')
+        # print metrics
+        stats = dataset.evaluate(outputs)
+        for stat in stats:
+            print('Eval-{}: {}'.format(stat, stats[stat]))
+
     else:
-        raise NotImplementedError(f'Unknown codebase type: {codebase}')
+        raise NotImplementedError(f'Unknown codebase type: {codebase.value}')
@ -1,110 +1,75 @@
|
|||
import importlib
|
||||
from typing import Any, Dict, Optional, Sequence, Union
|
||||
|
||||
import mmcv
|
||||
import numpy as np
|
||||
import torch
|
||||
|
||||
from mmdeploy.utils import Backend, Codebase, get_codebase, load_config
|
||||
|
||||
|
||||
def assert_cfg_valid(cfg: Union[str, mmcv.Config, mmcv.ConfigDict], *args):
|
||||
"""Check config validation."""
|
||||
|
||||
def _assert_cfg_valid_(cfg):
|
||||
if isinstance(cfg, str):
|
||||
cfg = mmcv.Config.fromfile(cfg)
|
||||
if not isinstance(cfg, (mmcv.Config, mmcv.ConfigDict)):
|
||||
raise TypeError('deploy_cfg must be a filename or Config object, '
|
||||
f'but got {type(cfg)}')
|
||||
return cfg
|
||||
|
||||
args = (cfg, ) + args
|
||||
ret = [_assert_cfg_valid_(cfg) for cfg in args]
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def assert_module_exist(module_name: str):
|
||||
if importlib.util.find_spec(module_name) is None:
|
||||
raise ImportError(f'Can not import module: {module_name}')
|
||||
|
||||
|
||||
def load_config(cfg):
|
||||
if isinstance(cfg, str):
|
||||
cfg = mmcv.Config.fromfile(cfg)
|
||||
elif not isinstance(cfg, (mmcv.Config, mmcv.ConfigDict)):
|
||||
raise TypeError('config must be a filename or Config object, '
|
||||
f'but got {type(cfg)}')
|
||||
return cfg
|
||||
|
||||
|
||||
def init_model(codebase: str,
|
||||
model_cfg: Union[str, mmcv.Config],
|
||||
model_checkpoint: Optional[str] = None,
|
||||
device: str = 'cuda:0',
|
||||
cfg_options: Optional[Dict] = None):
|
||||
assert_module_exist(codebase)
|
||||
if codebase == 'mmcls':
|
||||
def init_pytorch_model(codebase: Codebase,
|
||||
model_cfg: Union[str, mmcv.Config],
|
||||
model_checkpoint: Optional[str] = None,
|
||||
device: str = 'cuda:0',
|
||||
cfg_options: Optional[Dict] = None):
|
||||
if codebase == Codebase.MMCLS:
|
||||
from mmcls.apis import init_model
|
||||
model = init_model(model_cfg, model_checkpoint, device, cfg_options)
|
||||
|
||||
elif codebase == 'mmdet':
|
||||
elif codebase == Codebase.MMDET:
|
||||
from mmdet.apis import init_detector
|
||||
model = init_detector(model_cfg, model_checkpoint, device, cfg_options)
|
||||
|
||||
elif codebase == 'mmseg':
|
||||
elif codebase == Codebase.MMSEG:
|
||||
from mmseg.apis import init_segmentor
|
||||
from mmdeploy.mmseg.export import convert_syncbatchnorm
|
||||
model = init_segmentor(model_cfg, model_checkpoint, device)
|
||||
model = convert_syncbatchnorm(model)
|
||||
|
||||
elif codebase == 'mmocr':
|
||||
elif codebase == Codebase.MMOCR:
|
||||
from mmdet.apis import init_detector
|
||||
from mmocr.models import build_detector # noqa: F401
|
||||
model = init_detector(model_cfg, model_checkpoint, device, cfg_options)
|
||||
|
||||
elif codebase == Codebase.MMEDIT:
|
||||
from mmedit.apis import init_model
|
||||
model = init_model(model_cfg, model_checkpoint, device)
|
||||
model.forward = model.forward_dummy
|
||||
else:
|
||||
raise NotImplementedError(f'Unknown codebase type: {codebase}')
|
||||
raise NotImplementedError(f'Unknown codebase type: {codebase.value}')
|
||||
|
||||
return model.eval()
|
||||
|
||||
|
||||
def create_input(codebase: str,
|
||||
def create_input(codebase: Codebase,
|
||||
model_cfg: Union[str, mmcv.Config],
|
||||
imgs: Any,
|
||||
device: str = 'cuda:0'):
|
||||
model_cfg = assert_cfg_valid(model_cfg)[0]
|
||||
device: str = 'cuda:0',
|
||||
**kwargs):
|
||||
model_cfg = load_config(model_cfg)[0]
|
||||
|
||||
assert_module_exist(codebase)
|
||||
cfg = model_cfg.copy()
|
||||
if codebase == 'mmcls':
|
||||
if codebase == Codebase.MMCLS:
|
||||
from mmdeploy.mmcls.export import create_input
|
||||
return create_input(cfg, imgs, device)
|
||||
return create_input(cfg, imgs, device, **kwargs)
|
||||
|
||||
elif codebase == 'mmdet':
|
||||
elif codebase == Codebase.MMDET:
|
||||
from mmdeploy.mmdet.export import create_input
|
||||
return create_input(cfg, imgs, device)
|
||||
return create_input(cfg, imgs, device, **kwargs)
|
||||
|
||||
elif codebase == 'mmocr':
|
||||
elif codebase == Codebase.MMOCR:
|
||||
from mmdeploy.mmocr.export import create_input
|
||||
return create_input(cfg, imgs, device)
|
||||
return create_input(cfg, imgs, device, **kwargs)
|
||||
|
||||
elif codebase == 'mmseg':
|
||||
elif codebase == Codebase.MMSEG:
|
||||
from mmdeploy.mmseg.export import create_input
|
||||
return create_input(cfg, imgs, device)
|
||||
return create_input(cfg, imgs, device, **kwargs)
|
||||
|
||||
elif codebase == Codebase.MMEDIT:
|
||||
from mmdeploy.mmedit.export import create_input
|
||||
return create_input(cfg, imgs, device, **kwargs)
|
||||
|
||||
else:
|
||||
raise NotImplementedError(f'Unknown codebase type: {codebase}')
|
||||
|
||||
|
||||
def attribute_to_dict(attr):
|
||||
from onnx.helper import get_attribute_value
|
||||
ret = {}
|
||||
for a in attr:
|
||||
value = get_attribute_value(a)
|
||||
if isinstance(value, bytes):
|
||||
value = str(value, 'utf-8')
|
||||
ret[a.name] = value
|
||||
return ret
|
||||
raise NotImplementedError(f'Unknown codebase type: {codebase.value}')
|
||||
|
||||
|
||||
def init_backend_model(model_files: Sequence[str],
|
||||
|
@ -112,259 +77,173 @@ def init_backend_model(model_files: Sequence[str],
|
|||
deploy_cfg: Union[str, mmcv.Config],
|
||||
device_id: int = 0,
|
||||
**kwargs):
|
||||
deploy_cfg, model_cfg = assert_cfg_valid(deploy_cfg, model_cfg)
|
||||
deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
|
||||
|
||||
codebase = deploy_cfg['codebase']
|
||||
backend = deploy_cfg['backend']
|
||||
assert_module_exist(codebase)
|
||||
if codebase != 'mmocr':
|
||||
class_names = get_classes_from_config(codebase, model_cfg)
|
||||
codebase = get_codebase(deploy_cfg)
|
||||
|
||||
if codebase == 'mmcls':
|
||||
if backend == 'onnxruntime':
|
||||
from mmdeploy.mmcls.export import ONNXRuntimeClassifier
|
||||
backend_model = ONNXRuntimeClassifier(
|
||||
model_files[0], class_names=class_names, device_id=device_id)
|
||||
elif backend == 'tensorrt':
|
||||
from mmdeploy.mmcls.export import TensorRTClassifier
|
||||
backend_model = TensorRTClassifier(
|
||||
model_files[0], class_names=class_names, device_id=device_id)
|
||||
elif backend == 'ncnn':
|
||||
from mmdeploy.mmcls.export import NCNNClassifier
|
||||
backend_model = NCNNClassifier(
|
||||
model_files[0],
|
||||
model_files[1],
|
||||
class_names=class_names,
|
||||
device_id=device_id)
|
||||
elif backend == 'ppl':
|
||||
from mmdeploy.mmcls.export import PPLClassifier
|
||||
backend_model = PPLClassifier(
|
||||
model_files[0], class_names=class_names, device_id=device_id)
|
||||
else:
|
||||
raise NotImplementedError(f'Unsupported backend type: {backend}')
|
||||
return backend_model
|
||||
if codebase == Codebase.MMCLS:
|
||||
from mmdeploy.mmcls.apis import build_classifier
|
||||
return build_classifier(
|
||||
model_files, model_cfg, deploy_cfg, device_id=device_id)
|
||||
|
||||
elif codebase == 'mmdet':
|
||||
from mmdeploy.mmdet.export.model_wrappers import build_detector
|
||||
elif codebase == Codebase.MMDET:
|
||||
from mmdeploy.mmdet.apis import build_detector
|
||||
return build_detector(
|
||||
model_files, model_cfg, deploy_cfg, device_id=device_id)
|
||||
|
||||
elif codebase == 'mmseg':
|
||||
if backend == 'onnxruntime':
|
||||
from mmdeploy.mmseg.export import ONNXRuntimeSegmentor
|
||||
backend_model = ONNXRuntimeSegmentor(
|
||||
model_files[0], class_names=class_names, device_id=device_id)
|
||||
elif backend == 'tensorrt':
|
||||
from mmdeploy.mmseg.export import TensorRTSegmentor
|
||||
backend_model = TensorRTSegmentor(
|
||||
model_files[0], class_names=class_names, device_id=device_id)
|
||||
else:
|
||||
raise NotImplementedError(f'Unsupported backend type: {backend}')
|
||||
return backend_model
|
||||
elif codebase == Codebase.MMSEG:
|
||||
from mmdeploy.mmseg.apis import build_segmentor
|
||||
return build_segmentor(
|
||||
model_files, model_cfg, deploy_cfg, device_id=device_id)
|
||||
|
||||
elif codebase == 'mmocr':
|
||||
algorithm_type = deploy_cfg['algorithm_type']
|
||||
if backend == 'onnxruntime':
|
||||
if algorithm_type == 'det':
|
||||
from mmdeploy.mmocr.export import ONNXRuntimeDetector
|
||||
backend_model = ONNXRuntimeDetector(
|
||||
model_files[0], cfg=model_cfg, device_id=device_id)
|
||||
elif algorithm_type == 'recog':
|
||||
from mmdeploy.mmocr.export import ONNXRuntimeRecognizer
|
||||
backend_model = ONNXRuntimeRecognizer(
|
||||
model_files[0], cfg=model_cfg, device_id=device_id)
|
||||
elif backend == 'tensorrt':
|
||||
if algorithm_type == 'det':
|
||||
from mmdeploy.mmocr.export import TensorRTDetector
|
||||
backend_model = TensorRTDetector(
|
||||
model_files[0], cfg=model_cfg, device_id=device_id)
|
||||
elif algorithm_type == 'recog':
|
||||
from mmdeploy.mmocr.export import TensorRTRecognizer
|
||||
backend_model = TensorRTRecognizer(
|
||||
model_files[0], cfg=model_cfg, device_id=device_id)
|
||||
else:
|
||||
raise NotImplementedError(f'Unsupported backend type: {backend}')
|
||||
return backend_model
|
||||
elif codebase == Codebase.MMOCR:
|
||||
from mmdeploy.mmocr.apis import build_ocr_processor
|
||||
return build_ocr_processor(
|
||||
model_files, model_cfg, deploy_cfg, device_id=device_id)
|
||||
|
||||
elif codebase == Codebase.MMEDIT:
|
||||
from mmdeploy.mmedit.apis import build_editing_processor
|
||||
return build_editing_processor(model_files, model_cfg, deploy_cfg,
|
||||
device_id)
|
||||
|
||||
else:
|
||||
raise NotImplementedError(f'Unknown codebase type: {codebase}')
|
||||
raise NotImplementedError(f'Unknown codebase type: {codebase.value}')
|
||||
|
||||
|
||||
def get_classes_from_config(codebase: str, model_cfg: Union[str, mmcv.Config]):
|
||||
assert_module_exist(codebase)
|
||||
|
||||
model_cfg_str = model_cfg
|
||||
model_cfg = assert_cfg_valid(model_cfg)[0]
|
||||
|
||||
if codebase == 'mmdet':
|
||||
from mmdeploy.mmdet.export.model_wrappers \
|
||||
import get_classes_from_config as get_classes_mmdet
|
||||
return get_classes_mmdet(model_cfg)
|
||||
|
||||
if codebase == 'mmcls':
|
||||
from mmcls.datasets import DATASETS
|
||||
elif codebase == 'mmdet':
|
||||
from mmdet.datasets import DATASETS
|
||||
elif codebase == 'mmseg':
|
||||
from mmseg.datasets import DATASETS
|
||||
elif codebase == 'mmocr':
|
||||
from mmocr.datasets import DATASETS
|
||||
else:
|
||||
raise NotImplementedError(f'Unknown codebase type: {codebase}')
|
||||
|
||||
module_dict = DATASETS.module_dict
|
||||
data_cfg = model_cfg.data
|
||||
|
||||
if 'train' in data_cfg:
|
||||
module = module_dict[data_cfg.train.type]
|
||||
elif 'val' in data_cfg:
|
||||
module = module_dict[data_cfg.val.type]
|
||||
elif 'test' in data_cfg:
|
||||
module = module_dict[data_cfg.test.type]
|
||||
else:
|
||||
raise RuntimeError(f'No dataset config found in: {model_cfg_str}')
|
||||
|
||||
return module.CLASSES
|
||||
|
||||
|
||||
def _inference(codebase: str, model_inputs, model):
|
||||
assert_module_exist(codebase)
|
||||
if codebase == 'mmcls':
|
||||
def run_inference(codebase: Codebase, model_inputs, model):
|
||||
if codebase == Codebase.MMCLS:
|
||||
return model(**model_inputs, return_loss=False)[0]
|
||||
elif codebase == Codebase.MMDET:
|
||||
return model(**model_inputs, return_loss=False, rescale=True)[0]
|
||||
elif codebase == Codebase.MMSEG:
|
||||
return model(**model_inputs, return_loss=False)
|
||||
elif codebase == 'mmdet':
|
||||
return model(**model_inputs, return_loss=False, rescale=True)
|
||||
elif codebase == Codebase.MMOCR:
|
||||
return model(**model_inputs, return_loss=False, rescale=True)[0]
|
||||
elif codebase == Codebase.MMEDIT:
|
||||
result = model(model_inputs['lq'])[0]
|
||||
# TODO: (For mmedit codebase)
|
||||
# The data type of pytorch backend is not consistent
|
||||
if not isinstance(result, np.ndarray):
|
||||
result = result.detach().cpu().numpy()
|
||||
return result
|
||||
else:
|
||||
raise NotImplementedError(f'Unknown codebase type: {codebase}')
|
||||
raise NotImplementedError(f'Unknown codebase type: {codebase.value}')
|
||||
|
-def check_model_outputs(codebase: str,
-                        image: Union[str, np.ndarray],
-                        model_inputs,
-                        model,
-                        output_file: str,
-                        backend: str,
-                        dataset: str = None,
-                        show_result=False):
-    assert_module_exist(codebase)
+def visualize(codebase: Codebase,
+              image: Union[str, np.ndarray],
+              result,
+              model,
+              output_file: str,
+              backend: Backend,
+              show_result=False):
     show_img = mmcv.imread(image) if isinstance(image, str) else image
+    output_file = None if show_result else output_file
-    if codebase == 'mmcls':
-        output_file = None if show_result else output_file
-        with torch.no_grad():
-            scores = _inference(codebase, model_inputs, model)[0]
-            pred_score = np.max(scores, axis=0)
-            pred_label = np.argmax(scores, axis=0)
-            result = {
-                'pred_label': pred_label,
-                'pred_score': float(pred_score)
-            }
-            result['pred_class'] = model.CLASSES[result['pred_label']]
-            model.show_result(
-                show_img,
-                result,
-                show=True,
-                win_name=backend,
-                out_file=output_file)
+    if codebase == Codebase.MMCLS:
+        from mmdeploy.mmcls.apis import show_result as show_result_mmcls
+        show_result_mmcls(model, show_img, result, output_file, backend,
+                          show_result)
+    elif codebase == Codebase.MMDET:
+        from mmdeploy.mmdet.apis import show_result as show_result_mmdet
+        show_result_mmdet(model, show_img, result, output_file, backend,
+                          show_result)
+    elif codebase == Codebase.MMSEG:
+        from mmdeploy.mmseg.apis import show_result as show_result_mmseg
+        show_result_mmseg(model, show_img, result, output_file, backend,
+                          show_result)
+    elif codebase == Codebase.MMOCR:
+        from mmdeploy.mmocr.apis import show_result as show_result_mmocr
+        show_result_mmocr(model, show_img, result, output_file, backend,
+                          show_result)
+    elif codebase == Codebase.MMEDIT:
+        from mmdeploy.mmedit.apis import show_result as show_result_mmedit
+        show_result_mmedit(result, output_file, backend, show_result)
-    elif codebase == 'mmdet':
-        output_file = None if show_result else output_file
-        score_thr = 0.3
-        with torch.no_grad():
-            results = _inference(codebase, model_inputs, model)[0]
-            model.show_result(
-                show_img,
-                results,
-                score_thr=score_thr,
-                show=True,
-                win_name=backend,
-                out_file=output_file)
-    elif codebase == 'mmocr':
-        assert_module_exist(codebase)
-        output_file = None if show_result else output_file
-        score_thr = 0.3
-        with torch.no_grad():
-            results = model(**model_inputs, return_loss=False, rescale=True)[0]
-            model.show_result(
-                show_img,
-                results,
-                score_thr=score_thr,
-                show=True,
-                win_name=backend,
-                out_file=output_file)
-    elif codebase == 'mmseg':
-        output_file = None if show_result else output_file
-        from mmseg.core.evaluation import get_palette
-        dataset = 'cityscapes' if dataset is None else dataset
-        palette = get_palette(dataset)
-        with torch.no_grad():
-            results = model(**model_inputs, return_loss=False, rescale=True)
-            model.show_result(
-                show_img,
-                results,
-                palette=palette,
-                show=True,
-                win_name=backend,
-                out_file=output_file,
-                opacity=0.5)
+def get_partition_cfg(codebase: Codebase, partition_type: str):
+    if codebase == Codebase.MMDET:
+        from mmdeploy.mmdet.export import get_partition_cfg \
+            as get_partition_cfg_mmdet
+        return get_partition_cfg_mmdet(partition_type)
+    else:
-        raise NotImplementedError(f'Unknown codebase type: {codebase}')
+        raise NotImplementedError(f'Unknown codebase type: {codebase.value}')


-def get_split_cfg(codebase: str, split_type: str):
-    assert_module_exist(codebase)
-    if codebase == 'mmdet':
-        from mmdeploy.mmdet.export import get_split_cfg \
-            as get_split_cfg_mmdet
-        return get_split_cfg_mmdet(split_type)
-    else:
-        raise NotImplementedError(f'Unknown codebase type: {codebase}')
-def build_dataset(codebase: str,
+def build_dataset(codebase: Codebase,
                   dataset_cfg: Union[str, mmcv.Config],
                   dataset_type: str = 'val',
                   **kwargs):
-    assert_module_exist(codebase)
-    if codebase == 'mmcls':
+    if codebase == Codebase.MMCLS:
         from mmdeploy.mmcls.export import build_dataset \
             as build_dataset_mmcls
         return build_dataset_mmcls(dataset_cfg, dataset_type, **kwargs)
-    elif codebase == 'mmdet':
+    elif codebase == Codebase.MMDET:
         from mmdeploy.mmdet.export import build_dataset \
             as build_dataset_mmdet
         return build_dataset_mmdet(dataset_cfg, dataset_type, **kwargs)
+    elif codebase == Codebase.MMSEG:
+        from mmdeploy.mmseg.export import build_dataset as build_dataset_mmseg
+        return build_dataset_mmseg(dataset_cfg, dataset_type, **kwargs)
+    elif codebase == Codebase.MMEDIT:
+        from mmdeploy.mmedit.export import build_dataset \
+            as build_dataset_mmedit
+        return build_dataset_mmedit(dataset_cfg, **kwargs)
+    elif codebase == Codebase.MMOCR:
+        from mmdeploy.mmocr.export import build_dataset as build_dataset_mmocr
+        return build_dataset_mmocr(dataset_cfg, dataset_type, **kwargs)
     else:
-        raise NotImplementedError(f'Unknown codebase type: {codebase}')
+        raise NotImplementedError(f'Unknown codebase type: {codebase.value}')
-def build_dataloader(codebase: str, dataset, samples_per_gpu: int,
+def build_dataloader(codebase: Codebase, dataset, samples_per_gpu: int,
                      workers_per_gpu: int, **kwargs):
-    assert_module_exist(codebase)
-    if codebase == 'mmcls':
+    if codebase == Codebase.MMCLS:
         from mmdeploy.mmcls.export import build_dataloader \
             as build_dataloader_mmcls
         return build_dataloader_mmcls(dataset, samples_per_gpu,
                                       workers_per_gpu, **kwargs)
-    elif codebase == 'mmdet':
+    elif codebase == Codebase.MMDET:
         from mmdeploy.mmdet.export import build_dataloader \
             as build_dataloader_mmdet
         return build_dataloader_mmdet(dataset, samples_per_gpu,
                                       workers_per_gpu, **kwargs)
+    elif codebase == Codebase.MMSEG:
+        from mmdeploy.mmseg.export import build_dataloader \
+            as build_dataloader_mmseg
+        return build_dataloader_mmseg(dataset, samples_per_gpu,
+                                      workers_per_gpu, **kwargs)
+    elif codebase == Codebase.MMEDIT:
+        from mmdeploy.mmedit.export import build_dataloader \
+            as build_dataloader_mmedit
+        return build_dataloader_mmedit(dataset, samples_per_gpu,
+                                       workers_per_gpu, **kwargs)
+    elif codebase == Codebase.MMOCR:
+        from mmdeploy.mmocr.export import build_dataloader \
+            as build_dataloader_mmocr
+        return build_dataloader_mmocr(dataset, samples_per_gpu,
+                                      workers_per_gpu, **kwargs)
     else:
-        raise NotImplementedError(f'Unknown codebase type: {codebase}')
+        raise NotImplementedError(f'Unknown codebase type: {codebase.value}')
-def get_tensor_from_input(codebase: str, input_data):
-    assert_module_exist(codebase)
-    if codebase == 'mmcls':
+def get_tensor_from_input(codebase: Codebase, input_data):
+    if codebase == Codebase.MMCLS:
         from mmdeploy.mmcls.export import get_tensor_from_input \
             as get_tensor_from_input_mmcls
         return get_tensor_from_input_mmcls(input_data)
-    elif codebase == 'mmdet':
+    elif codebase == Codebase.MMDET:
         from mmdeploy.mmdet.export import get_tensor_from_input \
             as get_tensor_from_input_mmdet
         return get_tensor_from_input_mmdet(input_data)
+    elif codebase == Codebase.MMSEG:
+        from mmdeploy.mmseg.export import get_tensor_from_input \
+            as get_tensor_from_input_mmseg
+        return get_tensor_from_input_mmseg(input_data)
+    elif codebase == Codebase.MMOCR:
+        from mmdeploy.mmocr.export import get_tensor_from_input \
+            as get_tensor_from_input_mmocr
+        return get_tensor_from_input_mmocr(input_data)
     else:
-        raise NotImplementedError(f'Unknown codebase type: {codebase}')
+        raise NotImplementedError(f'Unknown codebase type: {codebase.value}')
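For orientation, a minimal usage sketch of the refactored dispatch helpers above; the helpers are imported from mmdeploy.apis.utils (as the calibration hunk later in this diff does), while the config path and batch sizes are illustrative, not from this diff:

from mmdeploy.apis.utils import build_dataloader, build_dataset
from mmdeploy.utils import Codebase

# dispatch is now keyed on the Codebase enum rather than raw strings
dataset = build_dataset(Codebase.MMCLS, 'configs/resnet_imagenet.py',
                        dataset_type='val')
dataloader = build_dataloader(
    Codebase.MMCLS, dataset, samples_per_gpu=1, workers_per_gpu=1)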
@@ -1,3 +1,9 @@
+from .extractor import create_extractor, parse_extractor_io_string
 from .function_marker import mark, reset_mark_function_count
+from .optimize import attribute_to_dict, get_new_name, optimize, rename_value

-__all__ = ['mark', 'reset_mark_function_count']
+__all__ = [
+    'mark', 'reset_mark_function_count', 'create_extractor',
+    'parse_extractor_io_string', 'optimize', 'attribute_to_dict',
+    'rename_value', 'get_new_name'
+]
@@ -0,0 +1,52 @@
import re

import onnx
from packaging import version


def parse_extractor_io_string(io_str):
    name, io_type = io_str.split(':')
    assert io_type in ['input', 'output']
    func_id = 0

    search_result = re.search(r'^(.+)\[([0-9]+)\]$', name)
    if search_result is not None:
        name = search_result.group(1)
        func_id = int(search_result.group(2))

    return name, func_id, io_type
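A quick sketch of what this parser accepts; the marker strings follow the func[id]:io_type convention used by the mmdet partition configs later in this diff:

# without an explicit index, func_id defaults to 0
assert parse_extractor_io_string('multiclass_nms:input') == \
    ('multiclass_nms', 0, 'input')
# '[n]' selects the n-th marked invocation of the function
assert parse_extractor_io_string('multiclass_nms[1]:input') == \
    ('multiclass_nms', 1, 'input')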
def _dfs_search_reachable_nodes_fast(self, node_output_name, graph_input_nodes,
                                     reachable_nodes):
    # index every node of the graph by its output tensor names
    outputs = {}
    for index, node in enumerate(self.graph.node):
        for name in node.output:
            if name not in outputs:
                outputs[name] = set()
            outputs[name].add(index)

    def impl(node_output_name, graph_input_nodes, reachable_nodes):
        if node_output_name in graph_input_nodes:
            return
        if node_output_name not in outputs:
            return
        for index in outputs[node_output_name]:
            node = self.graph.node[index]
            if node in reachable_nodes:
                continue
            reachable_nodes.append(node)
            for name in node.input:
                impl(name, graph_input_nodes, reachable_nodes)

    impl(node_output_name, graph_input_nodes, reachable_nodes)


def create_extractor(model):
    assert version.parse(onnx.__version__) >= version.parse('1.8.0')
    # patch the extractor with the faster reachability search above
    onnx.utils.Extractor._dfs_search_reachable_nodes = \
        _dfs_search_reachable_nodes_fast

    extractor = onnx.utils.Extractor(model)
    return extractor
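A usage sketch for the patched extractor; Extractor.extract_model is the onnx >= 1.8 API that the assert above guards, and the file and tensor names here are illustrative:

import onnx

model = onnx.load('end2end.onnx')  # illustrative path
extractor = create_extractor(model)
# cut out the sub-graph spanning the named input/output tensors
sub_model = extractor.extract_model(
    input_names=['input'], output_names=['scores', 'boxes'])
onnx.save(sub_model, 'partition0.onnx')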
@@ -3,6 +3,7 @@ import inspect
 import torch

 from mmdeploy.core.rewriters.function_rewriter import FUNCTION_REWRITER
+from mmdeploy.utils import get_codebase

 MARK_FUNCTION_COUNT = dict()

@@ -66,36 +67,36 @@ def forward_of_mark(rewriter, ctx, x, dtype, shape, func, func_id, type, name,
     apply_marks = deploy_cfg.get('apply_marks', False)
     create_calib = getattr(rewriter, 'create_calib', False)
     if apply_marks and create_calib:
-        codebase = deploy_cfg['codebase']
-        assert 'split_params' in deploy_cfg
-        split_params = deploy_cfg['split_params']
-        split_type = split_params['split_type']
-        from mmdeploy.apis.utils import get_split_cfg
-        split_cfgs = get_split_cfg(codebase, split_type)
+        codebase = get_codebase(deploy_cfg)
+        assert 'partition_params' in deploy_cfg
+        partition_params = deploy_cfg['partition_params']
+        partition_type = partition_params['partition_type']
+        from mmdeploy.apis.utils import get_partition_cfg
+        partition_cfgs = get_partition_cfg(codebase, partition_type)
         assert hasattr(rewriter, 'calib_file')

-        for split_id, split_cfg in enumerate(split_cfgs):
-            start = split_cfg['start']
+        for partition_id, partition_cfg in enumerate(partition_cfgs):
+            start = partition_cfg['start']
             if (f'{func}:{type}' not in start) and (f'{func}[{func_id}]:{type}'
                                                     not in start):
                 continue

             input_name = name
-            dynamic_axes = split_cfg.get('dynamic_axes', None)
+            dynamic_axes = partition_cfg.get('dynamic_axes', None)
             if dynamic_axes is not None:
                 input_name = name
             calib_file = rewriter.calib_file

             calib_data_group = calib_file['calib_data']
-            split_name = f'split{split_id}'
+            partition_name = f'partition{partition_id}'

-            if split_name not in calib_data_group:
-                calib_data_group.create_group(split_name)
-            split_group = calib_data_group[split_name]
+            if partition_name not in calib_data_group:
+                calib_data_group.create_group(partition_name)
+            partition_group = calib_data_group[partition_name]

-            if input_name not in split_group:
-                split_group.create_group(input_name)
-            input_data_group = split_group[input_name]
+            if input_name not in partition_group:
+                partition_group.create_group(input_name)
+            input_data_group = partition_group[input_name]

             data_id = rewriter.data_id
             x_np = x.detach().cpu().numpy()
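The keys consumed above imply a deploy config of roughly this shape (a sketch assembled from this hunk; the partition_type value is one of the keys defined in MMDET_PARTITION_CFG later in this diff):

deploy_cfg = dict(
    apply_marks=True,
    partition_params=dict(partition_type='two_stage_base'),
)
# calibration tensors are then stored under
# calib_file['calib_data'][f'partition{partition_id}'][input_name][data_id]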
@@ -0,0 +1,148 @@
import logging

from onnx.helper import get_attribute_value


def attribute_to_dict(attr):
    ret = {}
    for a in attr:
        value = get_attribute_value(a)
        if isinstance(value, bytes):
            value = str(value, 'utf-8')
        ret[a.name] = value
    return ret
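A quick round-trip sketch using onnx.helper.make_attribute (values are illustrative); note that the helper decodes STRING attributes from bytes to str:

from onnx.helper import make_attribute

attrs = [make_attribute('func', 'multiclass_nms'), make_attribute('id', 0)]
assert attribute_to_dict(attrs) == {'func': 'multiclass_nms', 'id': 0}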
def remove_nodes(model, predicate):
    # ! this doesn't handle inputs/outputs
    while True:
        connect = None
        for i, node in enumerate(model.graph.node):
            if predicate(node):
                assert len(node.input) == 1
                assert len(node.output) == 1
                connect = (node.input[0], node.output[0])
                logging.info(f'remove node {node.name}')
                del model.graph.node[i]
                break
        if not connect:
            break
        src, dst = connect
        for node in model.graph.node:
            for i, input in enumerate(node.input):
                if input == dst:
                    node.input[i] = src
    return model


def is_unused_mark(marks):

    def f(node):
        if node.op_type == 'Mark':
            attr = attribute_to_dict(node.attribute)
            name = attr['func'] + ':' + attr['type']
            if name not in marks:
                return True
        return False

    return f


def is_identity(node):
    return node.op_type == 'Identity'


def get_new_name(attrs, mark_name='', name_map=None):
    if 'name' in attrs:
        new_name = attrs['name']
    else:
        new_name = '_'.join((attrs['func'], attrs['type'], str(attrs['id'])))

    if name_map is not None:
        if new_name in name_map:
            return name_map[new_name]

        if f'{mark_name}:{new_name}' in name_map:
            return name_map[f'{mark_name}:{new_name}']

    return new_name


def rename_value(model, old_name, new_name):
    if old_name == new_name:
        return
    logging.info(f'rename {old_name} -> {new_name}')
    for n in model.graph.node:
        for i, output in enumerate(n.output):
            if output == old_name:
                n.output[i] = new_name
        for i, input in enumerate(n.input):
            if input == old_name:
                n.input[i] = new_name
    for v in model.graph.value_info:
        if v.name == old_name:
            v.name = new_name
    for i, input in enumerate(model.graph.input):
        if input.name == old_name:
            input.name = new_name
    for i, output in enumerate(model.graph.output):
        if output.name == old_name:
            output.name = new_name


def optimize(model):
    graph = model.graph

    def simplify_inputs():
        connect = None
        for input in graph.input:
            for i, node in enumerate(graph.node):
                if node.op_type == 'Identity' and node.input[0] == input.name:
                    connect = (node.input[0], node.output[0])
                    logging.info(f'remove node {node.name}')
                    del graph.node[i]
                    break
            if connect:
                break
        if not connect:
            return False
        src, dst = connect
        for node in graph.node:
            for i, input_name in enumerate(node.input):
                if input_name == dst:
                    node.input[i] = src
        # the input just changed won't be an output
        return True

    def simplify_outputs():
        connect = None
        for output in graph.output:
            for i, node in enumerate(graph.node):
                if node.op_type == 'Identity' and \
                        node.output[0] == output.name:
                    connect = (node.input[0], node.output[0])
                    logging.info(f'remove node {node.name}')
                    del graph.node[i]
                    break
            if connect:
                break
        if not connect:
            return False
        src, dst = connect
        for node in graph.node:
            for i, output_name in enumerate(node.output):
                if output_name == src:
                    node.output[i] = dst
            # the output just renamed may be someone's input
            for i, input_name in enumerate(node.input):
                if input_name == src:
                    node.input[i] = dst
        return True

    while simplify_inputs():
        pass

    while simplify_outputs():
        pass

    remove_nodes(model, is_identity)
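A minimal sketch of chaining these passes (file names are illustrative); optimize mutates the model in place:

import onnx

model = onnx.load('marked.onnx')
optimize(model)  # fold Identity nodes feeding graph inputs/outputs
onnx.save(model, 'optimized.onnx')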
@@ -0,0 +1,4 @@
from .inference import build_classifier
from .visualize import show_result

__all__ = ['build_classifier', 'show_result']
@@ -0,0 +1,146 @@
from typing import Union

import mmcv
import torch
from mmcls.datasets import DATASETS
from mmcls.models import BaseClassifier

from mmdeploy.utils.config_utils import Backend, get_backend, load_config


class DeployBaseClassifier(BaseClassifier):
    """Base Class of Wrapper for classifier's inference."""

    def __init__(self, class_names, device_id):
        super(DeployBaseClassifier, self).__init__()
        self.CLASSES = class_names
        self.device_id = device_id

    def simple_test(self, img, *args, **kwargs):
        raise NotImplementedError('This method is not implemented.')

    def extract_feat(self, imgs):
        raise NotImplementedError('This method is not implemented.')

    def forward_train(self, imgs, **kwargs):
        raise NotImplementedError('This method is not implemented.')

    def forward_test(self, imgs, *args, **kwargs):
        raise NotImplementedError('This method is not implemented.')


class ONNXRuntimeClassifier(DeployBaseClassifier):
    """Wrapper for classifier's inference with ONNXRuntime."""

    def __init__(self, model_file, class_names, device_id):
        super(ONNXRuntimeClassifier, self).__init__(class_names, device_id)
        from mmdeploy.apis.onnxruntime import ORTWrapper
        self.model = ORTWrapper(model_file, device_id)

    def forward_test(self, imgs, *args, **kwargs):
        input_data = imgs
        results = self.model({'input': input_data})[0]
        return list(results)


class TensorRTClassifier(DeployBaseClassifier):
    """Wrapper for classifier's inference with TensorRT."""

    def __init__(self, model_file, class_names, device_id):
        super(TensorRTClassifier, self).__init__(class_names, device_id)
        from mmdeploy.apis.tensorrt import TRTWrapper
        model = TRTWrapper(model_file)

        self.model = model

    def forward_test(self, imgs, *args, **kwargs):
        input_data = imgs
        with torch.cuda.device(self.device_id), torch.no_grad():
            results = self.model({'input': input_data})['output']
        results = results.detach().cpu().numpy()

        return list(results)


class NCNNClassifier(DeployBaseClassifier):
    """Wrapper for classifier's inference with NCNN."""

    def __init__(self, param_file, bin_file, class_names, device_id):
        super(NCNNClassifier, self).__init__(class_names, device_id)
        from mmdeploy.apis.ncnn import NCNNWrapper
        self.model = NCNNWrapper(param_file, bin_file, output_names=['output'])

    def forward_test(self, imgs, *args, **kwargs):
        results = self.model({'input': imgs})['output']
        results = results.detach().cpu().numpy()
        results_list = list(results)
        return results_list


class PPLClassifier(DeployBaseClassifier):
    """Wrapper for classifier's inference with PPL."""

    def __init__(self, model_file, class_names, device_id):
        super(PPLClassifier, self).__init__(class_names, device_id)
        from mmdeploy.apis.ppl import PPLWrapper
        model = PPLWrapper(model_file=model_file, device_id=device_id)
        self.model = model
        self.CLASSES = class_names

    def forward_test(self, imgs, *args, **kwargs):
        input_data = imgs
        results = self.model({'input': input_data})[0]

        return list(results)


ONNXRUNTIME_CLASSIFIER_MAP = dict(end2end=ONNXRuntimeClassifier)

TENSORRT_CLASSIFIER_MAP = dict(end2end=TensorRTClassifier)

PPL_CLASSIFIER_MAP = dict(end2end=PPLClassifier)

NCNN_CLASSIFIER_MAP = dict(end2end=NCNNClassifier)

BACKEND_CLASSIFIER_MAP = {
    Backend.ONNXRUNTIME: ONNXRUNTIME_CLASSIFIER_MAP,
    Backend.TENSORRT: TENSORRT_CLASSIFIER_MAP,
    Backend.PPL: PPL_CLASSIFIER_MAP,
    Backend.NCNN: NCNN_CLASSIFIER_MAP
}


def get_classes_from_config(model_cfg: Union[str, mmcv.Config]):
    model_cfg = load_config(model_cfg)[0]
    module_dict = DATASETS.module_dict
    data_cfg = model_cfg.data

    if 'train' in data_cfg:
        module = module_dict[data_cfg.train.type]
    elif 'val' in data_cfg:
        module = module_dict[data_cfg.val.type]
    elif 'test' in data_cfg:
        module = module_dict[data_cfg.test.type]
    else:
        raise RuntimeError(f'No dataset config found in: {model_cfg}')

    return module.CLASSES


def build_classifier(model_files, model_cfg, deploy_cfg, device_id, **kwargs):
    model_cfg = load_config(model_cfg)[0]
    deploy_cfg = load_config(deploy_cfg)[0]

    backend = get_backend(deploy_cfg)
    class_names = get_classes_from_config(model_cfg)

    assert backend in BACKEND_CLASSIFIER_MAP, \
        f'Unsupported backend type: {backend.value}'
    model_map = BACKEND_CLASSIFIER_MAP[backend]

    model_type = 'end2end'
    assert model_type in model_map, f'Unsupported model type: {model_type}'
    backend_classifier_class = model_map[model_type]

    backend_classifier = backend_classifier_class(
        *model_files, class_names=class_names, device_id=device_id)

    return backend_classifier
@@ -0,0 +1,17 @@
import numpy as np

from mmdeploy.utils import Backend


def show_result(model,
                image: np.ndarray,
                result,
                output_file: str,
                backend: Backend,
                show=True):
    pred_score = np.max(result, axis=0)
    pred_label = np.argmax(result, axis=0)
    result = {'pred_label': pred_label, 'pred_score': float(pred_score)}
    result['pred_class'] = model.CLASSES[result['pred_label']]
    return model.show_result(
        image, result, show=show, win_name=backend.value, out_file=output_file)

@@ -1,10 +1,7 @@
-from .model_wrappers import (NCNNClassifier, ONNXRuntimeClassifier,
-                             PPLClassifier, TensorRTClassifier)
 from .prepare_input import (build_dataloader, build_dataset, create_input,
                             get_tensor_from_input)

 __all__ = [
     'build_dataloader', 'build_dataset', 'create_input',
-    'get_tensor_from_input', 'ONNXRuntimeClassifier', 'TensorRTClassifier',
-    'NCNNClassifier', 'PPLClassifier'
+    'get_tensor_from_input'
 ]
@@ -1,125 +0,0 @@
import warnings

import numpy as np
import torch
from mmcls.models import BaseClassifier


class DeployBaseClassifier(BaseClassifier):
    """Base Class of Wrapper for classifier's inference."""

    def __init__(self, class_names, device_id):
        super(DeployBaseClassifier, self).__init__()
        self.CLASSES = class_names
        self.device_id = device_id

    def simple_test(self, img, *args, **kwargs):
        raise NotImplementedError('This method is not implemented.')

    def extract_feat(self, imgs):
        raise NotImplementedError('This method is not implemented.')

    def forward_train(self, imgs, **kwargs):
        raise NotImplementedError('This method is not implemented.')

    def forward_test(self, imgs, *args, **kwargs):
        raise NotImplementedError('This method is not implemented.')


class ONNXRuntimeClassifier(DeployBaseClassifier):
    """Wrapper for classifier's inference with ONNXRuntime."""

    def __init__(self, onnx_file, class_names, device_id):
        super(ONNXRuntimeClassifier, self).__init__(class_names, device_id)
        from mmdeploy.apis.onnxruntime import ORTWrapper
        self.model = ORTWrapper(onnx_file, device_id)

    def forward_test(self, imgs, *args, **kwargs):
        input_data = imgs
        results = self.model(input_data)[0]
        return list(results)


class TensorRTClassifier(DeployBaseClassifier):

    def __init__(self, trt_file, class_names, device_id):
        super(TensorRTClassifier, self).__init__(class_names, device_id)
        from mmdeploy.apis.tensorrt import TRTWrapper, load_tensorrt_plugin
        try:
            load_tensorrt_plugin()
        except (ImportError, ModuleNotFoundError):
            warnings.warn('If input model has custom plugins, \
                you may have to build backend ops with TensorRT')
        model = TRTWrapper(trt_file)

        self.model = model

    def forward_test(self, imgs, *args, **kwargs):
        input_data = imgs
        with torch.cuda.device(self.device_id), torch.no_grad():
            results = self.model({'input': input_data})['output']
        results = results.detach().cpu().numpy()

        return list(results)


class NCNNClassifier(DeployBaseClassifier):

    def __init__(self, ncnn_param_file, ncnn_bin_file, class_names, device_id):
        super(NCNNClassifier, self).__init__(class_names, device_id)
        from mmdeploy.apis.ncnn import NCNNWrapper
        self.model = NCNNWrapper(
            ncnn_param_file, ncnn_bin_file, output_names=['output'])

    def forward_test(self, imgs, *args, **kwargs):
        results = self.model({'input': imgs})['output']
        results = results.detach().cpu().numpy()
        results_list = list(results)
        return results_list


class PPLClassifier(DeployBaseClassifier):
    """Wrapper for classifier's inference with PPL."""

    def __init__(self, onnx_file, class_names, device_id):
        super(PPLClassifier, self).__init__(class_names, device_id)
        import pyppl.nn as pplnn
        from mmdeploy.apis.ppl import register_engines

        # enable quick select by default to speed up pipeline
        # TODO: open it to users after ppl supports saving serialized models
        # TODO: disable_avx512 will be removed or open to users in config
        engines = register_engines(
            device_id, disable_avx512=False, quick_select=True)
        cuda_options = pplnn.CudaEngineOptions()
        cuda_options.device_id = device_id
        runtime_builder = pplnn.OnnxRuntimeBuilderFactory.CreateFromFile(
            onnx_file, engines)
        assert runtime_builder is not None, 'Failed to create '\
            'ONNXRuntimeBuilder.'

        runtime_options = pplnn.RuntimeOptions()
        runtime = runtime_builder.CreateRuntime(runtime_options)
        assert runtime is not None, 'Failed to create the instance of Runtime.'

        self.runtime = runtime
        self.CLASSES = class_names
        self.device_id = device_id
        self.inputs = [
            runtime.GetInputTensor(i) for i in range(runtime.GetInputCount())
        ]

    def forward_test(self, imgs, *args, **kwargs):
        import pyppl.common as pplcommon
        input_data = imgs
        self.inputs[0].ConvertFromHost(input_data.cpu().numpy())
        status = self.runtime.Run()
        assert status == pplcommon.RC_SUCCESS, 'Run() '\
            'failed: ' + pplcommon.GetRetCodeStr(status)
        status = self.runtime.Sync()
        assert status == pplcommon.RC_SUCCESS, 'Sync() '\
            'failed: ' + pplcommon.GetRetCodeStr(status)
        results = self.runtime.GetOutputTensor(0).ConvertToHost()
        results = np.array(results, copy=False)

        return list(results)
@@ -6,17 +6,13 @@ from mmcls.datasets import build_dataset as build_dataset_mmcls
 from mmcls.datasets.pipelines import Compose
 from mmcv.parallel import collate, scatter

+from mmdeploy.utils.config_utils import load_config


 def create_input(model_cfg: Union[str, mmcv.Config],
                  imgs: Any,
                  device: str = 'cuda:0'):
-    if isinstance(model_cfg, str):
-        model_cfg = mmcv.Config.fromfile(model_cfg)
-    elif not isinstance(model_cfg, (mmcv.Config, mmcv.ConfigDict)):
-        raise TypeError('config must be a filename or Config object, '
-                        f'but got {type(model_cfg)}')
-
-    cfg = model_cfg.copy()
+    cfg = load_config(model_cfg)[0].copy()
     if isinstance(imgs, str):
         if cfg.data.test.pipeline[0]['type'] != 'LoadImageFromFile':
             cfg.data.test.pipeline.insert(0, dict(type='LoadImageFromFile'))

@@ -36,12 +32,7 @@ def create_input(model_cfg: Union[str, mmcv.Config],
 def build_dataset(dataset_cfg: Union[str, mmcv.Config],
                   dataset_type: str = 'val',
                   **kwargs):
-    if isinstance(dataset_cfg, str):
-        dataset_cfg = mmcv.Config.fromfile(dataset_cfg)
-    elif not isinstance(dataset_cfg, (mmcv.Config, mmcv.ConfigDict)):
-        raise TypeError('config must be a filename or Config object, '
-                        f'but got {type(dataset_cfg)}')
-
+    dataset_cfg = load_config(dataset_cfg)[0]
     data = dataset_cfg.data
     assert dataset_type in data

@@ -54,8 +45,8 @@ def build_dataloader(dataset,
                      samples_per_gpu: int,
                      workers_per_gpu: int,
                      num_gpus: int = 1,
-                     dist: bool = True,
-                     shuffle: bool = True,
+                     dist: bool = False,
+                     shuffle: bool = False,
                      round_up: bool = True,
                      seed: Optional[int] = None,
                      pin_memory: bool = True,
@@ -0,0 +1,4 @@
from .inference import build_detector
from .visualize import show_result

__all__ = ['build_detector', 'show_result']

@@ -1,5 +1,3 @@
-import os.path as osp
-import warnings
 from functools import partial
 from typing import Union

@@ -7,9 +5,11 @@ import mmcv
 import numpy as np
 import torch
 from mmdet.core import bbox2result
+from mmdet.datasets import DATASETS
 from mmdet.models import BaseDetector

 from mmdeploy.mmdet.core.post_processing import multiclass_nms
+from mmdeploy.utils.config_utils import Backend, get_backend, load_config


 class DeployBaseDetector(BaseDetector):

@@ -100,15 +100,15 @@ class DeployBaseDetector(BaseDetector):
 class ONNXRuntimeDetector(DeployBaseDetector):
     """Wrapper for detector's inference with ONNXRuntime."""

-    def __init__(self, onnx_file, class_names, device_id, **kwargs):
+    def __init__(self, model_file, class_names, device_id, **kwargs):
         super(ONNXRuntimeDetector, self).__init__(class_names, device_id,
                                                   **kwargs)
         from mmdeploy.apis.onnxruntime import ORTWrapper
-        self.model = ORTWrapper(onnx_file, device_id)
+        self.model = ORTWrapper(model_file, device_id)

     def forward_test(self, imgs, *args, **kwargs):
         input_data = imgs[0]
-        ort_outputs = self.model(input_data)
+        ort_outputs = self.model({'input': input_data})
         return ort_outputs
@@ -118,12 +118,8 @@ class TensorRTDetector(DeployBaseDetector):
     def __init__(self, model_file, class_names, device_id, **kwargs):
         super(TensorRTDetector, self).__init__(class_names, device_id,
                                                **kwargs)
-        from mmdeploy.apis.tensorrt import TRTWrapper, load_tensorrt_plugin
-        try:
-            load_tensorrt_plugin()
-        except (ImportError, ModuleNotFoundError):
-            warnings.warn('If input model has custom plugins, \
-                you may have to build backend ops with TensorRT')
+        from mmdeploy.apis.tensorrt import TRTWrapper

         self.model = TRTWrapper(model_file)
         self.output_names = ['dets', 'labels']
         if len(self.model.output_names) == 3:
@@ -147,31 +143,36 @@ class TensorRTDetector(DeployBaseDetector):
         return outputs


-# Split Single-Stage Base
-class SplitSingleStageBaseDetector(DeployBaseDetector):
+class PPLDetector(DeployBaseDetector):
+    """Wrapper for detector's inference with PPL."""
+
+    def __init__(self, model_file, class_names, device_id, **kwargs):
+        super(PPLDetector, self).__init__(class_names, device_id)
+        from mmdeploy.apis.ppl import PPLWrapper
+        self.model = PPLWrapper(model_file, device_id)
+
+    def forward_test(self, imgs, *args, **kwargs):
+        input_data = imgs[0]
+        ppl_outputs = self.model({'input': input_data})
+        return ppl_outputs
+
+
+# Partition Single-Stage Base
+class PartitionSingleStageDetector(DeployBaseDetector):
     """Base wrapper for inference of partitioned single-stage detectors."""

     def __init__(self, class_names, model_cfg, deploy_cfg, device_id,
                  **kwargs):
-        super().__init__(class_names, device_id, **kwargs)
-        # load deploy_cfg if necessary
-        if isinstance(deploy_cfg, str):
-            deploy_cfg = mmcv.Config.fromfile(deploy_cfg)
-        if not isinstance(deploy_cfg, mmcv.Config):
-            raise TypeError('deploy_cfg must be a filename or Config object, '
-                            f'but got {type(deploy_cfg)}')
-
-        # load model_cfg if needed
-        if isinstance(model_cfg, str):
-            model_cfg = mmcv.Config.fromfile(model_cfg)
-        if not isinstance(model_cfg, mmcv.Config):
-            raise TypeError('config must be a filename or Config object, '
-                            f'but got {type(model_cfg)}')
+        super(PartitionSingleStageDetector,
+              self).__init__(class_names, device_id, **kwargs)
+        # load cfg if necessary
+        deploy_cfg = load_config(deploy_cfg)[0]
+        model_cfg = load_config(model_cfg)[0]

         self.model_cfg = model_cfg
         self.deploy_cfg = deploy_cfg

-    def split0_postprocess(self, scores, bboxes):
+    def partition0_postprocess(self, scores, bboxes):
         cfg = self.model_cfg.model.test_cfg
         deploy_cfg = self.deploy_cfg
@@ -191,69 +192,34 @@ class SplitSingleStageBaseDetector(DeployBaseDetector):
             keep_top_k=keep_top_k)


-class ONNXRuntimeSSSBDetector(SplitSingleStageBaseDetector):
+class ONNXRuntimePSSDetector(PartitionSingleStageDetector):
     """Wrapper for detector's inference with ONNXRuntime."""

     def __init__(self, model_file, class_names, model_cfg, deploy_cfg,
                  device_id, **kwargs):
-        super().__init__(class_names, model_cfg, deploy_cfg, device_id,
-                         **kwargs)
-        import onnxruntime as ort
-
-        # get the custom op path
-        from mmdeploy.apis.onnxruntime import get_ops_path
-        ort_custom_op_path = get_ops_path()
-        session_options = ort.SessionOptions()
-        # register custom op for onnxruntime
-        if osp.exists(ort_custom_op_path):
-            session_options.register_custom_ops_library(ort_custom_op_path)
-        sess = ort.InferenceSession(model_file, session_options)
-        providers = ['CPUExecutionProvider']
-        options = [{}]
-        is_cuda_available = ort.get_device() == 'GPU'
-        if is_cuda_available:
-            providers.insert(0, 'CUDAExecutionProvider')
-            options.insert(0, {'device_id': device_id})
-
-        sess.set_providers(providers, options)
-
-        self.sess = sess
-        self.io_binding = sess.io_binding()
-        self.output_names = ['scores', 'boxes']
-        self.is_cuda_available = is_cuda_available
+        super(ONNXRuntimePSSDetector,
+              self).__init__(class_names, model_cfg, deploy_cfg, device_id,
+                             **kwargs)
+        from mmdeploy.apis.onnxruntime import ORTWrapper
+        self.model = ORTWrapper(
+            model_file, device_id, output_names=['scores', 'boxes'])

     def forward_test(self, imgs, *args, **kwargs):
         input_data = imgs[0]
-        # set io binding for inputs/outputs
-        device_type = 'cuda' if self.is_cuda_available else 'cpu'
-        if not self.is_cuda_available:
-            input_data = input_data.cpu()
-        self.io_binding.bind_input(
-            name='input',
-            device_type=device_type,
-            device_id=self.device_id,
-            element_type=np.float32,
-            shape=input_data.shape,
-            buffer_ptr=input_data.data_ptr())
-
-        for name in self.output_names:
-            self.io_binding.bind_output(name)
-        # run session to get outputs
-        self.sess.run_with_iobinding(self.io_binding)
-        ort_outputs = self.io_binding.copy_outputs_to_cpu()
+        ort_outputs = self.model({'input': input_data})
         scores, bboxes = ort_outputs[:2]
         scores = torch.from_numpy(scores).to(input_data.device)
         bboxes = torch.from_numpy(bboxes).to(input_data.device)
-        return self.split0_postprocess(scores, bboxes)
+        return self.partition0_postprocess(scores, bboxes)


-class NCNNSSSBDetector(SplitSingleStageBaseDetector):
+class NCNNPSSDetector(PartitionSingleStageDetector):
     """Wrapper for detector's inference with NCNN."""

     def __init__(self, model_file, class_names, model_cfg, deploy_cfg,
                  device_id, **kwargs):
-        super().__init__(class_names, model_cfg, deploy_cfg, device_id,
-                         **kwargs)
+        super(NCNNPSSDetector, self).__init__(class_names, model_cfg,
+                                              deploy_cfg, device_id, **kwargs)
         from mmdeploy.apis.ncnn import NCNNWrapper
         assert len(model_file) == 2
         ncnn_param_file = model_file[0]
@@ -267,33 +233,24 @@ class NCNNSSSBDetector(SplitSingleStageBaseDetector):
         outputs = self.model({'input': imgs})
         boxes = outputs['boxes']
         scores = outputs['scores']
-        return self.split0_postprocess(scores, boxes)
+        return self.partition0_postprocess(scores, boxes)


-# Split Two-Stage Base
-class SplitTwoStageBaseDetector(DeployBaseDetector):
+# Partition Two-Stage Base
+class PartitionTwoStageDetector(DeployBaseDetector):
     """Base wrapper for inference of partitioned two-stage detectors."""

     def __init__(self, class_names, model_cfg, deploy_cfg, device_id,
                  **kwargs):
-        super().__init__(class_names, device_id, **kwargs)
-        from mmdet.models.builder import build_roi_extractor, build_head
+        super(PartitionTwoStageDetector,
+              self).__init__(class_names, device_id, **kwargs)
+        from mmdet.models.builder import build_head, build_roi_extractor

         from mmdeploy.mmdet.models.roi_heads.bbox_heads import \
             get_bboxes_of_bbox_head

-        # load deploy_cfg if necessary
-        if isinstance(deploy_cfg, str):
-            deploy_cfg = mmcv.Config.fromfile(deploy_cfg)
-        if not isinstance(deploy_cfg, mmcv.Config):
-            raise TypeError('deploy_cfg must be a filename or Config object, '
-                            f'but got {type(deploy_cfg)}')
-
-        # load model_cfg if needed
-        if isinstance(model_cfg, str):
-            model_cfg = mmcv.Config.fromfile(model_cfg)
-        if not isinstance(model_cfg, mmcv.Config):
-            raise TypeError('config must be a filename or Config object, '
-                            f'but got {type(model_cfg)}')
+        # load cfg if necessary
+        deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)

         self.model_cfg = model_cfg
         self.deploy_cfg = deploy_cfg

@@ -309,7 +266,7 @@ class SplitTwoStageBaseDetector(DeployBaseDetector):
         ctx.cfg = self.deploy_cfg
         self.get_bboxes_of_bbox_head = partial(get_bboxes_of_bbox_head, ctx)

-    def split0_postprocess(self, x, scores, bboxes):
+    def partition0_postprocess(self, x, scores, bboxes):
         # rpn-nms + roi-extractor
         cfg = self.model_cfg.model.test_cfg.rpn
         deploy_cfg = self.deploy_cfg

@@ -346,7 +303,7 @@ class SplitTwoStageBaseDetector(DeployBaseDetector):
         rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))
         return rois, bbox_feats

-    def split1_postprocess(self, rois, cls_score, bbox_pred, img_metas):
+    def partition1_postprocess(self, rois, cls_score, bbox_pred, img_metas):

         batch_size = rois.shape[0]
         num_proposals_per_img = rois.shape[1]
@@ -364,72 +321,28 @@ class SplitTwoStageBaseDetector(DeployBaseDetector):
             rcnn_test_cfg)


-class ONNXRuntimeSTSBDetector(SplitTwoStageBaseDetector):
+class ONNXRuntimePTSDetector(PartitionTwoStageDetector):
     """Wrapper for detector's inference with ONNXRuntime."""

     def __init__(self, model_file, class_names, model_cfg, deploy_cfg,
                  device_id, **kwargs):
-        super().__init__(class_names, model_cfg, deploy_cfg, device_id,
-                         **kwargs)
-        import onnxruntime as ort
-
-        # get the custom op path
-        from mmdeploy.apis.onnxruntime import get_ops_path
-        ort_custom_op_path = get_ops_path()
-        session_options = ort.SessionOptions()
-        # register custom op for onnxruntime
-        if osp.exists(ort_custom_op_path):
-            session_options.register_custom_ops_library(ort_custom_op_path)
-        providers = ['CPUExecutionProvider']
-        options = [{}]
-        is_cuda_available = ort.get_device() == 'GPU'
-        if is_cuda_available:
-            providers.insert(0, 'CUDAExecutionProvider')
-            options.insert(0, {'device_id': device_id})
-
-        sess_list = []
-        io_binding_list = []
-        for m_file in model_file:
-            sess = ort.InferenceSession(m_file, session_options)
-            sess.set_providers(providers, options)
-            sess_list.append(sess)
-            io_binding_list.append(sess.io_binding())
-
-        self.sess_list = sess_list
-        self.io_binding_list = io_binding_list
-
-        output_names_list = []
-        num_split0_outputs = len(sess_list[0].get_outputs())
-        num_feat = num_split0_outputs - 2
-        output_names_list.append(
-            ['feat/{}'.format(i)
-             for i in range(num_feat)] + ['scores', 'boxes'])  # split0
-        output_names_list.append(['cls_score', 'bbox_pred'])  # split1
-        self.output_names_list = output_names_list
-
-        self.is_cuda_available = is_cuda_available
+        super(ONNXRuntimePTSDetector,
+              self).__init__(class_names, model_cfg, deploy_cfg, device_id,
+                             **kwargs)
+        from mmdeploy.apis.onnxruntime import ORTWrapper
+        self.model_list = [
+            ORTWrapper(file, device_id=device_id) for file in model_file
+        ]
+        num_partition0_outputs = len(self.model_list[0].output_names)
+        num_feat = num_partition0_outputs - 2
+        self.model_list[0].output_names = [
+            'feat/{}'.format(i) for i in range(num_feat)
+        ] + ['scores', 'boxes']
+        self.model_list[1].output_names = ['cls_score', 'bbox_pred']

     def forward_test(self, imgs, img_metas, *args, **kwargs):
         input_data = imgs[0]
-        # set io binding for inputs/outputs
-        device_type = 'cuda' if self.is_cuda_available else 'cpu'
-
-        # split0
-        if not self.is_cuda_available:
-            input_data = input_data.cpu()
-        self.io_binding_list[0].bind_input(
-            name='input',
-            device_type=device_type,
-            device_id=self.device_id,
-            element_type=np.float32,
-            shape=input_data.shape,
-            buffer_ptr=input_data.data_ptr())
-
-        for name in self.output_names_list[0]:
-            self.io_binding_list[0].bind_output(name)
-        # run session to get outputs
-        self.sess_list[0].run_with_iobinding(self.io_binding_list[0])
-        ort_outputs = self.io_binding_list[0].copy_outputs_to_cpu()
+        ort_outputs = self.model_list[0]({'input': input_data})
         feats = ort_outputs[:-2]
         scores, bboxes = ort_outputs[-2:]
         feats = [
@@ -438,95 +351,30 @@ class ONNXRuntimeSTSBDetector(SplitTwoStageBaseDetector):
         scores = torch.from_numpy(scores).to(input_data.device)
         bboxes = torch.from_numpy(bboxes).to(input_data.device)

-        # split0_postprocess
-        rois, bbox_feats = self.split0_postprocess(feats, scores, bboxes)
+        # partition0_postprocess
+        rois, bbox_feats = self.partition0_postprocess(feats, scores, bboxes)

-        # split1
-        if not self.is_cuda_available:
-            bbox_feats = bbox_feats.cpu()
-        self.io_binding_list[1].bind_input(
-            name='bbox_feats',
-            device_type=device_type,
-            device_id=self.device_id,
-            element_type=np.float32,
-            shape=bbox_feats.shape,
-            buffer_ptr=bbox_feats.data_ptr())
-
-        for name in self.output_names_list[1]:
-            self.io_binding_list[1].bind_output(name)
-        # run session to get outputs
-        self.sess_list[1].run_with_iobinding(self.io_binding_list[1])
-        ort_outputs = self.io_binding_list[1].copy_outputs_to_cpu()
+        # partition1
+        ort_outputs = self.model_list[1]({'bbox_feats': bbox_feats})
         cls_score, bbox_pred = ort_outputs[:2]
         cls_score = torch.from_numpy(cls_score).to(input_data.device)
         bbox_pred = torch.from_numpy(bbox_pred).to(input_data.device)

-        # split1_postprocess
-        return self.split1_postprocess(rois, cls_score, bbox_pred, img_metas)
+        # partition1_postprocess
+        return self.partition1_postprocess(rois, cls_score, bbox_pred,
+                                           img_metas)


-class PPLDetector(DeployBaseDetector):
-    """Wrapper for detector's inference with TensorRT."""
-
-    def __init__(self, model_file, class_names, device_id, **kwargs):
-        super(PPLDetector, self).__init__(class_names, device_id)
-        import pyppl.nn as pplnn
-        from mmdeploy.apis.ppl import register_engines
-
-        # enable quick select by default to speed up pipeline
-        # TODO: open it to users after ppl supports saving serialized models
-        # TODO: disable_avx512 will be removed or open to users in config
-        engines = register_engines(
-            device_id, disable_avx512=False, quick_select=True)
-        cuda_options = pplnn.CudaEngineOptions()
-        cuda_options.device_id = device_id
-        runtime_builder = pplnn.OnnxRuntimeBuilderFactory.CreateFromFile(
-            model_file, engines)
-        assert runtime_builder is not None, 'Failed to create '\
-            'OnnxRuntimeBuilder.'
-
-        runtime_options = pplnn.RuntimeOptions()
-        runtime = runtime_builder.CreateRuntime(runtime_options)
-        assert runtime is not None, 'Failed to create the instance of Runtime.'
-
-        self.runtime = runtime
-        self.CLASSES = class_names
-        self.device_id = device_id
-        self.inputs = [
-            runtime.GetInputTensor(i) for i in range(runtime.GetInputCount())
-        ]
-
-    def forward_test(self, imgs, *args, **kwargs):
-        import pyppl.common as pplcommon
-        input_data = imgs[0].contiguous()
-        self.inputs[0].ConvertFromHost(input_data.cpu().numpy())
-        status = self.runtime.Run()
-        assert status == pplcommon.RC_SUCCESS, 'Run() '\
-            'failed: ' + pplcommon.GetRetCodeStr(status)
-        status = self.runtime.Sync()
-        assert status == pplcommon.RC_SUCCESS, 'Sync() '\
-            'failed: ' + pplcommon.GetRetCodeStr(status)
-        outputs = []
-        for i in range(self.runtime.GetOutputCount()):
-            out_tensor = self.runtime.GetOutputTensor(i).ConvertToHost()
-            outputs.append(np.array(out_tensor, copy=False))
-        return outputs


-class TensorRTSTSBDetector(SplitTwoStageBaseDetector):
+class TensorRTPTSDetector(PartitionTwoStageDetector):
     """Wrapper for detector's inference with TensorRT."""

     def __init__(self, model_file, class_names, model_cfg, deploy_cfg,
                  device_id, **kwargs):
-        super().__init__(class_names, model_cfg, deploy_cfg, device_id,
-                         **kwargs)
+        super(TensorRTPTSDetector,
+              self).__init__(class_names, model_cfg, deploy_cfg, device_id,
+                             **kwargs)

-        from mmdeploy.apis.tensorrt import TRTWrapper, load_tensorrt_plugin
-        try:
-            load_tensorrt_plugin()
-        except (ImportError, ModuleNotFoundError):
-            warnings.warn('If input model has custom plugins, \
-                you may have to build backend ops with TensorRT')
+        from mmdeploy.apis.tensorrt import TRTWrapper

         model_list = []
         for m_file in model_file:
@@ -536,17 +384,17 @@ class TensorRTSTSBDetector(SplitTwoStageBaseDetector):
         self.model_list = model_list

         output_names_list = []
-        num_split0_outputs = len(model_list[0].output_names)
-        num_feat = num_split0_outputs - 2
+        num_partition0_outputs = len(model_list[0].output_names)
+        num_feat = num_partition0_outputs - 2
         output_names_list.append(
             ['feat/{}'.format(i)
-             for i in range(num_feat)] + ['scores', 'boxes'])  # split0
-        output_names_list.append(['cls_score', 'bbox_pred'])  # split1
+             for i in range(num_feat)] + ['scores', 'boxes'])  # partition0
+        output_names_list.append(['cls_score', 'bbox_pred'])  # partition1
         self.output_names_list = output_names_list

     def forward_test(self, imgs, img_metas, *args, **kwargs):

-        # split0 forward
+        # partition0 forward
         input_data = imgs[0].contiguous()
         with torch.cuda.device(self.device_id), torch.no_grad():
             outputs = self.model_list[0]({'input': input_data})

@@ -554,30 +402,30 @@ class TensorRTSTSBDetector(SplitTwoStageBaseDetector):
         feats = outputs[:-2]
         scores, bboxes = outputs[-2:]

-        # split0_postprocess
-        rois, bbox_feats = self.split0_postprocess(feats, scores, bboxes)
+        # partition0_postprocess
+        rois, bbox_feats = self.partition0_postprocess(feats, scores, bboxes)

-        # split1 forward
+        # partition1 forward
         bbox_feats = bbox_feats.contiguous()
         with torch.cuda.device(self.device_id), torch.no_grad():
             outputs = self.model_list[1]({'bbox_feats': bbox_feats})
         outputs = [outputs[name] for name in self.output_names_list[1]]
         cls_score, bbox_pred = outputs[:2]

-        # split1_postprocess
-        outputs = self.split1_postprocess(rois, cls_score, bbox_pred,
-                                          img_metas)
+        # partition1_postprocess
+        outputs = self.partition1_postprocess(rois, cls_score, bbox_pred,
+                                              img_metas)
         outputs = [out.detach().cpu() for out in outputs]
         return outputs


-class NCNNSTSBDetector(SplitTwoStageBaseDetector):
+class NCNNPTSDetector(PartitionTwoStageDetector):
     """Wrapper for detector's inference with NCNN."""

     def __init__(self, model_file, class_names, model_cfg, deploy_cfg,
                  device_id, **kwargs):
-        super().__init__(class_names, model_cfg, deploy_cfg, device_id,
-                         **kwargs)
+        super(NCNNPTSDetector, self).__init__(class_names, model_cfg,
+                                              deploy_cfg, device_id, **kwargs)
         from mmdeploy.apis.ncnn import NCNNWrapper
         assert self.device_id == -1
         assert len(model_file) == 4
@@ -588,9 +436,7 @@ class NCNNSTSBDetector(SplitTwoStageBaseDetector):
             model = NCNNWrapper(ncnn_param_file, ncnn_bin_file)
             model_list.append(model)

-        # TODO: update this after refactor
-        if isinstance(model_cfg, str):
-            model_cfg = mmcv.Config.fromfile(model_cfg)
+        model_cfg = load_config(model_cfg)[0]
         num_output_stage1 = model_cfg['model']['neck']['num_outs']

         output_names_list = []

@@ -619,7 +465,7 @@ class NCNNSTSBDetector(SplitTwoStageBaseDetector):
         scores, bboxes = outputs[-2:]

         # stage0_postprocess
-        rois, bbox_feats = self.split0_postprocess(feats, scores, bboxes)
+        rois, bbox_feats = self.partition0_postprocess(feats, scores, bboxes)

         # stage1 forward
         out_stage1 = self.model_list[1]({'bbox_feats': bbox_feats})
@@ -627,20 +473,15 @@ class NCNNSTSBDetector(SplitTwoStageBaseDetector):
         bbox_pred = out_stage1['bbox_pred']

         # stage1_postprocess
-        outputs = self.split1_postprocess(rois, cls_score, bbox_pred,
-                                          img_metas)
+        outputs = self.partition1_postprocess(rois, cls_score, bbox_pred,
+                                              img_metas)
         outputs = [out.detach().cpu() for out in outputs]
         return outputs


 def get_classes_from_config(model_cfg: Union[str, mmcv.Config], **kwargs):
-    if isinstance(model_cfg, str):
-        model_cfg = mmcv.Config.fromfile(model_cfg)
-    elif not isinstance(model_cfg, (mmcv.Config, mmcv.ConfigDict)):
-        raise TypeError('config must be a filename or Config object, '
-                        f'but got {type(model_cfg)}')
-
-    from mmdet.datasets import DATASETS
+    # load cfg if necessary
+    model_cfg = load_config(model_cfg)[0]
     module_dict = DATASETS.module_dict
     data_cfg = model_cfg.data
@@ -658,50 +499,43 @@ def get_classes_from_config(model_cfg: Union[str, mmcv.Config], **kwargs):

 ONNXRUNTIME_DETECTOR_MAP = dict(
     end2end=ONNXRuntimeDetector,
-    single_stage_base=ONNXRuntimeSSSBDetector,
-    two_stage_base=ONNXRuntimeSTSBDetector)
+    single_stage_base=ONNXRuntimePSSDetector,
+    two_stage_base=ONNXRuntimePTSDetector)
 TENSORRT_DETECTOR_MAP = dict(
-    end2end=TensorRTDetector, two_stage_base=TensorRTSTSBDetector)
+    end2end=TensorRTDetector, two_stage_base=TensorRTPTSDetector)

 PPL_DETECTOR_MAP = dict(end2end=PPLDetector)
 NCNN_DETECTOR_MAP = dict(
-    single_stage_base=NCNNSSSBDetector, two_stage_base=NCNNSTSBDetector)
+    single_stage_base=NCNNPSSDetector, two_stage_base=NCNNPTSDetector)

-BACKEND_DETECTOR_MAP = dict(
-    onnxruntime=ONNXRUNTIME_DETECTOR_MAP,
-    tensorrt=TENSORRT_DETECTOR_MAP,
-    ppl=PPL_DETECTOR_MAP,
-    ncnn=NCNN_DETECTOR_MAP)
+BACKEND_DETECTOR_MAP = {
+    Backend.ONNXRUNTIME: ONNXRUNTIME_DETECTOR_MAP,
+    Backend.TENSORRT: TENSORRT_DETECTOR_MAP,
+    Backend.PPL: PPL_DETECTOR_MAP,
+    Backend.NCNN: NCNN_DETECTOR_MAP
+}


 def build_detector(model_files, model_cfg, deploy_cfg, device_id, **kwargs):
+    # load cfg if necessary
+    deploy_cfg = load_config(deploy_cfg)[0]
+    model_cfg = load_config(model_cfg)[0]

-    if isinstance(model_cfg, str):
-        model_cfg = mmcv.Config.fromfile(model_cfg)
-    elif not isinstance(model_cfg, (mmcv.Config, mmcv.ConfigDict)):
-        raise TypeError('config must be a filename or Config object, '
-                        f'but got {type(model_cfg)}')
-
-    if isinstance(deploy_cfg, str):
-        deploy_cfg = mmcv.Config.fromfile(deploy_cfg)
-    elif not isinstance(deploy_cfg, (mmcv.Config, mmcv.ConfigDict)):
-        raise TypeError('config must be a filename or Config object, '
-                        f'but got {type(deploy_cfg)}')
-
-    backend = deploy_cfg['backend']
+    backend = get_backend(deploy_cfg)
     class_names = get_classes_from_config(model_cfg)

     assert backend in BACKEND_DETECTOR_MAP, \
-        f'Unsupported backend type: {backend}'
+        f'Unsupported backend type: {backend.value}'
     detector_map = BACKEND_DETECTOR_MAP[backend]

-    split_type = 'end2end'
+    partition_type = 'end2end'
     if deploy_cfg.get('apply_marks', False):
-        split_params = deploy_cfg.get('split_params', dict())
-        split_type = split_params.get('split_type', None)
+        partition_params = deploy_cfg.get('partition_params', dict())
+        partition_type = partition_params.get('partition_type', None)

-    assert split_type in detector_map, f'Unsupported split type: {split_type}'
-    backend_detector_class = detector_map[split_type]
+    assert partition_type in detector_map,\
+        f'Unsupported partition type: {partition_type}'
+    backend_detector_class = detector_map[partition_type]

     model_files = model_files[0] if len(model_files) == 1 else model_files
     backend_detector = backend_detector_class(
@@ -0,0 +1,19 @@
import numpy as np

from mmdeploy.utils import Backend


def show_result(model,
                image: np.ndarray,
                result,
                output_file: str,
                backend: Backend,
                show=True,
                score_thr=0.3):
    return model.show_result(
        image,
        result,
        score_thr=score_thr,
        show=show,
        win_name=backend.value,
        out_file=output_file)
@@ -1,12 +1,10 @@
-from .model_split import get_split_cfg
-from .model_wrappers import ONNXRuntimeDetector, PPLDetector, TensorRTDetector
-from .onnx_helper import clip_bboxes
+from .model_partition import get_partition_cfg
+from .onnx_utils import clip_bboxes
 from .prepare_input import (build_dataloader, build_dataset, create_input,
                             get_tensor_from_input)
 from .tensorrt_helper import pad_with_value

 __all__ = [
-    'get_split_cfg', 'clip_bboxes', 'TensorRTDetector', 'create_input',
-    'build_dataloader', 'build_dataset', 'get_tensor_from_input',
-    'ONNXRuntimeDetector', 'pad_with_value', 'PPLDetector'
+    'get_partition_cfg', 'clip_bboxes', 'create_input', 'build_dataloader',
+    'build_dataset', 'get_tensor_from_input', 'pad_with_value'
 ]
@@ -1,7 +1,7 @@
-MMDET_SPLIT_CFG = dict(
+MMDET_PARTITION_CFG = dict(
     single_stage_base=[
         dict(
-            save_file='split0.onnx',
+            save_file='partition0.onnx',
             start='detector_forward:input',
             end='multiclass_nms:input',
             dynamic_axes={

@@ -23,7 +23,7 @@ MMDET_SPLIT_CFG = dict(
     ],
     two_stage_base=[
         dict(
-            save_file='split0.onnx',
+            save_file='partition0.onnx',
             start='detector_forward:input',
             end=['extract_feat:output', 'multiclass_nms[0]:input'],
             dynamic_axes={

@@ -43,7 +43,7 @@ MMDET_SPLIT_CFG = dict(
             },
         ),
         dict(
-            save_file='split1.onnx',
+            save_file='partition1.onnx',
             start='roi_extractor:output',
             end='bbox_head_forward:output',
             dynamic_axes={

@@ -61,6 +61,7 @@ MMDET_SPLIT_CFG = dict(
     ])


-def get_split_cfg(split_type):
-    assert (split_type in MMDET_SPLIT_CFG), f'Unknow split_type {split_type}'
-    return MMDET_SPLIT_CFG[split_type]
+def get_partition_cfg(partition_type):
+    assert (partition_type
+            in MMDET_PARTITION_CFG), f'Unknown partition_type {partition_type}'
+    return MMDET_PARTITION_CFG[partition_type]
|
|
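Callers look up the partition recipe by name; for a two-stage detector the config describes two ONNX files split at the RoI extractor. A quick sketch of how the lookup is expected to behave after this rename:

# Sketch: exercises get_partition_cfg as defined above.
cfg = get_partition_cfg('two_stage_base')
assert cfg[0]['save_file'] == 'partition0.onnx'
assert cfg[1]['save_file'] == 'partition1.onnx'

# An unknown key fails fast with the corrected error message:
# get_partition_cfg('no_such_type')  # AssertionError: Unknown partition_type ...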
@ -8,16 +8,13 @@ from mmdet.datasets import build_dataset as build_dataset_mmdet
from mmdet.datasets import replace_ImageToTensor
from mmdet.datasets.pipelines import Compose

+from mmdeploy.utils.config_utils import load_config


def create_input(model_cfg: Union[str, mmcv.Config],
                 imgs: Any,
                 device: str = 'cuda:0'):
-    if isinstance(model_cfg, str):
-        model_cfg = mmcv.Config.fromfile(model_cfg)
-    elif not isinstance(model_cfg, (mmcv.Config, mmcv.ConfigDict)):
-        raise TypeError('config must be a filename or Config object, '
-                        f'but got {type(model_cfg)}')
-    cfg = model_cfg.copy()
+    cfg = load_config(model_cfg)[0].copy()

    if not isinstance(imgs, (list, tuple)):
        imgs = [imgs]

@ -55,16 +52,26 @@ def create_input(model_cfg: Union[str, mmcv.Config],
def build_dataset(dataset_cfg: Union[str, mmcv.Config],
                  dataset_type: str = 'val',
                  **kwargs):
-    if isinstance(dataset_cfg, str):
-        dataset_cfg = mmcv.Config.fromfile(dataset_cfg)
-    elif not isinstance(dataset_cfg, (mmcv.Config, mmcv.ConfigDict)):
-        raise TypeError('config must be a filename or Config object, '
-                        f'but got {type(dataset_cfg)}')
+    dataset_cfg = load_config(dataset_cfg)[0].copy()

-    data = dataset_cfg.data
-    assert dataset_type in data
-
-    dataset = build_dataset_mmdet(data[dataset_type])
+    assert dataset_type in dataset_cfg.data
+    data_cfg = dataset_cfg.data[dataset_type]
+    # in case the dataset is concatenated
+    if isinstance(data_cfg, dict):
+        data_cfg.test_mode = True
+        samples_per_gpu = data_cfg.get('samples_per_gpu', 1)
+        if samples_per_gpu > 1:
+            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
+            data_cfg.pipeline = replace_ImageToTensor(data_cfg.pipeline)
+    elif isinstance(data_cfg, list):
+        for ds_cfg in data_cfg:
+            ds_cfg.test_mode = True
+        samples_per_gpu = max(
+            [ds_cfg.get('samples_per_gpu', 1) for ds_cfg in data_cfg])
+        if samples_per_gpu > 1:
+            for ds_cfg in data_cfg:
+                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
+    dataset = build_dataset_mmdet(data_cfg)

    return dataset
@ -73,8 +80,8 @@ def build_dataloader(dataset,
                     samples_per_gpu: int,
                     workers_per_gpu: int,
                     num_gpus: int = 1,
-                    dist: bool = True,
-                    shuffle: bool = True,
+                    dist: bool = False,
+                    shuffle: bool = False,
                     seed: Optional[int] = None,
                     **kwargs):
    return build_dataloader_mmdet(
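With dist and shuffle now defaulting to False, a deployment-side evaluation loop gets deterministic, single-process batches without extra arguments. A minimal sketch under that assumption (the config path is a placeholder):

# Sketch: assumes an mmdet config file on disk; the path is a placeholder.
dataset = build_dataset('configs/faster_rcnn.py', dataset_type='val')
dataloader = build_dataloader(dataset, samples_per_gpu=1, workers_per_gpu=1)
for data in dataloader:
    break  # each batch is ready for the backend detector's forward pass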
|
@ -4,6 +4,7 @@ from mmdeploy.core import FUNCTION_REWRITER
|
|||
from mmdeploy.mmdet.core import multiclass_nms
|
||||
from mmdeploy.mmdet.export import pad_with_value
|
||||
from mmdeploy.utils import is_dynamic_shape
|
||||
from mmdeploy.utils.config_utils import Backend, get_backend
|
||||
|
||||
|
||||
@FUNCTION_REWRITER.register_rewriter(
|
||||
|
@ -56,10 +57,10 @@ def get_bboxes_of_anchor_head(ctx,
|
|||
|
||||
anchors = anchors.expand_as(bbox_pred)
|
||||
|
||||
backend = deploy_cfg['backend']
|
||||
backend = get_backend(deploy_cfg)
|
||||
# topk in tensorrt does not support shape<k
|
||||
# concate zero to enable topk,
|
||||
if backend == 'tensorrt':
|
||||
if backend == Backend.TENSORRT:
|
||||
anchors = pad_with_value(anchors, 1, pre_topk)
|
||||
bbox_pred = pad_with_value(bbox_pred, 1, pre_topk)
|
||||
scores = pad_with_value(scores, 1, pre_topk, 0.)
|
||||
|
|
|
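The rewriter pads the score tensor (and every tensor indexed alongside it) up to pre_topk because TensorRT's topk requires k to be no larger than the dimension it selects from; padding scores with zeros keeps the filler entries from ever being picked. A rough illustration of the padding semantics, assuming pad_with_value appends along the given dim (this is a sketch, not the library implementation):

# Illustration of the padding idea only; pad_with_value itself lives in
# mmdeploy.mmdet.export.
import torch


def pad_with_value_sketch(x, dim, padded_size, pad_value=None):
    # Append zeros (or pad_value) along `dim` until it reaches padded_size.
    pad_size = padded_size - x.shape[dim]
    if pad_size <= 0:
        return x
    shape = list(x.shape)
    shape[dim] = pad_size
    pad = x.new_zeros(shape) if pad_value is None else x.new_full(
        shape, pad_value)
    return torch.cat([x, pad], dim=dim)


scores = torch.rand(1, 3, 4)                    # fewer candidates than pre_topk
padded = pad_with_value_sketch(scores, 1, 1000, 0.)
print(padded.shape)                             # torch.Size([1, 1000, 4])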
@ -4,6 +4,7 @@ from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.mmdet.core import distance2bbox, multiclass_nms
from mmdeploy.mmdet.export import pad_with_value
from mmdeploy.utils import is_dynamic_shape
+from mmdeploy.utils.config_utils import Backend, get_backend


@FUNCTION_REWRITER.register_rewriter(
@ -60,10 +61,10 @@ def get_bboxes_of_fcos_head(ctx,

    points = points.expand(batch_size, -1, 2)

-    backend = deploy_cfg['backend']
+    backend = get_backend(deploy_cfg)
    # topk in tensorrt does not support shape<k
    # concate zero to enable topk,
-    if backend == 'tensorrt':
+    if backend == Backend.TENSORRT:
        scores = pad_with_value(scores, 1, pre_topk, 0.)
        centerness = pad_with_value(centerness, 1, pre_topk)
        bbox_pred = pad_with_value(bbox_pred, 1, pre_topk)
@ -4,6 +4,7 @@ from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.mmdet.core import multiclass_nms
from mmdeploy.mmdet.export import pad_with_value
from mmdeploy.utils import is_dynamic_shape
+from mmdeploy.utils.config_utils import Backend, get_backend


@FUNCTION_REWRITER.register_rewriter('mmdet.models.RPNHead.get_bboxes')
@ -60,10 +61,10 @@ def get_bboxes_of_rpn_head(ctx,

    anchors = anchors.expand_as(bbox_pred)

-    backend = deploy_cfg['backend']
+    backend = get_backend(deploy_cfg)
    # topk in tensorrt does not support shape<k
    # concate zero to enable topk,
-    if backend == 'tensorrt':
+    if backend == Backend.TENSORRT:
        scores = pad_with_value(scores, 1, pre_topk, 0.)
        bbox_pred = pad_with_value(bbox_pred, 1, pre_topk)
        anchors = pad_with_value(anchors, 1, pre_topk)
|
@ -2,6 +2,7 @@ import torch
|
|||
import torch.nn.functional as F
|
||||
|
||||
from mmdeploy.core import FUNCTION_REWRITER
|
||||
from mmdeploy.utils.config_utils import Backend, get_backend
|
||||
|
||||
|
||||
@FUNCTION_REWRITER.register_rewriter(
|
||||
|
@ -21,7 +22,7 @@ def get_seg_masks_of_fcn_mask_head(ctx, self, mask_pred, det_bboxes,
|
|||
Returns:
|
||||
Tensor: a mask of shape (N, img_h, img_w).
|
||||
"""
|
||||
backend = ctx.cfg.get('backend', 'default')
|
||||
backend = get_backend(ctx.cfg, 'default')
|
||||
mask_pred = mask_pred.sigmoid()
|
||||
bboxes = det_bboxes[:, :4]
|
||||
labels = det_labels
|
||||
|
@ -31,7 +32,7 @@ def get_seg_masks_of_fcn_mask_head(ctx, self, mask_pred, det_bboxes,
|
|||
mask_pred = mask_pred[box_inds, labels][:, None]
|
||||
masks, _ = _do_paste_mask(
|
||||
mask_pred, bboxes, ori_shape[0], ori_shape[1], skip_empty=False)
|
||||
if backend == 'tensorrt':
|
||||
if backend == Backend.TENSORRT:
|
||||
return masks
|
||||
if threshold >= 0:
|
||||
masks = (masks >= threshold).to(dtype=torch.bool)
|
||||
|
|
|
@ -0,0 +1,2 @@
|
|||
from .export import * # noqa: F401,F403
|
||||
from .models import * # noqa: F401,F403
|
|
@ -0,0 +1,4 @@
from .inference import build_editing_processor
from .visualize import show_result

__all__ = ['build_editing_processor', 'show_result']
@ -0,0 +1,159 @@
import warnings

import numpy as np
import torch
from mmedit.core import psnr, ssim, tensor2img
from mmedit.models import BaseModel

from mmdeploy.utils.config_utils import Backend, get_backend, load_config


class DeployBaseRestorer(BaseModel):
    """Base Class of Wrapper for restorer's inference."""

    allowed_metrics = {'PSNR': psnr, 'SSIM': ssim}

    def __init__(self, device_id, test_cfg=None, **kwargs):
        super(DeployBaseRestorer, self).__init__(**kwargs)
        self.test_cfg = test_cfg
        self.device_id = device_id

    def init_weights(self):
        raise NotImplementedError('This method is not implemented.')

    def forward(self, lq, test_mode=False, **kwargs):
        if (test_mode):
            return self.forward_test(lq, **kwargs)
        else:
            return self.forward_dummy(lq, **kwargs)

    def forward_train(self, imgs, labels):
        raise NotImplementedError('This method is not implemented.')

    def forward_test(self, lq, gt=None, **kwargs):
        outputs = self.forward_dummy(lq)
        result = self._test_post_process(outputs, lq, gt)
        return result

    def train_step(self, data_batch, optimizer):
        raise NotImplementedError('This method is not implemented.')

    def evaluate(self, output, gt):
        """Evaluation function. (Copy from mmedit)

        Args:
            output (Tensor): Model output with shape (n, c, h, w).
            gt (Tensor): GT Tensor with shape (n, c, h, w).

        Returns:
            dict: Evaluation results.
        """
        crop_border = self.test_cfg.crop_border

        if isinstance(output, np.ndarray):
            output = torch.from_numpy(output)
        output = tensor2img(output)
        gt = tensor2img(gt)

        eval_result = dict()
        for metric in self.test_cfg.metrics:
            eval_result[metric] = self.allowed_metrics[metric](output, gt,
                                                               crop_border)
        return eval_result

    def _test_post_process(self, outputs, lq, gt=None):
        if self.test_cfg is not None and self.test_cfg.get('metrics', None):
            assert gt is not None, (
                'evaluation with metrics must have gt images.')
            results = dict(eval_result=self.evaluate(outputs, gt))
        else:
            results = dict(lq=lq.cpu(), output=outputs)
            if gt is not None:
                results['gt'] = gt.cpu()

        return results


class ONNXRuntimeRestorer(DeployBaseRestorer):
    """Wrapper for restorer's inference with ONNXRuntime."""

    def __init__(self, model_file, device_id, test_cfg=None, **kwargs):
        super(ONNXRuntimeRestorer, self).__init__(
            device_id, test_cfg=test_cfg, **kwargs)

        from mmdeploy.apis.onnxruntime import ORTWrapper
        self.model = ORTWrapper(model_file, device_id)

    def forward_dummy(self, lq, *args, **kwargs):
        ort_outputs = self.model({'input': lq})
        # only concern pred_alpha value
        if isinstance(ort_outputs, (tuple, list)):
            ort_outputs = ort_outputs[0]
        return ort_outputs


class TensorRTRestorer(DeployBaseRestorer):

    def __init__(self, trt_file, device_id, test_cfg=None, **kwargs):
        super(TensorRTRestorer, self).__init__(
            device_id, test_cfg=test_cfg, **kwargs)

        from mmdeploy.apis.tensorrt import TRTWrapper, load_tensorrt_plugin
        try:
            load_tensorrt_plugin()
        except (ImportError, ModuleNotFoundError):
            warnings.warn('If input model has custom plugins, \
                you may have to build backend ops with TensorRT')
        model = TRTWrapper(trt_file)
        self.model = model

    def forward_dummy(self, img, *args, **kwargs):
        input_data = img.contiguous()
        with torch.cuda.device(self.device_id), torch.no_grad():
            pred = self.model({'input': input_data})['output']
        pred = pred.detach().cpu().numpy()
        return pred


ONNXRUNTIME_RESTORER_MAP = dict(end2end=ONNXRuntimeRestorer)

TENSORRT_RESTORER_MAP = dict(end2end=TensorRTRestorer)

# TODO: Coming Soon
# PPL_RESTORER_MAP = dict(end2end=PPLClassifier)
# NCNN_RESTORER_MAP = dict(end2end=NCNNClassifier)

BACKEND_RESTORER_MAP = {
    Backend.ONNXRUNTIME: ONNXRUNTIME_RESTORER_MAP,
    Backend.TENSORRT: TENSORRT_RESTORER_MAP,
    # TODO: Coming Soon
    # Backend.PPL: PPL_RESTORER_MAP,
    # Backend.NCNN: NCNN_RESTORER_MAP
}


def build_restorer(model_files, backend, model_cfg, device_id):
    model_map = BACKEND_RESTORER_MAP[backend]

    model_type = 'end2end'
    assert model_type in model_map, f'Unsupported model type: {model_type}'
    backend_model_class = model_map[model_type]

    backend_model = backend_model_class(
        model_files[0], device_id=device_id, test_cfg=model_cfg.test_cfg)

    return backend_model


def build_editing_processor(model_files, model_cfg, deploy_cfg, device_id):

    model_cfg = load_config(model_cfg)[0]
    deploy_cfg = load_config(deploy_cfg)[0]

    backend = get_backend(deploy_cfg)

    assert backend in BACKEND_RESTORER_MAP, \
        f'Unsupported backend type: {backend.value}'

    # TODO: Add other tasks
    return build_restorer(model_files, backend, model_cfg, device_id)
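End to end, the restorer wrapper is constructed from a deploy config plus the exported model file. A hedged usage sketch; all file names below are placeholders, not files shipped in this PR:

# Sketch: placeholder paths for an exported SRCNN model and its configs.
processor = build_editing_processor(
    model_files=['srcnn.onnx'],
    model_cfg='configs/restorers/srcnn.py',
    deploy_cfg='configs/mmedit/super-resolution_onnxruntime_dynamic.py',
    device_id=0)
# outputs = processor(lq_tensor, test_mode=False)  # runs forward_dummy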
@ -0,0 +1,31 @@
import warnings

import mmcv
import numpy as np
import torch

from mmdeploy.utils import Backend


# BaseModel in mmedit doesn't implement show_result
# TODO: add show_result to different tasks
def show_result(result, output_file, backend: Backend, show=True):
    win_name = backend.value
    with torch.no_grad():
        result = result.transpose(1, 2, 0)
        result = np.clip(result, 0, 1)[:, :, ::-1]
        result = (result * 255.0).round()

        if output_file is not None:
            show = False

        if show:
            int_result = result.astype(np.uint8)
            mmcv.imshow(int_result, win_name, 0)
        if output_file is not None:
            mmcv.imwrite(result, output_file)

    if not (show or output_file):
        warnings.warn('show==False and output_file is not specified, only '
                      'result image will be returned')
    return result
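Unlike the detection and segmentation helpers, this one post-processes the raw network output itself (CHW float in [0, 1] to HWC BGR in [0, 255]) before showing or writing it. A small sketch with a fake output standing in for a real restoration result:

# Sketch: a random CHW float array stands in for a real restorer output.
import numpy as np

from mmdeploy.utils import Backend

fake_output = np.random.rand(3, 64, 64).astype(np.float32)
img = show_result(fake_output, 'restored.png', Backend.ONNXRUNTIME,
                  show=False)
print(img.shape)  # (64, 64, 3), BGR channel order, values in [0, 255]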
@ -0,0 +1,3 @@
from .prepare_input import build_dataloader, build_dataset, create_input

__all__ = ['create_input', 'build_dataset', 'build_dataloader']
@ -0,0 +1,93 @@
from typing import Any, Union

import mmcv
import numpy as np
from mmcv.parallel import collate, scatter
from mmedit.datasets import build_dataloader as build_dataloadeer_mmedit
from mmedit.datasets import build_dataset as build_dataset_mmedit
from mmedit.datasets.pipelines import Compose

from mmdeploy.utils.config_utils import Task, load_config


def _preprocess_cfg(config):
    # TODO: Differentiate the editting tasks (e.g. restorers and mattors
    # preprocess the data in differenet ways)

    keys_to_remove = ['gt', 'gt_path']
    for key in keys_to_remove:
        for pipeline in list(config.test_pipeline):
            if 'key' in pipeline and key == pipeline['key']:
                config.test_pipeline.remove(pipeline)
            if 'keys' in pipeline and key in pipeline['keys']:
                pipeline['keys'].remove(key)
                if len(pipeline['keys']) == 0:
                    config.test_pipeline.remove(pipeline)
            if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
                pipeline['meta_keys'].remove(key)


def create_input(model_cfg: Union[str, mmcv.Config],
                 imgs: Any,
                 device: str = 'cuda:0',
                 task: Task = Task.SUPER_RESOLUTION):
    if isinstance(imgs, (list, tuple)):
        if not isinstance(imgs[0], (np.ndarray, str)):
            raise AssertionError('imgs must be strings or numpy arrays')
    elif isinstance(imgs, (np.ndarray, str)):
        imgs = [imgs]
    else:
        raise AssertionError('imgs must be strings or numpy arrays')

    cfg = load_config(model_cfg)[0].copy()
    _preprocess_cfg(cfg)

    if isinstance(imgs[0], np.ndarray):
        cfg = cfg.copy()
        # set loading pipeline type
        cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'

    test_pipeline = Compose(cfg.test_pipeline)

    data_arr = []
    for img in imgs:
        # TODO: This is only for restore. Add condiction statement
        data = dict(lq_path=img)

        data = test_pipeline(data)
        data_arr.append(data)

    data = collate(data_arr, samples_per_gpu=len(imgs))

    # TODO: This is only for restore. Add condiction statement
    data['img'] = data['lq']

    if device != 'cpu':
        data = scatter(data, [device])[0]

    return data, data['img']


def build_dataset(dataset_cfg: Union[str, mmcv.Config], **kwargs):
    dataset_cfg = load_config(dataset_cfg)[0]
    data = dataset_cfg.data

    dataset = build_dataset_mmedit(data.test)
    return dataset


def build_dataloader(dataset,
                     samples_per_gpu: int,
                     workers_per_gpu: int,
                     num_gpus=1,
                     dist=False,
                     shuffle=False,
                     seed=None,
                     drop_last=False,
                     pin_memory=True,
                     persistent_workers=True,
                     **kwargs):

    return build_dataloadeer_mmedit(dataset, samples_per_gpu, workers_per_gpu,
                                    num_gpus, dist, shuffle, seed, drop_last,
                                    pin_memory, persistent_workers, **kwargs)
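create_input builds a collated batch straight from image paths (or arrays) using the model's own test pipeline, with the ground-truth steps stripped out by _preprocess_cfg. A sketch under the assumption that a low-quality input image exists at the placeholder path:

# Sketch: placeholder config and image paths.
data, img_tensor = create_input(
    'configs/restorers/srcnn.py', 'tests/data/lq.png', device='cpu')
# img_tensor is the collated 'lq' tensor, ready for the restorer wrapper.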
@ -0,0 +1 @@
from .backbones import *  # noqa: F401,F403

@ -0,0 +1 @@
from .sr_backbones import *  # noqa: F401,F403

@ -0,0 +1,3 @@
from .srcnn import SRCNNWrapper

__all__ = ['SRCNNWrapper']
@ -0,0 +1,47 @@
import torch.nn as nn

from mmdeploy.core import MODULE_REWRITER


@MODULE_REWRITER.register_rewrite_module(
    'mmedit.models.backbones.sr_backbones.SRCNN', backend='tensorrt')
class SRCNNWrapper(nn.Module):
    """SRCNN network structure for image super resolution.

    SRCNN has three conv layers. For each layer, we can define the
    `in_channels`, `out_channels` and `kernel_size`.
    The input image will first be upsampled with a bicubic upsampler, and then
    super-resolved in the HR spatial size.
    Because TensorRT doesn't support bicubic operator, when deployment we use
    bilinear instead. According to the experiments, the precision may decrease
    about 4%.

    Paper: Learning a Deep Convolutional Network for Image Super-Resolution.

    Args:
        channels (tuple[int]): A tuple of channel numbers for each layer
            including channels of input and output. Default: (3, 64, 32, 3).
        kernel_sizes (tuple[int]): A tuple of kernel sizes for each conv layer.
            Default: (9, 1, 5).
        upscale_factor (int): Upsampling factor. Default: 4.
    """

    def __init__(self,
                 module,
                 channels=(3, 64, 32, 3),
                 kernel_sizes=(9, 1, 5),
                 upscale_factor=4):
        super(SRCNNWrapper, self).__init__()

        self._module = module

        module.img_upsampler = nn.Upsample(
            scale_factor=module.upscale_factor,
            mode='bilinear',
            align_corners=False)

    def forward(self, *args, **kwargs):
        return self._module(*args, **kwargs)

    def init_weights(self, *args, **kwargs):
        return self._module.init_weights(*args, **kwargs)
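The wrapper is registered against the fully qualified class name, so when the deploy backend is TensorRT the module rewriter swaps the original SRCNN for this class and the bicubic upsampler is replaced in place. A conceptual sketch of what the patching amounts to; the exact mechanics live in mmdeploy.core, so the lines below are illustrative comments only:

# Conceptual sketch, not the rewriter's real code path:
# original = SRCNN(...)              # model built by mmedit
# patched = SRCNNWrapper(original)   # upsampler swapped to bilinear
# torch.onnx.export(patched, ...)    # exported graph is TensorRT-friendly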
@ -0,0 +1,4 @@
from .inference import build_ocr_processor
from .visualize import show_result

__all__ = ['build_ocr_processor', 'show_result']
@ -3,10 +3,14 @@ from typing import Iterable, Union
import mmcv
import torch
+from mmdet.models.builder import DETECTORS
+from mmocr.datasets import DATASETS
from mmocr.models.textdet.detectors import (SingleStageTextDetector,
                                            TextDetectorMixin)
from mmocr.models.textrecog.recognizer import EncodeDecodeRecognizer

+from mmdeploy.utils.config_utils import (Backend, Task, get_backend,
+                                         get_task_type, load_config)


+@DETECTORS.register_module()
class DeployBaseTextDetector(TextDetectorMixin, SingleStageTextDetector):

@ -15,7 +19,9 @@ class DeployBaseTextDetector(TextDetectorMixin, SingleStageTextDetector):
    def __init__(self,
                 cfg: Union[mmcv.Config, mmcv.ConfigDict],
                 device_id: int,
-                 show_score: bool = False):
+                 show_score: bool = False,
+                 *args,
+                 **kwargs):
        SingleStageTextDetector.__init__(self, cfg.model.backbone,
                                         cfg.model.neck, cfg.model.bbox_head)
        TextDetectorMixin.__init__(self, show_score)

@ -35,8 +41,11 @@ class DeployBaseTextDetector(TextDetectorMixin, SingleStageTextDetector):
    def simple_test(self,
                    img: torch.Tensor,
                    img_metas: Iterable,
-                    rescale: bool = False):
-        pred = self.forward_of_backend(img, img_metas, rescale)
+                    rescale: bool = False,
+                    *args,
+                    **kwargs):
+        pred = self.forward_of_backend(img, img_metas, rescale, *args,
+                                       **kwargs)
        if len(img_metas) > 1:
            boundaries = [
                self.bbox_head.get_boundary(*(pred[i].unsqueeze(0)),

@ -57,12 +66,13 @@ class DeployBaseRecognizer(EncodeDecodeRecognizer):
    def __init__(self,
                 cfg: Union[mmcv.Config, mmcv.ConfigDict],
                 device_id: int,
-                 show_score: bool = False):
-        EncodeDecodeRecognizer.__init__(self, None, cfg.model.backbone,
-                                        cfg.model.encoder, cfg.model.decoder,
-                                        cfg.model.loss,
-                                        cfg.model.label_convertor, None, None,
-                                        40, None)
+                 show_score: bool = False,
+                 *args,
+                 **kwargs):
+        super(DeployBaseRecognizer,
+              self).__init__(None, cfg.model.backbone, cfg.model.encoder,
+                             cfg.model.decoder, cfg.model.loss,
+                             cfg.model.label_convertor, None, None, 40, None)
        self.device_id = device_id
        self.show_score = show_score
        self.cfg = cfg

@ -102,7 +112,9 @@ class DeployBaseRecognizer(EncodeDecodeRecognizer):
    def simple_test(self,
                    img: torch.Tensor,
                    img_metas: Iterable,
-                    rescale: bool = False):
+                    rescale: bool = False,
+                    *args,
+                    **kwargs):
        """Test function.

        Args:

@ -112,7 +124,8 @@ class DeployBaseRecognizer(EncodeDecodeRecognizer):
        Returns:
            list[str]: Text label result of each image.
        """
-        pred = self.forward_of_backend(img, img_metas, rescale)
+        pred = self.forward_of_backend(img, img_metas, rescale, *args,
+                                       **kwargs)
        label_indexes, label_scores = self.label_convertor.tensor2idx(
            pred, img_metas)
        label_strings = self.label_convertor.idx2str(label_indexes)

@ -129,19 +142,23 @@ class ONNXRuntimeDetector(DeployBaseTextDetector):
    """The class for evaluating onnx file of detection."""

    def __init__(self,
-                 onnx_file: str,
+                 model_file: str,
                 cfg: Union[mmcv.Config, mmcv.ConfigDict],
                 device_id: int,
-                 show_score: bool = False):
+                 show_score: bool = False,
+                 *args,
+                 **kwargs):
        super(ONNXRuntimeDetector, self).__init__(cfg, device_id, show_score)
        from mmdeploy.apis.onnxruntime import ORTWrapper
-        self.model = ORTWrapper(onnx_file, device_id)
+        self.model = ORTWrapper(model_file, device_id)

    def forward_of_backend(self,
                           img: torch.Tensor,
                           img_metas: Iterable,
-                           rescale: bool = False):
-        onnx_pred = self.model(img)
+                           rescale: bool = False,
+                           *args,
+                           **kwargs):
+        onnx_pred = self.model({'input': img})
        onnx_pred = torch.from_numpy(onnx_pred[0])
        return onnx_pred

@ -150,19 +167,23 @@ class ONNXRuntimeRecognizer(DeployBaseRecognizer):
    """The class for evaluating onnx file of recognition."""

    def __init__(self,
-                 onnx_file: str,
+                 model_file: str,
                 cfg: Union[mmcv.Config, mmcv.ConfigDict],
                 device_id: int,
-                 show_score: bool = False):
+                 show_score: bool = False,
+                 *args,
+                 **kwargs):
        super(ONNXRuntimeRecognizer, self).__init__(cfg, device_id, show_score)
        from mmdeploy.apis.onnxruntime import ORTWrapper
-        self.model = ORTWrapper(onnx_file, device_id)
+        self.model = ORTWrapper(model_file, device_id)

    def forward_of_backend(self,
                           img: torch.Tensor,
                           img_metas: Iterable,
-                           rescale: bool = False):
-        onnx_pred = self.model(img)
+                           rescale: bool = False,
+                           *args,
+                           **kwargs):
+        onnx_pred = self.model({'input': img})
        onnx_pred = torch.from_numpy(onnx_pred[0])
        return onnx_pred

@ -171,19 +192,23 @@ class TensorRTDetector(DeployBaseTextDetector):
    """The class for evaluating TensorRT file of text detection."""

    def __init__(self,
-                 trt_file: str,
+                 model_file: str,
                 cfg: Union[mmcv.Config, mmcv.ConfigDict],
                 device_id: int,
-                 show_score: bool = False):
+                 show_score: bool = False,
+                 *args,
+                 **kwargs):
        super(TensorRTDetector, self).__init__(cfg, device_id, show_score)
        from mmdeploy.apis.tensorrt import TRTWrapper
-        model = TRTWrapper(trt_file)
+        model = TRTWrapper(model_file)
        self.model = model

    def forward_of_backend(self,
                           img: torch.Tensor,
                           img_metas: Iterable,
-                           rescale: bool = False):
+                           rescale: bool = False,
+                           *args,
+                           **kwargs):
        with torch.cuda.device(self.device_id), torch.no_grad():
            trt_pred = self.model({'input': img})['output']
        return trt_pred

@ -193,19 +218,79 @@ class TensorRTRecognizer(DeployBaseRecognizer):
    """The class for evaluating TensorRT file of recognition."""

    def __init__(self,
-                 trt_file: str,
+                 model_file: str,
                 cfg: Union[mmcv.Config, mmcv.ConfigDict],
                 device_id: int,
-                 show_score: bool = False):
+                 show_score: bool = False,
+                 *args,
+                 **kwargs):
        super(TensorRTRecognizer, self).__init__(cfg, device_id, show_score)
        from mmdeploy.apis.tensorrt import TRTWrapper
-        model = TRTWrapper(trt_file)
+        model = TRTWrapper(model_file)
        self.model = model

    def forward_of_backend(self,
                           img: torch.Tensor,
                           img_metas: Iterable,
-                           rescale: bool = False):
+                           rescale: bool = False,
+                           *args,
+                           **kwargs):
        with torch.cuda.device(self.device_id), torch.no_grad():
            trt_pred = self.model({'input': img})['output']
        return trt_pred
+
+
+def get_classes_from_config(model_cfg: Union[str, mmcv.Config], **kwargs):
+    # load cfg if necessary
+    model_cfg = load_config(model_cfg)[0]
+    module_dict = DATASETS.module_dict
+    data_cfg = model_cfg.data
+
+    if 'train' in data_cfg:
+        module = module_dict[data_cfg.train.type]
+    elif 'val' in data_cfg:
+        module = module_dict[data_cfg.val.type]
+    elif 'test' in data_cfg:
+        module = module_dict[data_cfg.test.type]
+    else:
+        raise RuntimeError(f'No dataset config found in: {model_cfg}')
+
+    return module.CLASSES
+
+
+TASK_ONNXRUNTIME_MAP = {
+    Task.TEXT_DETECTION: ONNXRuntimeDetector,
+    Task.TEXT_RECOGNITION: ONNXRuntimeRecognizer
+}
+
+TASK_TENSORRT_MAP = {
+    Task.TEXT_DETECTION: TensorRTDetector,
+    Task.TEXT_RECOGNITION: TensorRTRecognizer
+}
+
+BACKEND_TASK_MAP = {
+    Backend.ONNXRUNTIME: TASK_ONNXRUNTIME_MAP,
+    Backend.TENSORRT: TASK_TENSORRT_MAP
+}
+
+
+def build_ocr_processor(model_files, model_cfg, deploy_cfg, device_id,
+                        **kwargs):
+    # load cfg if necessary
+    deploy_cfg = load_config(deploy_cfg)[0]
+    model_cfg = load_config(model_cfg)[0]
+
+    backend = get_backend(deploy_cfg)
+    task = get_task_type(deploy_cfg)
+
+    assert backend in BACKEND_TASK_MAP, \
+        f'Unsupported backend type: {backend.value}'
+    assert task in BACKEND_TASK_MAP[backend], \
+        f'Unsupported task type: {task.value}'
+    backend_task_class = BACKEND_TASK_MAP[backend][task]
+
+    model_files = model_files[0] if len(model_files) == 1 else model_files
+    backend_detector = backend_task_class(
+        model_file=model_files, cfg=model_cfg, device_id=device_id, **kwargs)
+
+    return backend_detector
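Task dispatch happens on the deploy config: get_task_type picks detector versus recognizer and get_backend picks the wrapper family. A usage sketch; the config and model paths are placeholders:

# Sketch: placeholder paths for an exported text-detection model.
text_detector = build_ocr_processor(
    model_files=['dbnet.onnx'],
    model_cfg='configs/textdet/dbnet.py',
    deploy_cfg='configs/mmocr/text-detection_onnxruntime_dynamic.py',
    device_id=0)
# results = text_detector(img, img_metas, return_loss=False, rescale=True)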
@ -0,0 +1,19 @@
import numpy as np

from mmdeploy.utils import Backend


def show_result(model,
                image: np.ndarray,
                result,
                output_file: str,
                backend: Backend,
                show=True,
                score_thr=0.3):
    return model.show_result(
        image,
        result,
        score_thr=score_thr,
        show=show,
        win_name=backend.value,
        out_file=output_file)
@ -1,8 +1,3 @@
-from .model_wrappers import (ONNXRuntimeDetector, ONNXRuntimeRecognizer,
-                             TensorRTDetector, TensorRTRecognizer)
-from .prepare_input import create_input
+from .prepare_input import build_dataloader, build_dataset, create_input

-__all__ = [
-    'ONNXRuntimeDetector', 'ONNXRuntimeRecognizer', 'TensorRTDetector',
-    'TensorRTRecognizer', 'create_input'
-]
+__all__ = ['create_input', 'build_dataset', 'build_dataloader']
@ -1,9 +1,13 @@
-from typing import Any, Union
+from typing import Any, Optional, Union

import mmcv
import numpy as np
from mmcv.parallel import collate, scatter
from mmdet.datasets import replace_ImageToTensor
+from mmocr.datasets import build_dataloader as build_dataloader_mmocr
+from mmocr.datasets import build_dataset as build_dataset_mmocr

from mmdeploy.utils.config_utils import load_config


def create_input(model_cfg: Union[str, mmcv.Config],

@ -76,3 +80,38 @@ def create_input(model_cfg: Union[str, mmcv.Config],
        data = scatter(data, [device])[0]

    return data, data['img']
+
+
+def build_dataset(dataset_cfg: Union[str, mmcv.Config],
+                  dataset_type: str = 'val',
+                  **kwargs):
+    dataset_cfg = load_config(dataset_cfg)[0].copy()
+
+    data = dataset_cfg.data
+    assert dataset_type in data
+    dataset = build_dataset_mmocr(data[dataset_type])
+
+    return dataset
+
+
+def build_dataloader(dataset,
+                     samples_per_gpu: int,
+                     workers_per_gpu: int,
+                     num_gpus: int = 1,
+                     dist: bool = False,
+                     shuffle: bool = False,
+                     seed: Optional[int] = None,
+                     **kwargs):
+    return build_dataloader_mmocr(
+        dataset,
+        samples_per_gpu,
+        workers_per_gpu,
+        num_gpus=num_gpus,
+        dist=dist,
+        shuffle=shuffle,
+        seed=seed,
+        **kwargs)
+
+
+def get_tensor_from_input(input_data):
+    return input_data['img']
@ -0,0 +1,4 @@
from .inference import build_segmentor
from .visualize import show_result

__all__ = ['build_segmentor', 'show_result']
@ -0,0 +1,140 @@
from typing import Sequence, Union

import mmcv
import numpy as np
import torch
from mmseg.datasets import DATASETS
from mmseg.models.segmentors.base import BaseSegmentor
from mmseg.ops import resize

from mmdeploy.utils.config_utils import Backend, get_backend, load_config


class DeployBaseSegmentor(BaseSegmentor):

    def __init__(self, class_names: Sequence[str], palette: np.ndarray,
                 device_id: int):
        super(DeployBaseSegmentor, self).__init__(init_cfg=None)
        self.CLASSES = class_names
        self.device_id = device_id
        self.PALETTE = palette

    def extract_feat(self, imgs):
        raise NotImplementedError('This method is not implemented.')

    def encode_decode(self, img, img_metas):
        raise NotImplementedError('This method is not implemented.')

    def forward_train(self, imgs, img_metas, **kwargs):
        raise NotImplementedError('This method is not implemented.')

    def simple_test(self, img, img_meta, **kwargs):
        raise NotImplementedError('This method is not implemented.')

    def aug_test(self, imgs, img_metas, **kwargs):
        raise NotImplementedError('This method is not implemented.')

    def forward(self, img, img_metas, **kwargs):
        seg_pred = self.forward_test(img, img_metas, **kwargs)
        # whole mode supports dynamic shape
        ori_shape = img_metas[0][0]['ori_shape']
        if not (ori_shape[0] == seg_pred.shape[-2]
                and ori_shape[1] == seg_pred.shape[-1]):
            seg_pred = torch.from_numpy(seg_pred).float()
            seg_pred = resize(
                seg_pred, size=tuple(ori_shape[:2]), mode='nearest')
            seg_pred = seg_pred.long().detach().cpu().numpy()
        # remove unnecessary dim
        seg_pred = seg_pred.squeeze(1)
        seg_pred = list(seg_pred)
        return seg_pred


class ONNXRuntimeSegmentor(DeployBaseSegmentor):

    def __init__(self, model_file: str, class_names: Sequence[str],
                 palette: np.ndarray, device_id: int):
        super(ONNXRuntimeSegmentor, self).__init__(class_names, palette,
                                                   device_id)
        from mmdeploy.apis.onnxruntime import ORTWrapper
        self.model = ORTWrapper(model_file, device_id)

    def forward_test(self, imgs, img_metas, **kwargs):
        if isinstance(imgs, (list, tuple)):
            imgs = imgs[0]
        seg_pred = self.model({'input': imgs})[0]
        return seg_pred


class TensorRTSegmentor(DeployBaseSegmentor):

    def __init__(self, model_file: str, class_names: Sequence[str],
                 palette: np.ndarray, device_id: int):
        super(TensorRTSegmentor, self).__init__(class_names, palette,
                                                device_id)
        from mmdeploy.apis.tensorrt import TRTWrapper

        model = TRTWrapper(model_file)
        self.model = model
        self.output_name = self.model.output_names[0]

    def forward_test(self, imgs, img_metas, **kwargs):
        input_data = imgs[0].contiguous()
        with torch.cuda.device(self.device_id), torch.no_grad():
            seg_pred = self.model({'input': input_data})[self.output_name]
        seg_pred = seg_pred.detach().cpu().numpy()
        return seg_pred


ONNXRUNTIME_SEGMENTOR_MAP = dict(end2end=ONNXRuntimeSegmentor)

TENSORRT_SEGMENTOR_MAP = dict(end2end=TensorRTSegmentor)

BACKEND_SEGMENTOR_MAP = {
    Backend.ONNXRUNTIME: ONNXRUNTIME_SEGMENTOR_MAP,
    Backend.TENSORRT: TENSORRT_SEGMENTOR_MAP
}


def get_classes_palette_from_config(model_cfg: Union[str, mmcv.Config]):
    # load cfg if necessary
    model_cfg = load_config(model_cfg)[0]

    module_dict = DATASETS.module_dict
    data_cfg = model_cfg.data

    if 'train' in data_cfg:
        module = module_dict[data_cfg.train.type]
    elif 'val' in data_cfg:
        module = module_dict[data_cfg.val.type]
    elif 'test' in data_cfg:
        module = module_dict[data_cfg.test.type]
    else:
        raise RuntimeError(f'No dataset config found in: {model_cfg}')

    return module.CLASSES, module.PALETTE


def build_segmentor(model_files, model_cfg, deploy_cfg, device_id):

    # load cfg if necessary
    model_cfg = load_config(model_cfg)[0]
    deploy_cfg = load_config(deploy_cfg)[0]

    backend = get_backend(deploy_cfg)
    class_names, palette = get_classes_palette_from_config(model_cfg)
    assert backend in BACKEND_SEGMENTOR_MAP, \
        f'Unsupported backend type: {backend.value}'
    segmentor_map = BACKEND_SEGMENTOR_MAP[backend]

    model_type = 'end2end'
    assert model_type in segmentor_map, f'Unsupported model type: {model_type}'
    backend_segmentor_class = segmentor_map[model_type]

    backend_segmentor = backend_segmentor_class(
        *model_files,
        class_names=class_names,
        device_id=device_id,
        palette=palette)

    return backend_segmentor
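Class names and the palette are recovered from the dataset section of the model config, so visualization works without instantiating a dataset. A usage sketch with placeholder paths:

# Sketch: placeholder config and engine paths.
segmentor = build_segmentor(
    model_files=['fcn.trt'],
    model_cfg='configs/fcn/fcn_r50-d8.py',
    deploy_cfg='configs/mmseg/segmentation_tensorrt_dynamic.py',
    device_id=0)
# seg = segmentor(img, img_metas)  # list of HxW label maps at ori_shape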
@ -0,0 +1,19 @@
import numpy as np

from mmdeploy.utils import Backend


def show_result(model,
                image: np.ndarray,
                result,
                output_file: str,
                backend: Backend,
                show=True,
                opacity=0.5):
    return model.show_result(
        image,
        result,
        opacity=opacity,
        show=show,
        win_name=backend.value,
        out_file=output_file)
@ -1,8 +1,8 @@
-from .model_wrappers import ONNXRuntimeSegmentor, TensorRTSegmentor
-from .onnx_helper import convert_syncbatchnorm
-from .prepare_input import create_input
+from .onnx_utils import convert_syncbatchnorm
+from .prepare_input import (build_dataloader, build_dataset, create_input,
+                            get_tensor_from_input)

__all__ = [
-    'create_input', 'ONNXRuntimeSegmentor', 'TensorRTSegmentor',
-    'convert_syncbatchnorm'
+    'create_input', 'convert_syncbatchnorm', 'build_dataloader',
+    'build_dataset', 'get_tensor_from_input'
]
@ -1,117 +0,0 @@
import os.path as osp
import warnings
from typing import Sequence

import numpy as np
import torch
from mmseg.models.segmentors.base import BaseSegmentor
from mmseg.ops import resize


class DeployBaseSegmentor(BaseSegmentor):

    def __init__(self, class_names: Sequence[str], device_id: int):
        super(DeployBaseSegmentor, self).__init__(init_cfg=None)
        self.CLASSES = class_names
        self.device_id = device_id
        self.PALETTE = None

    def extract_feat(self, imgs):
        raise NotImplementedError('This method is not implemented.')

    def encode_decode(self, img, img_metas):
        raise NotImplementedError('This method is not implemented.')

    def forward_train(self, imgs, img_metas, **kwargs):
        raise NotImplementedError('This method is not implemented.')

    def simple_test(self, img, img_meta, **kwargs):
        raise NotImplementedError('This method is not implemented.')

    def aug_test(self, imgs, img_metas, **kwargs):
        raise NotImplementedError('This method is not implemented.')

    def forward(self, img, img_metas, **kwargs):
        seg_pred = self.forward_test(img, img_metas, **kwargs)
        # whole mode supports dynamic shape
        ori_shape = img_metas[0][0]['ori_shape']
        if not (ori_shape[0] == seg_pred.shape[-2]
                and ori_shape[1] == seg_pred.shape[-1]):
            seg_pred = torch.from_numpy(seg_pred).float()
            seg_pred = resize(
                seg_pred, size=tuple(ori_shape[:2]), mode='nearest')
            seg_pred = seg_pred.long().detach().cpu().numpy()
        # remove unnecessary dim
        seg_pred = seg_pred.squeeze(1)
        seg_pred = list(seg_pred)
        return seg_pred


class ONNXRuntimeSegmentor(DeployBaseSegmentor):

    def __init__(self, onnx_file: str, class_names: Sequence[str],
                 device_id: int):
        super(ONNXRuntimeSegmentor, self).__init__(class_names, device_id)

        import onnxruntime as ort
        from mmdeploy.apis.onnxruntime import get_ops_path

        # get the custom op path
        ort_custom_op_path = get_ops_path()
        session_options = ort.SessionOptions()
        # register custom op for onnxruntime
        if osp.exists(ort_custom_op_path):
            session_options.register_custom_ops_library(ort_custom_op_path)
        sess = ort.InferenceSession(onnx_file, session_options)
        providers = ['CPUExecutionProvider']
        options = [{}]
        is_cuda_available = ort.get_device() == 'GPU'
        if is_cuda_available:
            providers.insert(0, 'CUDAExecutionProvider')
            options.insert(0, {'device_id': device_id})

        sess.set_providers(providers, options)

        self.sess = sess
        self.io_binding = sess.io_binding()
        self.output_names = [_.name for _ in sess.get_outputs()]
        for name in self.output_names:
            self.io_binding.bind_output(name)

    def forward_test(self, imgs, img_metas, **kwargs):
        input_data = imgs[0]
        device_type = input_data.device.type
        self.io_binding.bind_input(
            name='input',
            device_type=device_type,
            device_id=self.device_id,
            element_type=np.float32,
            shape=input_data.shape,
            buffer_ptr=input_data.data_ptr())
        self.sess.run_with_iobinding(self.io_binding)
        seg_pred = self.io_binding.copy_outputs_to_cpu()[0]
        return seg_pred


class TensorRTSegmentor(DeployBaseSegmentor):

    def __init__(self, trt_file: str, class_names: Sequence[str],
                 device_id: int):
        super(TensorRTSegmentor, self).__init__(class_names, device_id)

        from mmdeploy.apis.tensorrt import TRTWrapper, load_tensorrt_plugin
        try:
            load_tensorrt_plugin()
        except (ImportError, ModuleNotFoundError):
            warnings.warn('If input model has custom plugins, \
                you may have to build backend ops with TensorRT')
        model = TRTWrapper(trt_file)
        self.model = model
        self.output_name = self.model.output_names[0]

    def forward_test(self, imgs, img_metas, **kwargs):
        input_data = imgs[0].contiguous()
        with torch.cuda.device(self.device_id), torch.no_grad():
            seg_pred = self.model({'input': input_data})[self.output_name]
        seg_pred = seg_pred.detach().cpu().numpy()
        return seg_pred
@ -4,16 +4,18 @@ import mmcv
import numpy as np
from mmcv.parallel import collate, scatter
from mmseg.apis.inference import LoadImage
+from mmseg.datasets import build_dataloader as build_dataloader_mmseg
+from mmseg.datasets import build_dataset as build_dataset_mmseg
from mmseg.datasets.pipelines import Compose

-from mmdeploy.apis.utils import load_config
+from mmdeploy.utils.config_utils import load_config


def create_input(model_cfg: Union[str, mmcv.Config],
                 imgs: Any,
                 device: str = 'cuda:0'):

-    cfg = load_config(model_cfg).copy()
+    cfg = load_config(model_cfg)[0].copy()
    if not isinstance(imgs, (list, tuple)):
        imgs = [imgs]

@ -21,6 +23,7 @@ def create_input(model_cfg: Union[str, mmcv.Config],
        cfg = cfg.copy()
        # set loading pipeline type
        cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
+    # TODO remove hard code
    cfg.data.test.pipeline[1]['img_scale'] = (1024, 512)
    cfg.data.test.pipeline[1]['transforms'][0]['keep_ratio'] = False
    cfg.data.test.pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]

@ -42,3 +45,35 @@
        data = scatter(data, [device])[0]

    return data, data['img']
+
+
+def build_dataset(dataset_cfg: Union[str, mmcv.Config],
+                  dataset_type: str = 'val',
+                  **kwargs):
+    dataset_cfg = load_config(dataset_cfg)[0]
+    data = dataset_cfg.data
+    assert dataset_type in data
+
+    dataset = build_dataset_mmseg(data[dataset_type])
+
+    return dataset
+
+
+def build_dataloader(dataset,
+                     samples_per_gpu: int,
+                     workers_per_gpu: int,
+                     num_gpus=1,
+                     dist=False,
+                     shuffle=False,
+                     seed=None,
+                     drop_last=False,
+                     pin_memory=True,
+                     persistent_workers=True,
+                     **kwargs):
+    return build_dataloader_mmseg(dataset, samples_per_gpu, workers_per_gpu,
+                                  num_gpus, dist, shuffle, seed, drop_last,
+                                  pin_memory, persistent_workers, **kwargs)
+
+
+def get_tensor_from_input(input_data):
+    return input_data['img']
@ -1,4 +1,8 @@
-from .config_utils import (is_dynamic_batch, is_dynamic_shape,
-                           parse_extractor_io_string)
+from .config_utils import (get_backend, get_codebase, get_task_type,
+                           is_dynamic_batch, is_dynamic_shape, load_config)
+from .constants import Backend, Codebase, Task

-__all__ = ['is_dynamic_batch', 'is_dynamic_shape', 'parse_extractor_io_string']
+__all__ = [
+    'is_dynamic_batch', 'is_dynamic_shape', 'get_task_type', 'get_codebase',
+    'get_backend', 'load_config', 'Backend', 'Codebase', 'Task'
+]
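These getters replace raw dict access (deploy_cfg['backend']) throughout the codebases, returning enum members instead of strings. A sketch of the contract they are expected to satisfy; the in-memory config below is a stand-in for a real deploy config file:

# Sketch: builds a deploy config in memory instead of loading from disk.
import mmcv

from mmdeploy.utils import Backend, get_backend, load_config

deploy_cfg = mmcv.Config(dict(backend='tensorrt'))
assert get_backend(deploy_cfg) == Backend.TENSORRT
# load_config is expected to pass already-loaded configs through unchanged:
# load_config(deploy_cfg)[0] is deploy_cfg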