2021-06-17 15:28:23 +08:00
|
|
|
import argparse
|
|
|
|
import logging
|
2021-06-17 15:29:12 +08:00
|
|
|
import os.path as osp
|
[Feature] Merge NCNN deployment to grimoire based on mmcls - revert [#25](https://github.com/grimoire/deploy_prototype/pull/25) (#30)
* add
* change VulkanSDK to 1.2.176.1
* add ncnn cmakelist
* add ncnn source code as third party
* add all ncnn
* ncnn compile passed
* onnx2ncnn correctly
* fix code style
* merge_as_grimoire_design, only backend_ops, manually register.
* remove data and test sh
* remove build example
* remove config ncnn
* remove onnx2ncnn intermediate files
* remove other files auto-generated
* remove vulkan tools
* remove Vulkan, gitignore new rules, __init__ new lines
* rollback __init__ to grimoire
* remove pytorch version pending
* grimoire comments reply 1, 3, 4
* reply comment 5,6,7
* add auto definer, add python register
* fix lint
* add ncnn deploy support
* add model_wrapper, fix a typo bug, and add code comment for onnx2ncnn(WIP)
* add model wrapper ncnn
* fix lint
* fix pep8
* fix pre-commit-config.yaml paths
* fix import
* fix lint
* remove sys.path.append
* remove sys
* isort fix
* fix double quoted
* fix trailing space
* try fix isort
* fix clang-format-9
* fix requests
* fix all comments
* Fix typo
* test code for grimoire
* fix ops register
* new definere
* fix visualization of mmcls
* remove temp
* fix flake8
* fix seed-isort-config
* fix thirdparty
* fix thirdparty
* fix yapf
* fix third_party_sort
* fix third party
* fix clang-format
* try fix clang-format
* try to fix clang format 9 customreshape
* try fix clang-format-9
* try fix clang-format-9
* try fix clang-format-9
* try fix ext
* fix onnx2ncnn
* Fix comments
* Fix Comments
* Fix Comments
* Fix Comments
* Fix conflict
* Fix flake8
* Update .isort.cfg
* Update ncnn_ext.cpp
* Update ncnn_ext.cpp
* fix missing ncnn backend code
* delete out of date comments of gather.cpp
* add DeployBaseClassifier
* add return -100 error
* clear out-of-date to do comments
Co-authored-by: 韩睿 <SENSETIME\hanrui1@cn0614008774l.domain.sensetime.com>
Co-authored-by: grimoire <yaoqian@sensetime.com>
Co-authored-by: grimoire <streetyao@live.com>
2021-08-05 14:06:47 +08:00
|
|
|
import subprocess
|
2021-07-23 13:18:32 +08:00
|
|
|
from functools import partial
|
2021-06-17 15:28:23 +08:00
|
|
|
|
|
|
|
import mmcv
|
2021-06-17 17:26:32 +08:00
|
|
|
import torch.multiprocessing as mp
|
2021-06-17 15:29:12 +08:00
|
|
|
from torch.multiprocessing import Process, set_start_method
|
2021-06-17 15:28:23 +08:00
|
|
|
|
2021-08-03 17:12:44 +08:00
|
|
|
from mmdeploy.apis import (assert_cfg_valid, extract_model, inference_model,
|
|
|
|
torch2onnx)
|
2021-08-13 10:06:28 +08:00
|
|
|
from mmdeploy.apis.utils import get_split_cfg
|
2021-06-17 15:28:23 +08:00
|
|
|
|
|
|
|
|
|
|
|
def parse_args():
    """Parse command-line arguments for the model export script.

    Returns:
        argparse.Namespace: Parsed arguments holding the deploy/model config
            paths, checkpoint path, test image, work dir, device, log level
            and the ``--show`` visualization flag.
    """
    parser = argparse.ArgumentParser(description='Export model to backend.')
    parser.add_argument('deploy_cfg', help='deploy config path')
    parser.add_argument('model_cfg', help='model config path')
    parser.add_argument('checkpoint', help='model checkpoint path')
    parser.add_argument(
        'img', help='image used to convert model and test model')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--device', help='device used for conversion', default='cpu')
    parser.add_argument(
        '--log-level',
        help='set log level',
        default='INFO',
        # Explicit public level names instead of the private
        # logging._nameToLevel mapping; the accepted set is identical
        # (including the legacy aliases FATAL and WARN).
        choices=[
            'CRITICAL', 'FATAL', 'ERROR', 'WARN', 'WARNING', 'INFO', 'DEBUG',
            'NOTSET'
        ])
    parser.add_argument(
        '--show', action='store_true', help='Show detection outputs')
    args = parser.parse_args()

    return args
|
|
|
|
|
|
|
|
|
2021-07-23 13:18:32 +08:00
|
|
|
def target_wrapper(target, log_level, *args, **kwargs):
    """Call ``target`` after configuring the root logger level.

    Intended to run inside a freshly spawned child process, where the parent's
    logging configuration is not inherited, so the desired level is re-applied
    before the real work starts.

    Args:
        target (callable): The function to execute.
        log_level (int): Logging level to set on the root logger.
        *args: Positional arguments forwarded to ``target``.
        **kwargs: Keyword arguments forwarded to ``target``.

    Returns:
        The return value of ``target(*args, **kwargs)``.
    """
    logger = logging.getLogger()
    # The original code had a bare `logger.level` expression here, which
    # reads the attribute and discards it — a no-op; removed.
    logger.setLevel(log_level)
    return target(*args, **kwargs)
|
|
|
|
|
|
|
|
|
2021-07-13 17:21:02 +08:00
|
|
|
def create_process(name, target, args, kwargs, ret_value=None):
    """Run ``target`` in a child process and wait for it to finish.

    The child re-applies the current root logger level via ``target_wrapper``
    (spawned processes do not inherit logging configuration). If ``ret_value``
    is given and the child reported a nonzero status, the whole script is
    aborted with a nonzero exit code.

    Args:
        name (str): Human-readable task name used in log messages.
        target (callable): Function executed in the child process.
        args (tuple): Positional arguments for ``target``.
        kwargs (dict): Keyword arguments for ``target``.
        ret_value (multiprocessing.Value | None): Shared status written by the
            child; nonzero means failure. ``None`` disables the check.
    """
    logging.info(f'{name} start.')
    log_level = logging.getLogger().level

    wrap_func = partial(target_wrapper, target, log_level)

    process = Process(target=wrap_func, args=args, kwargs=kwargs)
    process.start()
    process.join()

    if ret_value is not None:
        if ret_value.value != 0:
            logging.error(f'{name} failed.')
            # Exit with a nonzero status so shell callers see the failure.
            # The original called bare `exit()`, which exits with status 0
            # (success) even though the step failed.
            exit(1)
        else:
            logging.info(f'{name} success.')
|
|
|
|
|
|
|
|
|
2021-06-17 15:28:23 +08:00
|
|
|
def main():
    """Convert a PyTorch model to ONNX, then to a backend, and visualize.

    Pipeline: parse args -> export ONNX -> (optionally split the ONNX graph)
    -> convert to the configured backend (TensorRT or ncnn) -> run inference
    with both the backend model and the original PyTorch model to compare
    outputs visually. Each conversion step runs in its own spawned process
    via ``create_process`` and aborts the script on failure.
    """
    args = parse_args()
    # 'spawn' gives each worker a clean interpreter; required for CUDA use
    # in child processes.
    set_start_method('spawn')

    logger = logging.getLogger()
    logger.setLevel(args.log_level)

    deploy_cfg_path = args.deploy_cfg
    model_cfg_path = args.model_cfg
    checkpoint_path = args.checkpoint

    # load deploy_cfg
    deploy_cfg = mmcv.Config.fromfile(deploy_cfg_path)
    assert_cfg_valid(deploy_cfg, model_cfg_path)

    # create work_dir if not
    mmcv.mkdir_or_exist(osp.abspath(args.work_dir))

    # Shared status flag written by child processes; nonzero means a step
    # failed ('d' = C double, unlocked — single writer at a time).
    ret_value = mp.Value('d', 0, lock=False)

    # convert onnx
    onnx_save_file = deploy_cfg['pytorch2onnx']['save_file']
    create_process(
        'torch2onnx',
        target=torch2onnx,
        args=(args.img, args.work_dir, onnx_save_file, deploy_cfg_path,
              model_cfg_path, checkpoint_path),
        kwargs=dict(device=args.device, ret_value=ret_value),
        ret_value=ret_value)

    # convert backend
    onnx_files = [osp.join(args.work_dir, onnx_save_file)]

    # split model
    apply_marks = deploy_cfg.get('apply_marks', False)
    if apply_marks:
        # NOTE(review): hasattr works here because mmcv.Config supports
        # attribute-style access to its keys.
        assert hasattr(deploy_cfg, 'split_params')
        split_params = deploy_cfg['split_params']

        # Either an explicit split_cfg is given, or one is derived from the
        # codebase name and a named split_type.
        if 'split_cfg' in split_params:
            split_cfgs = split_params.get('split_cfg', None)
        else:
            assert 'split_type' in split_params
            split_cfgs = get_split_cfg(deploy_cfg['codebase'],
                                       split_params['split_type'])

        # The split sub-models replace the single original ONNX file as the
        # inputs to the backend conversion below.
        origin_onnx_file = onnx_files[0]
        onnx_files = []
        for split_cfg in split_cfgs:
            save_file = split_cfg['save_file']
            save_path = osp.join(args.work_dir, save_file)
            start = split_cfg['start']
            end = split_cfg['end']
            dynamic_axes = split_cfg.get('dynamic_axes', None)

            create_process(
                f'split model {save_file} with start: {start}, end: {end}',
                extract_model,
                args=(origin_onnx_file, start, end),
                kwargs=dict(
                    dynamic_axes=dynamic_axes,
                    save_file=save_path,
                    ret_value=ret_value),
                ret_value=ret_value)

            onnx_files.append(save_path)

    # Default: feed the ONNX files directly to inference (no backend
    # conversion) unless a specific backend overwrites backend_files below.
    backend_files = onnx_files
    # convert backend
    backend = deploy_cfg.get('backend', 'default')
    if backend == 'tensorrt':
        assert hasattr(deploy_cfg, 'tensorrt_params')
        tensorrt_params = deploy_cfg['tensorrt_params']
        model_params = tensorrt_params.get('model_params', [])
        # One param dict per ONNX (sub-)model.
        assert len(model_params) == len(onnx_files)

        # Imported lazily so the script works without TensorRT installed
        # when another backend is selected.
        from mmdeploy.apis.tensorrt import onnx2tensorrt
        from mmdeploy.apis.tensorrt import is_available as trt_is_available
        assert trt_is_available(
        ), 'TensorRT is not available,' \
            + ' please install TensorRT and build TensorRT custom ops first.'
        backend_files = []
        for model_id, model_param, onnx_path in zip(
                range(len(onnx_files)), model_params, onnx_files):
            onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]
            # Engine file name defaults to <onnx basename>.engine.
            save_file = model_param.get('save_file', onnx_name + '.engine')

            create_process(
                f'onnx2tensorrt of {onnx_path}',
                target=onnx2tensorrt,
                args=(args.work_dir, save_file, model_id, deploy_cfg_path,
                      onnx_path),
                kwargs=dict(device=args.device, ret_value=ret_value),
                ret_value=ret_value)

            backend_files.append(osp.join(args.work_dir, save_file))

    elif backend == 'ncnn':
        from mmdeploy.apis.ncnn import get_onnx2ncnn_path
        from mmdeploy.apis.ncnn import is_available as is_available_ncnn

        if not is_available_ncnn():
            logging.error('ncnn support is not available.')
            exit(-1)

        # onnx2ncnn is an external executable invoked per ONNX file.
        onnx2ncnn_path = get_onnx2ncnn_path()

        backend_files = []
        for onnx_path in onnx_files:
            onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]
            save_param = onnx_name + '.param'
            save_bin = onnx_name + '.bin'

            save_param = osp.join(args.work_dir, save_param)
            save_bin = osp.join(args.work_dir, save_bin)

            # NOTE(review): the return code of onnx2ncnn is not checked
            # here; a conversion failure would only surface later at
            # inference time.
            subprocess.call([onnx2ncnn_path, onnx_path, save_param, save_bin])

            # ncnn produces a (.param, .bin) pair per model.
            backend_files += [save_param, save_bin]

    # check model outputs by visualization

    # visualize model of the backend
    create_process(
        f'visualize {backend} model',
        target=inference_model,
        args=(model_cfg_path, deploy_cfg_path, backend_files, args.img),
        kwargs=dict(
            device=args.device,
            output_file=f'output_{backend}.jpg',
            show_result=args.show,
            ret_value=ret_value),
        ret_value=ret_value)

    # visualize pytorch model
    create_process(
        'visualize pytorch model',
        target=inference_model,
        args=(model_cfg_path, deploy_cfg_path, [checkpoint_path], args.img),
        kwargs=dict(
            device=args.device,
            backend='pytorch',
            output_file='output_pytorch.jpg',
            show_result=args.show,
            ret_value=ret_value),
        ret_value=ret_value)

    logging.info('All process success.')
|
|
|
|
|
2021-06-17 15:28:23 +08:00
|
|
|
|
2021-06-17 15:29:12 +08:00
|
|
|
# Entry-point guard: run the conversion pipeline only when executed as a
# script, not when imported as a module.
if __name__ == '__main__':
    main()
|