# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp

import mmcv
import numpy as np
import pytest
import torch
import torch.nn.functional as F
from packaging.version import parse

from mmdeploy.utils import Backend
from mmdeploy.utils.test import (WrapFunction, backend_checker,
                                 get_rewrite_outputs)

deploy_cfg_ncnn = mmcv.Config(
    dict(
        onnx_config=dict(input_shape=None),
        backend_config=dict(type='ncnn', model_inputs=None, use_vulkan=False),
        codebase_config=dict(type='mmdet', task='ObjectDetection')))


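# Helper that builds a minimal TensorRT deploy config. min/opt/max shapes are
# all set to the same value, so the resulting engine only accepts a single,
# static input shape.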
def get_trt_config(output_names, shape):
    deploy_cfg_tensorrt = mmcv.Config(
        dict(
            onnx_config=dict(input_shape=None, output_names=output_names),
            backend_config=dict(
                type='tensorrt',
                common_config=dict(
                    fp16_mode=False, max_workspace_size=1 << 20),
                model_inputs=[
                    dict(
                        input_shapes=dict(
                            input=dict(
                                min_shape=shape,
                                opt_shape=shape,
                                max_shape=shape)))
                ]),
            codebase_config=dict(type='mmdet', task='ObjectDetection')))
    return deploy_cfg_tensorrt


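# Tensor.size() should be rewritten so that each dimension comes back as a
# plain Python int rather than a 0-dim Tensor; the inner assert checks that.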
@backend_checker(Backend.NCNN)
def test_get_attribute():

    def model_func(tensor):
        x = tensor.size()
        assert isinstance(x[0], int) and not isinstance(x[0], torch.Tensor)
        return torch.tensor(x)

    input = torch.zeros([1, 2, 3, 4])
    wrapped_func = WrapFunction(model_func)
    rewrite_outputs, _ = get_rewrite_outputs(
        wrapped_func,
        model_inputs={'tensor': input},
        deploy_cfg=deploy_cfg_ncnn,
        run_with_backend=True)

    assert rewrite_outputs is not None, \
        'Got unexpected rewrite outputs: {}'.format(rewrite_outputs)


@backend_checker(Backend.NCNN)
def test_group_norm_ncnn():
    input = torch.rand([1, 2, 2, 2])
    weight = torch.rand([2])
    bias = torch.rand([2])
    model_output = F.group_norm(input, 1, weight, bias, 1e-05)

    def group_norm_caller(input):
        return F.group_norm(input, 1, weight, bias)

    wrapped_func = WrapFunction(group_norm_caller)
    rewrite_output, _ = get_rewrite_outputs(
        wrapped_func,
        model_inputs={'input': input},
        deploy_cfg=deploy_cfg_ncnn,
        run_with_backend=True)

    assert np.allclose(model_output, rewrite_output[0], rtol=1e-03, atol=1e-05)


@backend_checker(Backend.NCNN)
def test_chunk_ncnn():
    input = torch.rand(1, 16, 16, 16)

    model_output = input.chunk(2, dim=1)

    def chunk_caller(input):
        return input.chunk(2, dim=1)

    wrapped_func = WrapFunction(chunk_caller)
    rewrite_output, _ = get_rewrite_outputs(
        wrapped_func,
        model_inputs={'input': input},
        deploy_cfg=deploy_cfg_ncnn,
        run_with_backend=True)

    assert len(model_output) == len(rewrite_output)
    for i in range(len(model_output)):
        assert np.allclose(
            model_output[i], rewrite_output[i], rtol=1e-03, atol=1e-05)


@backend_checker(Backend.NCNN)
def test_interpolate_static():
    input = torch.rand([1, 2, 2, 2])
    model_output = F.interpolate(input, scale_factor=[2, 2])

    def interpolate_caller(*arg, **kwargs):
        return F.interpolate(*arg, **kwargs)

    wrapped_func = WrapFunction(interpolate_caller, size=[4, 4])
    rewrite_output, _ = get_rewrite_outputs(
        wrapped_func,
        model_inputs={'input': input},
        deploy_cfg=deploy_cfg_ncnn,
        run_with_backend=True)

    assert np.allclose(model_output, rewrite_output[0], rtol=1e-03, atol=1e-05)


@backend_checker(Backend.NCNN)
def test_linear_ncnn():
    input = torch.rand([1, 2, 2])
    weight = torch.rand([2, 2])
    bias = torch.rand([2])
    model_output = F.linear(input, weight=weight, bias=bias)

    def linear_caller(*arg, **kwargs):
        return F.linear(*arg, **kwargs)

    wrapped_func = WrapFunction(linear_caller, weight=weight, bias=bias)
    rewrite_output, _ = get_rewrite_outputs(
        wrapped_func,
        model_inputs={'input': input},
        deploy_cfg=deploy_cfg_ncnn,
        run_with_backend=True)

    assert np.allclose(model_output, rewrite_output[0], rtol=1e-03, atol=1e-05)


@backend_checker(Backend.TENSORRT)
def test_repeat_static():
    input = torch.rand([1])

    def model_func(input):
        return torch.Tensor.repeat(input, 4)

    wrapped_func = WrapFunction(model_func)

    model_output = model_func(input)

    deploy_cfg = get_trt_config(['output'], [1])

    rewrite_output, is_backend_output = get_rewrite_outputs(
        wrapped_func, model_inputs={'input': input}, deploy_cfg=deploy_cfg)

    if is_backend_output:
        rewrite_output = rewrite_output[0].detach().cpu()

        assert np.allclose(
            model_output, rewrite_output, rtol=1e-03, atol=1e-05)
    else:
        assert rewrite_output is not None


@backend_checker(Backend.NCNN)
def test_size_of_tensor_static():

    def model_func(input):
        x = torch.Tensor.size(input)
        assert isinstance(x[0], int) and not isinstance(x[0], torch.Tensor)
        return torch.tensor(x)

    input = torch.zeros([1, 2, 3, 4])
    wrapped_func = WrapFunction(model_func)
    rewrite_outputs, _ = get_rewrite_outputs(
        wrapped_func,
        model_inputs={'input': input},
        deploy_cfg=deploy_cfg_ncnn,
        run_with_backend=True)

    assert rewrite_outputs is not None, \
        'Got unexpected rewrite outputs: {}'.format(rewrite_outputs)


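# topk is covered twice: the ncnn case goes through the dynamic-shape rewriter
# and the TensorRT case through the static-shape rewriter (see the inline
# comments referencing mmdeploy.pytorch.functions.topk below).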
class TestTopk:

    input = torch.rand(1, 5, 5, 5)

    @backend_checker(Backend.NCNN)
    @pytest.mark.parametrize('k', [1, 3, 4])
    @pytest.mark.parametrize('dim', [1, 2, 3])
    def test_topk_ncnn(self, dim, k):

        model_output = torch.Tensor.topk(TestTopk.input, k, dim).values

        def model_func(input):
            x = input.topk(k, dim)
            return x.indices, x.values

        wrapped_func = WrapFunction(model_func)

        # mmdeploy.pytorch.functions.topk.topk_dynamic
        output, _ = get_rewrite_outputs(
            wrapped_func,
            model_inputs={'input': TestTopk.input},
            deploy_cfg=deploy_cfg_ncnn,
            run_with_backend=True)
        assert np.allclose(model_output, output[0], rtol=1e-03, atol=1e-05)

    @backend_checker(Backend.TENSORRT)
    @pytest.mark.parametrize('k', [1, 3, 4])
    @pytest.mark.parametrize('dim', [1, 2, 3])
    def test_topk_tensorrt(self, dim, k):
        model_output = torch.Tensor.topk(TestTopk.input, k, dim).values

        def model_func(input):
            x = input.topk(k, dim)
            return x.indices, x.values

        wrapped_func = WrapFunction(model_func)

        # mmdeploy.pytorch.functions.topk.topk_static
        deploy_cfg_tensorrt = get_trt_config(['indices', 'values'],
                                             [1, 5, 5, 5])
        output, is_backend_output = get_rewrite_outputs(
            wrapped_func,
            model_inputs={'input': TestTopk.input},
            deploy_cfg=deploy_cfg_tensorrt)

        if is_backend_output:
            output = output[1].detach().cpu()

            assert np.allclose(model_output, output, rtol=1e-03, atol=1e-05)
        else:
            assert output is not None


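# torch.triu is checked over several shapes and diagonal offsets; the TensorRT
# result should match PyTorch within the usual fp32 tolerances.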
@backend_checker(Backend.TENSORRT)
@pytest.mark.parametrize('shape', [[2, 2], [4, 2], [2, 4], [2, 4, 2]])
@pytest.mark.parametrize('diagonal', [0, 1, -1])
def test_triu_trt(shape, diagonal):

    input = torch.rand(shape)
    model_output = torch.triu(input=input, diagonal=diagonal)

    def triu_caller(*arg, **kwargs):
        return torch.triu(*arg, **kwargs)

    wrapped_func = WrapFunction(triu_caller, diagonal=diagonal)
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
        wrapped_func,
        model_inputs={'input': input},
        deploy_cfg=get_trt_config(['output'], shape=shape),
        run_with_backend=True)
    if is_backend_output:
        rewrite_outputs = rewrite_outputs[0].detach().cpu()

        assert np.allclose(
            model_output, rewrite_outputs, rtol=1e-03, atol=1e-05)
    else:
        assert rewrite_outputs is not None


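# This test only verifies that F.normalize exports to ONNX and converts to
# ncnn (the .param/.bin files are produced); it does not compare outputs
# numerically.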
@backend_checker(Backend.NCNN)
@pytest.mark.parametrize(
    'input',
    [torch.rand(1, 16, 16), torch.rand(1, 3, 16, 16)])
@pytest.mark.parametrize('dim', [1, 2])
def test_normalize_ncnn(input, dim):
    import mmdeploy.apis.ncnn as ncnn_apis
    from mmdeploy.utils.test import get_onnx_model

    def norm_func(input, dim):
        return F.normalize(input, p=2, dim=dim)

    wrapped_func = WrapFunction(norm_func, dim=dim)
    model_inputs = {'input': input}
    ir_file_path = get_onnx_model(wrapped_func, model_inputs, deploy_cfg_ncnn)
    assert osp.exists(ir_file_path)
    ncnn_files_prefix = osp.splitext(ir_file_path)[0]
    ncnn_apis.from_onnx(ir_file_path, ncnn_files_prefix)
    param_path, bin_path = ncnn_apis.get_output_model_file(ir_file_path)
    assert osp.exists(param_path)
    assert osp.exists(bin_path)


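# The fill value is -inf, which is likely the interesting case for ONNX
# export; the test only checks that the rewritten op runs under onnxruntime
# and returns an output.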
@backend_checker(Backend.ONNXRUNTIME)
@pytest.mark.parametrize(
    'input',
    [torch.rand(1, 16, 16), torch.rand(1, 3, 16, 16)])
def test_masked_fill_onnxruntime(input):
    mask = input > 0
    value = float('-inf')

    def masked_fill_caller(*arg, **kwargs):
        return torch.masked_fill(*arg, **kwargs)

    deploy_cfg_ort = mmcv.Config(
        dict(
            onnx_config=dict(input_shape=None),
            backend_config=dict(type='onnxruntime'),
            codebase_config=dict(type='mmdet', task='ObjectDetection')))

    wrapped_func = WrapFunction(masked_fill_caller, mask=mask, value=value)
    rewrite_output, _ = get_rewrite_outputs(
        wrapped_func,
        model_inputs={'input': input},
        deploy_cfg=deploy_cfg_ort,
        run_with_backend=True)
    assert rewrite_output is not None


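# Slice assignment (tensor setitem) should be exported without ScatterND
# nodes; the exported ONNX graph is inspected node by node to confirm this.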
@backend_checker(Backend.ONNXRUNTIME)
@pytest.mark.skipif(
    parse(torch.__version__) < parse('1.9.0'), reason='requires torch>=1.9.0')
@pytest.mark.parametrize('x', [torch.rand(1, 3, 16, 16)])
@pytest.mark.parametrize('y', [torch.rand(1, 3, 4, 4)])
def test_tensor_setitem(x, y):
    import onnx

    from mmdeploy.utils.test import get_onnx_model

    def setitem_slice(x, y):
        H, W = y.shape[2:]
        x[:, :, 2:H + 2, 2:W + 2] = y
        return x

    wrapped_func = WrapFunction(setitem_slice)
    model_inputs = {'x': x, 'y': y}

    deploy_cfg = mmcv.Config(
        dict(
            onnx_config=dict(input_shape=None),
            backend_config=dict(type='onnxruntime'),
            codebase_config=dict(type='mmdet', task='ObjectDetection')))
    ir_file_path = get_onnx_model(wrapped_func, model_inputs, deploy_cfg)

    onnx_model = onnx.load(ir_file_path)
    nodes = onnx_model.graph.node
    for node in nodes:
        assert node.op_type != 'ScatterND'