[Improvement] Better unit test. (#1619)
* update test for mmcls and mmdet
* update det3d mmedit mmocr mmpose mmrotate
* update mmseg
* bug fixing
* refactor ops
* rename variable
* remove comment
parent 5de0ecfcaf
commit d8e4a78636
csrc/mmdeploy/backend_ops/tensorrt/roi_align
mmdeploy
codebase
mmcls/models/utils
mmdet3d/core/bbox
core/rewriters
utils
tests
test_apis
test_backend
test_codebase
test_mmcv
test_ops
test_pytorch
test_utils
@@ -202,13 +202,13 @@ nvinfer1::IPluginV2 *TRTRoIAlignCreator::createPlugin(
if (field_name.compare("mode") == 0) {
int data_size = fc->fields[i].length;
const char *data_start = static_cast<const char *>(fc->fields[i].data);
std::string poolModeStr(data_start, data_size);
if (poolModeStr == "avg") {
std::string pool_mode_str(data_start);
if (pool_mode_str == "avg") {
poolMode = 1;
} else if (poolModeStr == "max") {
} else if (pool_mode_str == "max") {
poolMode = 0;
} else {
std::cout << "Unknown pool mode \"" << poolModeStr << "\"." << std::endl;
std::cout << "Unknown pool mode \"" << pool_mode_str << "\"." << std::endl;
}
ASSERT(poolMode >= 0);
}

@@ -143,7 +143,6 @@ def shift_window_msa__forward__default(ctx, self, query, hw_shape):
'mmcls.models.utils.ShiftWindowMSA.get_attn_mask',
extra_checkers=LibVersionChecker('mmcls', min_version='0.21.0'))
def shift_window_msa__get_attn_mask__default(ctx,
self,
hw_shape,
window_size,
shift_size,

@@ -8,7 +8,7 @@ from mmdeploy.core import FUNCTION_REWRITER

@FUNCTION_REWRITER.register_rewriter(
'mmdet3d.core.bbox.coders.fcos3d_bbox_coder.FCOS3DBBoxCoder.decode_yaw')
def decode_yaw(ctx, self, bbox, centers2d, dir_cls, dir_offset, cam2img):
def decode_yaw(ctx, bbox, centers2d, dir_cls, dir_offset, cam2img):
"""Decode yaw angle and change it from local to global.i. Rewrite this func
to use slice instead of the original operation.
Args:

@@ -1,4 +1,5 @@
# Copyright (c) OpenMMLab. All rights reserved.
import inspect
from typing import (Any, Callable, Dict, List, MutableSequence, Optional,
Tuple, Union)

@@ -72,7 +73,16 @@ def _set_func(origin_func_path: str,
rewrite_func,
ignore_refs=ignore_refs,
ignore_keys=ignore_keys)
exec(f'{origin_func_path} = rewrite_func')

is_static_method = False
if method_class:
origin_type = inspect.getattr_static(module_or_class, split_path[-1])
is_static_method = isinstance(origin_type, staticmethod)

if is_static_method:
exec(f'{origin_func_path} = staticmethod(rewrite_func)')
else:
exec(f'{origin_func_path} = rewrite_func')


def _del_func(path: str):
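The hunk above special-cases static methods: a plain function assigned onto a class would get bound on instance access, so a rewritten staticmethod has to be rewrapped before assignment. A minimal standalone sketch of the same idea (the Foo/replacement names are illustrative, not from the repo):

import inspect

class Foo:
    @staticmethod
    def bar(x):
        return x

def replacement(x):
    return x + 1

# inspect.getattr_static returns the raw descriptor, so the staticmethod
# wrapper is still visible here (a normal getattr would unwrap it).
is_static = isinstance(inspect.getattr_static(Foo, 'bar'), staticmethod)

# Rewrap before assignment; otherwise instance access (Foo().bar) would
# bind the instance as an unwanted first argument.
Foo.bar = staticmethod(replacement) if is_static else replacement

assert Foo.bar(1) == 2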
@@ -326,6 +326,29 @@ class RewriterRegistry:

return decorator

def remove_record(self, object: Any, filter_cb: Optional[Callable] = None):
"""Remove record.

Args:
object (Any): The object to remove.
filter_cb (Callable): Check if the object need to be remove.
Defaults to None.
"""
key_to_pop = []
for key, records in self._rewrite_records.items():
for rec in records:
if rec['_object'] == object:
if filter_cb is not None:
if filter_cb(rec):
continue
key_to_pop.append((key, rec))

for key, rec in key_to_pop:
records = self._rewrite_records[key]
records.remove(rec)
if len(records) == 0:
self._rewrite_records.pop(key)


class ContextCaller:
"""A callable object used in RewriteContext.

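To make the semantics of the new remove_record concrete, here is a self-contained toy that mimics the _rewrite_records structure and the filter-callback behaviour; the dictionary contents are invented for illustration only:

from typing import Any, Callable, Optional

# Toy stand-in for RewriterRegistry._rewrite_records: drop every record
# whose '_object' matches, unless the filter callback vetoes the removal.
records = {
    'torch.add': [{'_object': 'rewrite_a', 'backend': 'default'}],
    'torch.cat': [{'_object': 'rewrite_a', 'backend': 'tensorrt'},
                  {'_object': 'rewrite_b', 'backend': 'default'}],
}

def remove_record(obj: Any, filter_cb: Optional[Callable] = None):
    to_pop = [(key, rec) for key, recs in records.items() for rec in recs
              if rec['_object'] == obj and not (filter_cb and filter_cb(rec))]
    for key, rec in to_pop:
        records[key].remove(rec)
        if not records[key]:
            # Drop the key entirely once its record list is empty.
            records.pop(key)

# Keep the TensorRT-specific record, remove the other 'rewrite_a' entry.
remove_record('rewrite_a', filter_cb=lambda rec: rec['backend'] == 'tensorrt')
assert 'torch.add' not in records and len(records['torch.cat']) == 2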
@@ -17,6 +17,11 @@ from mmdeploy.core import RewriterContext, patch_model
from mmdeploy.utils import (IR, Backend, get_backend, get_dynamic_axes,
get_ir_config, get_onnx_config)

try:
from torch.testing import assert_close as torch_assert_close
except Exception:
from torch.testing import assert_allclose as torch_assert_close


def backend_checker(backend: Backend, require_plugin: bool = False):
"""A decorator which checks if a backend is available.

@@ -189,12 +194,6 @@ class SwitchBackendWrapper:
self._recover_class = recover_class

def __enter__(self):
return self

def __exit__(self, type, value, trace):
self.recover()

def set(self, **kwargs):
"""Replace attributes in backend wrappers with dummy items."""
obj = self._recover_class
self.init = obj.__init__

@@ -203,10 +202,9 @@ class SwitchBackendWrapper:
obj.__init__ = SwitchBackendWrapper.BackendWrapper.__init__
obj.forward = SwitchBackendWrapper.BackendWrapper.forward
obj.__call__ = SwitchBackendWrapper.BackendWrapper.__call__
for k, v in kwargs.items():
setattr(obj, k, v)
return self

def recover(self):
def __exit__(self, type, value, trace):
"""Recover to original class."""
assert self.init is not None and \
self.forward is not None,\

@@ -216,6 +214,11 @@ class SwitchBackendWrapper:
obj.forward = self.forward
obj.__call__ = self.call

def set(self, **kwargs):
obj = self._recover_class
for k, v in kwargs.items():
setattr(obj, k, v)


def assert_allclose(expected: List[Union[torch.Tensor, np.ndarray]],
actual: List[Union[torch.Tensor, np.ndarray]],

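With __enter__/__exit__ in place, tests can scope the dummy backend with a with block instead of pairing set() with a manual recover() call; the test diffs later in this commit use exactly this shape. A condensed sketch (the canned outputs are placeholders):

import torch
from mmdeploy.backend.onnxruntime import ORTWrapper
from mmdeploy.utils.test import SwitchBackendWrapper

with SwitchBackendWrapper(ORTWrapper) as wrapper:
    # Patch the wrapper class with canned outputs for the duration of the
    # block; __exit__ restores the original class even if a test fails.
    wrapper.set(outputs={'output': torch.rand(1, 1000)})
    ...  # build the backend model and run assertions here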
@@ -239,8 +242,7 @@ def assert_allclose(expected: List[Union[torch.Tensor, np.ndarray]],
if isinstance(actual[i], (list, np.ndarray)):
actual[i] = torch.tensor(actual[i])
try:
torch.testing.assert_allclose(
actual[i], expected[i], rtol=1e-03, atol=1e-05)
torch_assert_close(actual[i], expected[i], rtol=1e-03, atol=1e-05)
except AssertionError as error:
if tolerate_small_mismatch:
assert '(0.00%)' in str(error), str(error)

@@ -417,6 +419,19 @@ def get_backend_outputs(ir_file_path: str,
if backend == Backend.TENSORRT:
device = 'cuda'
model_inputs = dict((k, v.cuda()) for k, v in model_inputs.items())
input_shapes = dict(
(k, dict(min_shape=v.shape, max_shape=v.shape, opt_shape=v.shape))
for k, v in model_inputs.items())
model_inputs_cfg = deploy_cfg['backend_config'].get(
'model_inputs', [dict(input_shapes=input_shapes)])
if len(model_inputs_cfg) < 1:
model_inputs_cfg = [dict(input_shapes=input_shapes)]

if 'input_shapes' not in model_inputs_cfg[0]:
model_inputs_cfg[0]['input_shapes'] = input_shapes

deploy_cfg['backend_config']['model_inputs'] = model_inputs_cfg

elif backend == Backend.OPENVINO:
input_info = {
name: value.shape

@@ -4,6 +4,7 @@ import tempfile
from multiprocessing import Process

import mmcv
import pytest

from mmdeploy.apis import create_calib_input_data

@@ -11,7 +12,8 @@ calib_file = tempfile.NamedTemporaryFile(suffix='.h5').name
ann_file = 'tests/data/annotation.json'


def get_end2end_deploy_cfg():
@pytest.fixture
def deploy_cfg():
deploy_cfg = mmcv.Config(
dict(
onnx_config=dict(

@@ -53,14 +55,15 @@ def get_end2end_deploy_cfg():
return deploy_cfg


def get_partition_deploy_cfg():
deploy_cfg = get_end2end_deploy_cfg()
@pytest.fixture
def partition_deploy_cfg(deploy_cfg):
deploy_cfg._cfg_dict['partition_config'] = dict(
type='two_stage', apply_marks=True)
return deploy_cfg


def get_model_cfg():
@pytest.fixture
def model_cfg():
dataset_type = 'CustomDataset'
data_root = 'tests/data/'
img_norm_cfg = dict(

@@ -169,10 +172,8 @@ def get_model_cfg():
return model_cfg


def run_test_create_calib_end2end():
def run_test_create_calib_end2end(deploy_cfg, model_cfg):
import h5py
model_cfg = get_model_cfg()
deploy_cfg = get_end2end_deploy_cfg()
create_calib_input_data(
calib_file,
deploy_cfg,

@@ -194,18 +195,19 @@ def run_test_create_calib_end2end():
# new process.


def test_create_calib_end2end():
p = Process(target=run_test_create_calib_end2end)
def test_create_calib_end2end(deploy_cfg, model_cfg):
p = Process(
target=run_test_create_calib_end2end,
kwargs=dict(deploy_cfg=deploy_cfg, model_cfg=model_cfg))
try:
p.start()
finally:
p.join()


def run_test_create_calib_parittion():
def run_test_create_calib_parittion(partition_deploy_cfg, model_cfg):
import h5py
model_cfg = get_model_cfg()
deploy_cfg = get_partition_deploy_cfg()
deploy_cfg = partition_deploy_cfg
create_calib_input_data(
calib_file,
deploy_cfg,

@@ -227,8 +229,11 @@ def run_test_create_calib_parittion():
assert calib_data[partition_name][input_names[i]]['0'] is not None


def test_create_calib_parittion():
p = Process(target=run_test_create_calib_parittion)
def test_create_calib_parittion(partition_deploy_cfg, model_cfg):
p = Process(
target=run_test_create_calib_parittion,
kwargs=dict(
partition_deploy_cfg=partition_deploy_cfg, model_cfg=model_cfg))
try:
p.start()
finally:

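The calibration tests now take their configs from fixtures and hand them to the worker process through kwargs, since fixtures only exist in the parent test process. A minimal sketch of that pattern with a hypothetical worker function and a stand-in config:

from multiprocessing import Process

import pytest


@pytest.fixture
def deploy_cfg():
    return {'backend_config': {'type': 'onnxruntime'}}  # stand-in config


def worker(deploy_cfg):
    # Runs in a fresh process; everything it needs must be passed in.
    assert deploy_cfg['backend_config']['type'] == 'onnxruntime'


def test_worker(deploy_cfg):
    p = Process(target=worker, kwargs=dict(deploy_cfg=deploy_cfg))
    try:
        p.start()
    finally:
        p.join()
    assert p.exitcode == 0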
@@ -208,6 +208,7 @@ def run_wrapper(backend, wrapper, input):
ALL_BACKEND = list(Backend)
ALL_BACKEND.remove(Backend.DEFAULT)
ALL_BACKEND.remove(Backend.PYTORCH)
ALL_BACKEND.remove(Backend.SNPE)
ALL_BACKEND.remove(Backend.SDK)

@@ -0,0 +1,19 @@
# Copyright (c) OpenMMLab. All rights reserved.
import pytest

from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Codebase


def pytest_ignore_collect(*args, **kwargs):
import importlib
return importlib.util.find_spec('mmcls') is None


@pytest.fixture(autouse=True, scope='package')
def import_all_modules():
codebase = Codebase.MMCLS
try:
import_codebase(codebase)
except ImportError:
pytest.skip(f'{codebase} is not installed.', allow_module_level=True)

@@ -1,7 +1,6 @@
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os
from tempfile import NamedTemporaryFile, TemporaryDirectory
from typing import Any

import mmcv

@@ -9,41 +8,50 @@ import numpy as np
import pytest
import torch

import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.apis import build_task_processor
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Codebase, load_config
from mmdeploy.utils import load_config
from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper

try:
import_codebase(Codebase.MMCLS)
except ImportError:
pytest.skip(f'{Codebase.MMCLS} is not installed.', allow_module_level=True)

model_cfg_path = 'tests/test_codebase/test_mmcls/data/model.py'
model_cfg = load_config(model_cfg_path)[0]
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type='onnxruntime'),
codebase_config=dict(type='mmcls', task='Classification'),
onnx_config=dict(
type='onnx',
export_params=True,
keep_initializers_as_inputs=False,
opset_version=11,
input_shape=None,
input_names=['input'],
output_names=['output'])))

onnx_file = NamedTemporaryFile(suffix='.onnx').name
task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')

@pytest.fixture(scope='module')
def model_cfg():
return load_config(model_cfg_path)[0]


@pytest.fixture(scope='module')
def deploy_cfg():
return mmcv.Config(
dict(
backend_config=dict(type='onnxruntime'),
codebase_config=dict(type='mmcls', task='Classification'),
onnx_config=dict(
type='onnx',
export_params=True,
keep_initializers_as_inputs=False,
opset_version=11,
input_shape=None,
input_names=['input'],
output_names=['output'])))


img_shape = (64, 64)
num_classes = 1000
img = np.random.rand(*img_shape, 3)


@pytest.fixture(scope='module')
def task_processor(model_cfg, deploy_cfg):
return build_task_processor(model_cfg, deploy_cfg, 'cpu')


@pytest.fixture(scope='module')
def img():
return np.random.rand(*img_shape, 3)


@pytest.mark.parametrize('from_mmrazor', [True, False, '123', 0])
def test_init_pytorch_model(from_mmrazor: Any):
def test_init_pytorch_model(from_mmrazor: Any, task_processor, deploy_cfg):
from mmcls.models.classifiers.base import BaseClassifier
if from_mmrazor is False:
_task_processor = task_processor

@@ -73,58 +81,57 @@ def test_init_pytorch_model(from_mmrazor: Any):
assert isinstance(model, BaseClassifier)


@pytest.fixture
def backend_model():
@pytest.fixture(scope='module')
def backend_model(task_processor):
from mmdeploy.backend.onnxruntime import ORTWrapper
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
wrapper = SwitchBackendWrapper(ORTWrapper)
wrapper.set(outputs={
'output': torch.rand(1, num_classes),
})
with SwitchBackendWrapper(ORTWrapper) as wrapper:
wrapper.set(outputs={
'output': torch.rand(1, num_classes),
})

yield task_processor.init_backend_model([''])

wrapper.recover()
yield task_processor.init_backend_model([''])


def test_init_backend_model(backend_model):
assert isinstance(backend_model, torch.nn.Module)


def test_create_input():
inputs = task_processor.create_input(img, input_shape=img_shape)
@pytest.fixture(scope='module')
def model_inputs(task_processor, img):
return task_processor.create_input(img, input_shape=img_shape)


def test_create_input(model_inputs):
inputs = model_inputs
assert isinstance(inputs, tuple) and len(inputs) == 2


def test_run_inference(backend_model):
input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
def test_run_inference(task_processor, backend_model, model_inputs):
input_dict, _ = model_inputs
results = task_processor.run_inference(backend_model, input_dict)
assert results is not None


def test_visualize(backend_model):
input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
def test_visualize(task_processor, backend_model, tmp_path, img, model_inputs):
input_dict, _ = model_inputs
results = task_processor.run_inference(backend_model, input_dict)
with TemporaryDirectory() as dir:
filename = dir + 'tmp.jpg'
task_processor.visualize(backend_model, img, results[0], filename, '')
assert os.path.exists(filename)
filename = str(tmp_path / 'tmp.jpg')
task_processor.visualize(backend_model, img, results[0], filename, '')
assert os.path.exists(filename)


def test_get_tensor_from_input():
def test_get_tensor_from_input(task_processor):
input_data = {'img': torch.ones(3, 4, 5)}
inputs = task_processor.get_tensor_from_input(input_data)
assert torch.equal(inputs, torch.ones(3, 4, 5))


def test_get_partition_cfg():
try:
_ = task_processor.get_partition_cfg(partition_type='')
except NotImplementedError:
pass
def test_get_partition_cfg(task_processor):
with pytest.raises(NotImplementedError):
task_processor.get_partition_cfg(partition_type='')


def test_build_dataset_and_dataloader():
def test_build_dataset_and_dataloader(task_processor, model_cfg):
from torch.utils.data import DataLoader, Dataset
dataset = task_processor.build_dataset(
dataset_cfg=model_cfg, dataset_type='test')

@@ -133,7 +140,7 @@ def test_build_dataset_and_dataloader():
assert isinstance(dataloader, DataLoader), 'Failed to build dataloader'


def test_single_gpu_test_and_evaluate():
def test_single_gpu_test_and_evaluate(task_processor, model_cfg):
from mmcv.parallel import MMDataParallel
dataset = task_processor.build_dataset(
dataset_cfg=model_cfg, dataset_type='test')

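The pattern applied throughout the file above: module-level globals built at import time become module-scoped fixtures that are created lazily and shared across the module's tests. A stripped-down sketch with placeholder values (not taken from the repo):

import pytest


@pytest.fixture(scope='module')
def model_cfg():
    # Built once per test module, only when a test actually requests it.
    return {'data': {'test': {'type': 'CustomDataset'}}}


@pytest.fixture(scope='module')
def task_processor(model_cfg):
    # Fixtures can depend on other fixtures instead of module globals.
    return {'model_cfg': model_cfg, 'device': 'cpu'}  # stand-in object


def test_task_processor_sees_cfg(task_processor, model_cfg):
    assert task_processor['model_cfg'] is model_cfg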
@@ -1,78 +1,66 @@
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from tempfile import NamedTemporaryFile

import mmcv
import numpy as np
import pytest
import torch

import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase
from mmdeploy.utils import Backend
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker

NUM_CLASS = 1000
IMAGE_SIZE = 64

try:
import_codebase(Codebase.MMCLS)
except ImportError:
pytest.skip(f'{Codebase.MMCLS} is not installed.', allow_module_level=True)


@backend_checker(Backend.ONNXRUNTIME)
class TestEnd2EndModel:

@classmethod
def setup_class(cls):
@pytest.fixture(scope='class')
def end2end_model(self):
# force add backend wrapper regardless of plugins
from mmdeploy.backend.onnxruntime import ORTWrapper
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

# simplify backend inference
cls.wrapper = SwitchBackendWrapper(ORTWrapper)
cls.outputs = {
'outputs': torch.rand(1, 1, IMAGE_SIZE, IMAGE_SIZE),
}
cls.wrapper.set(outputs=cls.outputs)
deploy_cfg = mmcv.Config(
{'onnx_config': {
'output_names': ['outputs']
}})
with SwitchBackendWrapper(ORTWrapper) as wrapper:
outputs = {
'outputs': torch.rand(1, 1, IMAGE_SIZE, IMAGE_SIZE),
}
wrapper.set(outputs=outputs)
deploy_cfg = mmcv.Config(
{'onnx_config': {
'output_names': ['outputs']
}})

from mmdeploy.codebase.mmcls.deploy.classification_model import \
End2EndModel
class_names = ['' for i in range(NUM_CLASS)]
cls.end2end_model = End2EndModel(
Backend.ONNXRUNTIME, [''],
device='cpu',
class_names=class_names,
deploy_cfg=deploy_cfg)
from mmdeploy.codebase.mmcls.deploy.classification_model import \
End2EndModel
class_names = ['' for i in range(NUM_CLASS)]
model = End2EndModel(
Backend.ONNXRUNTIME, [''],
device='cpu',
class_names=class_names,
deploy_cfg=deploy_cfg)
yield model

@classmethod
def teardown_class(cls):
cls.wrapper.recover()

def test_forward(self):
def test_forward(self, end2end_model):
imgs = [torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE)]
results = self.end2end_model.forward(imgs)
results = end2end_model.forward(imgs)
assert results is not None, 'failed to get output using '\
'End2EndModel'

def test_forward_test(self):
def test_forward_test(self, end2end_model):
imgs = torch.rand(2, 3, IMAGE_SIZE, IMAGE_SIZE)
results = self.end2end_model.forward_test(imgs)
results = end2end_model.forward_test(imgs)
assert isinstance(results[0], np.ndarray)

def test_show_result(self):
def test_show_result(self, end2end_model, tmp_path):
input_img = np.zeros([IMAGE_SIZE, IMAGE_SIZE, 3])
img_path = NamedTemporaryFile(suffix='.jpg').name
img_path = str(tmp_path / 'tmp.jpg')

pred_label = torch.randint(0, NUM_CLASS, (1, ))
pred_score = torch.rand((1, ))
result = {'pred_label': pred_label, 'pred_score': float(pred_score)}
self.end2end_model.show_result(
end2end_model.show_result(
input_img, result, '', show=False, out_file=img_path)
assert osp.exists(img_path), 'Fails to create drawn image.'

@@ -80,44 +68,43 @@ class TestEnd2EndModel:
@backend_checker(Backend.RKNN)
class TestRKNNEnd2EndModel:

@classmethod
def setup_class(cls):
@pytest.fixture(scope='class')
def end2end_model(self):
# force add backend wrapper regardless of plugins
import mmdeploy.backend.rknn as rknn_apis
from mmdeploy.backend.rknn import RKNNWrapper
rknn_apis.__dict__.update({'RKNNWrapper': RKNNWrapper})

# simplify backend inference
cls.wrapper = SwitchBackendWrapper(RKNNWrapper)
cls.outputs = [torch.rand(1, 1, IMAGE_SIZE, IMAGE_SIZE)]
cls.wrapper.set(outputs=cls.outputs)
deploy_cfg = mmcv.Config({
'onnx_config': {
'output_names': ['outputs']
},
'backend_config': {
'common_config': {}
}
})
with SwitchBackendWrapper(RKNNWrapper) as wrapper:
outputs = [torch.rand(1, 1, IMAGE_SIZE, IMAGE_SIZE)]
wrapper.set(outputs=outputs)
deploy_cfg = mmcv.Config({
'onnx_config': {
'output_names': ['outputs']
},
'backend_config': {
'common_config': {}
}
})

from mmdeploy.codebase.mmcls.deploy.classification_model import \
RKNNEnd2EndModel
class_names = ['' for i in range(NUM_CLASS)]
cls.end2end_model = RKNNEnd2EndModel(
Backend.RKNN, [''],
device='cpu',
class_names=class_names,
deploy_cfg=deploy_cfg)
from mmdeploy.codebase.mmcls.deploy.classification_model import \
RKNNEnd2EndModel
class_names = ['' for i in range(NUM_CLASS)]
model = RKNNEnd2EndModel(
Backend.RKNN, [''],
device='cpu',
class_names=class_names,
deploy_cfg=deploy_cfg)
yield model

def test_forward_test(self):
def test_forward_test(self, end2end_model):
imgs = torch.rand(2, 3, IMAGE_SIZE, IMAGE_SIZE)
results = self.end2end_model.forward_test(imgs)
results = end2end_model.forward_test(imgs)
assert isinstance(results[0], np.ndarray)


@pytest.mark.parametrize('from_file', [True, False])
@pytest.mark.parametrize('data_type', ['train', 'val', 'test'])
def test_get_classes_from_config(from_file, data_type):
def test_get_classes_from_config(from_file, data_type, tmp_path):
from mmcls.datasets import DATASETS

from mmdeploy.codebase.mmcls.deploy.classification_model import \

@@ -136,7 +123,7 @@ def test_get_classes_from_config(from_file, data_type):
})

if from_file:
config_path = NamedTemporaryFile(suffix='.py').name
config_path = str(tmp_path / 'tmp.py')
with open(config_path, 'w') as file:
file.write(data_cfg.pretty_text)
data_cfg = config_path

@@ -157,7 +144,6 @@ def test_build_classification_model():
codebase_config=dict(type='mmcls')))

from mmdeploy.backend.onnxruntime import ORTWrapper
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

# simplify backend inference
with SwitchBackendWrapper(ORTWrapper) as wrapper:

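The same refactor in class form: setup_class/teardown_class plus cls. state is replaced by a class-scoped yield fixture, so setup and teardown live in one place and the test methods receive the model as an argument. A schematic version (DummyBackendModel is illustrative):

import pytest


class DummyBackendModel:

    def forward(self, x):
        return x


class TestEnd2EndModel:

    @pytest.fixture(scope='class')
    def end2end_model(self):
        # Setup: created once for the whole class...
        model = DummyBackendModel()
        yield model
        # ...teardown: runs after the last test in the class.

    def test_forward(self, end2end_model):
        assert end2end_model.forward(1) == 1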
@@ -4,28 +4,25 @@ import numpy as np
import pytest
import torch

from mmdeploy.codebase import import_codebase
from mmdeploy.core import RewriterContext
from mmdeploy.utils import Backend, Codebase
from mmdeploy.utils import Backend
from mmdeploy.utils.test import WrapModel, check_backend, get_rewrite_outputs

try:
import_codebase(Codebase.MMCLS)
except ImportError:
pytest.skip(f'{Codebase.MMCLS} is not installed.', allow_module_level=True)

input = torch.rand(1)


def get_invertedresidual_model():
@pytest.fixture(scope='module')
def invertedresidual_model():
from mmcls.models.backbones.shufflenet_v2 import InvertedResidual
model = InvertedResidual(16, 16)

model.requires_grad_(False)
model.eval()
return model


def get_vit_model():
@pytest.fixture(scope='module')
def vit_model():
from mmcls.models.classifiers.image import ImageClassifier
model = ImageClassifier(
backbone={

@@ -58,6 +55,7 @@ def get_vit_model():
},
)
model.requires_grad_(False)
model.eval()

return model

@@ -115,10 +113,11 @@ def test_multilabel_cls_head():
@pytest.mark.parametrize(
'backend_type',
[Backend.ONNXRUNTIME, Backend.TENSORRT, Backend.NCNN, Backend.OPENVINO])
def test_shufflenetv2_backbone__forward(backend_type: Backend):
def test_shufflenetv2_backbone__forward(backend_type: Backend,
invertedresidual_model):

check_backend(backend_type, True)
model = get_invertedresidual_model()
model = invertedresidual_model
model.cpu().eval()
if backend_type.value == 'tensorrt':
deploy_cfg = mmcv.Config(

@@ -163,11 +162,11 @@ def test_shufflenetv2_backbone__forward(backend_type: Backend):

@pytest.mark.parametrize('backend_type', [Backend.NCNN])
def test_vision_transformer_backbone__forward(backend_type: Backend):
def test_vision_transformer_backbone__forward(backend_type: Backend,
vit_model):

check_backend(backend_type, True)
model = get_vit_model()
model.eval()
model = vit_model.eval()

deploy_cfg = mmcv.Config(
dict(

@@ -0,0 +1,19 @@
# Copyright (c) OpenMMLab. All rights reserved.
import pytest

from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Codebase


def pytest_ignore_collect(*args, **kwargs):
import importlib
return importlib.util.find_spec('mmdet') is None


@pytest.fixture(autouse=True, scope='package')
def import_all_modules():
codebase = Codebase.MMDET
try:
import_codebase(codebase)
except ImportError:
pytest.skip(f'{codebase} is not installed.', allow_module_level=True)

@@ -6,17 +6,16 @@ import numpy as np
import pytest
import torch

from mmdeploy.codebase import import_codebase
from mmdeploy.core.rewriters.rewriter_manager import RewriterContext
from mmdeploy.utils import Backend, Codebase
from mmdeploy.utils import Backend
from mmdeploy.utils.test import (WrapFunction, WrapModel, backend_checker,
check_backend, get_onnx_model,
get_rewrite_outputs)

try:
import_codebase(Codebase.MMDET)
except ImportError:
pytest.skip(f'{Codebase.MMDET} is not installed.', allow_module_level=True)
from torch.testing import assert_close as torch_assert_close
except Exception:
from torch.testing import assert_allclose as torch_assert_close


@backend_checker(Backend.TENSORRT)

@@ -75,10 +74,7 @@ def test_multiclass_nms_static():

@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
@pytest.mark.parametrize('add_ctr_clamp', [True, False])
@pytest.mark.parametrize('clip_border,max_shape',
[(False, None), (True, torch.tensor([100, 200]))])
def test_delta2bbox(backend_type: Backend, add_ctr_clamp: bool,
clip_border: bool, max_shape: tuple):
def test_delta2bbox(backend_type: Backend, add_ctr_clamp: bool):
check_backend(backend_type)
deploy_cfg = mmcv.Config(
dict(

@@ -319,7 +315,7 @@ def test__anchorgenerator__single_level_grid_priors():
# test forward
with RewriterContext({}, backend_type):
wrap_output = wrapped_func(x)
torch.testing.assert_allclose(output, wrap_output)
torch_assert_close(output, wrap_output)

onnx_prefix = tempfile.NamedTemporaryFile().name

File diff suppressed because it is too large

@@ -1,17 +1,8 @@
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import pytest
import torch

from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Codebase

try:
import_codebase(Codebase.MMDET)
except ImportError:
pytest.skip(f'{Codebase.MMDET} is not installed.', allow_module_level=True)

from mmdeploy.codebase.mmdet.deploy import (clip_bboxes,
get_post_processing_params,
pad_with_value,

@@ -45,22 +36,20 @@ def test_pad_with_value_if_necessary():
assert np.allclose(padded_x.sum(), x.sum(), rtol=1e-03, atol=1e-05)


config_with_mmdet_params = mmcv.Config(
dict(
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=-1,
keep_top_k=100,
background_label_id=-1,
))))


def test_get_mmdet_params():
config_with_mmdet_params = mmcv.Config(
dict(
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=-1,
keep_top_k=100,
background_label_id=-1,
))))
assert get_post_processing_params(config_with_mmdet_params) == dict(
score_threshold=0.05,
iou_threshold=0.5,

@@ -1,7 +1,6 @@
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os
from tempfile import NamedTemporaryFile, TemporaryDirectory
from typing import Any

import mmcv

@@ -11,50 +10,62 @@ import torch
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset

import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.apis import build_task_processor
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Codebase, load_config
from mmdeploy.utils import load_config
from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper

try:
import_codebase(Codebase.MMDET)
except ImportError:
pytest.skip(f'{Codebase.MMDET} is not installed.', allow_module_level=True)

model_cfg_path = 'tests/test_codebase/test_mmdet/data/model.py'
model_cfg = load_config(model_cfg_path)[0]
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type='onnxruntime'),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
confidence_threshold=0.005, # for YOLOv3
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=5000,
keep_top_k=100,
background_label_id=-1,
)),
onnx_config=dict(
type='onnx',
export_params=True,
keep_initializers_as_inputs=False,
opset_version=11,
input_shape=None,
input_names=['input'],
output_names=['dets', 'labels'])))
onnx_file = NamedTemporaryFile(suffix='.onnx').name
task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
img_shape = (32, 32)
img = np.random.rand(*img_shape, 3)


@pytest.fixture(scope='module')
def model_cfg():
return load_config(model_cfg_path)[0]


@pytest.fixture(scope='module')
def deploy_cfg():
return mmcv.Config(
dict(
backend_config=dict(type='onnxruntime'),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
confidence_threshold=0.005, # for YOLOv3
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=5000,
keep_top_k=100,
background_label_id=-1,
)),
onnx_config=dict(
type='onnx',
export_params=True,
keep_initializers_as_inputs=False,
opset_version=11,
input_shape=None,
input_names=['input'],
output_names=['dets', 'labels'])))


@pytest.fixture(scope='module')
def task_processor(model_cfg, deploy_cfg):
return build_task_processor(model_cfg, deploy_cfg, 'cpu')


@pytest.fixture(scope='module')
def img_shape():
return (32, 32)


@pytest.fixture(scope='module')
def img(img_shape):
return np.random.rand(*img_shape, 3)


@pytest.mark.parametrize('from_mmrazor', [True, False, '123', 0])
def test_init_pytorch_model(from_mmrazor: Any):
def test_init_pytorch_model(from_mmrazor: Any, deploy_cfg, task_processor):
from mmdet.models import BaseDetector
if from_mmrazor is False:
_task_processor = task_processor

@@ -84,19 +95,16 @@ def test_init_pytorch_model(from_mmrazor: Any):
assert isinstance(model, BaseDetector)


@pytest.fixture
def backend_model():
@pytest.fixture(scope='module')
def backend_model(task_processor):
from mmdeploy.backend.onnxruntime import ORTWrapper
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
wrapper = SwitchBackendWrapper(ORTWrapper)
wrapper.set(outputs={
'dets': torch.rand(1, 10, 5),
'labels': torch.rand(1, 10)
})
with SwitchBackendWrapper(ORTWrapper) as wrapper:
wrapper.set(outputs={
'dets': torch.rand(1, 10, 5),
'labels': torch.rand(1, 10)
})

yield task_processor.init_backend_model([''])

wrapper.recover()
yield task_processor.init_backend_model([''])


def test_init_backend_model(backend_model):

@@ -122,20 +130,25 @@ def test_can_postprocess_masks():
f'did not match actual shape {actual_shape}.'


@pytest.fixture(scope='module')
def model_inputs(task_processor, img):
return task_processor.create_input(img, input_shape=img.shape[:2])


@pytest.mark.parametrize('device', ['cpu', 'cuda:0'])
def test_create_input(device):
def test_create_input(device, task_processor, model_inputs):
if device == 'cuda:0' and not torch.cuda.is_available():
pytest.skip('cuda is not available')
original_device = task_processor.device
task_processor.device = device
inputs = task_processor.create_input(img, input_shape=img_shape)
inputs = model_inputs
assert len(inputs) == 2
task_processor.device = original_device


def test_run_inference(backend_model):
def test_run_inference(backend_model, task_processor, model_inputs):
torch_model = task_processor.init_pytorch_model(None)
input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
input_dict, _ = model_inputs
torch_results = task_processor.run_inference(torch_model, input_dict)
backend_results = task_processor.run_inference(backend_model, input_dict)
assert torch_results is not None

@@ -143,18 +156,17 @@ def test_run_inference(backend_model):
assert len(torch_results[0]) == len(backend_results[0])


def test_visualize(backend_model):
input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
def test_visualize(backend_model, task_processor, img, tmp_path, model_inputs):
input_dict, _ = model_inputs
results = task_processor.run_inference(backend_model, input_dict)
with TemporaryDirectory() as dir:
filename = dir + 'tmp.jpg'
task_processor.visualize(backend_model, img, results[0], filename, '')
assert os.path.exists(filename)
filename = str(tmp_path / 'tmp.jpg')
task_processor.visualize(backend_model, img, results[0], filename, '')
assert os.path.exists(filename)


@pytest.mark.parametrize('partition_type', ['single_stage', 'two_stage'])
# Currently only mmdet implements get_partition_cfg
def test_get_partition_cfg(partition_type):
def test_get_partition_cfg(partition_type, task_processor):
from mmdeploy.codebase.mmdet.deploy.model_partition_cfg import \
MMDET_PARTITION_CFG
partition_cfg = task_processor.get_partition_cfg(

@@ -162,13 +174,13 @@ def test_get_partition_cfg(partition_type):
assert partition_cfg == MMDET_PARTITION_CFG[partition_type]


def test_get_tensort_from_input():
def test_get_tensort_from_input(task_processor):
input_data = {'img': [torch.ones(3, 4, 5)]}
inputs = task_processor.get_tensor_from_input(input_data)
assert torch.equal(inputs, torch.ones(3, 4, 5))


def test_build_dataset_and_dataloader():
def test_build_dataset_and_dataloader(model_cfg, task_processor):
dataset = task_processor.build_dataset(
dataset_cfg=model_cfg, dataset_type='test')
assert isinstance(dataset, Dataset), 'Failed to build dataset'

@@ -176,7 +188,7 @@ def test_build_dataset_and_dataloader():
assert isinstance(dataloader, DataLoader), 'Failed to build dataloader'


def test_single_gpu_test_and_evaluate():
def test_single_gpu_test_and_evaluate(model_cfg, task_processor, tmp_path):
from mmcv.parallel import MMDataParallel

class DummyDataset(Dataset):

@@ -203,6 +215,6 @@ def test_single_gpu_test_and_evaluate():
# Run test
outputs = task_processor.single_gpu_test(model, dataloader)
assert isinstance(outputs, list)
output_file = NamedTemporaryFile(suffix='.pkl').name
output_file = str(tmp_path / 'tmp.pkl')
task_processor.evaluate_outputs(
model_cfg, outputs, dataset, 'bbox', out=output_file, format_only=True)

@@ -1,6 +1,5 @@
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from tempfile import NamedTemporaryFile
from typing import Sequence

import mmcv

@@ -8,18 +7,10 @@ import numpy as np
import pytest
import torch

import mmdeploy.backend.ncnn as ncnn_apis
import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker

try:
import_codebase(Codebase.MMDET)
except ImportError:
pytest.skip(f'{Codebase.MMDET} is not installed.', allow_module_level=True)

from mmdeploy.codebase.mmdet.deploy.object_detection_model import End2EndModel
from mmdeploy.utils import Backend
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker


def assert_det_results(results, module_name: str = 'model'):

@@ -43,35 +34,31 @@ def assert_forward_results(results, module_name: str = 'model'):
@backend_checker(Backend.ONNXRUNTIME)
class TestEnd2EndModel:

@classmethod
def setup_class(cls):
@pytest.fixture(scope='class')
def end2end_model(self):
# force add backend wrapper regardless of plugins
# make sure ONNXRuntimeDetector can use ORTWrapper inside itself
from mmdeploy.backend.onnxruntime import ORTWrapper
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

# simplify backend inference
cls.wrapper = SwitchBackendWrapper(ORTWrapper)
cls.outputs = {
'dets': torch.rand(1, 10, 5),
'labels': torch.rand(1, 10)
}
cls.wrapper.set(outputs=cls.outputs)
deploy_cfg = mmcv.Config(
{'onnx_config': {
'output_names': ['dets', 'labels']
}})
with SwitchBackendWrapper(ORTWrapper) as wrapper:
outputs = {
'dets': torch.rand(1, 10, 5),
'labels': torch.rand(1, 10)
}
wrapper.set(outputs=outputs)
deploy_cfg = mmcv.Config(
{'onnx_config': {
'output_names': ['dets', 'labels']
}})

from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
End2EndModel
cls.end2end_model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu',
['' for i in range(80)], deploy_cfg)
from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
End2EndModel
model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu',
['' for i in range(80)], deploy_cfg)
yield model

@classmethod
def teardown_class(cls):
cls.wrapper.recover()

def test_forward(self):
def test_forward(self, end2end_model):
imgs = [torch.rand(1, 3, 64, 64)]
img_metas = [[{
'ori_shape': [64, 64, 3],

@@ -79,15 +66,15 @@ class TestEnd2EndModel:
'scale_factor': [1, 1, 1, 1],
'border': [0, 0, 0]
}]]
results = self.end2end_model.forward(imgs, img_metas)
results = end2end_model.forward(imgs, img_metas)
assert_forward_results(results, 'End2EndModel')

def test_show_result(self):
def test_show_result(self, end2end_model, tmp_path):
input_img = np.zeros([64, 64, 3])
img_path = NamedTemporaryFile(suffix='.jpg').name
img_path = str(tmp_path / 'tmp.jpg')

result = (torch.rand(1, 10, 5), torch.rand(1, 10))
self.end2end_model.show_result(
end2end_model.show_result(
input_img, result, '', show=False, out_file=img_path)
assert osp.exists(img_path)

@@ -95,55 +82,52 @@ class TestEnd2EndModel:
@backend_checker(Backend.ONNXRUNTIME)
class TestMaskEnd2EndModel:

@classmethod
def setup_class(cls):
@pytest.fixture(scope='class')
def end2end_model(self):
# force add backend wrapper regardless of plugins
# make sure ONNXRuntimeDetector can use ORTWrapper inside itself
from mmdeploy.backend.onnxruntime import ORTWrapper
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

# simplify backend inference
num_classes = 80
num_dets = 10
cls.wrapper = SwitchBackendWrapper(ORTWrapper)
cls.outputs = {
'dets': torch.rand(1, num_dets, 5),
'labels': torch.randint(num_classes, (1, num_dets)),
'masks': torch.rand(1, num_dets, 28, 28)
}
cls.wrapper.set(outputs=cls.outputs)
deploy_cfg = mmcv.Config({
'onnx_config': {
'output_names': ['dets', 'labels', 'masks']
},
'codebase_config': {
'post_processing': {
'export_postprocess_mask': False
}
with SwitchBackendWrapper(ORTWrapper) as wrapper:
outputs = {
'dets': torch.rand(1, num_dets, 5),
'labels': torch.randint(num_classes, (1, num_dets)),
'masks': torch.rand(1, num_dets, 28, 28)
}
})
wrapper.set(outputs=outputs)
deploy_cfg = mmcv.Config({
'onnx_config': {
'output_names': ['dets', 'labels', 'masks']
},
'codebase_config': {
'post_processing': {
'export_postprocess_mask': False
}
}
})

from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
End2EndModel
cls.end2end_model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu',
['' for i in range(80)], deploy_cfg)
from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
End2EndModel
model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu',
['' for i in range(80)], deploy_cfg)
yield model

@classmethod
def teardown_class(cls):
cls.wrapper.recover()

def test_forward(self):
def test_forward(self, end2end_model):
imgs = [torch.rand(1, 3, 64, 64)]
img_metas = [[{
'ori_shape': [64, 64, 3],
'img_shape': [64, 64, 3],
'scale_factor': [1, 1, 1, 1],
}]]
results = self.end2end_model.forward(imgs, img_metas)
results = end2end_model.forward(imgs, img_metas)
assert_forward_results(results, 'mask End2EndModel')


def get_test_cfg_and_post_processing():
@pytest.fixture(scope='module')
def cfg_and_post_processing():
test_cfg = {
'nms_pre': 100,
'min_bbox_size': 0,

@@ -168,61 +152,57 @@ def get_test_cfg_and_post_processing():
@backend_checker(Backend.ONNXRUNTIME)
class TestPartitionSingleStageModel:

@classmethod
def setup_class(cls):
@pytest.fixture(scope='class')
def model(self, cfg_and_post_processing):
# force add backend wrapper regardless of plugins
# make sure ONNXRuntimeDetector can use ORTWrapper inside itself
from mmdeploy.backend.onnxruntime import ORTWrapper
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

# simplify backend inference
cls.wrapper = SwitchBackendWrapper(ORTWrapper)
cls.outputs = {
'scores': torch.rand(1, 10, 80),
'boxes': torch.rand(1, 10, 4)
}
cls.wrapper.set(outputs=cls.outputs)
with SwitchBackendWrapper(ORTWrapper) as wrapper:
outputs = {
'scores': torch.rand(1, 10, 80),
'boxes': torch.rand(1, 10, 4)
}
wrapper.set(outputs=outputs)

test_cfg, post_processing = get_test_cfg_and_post_processing()
model_cfg = mmcv.Config(dict(model=dict(test_cfg=test_cfg)))
deploy_cfg = mmcv.Config(
dict(codebase_config=dict(post_processing=post_processing)))
test_cfg, post_processing = cfg_and_post_processing
model_cfg = mmcv.Config(dict(model=dict(test_cfg=test_cfg)))
deploy_cfg = mmcv.Config(
dict(codebase_config=dict(post_processing=post_processing)))

from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
PartitionSingleStageModel
cls.model = PartitionSingleStageModel(
Backend.ONNXRUNTIME, [''],
'cpu', ['' for i in range(80)],
model_cfg=model_cfg,
deploy_cfg=deploy_cfg)
from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
PartitionSingleStageModel
model_ = PartitionSingleStageModel(
Backend.ONNXRUNTIME, [''],
'cpu', ['' for i in range(80)],
model_cfg=model_cfg,
deploy_cfg=deploy_cfg)
yield model_

@classmethod
def teardown_class(cls):
cls.wrapper.recover()

def test_forward_test(self):
def test_forward_test(self, model):
imgs = [torch.rand(1, 3, 64, 64)]
img_metas = [[{
'ori_shape': [64, 64, 3],
'img_shape': [64, 64, 3],
'scale_factor': [1, 1, 1, 1],
}]]
results = self.model.forward_test(imgs, img_metas)
results = model.forward_test(imgs, img_metas)
assert_det_results(results, 'PartitionSingleStageModel')

def test_postprocess(self):
def test_postprocess(self, model):
scores = torch.rand(1, 120, 80)
bboxes = torch.rand(1, 120, 4)

results = self.model.partition0_postprocess(
scores=scores, bboxes=bboxes)
results = model.partition0_postprocess(scores=scores, bboxes=bboxes)
assert_det_results(
results, '.partition0_postprocess of'
'PartitionSingleStageModel')


def prepare_model_deploy_cfgs():
test_cfg, post_processing = get_test_cfg_and_post_processing()
@pytest.fixture(scope='module')
def model_deploy_cfgs(cfg_and_post_processing):
test_cfg, post_processing = cfg_and_post_processing
bbox_roi_extractor = {
'type': 'SingleRoIExtractor',
'roi_layer': {

@@ -282,58 +262,56 @@ class DummyWrapper(torch.nn.Module):
@backend_checker(Backend.ONNXRUNTIME)
class TestPartitionTwoStageModel:

@classmethod
def setup_class(cls):
@pytest.fixture(scope='class')
def model(self, model_deploy_cfgs):
# force add backend wrapper regardless of plugins
# make sure ONNXRuntimeDetector can use ORTWrapper inside itself
from mmdeploy.backend.onnxruntime import ORTWrapper
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

# simplify backend inference
cls.wrapper = SwitchBackendWrapper(ORTWrapper)
outputs = [
np.random.rand(1, 12, 80).astype(np.float32),
np.random.rand(1, 12, 4).astype(np.float32),
] * 2
with SwitchBackendWrapper(ORTWrapper) as wrapper:
outputs = [
np.random.rand(1, 12, 80).astype(np.float32),
np.random.rand(1, 12, 4).astype(np.float32),
] * 2

model_cfg, deploy_cfg = prepare_model_deploy_cfgs()
model_cfg, deploy_cfg = model_deploy_cfgs

cls.wrapper.set(
outputs=outputs, model_cfg=model_cfg, deploy_cfg=deploy_cfg)
wrapper.set(
outputs=outputs, model_cfg=model_cfg, deploy_cfg=deploy_cfg)

# replace original function in PartitionTwoStageModel
from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
PartitionTwoStageModel
# replace original function in PartitionTwoStageModel
from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
PartitionTwoStageModel

cls.model = PartitionTwoStageModel(
Backend.ONNXRUNTIME, ['', ''],
'cpu', ['' for i in range(80)],
model_cfg=model_cfg,
deploy_cfg=deploy_cfg)
feats = [torch.randn(1, 8, 14, 14) for i in range(5)]
scores = torch.rand(1, 10, 1)
bboxes = torch.rand(1, 10, 4)
bboxes[..., 2:4] = 2 * bboxes[..., :2]
model_ = PartitionTwoStageModel(
Backend.ONNXRUNTIME, ['', ''],
'cpu', ['' for i in range(80)],
model_cfg=model_cfg,
deploy_cfg=deploy_cfg)
feats = [torch.randn(1, 8, 14, 14) for i in range(5)]
scores = torch.rand(1, 10, 1)
bboxes = torch.rand(1, 10, 4)
bboxes[..., 2:4] = 2 * bboxes[..., :2]

cls_score = torch.rand(10, 81)
bbox_pred = torch.rand(10, 320)
cls_score = torch.rand(10, 81)
bbox_pred = torch.rand(10, 320)

cls.model.device = 'cpu'
cls.model.CLASSES = ['' for i in range(80)]
cls.model.first_wrapper = DummyWrapper([*feats, scores, bboxes])
cls.model.second_wrapper = DummyWrapper([cls_score, bbox_pred])
model_.device = 'cpu'
model_.CLASSES = ['' for i in range(80)]
model_.first_wrapper = DummyWrapper([*feats, scores, bboxes])
model_.second_wrapper = DummyWrapper([cls_score, bbox_pred])

@classmethod
def teardown_class(cls):
cls.wrapper.recover()
yield model_

def test_postprocess(self):
def test_postprocess(self, model):
feats = [torch.randn(1, 8, 14, 14) for i in range(5)]
scores = torch.rand(1, 50, 1)
bboxes = torch.rand(1, 50, 4)
bboxes[..., 2:4] = 2 * bboxes[..., :2]

results = self.model.partition0_postprocess(
results = model.partition0_postprocess(
x=feats, scores=scores, bboxes=bboxes)
assert results is not None, 'failed to get output using '\
'partition0_postprocess of PartitionTwoStageDetector'

@@ -348,7 +326,7 @@ class TestPartitionTwoStageModel:
'img_shape': [32, 32, 3],
'scale_factor': [1, 1, 1, 1],
}]]
results = self.model.partition1_postprocess(
results = model.partition1_postprocess(
rois=rois,
cls_score=cls_score,
bbox_pred=bbox_pred,

@@ -358,7 +336,7 @@ class TestPartitionTwoStageModel:
assert isinstance(results, tuple)
assert len(results) == 2

def test_forward(self):
def test_forward(self, model):

class DummyPTSDetector(torch.nn.Module):
"""A dummy wrapper for unit tests."""

@@ -373,12 +351,12 @@ class TestPartitionTwoStageModel:
return self.outputs1

import types
self.model.partition0_postprocess = types.MethodType(
DummyPTSDetector.partition0_postprocess, self.model)
self.model.partition1_postprocess = types.MethodType(
DummyPTSDetector.partition1_postprocess, self.model)
self.model.outputs0 = [torch.rand(2, 3)] * 2
self.model.outputs1 = [torch.rand(1, 9, 5), torch.rand(1, 9)]
model.partition0_postprocess = types.MethodType(
DummyPTSDetector.partition0_postprocess, model)
model.partition1_postprocess = types.MethodType(
DummyPTSDetector.partition1_postprocess, model)
model.outputs0 = [torch.rand(2, 3)] * 2
model.outputs1 = [torch.rand(1, 9, 5), torch.rand(1, 9)]

imgs = [torch.rand(1, 3, 32, 32)]
img_metas = [[{

@@ -386,7 +364,7 @@ class TestPartitionTwoStageModel:
'img_shape': [32, 32, 3],
'scale_factor': [1, 1, 1, 1],
}]]
results = self.model.forward(imgs, img_metas)
results = model.forward(imgs, img_metas)
assert_forward_results(results, 'PartitionTwoStageModel')


@@ -447,8 +425,8 @@ class TestGetClassesFromCfg:

@backend_checker(Backend.ONNXRUNTIME)
@pytest.mark.parametrize('partition_type', [None, 'end2end'])
def test_build_object_detection_model(partition_type):
_, post_processing = get_test_cfg_and_post_processing()
def test_build_object_detection_model(partition_type, cfg_and_post_processing):
_, post_processing = cfg_and_post_processing
model_cfg = mmcv.Config(dict(data=dict(test={'type': 'CocoDataset'})))
deploy_cfg = mmcv.Config(
dict(

@@ -463,7 +441,6 @@ def test_build_object_detection_model(partition_type):
partition_cfg=[dict(output_names=[])])

from mmdeploy.backend.onnxruntime import ORTWrapper
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

# simplify backend inference
with SwitchBackendWrapper(ORTWrapper) as wrapper:

@@ -478,120 +455,114 @@ def test_build_object_detection_model(partition_type):
@backend_checker(Backend.NCNN)
class TestNCNNEnd2EndModel:

@classmethod
def setup_class(cls):
@pytest.fixture(scope='class', params=[10, 0])
def end2end_model(self, request):
# force add backend wrapper regardless of plugins
from mmdeploy.backend.ncnn import NCNNWrapper
ncnn_apis.__dict__.update({'NCNNWrapper': NCNNWrapper})

# simplify backend inference
cls.wrapper = SwitchBackendWrapper(NCNNWrapper)
cls.outputs = {
'output': torch.rand(1, 10, 6),
}
cls.wrapper.set(outputs=cls.outputs)
deploy_cfg = mmcv.Config({'onnx_config': {'output_names': ['output']}})
model_cfg = mmcv.Config({})
with SwitchBackendWrapper(NCNNWrapper) as wrapper:
param = request.param
outputs = {
'output': torch.rand(1, param, 6),
}
wrapper.set(outputs=outputs)
deploy_cfg = mmcv.Config(
{'onnx_config': {
'output_names': ['output']
}})
model_cfg = mmcv.Config({})

from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
NCNNEnd2EndModel
cls.ncnn_end2end_model = NCNNEnd2EndModel(Backend.NCNN, ['', ''],
from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
NCNNEnd2EndModel
ncnn_end2end_model = NCNNEnd2EndModel(Backend.NCNN, ['', ''],
'cpu',
['' for i in range(80)],
model_cfg, deploy_cfg)
yield ncnn_end2end_model

@classmethod
def teardown_class(cls):
cls.wrapper.recover()

@pytest.mark.parametrize('num_det', [10, 0])
def test_forward_test(self, num_det):
self.outputs = {
'output': torch.rand(1, num_det, 6),
}
def test_forward_test(self, end2end_model):
imgs = torch.rand(1, 3, 64, 64)
results = self.ncnn_end2end_model.forward_test(imgs)
results = end2end_model.forward_test(imgs)
assert_det_results(results, 'NCNNEnd2EndModel')

@backend_checker(Backend.RKNN)
|
||||
class TestRKNNModel:
|
||||
|
||||
@classmethod
|
||||
def setup_class(cls):
|
||||
@pytest.fixture(scope='class')
|
||||
def end2end_model(self):
|
||||
# force add backend wrapper regardless of plugins
|
||||
import mmdeploy.backend.rknn as rknn_apis
|
||||
from mmdeploy.backend.rknn import RKNNWrapper
|
||||
rknn_apis.__dict__.update({'RKNNWrapper': RKNNWrapper})
|
||||
|
||||
# simplify backend inference
|
||||
cls.wrapper = SwitchBackendWrapper(RKNNWrapper)
|
||||
cls.outputs = [
|
||||
torch.rand(1, 255, 5, 5),
|
||||
torch.rand(1, 255, 10, 10),
|
||||
torch.rand(1, 255, 20, 20)
|
||||
]
|
||||
cls.wrapper.set(outputs=cls.outputs)
|
||||
deploy_cfg = mmcv.Config({
|
||||
'onnx_config': {
|
||||
'output_names': ['output']
|
||||
},
|
||||
'backend_config': {
|
||||
'common_config': {}
|
||||
}
|
||||
})
|
||||
model_cfg = mmcv.Config(
|
||||
dict(
|
||||
model=dict(
|
||||
bbox_head=dict(
|
||||
type='YOLOV3Head',
|
||||
num_classes=80,
|
||||
in_channels=[512, 256, 128],
|
||||
out_channels=[1024, 512, 256],
|
||||
anchor_generator=dict(
|
||||
type='YOLOAnchorGenerator',
|
||||
base_sizes=[[(116, 90), (156, 198), (
|
||||
373, 326)], [(30, 61), (62, 45), (
|
||||
59, 119)], [(10, 13), (16, 30), (33, 23)]],
|
||||
strides=[32, 16, 8]),
|
||||
bbox_coder=dict(type='YOLOBBoxCoder'),
|
||||
featmap_strides=[32, 16, 8],
|
||||
loss_cls=dict(
|
||||
type='CrossEntropyLoss',
|
||||
use_sigmoid=True,
|
||||
loss_weight=1.0,
|
||||
reduction='sum'),
|
||||
loss_conf=dict(
|
||||
type='CrossEntropyLoss',
|
||||
use_sigmoid=True,
|
||||
loss_weight=1.0,
|
||||
reduction='sum'),
|
||||
loss_xy=dict(
|
||||
type='CrossEntropyLoss',
|
||||
use_sigmoid=True,
|
||||
loss_weight=2.0,
|
||||
reduction='sum'),
|
||||
loss_wh=dict(
|
||||
type='MSELoss', loss_weight=2.0, reduction='sum')),
|
||||
test_cfg=dict(
|
||||
nms_pre=1000,
|
||||
min_bbox_size=0,
|
||||
score_thr=0.05,
|
||||
conf_thr=0.005,
|
||||
nms=dict(type='nms', iou_threshold=0.45),
|
||||
max_per_img=100))))
|
||||
with SwitchBackendWrapper(RKNNWrapper) as wrapper:
|
||||
outputs = [
|
||||
torch.rand(1, 255, 5, 5),
|
||||
torch.rand(1, 255, 10, 10),
|
||||
torch.rand(1, 255, 20, 20)
|
||||
]
|
||||
wrapper.set(outputs=outputs)
|
||||
deploy_cfg = mmcv.Config({
|
||||
'onnx_config': {
|
||||
'output_names': ['output']
|
||||
},
|
||||
'backend_config': {
|
||||
'common_config': {}
|
||||
}
|
||||
})
|
||||
model_cfg = mmcv.Config(
|
||||
dict(
|
||||
model=dict(
|
||||
bbox_head=dict(
|
||||
type='YOLOV3Head',
|
||||
num_classes=80,
|
||||
in_channels=[512, 256, 128],
|
||||
out_channels=[1024, 512, 256],
|
||||
anchor_generator=dict(
|
||||
type='YOLOAnchorGenerator',
|
||||
base_sizes=[[(116, 90), (156, 198), (
|
||||
373, 326)], [(30, 61), (62, 45), (
|
||||
59,
|
||||
119)], [(10, 13), (16, 30), (33, 23)]],
|
||||
strides=[32, 16, 8]),
|
||||
bbox_coder=dict(type='YOLOBBoxCoder'),
|
||||
featmap_strides=[32, 16, 8],
|
||||
loss_cls=dict(
|
||||
type='CrossEntropyLoss',
|
||||
use_sigmoid=True,
|
||||
loss_weight=1.0,
|
||||
reduction='sum'),
|
||||
loss_conf=dict(
|
||||
type='CrossEntropyLoss',
|
||||
use_sigmoid=True,
|
||||
loss_weight=1.0,
|
||||
reduction='sum'),
|
||||
loss_xy=dict(
|
||||
type='CrossEntropyLoss',
|
||||
use_sigmoid=True,
|
||||
loss_weight=2.0,
|
||||
reduction='sum'),
|
||||
loss_wh=dict(
|
||||
type='MSELoss',
|
||||
loss_weight=2.0,
|
||||
reduction='sum')),
|
||||
test_cfg=dict(
|
||||
nms_pre=1000,
|
||||
min_bbox_size=0,
|
||||
score_thr=0.05,
|
||||
conf_thr=0.005,
|
||||
nms=dict(type='nms', iou_threshold=0.45),
|
||||
max_per_img=100))))
|
||||
|
||||
from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
|
||||
RKNNModel
|
||||
cls.rknn_model = RKNNModel(Backend.RKNN, ['', ''], 'cpu',
|
||||
from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
|
||||
RKNNModel
|
||||
rknn_model = RKNNModel(Backend.RKNN, ['', ''], 'cpu',
|
||||
['' for i in range(80)], model_cfg,
|
||||
deploy_cfg)
|
||||
return rknn_model
|
||||
|
||||
@classmethod
|
||||
def teardown_class(cls):
|
||||
cls.wrapper.recover()
|
||||
|
||||
def test_forward_test(self):
|
||||
def test_forward_test(self, end2end_model):
|
||||
imgs = torch.rand(1, 3, 64, 64)
|
||||
results = self.rknn_model.forward_test(imgs)
|
||||
results = end2end_model.forward_test(imgs)
|
||||
assert_det_results(results, 'RKNNWrapper')
|
||||
|
|
|
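Note: the hunks above replace setup_class/teardown_class pairs with class-scoped pytest fixtures that yield from inside a SwitchBackendWrapper context manager, so the backend wrapper is restored automatically. A minimal sketch of that pattern, with illustrative names and a plain outputs dict standing in for the real backend model (not code from this commit):

import pytest
import torch

from mmdeploy.utils import Backend
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker


@backend_checker(Backend.ONNXRUNTIME)
class TestEnd2EndFixtureSketch:

    @pytest.fixture(scope='class', params=[10, 0])
    def end2end_model(self, request):
        # Import inside the fixture so collection works without the backend.
        from mmdeploy.backend.onnxruntime import ORTWrapper

        # The context manager restores the real wrapper once the class is
        # done, replacing the old setup_class/teardown_class bookkeeping.
        with SwitchBackendWrapper(ORTWrapper) as wrapper:
            outputs = {'output': torch.rand(1, request.param, 6)}
            wrapper.set(outputs=outputs)
            yield outputs  # the real tests build and yield a backend model here

    def test_num_detections(self, end2end_model):
        # The parametrized fixture runs this once per entry in `params`.
        assert end2end_model['output'].shape[1] in (10, 0)

Because the fixture carries the parameters (params=[10, 0]), the per-test @pytest.mark.parametrize('num_det', ...) from the old version is no longer needed.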
@@ -0,0 +1,19 @@
# Copyright (c) OpenMMLab. All rights reserved.
import pytest

from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Codebase


def pytest_ignore_collect(*args, **kwargs):
import importlib
return importlib.util.find_spec('mmdet3d') is None


@pytest.fixture(autouse=True, scope='package')
def import_all_modules():
codebase = Codebase.MMDET3D
try:
import_codebase(codebase)
except ImportError:
pytest.skip(f'{codebase} is not installed.', allow_module_level=True)
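Note: with this new package-level conftest.py, individual test modules no longer need their own try/except around import_codebase. A small sketch of what a module in the same package can look like after the change (file name and test body are hypothetical):

# tests/test_codebase/test_mmdet3d/test_example.py  (hypothetical module)
import pytest

from mmdeploy.utils import Backend
from mmdeploy.utils.test import check_backend


@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_example(backend_type: Backend):
    # No import_codebase boilerplate here: the package-scoped autouse
    # fixture in conftest.py has already imported mmdet3d or skipped the
    # package, and pytest_ignore_collect keeps collection from failing
    # when mmdet3d is not installed at all.
    check_backend(backend_type, True)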
@@ -1,25 +1,28 @@
# Copyright (c) OpenMMLab. All rights reserved.
import copy

import mmcv
import numpy as np
import pytest
import torch

from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase, Task, load_config
from mmdeploy.utils.test import WrapModel, check_backend, get_rewrite_outputs

try:
import_codebase(Codebase.MMDET3D)
except ImportError:
pytest.skip(
f'{Codebase.MMDET3D} is not installed.', allow_module_level=True)
model_cfg = load_config(
'tests/test_codebase/test_mmdet3d/data/model_cfg.py')[0]
monodet_model_cfg = load_config(
'tests/test_codebase/test_mmdet3d/data/monodet_model_cfg.py')[0]

@pytest.fixture(scope='module')
def model_cfg():
return load_config('tests/test_codebase/test_mmdet3d/data/model_cfg.py')[0]


def get_pillar_encoder():
@pytest.fixture(scope='module')
def monodet_model_cfg():
return load_config(
'tests/test_codebase/test_mmdet3d/data/monodet_model_cfg.py')[0]


@pytest.fixture
def pillar_encoder():
from mmdet3d.models.voxel_encoders import PillarFeatureNet
model = PillarFeatureNet(
in_channels=4,

@@ -32,21 +35,23 @@ def get_pillar_encoder():
norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
mode='max')
model.requires_grad_(False)
model.cpu().eval()
return model


def get_pointpillars_scatter():
@pytest.fixture
def pointpillars_scatter():
from mmdet3d.models.middle_encoders import PointPillarsScatter
model = PointPillarsScatter(in_channels=64, output_shape=(16, 16))
model.requires_grad_(False)
model.cpu().eval()
return model


@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_pillar_encoder(backend_type: Backend):
def test_pillar_encoder(backend_type: Backend, pillar_encoder):
check_backend(backend_type, True)
model = get_pillar_encoder()
model.cpu().eval()
model = pillar_encoder

deploy_cfg = mmcv.Config(
dict(

@@ -81,10 +86,9 @@ def test_pillar_encoder(backend_type: Backend):

@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_pointpillars_scatter(backend_type: Backend):
def test_pointpillars_scatter(backend_type: Backend, pointpillars_scatter):
check_backend(backend_type, True)
model = get_pointpillars_scatter()
model.cpu().eval()
model = pointpillars_scatter

deploy_cfg = mmcv.Config(
dict(

@@ -113,30 +117,22 @@ def test_pointpillars_scatter(backend_type: Backend):
model_output.shape, rewrite_output.shape, rtol=1e-03, atol=1e-03)


def get_centerpoint():
@pytest.fixture
def centerpoint(model_cfg):
from mmdet3d.models.detectors.centerpoint import CenterPoint

model = CenterPoint(**model_cfg.centerpoint_model)
model.requires_grad_(False)
model.cpu().eval()
return model


def get_centerpoint_head():
from mmdet3d.models import builder
model_cfg.centerpoint_model.pts_bbox_head.test_cfg = model_cfg.\
centerpoint_model.test_cfg
head = builder.build_head(model_cfg.centerpoint_model.pts_bbox_head)
head.requires_grad_(False)
return head


@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_centerpoint(backend_type: Backend):
def test_centerpoint(backend_type: Backend, model_cfg, centerpoint):
from mmdeploy.codebase.mmdet3d.deploy.voxel_detection import VoxelDetection
from mmdeploy.core import RewriterContext
check_backend(backend_type, True)
model = get_centerpoint()
model.cpu().eval()
model = centerpoint
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),

@@ -159,21 +155,22 @@ def test_centerpoint(backend_type: Backend):
assert rewrite_outputs is not None


def get_pointpillars_nus():
@pytest.fixture
def pointpillars_nus(model_cfg):
from mmdet3d.models.detectors import MVXFasterRCNN

model = MVXFasterRCNN(**model_cfg.pointpillars_nus_model)
model.requires_grad_(False)
model.cpu().eval()
return model


@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_pointpillars_nus(backend_type: Backend):
def test_pointpillars_nus(backend_type: Backend, model_cfg, pointpillars_nus):
from mmdeploy.codebase.mmdet3d.deploy.voxel_detection import VoxelDetection
from mmdeploy.core import RewriterContext
check_backend(backend_type, True)
model = get_pointpillars_nus()
model.cpu().eval()
model = pointpillars_nus
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),

@@ -196,22 +193,24 @@ def test_pointpillars_nus(backend_type: Backend):
assert outputs is not None


def get_fcos3d():
@pytest.fixture
def fcos3d(monodet_model_cfg):
from mmdet3d.models.detectors import FCOSMono3D
monodet_model_cfg.model.pop('type')
model = FCOSMono3D(**monodet_model_cfg.model)
cfg = copy.deepcopy(monodet_model_cfg)
cfg.model.pop('type')
model = FCOSMono3D(**cfg.model)
model.requires_grad_(False)
model.cpu().eval()
return model


@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_fcos3d(backend_type: Backend):
def test_fcos3d(backend_type: Backend, monodet_model_cfg, fcos3d):
from mmdeploy.codebase.mmdet3d.deploy.monocular_detection import \
MonocularDetection
from mmdeploy.core import RewriterContext
check_backend(backend_type, True)
model = get_fcos3d()
model.cpu().eval()
model = fcos3d
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
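Note: the recurring change in this file is turning get_*() model helpers into pytest fixtures, so the model is built and put into eval mode once and injected by argument name. A generic sketch of the conversion with a stand-in model (not one of the mmdet3d modules above):

import pytest
import torch


class TinyModel(torch.nn.Module):
    # Stand-in for PillarFeatureNet / PointPillarsScatter / etc.
    def forward(self, x):
        return x * 2


# Before: a plain helper the test had to call explicitly.
def get_tiny_model():
    model = TinyModel()
    model.requires_grad_(False)
    model.cpu().eval()
    return model


# After: a fixture; pytest builds it and injects it by argument name.
@pytest.fixture
def tiny_model():
    model = TinyModel()
    model.requires_grad_(False)
    model.cpu().eval()
    return model


def test_tiny_model(tiny_model):
    out = tiny_model(torch.ones(2))
    assert torch.allclose(out, torch.full((2,), 2.0))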
@@ -1,6 +1,5 @@
# Copyright (c) OpenMMLab. All rights reserved.
import os
from tempfile import NamedTemporaryFile, TemporaryDirectory

import mmcv
import pytest

@@ -8,70 +7,85 @@ import torch
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset

import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.apis import build_task_processor
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Codebase, load_config
from mmdeploy.utils import load_config
from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper

try:
import_codebase(Codebase.MMDET3D)
except ImportError:
pytest.skip(
f'{Codebase.MMDET3D} is not installed.', allow_module_level=True)

model_cfg_path = 'tests/test_codebase/test_mmdet3d/data/monodet_model_cfg.py'
img_path = 'tests/test_codebase/test_mmdet3d/data/nuscenes/' \
@pytest.fixture(scope='module')
def model_cfg_path():
return 'tests/test_codebase/test_mmdet3d/data/monodet_model_cfg.py'


@pytest.fixture(scope='module')
def img_path():
return 'tests/test_codebase/test_mmdet3d/data/nuscenes/' \
'n015-2018-07-24-11-22-45+0800__CAM_BACK__1532402927637525.jpg'
model_cfg = load_config(model_cfg_path)[0]
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type='onnxruntime'),
codebase_config=dict(
type='mmdet3d',
task='MonocularDetection',
ann_file='tests/test_codebase/test_mmdet3d/data/nuscenes/n015-2018'
'-07-24-11-22-45+0800__CAM_BACK__1532402927637525_mono3d.coco.json'
),
onnx_config=dict(
type='onnx',
export_params=True,
keep_initializers_as_inputs=False,
opset_version=11,
input_shape=None,
input_names=['img', 'cam2img', 'cam2img_inverse'],
output_names=['bboxes', 'scores', 'labels', 'dir_scores',
'attrs'])))
onnx_file = NamedTemporaryFile(suffix='.onnx').name
task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')


@pytest.fixture(scope='module')
def model_cfg(model_cfg_path):
return load_config(model_cfg_path)[0]


@pytest.fixture(scope='module')
def deploy_cfg():
return mmcv.Config(
dict(
backend_config=dict(type='onnxruntime'),
codebase_config=dict(
type='mmdet3d',
task='MonocularDetection',
ann_file='tests/test_codebase/test_mmdet3d/data' +
'/nuscenes/n015-2018' +
'-07-24-11-22-45+0800__CAM_BACK__1532402927637525_mono3d' +
'.coco.json'),
onnx_config=dict(
type='onnx',
export_params=True,
keep_initializers_as_inputs=False,
opset_version=11,
input_shape=None,
input_names=['img', 'cam2img', 'cam2img_inverse'],
output_names=[
'bboxes', 'scores', 'labels', 'dir_scores', 'attrs'
])))


@pytest.fixture(scope='module')
def task_processor(model_cfg, deploy_cfg):
return build_task_processor(model_cfg, deploy_cfg, 'cpu')


num_classes = 10
num_attr = 5
num_dets = 20


def test_init_pytorch_model():
@pytest.fixture(scope='module')
def torch_model(task_processor):
return task_processor.init_pytorch_model(None)


def test_init_pytorch_model(torch_model):
from mmdet3d.models import SingleStageMono3DDetector
model = task_processor.init_pytorch_model(None)
assert isinstance(model, SingleStageMono3DDetector)
assert isinstance(torch_model, SingleStageMono3DDetector)


@pytest.fixture
def backend_model():
@pytest.fixture(scope='module')
def backend_model(task_processor):
from mmdeploy.backend.onnxruntime import ORTWrapper
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
wrapper = SwitchBackendWrapper(ORTWrapper)
wrapper.set(
outputs={
'bboxes': torch.rand(1, num_dets, 9),
'scores': torch.rand(1, num_dets),
'labels': torch.randint(num_classes, (1, num_dets)),
'dir_scores': torch.randint(2, (1, num_dets)),
'attrs': torch.randint(num_attr, (1, num_dets))
})
with SwitchBackendWrapper(ORTWrapper) as wrapper:
wrapper.set(
outputs={
'bboxes': torch.rand(1, num_dets, 9),
'scores': torch.rand(1, num_dets),
'labels': torch.randint(num_classes, (1, num_dets)),
'dir_scores': torch.randint(2, (1, num_dets)),
'attrs': torch.randint(num_attr, (1, num_dets))
})

yield task_processor.init_backend_model([''])

wrapper.recover()
yield task_processor.init_backend_model([''])


def test_init_backend_model(backend_model):

@@ -80,23 +94,28 @@ def test_init_backend_model(backend_model):
assert isinstance(backend_model, MonocularDetectionModel)


@pytest.fixture(scope='module')
def model_inputs(task_processor, img_path):
return task_processor.create_input(img_path)


@pytest.mark.parametrize('device', ['cpu', 'cuda:0'])
def test_create_input(device):
def test_create_input(device, task_processor, model_inputs):
if device == 'cuda:0' and not torch.cuda.is_available():
pytest.skip('cuda is not available')
original_device = task_processor.device
task_processor.device = device
inputs = task_processor.create_input(img_path)
inputs = model_inputs
assert len(inputs) == 2
task_processor.device = original_device


@pytest.mark.skipif(
reason='Only support GPU test', condition=not torch.cuda.is_available())
def test_run_inference(backend_model):
def test_run_inference(backend_model, task_processor, torch_model,
model_inputs):
task_processor.device = 'cuda:0'
torch_model = task_processor.init_pytorch_model(None)
input_dict, _ = task_processor.create_input(img_path)
input_dict, _ = model_inputs
torch_results = task_processor.run_inference(torch_model, input_dict)
backend_results = task_processor.run_inference(backend_model, input_dict)
assert torch_results is not None

@@ -107,20 +126,19 @@ def test_run_inference(backend_model):

@pytest.mark.skipif(
reason='Only support GPU test', condition=not torch.cuda.is_available())
def test_visualize():
def test_visualize(task_processor, torch_model, model_inputs, img_path,
tmp_path):
task_processor.device = 'cuda:0'
input_dict, _ = task_processor.create_input(img_path)
torch_model = task_processor.init_pytorch_model(None)
input_dict, _ = model_inputs
results = task_processor.run_inference(torch_model, input_dict)
with TemporaryDirectory() as dir:
filename = dir + 'tmp.bin'
task_processor.visualize(torch_model, img_path, results[0], filename,
'test', False)
assert os.path.exists(filename)
filename = str(tmp_path / 'tmp.bin')
task_processor.visualize(torch_model, img_path, results[0], filename,
'test', False)
assert os.path.exists(filename)
task_processor.device = 'cpu'


def test_build_dataset_and_dataloader():
def test_build_dataset_and_dataloader(task_processor, model_cfg):
dataset = task_processor.build_dataset(
dataset_cfg=model_cfg, dataset_type='test')
assert isinstance(dataset, Dataset), 'Failed to build dataset'

@@ -130,7 +148,7 @@ def test_build_dataset_and_dataloader():

@pytest.mark.skipif(
reason='Only support GPU test', condition=not torch.cuda.is_available())
def test_single_gpu_test_and_evaluate():
def test_single_gpu_test_and_evaluate(task_processor, model_cfg, tmp_path):
from mmcv.parallel import MMDataParallel
task_processor.device = 'cuda:0'

@@ -158,7 +176,7 @@ def test_single_gpu_test_and_evaluate():
# Run test
outputs = task_processor.single_gpu_test(model, dataloader)
assert isinstance(outputs, list)
output_file = NamedTemporaryFile(suffix='.pkl').name
output_file = str(tmp_path / 'tmp.pkl')
task_processor.evaluate_outputs(
model_cfg, outputs, dataset, 'bbox', out=output_file, format_only=True)
task_processor.device = 'cpu'
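Note: this file moves module-level globals (model_cfg, deploy_cfg, task_processor) into module-scoped fixtures and swaps NamedTemporaryFile/TemporaryDirectory for pytest's built-in tmp_path. A reduced sketch of that shape, using plain dicts where the real code builds mmcv configs and a task processor (placeholder values, not from this commit):

import pytest


@pytest.fixture(scope='module')
def model_cfg():
    return {'dataset_type': 'test'}  # placeholder for the real loaded config


@pytest.fixture(scope='module')
def deploy_cfg():
    return {'backend_config': {'type': 'onnxruntime'}}  # placeholder config


@pytest.fixture(scope='module')
def task_processor(model_cfg, deploy_cfg):
    # Built once per module instead of at import time, so merely importing
    # the test file can no longer fail because a backend is missing.
    return {'model_cfg': model_cfg, 'deploy_cfg': deploy_cfg}


def test_evaluate_writes_output(task_processor, tmp_path):
    output_file = tmp_path / 'tmp.pkl'  # replaces NamedTemporaryFile
    output_file.write_bytes(b'results')
    assert output_file.exists()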
@@ -5,71 +5,59 @@ import mmcv
import pytest
import torch

import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker

try:
import_codebase(Codebase.MMDET3D)
except ImportError:
pytest.skip(
f'{Codebase.MMDET3D} is not installed.', allow_module_level=True)
from mmdeploy.codebase.mmdet3d.deploy.monocular_detection_model import (
MonocularDetectionModel, build_monocular_detection_model)
from mmdeploy.utils import Backend, Codebase
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker


@backend_checker(Backend.ONNXRUNTIME)
class TestMonocularDetectionModel:

@classmethod
def setup_class(cls):
@pytest.fixture(scope='class')
def end2end_model(self):
# force add backend wrapper regardless of plugins
from mmdeploy.backend.onnxruntime import ORTWrapper
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

# simplify backend inference
num_classes = 10
num_attr = 5
num_dets = 20
cls.wrapper = SwitchBackendWrapper(ORTWrapper)
cls.outputs = {
'bboxes': torch.rand(1, num_dets, 9),
'scores': torch.rand(1, num_dets),
'labels': torch.randint(num_classes, (1, num_dets)),
'dir_scores': torch.randint(2, (1, num_dets)),
'attrs': torch.randint(num_attr, (1, num_dets))
}
cls.wrapper.set(outputs=cls.outputs)
deploy_cfg = mmcv.Config({
'onnx_config': {
'input_names': ['img', 'cam2img', 'cam2img_inverse'],
'output_names':
['bboxes', 'scores', 'labels', 'dir_scores', 'attrs'],
'opset_version':
11
},
'backend_config': {
'type': 'tensorrt'
with SwitchBackendWrapper(ORTWrapper) as wrapper:
outputs = {
'bboxes': torch.rand(1, num_dets, 9),
'scores': torch.rand(1, num_dets),
'labels': torch.randint(num_classes, (1, num_dets)),
'dir_scores': torch.randint(2, (1, num_dets)),
'attrs': torch.randint(num_attr, (1, num_dets))
}
})
wrapper.set(outputs=outputs)
deploy_cfg = mmcv.Config({
'onnx_config': {
'input_names': ['img', 'cam2img', 'cam2img_inverse'],
'output_names':
['bboxes', 'scores', 'labels', 'dir_scores', 'attrs'],
'opset_version':
11
},
'backend_config': {
'type': 'tensorrt'
}
})

cls.end2end_model = MonocularDetectionModel(
Backend.ONNXRUNTIME,
[''],
device='cuda',
model_cfg=['' for i in range(10)],
deploy_cfg=deploy_cfg,
)

@classmethod
def teardown_class(cls):
cls.wrapper.recover()
model = MonocularDetectionModel(
Backend.ONNXRUNTIME,
[''],
device='cuda',
model_cfg=['' for i in range(10)],
deploy_cfg=deploy_cfg,
)
yield model

@pytest.mark.skipif(
reason='Only support GPU test',
condition=not torch.cuda.is_available())
def test_forward_and_show_result(self):
def test_forward_and_show_result(self, end2end_model, tmp_path):
from mmdet3d.core import Box3DMode
from mmdet3d.core.bbox.structures.box_3d_mode import \
CameraInstance3DBoxes

@@ -87,16 +75,15 @@ class TestMonocularDetectionModel:
Box3DMode.CAM,
}]]
data = dict(img=img, img_metas=img_metas)
results = self.end2end_model.forward(img, img_metas)
results = end2end_model.forward(img, img_metas)
assert results is not None
assert isinstance(results, list)
assert len(results) == 1
# assert results[0]['img_bbox']['scores_3d'].shape == 4
from tempfile import TemporaryDirectory
with TemporaryDirectory() as dir:
self.end2end_model.show_result(data, results,
osp.join(dir, 'backend_output'))
assert osp.exists(dir + '/backend_output')
dir = str(tmp_path)
end2end_model.show_result(data, results,
osp.join(dir, 'backend_output'))
assert osp.exists(dir + '/backend_output')


@backend_checker(Backend.ONNXRUNTIME)

@@ -112,7 +99,6 @@ def test_build_monocular_detection_model():
codebase_config=dict(type=Codebase.MMDET3D.value)))

from mmdeploy.backend.onnxruntime import ORTWrapper
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

# simplify backend inference
with SwitchBackendWrapper(ORTWrapper) as wrapper:
@@ -1,6 +1,5 @@
# Copyright (c) OpenMMLab. All rights reserved.
import os
from tempfile import NamedTemporaryFile, TemporaryDirectory

import mmcv
import pytest

@@ -8,58 +7,69 @@ import torch
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset

import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.apis import build_task_processor
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Codebase, load_config
from mmdeploy.utils import load_config
from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper

try:
import_codebase(Codebase.MMDET3D)
except ImportError:
pytest.skip(
f'{Codebase.MMDET3D} is not installed.', allow_module_level=True)

model_cfg_path = 'tests/test_codebase/test_mmdet3d/data/model_cfg.py'
pcd_path = 'tests/test_codebase/test_mmdet3d/data/kitti/kitti_000008.bin'
model_cfg = load_config(model_cfg_path)[0]
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type='onnxruntime'),
codebase_config=dict(type='mmdet3d', task='VoxelDetection'),
onnx_config=dict(
type='onnx',
export_params=True,
keep_initializers_as_inputs=False,
opset_version=11,
input_shape=None,
input_names=['voxels', 'num_points', 'coors'],
output_names=['bboxes', 'scores', 'labels'])))
onnx_file = NamedTemporaryFile(suffix='.onnx').name
task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
@pytest.fixture(scope='module')
def model_cfg_path():
return 'tests/test_codebase/test_mmdet3d/data/model_cfg.py'


def test_init_pytorch_model():
@pytest.fixture(scope='module')
def pcd_path():
return 'tests/test_codebase/test_mmdet3d/data/kitti/kitti_000008.bin'


@pytest.fixture(scope='module')
def model_cfg(model_cfg_path):
return load_config(model_cfg_path)[0]


@pytest.fixture(scope='module')
def deploy_cfg():
return mmcv.Config(
dict(
backend_config=dict(type='onnxruntime'),
codebase_config=dict(type='mmdet3d', task='VoxelDetection'),
onnx_config=dict(
type='onnx',
export_params=True,
keep_initializers_as_inputs=False,
opset_version=11,
input_shape=None,
input_names=['voxels', 'num_points', 'coors'],
output_names=['bboxes', 'scores', 'labels'])))


@pytest.fixture(scope='module')
def task_processor(model_cfg, deploy_cfg):
return build_task_processor(model_cfg, deploy_cfg, 'cpu')


@pytest.fixture(scope='module')
def torch_model(task_processor):
return task_processor.init_pytorch_model(None)


def test_init_pytorch_model(torch_model):
from mmdet3d.models import Base3DDetector
model = task_processor.init_pytorch_model(None)
assert isinstance(model, Base3DDetector)
assert isinstance(torch_model, Base3DDetector)


@pytest.fixture
def backend_model():
@pytest.fixture(scope='module')
def backend_model(task_processor):
from mmdeploy.backend.onnxruntime import ORTWrapper
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
wrapper = SwitchBackendWrapper(ORTWrapper)
wrapper.set(
outputs={
'bboxes': torch.rand(1, 50, 7),
'scores': torch.rand(1, 50),
'labels': torch.rand(1, 50)
})
with SwitchBackendWrapper(ORTWrapper) as wrapper:
wrapper.set(
outputs={
'bboxes': torch.rand(1, 50, 7),
'scores': torch.rand(1, 50),
'labels': torch.rand(1, 50)
})

yield task_processor.init_backend_model([''])

wrapper.recover()
yield task_processor.init_backend_model([''])


def test_init_backend_model(backend_model):

@@ -68,23 +78,28 @@ def test_init_backend_model(backend_model):
assert isinstance(backend_model, VoxelDetectionModel)


@pytest.fixture(scope='module')
def model_inputs(task_processor, pcd_path):
return task_processor.create_input(pcd_path)


@pytest.mark.parametrize('device', ['cpu', 'cuda:0'])
def test_create_input(device):
def test_create_input(device, task_processor, model_inputs):
if device == 'cuda:0' and not torch.cuda.is_available():
pytest.skip('cuda is not available')
original_device = task_processor.device
task_processor.device = device
inputs = task_processor.create_input(pcd_path)
inputs = model_inputs
assert len(inputs) == 2
task_processor.device = original_device


@pytest.mark.skipif(
reason='Only support GPU test', condition=not torch.cuda.is_available())
def test_run_inference(backend_model):
def test_run_inference(backend_model, task_processor, torch_model,
model_inputs):
task_processor.device = 'cuda:0'
torch_model = task_processor.init_pytorch_model(None)
input_dict, _ = task_processor.create_input(pcd_path)
input_dict, _ = model_inputs
torch_results = task_processor.run_inference(torch_model, input_dict)
backend_results = task_processor.run_inference(backend_model, input_dict)
assert torch_results is not None

@@ -95,20 +110,19 @@ def test_run_inference(backend_model):

@pytest.mark.skipif(
reason='Only support GPU test', condition=not torch.cuda.is_available())
def test_visualize():
def test_visualize(pcd_path, task_processor, torch_model, tmp_path,
model_inputs):
task_processor.device = 'cuda:0'
input_dict, _ = task_processor.create_input(pcd_path)
torch_model = task_processor.init_pytorch_model(None)
input_dict, _ = model_inputs
results = task_processor.run_inference(torch_model, input_dict)
with TemporaryDirectory() as dir:
filename = dir + 'tmp.bin'
task_processor.visualize(torch_model, pcd_path, results[0], filename,
'test', False)
assert os.path.exists(filename)
filename = str(tmp_path / 'tmp.bin')
task_processor.visualize(torch_model, pcd_path, results[0], filename,
'test', False)
assert os.path.exists(filename)
task_processor.device = 'cpu'


def test_build_dataset_and_dataloader():
def test_build_dataset_and_dataloader(model_cfg, task_processor):
dataset = task_processor.build_dataset(
dataset_cfg=model_cfg, dataset_type='test')
assert isinstance(dataset, Dataset), 'Failed to build dataset'

@@ -118,7 +132,7 @@ def test_build_dataset_and_dataloader():

@pytest.mark.skipif(
reason='Only support GPU test', condition=not torch.cuda.is_available())
def test_single_gpu_test_and_evaluate():
def test_single_gpu_test_and_evaluate(model_cfg, task_processor, tmp_path):
from mmcv.parallel import MMDataParallel
task_processor.device = 'cuda:0'

@@ -146,7 +160,7 @@ def test_single_gpu_test_and_evaluate():
# Run test
outputs = task_processor.single_gpu_test(model, dataloader)
assert isinstance(outputs, list)
output_file = NamedTemporaryFile(suffix='.pkl').name
output_file = str(tmp_path / 'tmp.pkl')
task_processor.evaluate_outputs(
model_cfg, outputs, dataset, 'bbox', out=output_file, format_only=True)
task_processor.device = 'cpu'
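Note: test_create_input above still saves and restores task_processor.device by hand around each run. The same guard can be expressed once as a small yield fixture; a self-contained sketch with a dummy processor object (illustrative only, not part of this commit):

import pytest


class DummyTaskProcessor:
    # Minimal stand-in for the real task processor: only the attribute
    # the tests mutate is modelled here.
    def __init__(self):
        self.device = 'cpu'


@pytest.fixture(scope='module')
def task_processor():
    return DummyTaskProcessor()


@pytest.fixture
def restore_device(task_processor):
    saved = task_processor.device
    yield
    # Teardown: undo whatever device the test set, mirroring the manual
    # `task_processor.device = original_device` in the hunk above.
    task_processor.device = saved


@pytest.mark.parametrize('device', ['cpu', 'cuda:0'])
def test_create_input_sketch(task_processor, restore_device, device):
    task_processor.device = device
    assert task_processor.device == device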
@@ -5,18 +5,10 @@ import mmcv
import pytest
import torch

import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.codebase import import_codebase
from mmdeploy.codebase.mmdet3d.deploy.voxel_detection import VoxelDetection
from mmdeploy.utils import Backend, Codebase
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker

try:
import_codebase(Codebase.MMDET3D)
except ImportError:
pytest.skip(
f'{Codebase.MMDET3D} is not installed.', allow_module_level=True)
from mmdeploy.codebase.mmdet3d.deploy.voxel_detection import VoxelDetection

pcd_path = 'tests/test_codebase/test_mmdet3d/data/kitti/kitti_000008.bin'
model_cfg = 'tests/test_codebase/test_mmdet3d/data/model_cfg.py'

@@ -24,54 +16,55 @@ model_cfg = 'tests/test_codebase/test_mmdet3d/data/model_cfg.py'
@backend_checker(Backend.ONNXRUNTIME)
class TestVoxelDetectionModel:

@classmethod
def setup_class(cls):
@pytest.fixture(scope='class')
def end2end_model(self):
# force add backend wrapper regardless of plugins
from mmdeploy.backend.onnxruntime import ORTWrapper
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

# simplify backend inference
cls.wrapper = SwitchBackendWrapper(ORTWrapper)
cls.outputs = {
'bboxes': torch.rand(1, 50, 7),
'scores': torch.rand(1, 50),
'labels': torch.rand(1, 50)
}
cls.wrapper.set(outputs=cls.outputs)
deploy_cfg = mmcv.Config({
'onnx_config': {
'input_names': ['voxels', 'num_points', 'coors'],
'output_names': ['bboxes', 'scores', 'labels'],
'opset_version': 11
},
'backend_config': {
'type': 'tensorrt'
}
})

from mmdeploy.utils import load_config
model_cfg_path = 'tests/test_codebase/test_mmdet3d/data/model_cfg.py'
model_cfg = load_config(model_cfg_path)[0]
from mmdeploy.codebase.mmdet3d.deploy.voxel_detection_model import \
VoxelDetectionModel
cls.end2end_model = VoxelDetectionModel(
Backend.ONNXRUNTIME, [''],
device='cuda',
deploy_cfg=deploy_cfg,
model_cfg=model_cfg)
from mmdeploy.utils import load_config

# simplify backend inference
with SwitchBackendWrapper(ORTWrapper) as wrapper:
outputs = {
'bboxes': torch.rand(1, 50, 7),
'scores': torch.rand(1, 50),
'labels': torch.rand(1, 50)
}
wrapper.set(outputs=outputs)
deploy_cfg = mmcv.Config({
'onnx_config': {
'input_names': ['voxels', 'num_points', 'coors'],
'output_names': ['bboxes', 'scores', 'labels'],
'opset_version': 11
},
'backend_config': {
'type': 'tensorrt'
}
})

model_cfg_path = 'tests/test_codebase/test_mmdet3d/data' + \
'/model_cfg.py'
model_cfg = load_config(model_cfg_path)[0]

model = VoxelDetectionModel(
Backend.ONNXRUNTIME, [''],
device='cuda',
deploy_cfg=deploy_cfg,
model_cfg=model_cfg)
yield model

@pytest.mark.skipif(
reason='Only support GPU test',
condition=not torch.cuda.is_available())
def test_forward_and_show_result(self):
def test_forward_and_show_result(self, end2end_model, tmp_path):
data = VoxelDetection.read_pcd_file(pcd_path, model_cfg, 'cuda')
results = self.end2end_model.forward(data['points'], data['img_metas'])
results = end2end_model.forward(data['points'], data['img_metas'])
assert results is not None
from tempfile import TemporaryDirectory
with TemporaryDirectory() as dir:
self.end2end_model.show_result(
data, results, dir, 'backend_output.bin', show=False)
assert osp.exists(dir + '/backend_output.bin')
dir = str(tmp_path)
end2end_model.show_result(
data, results, dir, 'backend_output.bin', show=False)
assert osp.exists(dir + '/backend_output.bin')


@backend_checker(Backend.ONNXRUNTIME)

@@ -87,7 +80,6 @@ def test_build_voxel_detection_model():
codebase_config=dict(type=Codebase.MMDET3D.value)))

from mmdeploy.backend.onnxruntime import ORTWrapper
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

# simplify backend inference
with SwitchBackendWrapper(ORTWrapper) as wrapper:
@@ -0,0 +1,19 @@
# Copyright (c) OpenMMLab. All rights reserved.
import pytest

from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Codebase


def pytest_ignore_collect(*args, **kwargs):
import importlib
return importlib.util.find_spec('mmedit') is None


@pytest.fixture(autouse=True, scope='package')
def import_all_modules():
codebase = Codebase.MMEDIT
try:
import_codebase(codebase)
except ImportError:
pytest.skip(f'{codebase} is not installed.', allow_module_level=True)
@@ -1,6 +1,5 @@
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from typing import Dict

import mmcv

@@ -8,55 +7,54 @@ import onnx
import pytest
import torch

from mmdeploy.codebase import import_codebase
from mmdeploy.core import RewriterContext
from mmdeploy.utils import Backend, Codebase, get_onnx_config

try:
import_codebase(Codebase.MMEDIT)
except ImportError:
pytest.skip(
f'{Codebase.MMEDIT} is not installed.', allow_module_level=True)

img = torch.rand(1, 3, 4, 4)
model_file = tempfile.NamedTemporaryFile(suffix='.onnx').name

deploy_cfg = mmcv.Config(
dict(
codebase_config=dict(
type='mmedit',
task='SuperResolution',
),
backend_config=dict(
type='tensorrt',
common_config=dict(fp16_mode=False, max_workspace_size=1 << 10),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 3, 4, 4],
opt_shape=[1, 3, 4, 4],
max_shape=[1, 3, 4, 4])))
]),
ir_config=dict(
type='onnx',
export_params=True,
keep_initializers_as_inputs=False,
opset_version=11,
save_file=model_file,
input_shape=None,
input_names=['input'],
output_names=['output'])))
from mmdeploy.utils import Backend, get_onnx_config


def test_srcnn():
@pytest.fixture
def img():
return torch.rand(1, 3, 4, 4)


@pytest.fixture
def deploy_cfg(tmp_path):
model_file = str(tmp_path / 'end2end.onnx')
return mmcv.Config(
dict(
codebase_config=dict(
type='mmedit',
task='SuperResolution',
),
backend_config=dict(
type='tensorrt',
common_config=dict(
fp16_mode=False, max_workspace_size=1 << 10),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 3, 4, 4],
opt_shape=[1, 3, 4, 4],
max_shape=[1, 3, 4, 4])))
]),
ir_config=dict(
type='onnx',
export_params=True,
keep_initializers_as_inputs=False,
opset_version=11,
save_file=model_file,
input_shape=None,
input_names=['input'],
output_names=['output'])))


def test_srcnn(img, deploy_cfg):
from mmedit.models.backbones.sr_backbones import SRCNN
pytorch_model = SRCNN()
model_inputs = {'x': img}

onnx_file_path = tempfile.NamedTemporaryFile(suffix='.onnx').name
onnx_cfg = get_onnx_config(deploy_cfg)
input_names = [k for k, v in model_inputs.items() if k != 'ctx']
onnx_file_path = onnx_cfg['save_file']
input_names = ['x']

dynamic_axes = onnx_cfg.get('dynamic_axes', None)

@@ -67,7 +65,7 @@ def test_srcnn():
cfg=deploy_cfg, backend=Backend.TENSORRT.value), torch.no_grad():
torch.onnx.export(
pytorch_model,
tuple([v for k, v in model_inputs.items()]),
img,
onnx_file_path,
export_params=True,
input_names=input_names,

@@ -82,7 +80,4 @@ def test_srcnn():

model = onnx.load(onnx_file_path)
assert model is not None
try:
onnx.checker.check_model(model)
except onnx.checker.ValidationError:
assert False
onnx.checker.check_model(model)
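Note: test_srcnn now takes the ONNX save path from the tmp_path-backed deploy_cfg fixture and calls onnx.checker.check_model directly instead of wrapping it in try/except + assert False. A self-contained sketch of exporting a toy module and validating it the same way (the module here is a stand-in, not SRCNN):

import onnx
import torch


class ToySR(torch.nn.Module):
    # Not SRCNN; just enough of a module to produce a valid ONNX graph.
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 3, kernel_size=3, padding=1)

    def forward(self, x):
        return self.conv(x)


def test_toy_export(tmp_path):
    onnx_file_path = str(tmp_path / 'end2end.onnx')
    model = ToySR().eval()
    img = torch.rand(1, 3, 4, 4)
    with torch.no_grad():
        torch.onnx.export(
            model,
            img,
            onnx_file_path,
            export_params=True,
            input_names=['input'],
            output_names=['output'],
            opset_version=11)
    loaded = onnx.load(onnx_file_path)
    # If the graph is malformed this raises and fails the test on its own,
    # so the old try/except + assert False is unnecessary.
    onnx.checker.check_model(loaded)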
@@ -1,76 +1,82 @@
# Copyright (c) OpenMMLab. All rights reserved.
import os
import tempfile
from tempfile import NamedTemporaryFile

import mmcv
import numpy as np
import pytest
import torch

import mmdeploy.apis.onnxruntime as ort_apis
from mmdeploy.apis import build_task_processor
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Codebase, load_config
from mmdeploy.utils import load_config
from mmdeploy.utils.test import SwitchBackendWrapper

try:
import_codebase(Codebase.MMEDIT)
except ImportError:
pytest.skip(
f'{Codebase.MMEDIT} is not installed.', allow_module_level=True)

model_cfg = 'tests/test_codebase/test_mmedit/data/model.py'
model_cfg = load_config(model_cfg)[0]
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type='onnxruntime'),
codebase_config=dict(type='mmedit', task='SuperResolution'),
onnx_config=dict(
type='onnx',
export_params=True,
keep_initializers_as_inputs=False,
opset_version=11,
input_shape=None,
input_names=['input'],
output_names=['output'])))
input_img = np.random.rand(32, 32, 3)
img_shape = [32, 32]
input = {'lq': input_img}
onnx_file = NamedTemporaryFile(suffix='.onnx').name
task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
@pytest.fixture(scope='module')
def model_cfg():
cfg = 'tests/test_codebase/test_mmedit/data/model.py'
return load_config(cfg)[0]


def test_init_pytorch_model():
@pytest.fixture(scope='module')
def deploy_cfg():
return mmcv.Config(
dict(
backend_config=dict(type='onnxruntime'),
codebase_config=dict(type='mmedit', task='SuperResolution'),
onnx_config=dict(
type='onnx',
export_params=True,
keep_initializers_as_inputs=False,
opset_version=11,
input_shape=None,
input_names=['input'],
output_names=['output'])))


@pytest.fixture(scope='module')
def input_img():
return np.random.rand(32, 32, 3)


@pytest.fixture(scope='module')
def model_input(input_img):
return {'lq': input_img}


@pytest.fixture(scope='module')
def task_processor(model_cfg, deploy_cfg):
return build_task_processor(model_cfg, deploy_cfg, 'cpu')


def test_init_pytorch_model(task_processor):
torch_model = task_processor.init_pytorch_model(None)
assert torch_model is not None


@pytest.fixture
def backend_model():
@pytest.fixture(scope='module')
def backend_model(task_processor):
from mmdeploy.backend.onnxruntime import ORTWrapper
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
wrapper = SwitchBackendWrapper(ORTWrapper)
wrapper.set(outputs={
'output': torch.rand(3, 50, 50),
})
with SwitchBackendWrapper(ORTWrapper) as wrapper:
wrapper.set(outputs={
'output': torch.rand(3, 50, 50),
})

yield task_processor.init_backend_model([''])

wrapper.recover()
yield task_processor.init_backend_model([''])


def test_init_backend_model(backend_model):
assert backend_model is not None


def test_create_input():
inputs = task_processor.create_input(input_img, img_shape=img_shape)
def test_create_input(task_processor, input_img):
inputs = task_processor.create_input(
input_img, img_shape=input_img.shape[:2])
assert inputs is not None


def test_visualize(backend_model):
result = task_processor.run_inference(backend_model, input)
def test_visualize(backend_model, task_processor, model_input, input_img):
result = task_processor.run_inference(backend_model, model_input)
with tempfile.TemporaryDirectory() as dir:
filename = dir + 'tmp.jpg'
task_processor.visualize(backend_model, input_img, result[0], filename,

@@ -78,21 +84,21 @@ def test_visualize(backend_model):
assert os.path.exists(filename)


def test_run_inference(backend_model):
results = task_processor.run_inference(backend_model, input)
def test_run_inference(backend_model, task_processor, model_input):
results = task_processor.run_inference(backend_model, model_input)
assert results is not None


def test_get_tensor_from_input():
assert type(task_processor.get_tensor_from_input(input)) is not dict
def test_get_tensor_from_input(task_processor, model_input):
assert type(task_processor.get_tensor_from_input(model_input)) is not dict


def test_get_partition_cfg():
def test_get_partition_cfg(task_processor):
with pytest.raises(NotImplementedError):
task_processor.get_partition_cfg(None)


def test_build_dataset():
def test_build_dataset(task_processor):
data = dict(
test={
'type': 'SRFolderDataset',

@@ -114,7 +120,7 @@ def test_build_dataset():
assert dataloader is not None, 'Failed to build dataloader'


def test_single_gpu_test(backend_model):
def test_single_gpu_test(backend_model, model_cfg, task_processor):
from mmcv.parallel import MMDataParallel
dataset = task_processor.build_dataset(model_cfg, dataset_type='test')
assert dataset is not None, 'Failed to build dataset'
@@ -4,55 +4,43 @@ import numpy as np
import pytest
import torch

import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase, load_config
from mmdeploy.utils import Backend, load_config
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker

try:
import_codebase(Codebase.MMEDIT)
except ImportError:
pytest.skip(
f'{Codebase.MMEDIT} is not installed.', allow_module_level=True)


@backend_checker(Backend.ONNXRUNTIME)
class TestEnd2EndModel:

@classmethod
def setup_class(cls):
@pytest.fixture(scope='class')
def end2end_model(self):
# force add backend wrapper regardless of plugins
# make sure ONNXRuntimeEditor can use ORTWrapper inside itself
from mmdeploy.backend.onnxruntime import ORTWrapper
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

# simplify backend inference
cls.wrapper = SwitchBackendWrapper(ORTWrapper)
cls.outputs = {
'outputs': torch.rand(3, 64, 64),
}
cls.wrapper.set(outputs=cls.outputs)
deploy_cfg = mmcv.Config(
{'onnx_config': {
'output_names': ['outputs']
}})
model_cfg = 'tests/test_codebase/test_mmedit/data/model.py'
model_cfg = load_config(model_cfg)[0]
from mmdeploy.codebase.mmedit.deploy.super_resolution_model import \
End2EndModel
cls.end2end_model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu',
model_cfg, deploy_cfg)

@classmethod
def teardown_class(cls):
cls.wrapper.recover()
# simplify backend inference
with SwitchBackendWrapper(ORTWrapper) as wrapper:
outputs = {
'outputs': torch.rand(3, 64, 64),
}
wrapper.set(outputs=outputs)
deploy_cfg = mmcv.Config(
{'onnx_config': {
'output_names': ['outputs']
}})
model_cfg = 'tests/test_codebase/test_mmedit/data/model.py'
model_cfg = load_config(model_cfg)[0]
model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu', model_cfg,
deploy_cfg)
yield model

def test_forward(self):
def test_forward(self, end2end_model):
input_img = np.random.rand(3, 32, 32)

results = self.end2end_model.forward(input_img, test_mode=False)
results = end2end_model.forward(input_img, test_mode=False)
assert results is not None

results = self.end2end_model.forward(
results = end2end_model.forward(
input_img, test_mode=True, gt=torch.tensor(results[0]))
assert results is not None
@@ -0,0 +1,19 @@
# Copyright (c) OpenMMLab. All rights reserved.
import pytest

from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Codebase


def pytest_ignore_collect(*args, **kwargs):
import importlib
return importlib.util.find_spec('mmocr') is None


@pytest.fixture(autouse=True, scope='package')
def import_all_modules():
codebase = Codebase.MMOCR
try:
import_codebase(codebase)
except ImportError:
pytest.skip(f'{codebase} is not installed.', allow_module_level=True)
@ -1,24 +1,15 @@
|
|||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import tempfile
|
||||
|
||||
import mmcv
|
||||
import numpy as np
|
||||
import pytest
|
||||
import torch
|
||||
from mmocr.models.textdet.necks import FPNC
|
||||
|
||||
from mmdeploy.codebase import import_codebase
|
||||
from mmdeploy.core import RewriterContext, patch_model
|
||||
from mmdeploy.utils import Backend, Codebase
|
||||
from mmdeploy.utils import Backend
|
||||
from mmdeploy.utils.test import (WrapModel, check_backend, get_model_outputs,
|
||||
get_rewrite_outputs)
|
||||
|
||||
try:
|
||||
import_codebase(Codebase.MMOCR)
|
||||
except ImportError:
|
||||
pytest.skip(f'{Codebase.MMOCR} is not installed.', allow_module_level=True)
|
||||
|
||||
from mmocr.models.textdet.necks import FPNC
|
||||
|
||||
|
||||
class FPNCNeckModel(FPNC):
|
||||
|
||||
|
@ -35,7 +26,8 @@ class FPNCNeckModel(FPNC):
|
|||
return output
|
||||
|
||||
|
||||
def get_bidirectionallstm_model():
|
||||
@pytest.fixture
|
||||
def bidirectionallstm_model():
|
||||
from mmocr.models.textrecog.layers.lstm_layer import BidirectionalLSTM
|
||||
model = BidirectionalLSTM(32, 16, 16)
|
||||
|
||||
|
@ -43,7 +35,8 @@ def get_bidirectionallstm_model():
|
|||
return model
|
||||
|
||||
|
||||
def get_single_stage_text_detector_model():
|
||||
@pytest.fixture
|
||||
def single_stage_text_detector():
|
||||
from mmocr.models.textdet import SingleStageTextDetector
|
||||
backbone = dict(
|
||||
type='mmdet.ResNet',
|
||||
|
@ -71,7 +64,8 @@ def get_single_stage_text_detector_model():
|
|||
return model
|
||||
|
||||
|
||||
def get_encode_decode_recognizer_model():
|
||||
@pytest.fixture
|
||||
def encode_decode_recognizer():
|
||||
from mmocr.models.textrecog import EncodeDecodeRecognizer
|
||||
|
||||
cfg = dict(
|
||||
|
@ -97,7 +91,9 @@ def get_encode_decode_recognizer_model():
|
|||
return model
|
||||
|
||||
|
||||
def get_crnn_decoder_model(rnn_flag):
|
||||
@pytest.fixture(params=[True, False])
|
||||
def crnn_decoder_model(request):
|
||||
rnn_flag = request.param
|
||||
from mmocr.models.textrecog.decoders import CRNNDecoder
|
||||
model = CRNNDecoder(32, 4, rnn_flag=rnn_flag)
|
||||
|
||||
|
@ -105,14 +101,16 @@ def get_crnn_decoder_model(rnn_flag):
|
|||
return model
|
||||
|
||||
|
||||
def get_fpnc_neck_model():
|
||||
@pytest.fixture
|
||||
def fpnc_neck_model():
|
||||
model = FPNCNeckModel([2, 4, 8, 16])
|
||||
|
||||
model.requires_grad_(False)
|
||||
return model
|
||||
|
||||
|
||||
def get_base_recognizer_model():
|
||||
@pytest.fixture
|
||||
def base_recognizer():
|
||||
from mmocr.models.textrecog import CRNNNet
|
||||
|
||||
cfg = dict(
|
||||
|
@ -138,10 +136,10 @@ def get_base_recognizer_model():
|
|||
|
||||
|
||||
@pytest.mark.parametrize('backend', [Backend.NCNN])
|
||||
def test_bidirectionallstm(backend: Backend):
|
||||
def test_bidirectionallstm(backend: Backend, bidirectionallstm_model):
|
||||
"""Test forward rewrite of bidirectionallstm."""
|
||||
check_backend(backend)
|
||||
bilstm = get_bidirectionallstm_model()
|
||||
bilstm = bidirectionallstm_model
|
||||
bilstm.cpu().eval()
|
||||
|
||||
deploy_cfg = mmcv.Config(
|
||||
|
@ -178,10 +176,10 @@ def test_bidirectionallstm(backend: Backend):
|
|||
|
||||
|
||||
@pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME])
|
||||
def test_simple_test_of_single_stage_text_detector(backend: Backend):
|
||||
def test_simple_test_of_single_stage_text_detector(backend: Backend,
|
||||
single_stage_text_detector):
|
||||
"""Test simple_test single_stage_text_detector."""
|
||||
check_backend(backend)
|
||||
single_stage_text_detector = get_single_stage_text_detector_model()
|
||||
single_stage_text_detector.eval()
|
||||
|
||||
deploy_cfg = mmcv.Config(
|
||||
|
@ -214,11 +212,10 @@ def test_simple_test_of_single_stage_text_detector(backend: Backend):
|
|||
|
||||
|
||||
@pytest.mark.parametrize('backend', [Backend.NCNN])
|
||||
@pytest.mark.parametrize('rnn_flag', [True, False])
|
||||
def test_crnndecoder(backend: Backend, rnn_flag: bool):
|
||||
def test_crnndecoder(backend: Backend, crnn_decoder_model):
|
||||
"""Test forward rewrite of crnndecoder."""
|
||||
check_backend(backend)
|
||||
crnn_decoder = get_crnn_decoder_model(rnn_flag)
|
||||
crnn_decoder = crnn_decoder_model
|
||||
crnn_decoder.cpu().eval()
|
||||
|
||||
deploy_cfg = mmcv.Config(
|
||||
|
@ -277,10 +274,10 @@ def test_crnndecoder(backend: Backend, rnn_flag: bool):
|
|||
'valid_ratio': 1.0
|
||||
}]]])
|
||||
@pytest.mark.parametrize('is_dynamic', [True, False])
|
||||
def test_forward_of_base_recognizer(img_metas, is_dynamic, backend):
|
||||
def test_forward_of_base_recognizer(img_metas, is_dynamic, backend,
|
||||
base_recognizer):
|
||||
"""Test forward base_recognizer."""
|
||||
check_backend(backend)
|
||||
base_recognizer = get_base_recognizer_model()
|
||||
base_recognizer.eval()
|
||||
|
||||
if not is_dynamic:
|
||||
|
@ -342,10 +339,10 @@ def test_forward_of_base_recognizer(img_metas, is_dynamic, backend):
|
|||
|
||||
|
||||
@pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME])
|
||||
def test_simple_test_of_encode_decode_recognizer(backend):
|
||||
def test_simple_test_of_encode_decode_recognizer(backend,
|
||||
encode_decode_recognizer):
|
||||
"""Test simple_test encode_decode_recognizer."""
|
||||
check_backend(backend)
|
||||
encode_decode_recognizer = get_encode_decode_recognizer_model()
|
||||
encode_decode_recognizer.eval()
|
||||
|
||||
deploy_cfg = mmcv.Config(
|
||||
|
@@ -383,10 +380,10 @@ def test_simple_test_of_encode_decode_recognizer(backend):


@pytest.mark.parametrize('backend', [Backend.TENSORRT])
def test_forward_of_fpnc(backend: Backend):
def test_forward_of_fpnc(backend: Backend, fpnc_neck_model):
    """Test forward rewrite of fpnc."""
    check_backend(backend)
    fpnc = get_fpnc_neck_model().cuda()
    fpnc = fpnc_neck_model.cuda()
    fpnc.eval()
    input = torch.rand(1, 1, 64, 64).cuda()
    deploy_cfg = mmcv.Config(
@@ -483,7 +480,7 @@ def get_sar_model_cfg(decoder_type: str):
@pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME])
@pytest.mark.parametrize('decoder_type',
                         ['SequentialSARDecoder', 'ParallelSARDecoder'])
def test_sar_model(backend: Backend, decoder_type):
def test_sar_model(backend: Backend, decoder_type, tmp_path):
    check_backend(backend)
    import os.path as osp

@@ -506,7 +503,7 @@ def test_sar_model(backend: Backend, decoder_type):
    pytorch_model.cfg = sar_cfg
    patched_model = patch_model(
        pytorch_model, cfg=deploy_cfg, backend=backend.value)
    onnx_file_path = tempfile.NamedTemporaryFile(suffix='.onnx').name
    onnx_file_path = str(tmp_path / 'tmp.onnx')
    input_names = [k for k, v in model_inputs.items() if k != 'ctx']
    with RewriterContext(
            cfg=deploy_cfg, backend=backend.value), torch.no_grad():
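test_sar_model now writes its ONNX file under pytest's tmp_path instead of tempfile.NamedTemporaryFile(...).name. A small self-contained sketch of the same pattern (toy torch model and illustrative file name, not from the diff):

import torch


def test_export_onnx(tmp_path):
    # tmp_path is a per-test pathlib.Path that pytest creates and cleans up,
    # so exported files never leak outside the test run and the path is
    # writable on every platform.
    onnx_file_path = str(tmp_path / 'tmp.onnx')
    model = torch.nn.Linear(4, 2)
    torch.onnx.export(model, torch.rand(1, 4), onnx_file_path, opset_version=11)
    assert (tmp_path / 'tmp.onnx').exists()
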
@ -1,6 +1,5 @@
|
|||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import os
|
||||
from tempfile import NamedTemporaryFile, TemporaryDirectory
|
||||
|
||||
import mmcv
|
||||
import numpy as np
|
||||
|
@ -8,95 +7,104 @@ import pytest
|
|||
import torch
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
import mmdeploy.backend.onnxruntime as ort_apis
|
||||
from mmdeploy.apis import build_task_processor
|
||||
from mmdeploy.codebase import import_codebase
|
||||
from mmdeploy.utils import Codebase, load_config
|
||||
from mmdeploy.utils import load_config
|
||||
from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper
|
||||
|
||||
try:
|
||||
import_codebase(Codebase.MMOCR)
|
||||
except ImportError:
|
||||
pytest.skip(f'{Codebase.MMOCR} is not installed.', allow_module_level=True)
|
||||
|
||||
model_cfg_path = 'tests/test_codebase/test_mmocr/data/dbnet.py'
|
||||
model_cfg = load_config(model_cfg_path)[0]
|
||||
deploy_cfg = mmcv.Config(
|
||||
dict(
|
||||
backend_config=dict(type='onnxruntime'),
|
||||
codebase_config=dict(type='mmocr', task='TextDetection'),
|
||||
onnx_config=dict(
|
||||
type='onnx',
|
||||
export_params=True,
|
||||
keep_initializers_as_inputs=False,
|
||||
opset_version=11,
|
||||
input_shape=None,
|
||||
input_names=['input'],
|
||||
output_names=['output'])))
|
||||
|
||||
onnx_file = NamedTemporaryFile(suffix='.onnx').name
|
||||
task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def model_cfg():
|
||||
return load_config(model_cfg_path)[0]
|
||||
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def deploy_cfg():
|
||||
return mmcv.Config(
|
||||
dict(
|
||||
backend_config=dict(type='onnxruntime'),
|
||||
codebase_config=dict(type='mmocr', task='TextDetection'),
|
||||
onnx_config=dict(
|
||||
type='onnx',
|
||||
export_params=True,
|
||||
keep_initializers_as_inputs=False,
|
||||
opset_version=11,
|
||||
input_shape=None,
|
||||
input_names=['input'],
|
||||
output_names=['output'])))
|
||||
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def task_processor(model_cfg, deploy_cfg):
|
||||
return build_task_processor(model_cfg, deploy_cfg, 'cpu')
|
||||
|
||||
|
||||
img_shape = (32, 32)
|
||||
img = np.random.rand(*img_shape, 3).astype(np.uint8)
|
||||
|
||||
|
||||
def test_init_pytorch_model():
|
||||
@pytest.fixture(scope='module')
|
||||
def img():
|
||||
return np.random.rand(*img_shape, 3).astype(np.uint8)
|
||||
|
||||
|
||||
def test_init_pytorch_model(task_processor):
|
||||
from mmocr.models.textdet.detectors.single_stage_text_detector import \
|
||||
SingleStageDetector
|
||||
model = task_processor.init_pytorch_model(None)
|
||||
assert isinstance(model, SingleStageDetector)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def backend_model():
|
||||
@pytest.fixture(scope='module')
|
||||
def backend_model(task_processor):
|
||||
from mmdeploy.backend.onnxruntime import ORTWrapper
|
||||
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
|
||||
wrapper = SwitchBackendWrapper(ORTWrapper)
|
||||
wrapper.set(outputs={
|
||||
'output': torch.rand(1, 3, *img_shape),
|
||||
})
|
||||
with SwitchBackendWrapper(ORTWrapper) as wrapper:
|
||||
wrapper.set(outputs={
|
||||
'output': torch.rand(1, 3, *img_shape),
|
||||
})
|
||||
|
||||
yield task_processor.init_backend_model([''])
|
||||
|
||||
wrapper.recover()
|
||||
yield task_processor.init_backend_model([''])
|
||||
|
||||
|
||||
def test_init_backend_model(backend_model):
|
||||
assert isinstance(backend_model, torch.nn.Module)
|
||||
|
||||
|
||||
def test_create_input():
|
||||
inputs = task_processor.create_input(img, input_shape=img_shape)
|
||||
assert isinstance(inputs, tuple) and len(inputs) == 2
|
||||
@pytest.fixture(scope='module')
|
||||
def model_inputs(task_processor, img):
|
||||
return task_processor.create_input(img, input_shape=img_shape)
|
||||
|
||||
|
||||
def test_run_inference(backend_model):
|
||||
input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
|
||||
def test_create_input(model_inputs):
|
||||
assert isinstance(model_inputs, tuple) and len(model_inputs) == 2
|
||||
|
||||
|
||||
def test_run_inference(backend_model, task_processor, model_inputs):
|
||||
input_dict, _ = model_inputs
|
||||
results = task_processor.run_inference(backend_model, input_dict)
|
||||
assert results is not None
|
||||
|
||||
|
||||
def test_visualize(backend_model):
|
||||
input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
|
||||
def test_visualize(backend_model, task_processor, model_inputs, img, tmp_path):
|
||||
input_dict, _ = model_inputs
|
||||
results = task_processor.run_inference(backend_model, input_dict)
|
||||
with TemporaryDirectory() as dir:
|
||||
filename = dir + 'tmp.jpg'
|
||||
task_processor.visualize(backend_model, img, results[0], filename, '')
|
||||
assert os.path.exists(filename)
|
||||
filename = str(tmp_path / 'tmp.jpg')
|
||||
task_processor.visualize(backend_model, img, results[0], filename, '')
|
||||
assert os.path.exists(filename)
|
||||
|
||||
|
||||
def test_get_tensort_from_input():
|
||||
def test_get_tensort_from_input(task_processor):
|
||||
input_data = {'img': [torch.ones(3, 4, 5)]}
|
||||
inputs = task_processor.get_tensor_from_input(input_data)
|
||||
assert torch.equal(inputs, torch.ones(3, 4, 5))
|
||||
|
||||
|
||||
def test_get_partition_cfg():
|
||||
def test_get_partition_cfg(task_processor):
|
||||
with pytest.raises(NotImplementedError):
|
||||
_ = task_processor.get_partition_cfg(partition_type='')
|
||||
|
||||
|
||||
def test_build_dataset_and_dataloader():
|
||||
def test_build_dataset_and_dataloader(model_cfg, task_processor):
|
||||
from torch.utils.data import DataLoader, Dataset
|
||||
dataset = task_processor.build_dataset(
|
||||
dataset_cfg=model_cfg, dataset_type='test')
|
||||
|
@@ -105,7 +113,7 @@ def test_build_dataset_and_dataloader():
    assert isinstance(dataloader, DataLoader), 'Failed to build dataloader'


def test_single_gpu_test_and_evaluate():
def test_single_gpu_test_and_evaluate(model_cfg, task_processor):
    from mmcv.parallel import MMDataParallel

    # Prepare dataloader
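The text-detection task tests above replace module-level globals and manual wrapper.recover() calls with module-scoped, yield-style fixtures. A rough sketch of the pattern, with a hypothetical wrapper class standing in for SwitchBackendWrapper:

import pytest


class FakeBackendWrapper:
    """Stand-in context manager: patches a backend, restores it on exit."""

    def __enter__(self):
        self.outputs = None
        return self

    def __exit__(self, *exc):
        return False  # restoration happens even if a test failed

    def set(self, **outputs):
        self.outputs = outputs


@pytest.fixture(scope='module')
def backend_model():
    with FakeBackendWrapper() as wrapper:
        wrapper.set(output='dummy')
        # Code before the yield is setup; code after the with-block exits is
        # teardown, so recovery can never be skipped by an early failure.
        yield wrapper.outputs
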
@ -1,63 +1,51 @@
|
|||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import os.path as osp
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
import mmcv
|
||||
import numpy as np
|
||||
import pytest
|
||||
import torch
|
||||
|
||||
import mmdeploy.backend.onnxruntime as ort_apis
|
||||
from mmdeploy.codebase import import_codebase
|
||||
from mmdeploy.utils import Backend, Codebase, load_config
|
||||
from mmdeploy.utils import Backend, load_config
|
||||
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker
|
||||
|
||||
try:
|
||||
import_codebase(Codebase.MMOCR)
|
||||
except ImportError:
|
||||
pytest.skip(f'{Codebase.MMOCR} is not installed.', allow_module_level=True)
|
||||
|
||||
IMAGE_SIZE = 32
|
||||
|
||||
|
||||
@backend_checker(Backend.ONNXRUNTIME)
|
||||
class TestEnd2EndModel:
|
||||
|
||||
@classmethod
|
||||
def setup_class(cls):
|
||||
@pytest.fixture(scope='class')
|
||||
def end2end_model(self):
|
||||
# force add backend wrapper regardless of plugins
|
||||
from mmdeploy.backend.onnxruntime import ORTWrapper
|
||||
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
|
||||
|
||||
# simplify backend inference
|
||||
cls.wrapper = SwitchBackendWrapper(ORTWrapper)
|
||||
cls.outputs = {
|
||||
'outputs': torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE),
|
||||
}
|
||||
cls.wrapper.set(outputs=cls.outputs)
|
||||
deploy_cfg = mmcv.Config(
|
||||
{'onnx_config': {
|
||||
'output_names': ['outputs']
|
||||
}})
|
||||
model_cfg_path = 'tests/test_codebase/test_mmocr/data/dbnet.py'
|
||||
model_cfg = load_config(model_cfg_path)[0]
|
||||
with SwitchBackendWrapper(ORTWrapper) as wrapper:
|
||||
outputs = {
|
||||
'outputs': torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE),
|
||||
}
|
||||
wrapper.set(outputs=outputs)
|
||||
deploy_cfg = mmcv.Config(
|
||||
{'onnx_config': {
|
||||
'output_names': ['outputs']
|
||||
}})
|
||||
model_cfg_path = 'tests/test_codebase/test_mmocr/data/dbnet.py'
|
||||
model_cfg = load_config(model_cfg_path)[0]
|
||||
|
||||
from mmdeploy.codebase.mmocr.deploy.text_detection_model import \
|
||||
End2EndModel
|
||||
cls.end2end_model = End2EndModel(
|
||||
Backend.ONNXRUNTIME, [''],
|
||||
device='cpu',
|
||||
deploy_cfg=deploy_cfg,
|
||||
model_cfg=model_cfg)
|
||||
|
||||
@classmethod
|
||||
def teardown_class(cls):
|
||||
cls.wrapper.recover()
|
||||
from mmdeploy.codebase.mmocr.deploy.text_detection_model import \
|
||||
End2EndModel
|
||||
model = End2EndModel(
|
||||
Backend.ONNXRUNTIME, [''],
|
||||
device='cpu',
|
||||
deploy_cfg=deploy_cfg,
|
||||
model_cfg=model_cfg)
|
||||
yield model
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'ori_shape',
|
||||
[[IMAGE_SIZE, IMAGE_SIZE, 3], [2 * IMAGE_SIZE, 2 * IMAGE_SIZE, 3]])
|
||||
def test_forward(self, ori_shape):
|
||||
def test_forward(self, ori_shape, end2end_model):
|
||||
imgs = [torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE)]
|
||||
img_metas = [[{
|
||||
'ori_shape': ori_shape,
|
||||
|
@ -65,21 +53,21 @@ class TestEnd2EndModel:
|
|||
'scale_factor': [1., 1., 1., 1.],
|
||||
'filename': ''
|
||||
}]]
|
||||
results = self.end2end_model.forward(imgs, img_metas)
|
||||
results = end2end_model.forward(imgs, img_metas)
|
||||
assert results is not None, 'failed to get output using '\
|
||||
'End2EndModel'
|
||||
|
||||
def test_forward_test(self):
|
||||
def test_forward_test(self, end2end_model):
|
||||
imgs = torch.rand(2, 3, IMAGE_SIZE, IMAGE_SIZE)
|
||||
results = self.end2end_model.forward_test(imgs)
|
||||
results = end2end_model.forward_test(imgs)
|
||||
assert isinstance(results[0], torch.Tensor)
|
||||
|
||||
def test_show_result(self):
|
||||
def test_show_result(self, end2end_model, tmp_path):
|
||||
input_img = np.zeros([IMAGE_SIZE, IMAGE_SIZE, 3])
|
||||
img_path = NamedTemporaryFile(suffix='.jpg').name
|
||||
img_path = str(tmp_path / 'tmp.jpg')
|
||||
|
||||
result = {'boundary_result': [[1, 2, 3, 4, 5], [2, 2, 0, 4, 5]]}
|
||||
self.end2end_model.show_result(
|
||||
end2end_model.show_result(
|
||||
input_img, result, '', show=False, out_file=img_path)
|
||||
assert osp.exists(img_path), 'Fails to create drawn image.'
|
||||
|
||||
|
@@ -95,7 +83,6 @@ def test_build_text_detection_model():
            codebase_config=dict(type='mmocr')))

    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

    # simplify backend inference
    with SwitchBackendWrapper(ORTWrapper) as wrapper:
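The TestEnd2EndModel classes above swap setup_class/teardown_class for a class-scoped fixture that yields the model. A condensed sketch of the shape (dummy object instead of a real End2EndModel):

import pytest


class TestEnd2EndModelPattern:

    @pytest.fixture(scope='class')
    def end2end_model(self):
        model = object()  # build the backend-wrapped model here
        yield model       # shared by every test method in this class
        # anything after the yield runs as teardown for the whole class

    def test_forward(self, end2end_model):
        assert end2end_model is not None
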
@ -1,6 +1,5 @@
|
|||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import os
|
||||
from tempfile import NamedTemporaryFile, TemporaryDirectory
|
||||
|
||||
import mmcv
|
||||
import numpy as np
|
||||
|
@ -8,96 +7,103 @@ import pytest
|
|||
import torch
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
import mmdeploy.backend.onnxruntime as ort_apis
|
||||
from mmdeploy.apis import build_task_processor
|
||||
from mmdeploy.codebase import import_codebase
|
||||
from mmdeploy.utils import Codebase, load_config
|
||||
from mmdeploy.utils import load_config
|
||||
from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper
|
||||
|
||||
try:
|
||||
import_codebase(Codebase.MMOCR)
|
||||
except ImportError:
|
||||
pytest.skip(f'{Codebase.MMOCR} is not installed.', allow_module_level=True)
|
||||
|
||||
model_cfg_path = 'tests/test_codebase/test_mmocr/data/crnn.py'
|
||||
model_cfg = load_config(model_cfg_path)[0]
|
||||
deploy_cfg = mmcv.Config(
|
||||
dict(
|
||||
backend_config=dict(type='onnxruntime'),
|
||||
codebase_config=dict(type='mmocr', task='TextRecognition'),
|
||||
onnx_config=dict(
|
||||
type='onnx',
|
||||
export_params=True,
|
||||
keep_initializers_as_inputs=False,
|
||||
opset_version=11,
|
||||
input_shape=None,
|
||||
input_names=['input'],
|
||||
output_names=['output'])))
|
||||
|
||||
onnx_file = NamedTemporaryFile(suffix='.onnx').name
|
||||
task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def model_cfg():
|
||||
return load_config(model_cfg_path)[0]
|
||||
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def deploy_cfg():
|
||||
return mmcv.Config(
|
||||
dict(
|
||||
backend_config=dict(type='onnxruntime'),
|
||||
codebase_config=dict(type='mmocr', task='TextRecognition'),
|
||||
onnx_config=dict(
|
||||
type='onnx',
|
||||
export_params=True,
|
||||
keep_initializers_as_inputs=False,
|
||||
opset_version=11,
|
||||
input_shape=None,
|
||||
input_names=['input'],
|
||||
output_names=['output'])))
|
||||
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def task_processor(model_cfg, deploy_cfg):
|
||||
return build_task_processor(model_cfg, deploy_cfg, 'cpu')
|
||||
|
||||
|
||||
img_shape = (32, 32)
|
||||
img = np.random.rand(*img_shape, 3).astype(np.uint8)
|
||||
|
||||
|
||||
def test_init_pytorch_model():
|
||||
@pytest.fixture(scope='module')
|
||||
def img():
|
||||
return np.random.rand(*img_shape, 3).astype(np.uint8)
|
||||
|
||||
|
||||
def test_init_pytorch_model(task_processor):
|
||||
from mmocr.models.textrecog.recognizer import BaseRecognizer
|
||||
model = task_processor.init_pytorch_model(None)
|
||||
assert isinstance(model, BaseRecognizer)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def backend_model():
|
||||
@pytest.fixture(scope='module')
|
||||
def backend_model(task_processor):
|
||||
from mmdeploy.backend.onnxruntime import ORTWrapper
|
||||
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
|
||||
wrapper = SwitchBackendWrapper(ORTWrapper)
|
||||
wrapper.set(outputs={
|
||||
'output': torch.rand(1, 9, 37),
|
||||
})
|
||||
with SwitchBackendWrapper(ORTWrapper) as wrapper:
|
||||
wrapper.set(outputs={
|
||||
'output': torch.rand(1, 9, 37),
|
||||
})
|
||||
|
||||
yield task_processor.init_backend_model([''])
|
||||
|
||||
wrapper.recover()
|
||||
yield task_processor.init_backend_model([''])
|
||||
|
||||
|
||||
def test_init_backend_model(backend_model):
|
||||
assert isinstance(backend_model, torch.nn.Module)
|
||||
|
||||
|
||||
def test_create_input():
|
||||
inputs = task_processor.create_input(img, input_shape=img_shape)
|
||||
assert isinstance(inputs, tuple) and len(inputs) == 2
|
||||
@pytest.fixture(scope='module')
|
||||
def model_inputs(task_processor, img):
|
||||
return task_processor.create_input(img, input_shape=img_shape)
|
||||
|
||||
|
||||
def test_run_inference(backend_model):
|
||||
input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
|
||||
def test_create_input(model_inputs):
|
||||
assert isinstance(model_inputs, tuple) and len(model_inputs) == 2
|
||||
|
||||
|
||||
def test_run_inference(backend_model, task_processor, model_inputs):
|
||||
input_dict, _ = model_inputs
|
||||
results = task_processor.run_inference(backend_model, input_dict)
|
||||
assert results is not None
|
||||
|
||||
|
||||
def test_visualize(backend_model):
|
||||
input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
|
||||
def test_visualize(backend_model, task_processor, model_inputs, img, tmp_path):
|
||||
input_dict, _ = model_inputs
|
||||
results = task_processor.run_inference(backend_model, input_dict)
|
||||
with TemporaryDirectory() as dir:
|
||||
filename = dir + 'tmp.jpg'
|
||||
task_processor.visualize(backend_model, img, results[0], filename, '')
|
||||
assert os.path.exists(filename)
|
||||
filename = str(tmp_path / 'tmp.jpg')
|
||||
task_processor.visualize(backend_model, img, results[0], filename, '')
|
||||
assert os.path.exists(filename)
|
||||
|
||||
|
||||
def test_get_tensort_from_input():
|
||||
def test_get_tensort_from_input(task_processor):
|
||||
input_data = {'img': [torch.ones(3, 4, 5)]}
|
||||
inputs = task_processor.get_tensor_from_input(input_data)
|
||||
assert torch.equal(inputs, torch.ones(3, 4, 5))
|
||||
|
||||
|
||||
def test_get_partition_cfg():
|
||||
try:
|
||||
def test_get_partition_cfg(task_processor):
|
||||
with pytest.raises(NotImplementedError):
|
||||
_ = task_processor.get_partition_cfg(partition_type='')
|
||||
except NotImplementedError:
|
||||
pass
|
||||
|
||||
|
||||
def test_build_dataset_and_dataloader():
|
||||
def test_build_dataset_and_dataloader(task_processor, model_cfg):
|
||||
from torch.utils.data import DataLoader, Dataset
|
||||
dataset = task_processor.build_dataset(
|
||||
dataset_cfg=model_cfg, dataset_type='test')
|
||||
|
@ -106,7 +112,7 @@ def test_build_dataset_and_dataloader():
|
|||
assert isinstance(dataloader, DataLoader), 'Failed to build dataloader'
|
||||
|
||||
|
||||
def test_single_gpu_test_and_evaluate():
|
||||
def test_single_gpu_test_and_evaluate(task_processor, model_cfg):
|
||||
from mmcv.parallel import MMDataParallel
|
||||
|
||||
# Prepare dataloader
|
||||
|
|
|
@ -1,82 +1,73 @@
|
|||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import os.path as osp
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
import mmcv
|
||||
import numpy as np
|
||||
import pytest
|
||||
import torch
|
||||
|
||||
import mmdeploy.backend.onnxruntime as ort_apis
|
||||
from mmdeploy.codebase import import_codebase
|
||||
from mmdeploy.utils import Backend, Codebase, load_config
|
||||
from mmdeploy.utils import Backend, load_config
|
||||
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker
|
||||
|
||||
try:
|
||||
import_codebase(Codebase.MMOCR)
|
||||
except ImportError:
|
||||
pytest.skip(f'{Codebase.MMOCR} is not installed.', allow_module_level=True)
|
||||
|
||||
IMAGE_SIZE = 32
|
||||
|
||||
|
||||
@backend_checker(Backend.ONNXRUNTIME)
|
||||
class TestEnd2EndModel:
|
||||
|
||||
@classmethod
|
||||
def setup_class(cls):
|
||||
@pytest.fixture(scope='class')
|
||||
def end2end_model(self):
|
||||
# force add backend wrapper regardless of plugins
|
||||
from mmdeploy.backend.onnxruntime import ORTWrapper
|
||||
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
|
||||
|
||||
# simplify backend inference
|
||||
cls.wrapper = SwitchBackendWrapper(ORTWrapper)
|
||||
cls.outputs = {
|
||||
'output': torch.rand(1, 9, 37),
|
||||
}
|
||||
cls.wrapper.set(outputs=cls.outputs)
|
||||
deploy_cfg = mmcv.Config({'onnx_config': {'output_names': ['output']}})
|
||||
model_cfg_path = 'tests/test_codebase/test_mmocr/data/crnn.py'
|
||||
model_cfg = load_config(model_cfg_path)[0]
|
||||
with SwitchBackendWrapper(ORTWrapper) as wrapper:
|
||||
outputs = {
|
||||
'output': torch.rand(1, 9, 37),
|
||||
}
|
||||
wrapper.set(outputs=outputs)
|
||||
deploy_cfg = mmcv.Config(
|
||||
{'onnx_config': {
|
||||
'output_names': ['output']
|
||||
}})
|
||||
model_cfg_path = 'tests/test_codebase/test_mmocr/data/crnn.py'
|
||||
model_cfg = load_config(model_cfg_path)[0]
|
||||
|
||||
from mmdeploy.codebase.mmocr.deploy.text_recognition_model import \
|
||||
End2EndModel
|
||||
cls.end2end_model = End2EndModel(
|
||||
Backend.ONNXRUNTIME, [''],
|
||||
device='cpu',
|
||||
deploy_cfg=deploy_cfg,
|
||||
model_cfg=model_cfg)
|
||||
|
||||
@classmethod
|
||||
def teardown_class(cls):
|
||||
cls.wrapper.recover()
|
||||
from mmdeploy.codebase.mmocr.deploy.text_recognition_model import \
|
||||
End2EndModel
|
||||
model = End2EndModel(
|
||||
Backend.ONNXRUNTIME, [''],
|
||||
device='cpu',
|
||||
deploy_cfg=deploy_cfg,
|
||||
model_cfg=model_cfg)
|
||||
yield model
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'ori_shape',
|
||||
[[IMAGE_SIZE, IMAGE_SIZE, 3], [2 * IMAGE_SIZE, 2 * IMAGE_SIZE, 3]])
|
||||
def test_forward(self, ori_shape):
|
||||
def test_forward(self, ori_shape, end2end_model):
|
||||
imgs = [torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE)]
|
||||
img_metas = [[{
|
||||
'ori_shape': ori_shape,
|
||||
'img_shape': [IMAGE_SIZE, IMAGE_SIZE, 3],
|
||||
'scale_factor': [1., 1., 1., 1.],
|
||||
}]]
|
||||
results = self.end2end_model.forward(imgs, img_metas)
|
||||
results = end2end_model.forward(imgs, img_metas)
|
||||
assert results is not None, 'failed to get output using '\
|
||||
'End2EndModel'
|
||||
|
||||
def test_forward_test(self):
|
||||
def test_forward_test(self, end2end_model):
|
||||
imgs = torch.rand(2, 3, IMAGE_SIZE, IMAGE_SIZE)
|
||||
img_metas = [{}]
|
||||
results = self.end2end_model.forward_test(imgs, img_metas)
|
||||
results = end2end_model.forward_test(imgs, img_metas)
|
||||
assert isinstance(results[0], dict)
|
||||
|
||||
def test_show_result(self):
|
||||
def test_show_result(self, end2end_model, tmp_path):
|
||||
input_img = np.zeros([IMAGE_SIZE, IMAGE_SIZE, 3])
|
||||
img_path = NamedTemporaryFile(suffix='.jpg').name
|
||||
img_path = str(tmp_path / 'tmp.jpg')
|
||||
|
||||
result = {'text': 'sier', 'score': [0.29, 0.62, 0.25, 0.54]}
|
||||
self.end2end_model.show_result(
|
||||
end2end_model.show_result(
|
||||
input_img, result, '', show=False, out_file=img_path)
|
||||
assert osp.exists(img_path), 'Fails to create drawn image.'
|
||||
|
||||
|
@ -92,7 +83,6 @@ def test_build_text_recognition_model():
|
|||
codebase_config=dict(type='mmocr')))
|
||||
|
||||
from mmdeploy.backend.onnxruntime import ORTWrapper
|
||||
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
|
||||
|
||||
# simplify backend inference
|
||||
with SwitchBackendWrapper(ORTWrapper) as wrapper:
|
||||
|
|
|
@@ -0,0 +1,19 @@
# Copyright (c) OpenMMLab. All rights reserved.
import pytest

from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Codebase


def pytest_ignore_collect(*args, **kwargs):
    import importlib
    return importlib.util.find_spec('mmpose') is None


@pytest.fixture(autouse=True, scope='package')
def import_all_modules():
    codebase = Codebase.MMPOSE
    try:
        import_codebase(codebase)
    except ImportError:
        pytest.skip(f'{codebase} is not installed.', allow_module_level=True)
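The new conftest.py above guards collection with pytest_ignore_collect and imports the codebase once per package. The same idea for a generic optional dependency (the package name here is hypothetical):

import pytest


@pytest.fixture(autouse=True, scope='package')
def import_optional_dependency():
    # Runs once for every test package containing this conftest.py, so the
    # individual test modules no longer need their own try/except guards.
    dep = pytest.importorskip('some_optional_package')
    yield dep
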
@ -4,18 +4,12 @@ import numpy as np
|
|||
import pytest
|
||||
import torch
|
||||
|
||||
from mmdeploy.codebase import import_codebase
|
||||
from mmdeploy.utils import Backend, Codebase, Task
|
||||
from mmdeploy.utils.test import WrapModel, check_backend, get_rewrite_outputs
|
||||
|
||||
try:
|
||||
import_codebase(Codebase.MMPOSE)
|
||||
except ImportError:
|
||||
pytest.skip(
|
||||
f'{Codebase.MMPOSE} is not installed.', allow_module_level=True)
|
||||
|
||||
|
||||
def get_top_down_heatmap_simple_head_model():
|
||||
@pytest.fixture
|
||||
def top_down_heatmap_simple_head_model():
|
||||
from mmpose.models.heads import TopdownHeatmapSimpleHead
|
||||
model = TopdownHeatmapSimpleHead(
|
||||
2,
|
||||
|
@ -28,9 +22,10 @@ def get_top_down_heatmap_simple_head_model():
|
|||
|
||||
@pytest.mark.parametrize('backend_type',
|
||||
[Backend.ONNXRUNTIME, Backend.TENSORRT])
|
||||
def test_top_down_heatmap_simple_head_inference_model(backend_type: Backend):
|
||||
def test_top_down_heatmap_simple_head_inference_model(
|
||||
backend_type: Backend, top_down_heatmap_simple_head_model):
|
||||
check_backend(backend_type, True)
|
||||
model = get_top_down_heatmap_simple_head_model()
|
||||
model = top_down_heatmap_simple_head_model
|
||||
model.cpu().eval()
|
||||
if backend_type == Backend.TENSORRT:
|
||||
deploy_cfg = mmcv.Config(
|
||||
|
@ -76,7 +71,8 @@ def test_top_down_heatmap_simple_head_inference_model(backend_type: Backend):
|
|||
model_output, rewrite_output, rtol=1e-03, atol=1e-05)
|
||||
|
||||
|
||||
def get_top_down_heatmap_msmu_head_model():
|
||||
@pytest.fixture
|
||||
def top_down_heatmap_msmu_head_model():
|
||||
|
||||
class DummyMSMUHead(torch.nn.Module):
|
||||
|
||||
|
@ -104,9 +100,10 @@ def get_top_down_heatmap_msmu_head_model():
|
|||
|
||||
@pytest.mark.parametrize('backend_type',
|
||||
[Backend.ONNXRUNTIME, Backend.TENSORRT])
|
||||
def test_top_down_heatmap_msmu_head_inference_model(backend_type: Backend):
|
||||
def test_top_down_heatmap_msmu_head_inference_model(
|
||||
backend_type: Backend, top_down_heatmap_msmu_head_model):
|
||||
check_backend(backend_type, True)
|
||||
model = get_top_down_heatmap_msmu_head_model()
|
||||
model = top_down_heatmap_msmu_head_model
|
||||
model.cpu().eval()
|
||||
if backend_type == Backend.TENSORRT:
|
||||
deploy_cfg = mmcv.Config(
|
||||
|
@ -152,7 +149,8 @@ def test_top_down_heatmap_msmu_head_inference_model(backend_type: Backend):
|
|||
model_output, rewrite_output, rtol=1e-03, atol=1e-05)
|
||||
|
||||
|
||||
def get_cross_resolution_weighting_model():
|
||||
@pytest.fixture
|
||||
def cross_resolution_weighting_model():
|
||||
from mmpose.models.backbones.litehrnet import CrossResolutionWeighting
|
||||
|
||||
class DummyModel(torch.nn.Module):
|
||||
|
@ -171,9 +169,10 @@ def get_cross_resolution_weighting_model():
|
|||
|
||||
|
||||
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME, Backend.NCNN])
|
||||
def test_cross_resolution_weighting_forward(backend_type: Backend):
|
||||
def test_cross_resolution_weighting_forward(backend_type: Backend,
|
||||
cross_resolution_weighting_model):
|
||||
check_backend(backend_type, True)
|
||||
model = get_cross_resolution_weighting_model()
|
||||
model = cross_resolution_weighting_model
|
||||
model.cpu().eval()
|
||||
imgs = torch.rand(1, 16, 16, 16)
|
||||
|
||||
|
@ -210,7 +209,8 @@ def test_cross_resolution_weighting_forward(backend_type: Backend):
|
|||
model_output, rewrite_output, rtol=1e-03, atol=1e-05)
|
||||
|
||||
|
||||
def get_top_down_model():
|
||||
@pytest.fixture
|
||||
def top_down_model():
|
||||
from mmpose.models.detectors.top_down import TopDown
|
||||
model_cfg = dict(
|
||||
type='TopDown',
|
||||
|
@ -237,9 +237,9 @@ def get_top_down_model():
|
|||
|
||||
@pytest.mark.parametrize('backend_type',
|
||||
[Backend.ONNXRUNTIME, Backend.TENSORRT])
|
||||
def test_top_down_forward(backend_type: Backend):
|
||||
def test_top_down_forward(backend_type: Backend, top_down_model):
|
||||
check_backend(backend_type, True)
|
||||
model = get_top_down_model()
|
||||
model = top_down_model
|
||||
model.cpu().eval()
|
||||
if backend_type == Backend.TENSORRT:
|
||||
deploy_cfg = mmcv.Config(
|
||||
|
|
|
@ -1,56 +1,29 @@
|
|||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import os
|
||||
from tempfile import NamedTemporaryFile, TemporaryDirectory
|
||||
|
||||
import mmcv
|
||||
import numpy as np
|
||||
import pytest
|
||||
import torch
|
||||
|
||||
import mmdeploy.backend.onnxruntime as ort_apis
|
||||
from mmdeploy.apis import build_task_processor
|
||||
from mmdeploy.codebase import import_codebase
|
||||
from mmdeploy.utils import Backend, Codebase, Task, load_config
|
||||
from mmdeploy.utils import load_config
|
||||
from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper
|
||||
|
||||
try:
|
||||
import_codebase(Codebase.MMPOSE)
|
||||
except ImportError:
|
||||
pytest.skip(
|
||||
f'{Codebase.MMPOSE.value} is not installed.', allow_module_level=True)
|
||||
|
||||
model_cfg_path = 'tests/test_codebase/test_mmpose/data/model.py'
|
||||
model_cfg = load_config(model_cfg_path)[0]
|
||||
deploy_cfg = mmcv.Config(
|
||||
dict(
|
||||
backend_config=dict(type='onnxruntime'),
|
||||
codebase_config=dict(type='mmpose', task='PoseDetection'),
|
||||
onnx_config=dict(
|
||||
type='onnx',
|
||||
export_params=True,
|
||||
keep_initializers_as_inputs=False,
|
||||
opset_version=11,
|
||||
save_file='end2end.onnx',
|
||||
input_names=['input'],
|
||||
output_names=['output'],
|
||||
input_shape=None)))
|
||||
|
||||
onnx_file = NamedTemporaryFile(suffix='.onnx').name
|
||||
task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
|
||||
img_shape = (192, 256)
|
||||
heatmap_shape = (48, 64)
|
||||
# mmpose.apis.inference.LoadImage uses opencv, needs float32 in
|
||||
# cv2.cvtColor.
|
||||
img = np.random.rand(*img_shape, 3).astype(np.float32)
|
||||
num_output_channels = model_cfg['data_cfg']['num_output_channels']
|
||||
|
||||
|
||||
def test_create_input():
|
||||
deploy_cfg = mmcv.Config(
|
||||
@pytest.fixture(scope='module')
|
||||
def model_cfg():
|
||||
return load_config(model_cfg_path)[0]
|
||||
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def deploy_cfg():
|
||||
return mmcv.Config(
|
||||
dict(
|
||||
backend_config=dict(type=Backend.ONNXRUNTIME.value),
|
||||
codebase_config=dict(
|
||||
type=Codebase.MMPOSE.value, task=Task.POSE_DETECTION.value),
|
||||
backend_config=dict(type='onnxruntime'),
|
||||
codebase_config=dict(type='mmpose', task='PoseDetection'),
|
||||
onnx_config=dict(
|
||||
type='onnx',
|
||||
export_params=True,
|
||||
|
@ -60,69 +33,87 @@ def test_create_input():
|
|||
input_names=['input'],
|
||||
output_names=['output'],
|
||||
input_shape=None)))
|
||||
task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
|
||||
inputs = task_processor.create_input(img, input_shape=img_shape)
|
||||
assert isinstance(inputs, tuple) and len(inputs) == 2
|
||||
|
||||
|
||||
def test_init_pytorch_model():
|
||||
@pytest.fixture(scope='module')
|
||||
def task_processor(model_cfg, deploy_cfg):
|
||||
return build_task_processor(model_cfg, deploy_cfg, 'cpu')
|
||||
|
||||
|
||||
img_shape = (192, 256)
|
||||
heatmap_shape = (48, 64)
|
||||
|
||||
|
||||
# mmpose.apis.inference.LoadImage uses opencv, needs float32 in
|
||||
# cv2.cvtColor.
|
||||
@pytest.fixture(scope='module')
|
||||
def img():
|
||||
return np.random.rand(*img_shape, 3).astype(np.float32)
|
||||
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def model_inputs(task_processor, img):
|
||||
return task_processor.create_input(img, input_shape=img_shape)
|
||||
|
||||
|
||||
def test_create_input(model_inputs):
|
||||
assert isinstance(model_inputs, tuple) and len(model_inputs) == 2
|
||||
|
||||
|
||||
def test_init_pytorch_model(task_processor):
|
||||
from mmpose.models.detectors.base import BasePose
|
||||
model = task_processor.init_pytorch_model(None)
|
||||
assert isinstance(model, BasePose)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def backend_model():
|
||||
@pytest.fixture(scope='module')
|
||||
def backend_model(task_processor, model_cfg):
|
||||
from mmdeploy.backend.onnxruntime import ORTWrapper
|
||||
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
|
||||
wrapper = SwitchBackendWrapper(ORTWrapper)
|
||||
wrapper.set(outputs={
|
||||
'output': torch.rand(1, num_output_channels, *heatmap_shape),
|
||||
})
|
||||
with SwitchBackendWrapper(ORTWrapper) as wrapper:
|
||||
num_output_channels = model_cfg['data_cfg']['num_output_channels']
|
||||
wrapper.set(
|
||||
outputs={
|
||||
'output': torch.rand(1, num_output_channels, *heatmap_shape),
|
||||
})
|
||||
|
||||
yield task_processor.init_backend_model([''])
|
||||
|
||||
wrapper.recover()
|
||||
yield task_processor.init_backend_model([''])
|
||||
|
||||
|
||||
def test_init_backend_model(backend_model):
|
||||
assert isinstance(backend_model, torch.nn.Module)
|
||||
|
||||
|
||||
def test_run_inference(backend_model):
|
||||
input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
|
||||
def test_run_inference(backend_model, task_processor, model_inputs):
|
||||
input_dict, _ = model_inputs
|
||||
results = task_processor.run_inference(backend_model, input_dict)
|
||||
assert results is not None
|
||||
|
||||
|
||||
def test_visualize(backend_model):
|
||||
input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
|
||||
def test_visualize(backend_model, task_processor, model_inputs, img, tmp_path):
|
||||
input_dict, _ = model_inputs
|
||||
results = task_processor.run_inference(backend_model, input_dict)
|
||||
with TemporaryDirectory() as dir:
|
||||
filename = dir + 'tmp.jpg'
|
||||
task_processor.visualize(backend_model, img, results[0], filename, '')
|
||||
assert os.path.exists(filename)
|
||||
filename = str(tmp_path / 'tmp.jpg')
|
||||
task_processor.visualize(backend_model, img, results[0], filename, '')
|
||||
assert os.path.exists(filename)
|
||||
|
||||
|
||||
def test_get_tensor_from_input():
|
||||
def test_get_tensor_from_input(task_processor):
|
||||
input_data = {'img': torch.ones(3, 4, 5)}
|
||||
inputs = task_processor.get_tensor_from_input(input_data)
|
||||
assert torch.equal(inputs, torch.ones(3, 4, 5))
|
||||
|
||||
|
||||
def test_get_partition_cfg():
|
||||
try:
|
||||
def test_get_partition_cfg(task_processor):
|
||||
with pytest.raises(NotImplementedError):
|
||||
_ = task_processor.get_partition_cfg(partition_type='')
|
||||
except NotImplementedError:
|
||||
pass
|
||||
|
||||
|
||||
def test_get_model_name():
|
||||
def test_get_model_name(task_processor):
|
||||
model_name = task_processor.get_model_name()
|
||||
assert isinstance(model_name, str) and model_name is not None
|
||||
|
||||
|
||||
def test_build_dataset_and_dataloader():
|
||||
def test_build_dataset_and_dataloader(task_processor, model_cfg):
|
||||
from torch.utils.data import DataLoader, Dataset
|
||||
dataset = task_processor.build_dataset(
|
||||
dataset_cfg=model_cfg, dataset_type='test')
|
||||
|
@ -131,7 +122,7 @@ def test_build_dataset_and_dataloader():
|
|||
assert isinstance(dataloader, DataLoader), 'Failed to build dataloader'
|
||||
|
||||
|
||||
def test_single_gpu_test_and_evaluate():
|
||||
def test_single_gpu_test_and_evaluate(task_processor, model_cfg):
|
||||
from mmcv.parallel import MMDataParallel
|
||||
dataset = task_processor.build_dataset(
|
||||
dataset_cfg=model_cfg, dataset_type='test')
|
||||
|
|
|
@ -1,63 +1,50 @@
|
|||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import os.path as osp
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
import mmcv
|
||||
import numpy as np
|
||||
import pytest
|
||||
import torch
|
||||
|
||||
import mmdeploy.backend.onnxruntime as ort_apis
|
||||
from mmdeploy.codebase import import_codebase
|
||||
from mmdeploy.utils import Backend, Codebase
|
||||
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker
|
||||
|
||||
IMAGE_H = 192
|
||||
IMAGE_W = 256
|
||||
|
||||
try:
|
||||
import_codebase(Codebase.MMPOSE)
|
||||
except ImportError:
|
||||
pytest.skip(
|
||||
f'{Codebase.MMPOSE} is not installed.', allow_module_level=True)
|
||||
|
||||
|
||||
@backend_checker(Backend.ONNXRUNTIME)
|
||||
class TestEnd2EndModel:
|
||||
|
||||
@classmethod
|
||||
def setup_class(cls):
|
||||
@pytest.fixture(scope='class')
|
||||
def end2end_model(self):
|
||||
# force add backend wrapper regardless of plugins
|
||||
from mmdeploy.backend.onnxruntime import ORTWrapper
|
||||
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
|
||||
|
||||
# simplify backend inference
|
||||
cls.wrapper = SwitchBackendWrapper(ORTWrapper)
|
||||
cls.outputs = {
|
||||
'outputs': torch.rand(1, 1, IMAGE_H, IMAGE_W),
|
||||
}
|
||||
cls.wrapper.set(outputs=cls.outputs)
|
||||
deploy_cfg = mmcv.Config(
|
||||
{'onnx_config': {
|
||||
'output_names': ['outputs']
|
||||
}})
|
||||
with SwitchBackendWrapper(ORTWrapper) as wrapper:
|
||||
outputs = {
|
||||
'outputs': torch.rand(1, 1, IMAGE_H, IMAGE_W),
|
||||
}
|
||||
wrapper.set(outputs=outputs)
|
||||
deploy_cfg = mmcv.Config(
|
||||
{'onnx_config': {
|
||||
'output_names': ['outputs']
|
||||
}})
|
||||
|
||||
from mmdeploy.utils import load_config
|
||||
model_cfg_path = 'tests/test_codebase/test_mmpose/data/model.py'
|
||||
model_cfg = load_config(model_cfg_path)[0]
|
||||
from mmdeploy.codebase.mmpose.deploy.pose_detection_model import \
|
||||
End2EndModel
|
||||
cls.end2end_model = End2EndModel(
|
||||
Backend.ONNXRUNTIME, [''],
|
||||
device='cpu',
|
||||
deploy_cfg=deploy_cfg,
|
||||
model_cfg=model_cfg)
|
||||
from mmdeploy.utils import load_config
|
||||
model_cfg_path = 'tests/test_codebase/test_mmpose/data/model.py'
|
||||
model_cfg = load_config(model_cfg_path)[0]
|
||||
from mmdeploy.codebase.mmpose.deploy.pose_detection_model import \
|
||||
End2EndModel
|
||||
model = End2EndModel(
|
||||
Backend.ONNXRUNTIME, [''],
|
||||
device='cpu',
|
||||
deploy_cfg=deploy_cfg,
|
||||
model_cfg=model_cfg)
|
||||
yield model
|
||||
|
||||
@classmethod
|
||||
def teardown_class(cls):
|
||||
cls.wrapper.recover()
|
||||
|
||||
def test_forward(self):
|
||||
def test_forward(self, end2end_model):
|
||||
img = torch.rand(1, 3, IMAGE_H, IMAGE_W)
|
||||
img_metas = [{
|
||||
'image_file':
|
||||
|
@ -67,23 +54,23 @@ class TestEnd2EndModel:
|
|||
'location': torch.tensor([0.5, 0.5]),
|
||||
'bbox_score': 0.5
|
||||
}]
|
||||
results = self.end2end_model.forward(img, img_metas)
|
||||
results = end2end_model.forward(img, img_metas)
|
||||
assert results is not None, 'failed to get output using '\
|
||||
'End2EndModel'
|
||||
|
||||
def test_forward_test(self):
|
||||
def test_forward_test(self, end2end_model):
|
||||
imgs = torch.rand(2, 3, IMAGE_H, IMAGE_W)
|
||||
results = self.end2end_model.forward_test(imgs)
|
||||
results = end2end_model.forward_test(imgs)
|
||||
assert isinstance(results[0], np.ndarray)
|
||||
|
||||
def test_show_result(self):
|
||||
def test_show_result(self, end2end_model, tmp_path):
|
||||
input_img = np.zeros([IMAGE_H, IMAGE_W, 3])
|
||||
img_path = NamedTemporaryFile(suffix='.jpg').name
|
||||
img_path = str(tmp_path / 'tmp.jpg')
|
||||
|
||||
pred_bbox = torch.rand(1, 5)
|
||||
pred_keypoint = torch.rand((1, 10, 2))
|
||||
result = [{'bbox': pred_bbox, 'keypoints': pred_keypoint}]
|
||||
self.end2end_model.show_result(
|
||||
end2end_model.show_result(
|
||||
input_img, result, '', show=False, out_file=img_path)
|
||||
assert osp.exists(img_path), 'Fails to create drawn image.'
|
||||
|
||||
|
@ -100,7 +87,6 @@ def test_build_pose_detection_model():
|
|||
codebase_config=dict(type=Codebase.MMPOSE.value)))
|
||||
|
||||
from mmdeploy.backend.onnxruntime import ORTWrapper
|
||||
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
|
||||
|
||||
# simplify backend inference
|
||||
with SwitchBackendWrapper(ORTWrapper) as wrapper:
|
||||
|
|
|
@@ -0,0 +1,19 @@
# Copyright (c) OpenMMLab. All rights reserved.
import pytest

from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Codebase


def pytest_ignore_collect(*args, **kwargs):
    import importlib
    return importlib.util.find_spec('mmrotate') is None


@pytest.fixture(autouse=True, scope='package')
def import_all_modules():
    codebase = Codebase.MMROTATE
    try:
        import_codebase(codebase)
    except ImportError:
        pytest.skip(f'{codebase} is not installed.', allow_module_level=True)
@@ -4,18 +4,11 @@ import numpy as np
import pytest
import torch

from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase
from mmdeploy.utils import Backend
from mmdeploy.utils.test import (WrapFunction, WrapModel, backend_checker,
                                 check_backend, get_onnx_model,
                                 get_rewrite_outputs)

try:
    import_codebase(Codebase.MMROTATE)
except ImportError:
    pytest.skip(
        f'{Codebase.MMROTATE} is not installed.', allow_module_level=True)


@backend_checker(Backend.ONNXRUNTIME)
def test_multiclass_nms_rotated():
@ -9,18 +9,11 @@ import numpy as np
|
|||
import pytest
|
||||
import torch
|
||||
|
||||
from mmdeploy.codebase import import_codebase
|
||||
from mmdeploy.utils import Backend, Codebase
|
||||
from mmdeploy.utils import Backend
|
||||
from mmdeploy.utils.config_utils import get_ir_config
|
||||
from mmdeploy.utils.test import (WrapModel, check_backend, get_model_outputs,
|
||||
get_rewrite_outputs)
|
||||
|
||||
try:
|
||||
import_codebase(Codebase.MMROTATE)
|
||||
except ImportError:
|
||||
pytest.skip(
|
||||
f'{Codebase.MMROTATE} is not installed.', allow_module_level=True)
|
||||
|
||||
|
||||
def seed_everything(seed=1029):
|
||||
random.seed(seed)
|
||||
|
@@ -47,7 +40,17 @@ def convert_to_list(rewrite_output: Dict, output_names: List[str]) -> List:
    return outputs


def get_anchor_head_model():
def get_head_inputs(seed, channels, num_inputs):
    """Generate inputs for the head."""
    seed_everything(seed)
    return [
        torch.rand(1, channels, pow(2, i), pow(2, i))
        for i in range(num_inputs, 0, -1)
    ]


@pytest.fixture
def anchor_head():
    """AnchorHead Config."""
    test_cfg = mmcv.Config(
        dict(
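get_head_inputs above folds the repeated seed_everything(...) plus list-comprehension blocks into one helper. A self-contained sketch of the idea (simplified seeding; the shapes mirror the dense-head tests but are otherwise illustrative):

import torch


def seed_everything(seed):
    # simplified: the test file also seeds random/numpy and cudnn
    torch.manual_seed(seed)


def get_head_inputs(seed, channels, num_inputs):
    """Deterministic multi-level feature maps, largest level first."""
    seed_everything(seed)
    return [
        torch.rand(1, channels, pow(2, i), pow(2, i))
        for i in range(num_inputs, 0, -1)
    ]


cls_score = get_head_inputs(1234, 36, 5)  # five levels, 36 channels each
bboxes = get_head_inputs(5678, 45, 5)
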
@ -137,10 +140,10 @@ def get_deploy_cfg(backend_type: Backend, ir_type: str):
|
|||
|
||||
@pytest.mark.parametrize('backend_type, ir_type',
|
||||
[(Backend.ONNXRUNTIME, 'onnx')])
|
||||
def test_base_dense_head_get_bboxes(backend_type: Backend, ir_type: str):
|
||||
def test_base_dense_head_get_bboxes(backend_type: Backend, ir_type: str,
|
||||
anchor_head):
|
||||
"""Test get_bboxes rewrite of base dense head."""
|
||||
check_backend(backend_type)
|
||||
anchor_head = get_anchor_head_model()
|
||||
anchor_head.cpu().eval()
|
||||
s = 128
|
||||
img_metas = [{
|
||||
|
@ -156,12 +159,8 @@ def test_base_dense_head_get_bboxes(backend_type: Backend, ir_type: str):
|
|||
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2).
|
||||
# the bboxes's size: (1, 45, 32, 32), (1, 45, 16, 16),
|
||||
# (1, 45, 8, 8), (1, 45, 4, 4), (1, 45, 2, 2)
|
||||
seed_everything(1234)
|
||||
cls_score = [
|
||||
torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
|
||||
]
|
||||
seed_everything(5678)
|
||||
bboxes = [torch.rand(1, 45, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
|
||||
cls_score = get_head_inputs(1234, 36, 5)
|
||||
bboxes = get_head_inputs(5678, 45, 5)
|
||||
|
||||
# to get outputs of pytorch model
|
||||
model_inputs = {
|
||||
|
@ -202,7 +201,8 @@ def test_base_dense_head_get_bboxes(backend_type: Backend, ir_type: str):
|
|||
assert rewrite_outputs is not None
|
||||
|
||||
|
||||
def get_single_roi_extractor():
|
||||
@pytest.fixture
|
||||
def single_roi_extractor():
|
||||
"""SingleRoIExtractor Config."""
|
||||
from mmrotate.models.roi_heads import RotatedSingleRoIExtractor
|
||||
roi_layer = dict(
|
||||
|
@ -216,10 +216,10 @@ def get_single_roi_extractor():
|
|||
|
||||
|
||||
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
|
||||
def test_rotated_single_roi_extractor(backend_type: Backend):
|
||||
def test_rotated_single_roi_extractor(backend_type: Backend,
|
||||
single_roi_extractor):
|
||||
check_backend(backend_type)
|
||||
|
||||
single_roi_extractor = get_single_roi_extractor()
|
||||
output_names = ['roi_feat']
|
||||
deploy_cfg = mmcv.Config(
|
||||
dict(
|
||||
|
@ -262,7 +262,8 @@ def test_rotated_single_roi_extractor(backend_type: Backend):
|
|||
model_output, backend_output, rtol=1e-03, atol=1e-05)
|
||||
|
||||
|
||||
def get_oriented_rpn_head_model():
|
||||
@pytest.fixture
|
||||
def oriented_rpn_head_model():
|
||||
"""Oriented RPN Head Config."""
|
||||
test_cfg = mmcv.Config(
|
||||
dict(
|
||||
|
@ -283,9 +284,10 @@ def get_oriented_rpn_head_model():
|
|||
|
||||
|
||||
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
|
||||
def test_get_bboxes_of_oriented_rpn_head(backend_type: Backend):
|
||||
def test_get_bboxes_of_oriented_rpn_head(backend_type: Backend,
|
||||
oriented_rpn_head_model):
|
||||
check_backend(backend_type)
|
||||
head = get_oriented_rpn_head_model()
|
||||
head = oriented_rpn_head_model
|
||||
head.cpu().eval()
|
||||
s = 128
|
||||
img_metas = [{
|
||||
|
@ -312,12 +314,8 @@ def test_get_bboxes_of_oriented_rpn_head(backend_type: Backend):
|
|||
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2).
|
||||
# the bboxes's size: (1, 54, 32, 32), (1, 54, 16, 16),
|
||||
# (1, 54, 8, 8), (1, 54, 4, 4), (1, 54, 2, 2)
|
||||
seed_everything(1234)
|
||||
cls_score = [
|
||||
torch.rand(1, 9, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
|
||||
]
|
||||
seed_everything(5678)
|
||||
bboxes = [torch.rand(1, 54, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
|
||||
cls_score = get_head_inputs(1234, 9, 5)
|
||||
bboxes = get_head_inputs(5678, 54, 5)
|
||||
|
||||
# to get outputs of onnx model after rewrite
|
||||
img_metas[0]['img_shape'] = torch.Tensor([s, s])
|
||||
|
@ -334,7 +332,8 @@ def test_get_bboxes_of_oriented_rpn_head(backend_type: Backend):
|
|||
assert rewrite_outputs is not None
|
||||
|
||||
|
||||
def get_rotated_rpn_head_model():
|
||||
@pytest.fixture
|
||||
def rotated_rpn_head_model():
|
||||
"""Oriented RPN Head Config."""
|
||||
test_cfg = mmcv.Config(
|
||||
dict(
|
||||
|
@ -364,9 +363,10 @@ def get_rotated_rpn_head_model():
|
|||
|
||||
|
||||
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
|
||||
def test_get_bboxes_of_rotated_rpn_head(backend_type: Backend):
|
||||
def test_get_bboxes_of_rotated_rpn_head(backend_type: Backend,
|
||||
rotated_rpn_head_model):
|
||||
check_backend(backend_type)
|
||||
head = get_rotated_rpn_head_model()
|
||||
head = rotated_rpn_head_model
|
||||
head.cpu().eval()
|
||||
s = 128
|
||||
img_metas = [{
|
||||
|
@ -393,12 +393,8 @@ def test_get_bboxes_of_rotated_rpn_head(backend_type: Backend):
|
|||
# (1, 3, 8, 8), (1, 3, 4, 4), (1, 3, 2, 2).
|
||||
# the bboxes's size: (1, 18, 32, 32), (1, 18, 16, 16),
|
||||
# (1, 18, 8, 8), (1, 18, 4, 4), (1, 18, 2, 2)
|
||||
seed_everything(1234)
|
||||
cls_score = [
|
||||
torch.rand(1, 3, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
|
||||
]
|
||||
seed_everything(5678)
|
||||
bboxes = [torch.rand(1, 18, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
|
||||
cls_score = get_head_inputs(1234, 3, 5)
|
||||
bboxes = get_head_inputs(5678, 18, 5)
|
||||
|
||||
# to get outputs of onnx model after rewrite
|
||||
img_metas[0]['img_shape'] = torch.Tensor([s, s])
|
||||
|
@ -468,8 +464,7 @@ def test_rotate_standard_roi_head__simple_test(backend_type: Backend):
|
|||
test_cfg=test_cfg)
|
||||
head.cpu().eval()
|
||||
|
||||
seed_everything(1234)
|
||||
x = [torch.rand(1, 3, pow(2, i), pow(2, i)) for i in range(4, 0, -1)]
|
||||
x = get_head_inputs(1234, 3, 4)
|
||||
proposals = [torch.rand(1, 100, 6), torch.randint(0, 10, (1, 100))]
|
||||
img_metas = [{'img_shape': torch.tensor([224, 224])}]
|
||||
|
||||
|
@ -536,7 +531,7 @@ def test_gv_ratio_roi_head__simple_test(backend_type: Backend):
|
|||
head.cpu().eval()
|
||||
|
||||
seed_everything(1234)
|
||||
x = [torch.rand(1, 3, pow(2, i), pow(2, i)) for i in range(4, 0, -1)]
|
||||
x = get_head_inputs(1234, 3, 4)
|
||||
bboxes = torch.rand(1, 100, 2)
|
||||
bboxes = torch.cat(
|
||||
[bboxes, bboxes + torch.rand(1, 100, 2) + torch.rand(1, 100, 1)],
|
||||
|
@ -554,7 +549,8 @@ def test_gv_ratio_roi_head__simple_test(backend_type: Backend):
|
|||
assert rewrite_outputs is not None
|
||||
|
||||
|
||||
def get_roi_trans_roi_head_model():
|
||||
@pytest.fixture
|
||||
def roi_trans_roi_head_model():
|
||||
"""Oriented RPN Head Config."""
|
||||
angle_version = 'le90'
|
||||
|
||||
|
@ -631,11 +627,12 @@ def get_roi_trans_roi_head_model():
|
|||
return model
|
||||
|
||||
|
||||
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
|
||||
def test_simple_test_of_roi_trans_roi_head(backend_type: Backend):
|
||||
@pytest.mark.parametrize('backend_type', [Backend.TENSORRT])
|
||||
def test_simple_test_of_roi_trans_roi_head(backend_type: Backend,
|
||||
roi_trans_roi_head_model):
|
||||
check_backend(backend_type)
|
||||
|
||||
roi_head = get_roi_trans_roi_head_model()
|
||||
roi_head = roi_trans_roi_head_model
|
||||
roi_head.cpu()
|
||||
|
||||
seed_everything(1234)
|
||||
|
@@ -661,7 +658,9 @@ def test_simple_test_of_roi_trans_roi_head(backend_type: Backend):
    output_names = ['det_bboxes', 'det_labels']
    deploy_cfg = mmcv.Config(
        dict(
            backend_config=dict(type=backend_type.value),
            backend_config=dict(
                type=backend_type.value,
                common_config=dict(max_workspace_size=1 << 30)),
            onnx_config=dict(output_names=output_names, input_shape=None),
            codebase_config=dict(
                type='mmrotate',
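The hunk above moves this test to TensorRT and sets a builder workspace limit. An illustrative deploy config with that setting (values are examples only, not requirements of the commit):

import mmcv

deploy_cfg = mmcv.Config(
    dict(
        backend_config=dict(
            type='tensorrt',
            # 1 << 30 bytes = 1 GiB of workspace for the TensorRT builder;
            # too small a budget can make engine creation fail.
            common_config=dict(max_workspace_size=1 << 30)),
        onnx_config=dict(
            output_names=['det_bboxes', 'det_labels'], input_shape=None)))
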
@ -1,6 +1,5 @@
|
|||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import os
|
||||
from tempfile import NamedTemporaryFile, TemporaryDirectory
|
||||
|
||||
import mmcv
|
||||
import numpy as np
|
||||
|
@ -9,64 +8,74 @@ import torch
|
|||
from torch.utils.data import DataLoader
|
||||
from torch.utils.data.dataset import Dataset
|
||||
|
||||
import mmdeploy.backend.onnxruntime as ort_apis
|
||||
from mmdeploy.apis import build_task_processor
|
||||
from mmdeploy.codebase import import_codebase
|
||||
from mmdeploy.utils import Codebase, load_config
|
||||
from mmdeploy.utils import load_config
|
||||
from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper
|
||||
|
||||
try:
|
||||
import_codebase(Codebase.MMROTATE)
|
||||
except ImportError:
|
||||
pytest.skip(
|
||||
f'{Codebase.MMROTATE} is not installed.', allow_module_level=True)
|
||||
|
||||
model_cfg_path = 'tests/test_codebase/test_mmrotate/data/model.py'
|
||||
model_cfg = load_config(model_cfg_path)[0]
|
||||
deploy_cfg = mmcv.Config(
|
||||
dict(
|
||||
backend_config=dict(type='onnxruntime'),
|
||||
codebase_config=dict(
|
||||
type='mmrotate',
|
||||
task='RotatedDetection',
|
||||
post_processing=dict(
|
||||
score_threshold=0.05,
|
||||
iou_threshold=0.1,
|
||||
pre_top_k=2000,
|
||||
keep_top_k=2000)),
|
||||
onnx_config=dict(
|
||||
type='onnx',
|
||||
export_params=True,
|
||||
keep_initializers_as_inputs=False,
|
||||
opset_version=11,
|
||||
input_shape=None,
|
||||
input_names=['input'],
|
||||
output_names=['dets', 'labels'])))
|
||||
onnx_file = NamedTemporaryFile(suffix='.onnx').name
|
||||
task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
|
||||
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def model_cfg():
|
||||
return load_config(model_cfg_path)[0]
|
||||
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def deploy_cfg():
|
||||
return mmcv.Config(
|
||||
dict(
|
||||
backend_config=dict(type='onnxruntime'),
|
||||
codebase_config=dict(
|
||||
type='mmrotate',
|
||||
task='RotatedDetection',
|
||||
post_processing=dict(
|
||||
score_threshold=0.05,
|
||||
iou_threshold=0.1,
|
||||
pre_top_k=2000,
|
||||
keep_top_k=2000)),
|
||||
onnx_config=dict(
|
||||
type='onnx',
|
||||
export_params=True,
|
||||
keep_initializers_as_inputs=False,
|
||||
opset_version=11,
|
||||
input_shape=None,
|
||||
input_names=['input'],
|
||||
output_names=['dets', 'labels'])))
|
||||
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def task_processor(model_cfg, deploy_cfg):
|
||||
return build_task_processor(model_cfg, deploy_cfg, 'cpu')
|
||||
|
||||
|
||||
img_shape = (32, 32)
|
||||
img = np.random.rand(*img_shape, 3)
|
||||
|
||||
|
||||
def test_init_pytorch_model():
|
||||
@pytest.fixture(scope='module')
|
||||
def img():
|
||||
return np.random.rand(*img_shape, 3)
|
||||
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def torch_model(task_processor):
|
||||
return task_processor.init_pytorch_model(None)
|
||||
|
||||
|
||||
def test_init_pytorch_model(torch_model):
|
||||
from mmrotate.models import RotatedBaseDetector
|
||||
model = task_processor.init_pytorch_model(None)
|
||||
assert isinstance(model, RotatedBaseDetector)
|
||||
assert isinstance(torch_model, RotatedBaseDetector)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def backend_model():
|
||||
@pytest.fixture(scope='module')
|
||||
def backend_model(task_processor):
|
||||
from mmdeploy.backend.onnxruntime import ORTWrapper
|
||||
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
|
||||
wrapper = SwitchBackendWrapper(ORTWrapper)
|
||||
wrapper.set(outputs={
|
||||
'dets': torch.rand(1, 10, 6),
|
||||
'labels': torch.rand(1, 10)
|
||||
})
|
||||
with SwitchBackendWrapper(ORTWrapper) as wrapper:
|
||||
wrapper.set(outputs={
|
||||
'dets': torch.rand(1, 10, 6),
|
||||
'labels': torch.rand(1, 10)
|
||||
})
|
||||
|
||||
yield task_processor.init_backend_model([''])
|
||||
|
||||
wrapper.recover()
|
||||
yield task_processor.init_backend_model([''])
|
||||
|
||||
|
||||
def test_init_backend_model(backend_model):
|
||||
|
@ -75,18 +84,22 @@ def test_init_backend_model(backend_model):
|
|||
assert isinstance(backend_model, End2EndModel)
|
||||
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def model_inputs(task_processor, img):
|
||||
return task_processor.create_input(img, input_shape=img_shape)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('device', ['cpu'])
|
||||
def test_create_input(device):
|
||||
def test_create_input(device, task_processor, model_inputs):
|
||||
original_device = task_processor.device
|
||||
task_processor.device = device
|
||||
inputs = task_processor.create_input(img, input_shape=img_shape)
|
||||
assert len(inputs) == 2
|
||||
assert len(model_inputs) == 2
|
||||
task_processor.device = original_device
|
||||
|
||||
|
||||
def test_run_inference(backend_model):
|
||||
torch_model = task_processor.init_pytorch_model(None)
|
||||
input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
|
||||
def test_run_inference(backend_model, task_processor, torch_model,
|
||||
model_inputs):
|
||||
input_dict, _ = model_inputs
|
||||
torch_results = task_processor.run_inference(torch_model, input_dict)
|
||||
backend_results = task_processor.run_inference(backend_model, input_dict)
|
||||
assert torch_results is not None
|
||||
|
@ -94,21 +107,20 @@ def test_run_inference(backend_model):
|
|||
assert len(torch_results[0]) == len(backend_results[0])
|
||||
|
||||
|
||||
def test_visualize(backend_model):
|
||||
input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
|
||||
def test_visualize(backend_model, task_processor, model_inputs, img, tmp_path):
|
||||
input_dict, _ = model_inputs
|
||||
results = task_processor.run_inference(backend_model, input_dict)
|
||||
with TemporaryDirectory() as dir:
|
||||
filename = dir + 'tmp.jpg'
|
||||
task_processor.visualize(backend_model, img, results[0], filename, '')
|
||||
assert os.path.exists(filename)
|
||||
filename = str(tmp_path / 'tmp.jpg')
|
||||
task_processor.visualize(backend_model, img, results[0], filename, '')
|
||||
assert os.path.exists(filename)
|
||||
|
||||
|
||||
def test_get_partition_cfg():
|
||||
def test_get_partition_cfg(task_processor):
|
||||
with pytest.raises(NotImplementedError):
|
||||
_ = task_processor.get_partition_cfg(partition_type='')
|
||||
|
||||
|
||||
def test_build_dataset_and_dataloader():
|
||||
def test_build_dataset_and_dataloader(task_processor, model_cfg):
|
||||
dataset = task_processor.build_dataset(
|
||||
dataset_cfg=model_cfg, dataset_type='test')
|
||||
assert isinstance(dataset, Dataset), 'Failed to build dataset'
|
||||
|
@ -116,7 +128,7 @@ def test_build_dataset_and_dataloader():
|
|||
assert isinstance(dataloader, DataLoader), 'Failed to build dataloader'
|
||||
|
||||
|
||||
def test_single_gpu_test_and_evaluate():
|
||||
def test_single_gpu_test_and_evaluate(task_processor, model_cfg, tmp_path):
|
||||
from mmcv.parallel import MMDataParallel
|
||||
|
||||
class DummyDataset(Dataset):
|
||||
|
@ -143,6 +155,6 @@ def test_single_gpu_test_and_evaluate():
|
|||
# Run test
|
||||
outputs = task_processor.single_gpu_test(model, dataloader)
|
||||
assert isinstance(outputs, list)
|
||||
output_file = NamedTemporaryFile(suffix='.pkl').name
|
||||
output_file = str(tmp_path / 'tmp.pkl')
|
||||
task_processor.evaluate_outputs(
|
||||
model_cfg, outputs, dataset, 'bbox', out=output_file, format_only=True)
|
||||
|
|
|
@ -1,65 +1,52 @@
|
|||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import os.path as osp
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
import mmcv
|
||||
import numpy as np
|
||||
import pytest
|
||||
import torch
|
||||
|
||||
import mmdeploy.backend.onnxruntime as ort_apis
|
||||
from mmdeploy.codebase import import_codebase
|
||||
from mmdeploy.utils import Backend, Codebase, load_config
|
||||
from mmdeploy.utils import Backend, load_config
|
||||
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker
|
||||
|
||||
try:
|
||||
import_codebase(Codebase.MMROTATE)
|
||||
except ImportError:
|
||||
pytest.skip(
|
||||
f'{Codebase.MMROTATE} is not installed.', allow_module_level=True)
|
||||
|
||||
IMAGE_SIZE = 32
|
||||
|
||||
|
||||
@backend_checker(Backend.ONNXRUNTIME)
|
||||
class TestEnd2EndModel:
|
||||
|
||||
@classmethod
|
||||
def setup_class(cls):
|
||||
@pytest.fixture(scope='class')
|
||||
def end2end_model(self):
|
||||
# force add backend wrapper regardless of plugins
|
||||
from mmdeploy.backend.onnxruntime import ORTWrapper
|
||||
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
|
||||
|
||||
# simplify backend inference
|
||||
cls.wrapper = SwitchBackendWrapper(ORTWrapper)
|
||||
cls.outputs = {
|
||||
'dets': torch.rand(1, 10, 6),
|
||||
'labels': torch.rand(1, 10)
|
||||
}
|
||||
cls.wrapper.set(outputs=cls.outputs)
|
||||
deploy_cfg = mmcv.Config(
|
||||
{'onnx_config': {
|
||||
'output_names': ['dets', 'labels']
|
||||
}})
|
||||
model_cfg_path = 'tests/test_codebase/test_mmrotate/data/model.py'
|
||||
model_cfg = load_config(model_cfg_path)[0]
|
||||
|
||||
from mmdeploy.codebase.mmrotate.deploy.rotated_detection_model import \
|
||||
End2EndModel
|
||||
cls.end2end_model = End2EndModel(
|
||||
Backend.ONNXRUNTIME, [''], ['' for i in range(15)],
|
||||
device='cpu',
|
||||
deploy_cfg=deploy_cfg,
|
||||
model_cfg=model_cfg)
|
||||
|
||||
@classmethod
|
||||
def teardown_class(cls):
|
||||
cls.wrapper.recover()
|
||||
# simplify backend inference
|
||||
with SwitchBackendWrapper(ORTWrapper) as wrapper:
|
||||
outputs = {
|
||||
'dets': torch.rand(1, 10, 6),
|
||||
'labels': torch.rand(1, 10)
|
||||
}
|
||||
wrapper.set(outputs=outputs)
|
||||
deploy_cfg = mmcv.Config(
|
||||
{'onnx_config': {
|
||||
'output_names': ['dets', 'labels']
|
||||
}})
|
||||
model_cfg_path = 'tests/test_codebase/test_mmrotate/data/model.py'
|
||||
model_cfg = load_config(model_cfg_path)[0]
|
||||
|
||||
model = End2EndModel(
|
||||
Backend.ONNXRUNTIME, [''], ['' for i in range(15)],
|
||||
device='cpu',
|
||||
deploy_cfg=deploy_cfg,
|
||||
model_cfg=model_cfg)
|
||||
yield model
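A class-scoped fixture that yields the model gives the same one-time setup and teardown that setup_class/teardown_class used to provide, while letting each test receive the object as an argument; a generic sketch of the pattern (names are illustrative, not from this diff):

import pytest


class TestWithSharedResource:

    @pytest.fixture(scope='class')
    def resource(self):
        res = {'created': True}  # expensive setup would go here
        yield res                # shared by every test in this class
        res.clear()              # teardown runs once, after the last test

    def test_uses_resource(self, resource):
        assert resource['created']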
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'ori_shape',
|
||||
[[IMAGE_SIZE, IMAGE_SIZE, 3], [2 * IMAGE_SIZE, 2 * IMAGE_SIZE, 3]])
|
||||
def test_forward(self, ori_shape):
|
||||
def test_forward(self, ori_shape, end2end_model):
|
||||
imgs = [torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE)]
|
||||
img_metas = [[{
|
||||
'ori_shape': ori_shape,
|
||||
|
@ -67,21 +54,21 @@ class TestEnd2EndModel:
|
|||
'scale_factor': [1., 1., 1., 1.],
|
||||
'filename': ''
|
||||
}]]
|
||||
results = self.end2end_model.forward(imgs, img_metas)
|
||||
results = end2end_model.forward(imgs, img_metas)
|
||||
assert results is not None, 'failed to get output using '\
|
||||
'End2EndModel'
|
||||
|
||||
def test_forward_test(self):
|
||||
def test_forward_test(self, end2end_model):
|
||||
imgs = torch.rand(2, 3, IMAGE_SIZE, IMAGE_SIZE)
|
||||
results = self.end2end_model.forward_test(imgs)
|
||||
results = end2end_model.forward_test(imgs)
|
||||
assert isinstance(results[0], torch.Tensor)
|
||||
|
||||
def test_show_result(self):
|
||||
def test_show_result(self, end2end_model, tmp_path):
|
||||
input_img = np.zeros([IMAGE_SIZE, IMAGE_SIZE, 3])
|
||||
img_path = NamedTemporaryFile(suffix='.jpg').name
|
||||
img_path = str(tmp_path / 'tmp.jpg')
|
||||
|
||||
result = torch.rand(1, 10, 6)
|
||||
self.end2end_model.show_result(
|
||||
end2end_model.show_result(
|
||||
input_img, result, '', show=False, out_file=img_path)
|
||||
assert osp.exists(img_path)
|
||||
|
||||
|
@ -97,7 +84,6 @@ def test_build_rotated_detection_model():
|
|||
codebase_config=dict(type='mmrotate')))
|
||||
|
||||
from mmdeploy.backend.onnxruntime import ORTWrapper
|
||||
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
|
||||
|
||||
# simplify backend inference
|
||||
with SwitchBackendWrapper(ORTWrapper) as wrapper:
|
||||
|
|
|
@ -0,0 +1,19 @@
|
|||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import pytest
|
||||
|
||||
from mmdeploy.codebase import import_codebase
|
||||
from mmdeploy.utils import Codebase
|
||||
|
||||
|
||||
def pytest_ignore_collect(*args, **kwargs):
|
||||
import importlib
|
||||
return importlib.util.find_spec('mmseg') is None
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True, scope='package')
|
||||
def import_all_modules():
|
||||
codebase = Codebase.MMSEG
|
||||
try:
|
||||
import_codebase(codebase)
|
||||
except ImportError:
|
||||
pytest.skip(f'{codebase} is not installed.', allow_module_level=True)
|
|
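For reference, with a package-level conftest like the one above, individual test modules in the same package no longer need their own try/except import guard; a minimal sketch of a module that relies on it (file name and test body are hypothetical, not part of this change):

# tests/test_codebase/test_mmseg/test_example.py (hypothetical)
import importlib.util


def test_mmseg_is_importable():
    # The package-scoped autouse fixture in conftest.py has already called
    # import_codebase (or skipped this package when mmseg is missing), so the
    # library must be importable by the time any test in the package runs.
    assert importlib.util.find_spec('mmseg') is not None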
@ -5,20 +5,13 @@ import pytest
|
|||
import torch
|
||||
import torch.nn as nn
|
||||
from mmcv import ConfigDict
|
||||
from mmseg.models import BACKBONES, HEADS
|
||||
from mmseg.models.decode_heads.decode_head import BaseDecodeHead
|
||||
|
||||
from mmdeploy.codebase import import_codebase
|
||||
from mmdeploy.utils import Backend, Codebase, Task
|
||||
from mmdeploy.utils.test import (WrapModel, check_backend, get_model_outputs,
|
||||
get_rewrite_outputs)
|
||||
|
||||
try:
|
||||
import_codebase(Codebase.MMSEG)
|
||||
except ImportError:
|
||||
pytest.skip(f'{Codebase.MMSEG} is not installed.', allow_module_level=True)
|
||||
|
||||
from mmseg.models import BACKBONES, HEADS
|
||||
from mmseg.models.decode_heads.decode_head import BaseDecodeHead
|
||||
|
||||
|
||||
@BACKBONES.register_module()
|
||||
class ExampleBackbone(nn.Module):
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import copy
|
||||
import os
|
||||
from tempfile import NamedTemporaryFile, TemporaryDirectory
|
||||
from typing import Any
|
||||
|
||||
import mmcv
|
||||
|
@ -10,40 +9,49 @@ import pytest
|
|||
import torch
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
import mmdeploy.backend.onnxruntime as ort_apis
|
||||
from mmdeploy.apis import build_task_processor
|
||||
from mmdeploy.codebase import import_codebase
|
||||
from mmdeploy.utils import Codebase, load_config
|
||||
from mmdeploy.utils import load_config
|
||||
from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper
|
||||
|
||||
try:
|
||||
import_codebase(Codebase.MMSEG)
|
||||
except ImportError:
|
||||
pytest.skip(f'{Codebase.MMSEG} is not installed.', allow_module_level=True)
|
||||
|
||||
model_cfg_path = 'tests/test_codebase/test_mmseg/data/model.py'
|
||||
model_cfg = load_config(model_cfg_path)[0]
|
||||
deploy_cfg = mmcv.Config(
|
||||
dict(
|
||||
backend_config=dict(type='onnxruntime'),
|
||||
codebase_config=dict(type='mmseg', task='Segmentation'),
|
||||
onnx_config=dict(
|
||||
type='onnx',
|
||||
export_params=True,
|
||||
keep_initializers_as_inputs=False,
|
||||
opset_version=11,
|
||||
input_shape=None,
|
||||
input_names=['input'],
|
||||
output_names=['output'])))
|
||||
|
||||
onnx_file = NamedTemporaryFile(suffix='.onnx').name
|
||||
task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def model_cfg():
|
||||
return load_config(model_cfg_path)[0]
|
||||
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def deploy_cfg():
|
||||
return mmcv.Config(
|
||||
dict(
|
||||
backend_config=dict(type='onnxruntime'),
|
||||
codebase_config=dict(type='mmseg', task='Segmentation'),
|
||||
onnx_config=dict(
|
||||
type='onnx',
|
||||
export_params=True,
|
||||
keep_initializers_as_inputs=False,
|
||||
opset_version=11,
|
||||
input_shape=None,
|
||||
input_names=['input'],
|
||||
output_names=['output'])))
|
||||
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def task_processor(model_cfg, deploy_cfg):
|
||||
return build_task_processor(model_cfg, deploy_cfg, 'cpu')
|
||||
|
||||
|
||||
img_shape = (32, 32)
|
||||
img = np.random.rand(*img_shape, 3)
|
||||
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def img():
|
||||
return np.random.rand(*img_shape, 3)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('from_mmrazor', [True, False, '123', 0])
|
||||
def test_init_pytorch_model(from_mmrazor: Any):
|
||||
def test_init_pytorch_model(from_mmrazor: Any, task_processor, deploy_cfg):
|
||||
from mmseg.models.segmentors.base import BaseSegmentor
|
||||
if from_mmrazor is False:
|
||||
_task_processor = task_processor
|
||||
|
@ -72,58 +80,56 @@ def test_init_pytorch_model(from_mmrazor: Any):
|
|||
assert isinstance(model, BaseSegmentor)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def backend_model():
|
||||
@pytest.fixture(scope='module')
|
||||
def backend_model(task_processor):
|
||||
from mmdeploy.backend.onnxruntime import ORTWrapper
|
||||
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
|
||||
wrapper = SwitchBackendWrapper(ORTWrapper)
|
||||
wrapper.set(outputs={
|
||||
'output': torch.rand(1, 1, *img_shape),
|
||||
})
|
||||
with SwitchBackendWrapper(ORTWrapper) as wrapper:
|
||||
wrapper.set(outputs={
|
||||
'output': torch.rand(1, 1, *img_shape),
|
||||
})
|
||||
|
||||
yield task_processor.init_backend_model([''])
|
||||
|
||||
wrapper.recover()
|
||||
yield task_processor.init_backend_model([''])
|
||||
|
||||
|
||||
def test_init_backend_model(backend_model):
|
||||
assert isinstance(backend_model, torch.nn.Module)
|
||||
|
||||
|
||||
def test_create_input():
|
||||
inputs = task_processor.create_input(img, input_shape=img_shape)
|
||||
assert isinstance(inputs, tuple) and len(inputs) == 2
|
||||
@pytest.fixture(scope='module')
|
||||
def model_inputs(task_processor, img):
|
||||
return task_processor.create_input(img, input_shape=img_shape)
|
||||
|
||||
|
||||
def test_run_inference(backend_model):
|
||||
def test_create_input(model_inputs):
|
||||
assert isinstance(model_inputs, tuple) and len(model_inputs) == 2
|
||||
|
||||
|
||||
def test_run_inference(backend_model, task_processor, img):
|
||||
input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
|
||||
results = task_processor.run_inference(backend_model, input_dict)
|
||||
assert results is not None
|
||||
|
||||
|
||||
def test_visualize(backend_model):
|
||||
input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
|
||||
def test_visualize(backend_model, task_processor, model_inputs, img, tmp_path):
|
||||
input_dict, _ = model_inputs
|
||||
results = task_processor.run_inference(backend_model, input_dict)
|
||||
with TemporaryDirectory() as dir:
|
||||
filename = dir + 'tmp.jpg'
|
||||
task_processor.visualize(backend_model, img, results[0], filename, '')
|
||||
assert os.path.exists(filename)
|
||||
filename = str(tmp_path / 'tmp.jpg')
|
||||
task_processor.visualize(backend_model, img, results[0], filename, '')
|
||||
assert os.path.exists(filename)
|
||||
|
||||
|
||||
def test_get_tensort_from_input():
|
||||
def test_get_tensort_from_input(task_processor):
|
||||
input_data = {'img': [torch.ones(3, 4, 5)]}
|
||||
inputs = task_processor.get_tensor_from_input(input_data)
|
||||
assert torch.equal(inputs, torch.ones(3, 4, 5))
|
||||
|
||||
|
||||
def test_get_partition_cfg():
|
||||
try:
|
||||
def test_get_partition_cfg(task_processor):
|
||||
with pytest.raises(NotImplementedError):
|
||||
_ = task_processor.get_partition_cfg(partition_type='')
|
||||
except NotImplementedError:
|
||||
pass
|
||||
|
||||
|
||||
def test_build_dataset_and_dataloader():
|
||||
def test_build_dataset_and_dataloader(task_processor, model_cfg):
|
||||
from torch.utils.data import DataLoader, Dataset
|
||||
dataset = task_processor.build_dataset(
|
||||
dataset_cfg=model_cfg, dataset_type='test')
|
||||
|
@ -132,7 +138,7 @@ def test_build_dataset_and_dataloader():
|
|||
assert isinstance(dataloader, DataLoader), 'Failed to build dataloader'
|
||||
|
||||
|
||||
def test_single_gpu_test_and_evaluate():
|
||||
def test_single_gpu_test_and_evaluate(task_processor, model_cfg):
|
||||
from mmcv.parallel import MMDataParallel
|
||||
|
||||
# Prepare dataloader
|
||||
|
|
|
@ -1,22 +1,14 @@
|
|||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import os.path as osp
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
import mmcv
|
||||
import numpy as np
|
||||
import pytest
|
||||
import torch
|
||||
|
||||
import mmdeploy.backend.onnxruntime as ort_apis
|
||||
from mmdeploy.codebase import import_codebase
|
||||
from mmdeploy.utils import Backend, Codebase
|
||||
from mmdeploy.utils import Backend
|
||||
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker
|
||||
|
||||
try:
|
||||
import_codebase(Codebase.MMSEG)
|
||||
except ImportError:
|
||||
pytest.skip(f'{Codebase.MMSEG} is not installed.', allow_module_level=True)
|
||||
|
||||
NUM_CLASS = 19
|
||||
IMAGE_SIZE = 32
|
||||
|
||||
|
@ -24,63 +16,59 @@ IMAGE_SIZE = 32
|
|||
@backend_checker(Backend.ONNXRUNTIME)
|
||||
class TestEnd2EndModel:
|
||||
|
||||
@classmethod
|
||||
def setup_class(cls):
|
||||
@pytest.fixture(scope='class')
|
||||
def end2end_model(self):
|
||||
# force add backend wrapper regardless of plugins
|
||||
from mmdeploy.backend.onnxruntime import ORTWrapper
|
||||
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
|
||||
|
||||
# simplify backend inference
|
||||
cls.wrapper = SwitchBackendWrapper(ORTWrapper)
|
||||
cls.outputs = {
|
||||
'outputs': torch.rand(1, 1, IMAGE_SIZE, IMAGE_SIZE),
|
||||
}
|
||||
cls.wrapper.set(outputs=cls.outputs)
|
||||
deploy_cfg = mmcv.Config(
|
||||
{'onnx_config': {
|
||||
'output_names': ['outputs']
|
||||
}})
|
||||
with SwitchBackendWrapper(ORTWrapper) as wrapper:
|
||||
outputs = {
|
||||
'outputs': torch.rand(1, 1, IMAGE_SIZE, IMAGE_SIZE),
|
||||
}
|
||||
wrapper.set(outputs=outputs)
|
||||
deploy_cfg = mmcv.Config(
|
||||
{'onnx_config': {
|
||||
'output_names': ['outputs']
|
||||
}})
|
||||
|
||||
from mmdeploy.codebase.mmseg.deploy.segmentation_model import \
|
||||
End2EndModel
|
||||
class_names = ['' for i in range(NUM_CLASS)]
|
||||
palette = np.random.randint(0, 255, size=(NUM_CLASS, 3))
|
||||
cls.end2end_model = End2EndModel(
|
||||
Backend.ONNXRUNTIME, [''],
|
||||
device='cpu',
|
||||
class_names=class_names,
|
||||
palette=palette,
|
||||
deploy_cfg=deploy_cfg)
|
||||
|
||||
@classmethod
|
||||
def teardown_class(cls):
|
||||
cls.wrapper.recover()
|
||||
from mmdeploy.codebase.mmseg.deploy.segmentation_model import \
|
||||
End2EndModel
|
||||
class_names = ['' for i in range(NUM_CLASS)]
|
||||
palette = np.random.randint(0, 255, size=(NUM_CLASS, 3))
|
||||
model = End2EndModel(
|
||||
Backend.ONNXRUNTIME, [''],
|
||||
device='cpu',
|
||||
class_names=class_names,
|
||||
palette=palette,
|
||||
deploy_cfg=deploy_cfg)
|
||||
yield model
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'ori_shape',
|
||||
[[IMAGE_SIZE, IMAGE_SIZE, 3], [2 * IMAGE_SIZE, 2 * IMAGE_SIZE, 3]])
|
||||
def test_forward(self, ori_shape):
|
||||
def test_forward(self, ori_shape, end2end_model):
|
||||
imgs = [torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE)]
|
||||
img_metas = [[{
|
||||
'ori_shape': ori_shape,
|
||||
'img_shape': [IMAGE_SIZE, IMAGE_SIZE, 3],
|
||||
'scale_factor': [1., 1., 1., 1.],
|
||||
}]]
|
||||
results = self.end2end_model.forward(imgs, img_metas)
|
||||
results = end2end_model.forward(imgs, img_metas)
|
||||
assert results is not None, 'failed to get output using '\
|
||||
'End2EndModel'
|
||||
|
||||
def test_forward_test(self):
|
||||
def test_forward_test(self, end2end_model):
|
||||
imgs = torch.rand(2, 3, IMAGE_SIZE, IMAGE_SIZE)
|
||||
results = self.end2end_model.forward_test(imgs)
|
||||
results = end2end_model.forward_test(imgs)
|
||||
assert isinstance(results[0], np.ndarray)
|
||||
|
||||
def test_show_result(self):
|
||||
def test_show_result(self, end2end_model, tmp_path):
|
||||
input_img = np.zeros([IMAGE_SIZE, IMAGE_SIZE, 3])
|
||||
img_path = NamedTemporaryFile(suffix='.jpg').name
|
||||
img_path = str(tmp_path / 'tmp.jpg')
|
||||
|
||||
result = [torch.rand(IMAGE_SIZE, IMAGE_SIZE)]
|
||||
self.end2end_model.show_result(
|
||||
end2end_model.show_result(
|
||||
input_img, result, '', show=False, out_file=img_path)
|
||||
assert osp.exists(img_path), 'Fails to create drawn image.'
|
||||
|
||||
|
@ -88,45 +76,44 @@ class TestEnd2EndModel:
|
|||
@backend_checker(Backend.RKNN)
|
||||
class TestRKNNModel:
|
||||
|
||||
@classmethod
|
||||
def setup_class(cls):
|
||||
@pytest.fixture(scope='class')
|
||||
def end2end_model(self):
|
||||
# force add backend wrapper regardless of plugins
|
||||
import mmdeploy.backend.rknn as rknn_apis
|
||||
from mmdeploy.backend.rknn import RKNNWrapper
|
||||
rknn_apis.__dict__.update({'RKNNWrapper': RKNNWrapper})
|
||||
from mmdeploy.codebase.mmseg.deploy.segmentation_model import RKNNModel
|
||||
|
||||
# simplify backend inference
|
||||
cls.wrapper = SwitchBackendWrapper(RKNNWrapper)
|
||||
cls.outputs = [torch.rand(1, 19, IMAGE_SIZE, IMAGE_SIZE)]
|
||||
cls.wrapper.set(outputs=cls.outputs)
|
||||
deploy_cfg = mmcv.Config({
|
||||
'onnx_config': {
|
||||
'output_names': ['outputs']
|
||||
},
|
||||
'backend_config': {
|
||||
'common_config': {}
|
||||
}
|
||||
})
|
||||
with SwitchBackendWrapper(RKNNWrapper) as wrapper:
|
||||
outputs = [torch.rand(1, 19, IMAGE_SIZE, IMAGE_SIZE)]
|
||||
wrapper.set(outputs=outputs)
|
||||
deploy_cfg = mmcv.Config({
|
||||
'onnx_config': {
|
||||
'output_names': ['outputs']
|
||||
},
|
||||
'backend_config': {
|
||||
'common_config': {}
|
||||
}
|
||||
})
|
||||
|
||||
from mmdeploy.codebase.mmseg.deploy.segmentation_model import RKNNModel
|
||||
class_names = ['' for i in range(NUM_CLASS)]
|
||||
palette = np.random.randint(0, 255, size=(NUM_CLASS, 3))
|
||||
cls.rknn_model = RKNNModel(
|
||||
Backend.RKNN, [''],
|
||||
device='cpu',
|
||||
class_names=class_names,
|
||||
palette=palette,
|
||||
deploy_cfg=deploy_cfg)
|
||||
class_names = ['' for i in range(NUM_CLASS)]
|
||||
palette = np.random.randint(0, 255, size=(NUM_CLASS, 3))
|
||||
model = RKNNModel(
|
||||
Backend.RKNN, [''],
|
||||
device='cpu',
|
||||
class_names=class_names,
|
||||
palette=palette,
|
||||
deploy_cfg=deploy_cfg)
|
||||
yield model
|
||||
|
||||
def test_forward_test(self):
|
||||
def test_forward_test(self, end2end_model):
|
||||
imgs = torch.rand(2, 3, IMAGE_SIZE, IMAGE_SIZE)
|
||||
results = self.rknn_model.forward_test(imgs)
|
||||
results = end2end_model.forward_test(imgs)
|
||||
assert isinstance(results[0], np.ndarray)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('from_file', [True, False])
|
||||
@pytest.mark.parametrize('data_type', ['train', 'val', 'test'])
|
||||
def test_get_classes_palette_from_config(from_file, data_type):
|
||||
def test_get_classes_palette_from_config(from_file, data_type, tmp_path):
|
||||
from mmseg.datasets import DATASETS
|
||||
|
||||
from mmdeploy.codebase.mmseg.deploy.segmentation_model import \
|
||||
|
@ -145,7 +132,7 @@ def test_get_classes_palette_from_config(from_file, data_type):
|
|||
})
|
||||
|
||||
if from_file:
|
||||
config_path = NamedTemporaryFile(suffix='.py').name
|
||||
config_path = str(tmp_path / 'tmp_cfg.py')
|
||||
with open(config_path, 'w') as file:
|
||||
file.write(data_cfg.pretty_text)
|
||||
data_cfg = config_path
|
||||
|
@ -169,7 +156,6 @@ def test_build_segmentation_model():
|
|||
codebase_config=dict(type='mmseg')))
|
||||
|
||||
from mmdeploy.backend.onnxruntime import ORTWrapper
|
||||
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
|
||||
|
||||
# simplify backend inference
|
||||
with SwitchBackendWrapper(ORTWrapper) as wrapper:
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import pytest
|
||||
import torch
|
||||
|
||||
from mmdeploy.core import FUNCTION_REWRITER, RewriterContext
|
||||
|
@ -6,73 +7,89 @@ from mmdeploy.core.rewriters.function_rewriter import FunctionRewriter
|
|||
from mmdeploy.core.rewriters.rewriter_utils import collect_env
|
||||
from mmdeploy.utils.constants import IR, Backend
|
||||
|
||||
try:
|
||||
from torch.testing import assert_close as torch_assert_close
|
||||
except Exception:
|
||||
from torch.testing import assert_allclose as torch_assert_close
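The guarded import above prefers torch.testing.assert_close, which recent PyTorch releases provide while deprecating assert_allclose; both are called the same way. A small self-contained sketch of the fallback in use (illustrative only, not part of the diff):

import torch

try:
    from torch.testing import assert_close as torch_assert_close
except Exception:  # older torch without assert_close
    from torch.testing import assert_allclose as torch_assert_close

# either variant checks element-wise closeness with dtype-based tolerances
torch_assert_close(torch.tensor([1.0, 2.0]), torch.tensor([1.0, 2.0]))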
|
||||
|
||||
def test_function_rewriter():
|
||||
|
||||
x = torch.tensor([1, 2, 3, 4, 5])
|
||||
y = torch.tensor([2, 4, 6, 8, 10])
|
||||
@pytest.fixture(scope='module')
|
||||
def register_test_rewriter():
|
||||
|
||||
@FUNCTION_REWRITER.register_rewriter(
|
||||
func_name='torch.mul', backend='tensorrt')
|
||||
@FUNCTION_REWRITER.register_rewriter(
|
||||
func_name='torch.add', backend='tensorrt')
|
||||
def sub_func(rewriter, x, y):
|
||||
assert hasattr(rewriter, 'cfg')
|
||||
assert hasattr(rewriter, 'origin_func')
|
||||
def sub_func(ctx, x, y):
|
||||
assert hasattr(ctx, 'cfg')
|
||||
assert hasattr(ctx, 'origin_func')
|
||||
return x - y
|
||||
|
||||
# test different config
|
||||
@FUNCTION_REWRITER.register_rewriter(
|
||||
func_name='torch.Tensor.div', backend='default')
|
||||
def mul_func_class(ctx, x, y):
|
||||
return x * y
|
||||
|
||||
# test origin_func
|
||||
@FUNCTION_REWRITER.register_rewriter(
|
||||
func_name='torch.sub', backend='default')
|
||||
def origin_sub_func(ctx, x, y, **kwargs):
|
||||
return ctx.origin_func(x, y, **kwargs) + 1
|
||||
|
||||
yield
|
||||
|
||||
del FUNCTION_REWRITER._origin_functions[-1]
|
||||
FUNCTION_REWRITER._registry.remove_record(sub_func)
|
||||
FUNCTION_REWRITER._registry.remove_record(mul_func_class)
|
||||
FUNCTION_REWRITER._registry.remove_record(origin_sub_func)
|
||||
|
||||
|
||||
@pytest.mark.usefixtures('register_test_rewriter')
|
||||
def test_function_rewriter():
|
||||
|
||||
x = torch.tensor([1, 2, 3, 4, 5])
|
||||
y = torch.tensor([2, 4, 6, 8, 10])
|
||||
|
||||
cfg = dict()
|
||||
with RewriterContext(cfg, backend='tensorrt'):
|
||||
result = torch.add(x, y)
|
||||
# replace add with sub
|
||||
torch.testing.assert_allclose(result, x - y)
|
||||
torch_assert_close(result, x - y)
|
||||
result = torch.mul(x, y)
|
||||
# replace mul with sub
|
||||
torch.testing.assert_allclose(result, x - y)
|
||||
torch_assert_close(result, x - y)
|
||||
|
||||
result = torch.add(x, y)
|
||||
# recover origin function
|
||||
torch.testing.assert_allclose(result, x + y)
|
||||
torch_assert_close(result, x + y)
|
||||
|
||||
with RewriterContext(cfg):
|
||||
result = torch.add(x, y)
|
||||
# replace should not happen with wrong backend
|
||||
torch.testing.assert_allclose(result, x + y)
|
||||
|
||||
# test different config
|
||||
@FUNCTION_REWRITER.register_rewriter(
|
||||
func_name='torch.Tensor.add', backend='default')
|
||||
def mul_func_class(rewriter, x, y):
|
||||
return x * y
|
||||
torch_assert_close(result, x + y)
|
||||
|
||||
with RewriterContext(cfg, backend='tensorrt'):
|
||||
result = x.add(y)
|
||||
# replace add with multi
|
||||
torch.testing.assert_allclose(result, x * y)
|
||||
result = x.div(y)
|
||||
# replace div with multi
|
||||
torch_assert_close(result, x * y)
|
||||
|
||||
result = x.add(y)
|
||||
# recover origin function
|
||||
torch.testing.assert_allclose(result, x + y)
|
||||
torch_assert_close(result, x + y)
|
||||
|
||||
with RewriterContext(cfg):
|
||||
result = x.add(y)
|
||||
# replace add with multi
|
||||
torch.testing.assert_allclose(result, x * y)
|
||||
|
||||
# test origin_func
|
||||
@FUNCTION_REWRITER.register_rewriter(
|
||||
func_name='torch.add', backend='default')
|
||||
def origin_add_func(rewriter, x, y, **kwargs):
|
||||
return rewriter.origin_func(x, y, **kwargs) + 1
|
||||
result = x.div(y)
|
||||
# replace div with multi
|
||||
torch_assert_close(result, x * y)
|
||||
|
||||
with RewriterContext(cfg):
|
||||
result = torch.add(x, y)
|
||||
result = torch.sub(x, y)
|
||||
# replace with origin + 1
|
||||
torch.testing.assert_allclose(result, x + y + 1)
|
||||
torch_assert_close(result, x - y + 1)
|
||||
|
||||
# remove torch.add
|
||||
del FUNCTION_REWRITER._origin_functions[-1]
|
||||
torch.testing.assert_allclose(torch.add(x, y), x + y)
|
||||
torch_assert_close(torch.sub(x, y), x - y)
|
||||
|
||||
|
||||
def test_rewrite_empty_function():
|
||||
|
|
|
@ -3,6 +3,11 @@ import torch
|
|||
|
||||
from mmdeploy.core import MODULE_REWRITER, patch_model
|
||||
|
||||
try:
|
||||
from torch.testing import assert_close as torch_assert_close
|
||||
except Exception:
|
||||
from torch.testing import assert_allclose as torch_assert_close
|
||||
|
||||
|
||||
def test_module_rewriter():
|
||||
from torchvision.models.resnet import resnet50
|
||||
|
@ -29,7 +34,7 @@ def test_module_rewriter():
|
|||
rewritten_model = patch_model(model, cfg=cfg, backend='tensorrt')
|
||||
rewritten_bottle_nect = rewritten_model.layer1[0]
|
||||
rewritten_result = rewritten_bottle_nect(x)
|
||||
torch.testing.assert_allclose(rewritten_result, result * 2)
|
||||
torch_assert_close(rewritten_result, result * 2)
|
||||
|
||||
# wrong backend should not be rewritten
|
||||
model = resnet50().eval()
|
||||
|
@ -38,7 +43,7 @@ def test_module_rewriter():
|
|||
rewritten_model = patch_model(model, cfg=cfg)
|
||||
rewritten_bottle_nect = rewritten_model.layer1[0]
|
||||
rewritten_result = rewritten_bottle_nect(x)
|
||||
torch.testing.assert_allclose(rewritten_result, result)
|
||||
torch_assert_close(rewritten_result, result)
|
||||
|
||||
|
||||
def test_pass_redundant_args_to_model():
|
||||
|
|
|
@ -35,8 +35,8 @@ def create_custom_module():
|
|||
del mmdeploy.TestFunc
|
||||
|
||||
|
||||
def test_symbolic_rewriter():
|
||||
test_func = mmdeploy.TestFunc.apply
|
||||
@pytest.fixture(scope='module')
|
||||
def register_custom_rewriter():
|
||||
|
||||
@SYMBOLIC_REWRITER.register_symbolic('mmdeploy.TestFunc', backend='ncnn')
|
||||
@SYMBOLIC_REWRITER.register_symbolic('mmdeploy.TestFunc')
|
||||
|
@ -54,6 +54,17 @@ def test_symbolic_rewriter():
|
|||
def symbolic_cummax(symbolic_wrapper, g, input, dim):
|
||||
return g.op('mmdeploy::cummax_default', input, dim_i=dim, outputs=2)
|
||||
|
||||
yield
|
||||
|
||||
SYMBOLIC_REWRITER._registry.remove_record(symbolic_testfunc_default)
|
||||
SYMBOLIC_REWRITER._registry.remove_record(symbolic_testfunc_tensorrt)
|
||||
SYMBOLIC_REWRITER._registry.remove_record(symbolic_cummax)
|
||||
|
||||
|
||||
@pytest.mark.usefixtures('register_custom_rewriter')
|
||||
def test_symbolic_rewriter():
|
||||
test_func = mmdeploy.TestFunc.apply
|
||||
|
||||
class TestModel(torch.nn.Module):
|
||||
|
||||
def __init__(self):
|
||||
|
@ -99,18 +110,10 @@ def test_symbolic_rewriter():
|
|||
assert nodes[1].domain == 'mmdeploy'
|
||||
|
||||
|
||||
@pytest.mark.usefixtures('register_custom_rewriter')
|
||||
def test_unregister():
|
||||
test_func = mmdeploy.TestFunc.apply
|
||||
|
||||
@SYMBOLIC_REWRITER.register_symbolic('mmdeploy.TestFunc')
|
||||
def symbolic_testfunc_default(symbolic_wrapper, g, x, val):
|
||||
return g.op('mmdeploy::symbolic_testfunc_default', x, val_i=val)
|
||||
|
||||
@SYMBOLIC_REWRITER.register_symbolic(
|
||||
'cummax', is_pytorch=True, arg_descriptors=['v', 'i'])
|
||||
def symbolic_cummax(symbolic_wrapper, g, input, dim):
|
||||
return g.op('mmdeploy::cummax_default', input, dim_i=dim, outputs=2)
|
||||
|
||||
class TestModel(torch.nn.Module):
|
||||
|
||||
def __init__(self):
|
||||
|
|
|
@ -13,6 +13,7 @@ def test_multiheadattention_ncnn():
|
|||
from mmcv.cnn.bricks.transformer import MultiheadAttention
|
||||
embed_dims, num_heads = 12, 2
|
||||
model = MultiheadAttention(embed_dims, num_heads, batch_first=True)
|
||||
model.eval()
|
||||
query = torch.rand(1, 3, embed_dims)
|
||||
|
||||
deploy_cfg = mmcv.Config(
|
||||
|
|
|
@ -9,6 +9,11 @@ from mmdeploy.core import RewriterContext
|
|||
from mmdeploy.utils import Backend
|
||||
from mmdeploy.utils.test import WrapFunction, check_backend
|
||||
|
||||
try:
|
||||
from torch.testing import assert_close as torch_assert_close
|
||||
except Exception:
|
||||
from torch.testing import assert_allclose as torch_assert_close
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'iou_threshold, score_threshold,max_output_boxes_per_class',
|
||||
|
@ -135,4 +140,4 @@ def test_modulated_deform_conv():
|
|||
out = model(x)
|
||||
jit_out = jit_model(x)
|
||||
|
||||
torch.testing.assert_allclose(out, jit_out)
|
||||
torch_assert_close(out, jit_out)
|
||||
|
|
|
@ -1,4 +1 @@
|
|||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
from .utils import TestNCNNExporter, TestOnnxRTExporter, TestTensorRTExporter
|
||||
|
||||
__all__ = ['TestTensorRTExporter', 'TestOnnxRTExporter', 'TestNCNNExporter']
|
||||
|
|
File diff suppressed because it is too large
|
@ -5,7 +5,6 @@ import tempfile
|
|||
|
||||
import mmcv
|
||||
import onnx
|
||||
import pytest
|
||||
import torch
|
||||
|
||||
import mmdeploy.apis.tensorrt as trt_apis
|
||||
|
@ -13,8 +12,8 @@ from mmdeploy.utils import Backend
|
|||
from mmdeploy.utils.test import assert_allclose, check_backend
|
||||
|
||||
|
||||
@pytest.mark.skip(reason='This is not a test class but a utility class.')
|
||||
class TestOnnxRTExporter:
|
||||
__test__ = False
|
||||
|
||||
def __init__(self):
|
||||
self.backend_name = 'onnxruntime'
|
||||
|
@ -70,8 +69,8 @@ class TestOnnxRTExporter:
|
|||
assert_allclose(model_outputs, onnx_outputs, tolerate_small_mismatch)
|
||||
|
||||
|
||||
@pytest.mark.skip(reason='This is not a test class but a utility class.')
|
||||
class TestTensorRTExporter:
|
||||
__test__ = False
|
||||
|
||||
def __init__(self):
|
||||
self.backend_name = 'tensorrt'
|
||||
|
@ -158,8 +157,8 @@ class TestTensorRTExporter:
|
|||
assert_allclose(model_outputs, trt_outputs, tolerate_small_mismatch)
|
||||
|
||||
|
||||
@pytest.mark.skip(reason='This is not a test class but a utility class.')
|
||||
class TestNCNNExporter:
|
||||
__test__ = False
|
||||
|
||||
def __init__(self):
|
||||
self.backend_name = 'ncnn'
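pytest collects any class whose name matches Test* unless it opts out, which is why these exporter helpers carry both a skip marker and __test__ = False; a minimal illustration of the opt-out mechanisms (class name and reason are made up):

import pytest


@pytest.mark.skip(reason='utility class, not a test suite')  # skipped even if collected
class TestFakeExporter:
    __test__ = False  # tells pytest not to collect this class at all

    def __init__(self):  # a custom __init__ also keeps pytest from collecting it
        self.backend_name = 'fake'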
|
||||
|
|
|
@ -12,11 +12,15 @@ from mmdeploy.utils import Backend
|
|||
from mmdeploy.utils.test import (WrapFunction, backend_checker,
|
||||
get_rewrite_outputs)
|
||||
|
||||
deploy_cfg_ncnn = mmcv.Config(
|
||||
dict(
|
||||
onnx_config=dict(input_shape=None),
|
||||
backend_config=dict(type='ncnn', model_inputs=None, use_vulkan=False),
|
||||
codebase_config=dict(type='mmdet', task='ObjectDetection')))
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def deploy_cfg_ncnn():
|
||||
return mmcv.Config(
|
||||
dict(
|
||||
onnx_config=dict(input_shape=None),
|
||||
backend_config=dict(
|
||||
type='ncnn', model_inputs=None, use_vulkan=False),
|
||||
codebase_config=dict(type='mmdet', task='ObjectDetection')))
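Module-scoped fixtures are built lazily and cached, so every test in the file shares one config object without paying the construction cost at import time; a small sketch of that caching behaviour (names are illustrative, not from this diff):

import pytest

_calls = 0


@pytest.fixture(scope='module')
def expensive_cfg():
    global _calls
    _calls += 1  # runs once per module, not once per test
    return {'backend': 'ncnn'}


def test_first(expensive_cfg):
    assert expensive_cfg['backend'] == 'ncnn'


def test_second(expensive_cfg):
    assert _calls == 1  # cached: the fixture body did not run again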
|
||||
|
||||
|
||||
def get_trt_config(output_names, shape):
|
||||
|
@ -40,7 +44,7 @@ def get_trt_config(output_names, shape):
|
|||
|
||||
|
||||
@backend_checker(Backend.NCNN)
|
||||
def test_get_attribute():
|
||||
def test_get_attribute(deploy_cfg_ncnn):
|
||||
|
||||
def model_func(tensor):
|
||||
x = tensor.size()
|
||||
|
@ -60,7 +64,7 @@ def test_get_attribute():
|
|||
|
||||
|
||||
@backend_checker(Backend.NCNN)
|
||||
def test_group_norm_ncnn():
|
||||
def test_group_norm_ncnn(deploy_cfg_ncnn):
|
||||
input = torch.rand([1, 2, 2, 2])
|
||||
weight = torch.rand([2])
|
||||
bias = torch.rand([2])
|
||||
|
@ -80,7 +84,7 @@ def test_group_norm_ncnn():
|
|||
|
||||
|
||||
@backend_checker(Backend.NCNN)
|
||||
def test_chunk_ncnn():
|
||||
def test_chunk_ncnn(deploy_cfg_ncnn):
|
||||
input = torch.rand(1, 16, 16, 16)
|
||||
|
||||
model_output = input.chunk(2, dim=1)
|
||||
|
@ -102,7 +106,7 @@ def test_chunk_ncnn():
|
|||
|
||||
|
||||
@backend_checker(Backend.NCNN)
|
||||
def test_interpolate_static():
|
||||
def test_interpolate_static(deploy_cfg_ncnn):
|
||||
input = torch.rand([1, 2, 2, 2])
|
||||
model_output = F.interpolate(input, scale_factor=[2, 2])
|
||||
|
||||
|
@ -144,7 +148,7 @@ def test_interpolate__rknn():
|
|||
|
||||
|
||||
@backend_checker(Backend.NCNN)
|
||||
def test_linear_ncnn():
|
||||
def test_linear_ncnn(deploy_cfg_ncnn):
|
||||
input = torch.rand([1, 2, 2])
|
||||
weight = torch.rand([2, 2])
|
||||
bias = torch.rand([2])
|
||||
|
@ -189,7 +193,7 @@ def test_repeat_static():
|
|||
|
||||
|
||||
@backend_checker(Backend.NCNN)
|
||||
def test_size_of_tensor_static():
|
||||
def test_size_of_tensor_static(deploy_cfg_ncnn):
|
||||
|
||||
def model_func(input):
|
||||
x = torch.Tensor.size(input)
|
||||
|
@ -241,7 +245,7 @@ class TestTopk:
|
|||
@backend_checker(Backend.NCNN)
|
||||
@pytest.mark.parametrize('k', [1, 3, 4])
|
||||
@pytest.mark.parametrize('dim', [1, 2, 3])
|
||||
def test_topk_ncnn(self, dim, k):
|
||||
def test_topk_ncnn(self, dim, k, deploy_cfg_ncnn):
|
||||
|
||||
model_output = torch.Tensor.topk(TestTopk.input, k, dim).values
|
||||
|
||||
|
@ -318,7 +322,7 @@ def test_triu_trt(shape, diagonal):
|
|||
'input',
|
||||
[torch.rand(1, 16, 16), torch.rand(1, 3, 16, 16)])
|
||||
@pytest.mark.parametrize('dim', [1, 2])
|
||||
def test_normalize_ncnn(input, dim):
|
||||
def test_normalize_ncnn(input, dim, deploy_cfg_ncnn):
|
||||
import mmdeploy.apis.ncnn as ncnn_apis
|
||||
from mmdeploy.utils.test import get_onnx_model
|
||||
|
||||
|
|
|
@ -13,39 +13,31 @@ onnx_file = tempfile.NamedTemporaryFile(suffix='onnx').name
|
|||
|
||||
@pytest.fixture(autouse=False, scope='function')
|
||||
def prepare_symbolics():
|
||||
context = RewriterContext(
|
||||
Config(
|
||||
dict(
|
||||
onnx_config=dict(
|
||||
type='onnx',
|
||||
export_params=True,
|
||||
keep_initializers_as_inputs=False,
|
||||
opset_version=11,
|
||||
save_file='end2end.onnx',
|
||||
input_names=['input'],
|
||||
output_names=['output'],
|
||||
input_shape=None),
|
||||
backend_config=dict(type='tensorrt'))),
|
||||
'tensorrt',
|
||||
opset=11)
|
||||
context.enter()
|
||||
|
||||
yield
|
||||
|
||||
context.exit()
|
||||
with RewriterContext(
|
||||
Config(
|
||||
dict(
|
||||
onnx_config=dict(
|
||||
type='onnx',
|
||||
export_params=True,
|
||||
keep_initializers_as_inputs=False,
|
||||
opset_version=11,
|
||||
save_file='end2end.onnx',
|
||||
input_names=['input'],
|
||||
output_names=['output'],
|
||||
input_shape=None),
|
||||
backend_config=dict(type='tensorrt'))),
|
||||
'tensorrt',
|
||||
opset=11):
|
||||
yield
|
||||
|
||||
|
||||
@pytest.fixture(autouse=False, scope='function')
|
||||
def prepare_symbolics_ncnn():
|
||||
context = RewriterContext(
|
||||
Config({'backend_config': {
|
||||
'type': 'ncnn'
|
||||
}}), 'ncnn', opset=11)
|
||||
context.enter()
|
||||
|
||||
yield
|
||||
|
||||
context.exit()
|
||||
with RewriterContext(
|
||||
Config({'backend_config': {
|
||||
'type': 'ncnn'
|
||||
}}), 'ncnn', opset=11):
|
||||
yield
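A yield fixture wrapped in a with block keeps the context active for the whole test and closes it on teardown; a stripped-down sketch of the pattern with a stand-in context manager (names are illustrative, not from this diff):

import contextlib

import pytest


@contextlib.contextmanager
def fake_context(name):
    # stands in for RewriterContext: enter on setup, exit on teardown
    print(f'enter {name}')
    try:
        yield
    finally:
        print(f'exit {name}')


@pytest.fixture
def prepared():
    with fake_context('demo'):
        yield  # the context stays open while the test body runs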
|
||||
|
||||
|
||||
class OpModel(torch.nn.Module):
|
||||
|
@ -116,7 +108,6 @@ def test_instance_norm():
|
|||
class TestLinear:
|
||||
|
||||
def check(self, nodes):
|
||||
print(nodes)
|
||||
exist = False
|
||||
for node in nodes:
|
||||
if node.op_type in ['Gemm', 'MatMul']:
|
||||
|
|
|
@ -417,15 +417,15 @@ def test_AdvancedEnum():
|
|||
|
||||
@pytest.mark.skipif(
|
||||
not importlib.util.find_spec('mmedit'), reason='requires mmedit')
|
||||
def test_export_info():
|
||||
with tempfile.TemporaryDirectory() as dir:
|
||||
export2SDK(correct_deploy_cfg, correct_model_cfg, dir, '', 'cpu')
|
||||
deploy_json = os.path.join(dir, 'deploy.json')
|
||||
pipeline_json = os.path.join(dir, 'pipeline.json')
|
||||
detail_json = os.path.join(dir, 'detail.json')
|
||||
assert os.path.exists(pipeline_json)
|
||||
assert os.path.exists(detail_json)
|
||||
assert os.path.exists(deploy_json)
|
||||
def test_export_info(tmp_path):
|
||||
dir = str(tmp_path)
|
||||
export2SDK(correct_deploy_cfg, correct_model_cfg, dir, '', 'cpu')
|
||||
deploy_json = os.path.join(dir, 'deploy.json')
|
||||
pipeline_json = os.path.join(dir, 'pipeline.json')
|
||||
detail_json = os.path.join(dir, 'detail.json')
|
||||
assert os.path.exists(pipeline_json)
|
||||
assert os.path.exists(detail_json)
|
||||
assert os.path.exists(deploy_json)
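pytest's built-in tmp_path fixture hands each test a unique pathlib.Path directory and cleans old ones up automatically, which sidesteps the manual cleanup and Windows file-locking quirks of NamedTemporaryFile(...).name; a minimal usage sketch (names are illustrative):

def test_writes_a_file(tmp_path):
    out_file = tmp_path / 'result.json'  # fresh per-test directory
    out_file.write_text('{}')
    assert out_file.exists()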
|
||||
|
||||
|
||||
def wrap_target():
|
||||
|
|