diff --git a/csrc/mmdeploy/backend_ops/tensorrt/roi_align/trt_roi_align.cpp b/csrc/mmdeploy/backend_ops/tensorrt/roi_align/trt_roi_align.cpp
index 4f1221f2c..5bf920bc1 100644
--- a/csrc/mmdeploy/backend_ops/tensorrt/roi_align/trt_roi_align.cpp
+++ b/csrc/mmdeploy/backend_ops/tensorrt/roi_align/trt_roi_align.cpp
@@ -202,13 +202,13 @@ nvinfer1::IPluginV2 *TRTRoIAlignCreator::createPlugin(
     if (field_name.compare("mode") == 0) {
       int data_size = fc->fields[i].length;
       const char *data_start = static_cast<const char *>(fc->fields[i].data);
-      std::string poolModeStr(data_start, data_size);
-      if (poolModeStr == "avg") {
+      std::string pool_mode_str(data_start);
+      if (pool_mode_str == "avg") {
        poolMode = 1;
-      } else if (poolModeStr == "max") {
+      } else if (pool_mode_str == "max") {
        poolMode = 0;
      } else {
-        std::cout << "Unknown pool mode \"" << poolModeStr << "\"." << std::endl;
+        std::cout << "Unknown pool mode \"" << pool_mode_str << "\"." << std::endl;
      }
      ASSERT(poolMode >= 0);
    }
diff --git a/mmdeploy/codebase/mmcls/models/utils/attention.py b/mmdeploy/codebase/mmcls/models/utils/attention.py
index 0d8a9234a..b4262cdc0 100644
--- a/mmdeploy/codebase/mmcls/models/utils/attention.py
+++ b/mmdeploy/codebase/mmcls/models/utils/attention.py
@@ -143,7 +143,6 @@ def shift_window_msa__forward__default(ctx, self, query, hw_shape):
     'mmcls.models.utils.ShiftWindowMSA.get_attn_mask',
     extra_checkers=LibVersionChecker('mmcls', min_version='0.21.0'))
 def shift_window_msa__get_attn_mask__default(ctx,
-                                             self,
                                              hw_shape,
                                              window_size,
                                              shift_size,
diff --git a/mmdeploy/codebase/mmdet3d/core/bbox/fcos3d_bbox_coder.py b/mmdeploy/codebase/mmdet3d/core/bbox/fcos3d_bbox_coder.py
index 8c5b92a50..f9c7a40b0 100644
--- a/mmdeploy/codebase/mmdet3d/core/bbox/fcos3d_bbox_coder.py
+++ b/mmdeploy/codebase/mmdet3d/core/bbox/fcos3d_bbox_coder.py
@@ -8,7 +8,7 @@ from mmdeploy.core import FUNCTION_REWRITER
 
 @FUNCTION_REWRITER.register_rewriter(
     'mmdet3d.core.bbox.coders.fcos3d_bbox_coder.FCOS3DBBoxCoder.decode_yaw')
-def decode_yaw(ctx, self, bbox, centers2d, dir_cls, dir_offset, cam2img):
+def decode_yaw(ctx, bbox, centers2d, dir_cls, dir_offset, cam2img):
     """Decode yaw angle and change it from local to global.
 
     Rewrite this func to use slice instead of the original operation.
 
     Args:
diff --git a/mmdeploy/core/rewriters/function_rewriter.py b/mmdeploy/core/rewriters/function_rewriter.py
index b623476f3..abb63fad3 100644
--- a/mmdeploy/core/rewriters/function_rewriter.py
+++ b/mmdeploy/core/rewriters/function_rewriter.py
@@ -1,4 +1,5 @@
 # Copyright (c) OpenMMLab. All rights reserved.
+import inspect
 from typing import (Any, Callable, Dict, List, MutableSequence, Optional,
                     Tuple, Union)
@@ -72,7 +73,16 @@ def _set_func(origin_func_path: str,
                            rewrite_func,
                            ignore_refs=ignore_refs,
                            ignore_keys=ignore_keys)
-    exec(f'{origin_func_path} = rewrite_func')
+
+    is_static_method = False
+    if method_class:
+        origin_type = inspect.getattr_static(module_or_class, split_path[-1])
+        is_static_method = isinstance(origin_type, staticmethod)
+
+    if is_static_method:
+        exec(f'{origin_func_path} = staticmethod(rewrite_func)')
+    else:
+        exec(f'{origin_func_path} = rewrite_func')
 
 
 def _del_func(path: str):
diff --git a/mmdeploy/core/rewriters/rewriter_utils.py b/mmdeploy/core/rewriters/rewriter_utils.py
index a7c247ced..f58836a3d 100644
--- a/mmdeploy/core/rewriters/rewriter_utils.py
+++ b/mmdeploy/core/rewriters/rewriter_utils.py
@@ -326,6 +326,29 @@ class RewriterRegistry:
 
         return decorator
 
+    def remove_record(self, object: Any, filter_cb: Optional[Callable] = None):
+        """Remove record.
+
+        Args:
+            object (Any): The object to remove.
+            filter_cb (Callable): Return True to keep a matched record
+                instead of removing it. Defaults to None.
+        """
+        key_to_pop = []
+        for key, records in self._rewrite_records.items():
+            for rec in records:
+                if rec['_object'] == object:
+                    if filter_cb is not None:
+                        if filter_cb(rec):
+                            continue
+                    key_to_pop.append((key, rec))
+
+        for key, rec in key_to_pop:
+            records = self._rewrite_records[key]
+            records.remove(rec)
+            if len(records) == 0:
+                self._rewrite_records.pop(key)
+
 
 class ContextCaller:
     """A callable object used in RewriteContext.
diff --git a/mmdeploy/utils/test.py b/mmdeploy/utils/test.py
index 67cdb0641..87a0d84d4 100644
--- a/mmdeploy/utils/test.py
+++ b/mmdeploy/utils/test.py
@@ -17,6 +17,11 @@ from mmdeploy.core import RewriterContext, patch_model
 from mmdeploy.utils import (IR, Backend, get_backend, get_dynamic_axes,
                             get_ir_config, get_onnx_config)
 
+try:
+    from torch.testing import assert_close as torch_assert_close
+except Exception:
+    from torch.testing import assert_allclose as torch_assert_close
+
 
 def backend_checker(backend: Backend, require_plugin: bool = False):
     """A decorator which checks if a backend is available.
@@ -189,12 +194,6 @@ class SwitchBackendWrapper:
         self._recover_class = recover_class
 
     def __enter__(self):
-        return self
-
-    def __exit__(self, type, value, trace):
-        self.recover()
-
-    def set(self, **kwargs):
         """Replace attributes in backend wrappers with dummy items."""
         obj = self._recover_class
         self.init = obj.__init__
@@ -203,10 +202,9 @@ class SwitchBackendWrapper:
         obj.__init__ = SwitchBackendWrapper.BackendWrapper.__init__
         obj.forward = SwitchBackendWrapper.BackendWrapper.forward
         obj.__call__ = SwitchBackendWrapper.BackendWrapper.__call__
-        for k, v in kwargs.items():
-            setattr(obj, k, v)
+        return self
 
-    def recover(self):
+    def __exit__(self, type, value, trace):
         """Recover to original class."""
         assert self.init is not None and \
             self.forward is not None,\
@@ -216,6 +214,11 @@ class SwitchBackendWrapper:
         obj.forward = self.forward
         obj.__call__ = self.call
 
+    def set(self, **kwargs):
+        obj = self._recover_class
+        for k, v in kwargs.items():
+            setattr(obj, k, v)
+
 
 def assert_allclose(expected: List[Union[torch.Tensor, np.ndarray]],
                     actual: List[Union[torch.Tensor, np.ndarray]],
@@ -239,8 +242,7 @@ def assert_allclose(expected: List[Union[torch.Tensor, np.ndarray]],
         if isinstance(actual[i], (list, np.ndarray)):
             actual[i] = torch.tensor(actual[i])
         try:
-            torch.testing.assert_allclose(
-                actual[i], expected[i], rtol=1e-03, atol=1e-05)
+            torch_assert_close(actual[i], expected[i], rtol=1e-03, atol=1e-05)
         except AssertionError as error:
             if tolerate_small_mismatch:
                 assert '(0.00%)' in str(error), str(error)
@@ -417,6 +419,19 @@ def get_backend_outputs(ir_file_path: str,
     if backend == Backend.TENSORRT:
         device = 'cuda'
         model_inputs = dict((k, v.cuda()) for k, v in model_inputs.items())
+        input_shapes = dict(
+            (k, dict(min_shape=v.shape, max_shape=v.shape, opt_shape=v.shape))
+            for k, v in model_inputs.items())
+        model_inputs_cfg = deploy_cfg['backend_config'].get(
+            'model_inputs', [dict(input_shapes=input_shapes)])
+        if len(model_inputs_cfg) < 1:
+            model_inputs_cfg = [dict(input_shapes=input_shapes)]
+
+        if 'input_shapes' not in model_inputs_cfg[0]:
+            model_inputs_cfg[0]['input_shapes'] = input_shapes
+
+        deploy_cfg['backend_config']['model_inputs'] = model_inputs_cfg
+
     elif backend == Backend.OPENVINO:
         input_info = {
             name: value.shape
diff --git a/tests/test_apis/test_calibration.py b/tests/test_apis/test_calibration.py
index 1493cd9e1..5771466c8 100644
--- a/tests/test_apis/test_calibration.py
+++ b/tests/test_apis/test_calibration.py
@@ -4,6 +4,7 @@ import tempfile
 from multiprocessing import Process
 
 import mmcv
+import pytest
 
 from mmdeploy.apis import create_calib_input_data
 
@@ -11,7 +12,8 @@ calib_file = tempfile.NamedTemporaryFile(suffix='.h5').name
 ann_file = 'tests/data/annotation.json'
 
 
-def get_end2end_deploy_cfg():
+@pytest.fixture
+def deploy_cfg():
     deploy_cfg = mmcv.Config(
         dict(
             onnx_config=dict(
@@ -53,14 +55,15 @@
     return deploy_cfg
 
 
-def get_partition_deploy_cfg():
-    deploy_cfg = get_end2end_deploy_cfg()
+@pytest.fixture
+def partition_deploy_cfg(deploy_cfg):
     deploy_cfg._cfg_dict['partition_config'] = dict(
         type='two_stage', apply_marks=True)
     return deploy_cfg
 
 
-def get_model_cfg():
+@pytest.fixture
+def model_cfg():
     dataset_type = 'CustomDataset'
     data_root = 'tests/data/'
     img_norm_cfg = dict(
@@ -169,10 +172,8 @@
     return model_cfg
 
 
-def run_test_create_calib_end2end():
+def run_test_create_calib_end2end(deploy_cfg, model_cfg):
     import h5py
-    model_cfg = get_model_cfg()
-    deploy_cfg = get_end2end_deploy_cfg()
     create_calib_input_data(
         calib_file,
         deploy_cfg,
@@ -194,18 +195,19 @@
 # new process.
 
 
-def test_create_calib_end2end():
-    p = Process(target=run_test_create_calib_end2end)
+def test_create_calib_end2end(deploy_cfg, model_cfg):
+    p = Process(
+        target=run_test_create_calib_end2end,
+        kwargs=dict(deploy_cfg=deploy_cfg, model_cfg=model_cfg))
     try:
         p.start()
     finally:
         p.join()
 
 
-def run_test_create_calib_parittion():
+def run_test_create_calib_partition(partition_deploy_cfg, model_cfg):
     import h5py
-    model_cfg = get_model_cfg()
-    deploy_cfg = get_partition_deploy_cfg()
+    deploy_cfg = partition_deploy_cfg
     create_calib_input_data(
         calib_file,
         deploy_cfg,
@@ -227,8 +229,11 @@
         assert calib_data[partition_name][input_names[i]]['0'] is not None
 
 
-def test_create_calib_parittion():
-    p = Process(target=run_test_create_calib_parittion)
+def test_create_calib_partition(partition_deploy_cfg, model_cfg):
+    p = Process(
+        target=run_test_create_calib_partition,
+        kwargs=dict(
+            partition_deploy_cfg=partition_deploy_cfg, model_cfg=model_cfg))
     try:
         p.start()
     finally:
diff --git a/tests/test_backend/test_wrapper.py b/tests/test_backend/test_wrapper.py
index 6bcf2580c..8bc8214c6 100644
--- a/tests/test_backend/test_wrapper.py
+++ b/tests/test_backend/test_wrapper.py
@@ -208,6 +208,7 @@ def run_wrapper(backend, wrapper, input):
 ALL_BACKEND = list(Backend)
 ALL_BACKEND.remove(Backend.DEFAULT)
 ALL_BACKEND.remove(Backend.PYTORCH)
+ALL_BACKEND.remove(Backend.SNPE)
 ALL_BACKEND.remove(Backend.SDK)
 
diff --git a/tests/test_codebase/test_mmcls/conftest.py b/tests/test_codebase/test_mmcls/conftest.py
new file mode 100644
index 000000000..cf97caa92
--- /dev/null
+++ b/tests/test_codebase/test_mmcls/conftest.py
@@ -0,0 +1,19 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import pytest
+
+from mmdeploy.codebase import import_codebase
+from mmdeploy.utils import Codebase
+
+
+def pytest_ignore_collect(*args, **kwargs):
+    import importlib
+    return importlib.util.find_spec('mmcls') is None
+
+
+@pytest.fixture(autouse=True, scope='package')
+def import_all_modules():
+    codebase = Codebase.MMCLS
+    try:
+        import_codebase(codebase)
+    except ImportError:
+        pytest.skip(f'{codebase} is not installed.', allow_module_level=True)
diff --git a/tests/test_codebase/test_mmcls/test_classification.py b/tests/test_codebase/test_mmcls/test_classification.py
index 599a8da1d..fad79e96c 100644
--- a/tests/test_codebase/test_mmcls/test_classification.py
+++ b/tests/test_codebase/test_mmcls/test_classification.py
@@ -1,7 +1,6 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 import copy
 import os
-from tempfile import NamedTemporaryFile, TemporaryDirectory
 from typing import Any
 
 import mmcv
@@ -9,41 +8,50 @@ import numpy as np
 import pytest
 import torch
 
-import mmdeploy.backend.onnxruntime as ort_apis
 from mmdeploy.apis import build_task_processor
-from mmdeploy.codebase import import_codebase
-from mmdeploy.utils import Codebase, load_config
+from mmdeploy.utils import load_config
 from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper
 
-try:
-    import_codebase(Codebase.MMCLS)
-except ImportError:
-    pytest.skip(f'{Codebase.MMCLS} is not installed.', allow_module_level=True)
-
 model_cfg_path = 'tests/test_codebase/test_mmcls/data/model.py'
-model_cfg = load_config(model_cfg_path)[0]
-deploy_cfg = mmcv.Config(
-    dict(
-        backend_config=dict(type='onnxruntime'),
-        codebase_config=dict(type='mmcls', task='Classification'),
-        onnx_config=dict(
-            type='onnx',
-            export_params=True,
-            keep_initializers_as_inputs=False,
-            opset_version=11,
-            input_shape=None,
-            input_names=['input'],
-            output_names=['output'])))
-onnx_file = NamedTemporaryFile(suffix='.onnx').name
-task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
+
+
+@pytest.fixture(scope='module')
+def model_cfg():
+    return load_config(model_cfg_path)[0]
+
+
+@pytest.fixture(scope='module')
+def deploy_cfg():
+    return mmcv.Config(
+        dict(
+            backend_config=dict(type='onnxruntime'),
+            codebase_config=dict(type='mmcls', task='Classification'),
+            onnx_config=dict(
+                type='onnx',
+                export_params=True,
+                keep_initializers_as_inputs=False,
+                opset_version=11,
+                input_shape=None,
+                input_names=['input'],
+                output_names=['output'])))
+
+
 img_shape = (64, 64)
 num_classes = 1000
-img = np.random.rand(*img_shape, 3)
+
+
+@pytest.fixture(scope='module')
+def task_processor(model_cfg, deploy_cfg):
+    return build_task_processor(model_cfg, deploy_cfg, 'cpu')
+
+
+@pytest.fixture(scope='module')
+def img():
+    return np.random.rand(*img_shape, 3)
 
 
 @pytest.mark.parametrize('from_mmrazor', [True, False, '123', 0])
-def test_init_pytorch_model(from_mmrazor: Any):
+def test_init_pytorch_model(from_mmrazor: Any, task_processor, deploy_cfg):
     from mmcls.models.classifiers.base import BaseClassifier
     if from_mmrazor is False:
         _task_processor = task_processor
@@ -73,58 +81,57 @@ def test_init_pytorch_model(from_mmrazor: Any):
     assert isinstance(model, BaseClassifier)
 
 
-@pytest.fixture
-def backend_model():
+@pytest.fixture(scope='module')
+def backend_model(task_processor):
     from mmdeploy.backend.onnxruntime import ORTWrapper
-    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
-    wrapper = SwitchBackendWrapper(ORTWrapper)
-    wrapper.set(outputs={
-        'output': torch.rand(1, num_classes),
-    })
+    with SwitchBackendWrapper(ORTWrapper) as wrapper:
+        wrapper.set(outputs={
+            'output': torch.rand(1, num_classes),
+        })
 
-    yield task_processor.init_backend_model([''])
-
-    wrapper.recover()
+        yield task_processor.init_backend_model([''])
 
 
 def test_init_backend_model(backend_model):
     assert isinstance(backend_model, torch.nn.Module)
 
 
-def test_create_input():
-    inputs = task_processor.create_input(img, input_shape=img_shape)
+@pytest.fixture(scope='module')
+def model_inputs(task_processor, img):
+    return task_processor.create_input(img, input_shape=img_shape)
+
+
+def test_create_input(model_inputs):
+    inputs = model_inputs
     assert isinstance(inputs, tuple) and len(inputs) == 2
 
 
-def test_run_inference(backend_model):
-    input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
+def test_run_inference(task_processor, backend_model, model_inputs):
+    input_dict, _ = model_inputs
     results = task_processor.run_inference(backend_model, input_dict)
     assert results is not None
 
 
-def test_visualize(backend_model):
-    input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
+def test_visualize(task_processor, backend_model, tmp_path, img, model_inputs):
+    input_dict, _ = model_inputs
     results = task_processor.run_inference(backend_model, input_dict)
-    with TemporaryDirectory() as dir:
-        filename = dir + 'tmp.jpg'
-        task_processor.visualize(backend_model, img, results[0], filename, '')
-        assert os.path.exists(filename)
+    filename = str(tmp_path / 'tmp.jpg')
+    task_processor.visualize(backend_model, img, results[0], filename, '')
+    assert os.path.exists(filename)
 
 
-def test_get_tensor_from_input():
+def test_get_tensor_from_input(task_processor):
     input_data = {'img': torch.ones(3, 4, 5)}
     inputs = task_processor.get_tensor_from_input(input_data)
     assert torch.equal(inputs, torch.ones(3, 4, 5))
 
 
-def test_get_partition_cfg():
-    try:
-        _ = task_processor.get_partition_cfg(partition_type='')
-    except NotImplementedError:
-        pass
+def test_get_partition_cfg(task_processor):
+    with pytest.raises(NotImplementedError):
+        task_processor.get_partition_cfg(partition_type='')
 
 
-def test_build_dataset_and_dataloader():
+def test_build_dataset_and_dataloader(task_processor, model_cfg):
     from torch.utils.data import DataLoader, Dataset
     dataset = task_processor.build_dataset(
         dataset_cfg=model_cfg, dataset_type='test')
@@ -133,7 +140,7 @@
     assert isinstance(dataloader, DataLoader), 'Failed to build dataloader'
 
 
-def test_single_gpu_test_and_evaluate():
+def test_single_gpu_test_and_evaluate(task_processor, model_cfg):
     from mmcv.parallel import MMDataParallel
     dataset = task_processor.build_dataset(
         dataset_cfg=model_cfg, dataset_type='test')
diff --git a/tests/test_codebase/test_mmcls/test_classification_model.py b/tests/test_codebase/test_mmcls/test_classification_model.py
index 07a207423..4ca7a9bc5 100644
--- a/tests/test_codebase/test_mmcls/test_classification_model.py
+++ b/tests/test_codebase/test_mmcls/test_classification_model.py
@@ -1,78 +1,66 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 import os.path as osp
-from tempfile import NamedTemporaryFile
 
 import mmcv
 import numpy as np
 import pytest
 import torch
 
-import mmdeploy.backend.onnxruntime as ort_apis
-from mmdeploy.codebase import import_codebase
-from mmdeploy.utils import Backend, Codebase
+from mmdeploy.utils import Backend
 from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker
 
 NUM_CLASS = 1000
 IMAGE_SIZE = 64
 
-try:
-    import_codebase(Codebase.MMCLS)
-except ImportError:
-    pytest.skip(f'{Codebase.MMCLS} is not installed.', allow_module_level=True)
-
 
 @backend_checker(Backend.ONNXRUNTIME)
 class TestEnd2EndModel:
 
-    @classmethod
-    def setup_class(cls):
+    @pytest.fixture(scope='class')
+    def end2end_model(self):
         # force add backend wrapper regardless of plugins
         from mmdeploy.backend.onnxruntime import ORTWrapper
-        ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
 
         # simplify backend inference
-        cls.wrapper = SwitchBackendWrapper(ORTWrapper)
-        cls.outputs = {
-            'outputs': torch.rand(1, 1, IMAGE_SIZE, IMAGE_SIZE),
-        }
-        cls.wrapper.set(outputs=cls.outputs)
-        deploy_cfg = mmcv.Config(
-            {'onnx_config': {
-                'output_names': ['outputs']
-            }})
+        with SwitchBackendWrapper(ORTWrapper) as wrapper:
+            outputs = {
+                'outputs': torch.rand(1, 1, IMAGE_SIZE, IMAGE_SIZE),
+            }
+            wrapper.set(outputs=outputs)
+            deploy_cfg = mmcv.Config(
+                {'onnx_config': {
+                    'output_names': ['outputs']
+                }})
 
-        from mmdeploy.codebase.mmcls.deploy.classification_model import \
-            End2EndModel
-        class_names = ['' for i in range(NUM_CLASS)]
-        cls.end2end_model = End2EndModel(
-            Backend.ONNXRUNTIME, [''],
-            device='cpu',
-            class_names=class_names,
-            deploy_cfg=deploy_cfg)
+            from mmdeploy.codebase.mmcls.deploy.classification_model import \
+                End2EndModel
+            class_names = ['' for i in range(NUM_CLASS)]
+            model = End2EndModel(
+                Backend.ONNXRUNTIME, [''],
+                device='cpu',
+                class_names=class_names,
+                deploy_cfg=deploy_cfg)
+            yield model
 
-    @classmethod
-    def teardown_class(cls):
-        cls.wrapper.recover()
-
-    def test_forward(self):
+    def test_forward(self, end2end_model):
         imgs = [torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE)]
-        results = self.end2end_model.forward(imgs)
+        results = end2end_model.forward(imgs)
         assert results is not None, 'failed to get output using '\
             'End2EndModel'
 
-    def test_forward_test(self):
+    def test_forward_test(self, end2end_model):
         imgs = torch.rand(2, 3, IMAGE_SIZE, IMAGE_SIZE)
-        results = self.end2end_model.forward_test(imgs)
+        results = end2end_model.forward_test(imgs)
         assert isinstance(results[0], np.ndarray)
 
-    def test_show_result(self):
+    def test_show_result(self, end2end_model, tmp_path):
         input_img = np.zeros([IMAGE_SIZE, IMAGE_SIZE, 3])
-        img_path = NamedTemporaryFile(suffix='.jpg').name
+        img_path = str(tmp_path / 'tmp.jpg')
 
         pred_label = torch.randint(0, NUM_CLASS, (1, ))
         pred_score = torch.rand((1, ))
         result = {'pred_label': pred_label, 'pred_score': float(pred_score)}
-        self.end2end_model.show_result(
+        end2end_model.show_result(
             input_img, result, '', show=False, out_file=img_path)
         assert osp.exists(img_path), 'Fails to create drawn image.'
@@ -80,44 +68,43 @@ class TestEnd2EndModel:
 
 @backend_checker(Backend.RKNN)
 class TestRKNNEnd2EndModel:
 
-    @classmethod
-    def setup_class(cls):
+    @pytest.fixture(scope='class')
+    def end2end_model(self):
         # force add backend wrapper regardless of plugins
-        import mmdeploy.backend.rknn as rknn_apis
         from mmdeploy.backend.rknn import RKNNWrapper
-        rknn_apis.__dict__.update({'RKNNWrapper': RKNNWrapper})
 
         # simplify backend inference
-        cls.wrapper = SwitchBackendWrapper(RKNNWrapper)
-        cls.outputs = [torch.rand(1, 1, IMAGE_SIZE, IMAGE_SIZE)]
-        cls.wrapper.set(outputs=cls.outputs)
-        deploy_cfg = mmcv.Config({
-            'onnx_config': {
-                'output_names': ['outputs']
-            },
-            'backend_config': {
-                'common_config': {}
-            }
-        })
+        with SwitchBackendWrapper(RKNNWrapper) as wrapper:
+            outputs = [torch.rand(1, 1, IMAGE_SIZE, IMAGE_SIZE)]
+            wrapper.set(outputs=outputs)
+            deploy_cfg = mmcv.Config({
+                'onnx_config': {
+                    'output_names': ['outputs']
+                },
+                'backend_config': {
+                    'common_config': {}
+                }
+            })
 
-        from mmdeploy.codebase.mmcls.deploy.classification_model import \
-            RKNNEnd2EndModel
-        class_names = ['' for i in range(NUM_CLASS)]
-        cls.end2end_model = RKNNEnd2EndModel(
-            Backend.RKNN, [''],
-            device='cpu',
-            class_names=class_names,
-            deploy_cfg=deploy_cfg)
+            from mmdeploy.codebase.mmcls.deploy.classification_model import \
+                RKNNEnd2EndModel
+            class_names = ['' for i in range(NUM_CLASS)]
+            model = RKNNEnd2EndModel(
+                Backend.RKNN, [''],
+                device='cpu',
+                class_names=class_names,
+                deploy_cfg=deploy_cfg)
+            yield model
 
-    def test_forward_test(self):
+    def test_forward_test(self, end2end_model):
         imgs = torch.rand(2, 3, IMAGE_SIZE, IMAGE_SIZE)
-        results = self.end2end_model.forward_test(imgs)
+        results = end2end_model.forward_test(imgs)
         assert isinstance(results[0], np.ndarray)
 
 
 @pytest.mark.parametrize('from_file', [True, False])
 @pytest.mark.parametrize('data_type', ['train', 'val', 'test'])
-def test_get_classes_from_config(from_file, data_type):
+def test_get_classes_from_config(from_file, data_type, tmp_path):
     from mmcls.datasets import DATASETS
 
     from mmdeploy.codebase.mmcls.deploy.classification_model import \
@@ -136,7 +123,7 @@
     })
 
     if from_file:
-        config_path = NamedTemporaryFile(suffix='.py').name
+        config_path = str(tmp_path / 'tmp.py')
         with open(config_path, 'w') as file:
             file.write(data_cfg.pretty_text)
         data_cfg = config_path
@@ -157,7 +144,6 @@
             codebase_config=dict(type='mmcls')))
 
     from mmdeploy.backend.onnxruntime import ORTWrapper
-    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
 
     # simplify backend inference
     with SwitchBackendWrapper(ORTWrapper) as wrapper:
diff --git a/tests/test_codebase/test_mmcls/test_mmcls_models.py b/tests/test_codebase/test_mmcls/test_mmcls_models.py
index caac79e37..294b87d6d 100644
--- a/tests/test_codebase/test_mmcls/test_mmcls_models.py
+++ b/tests/test_codebase/test_mmcls/test_mmcls_models.py
@@ -4,28 +4,25 @@ import numpy as np
 import pytest
 import torch
 
-from mmdeploy.codebase import import_codebase
 from mmdeploy.core import RewriterContext
-from mmdeploy.utils import Backend, Codebase
+from mmdeploy.utils import Backend
 from mmdeploy.utils.test import WrapModel, check_backend, get_rewrite_outputs
 
-try:
-    import_codebase(Codebase.MMCLS)
-except ImportError:
-    pytest.skip(f'{Codebase.MMCLS} is not installed.', allow_module_level=True)
-
 input = torch.rand(1)
 
 
-def get_invertedresidual_model():
+@pytest.fixture(scope='module')
+def invertedresidual_model():
     from mmcls.models.backbones.shufflenet_v2 import InvertedResidual
     model = InvertedResidual(16, 16)
     model.requires_grad_(False)
+    model.eval()
     return model
 
 
-def get_vit_model():
+@pytest.fixture(scope='module')
+def vit_model():
     from mmcls.models.classifiers.image import ImageClassifier
     model = ImageClassifier(
         backbone={
@@ -58,6 +55,7 @@
         },
     )
     model.requires_grad_(False)
+    model.eval()
     return model
 
 
@@ -115,10 +113,11 @@ def test_multilabel_cls_head():
 @pytest.mark.parametrize(
     'backend_type',
     [Backend.ONNXRUNTIME, Backend.TENSORRT, Backend.NCNN, Backend.OPENVINO])
-def test_shufflenetv2_backbone__forward(backend_type: Backend):
+def test_shufflenetv2_backbone__forward(backend_type: Backend,
+                                        invertedresidual_model):
 
     check_backend(backend_type, True)
-    model = get_invertedresidual_model()
+    model = invertedresidual_model
     model.cpu().eval()
     if backend_type.value == 'tensorrt':
         deploy_cfg = mmcv.Config(
@@ -163,11 +162,11 @@
 
 
 @pytest.mark.parametrize('backend_type', [Backend.NCNN])
-def test_vision_transformer_backbone__forward(backend_type: Backend):
+def test_vision_transformer_backbone__forward(backend_type: Backend,
+                                              vit_model):
     check_backend(backend_type, True)
-    model = get_vit_model()
-    model.eval()
+    model = vit_model.eval()
 
     deploy_cfg = mmcv.Config(
         dict(
diff --git a/tests/test_codebase/test_mmdet/conftest.py b/tests/test_codebase/test_mmdet/conftest.py
new file mode 100644
index 000000000..d5eda3c46
--- /dev/null
+++ b/tests/test_codebase/test_mmdet/conftest.py
@@ -0,0 +1,19 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import pytest
+
+from mmdeploy.codebase import import_codebase
+from mmdeploy.utils import Codebase
+
+
+def pytest_ignore_collect(*args, **kwargs):
+    import importlib
+    return importlib.util.find_spec('mmdet') is None
+
+
+@pytest.fixture(autouse=True, scope='package')
+def import_all_modules():
+    codebase = Codebase.MMDET
+    try:
+        import_codebase(codebase)
+    except ImportError:
+        pytest.skip(f'{codebase} is not installed.', allow_module_level=True)
diff --git a/tests/test_codebase/test_mmdet/test_mmdet_core.py b/tests/test_codebase/test_mmdet/test_mmdet_core.py
index 45e31aadc..de5b51e24 100644
--- a/tests/test_codebase/test_mmdet/test_mmdet_core.py
+++ b/tests/test_codebase/test_mmdet/test_mmdet_core.py
@@ -6,17 +6,16 @@ import numpy as np
 import pytest
 import torch
 
-from mmdeploy.codebase import import_codebase
 from mmdeploy.core.rewriters.rewriter_manager import RewriterContext
-from mmdeploy.utils import Backend, Codebase
+from mmdeploy.utils import Backend
 from mmdeploy.utils.test import (WrapFunction, WrapModel, backend_checker,
                                  check_backend, get_onnx_model,
                                  get_rewrite_outputs)
 
 try:
-    import_codebase(Codebase.MMDET)
-except ImportError:
-    pytest.skip(f'{Codebase.MMDET} is not installed.', allow_module_level=True)
+    from torch.testing import assert_close as torch_assert_close
+except Exception:
+    from torch.testing import assert_allclose as torch_assert_close
 
 
 @backend_checker(Backend.TENSORRT)
@@ -75,10 +74,7 @@ def test_multiclass_nms_static():
 
 @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
 @pytest.mark.parametrize('add_ctr_clamp', [True, False])
-@pytest.mark.parametrize('clip_border,max_shape',
-                         [(False, None), (True, torch.tensor([100, 200]))])
-def test_delta2bbox(backend_type: Backend, add_ctr_clamp: bool,
-                    clip_border: bool, max_shape: tuple):
+def test_delta2bbox(backend_type: Backend, add_ctr_clamp: bool):
     check_backend(backend_type)
     deploy_cfg = mmcv.Config(
         dict(
@@ -319,7 +315,7 @@ def test__anchorgenerator__single_level_grid_priors():
 
     # test forward
     with RewriterContext({}, backend_type):
         wrap_output = wrapped_func(x)
-    torch.testing.assert_allclose(output, wrap_output)
+    torch_assert_close(output, wrap_output)
 
     onnx_prefix = tempfile.NamedTemporaryFile().name
diff --git a/tests/test_codebase/test_mmdet/test_mmdet_models.py b/tests/test_codebase/test_mmdet/test_mmdet_models.py
index 97d9f1f67..1c7ab3299 100644
--- a/tests/test_codebase/test_mmdet/test_mmdet_models.py
+++ b/tests/test_codebase/test_mmdet/test_mmdet_models.py
@@ -9,16 +9,15 @@ import numpy as np
 import pytest
 import torch
 
-from mmdeploy.codebase import import_codebase
-from mmdeploy.utils import Backend, Codebase
+from mmdeploy.utils import Backend
 from mmdeploy.utils.config_utils import get_ir_config
 from mmdeploy.utils.test import (WrapModel, backend_checker, check_backend,
                                  get_model_outputs, get_rewrite_outputs)
 
 try:
-    import_codebase(Codebase.MMDET)
-except ImportError:
-    pytest.skip(f'{Codebase.MMDET} is not installed.', allow_module_level=True)
+    from torch.testing import assert_close as torch_assert_close
+except Exception:
+    from torch.testing import assert_allclose as torch_assert_close
 
 
 def seed_everything(seed=1029):
@@ -46,60 +45,17 @@ def convert_to_list(rewrite_output: Dict, output_names: List[str]) -> List:
     return outputs
 
 
-def get_anchor_head_model():
-    """AnchorHead Config."""
-    test_cfg = mmcv.Config(
-        dict(
-            deploy_nms_pre=0,
-            min_bbox_size=0,
-            score_thr=0.05,
-            nms=dict(type='nms', iou_threshold=0.5),
-            max_per_img=100))
-
-    from mmdet.models.dense_heads import AnchorHead
-    model = AnchorHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
-    model.requires_grad_(False)
-
-    return model
+def get_head_inputs(seed, channels, num_inputs):
+    """Generate inputs for the head."""
+    seed_everything(seed)
+    return [
+        torch.rand(1, channels, pow(2, i), pow(2, i))
+        for i in range(num_inputs, 0, -1)
+    ]
 
 
-def get_ssd_head_model():
-    """SSDHead Config."""
-    test_cfg = mmcv.Config(
-        dict(
-            nms_pre=1000,
-            nms=dict(type='nms', iou_threshold=0.45),
-            min_bbox_size=0,
-            score_thr=0.02,
-            max_per_img=200))
-
-    from mmdet.models import SSDHead
-    model = SSDHead(
-        in_channels=(96, 1280, 512, 256, 256, 128),
-        num_classes=4,
-        use_depthwise=True,
-        norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
-        act_cfg=dict(type='ReLU6'),
-        init_cfg=dict(type='Normal', layer='Conv2d', std=0.001),
-        anchor_generator=dict(
-            type='SSDAnchorGenerator',
-            scale_major=False,
-            strides=[16, 32, 64, 107, 160, 320],
-            ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]],
-            min_sizes=[48, 100, 150, 202, 253, 304],
-            max_sizes=[100, 150, 202, 253, 304, 320]),
-        bbox_coder=dict(
-            type='DeltaXYWHBBoxCoder',
-            target_means=[.0, .0, .0, .0],
-            target_stds=[0.1, 0.1, 0.2, 0.2]),
-        test_cfg=test_cfg)
-
-    model.requires_grad_(False)
-
-    return model
-
-
-def get_fcos_head_model():
+@pytest.fixture
+def fcos_head_model():
     """FCOS Head Config."""
     test_cfg = mmcv.Config(
         dict(
@@ -116,7 +72,8 @@
     return model
 
 
-def get_focus_backbone_model():
+@pytest.fixture
+def focus_backbone_model():
     """Backbone Focus Config."""
     from mmdet.models.backbones.csp_darknet import Focus
     model = Focus(3, 32)
@@ -125,7 +82,8 @@
     return model
 
 
-def get_l2norm_forward_model():
+@pytest.fixture
+def l2norm_forward_model():
     """L2Norm Neck Config."""
     from mmdet.models.necks.ssd_neck import L2Norm
     model = L2Norm(16)
@@ -135,7 +93,8 @@ def get_l2norm_forward_model():
     return model
 
 
-def get_rpn_head_model():
+@pytest.fixture
+def rpn_head_model():
     """RPN Head Config."""
     test_cfg = mmcv.Config(
         dict(
@@ -151,24 +110,8 @@
     return model
 
 
-def get_reppoints_head_model():
-    """Reppoints Head Config."""
-    test_cfg = mmcv.Config(
-        dict(
-            deploy_nms_pre=0,
-            min_bbox_size=0,
-            score_thr=0.05,
-            nms=dict(type='nms', iou_threshold=0.5),
-            max_per_img=100))
-
-    from mmdet.models.dense_heads import RepPointsHead
-    model = RepPointsHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
-
-    model.requires_grad_(False)
-    return model
-
-
-def get_detrhead_model():
+@pytest.fixture
+def detrhead_model():
     """DETR head Config."""
     from mmdet.models import build_head
     model = build_head(
@@ -232,46 +175,10 @@ def get_detrhead_model():
     return model
 
 
-def get_single_roi_extractor():
-    """SingleRoIExtractor Config."""
-    from mmdet.models.roi_heads import SingleRoIExtractor
-    roi_layer = dict(type='RoIAlign', output_size=7, sampling_ratio=2)
-    out_channels = 1
-    featmap_strides = [4, 8, 16, 32]
-    model = SingleRoIExtractor(roi_layer, out_channels, featmap_strides).eval()
-
-    return model
-
-
-def get_gfl_head_model():
-    test_cfg = mmcv.Config(
-        dict(
-            nms_pre=1000,
-            min_bbox_size=0,
-            score_thr=0.05,
-            nms=dict(type='nms', iou_threshold=0.6),
-            max_per_img=100))
-    anchor_generator = dict(
-        type='AnchorGenerator',
-        scales_per_octave=1,
-        octave_base_scale=8,
-        ratios=[1.0],
-        strides=[8, 16, 32, 64, 128])
-    from mmdet.models.dense_heads import GFLHead
-    model = GFLHead(
-        num_classes=3,
-        in_channels=256,
-        reg_max=3,
-        test_cfg=test_cfg,
-        anchor_generator=anchor_generator)
-    model.requires_grad_(False)
-    return model
-
-
 @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME, Backend.NCNN])
-def test_focus_forward(backend_type):
+def test_focus_forward(backend_type, focus_backbone_model):
     check_backend(backend_type)
-    focus_model = get_focus_backbone_model()
+    focus_model = focus_backbone_model
     focus_model.cpu().eval()
     s = 128
     seed_everything(1234)
@@ -292,14 +199,14 @@
     for model_output, rewrite_output in zip(model_outputs[0], rewrite_outputs):
         model_output = model_output.squeeze()
         rewrite_output = rewrite_output.squeeze()
-        torch.testing.assert_allclose(
+        torch_assert_close(
            model_output, rewrite_output, rtol=1e-03, atol=1e-05)
 
 
 @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
-def test_l2norm_forward(backend_type):
+def test_l2norm_forward(backend_type, l2norm_forward_model):
     check_backend(backend_type)
-    l2norm_neck = get_l2norm_forward_model()
+    l2norm_neck = l2norm_forward_model
     l2norm_neck.cpu().eval()
     s = 128
     deploy_cfg = mmcv.Config(
@@ -334,10 +241,10 @@
                 model_output[0], rewrite_output, rtol=1e-03, atol=1e-05)
 
 
-def test_get_bboxes_of_fcos_head_ncnn():
+def test_get_bboxes_of_fcos_head_ncnn(fcos_head_model):
     backend_type = Backend.NCNN
     check_backend(backend_type)
-    fcos_head = get_fcos_head_model()
+    fcos_head = fcos_head_model
     fcos_head.cpu().eval()
     s = 128
     img_metas = [{
@@ -368,18 +275,9 @@
     # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2).
     # the bboxes's size: (1, 36, 32, 32), (1, 36, 16, 16),
     # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2)
-    seed_everything(1234)
-    cls_score = [
-        torch.rand(1, fcos_head.num_classes, pow(2, i), pow(2, i))
-        for i in range(5, 0, -1)
-    ]
-    seed_everything(5678)
-    bboxes = [torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
-
-    seed_everything(9101)
-    centernesses = [
-        torch.rand(1, 1, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
-    ]
+    cls_score = get_head_inputs(1234, fcos_head.num_classes, 5)
+    bboxes = get_head_inputs(5678, 4, 5)
+    centernesses = get_head_inputs(9101, 1, 5)
 
     # to get outputs of onnx model after rewrite
     img_metas[0]['img_shape'] = torch.Tensor([s, s])
@@ -403,9 +301,9 @@
 
 
 @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME, Backend.NCNN])
-def test_get_bboxes_of_rpn_head(backend_type: Backend):
+def test_get_bboxes_of_rpn_head(backend_type: Backend, rpn_head_model):
     check_backend(backend_type)
-    head = get_rpn_head_model()
+    head = rpn_head_model
     head.cpu().eval()
     s = 4
     img_metas = [{
@@ -435,12 +333,8 @@
     # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2).
     # the bboxes's size: (1, 36, 32, 32), (1, 36, 16, 16),
     # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2)
-    seed_everything(1234)
-    cls_score = [
-        torch.rand(1, 9, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
-    ]
-    seed_everything(5678)
-    bboxes = [torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
+    cls_score = get_head_inputs(1234, 9, 5)
+    bboxes = get_head_inputs(5678, 36, 5)
 
     # to get outputs of onnx model after rewrite
     img_metas[0]['img_shape'] = torch.Tensor([s, s])
@@ -460,86 +354,105 @@
     assert rewrite_outputs is not None
 
 
-@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
-def test_get_bboxes_of_gfl_head(backend_type):
-    check_backend(backend_type)
-    head = get_gfl_head_model()
-    head.cpu().eval()
-    s = 4
-    img_metas = [{
-        'scale_factor': np.ones(4),
-        'pad_shape': (s, s, 3),
-        'img_shape': (s, s, 3)
-    }]
-    output_names = ['dets']
-    deploy_cfg = mmcv.Config(
-        dict(
-            backend_config=dict(type=backend_type.value),
-            onnx_config=dict(output_names=output_names, input_shape=None),
-            codebase_config=dict(
-                type='mmdet',
-                task='ObjectDetection',
-                model_type='ncnn_end2end',
-                post_processing=dict(
-                    score_threshold=0.05,
-                    iou_threshold=0.5,
-                    max_output_boxes_per_class=200,
-                    pre_top_k=5000,
-                    keep_top_k=100,
-                    background_label_id=-1,
-                ))))
+class TestGFLHead:
 
-    seed_everything(1234)
-    cls_score = [
-        torch.rand(1, 3, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
-    ]
-    seed_everything(5678)
-    bboxes = [torch.rand(1, 16, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
+    @pytest.fixture
+    def gfl_head(self):
+        test_cfg = mmcv.Config(
+            dict(
+                nms_pre=1000,
+                min_bbox_size=0,
+                score_thr=0.05,
+                nms=dict(type='nms', iou_threshold=0.6),
+                max_per_img=100))
+        anchor_generator = dict(
+            type='AnchorGenerator',
+            scales_per_octave=1,
+            octave_base_scale=8,
+            ratios=[1.0],
+            strides=[8, 16, 32, 64, 128])
+        from mmdet.models.dense_heads import GFLHead
+        model = GFLHead(
+            num_classes=3,
+            in_channels=256,
+            reg_max=3,
+            test_cfg=test_cfg,
+            anchor_generator=anchor_generator)
+        model.requires_grad_(False)
+        model.cpu().eval()
+        return model
 
-    # to get outputs of onnx model after rewrite
-    img_metas[0]['img_shape'] = torch.Tensor([s, s])
-    wrapped_model = WrapModel(
-        head, 'get_bboxes', img_metas=img_metas, with_nms=True)
-    rewrite_inputs = {
-        'cls_scores': cls_score,
-        'bbox_preds': bboxes,
-    }
-    # do not run with ncnn backend
-    run_with_backend = False if backend_type in [Backend.NCNN] else True
-    rewrite_outputs, is_backend_output = get_rewrite_outputs(
-        wrapped_model=wrapped_model,
-        model_inputs=rewrite_inputs,
-        deploy_cfg=deploy_cfg,
-        run_with_backend=run_with_backend)
-    assert rewrite_outputs is not None
+    @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
+    def test_get_bboxes_of_gfl_head(self, backend_type, gfl_head):
+        check_backend(backend_type)
+        s = 4
+        img_metas = [{
+            'scale_factor': np.ones(4),
+            'pad_shape': (s, s, 3),
+            'img_shape': (s, s, 3)
+        }]
+        output_names = ['dets']
+        deploy_cfg = mmcv.Config(
+            dict(
+                backend_config=dict(type=backend_type.value),
+                onnx_config=dict(output_names=output_names, input_shape=None),
+                codebase_config=dict(
+                    type='mmdet',
+                    task='ObjectDetection',
+                    model_type='ncnn_end2end',
+                    post_processing=dict(
+                        score_threshold=0.05,
+                        iou_threshold=0.5,
+                        max_output_boxes_per_class=200,
+                        pre_top_k=5000,
+                        keep_top_k=100,
+                        background_label_id=-1,
+                    ))))
+        cls_score = get_head_inputs(1234, 3, 5)
+        bboxes = get_head_inputs(5678, 16, 5)
 
+        # to get outputs of onnx model after rewrite
+        img_metas[0]['img_shape'] = torch.Tensor([s, s])
+        wrapped_model = WrapModel(
+            gfl_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
+        rewrite_inputs = {
+            'cls_scores': cls_score,
+            'bbox_preds': bboxes,
+        }
+        # do not run with ncnn backend
+        run_with_backend = False if backend_type in [Backend.NCNN] else True
+        rewrite_outputs, is_backend_output = get_rewrite_outputs(
+            wrapped_model=wrapped_model,
+            model_inputs=rewrite_inputs,
+            deploy_cfg=deploy_cfg,
+            run_with_backend=run_with_backend)
+        assert rewrite_outputs is not None
 
-@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
-def test_forward_of_gfl_head(backend_type):
-    check_backend(backend_type)
-    head = get_gfl_head_model()
-    head.cpu().eval()
-    deploy_cfg = mmcv.Config(
-        dict(
-            backend_config=dict(type=backend_type.value),
-            onnx_config=dict(input_shape=None)))
-    feats = [torch.rand(1, 256, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
-    model_outputs = [head.forward(feats)]
-    wrapped_model = WrapModel(head, 'forward')
-    rewrite_inputs = {
-        'feats': feats,
-    }
-    rewrite_outputs, is_backend_output = get_rewrite_outputs(
-        wrapped_model=wrapped_model,
-        model_inputs=rewrite_inputs,
-        deploy_cfg=deploy_cfg)
-    model_outputs[0] = [*model_outputs[0][0], *model_outputs[0][1]]
-    for model_output, rewrite_output in zip(model_outputs[0],
-                                            rewrite_outputs[0]):
-        model_output = model_output.squeeze().cpu().numpy()
-        rewrite_output = rewrite_output.squeeze()
-        assert np.allclose(
-            model_output, rewrite_output, rtol=1e-03, atol=1e-05)
+    @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
+    def test_forward_of_gfl_head(self, backend_type, gfl_head):
+        check_backend(backend_type)
+        deploy_cfg = mmcv.Config(
+            dict(
+                backend_config=dict(type=backend_type.value),
+                onnx_config=dict(input_shape=None)))
+        feats = get_head_inputs(1234, 256, 5)
+        model_outputs = [gfl_head.forward(feats)]
+        wrapped_model = WrapModel(gfl_head, 'forward')
+        rewrite_inputs = {
+            'feats': feats,
+        }
+        rewrite_outputs, is_backend_output = get_rewrite_outputs(
+            wrapped_model=wrapped_model,
+            model_inputs=rewrite_inputs,
+            deploy_cfg=deploy_cfg)
+        model_outputs[0] = [*model_outputs[0][0], *model_outputs[0][1]]
+        for model_output, rewrite_output in zip(model_outputs[0],
+                                                rewrite_outputs[0]):
+            model_output = model_output.squeeze().cpu().numpy()
+            rewrite_output = rewrite_output.squeeze()
+            assert np.allclose(
+                model_output, rewrite_output, rtol=1e-03, atol=1e-05)
 
 
 def _replace_r50_with_r18(model):
@@ -592,252 +505,299 @@ def test_forward_of_base_detector(model_cfg_path, backend):
     assert rewrite_outputs is not None
 
 
-@pytest.mark.parametrize('backend_type',
-                         [Backend.ONNXRUNTIME, Backend.OPENVINO])
-def test_single_roi_extractor(backend_type: Backend):
-    check_backend(backend_type)
-
-    single_roi_extractor = get_single_roi_extractor()
-    output_names = ['roi_feat']
-    deploy_cfg = mmcv.Config(
-        dict(
-            backend_config=dict(type=backend_type.value),
-            onnx_config=dict(output_names=output_names, input_shape=None),
-            codebase_config=dict(
-                type='mmdet',
-                task='ObjectDetection',
-            )))
-
-    seed_everything(1234)
-    out_channels = single_roi_extractor.out_channels
-    feats = [
-        torch.rand((1, out_channels, 200, 336)),
-        torch.rand((1, out_channels, 100, 168)),
-        torch.rand((1, out_channels, 50, 84)),
-        torch.rand((1, out_channels, 25, 42)),
-    ]
-    seed_everything(5678)
-    rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])
-
-    model_inputs = {
-        'feats': feats,
-        'rois': rois,
-    }
-    model_outputs = get_model_outputs(single_roi_extractor, 'forward',
-                                      model_inputs)
-
-    backend_outputs, _ = get_rewrite_outputs(
-        wrapped_model=single_roi_extractor,
-        model_inputs=model_inputs,
-        deploy_cfg=deploy_cfg)
-    if isinstance(backend_outputs, dict):
-        backend_outputs = backend_outputs.values()
-    for model_output, backend_output in zip(model_outputs[0], backend_outputs):
-        model_output = model_output.squeeze().cpu().numpy()
-        backend_output = backend_output.squeeze()
-        assert np.allclose(
-            model_output, backend_output, rtol=1e-03, atol=1e-05)
-
-
-def test_single_roi_extractor__ascend():
-    check_backend(Backend.ASCEND)
-
-    # create wrap function
-    from mmdeploy.utils.test import WrapFunction
-    single_roi_extractor = get_single_roi_extractor()
-    out_channels = single_roi_extractor.out_channels
-
-    def single_roi_extractor_func(feat0, feat1, feat2, feat3, rois):
-        return single_roi_extractor([feat0, feat1, feat2, feat3], rois)
-
-    single_roi_extractor_wrapper = WrapFunction(single_roi_extractor_func)
-
-    # generate data
-    seed_everything(1234)
-    feats = [
-        torch.rand((1, out_channels, 200, 336)),
-        torch.rand((1, out_channels, 100, 168)),
-        torch.rand((1, out_channels, 50, 84)),
-        torch.rand((1, out_channels, 25, 42)),
-    ]
-    seed_everything(5678)
-    rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])
-
-    # create config
-    input_names = ['feat0', 'feat1', 'feat2', 'feat3', 'rois']
-    output_names = ['roi_feat']
-    model_inputs = dict(zip(input_names, feats + [rois]))
-    deploy_cfg = mmcv.Config(
-        dict(
-            backend_config=dict(
-                type=Backend.ASCEND.value,
-                model_inputs=[
-                    dict(
-                        input_shapes=dict(
-                            feat0=feats[0].shape,
-                            feat1=feats[1].shape,
-                            feat2=feats[2].shape,
-                            feat3=feats[3].shape,
-                            rois=rois.shape))
-                ]),
-            onnx_config=dict(
-                input_names=input_names,
-                output_names=output_names,
-                input_shape=None),
-            codebase_config=dict(
-                type='mmdet',
-                task='ObjectDetection',
-            )))
-
-    # get torch output
-    model_outputs = get_model_outputs(single_roi_extractor_wrapper, 'forward',
-                                      model_inputs)
-
-    # get backend output
-    backend_outputs, _ = get_rewrite_outputs(
-        wrapped_model=single_roi_extractor_wrapper,
-        model_inputs=model_inputs,
-        deploy_cfg=deploy_cfg)
-    if isinstance(backend_outputs, dict):
-        backend_outputs = backend_outputs.values()
-    for model_output, backend_output in zip(model_outputs[0], backend_outputs):
-        model_output = model_output.squeeze().cpu().numpy()
-        backend_output = backend_output.squeeze()
-        assert model_output.shape == backend_output.shape
-
-
-def get_cascade_roi_head(is_instance_seg=False):
-    """CascadeRoIHead Config."""
-    num_stages = 3
-    stage_loss_weights = [1, 0.5, 0.25]
-    bbox_roi_extractor = {
-        'type': 'SingleRoIExtractor',
-        'roi_layer': {
-            'type': 'RoIAlign',
-            'output_size': 7,
-            'sampling_ratio': 0
-        },
-        'out_channels': 64,
-        'featmap_strides': [4, 8, 16, 32]
-    }
-    all_target_stds = [[0.1, 0.1, 0.2, 0.2], [0.05, 0.05, 0.1, 0.1],
-                       [0.033, 0.033, 0.067, 0.067]]
-    bbox_head = [{
-        'type': 'Shared2FCBBoxHead',
-        'in_channels': 64,
-        'fc_out_channels': 1024,
-        'roi_feat_size': 7,
-        'num_classes': 80,
-        'bbox_coder': {
-            'type': 'DeltaXYWHBBoxCoder',
-            'target_means': [0.0, 0.0, 0.0, 0.0],
-            'target_stds': target_stds
-        },
-        'reg_class_agnostic': True,
-        'loss_cls': {
-            'type': 'CrossEntropyLoss',
-            'use_sigmoid': False,
-            'loss_weight': 1.0
-        },
-        'loss_bbox': {
-            'type': 'SmoothL1Loss',
-            'beta': 1.0,
-            'loss_weight': 1.0
-        }
-    } for target_stds in all_target_stds]
-
-    mask_roi_extractor = {
-        'type': 'SingleRoIExtractor',
-        'roi_layer': {
-            'type': 'RoIAlign',
-            'output_size': 14,
-            'sampling_ratio': 0
-        },
-        'out_channels': 64,
-        'featmap_strides': [4, 8, 16, 32]
-    }
-    mask_head = {
-        'type': 'FCNMaskHead',
-        'num_convs': 4,
-        'in_channels': 64,
-        'conv_out_channels': 64,
-        'num_classes': 80,
-        'loss_mask': {
-            'type': 'CrossEntropyLoss',
-            'use_mask': True,
-            'loss_weight': 1.0
-        }
-    }
-
-    test_cfg = mmcv.Config(
-        dict(
-            score_thr=0.05,
-            nms=mmcv.Config(dict(type='nms', iou_threshold=0.5)),
-            max_per_img=100,
-            mask_thr_binary=0.5))
-
-    args = [num_stages, stage_loss_weights, bbox_roi_extractor, bbox_head]
-    kwargs = {'test_cfg': test_cfg}
-    if is_instance_seg:
-        args += [mask_roi_extractor, mask_head]
-
-    from mmdet.models.roi_heads import CascadeRoIHead
-    model = CascadeRoIHead(*args, **kwargs).eval()
-    return model
+class TestSingleRoIExtractor:
+
+    @pytest.fixture(scope='class')
+    def single_roi_extractor(self):
+        """SingleRoIExtractor Config."""
+        from mmdet.models.roi_heads import SingleRoIExtractor
+        roi_layer = dict(type='RoIAlign', output_size=7, sampling_ratio=2)
+        out_channels = 1
+        featmap_strides = [4, 8, 16, 32]
+        model = SingleRoIExtractor(roi_layer, out_channels,
+                                   featmap_strides).eval()
+
+        return model
+
+    @pytest.fixture(scope='class')
+    def feats(self, single_roi_extractor):
+        seed_everything(1234)
+        out_channels = single_roi_extractor.out_channels
+        return [
+            torch.rand((1, out_channels, 200, 336)),
+            torch.rand((1, out_channels, 100, 168)),
+            torch.rand((1, out_channels, 50, 84)),
+            torch.rand((1, out_channels, 25, 42)),
+        ]
+
+    @pytest.fixture(scope='class')
+    def rois(self):
+        seed_everything(5678)
+        return torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])
+
+    @pytest.mark.parametrize('backend_type',
+                             [Backend.ONNXRUNTIME, Backend.OPENVINO])
+    def test_single_roi_extractor(self, backend_type: Backend,
+                                  single_roi_extractor, feats, rois):
+        check_backend(backend_type)
+
+        output_names = ['roi_feat']
+        deploy_cfg = mmcv.Config(
+            dict(
+                backend_config=dict(type=backend_type.value),
+                onnx_config=dict(output_names=output_names, input_shape=None),
+                codebase_config=dict(
+                    type='mmdet',
+                    task='ObjectDetection',
+                )))
+
+        model_inputs = {
+            'feats': feats,
+            'rois': rois,
+        }
+        model_outputs = get_model_outputs(single_roi_extractor, 'forward',
+                                          model_inputs)
+
+        backend_outputs, _ = get_rewrite_outputs(
+            wrapped_model=single_roi_extractor,
+            model_inputs=model_inputs,
+            deploy_cfg=deploy_cfg)
+        if isinstance(backend_outputs, dict):
+            backend_outputs = backend_outputs.values()
+        for model_output, backend_output in zip(model_outputs[0],
+                                                backend_outputs):
+            model_output = model_output.squeeze().cpu().numpy()
+            backend_output = backend_output.squeeze()
+            assert np.allclose(
+                model_output, backend_output, rtol=1e-03, atol=1e-05)
+
+    def test_single_roi_extractor__ascend(self, single_roi_extractor, feats,
+                                          rois):
+        check_backend(Backend.ASCEND)
+
+        # create wrap function
+        from mmdeploy.utils.test import WrapFunction
+
+        def single_roi_extractor_func(feat0, feat1, feat2, feat3, rois):
+            return single_roi_extractor([feat0, feat1, feat2, feat3], rois)
+
+        single_roi_extractor_wrapper = WrapFunction(single_roi_extractor_func)
+
+        # create config
+        input_names = ['feat0', 'feat1', 'feat2', 'feat3', 'rois']
+        output_names = ['roi_feat']
+        model_inputs = dict(zip(input_names, feats + [rois]))
+        deploy_cfg = mmcv.Config(
+            dict(
+                backend_config=dict(
+                    type=Backend.ASCEND.value,
+                    model_inputs=[
+                        dict(
+                            input_shapes=dict(
+                                feat0=feats[0].shape,
+                                feat1=feats[1].shape,
+                                feat2=feats[2].shape,
+                                feat3=feats[3].shape,
+                                rois=rois.shape))
+                    ]),
+                onnx_config=dict(
+                    input_names=input_names,
+                    output_names=output_names,
+                    input_shape=None),
+                codebase_config=dict(
+                    type='mmdet',
+                    task='ObjectDetection',
+                )))
+
+        # get torch output
+        model_outputs = get_model_outputs(single_roi_extractor_wrapper,
+                                          'forward', model_inputs)
+
+        # get backend output
+        backend_outputs, _ = get_rewrite_outputs(
+            wrapped_model=single_roi_extractor_wrapper,
+            model_inputs=model_inputs,
+            deploy_cfg=deploy_cfg)
+        if isinstance(backend_outputs, dict):
+            backend_outputs = backend_outputs.values()
+        for model_output, backend_output in zip(model_outputs[0],
+                                                backend_outputs):
+            model_output = model_output.squeeze().cpu().numpy()
+            backend_output = backend_output.squeeze()
+            assert model_output.shape == backend_output.shape
+
+
+class TestCascadeRoIHead:
+
+    @pytest.fixture(scope='class')
+    def cascade_roi_head(self, request):
+        """CascadeRoIHead Config."""
+        is_instance_seg = request.node.get_closest_marker(
+            'is_instance_seg', False)
+        num_stages = 3
+        stage_loss_weights = [1, 0.5, 0.25]
+        bbox_roi_extractor = {
+            'type': 'SingleRoIExtractor',
+            'roi_layer': {
+                'type': 'RoIAlign',
+                'output_size': 7,
+                'sampling_ratio': 0
+            },
+            'out_channels': 64,
+            'featmap_strides': [4, 8, 16, 32]
+        }
+        all_target_stds = [[0.1, 0.1, 0.2, 0.2], [0.05, 0.05, 0.1, 0.1],
+                           [0.033, 0.033, 0.067, 0.067]]
+        bbox_head = [{
+            'type': 'Shared2FCBBoxHead',
+            'in_channels': 64,
+            'fc_out_channels': 1024,
+            'roi_feat_size': 7,
+            'num_classes': 80,
+            'bbox_coder': {
+                'type': 'DeltaXYWHBBoxCoder',
+                'target_means': [0.0, 0.0, 0.0, 0.0],
+                'target_stds': target_stds
+            },
+            'reg_class_agnostic': True,
+            'loss_cls': {
+                'type': 'CrossEntropyLoss',
+                'use_sigmoid': False,
+                'loss_weight': 1.0
+            },
+            'loss_bbox': {
+                'type': 'SmoothL1Loss',
+                'beta': 1.0,
+                'loss_weight': 1.0
+            }
+        } for target_stds in all_target_stds]
+
+        mask_roi_extractor = {
+            'type': 'SingleRoIExtractor',
+            'roi_layer': {
+                'type': 'RoIAlign',
+                'output_size': 14,
+                'sampling_ratio': 0
+            },
+            'out_channels': 64,
+            'featmap_strides': [4, 8, 16, 32]
+        }
+        mask_head = {
+            'type': 'FCNMaskHead',
+            'num_convs': 4,
+            'in_channels': 64,
+            'conv_out_channels': 64,
+            'num_classes': 80,
+            'loss_mask': {
+                'type': 'CrossEntropyLoss',
+                'use_mask': True,
+                'loss_weight': 1.0
+            }
+        }
+
+        test_cfg = mmcv.Config(
+            dict(
+                score_thr=0.05,
+                nms=mmcv.Config(dict(type='nms', iou_threshold=0.5)),
+                max_per_img=100,
+                mask_thr_binary=0.5))
+
+        args = [num_stages, stage_loss_weights, bbox_roi_extractor, bbox_head]
+        kwargs = {'test_cfg': test_cfg}
+        if is_instance_seg:
+            args += [mask_roi_extractor, mask_head]
+
+        from mmdet.models.roi_heads import CascadeRoIHead
+        model = CascadeRoIHead(*args, **kwargs).eval()
+        return model
+
+    @pytest.fixture(scope='class')
+    def model_inputs(self):
+        seed_everything(1234)
+        x = [
+            torch.rand((1, 64, 200, 304)),
+            torch.rand((1, 64, 100, 152)),
+            torch.rand((1, 64, 50, 76)),
+            torch.rand((1, 64, 25, 38)),
+        ]
+        proposals = torch.tensor([[587.8285, 52.1405, 886.2484, 341.5644,
+                                   0.5]])
+        return {'x': x, 'proposals': proposals.unsqueeze(0)}
+
+    @pytest.fixture(scope='class')
+    def img_metas(self):
+        return {
+            'img_shape': torch.tensor([800, 1216]),
+            'ori_shape': torch.tensor([800, 1216]),
+            'scale_factor': torch.tensor([1, 1, 1, 1])
+        }
+
+    @pytest.fixture(scope='class')
+    def wrapped_model(self, cascade_roi_head, img_metas):
+        return WrapModel(
+            cascade_roi_head, 'simple_test', img_metas=[img_metas])
+
+    @pytest.mark.parametrize('backend_type',
+                             [Backend.ONNXRUNTIME, Backend.OPENVINO])
+    def test_cascade_roi_head(self, backend_type: Backend, model_inputs,
+                              wrapped_model):
+        check_backend(backend_type)
+
+        output_names = ['results']
+        deploy_cfg = mmcv.Config(
+            dict(
+                backend_config=dict(type=backend_type.value),
+                onnx_config=dict(output_names=output_names, input_shape=None),
+                codebase_config=dict(
+                    type='mmdet',
+                    task='ObjectDetection',
+                    post_processing=dict(
+                        score_threshold=0.05,
+                        iou_threshold=0.5,
+                        max_output_boxes_per_class=200,
+                        pre_top_k=-1,
+                        keep_top_k=100,
+                        background_label_id=-1))))
+        backend_outputs, _ = get_rewrite_outputs(
+            wrapped_model=wrapped_model,
+            model_inputs=model_inputs,
+            deploy_cfg=deploy_cfg)
+
+        assert backend_outputs is not None
+
+    @pytest.mark.parametrize('backend_type', [Backend.OPENVINO])
+    @pytest.mark.is_instance_seg(True)
+    def test_cascade_roi_head_with_mask(self, backend_type: Backend,
+                                        model_inputs, wrapped_model):
+        check_backend(backend_type)
+
+        output_names = ['bbox_results', 'segm_results']
+        deploy_cfg = mmcv.Config(
+            dict(
+                backend_config=dict(type=backend_type.value),
+                onnx_config=dict(output_names=output_names, input_shape=None),
+                codebase_config=dict(
+                    type='mmdet',
+                    task='ObjectDetection',
+                    post_processing=dict(
+                        score_threshold=0.05,
+                        iou_threshold=0.5,
+                        max_output_boxes_per_class=200,
+                        pre_top_k=-1,
+                        keep_top_k=100,
+                        background_label_id=-1))))
+        backend_outputs, _ = get_rewrite_outputs(
+            wrapped_model=wrapped_model,
+            model_inputs=model_inputs,
+            deploy_cfg=deploy_cfg)
+        bbox_results = backend_outputs[0]
+        segm_results = backend_outputs[1]
+        assert bbox_results is not None
+        assert segm_results is not None
 
 
-@pytest.mark.parametrize('backend_type',
-                         [Backend.ONNXRUNTIME, Backend.OPENVINO])
-def test_cascade_roi_head(backend_type: Backend):
-    check_backend(backend_type)
-
-    cascade_roi_head = get_cascade_roi_head()
-    seed_everything(1234)
-    x = [
-        torch.rand((1, 64, 200, 304)),
-        torch.rand((1, 64, 100, 152)),
-        torch.rand((1, 64, 50, 76)),
-        torch.rand((1, 64, 25, 38)),
-    ]
-    proposals = torch.tensor([[587.8285, 52.1405, 886.2484, 341.5644, 0.5]])
-    img_metas = {
-        'img_shape': torch.tensor([800, 1216]),
-        'ori_shape': torch.tensor([800, 1216]),
-        'scale_factor': torch.tensor([1, 1, 1, 1])
-    }
-
-    model_inputs = {
-        'x': x,
-        'proposal_list': [proposals],
-        'img_metas': [img_metas]
-    }
-    output_names = ['results']
-    deploy_cfg = mmcv.Config(
-        dict(
-            backend_config=dict(type=backend_type.value),
-            onnx_config=dict(output_names=output_names, input_shape=None),
-            codebase_config=dict(
-                type='mmdet',
-                task='ObjectDetection',
-                post_processing=dict(
-                    score_threshold=0.05,
-                    iou_threshold=0.5,
-                    max_output_boxes_per_class=200,
-                    pre_top_k=-1,
-                    keep_top_k=100,
-                    background_label_id=-1))))
-    model_inputs = {'x': x, 'proposals': proposals.unsqueeze(0)}
-    wrapped_model = WrapModel(
-        cascade_roi_head, 'simple_test', img_metas=[img_metas])
-    backend_outputs, _ = get_rewrite_outputs(
-        wrapped_model=wrapped_model,
-        model_inputs=model_inputs,
-        deploy_cfg=deploy_cfg)
-
-    assert backend_outputs is not None
-
-
-def get_fovea_head_model():
+@pytest.fixture
+def fovea_head_model():
     """FoveaHead Config."""
     test_cfg = mmcv.Config(
         dict(
@@ -856,9 +816,9 @@
     return model
 
 
 @pytest.mark.parametrize('backend_type',
                          [Backend.ONNXRUNTIME, Backend.OPENVINO])
-def test_get_bboxes_of_fovea_head(backend_type: Backend):
+def test_get_bboxes_of_fovea_head(backend_type: Backend, fovea_head_model):
     check_backend(backend_type)
-    fovea_head = get_fovea_head_model()
+    fovea_head = fovea_head_model
     fovea_head.cpu().eval()
     s = 128
     img_metas = [{
@@ -888,13 +848,8 @@
     # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2).
     # the bboxes's size: (1, 36, 32, 32), (1, 36, 16, 16),
     # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2)
-    seed_everything(1234)
-    cls_score = [
-        torch.rand(1, fovea_head.num_classes, pow(2, i), pow(2, i))
-        for i in range(5, 0, -1)
-    ]
-    seed_everything(5678)
-    bboxes = [torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
+    cls_score = get_head_inputs(1234, fovea_head.num_classes, 5)
+    bboxes = get_head_inputs(5678, 4, 5)
 
     model_inputs = {
         'cls_scores': cls_score,
@@ -932,365 +887,287 @@
     assert rewrite_outputs is not None
 
 
-@pytest.mark.parametrize('backend_type', [Backend.OPENVINO])
-def test_cascade_roi_head_with_mask(backend_type: Backend):
-    check_backend(backend_type)
-
-    cascade_roi_head = get_cascade_roi_head(is_instance_seg=True)
-    seed_everything(1234)
-    x = [
-        torch.rand((1, 64, 200, 304)),
-        torch.rand((1, 64, 100, 152)),
-        torch.rand((1, 64, 50, 76)),
-        torch.rand((1, 64, 25, 38)),
-    ]
-    proposals = torch.tensor([[587.8285, 52.1405, 886.2484, 341.5644, 0.5]])
-    img_metas = {
-        'img_shape': torch.tensor([800, 1216]),
-        'ori_shape': torch.tensor([800, 1216]),
-        'scale_factor': torch.tensor([1, 1, 1, 1])
-    }
-
-    output_names = ['bbox_results', 'segm_results']
-    deploy_cfg = mmcv.Config(
-        dict(
-            backend_config=dict(type=backend_type.value),
-            onnx_config=dict(output_names=output_names, input_shape=None),
-            codebase_config=dict(
-                type='mmdet',
-                task='ObjectDetection',
-                post_processing=dict(
-                    score_threshold=0.05,
-                    iou_threshold=0.5,
-                    max_output_boxes_per_class=200,
-                    pre_top_k=-1,
-                    keep_top_k=100,
-                    background_label_id=-1))))
-    model_inputs = {'x': x, 'proposals': proposals.unsqueeze(0)}
-    wrapped_model = WrapModel(
-        cascade_roi_head, 'simple_test', img_metas=[img_metas])
-    backend_outputs, _ = get_rewrite_outputs(
-        wrapped_model=wrapped_model,
-        model_inputs=model_inputs,
-        deploy_cfg=deploy_cfg)
-    bbox_results = backend_outputs[0]
-    segm_results = backend_outputs[1]
-    assert bbox_results is not None
-    assert segm_results is not None
+class TestYOLOV3Head:
+
+    @pytest.fixture(scope='class')
+    def yolov3_head(self):
+        """yolov3 Head Config."""
+        test_cfg = mmcv.Config(
+            dict(
+                nms_pre=1000,
+                min_bbox_size=0,
+                score_thr=0.05,
+                conf_thr=0.005,
+                nms=dict(type='nms', iou_threshold=0.45),
+                max_per_img=100))
+        from mmdet.models.dense_heads import YOLOV3Head
+        model = YOLOV3Head(
+            num_classes=4,
+            in_channels=[16, 8, 4],
+            out_channels=[32, 16, 8],
+            test_cfg=test_cfg)
+
+        model.requires_grad_(False)
+        model.cpu().eval()
+        return model
+
+    @pytest.fixture(scope='class')
+    def img_metas(self):
+        s = 128
+        return [{
+            'scale_factor': np.ones(4),
+            'pad_shape': (s, s, 3),
+            'img_shape': (s, s, 3)
+        }]
+
+    @pytest.fixture(scope='class')
+    def pred_maps(self):
+        seed_everything(1234)
+        return [
+            torch.rand(1, 27, 5, 5),
+            torch.rand(1, 27, 10, 10),
+            torch.rand(1, 27, 20, 20)
+        ]
+
+    @pytest.mark.parametrize('backend_type',
+                             [Backend.ONNXRUNTIME, Backend.OPENVINO])
+    def test_yolov3_head_get_bboxes(self, backend_type, yolov3_head,
+                                    img_metas, pred_maps):
+        """Test get_bboxes rewrite of yolov3 head."""
+        check_backend(backend_type)
+
+        output_names = ['dets', 'labels']
+        deploy_cfg = mmcv.Config(
+            dict(
+                backend_config=dict(type=backend_type.value),
+                onnx_config=dict(output_names=output_names, input_shape=None),
+                codebase_config=dict(
+                    type='mmdet',
+                    task='ObjectDetection',
+                    post_processing=dict(
+                        score_threshold=0.05,
+                        iou_threshold=0.45,
+                        confidence_threshold=0.005,
+                        max_output_boxes_per_class=200,
+                        pre_top_k=-1,
+                        keep_top_k=100,
+                        background_label_id=-1,
+                    ))))
+
+        # to get outputs of pytorch model
+        model_inputs = {'pred_maps': pred_maps, 'img_metas': img_metas}
+        model_outputs = get_model_outputs(yolov3_head, 'get_bboxes',
+                                          model_inputs)
+
+        # to get outputs of onnx model after rewrite
+        wrapped_model = WrapModel(
+            yolov3_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
+        rewrite_inputs = {
+            'pred_maps': pred_maps,
+        }
+        rewrite_outputs, is_backend_output = get_rewrite_outputs(
+            wrapped_model=wrapped_model,
+            model_inputs=rewrite_inputs,
+            deploy_cfg=deploy_cfg)
+
+        if is_backend_output:
+            if isinstance(rewrite_outputs, dict):
+                rewrite_outputs = convert_to_list(rewrite_outputs,
+                                                  output_names)
+            for model_output, rewrite_output in zip(model_outputs[0],
+                                                    rewrite_outputs):
+                model_output = model_output.squeeze().cpu().numpy()
+                rewrite_output = rewrite_output.squeeze()
+                # hard code to make two tensors with the same shape
+                # rewrite and original codes applied different nms strategy
+                assert np.allclose(
+                    model_output[:rewrite_output.shape[0]],
+                    rewrite_output,
+                    rtol=1e-03,
+                    atol=1e-05)
+        else:
+            assert rewrite_outputs is not None
+
+    def test_yolov3_head_get_bboxes_ncnn(self, yolov3_head, img_metas,
+                                         pred_maps):
+        """Test get_bboxes rewrite of yolov3 head."""
+        backend_type = Backend.NCNN
+        check_backend(backend_type)
+
+        output_names = ['detection_output']
+        deploy_cfg = mmcv.Config(
+            dict(
+                backend_config=dict(type=backend_type.value),
+                onnx_config=dict(output_names=output_names, input_shape=None),
+                codebase_config=dict(
+                    type='mmdet',
+                    model_type='ncnn_end2end',
+                    task='ObjectDetection',
+                    post_processing=dict(
+                        score_threshold=0.05,
+                        iou_threshold=0.45,
+                        confidence_threshold=0.005,
+                        max_output_boxes_per_class=200,
+                        pre_top_k=-1,
+                        keep_top_k=10,
+                        background_label_id=-1,
+                    ))))
+
+        # to get outputs of onnx model after rewrite
+        wrapped_model = WrapModel(
+            yolov3_head, 'get_bboxes', img_metas=img_metas[0], with_nms=True)
+        rewrite_inputs = {
+            'pred_maps': pred_maps,
+        }
+        rewrite_outputs, is_backend_output = get_rewrite_outputs(
+            wrapped_model=wrapped_model,
model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg) + # output should be of shape [1, N, 6] + if is_backend_output: + assert rewrite_outputs[0].shape[-1] == 6 + else: + assert rewrite_outputs.shape[-1] == 6 -def get_yolov3_head_model(): - """yolov3 Head Config.""" - test_cfg = mmcv.Config( - dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - conf_thr=0.005, - nms=dict(type='nms', iou_threshold=0.45), - max_per_img=100)) - from mmdet.models.dense_heads import YOLOV3Head - model = YOLOV3Head( - num_classes=4, - in_channels=[16, 8, 4], - out_channels=[32, 16, 8], - test_cfg=test_cfg) +class TestYOLOXHead: - model.requires_grad_(False) - return model + @pytest.fixture(scope='class') + def yolox_head(self): + """YOLOX Head Config.""" + test_cfg = mmcv.Config( + dict( + deploy_nms_pre=0, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) + + from mmdet.models.dense_heads import YOLOXHead + model = YOLOXHead(num_classes=4, in_channels=1, test_cfg=test_cfg) + + model.requires_grad_(False) + model.cpu().eval() + return model + + @pytest.fixture(scope='class') + def img_metas(self): + s = 128 + return [{ + 'scale_factor': np.ones(4), + 'pad_shape': (s, s, 3), + 'img_shape': (s, s, 3) + }] + + @pytest.fixture(scope='class') + def wrapped_model(self, yolox_head, img_metas): + return WrapModel( + yolox_head, 'get_bboxes', img_metas=img_metas, with_nms=True) + + @pytest.fixture(scope='class') + def rewrite_inputs(self, yolox_head): + + cls_scores = get_head_inputs(1234, yolox_head.num_classes, 3) + bbox_preds = get_head_inputs(5678, 4, 3) + objectnesses = get_head_inputs(9101, 1, 3) + + return { + 'cls_scores': cls_scores, + 'bbox_preds': bbox_preds, + 'objectnesses': objectnesses + } + + @pytest.fixture(scope='class') + def model_outputs(self, yolox_head, rewrite_inputs, img_metas): + model_inputs = {'img_metas': img_metas} + model_inputs.update(rewrite_inputs) + return get_model_outputs(yolox_head, 'get_bboxes', model_inputs) + + @pytest.mark.parametrize('backend_type', + [Backend.ONNXRUNTIME, Backend.OPENVINO]) + def test_yolox_head_get_bboxes(self, backend_type: Backend, rewrite_inputs, + model_outputs, wrapped_model): + """Test get_bboxes rewrite of YOLOXHead.""" + check_backend(backend_type) + output_names = ['dets', 'labels'] + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type=backend_type.value), + onnx_config=dict(output_names=output_names, input_shape=None), + codebase_config=dict( + type='mmdet', + task='ObjectDetection', + post_processing=dict( + score_threshold=0.05, + iou_threshold=0.5, + max_output_boxes_per_class=20, + pre_top_k=-1, + keep_top_k=10, + background_label_id=-1, + )))) + + # to get outputs of onnx model after rewrite + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg) + + if is_backend_output: + if isinstance(rewrite_outputs, dict): + rewrite_outputs = convert_to_list(rewrite_outputs, + output_names) + for model_output, rewrite_output in zip(model_outputs[0], + rewrite_outputs): + model_output = model_output.squeeze().cpu().numpy() + rewrite_output = rewrite_output.squeeze().cpu().numpy() + # hard code to make two tensors with the same shape + # rewrite and original codes applied different nms strategy + min_shape = min(model_output.shape[0], rewrite_output.shape[0], + 5) + assert np.allclose( + model_output[:min_shape], + rewrite_output[:min_shape], + rtol=1e-03, + atol=1e-05) + else: + assert rewrite_outputs 
is not None + + def test_yolox_head_get_bboxes_ncnn(self, rewrite_inputs, wrapped_model): + """Test get_bboxes rewrite of yolox head for ncnn.""" + backend_type = Backend.NCNN + check_backend(backend_type) + + output_names = ['detection_output'] + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type=backend_type.value), + onnx_config=dict(output_names=output_names, input_shape=None), + codebase_config=dict( + type='mmdet', + task='ObjectDetection', + post_processing=dict( + score_threshold=0.05, + iou_threshold=0.5, + max_output_boxes_per_class=20, + pre_top_k=5000, + keep_top_k=10, + background_label_id=0, + )))) + + # to get outputs of onnx model after rewrite + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg) + # output should be of shape [1, N, 6] + if is_backend_output: + assert rewrite_outputs[0].shape[-1] == 6 + else: + assert rewrite_outputs.shape[-1] == 6 -@pytest.mark.parametrize('backend_type', - [Backend.ONNXRUNTIME, Backend.OPENVINO]) -def test_yolov3_head_get_bboxes(backend_type): - """Test get_bboxes rewrite of yolov3 head.""" - check_backend(backend_type) - yolov3_head = get_yolov3_head_model() - yolov3_head.cpu().eval() - s = 128 - img_metas = [{ - 'scale_factor': np.ones(4), - 'pad_shape': (s, s, 3), - 'img_shape': (s, s, 3) - }] - - output_names = ['dets', 'labels'] - deploy_cfg = mmcv.Config( - dict( - backend_config=dict(type=backend_type.value), - onnx_config=dict(output_names=output_names, input_shape=None), - codebase_config=dict( - type='mmdet', - task='ObjectDetection', - post_processing=dict( - score_threshold=0.05, - iou_threshold=0.45, - confidence_threshold=0.005, - max_output_boxes_per_class=200, - pre_top_k=-1, - keep_top_k=100, - background_label_id=-1, - )))) - - seed_everything(1234) - pred_maps = [ - torch.rand(1, 27, 5, 5), - torch.rand(1, 27, 10, 10), - torch.rand(1, 27, 20, 20) - ] - # to get outputs of pytorch model - model_inputs = {'pred_maps': pred_maps, 'img_metas': img_metas} - model_outputs = get_model_outputs(yolov3_head, 'get_bboxes', model_inputs) - - # to get outputs of onnx model after rewrite - wrapped_model = WrapModel( - yolov3_head, 'get_bboxes', img_metas=img_metas, with_nms=True) - rewrite_inputs = { - 'pred_maps': pred_maps, - } - rewrite_outputs, is_backend_output = get_rewrite_outputs( - wrapped_model=wrapped_model, - model_inputs=rewrite_inputs, - deploy_cfg=deploy_cfg) - - if is_backend_output: - if isinstance(rewrite_outputs, dict): - rewrite_outputs = convert_to_list(rewrite_outputs, output_names) - for model_output, rewrite_output in zip(model_outputs[0], - rewrite_outputs): - model_output = model_output.squeeze().cpu().numpy() - rewrite_output = rewrite_output.squeeze() - # hard code to make two tensors with the same shape - # rewrite and original codes applied different nms strategy - assert np.allclose( - model_output[:rewrite_output.shape[0]], - rewrite_output, - rtol=1e-03, - atol=1e-05) - else: - assert rewrite_outputs is not None - - -def test_yolov3_head_get_bboxes_ncnn(): - """Test get_bboxes rewrite of yolov3 head.""" - backend_type = Backend.NCNN - check_backend(backend_type) - yolov3_head = get_yolov3_head_model() - yolov3_head.cpu().eval() - s = 128 - img_metas = [{ - 'scale_factor': np.ones(4), - 'pad_shape': (s, s, 3), - 'img_shape': (s, s, 3) - }] - - output_names = ['detection_output'] - deploy_cfg = mmcv.Config( - dict( - backend_config=dict(type=backend_type.value), - 
onnx_config=dict(output_names=output_names, input_shape=None), - codebase_config=dict( - type='mmdet', - model_type='ncnn_end2end', - task='ObjectDetection', - post_processing=dict( - score_threshold=0.05, - iou_threshold=0.45, - confidence_threshold=0.005, - max_output_boxes_per_class=200, - pre_top_k=-1, - keep_top_k=10, - background_label_id=-1, - )))) - - seed_everything(1234) - pred_maps = [ - torch.rand(1, 27, 5, 5), - torch.rand(1, 27, 10, 10), - torch.rand(1, 27, 20, 20) - ] - - # to get outputs of onnx model after rewrite - wrapped_model = WrapModel( - yolov3_head, 'get_bboxes', img_metas=img_metas[0], with_nms=True) - rewrite_inputs = { - 'pred_maps': pred_maps, - } - rewrite_outputs, is_backend_output = get_rewrite_outputs( - wrapped_model=wrapped_model, - model_inputs=rewrite_inputs, - deploy_cfg=deploy_cfg) - # output should be of shape [1, N, 6] - if is_backend_output: - assert rewrite_outputs[0].shape[-1] == 6 - else: - assert rewrite_outputs.shape[-1] == 6 - - -def get_yolox_head_model(): - """YOLOX Head Config.""" - test_cfg = mmcv.Config( - dict( - deploy_nms_pre=0, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100)) - - from mmdet.models.dense_heads import YOLOXHead - model = YOLOXHead(num_classes=4, in_channels=1, test_cfg=test_cfg) - - model.requires_grad_(False) - return model - - -@pytest.mark.parametrize('backend_type', - [Backend.ONNXRUNTIME, Backend.OPENVINO]) -def test_yolox_head_get_bboxes(backend_type: Backend): - """Test get_bboxes rewrite of YOLOXHead.""" - check_backend(backend_type) - yolox_head = get_yolox_head_model() - yolox_head.cpu().eval() - s = 128 - img_metas = [{ - 'scale_factor': np.ones(4), - 'pad_shape': (s, s, 3), - 'img_shape': (s, s, 3) - }] - output_names = ['dets', 'labels'] - deploy_cfg = mmcv.Config( - dict( - backend_config=dict(type=backend_type.value), - onnx_config=dict(output_names=output_names, input_shape=None), - codebase_config=dict( - type='mmdet', - task='ObjectDetection', - post_processing=dict( - score_threshold=0.05, - iou_threshold=0.5, - max_output_boxes_per_class=20, - pre_top_k=-1, - keep_top_k=10, - background_label_id=-1, - )))) - seed_everything(1234) - cls_scores = [ - torch.rand(1, yolox_head.num_classes, pow(2, i), pow(2, i)) - for i in range(3, 0, -1) - ] - seed_everything(5678) - bbox_preds = [ - torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(3, 0, -1) - ] - seed_everything(9101) - objectnesses = [ - torch.rand(1, 1, pow(2, i), pow(2, i)) for i in range(3, 0, -1) - ] - - # to get outputs of pytorch model - model_inputs = { - 'cls_scores': cls_scores, - 'bbox_preds': bbox_preds, - 'objectnesses': objectnesses, - 'img_metas': img_metas - } - model_outputs = get_model_outputs(yolox_head, 'get_bboxes', model_inputs) - - # to get outputs of onnx model after rewrite - wrapped_model = WrapModel( - yolox_head, 'get_bboxes', img_metas=img_metas, with_nms=True) - rewrite_inputs = { - 'cls_scores': cls_scores, - 'bbox_preds': bbox_preds, - 'objectnesses': objectnesses, - } - rewrite_outputs, is_backend_output = get_rewrite_outputs( - wrapped_model=wrapped_model, - model_inputs=rewrite_inputs, - deploy_cfg=deploy_cfg) - - if is_backend_output: - if isinstance(rewrite_outputs, dict): - rewrite_outputs = convert_to_list(rewrite_outputs, output_names) - for model_output, rewrite_output in zip(model_outputs[0], - rewrite_outputs): - model_output = model_output.squeeze().cpu().numpy() - rewrite_output = rewrite_output.squeeze().cpu().numpy() - # hard code to make two tensors 
with the same shape - # rewrite and original codes applied different nms strategy - min_shape = min(model_output.shape[0], rewrite_output.shape[0], 5) - assert np.allclose( - model_output[:min_shape], - rewrite_output[:min_shape], - rtol=1e-03, - atol=1e-05) - else: - assert rewrite_outputs is not None - - -def test_yolox_head_get_bboxes_ncnn(): - """Test get_bboxes rewrite of yolox head for ncnn.""" - backend_type = Backend.NCNN - check_backend(backend_type) - yolox_head = get_yolox_head_model() - yolox_head.cpu().eval() - s = 128 - img_metas = [{ - 'scale_factor': np.ones(4), - 'pad_shape': (s, s, 3), - 'img_shape': (s, s, 3) - }] - - output_names = ['detection_output'] - deploy_cfg = mmcv.Config( - dict( - backend_config=dict(type=backend_type.value), - onnx_config=dict(output_names=output_names, input_shape=None), - codebase_config=dict( - type='mmdet', - task='ObjectDetection', - post_processing=dict( - score_threshold=0.05, - iou_threshold=0.5, - max_output_boxes_per_class=20, - pre_top_k=5000, - keep_top_k=10, - background_label_id=0, - )))) - - seed_everything(1234) - cls_scores = [ - torch.rand(1, yolox_head.num_classes, pow(2, i), pow(2, i)) - for i in range(3, 0, -1) - ] - seed_everything(5678) - bbox_preds = [ - torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(3, 0, -1) - ] - seed_everything(9101) - objectnesses = [ - torch.rand(1, 1, pow(2, i), pow(2, i)) for i in range(3, 0, -1) - ] - - # to get outputs of onnx model after rewrite - wrapped_model = WrapModel(yolox_head, 'get_bboxes', img_metas=img_metas) - rewrite_inputs = { - 'cls_scores': cls_scores, - 'bbox_preds': bbox_preds, - 'objectnesses': objectnesses, - } - rewrite_outputs, is_backend_output = get_rewrite_outputs( - wrapped_model=wrapped_model, - model_inputs=rewrite_inputs, - deploy_cfg=deploy_cfg) - # output should be of shape [1, N, 6] - if is_backend_output: - assert rewrite_outputs[0].shape[-1] == 6 - else: - assert rewrite_outputs.shape[-1] == 6 - - -def get_vfnet_head_model(): +@pytest.fixture +def vfnet_head_model(): """VFNet Head Config.""" test_cfg = mmcv.Config( dict( @@ -1309,10 +1186,10 @@ def get_vfnet_head_model(): @pytest.mark.parametrize('backend_type', [Backend.OPENVINO, Backend.ONNXRUNTIME]) -def test_get_bboxes_of_vfnet_head(backend_type: Backend): +def test_get_bboxes_of_vfnet_head(backend_type: Backend, vfnet_head_model): """Test get_bboxes rewrite of VFNet head.""" check_backend(backend_type) - vfnet_head = get_vfnet_head_model() + vfnet_head = vfnet_head_model vfnet_head.cpu().eval() s = 16 img_metas = [{ @@ -1337,14 +1214,8 @@ def test_get_bboxes_of_vfnet_head(backend_type: Backend): background_label_id=-1, )))) - seed_everything(1234) - cls_score = [ - torch.rand(1, vfnet_head.num_classes, pow(2, i), pow(2, i)) - for i in range(5, 0, -1) - ] - seed_everything(5678) - bboxes = [torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(5, 0, -1)] - seed_everything(9101) + cls_score = get_head_inputs(1234, vfnet_head.num_classes, 5) + bboxes = get_head_inputs(5678, 4, 5) model_inputs = { 'cls_scores': cls_score, @@ -1400,392 +1271,419 @@ def get_deploy_cfg(backend_type: Backend, ir_type: str): )))) -@pytest.mark.parametrize('backend_type, ir_type', - [(Backend.ONNXRUNTIME, 'onnx'), - (Backend.OPENVINO, 'onnx'), - (Backend.TORCHSCRIPT, 'torchscript')]) -def test_base_dense_head_get_bboxes(backend_type: Backend, ir_type: str): - """Test get_bboxes rewrite of base dense head.""" - check_backend(backend_type) - anchor_head = get_anchor_head_model() - anchor_head.cpu().eval() - s = 128 - img_metas 
= [{ - 'scale_factor': np.ones(4), - 'pad_shape': (s, s, 3), - 'img_shape': (s, s, 3) - }] +class TestBaseDenseHead: - deploy_cfg = get_deploy_cfg(backend_type, ir_type) - output_names = get_ir_config(deploy_cfg).get('output_names', None) + @pytest.fixture(scope='class') + def anchor_head(self): + """AnchorHead Config.""" + test_cfg = mmcv.Config( + dict( + deploy_nms_pre=0, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) - # the cls_score's size: (1, 36, 32, 32), (1, 36, 16, 16), - # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2). - # the bboxes's size: (1, 36, 32, 32), (1, 36, 16, 16), - # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2) - seed_everything(1234) - cls_score = [ - torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1) - ] - seed_everything(5678) - bboxes = [torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1)] + from mmdet.models.dense_heads import AnchorHead + model = AnchorHead(num_classes=4, in_channels=1, test_cfg=test_cfg) + model.requires_grad_(False) - # to get outputs of pytorch model - model_inputs = { - 'cls_scores': cls_score, - 'bbox_preds': bboxes, - 'img_metas': img_metas - } - model_outputs = get_model_outputs(anchor_head, 'get_bboxes', model_inputs) + model.cpu().eval() + return model - # to get outputs of onnx model after rewrite - img_metas[0]['img_shape'] = torch.Tensor([s, s]) - wrapped_model = WrapModel( - anchor_head, 'get_bboxes', img_metas=img_metas, with_nms=True) - rewrite_inputs = { - 'cls_scores': cls_score, - 'bbox_preds': bboxes, - } - rewrite_outputs, is_backend_output = get_rewrite_outputs( - wrapped_model=wrapped_model, - model_inputs=rewrite_inputs, - deploy_cfg=deploy_cfg) - - if is_backend_output: - if isinstance(rewrite_outputs, dict): - rewrite_outputs = convert_to_list(rewrite_outputs, output_names) - for model_output, rewrite_output in zip(model_outputs[0], - rewrite_outputs): - model_output = model_output.squeeze().cpu().numpy() - rewrite_output = rewrite_output.squeeze() - # hard code to make two tensors with the same shape - # rewrite and original codes applied different nms strategy - assert np.allclose( - model_output[:rewrite_output.shape[0]], - rewrite_output, - rtol=1e-03, - atol=1e-05) - else: - assert rewrite_outputs is not None - - -def test_base_dense_head_get_bboxes__ncnn(): - """Test get_bboxes rewrite of base dense head.""" - backend_type = Backend.NCNN - check_backend(backend_type) - anchor_head = get_anchor_head_model() - anchor_head.cpu().eval() - s = 128 - img_metas = [{ - 'scale_factor': np.ones(4), - 'pad_shape': (s, s, 3), - 'img_shape': (s, s, 3) - }] - - output_names = ['output'] - deploy_cfg = mmcv.Config( - dict( - backend_config=dict(type=backend_type.value), - onnx_config=dict(output_names=output_names, input_shape=None), - codebase_config=dict( - type='mmdet', - task='ObjectDetection', - model_type='ncnn_end2end', - post_processing=dict( - score_threshold=0.05, - iou_threshold=0.5, - max_output_boxes_per_class=200, - pre_top_k=5000, - keep_top_k=100, - background_label_id=-1, - )))) - - # the cls_score's size: (1, 36, 32, 32), (1, 36, 16, 16), - # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2). 
- # the bboxes's size: (1, 36, 32, 32), (1, 36, 16, 16), - # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2) - seed_everything(1234) - cls_score = [ - torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1) - ] - seed_everything(5678) - bboxes = [torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1)] - - # to get outputs of onnx model after rewrite - img_metas[0]['img_shape'] = torch.Tensor([s, s]) - wrapped_model = WrapModel( - anchor_head, 'get_bboxes', img_metas=img_metas, with_nms=True) - rewrite_inputs = { - 'cls_scores': cls_score, - 'bbox_preds': bboxes, - } - rewrite_outputs, is_backend_output = get_rewrite_outputs( - wrapped_model=wrapped_model, - model_inputs=rewrite_inputs, - deploy_cfg=deploy_cfg) - - # output should be of shape [1, N, 6] - if is_backend_output: - rewrite_outputs = rewrite_outputs[0] - - assert rewrite_outputs.shape[-1] == 6 - - -@pytest.mark.parametrize('is_dynamic', [True, False]) -def test_ssd_head_get_bboxes__ncnn(is_dynamic: bool): - """Test get_bboxes rewrite of ssd head for ncnn.""" - check_backend(Backend.NCNN) - ssd_head = get_ssd_head_model() - ssd_head.cpu().eval() - s = 128 - img_metas = [{ - 'scale_factor': np.ones(4), - 'pad_shape': (s, s, 3), - 'img_shape': (s, s, 3) - }] - output_names = ['output'] - input_names = [] - for i in range(6): - input_names.append('cls_scores_' + str(i)) - input_names.append('bbox_preds_' + str(i)) - dynamic_axes = None - if is_dynamic: - dynamic_axes = { - output_names[0]: { - 1: 'num_dets', - } + @pytest.fixture(scope='class') + def rewrite_inputs(self): + # the cls_score's size: (1, 36, 32, 32), (1, 36, 16, 16), + # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2). + cls_score = get_head_inputs(1234, 36, 5) + bboxes = get_head_inputs(5678, 36, 5) + return { + 'cls_scores': cls_score, + 'bbox_preds': bboxes, } - for input_name in input_names: - dynamic_axes[input_name] = {2: 'height', 3: 'width'} - deploy_cfg = mmcv.Config( - dict( - backend_config=dict(type=Backend.NCNN.value), - onnx_config=dict( - input_names=input_names, - output_names=output_names, - input_shape=None, - dynamic_axes=dynamic_axes), - codebase_config=dict( - type='mmdet', - task='ObjectDetection', - model_type='ncnn_end2end', - post_processing=dict( - score_threshold=0.05, - iou_threshold=0.5, - max_output_boxes_per_class=200, - pre_top_k=5000, - keep_top_k=100, - background_label_id=-1, - )))) - # For the ssd_head: - # the cls_score's size: (1, 30, 20, 20), (1, 30, 10, 10), - # (1, 30, 5, 5), (1, 30, 3, 3), (1, 30, 2, 2), (1, 30, 1, 1) - # the bboxes's size: (1, 24, 20, 20), (1, 24, 10, 10), - # (1, 24, 5, 5), (1, 24, 3, 3), (1, 24, 2, 2), (1, 24, 1, 1) + @pytest.fixture(scope='class') + def img_metas(self): + s = 128 + img_metas = [{ + 'scale_factor': np.ones(4), + 'pad_shape': (s, s, 3), + 'img_shape': (s, s, 3) + }] + img_metas[0]['img_shape'] = torch.tensor([s, s]) + + return img_metas + + @pytest.fixture(scope='class') + def wrapped_model(self, anchor_head, img_metas): + return WrapModel( + anchor_head, 'get_bboxes', img_metas=img_metas, with_nms=True) + + @pytest.fixture(scope='class') + def model_outputs(self, rewrite_inputs, anchor_head, img_metas): + model_inputs = {'img_metas': img_metas} + model_inputs.update(rewrite_inputs) + return get_model_outputs(anchor_head, 'get_bboxes', model_inputs) + + @pytest.mark.parametrize('backend_type, ir_type', + [(Backend.ONNXRUNTIME, 'onnx'), + (Backend.OPENVINO, 'onnx'), + (Backend.TORCHSCRIPT, 'torchscript')]) + def test_base_dense_head_get_bboxes(self, backend_type: Backend, + ir_type: 
str, rewrite_inputs, + wrapped_model, model_outputs): + """Test get_bboxes rewrite of base dense head.""" + check_backend(backend_type) + deploy_cfg = get_deploy_cfg(backend_type, ir_type) + output_names = get_ir_config(deploy_cfg).get('output_names', None) + + # to get outputs of onnx model after rewrite + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg) + + if is_backend_output: + if isinstance(rewrite_outputs, dict): + rewrite_outputs = convert_to_list(rewrite_outputs, + output_names) + for model_output, rewrite_output in zip(model_outputs[0], + rewrite_outputs): + model_output = model_output.squeeze().cpu().numpy() + rewrite_output = rewrite_output.squeeze() + # hard code to make two tensors with the same shape + # rewrite and original codes applied different nms strategy + assert np.allclose( + model_output[:rewrite_output.shape[0]], + rewrite_output, + rtol=1e-03, + atol=1e-05) + else: + assert rewrite_outputs is not None + + def test_base_dense_head_get_bboxes__ncnn(self, rewrite_inputs, + wrapped_model): + """Test get_bboxes rewrite of base dense head.""" + backend_type = Backend.NCNN + check_backend(backend_type) + + output_names = ['output'] + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type=backend_type.value), + onnx_config=dict(output_names=output_names, input_shape=None), + codebase_config=dict( + type='mmdet', + task='ObjectDetection', + model_type='ncnn_end2end', + post_processing=dict( + score_threshold=0.05, + iou_threshold=0.5, + max_output_boxes_per_class=200, + pre_top_k=5000, + keep_top_k=100, + background_label_id=-1, + )))) + + # to get outputs of onnx model after rewrite + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg) + + # output should be of shape [1, N, 6] + if is_backend_output: + rewrite_outputs = rewrite_outputs[0] + + assert rewrite_outputs.shape[-1] == 6 + + +class TestSSDHead: + feat_shape = [20, 10, 5, 3, 2, 1] num_prior = 6 - seed_everything(1234) - cls_score = [ - torch.rand(1, 30, feat_shape[i], feat_shape[i]) - for i in range(num_prior) - ] - seed_everything(5678) - bboxes = [ - torch.rand(1, 24, feat_shape[i], feat_shape[i]) - for i in range(num_prior) - ] - # to get outputs of onnx model after rewrite - img_metas[0]['img_shape'] = torch.tensor([s, s]) if is_dynamic else [s, s] - wrapped_model = WrapModel( - ssd_head, 'get_bboxes', img_metas=img_metas, with_nms=True) - rewrite_inputs = { - 'cls_scores': cls_score, - 'bbox_preds': bboxes, - } - rewrite_outputs, is_backend_output = get_rewrite_outputs( - wrapped_model=wrapped_model, - model_inputs=rewrite_inputs, - deploy_cfg=deploy_cfg) + @pytest.fixture(scope='class') + def ssd_head(self): + """SSDHead Config.""" + test_cfg = mmcv.Config( + dict( + nms_pre=1000, + nms=dict(type='nms', iou_threshold=0.45), + min_bbox_size=0, + score_thr=0.02, + max_per_img=200)) - # output should be of shape [1, N, 6] - if is_backend_output: - rewrite_outputs = rewrite_outputs[0] + from mmdet.models import SSDHead + model = SSDHead( + in_channels=(96, 1280, 512, 256, 256, 128), + num_classes=4, + use_depthwise=True, + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + act_cfg=dict(type='ReLU6'), + init_cfg=dict(type='Normal', layer='Conv2d', std=0.001), + anchor_generator=dict( + type='SSDAnchorGenerator', + scale_major=False, + strides=[16, 32, 64, 107, 160, 320], + ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 
3], [2, 3]], + min_sizes=[48, 100, 150, 202, 253, 304], + max_sizes=[100, 150, 202, 253, 304, 320]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + test_cfg=test_cfg) - assert rewrite_outputs.shape[-1] == 6 + model.requires_grad_(False) + + model.cpu().eval() + return model + + @pytest.fixture(scope='class') + def rewrite_inputs(self): + # the cls_score's size: (1, 30, 20, 20), (1, 30, 10, 10), + # (1, 30, 5, 5), (1, 30, 3, 3), (1, 30, 2, 2), (1, 30, 1, 1) + seed_everything(1234) + cls_scores = [ + torch.rand(1, 30, self.feat_shape[i], self.feat_shape[i]) + for i in range(self.num_prior) + ] + seed_everything(5678) + bbox_preds = [ + torch.rand(1, 24, self.feat_shape[i], self.feat_shape[i]) + for i in range(self.num_prior) + ] + + return dict(cls_scores=cls_scores, bbox_preds=bbox_preds) + + @pytest.fixture(scope='class') + def img_metas(self): + s = 128 + img_metas = [{ + 'scale_factor': np.ones(4), + 'pad_shape': (s, s, 3), + 'img_shape': (s, s, 3) + }] + + return img_metas + + @pytest.mark.parametrize('is_dynamic', [True, False]) + def test_ssd_head_get_bboxes__ncnn(self, is_dynamic: bool, ssd_head, + rewrite_inputs, img_metas): + """Test get_bboxes rewrite of ssd head for ncnn.""" + check_backend(Backend.NCNN) + output_names = ['output'] + input_names = [] + for i in range(6): + input_names.append('cls_scores_' + str(i)) + input_names.append('bbox_preds_' + str(i)) + dynamic_axes = None + if is_dynamic: + dynamic_axes = { + output_names[0]: { + 1: 'num_dets', + } + } + for input_name in input_names: + dynamic_axes[input_name] = {2: 'height', 3: 'width'} + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type=Backend.NCNN.value), + onnx_config=dict( + input_names=input_names, + output_names=output_names, + input_shape=None, + dynamic_axes=dynamic_axes), + codebase_config=dict( + type='mmdet', + task='ObjectDetection', + model_type='ncnn_end2end', + post_processing=dict( + score_threshold=0.05, + iou_threshold=0.5, + max_output_boxes_per_class=200, + pre_top_k=5000, + keep_top_k=100, + background_label_id=-1, + )))) + + img_metas = copy.deepcopy(img_metas) + s = 128 + img_metas[0]['img_shape'] = torch.tensor([s, s + ]) if is_dynamic else [s, s] + wrapped_model = WrapModel( + ssd_head, 'get_bboxes', img_metas=img_metas, with_nms=True) + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg) + + # output should be of shape [1, N, 6] + if is_backend_output: + rewrite_outputs = rewrite_outputs[0] + + assert rewrite_outputs.shape[-1] == 6 + + @backend_checker(Backend.RKNN) + def test_base_dense_head_get_bboxes__rknn(self, ssd_head, rewrite_inputs, + img_metas): + """Test get_bboxes rewrite of ssd head for rknn.""" + output_names = ['output'] + input_names = [] + for i in range(6): + input_names.append('cls_scores_' + str(i)) + input_names.append('bbox_preds_' + str(i)) + dynamic_axes = None + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type=Backend.RKNN.value), + onnx_config=dict( + input_names=input_names, + output_names=output_names, + input_shape=None, + dynamic_axes=dynamic_axes), + codebase_config=dict( + type='mmdet', + task='ObjectDetection', + model_type='rknn', + post_processing=dict( + score_threshold=0.05, + iou_threshold=0.5, + max_output_boxes_per_class=200, + pre_top_k=5000, + keep_top_k=100, + background_label_id=-1, + )))) + + # to get outputs of onnx model after rewrite + img_metas = 
copy.deepcopy(img_metas) + s = 128 + img_metas[0]['img_shape'] = [s, s] + wrapped_model = WrapModel( + ssd_head, 'get_bboxes', img_metas=img_metas, with_nms=True) + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg, + run_with_backend=False) + + # output should be of shape [1, N, 4] + assert rewrite_outputs[0].shape[-1] == 4 -@backend_checker(Backend.RKNN) -def test_base_dense_head_get_bboxes__rknn(): - """Test get_bboxes rewrite of ssd head for rknn.""" - ssd_head = get_ssd_head_model() - ssd_head.cpu().eval() - s = 128 - img_metas = [{ - 'scale_factor': np.ones(4), - 'pad_shape': (s, s, 3), - 'img_shape': (s, s, 3) - }] - output_names = ['output'] - input_names = [] - for i in range(6): - input_names.append('cls_scores_' + str(i)) - input_names.append('bbox_preds_' + str(i)) - dynamic_axes = None - deploy_cfg = mmcv.Config( - dict( - backend_config=dict(type=Backend.RKNN.value), - onnx_config=dict( - input_names=input_names, - output_names=output_names, - input_shape=None, - dynamic_axes=dynamic_axes), - codebase_config=dict( - type='mmdet', - task='ObjectDetection', - model_type='rknn', - post_processing=dict( - score_threshold=0.05, - iou_threshold=0.5, - max_output_boxes_per_class=200, - pre_top_k=5000, - keep_top_k=100, - background_label_id=-1, - )))) +class TestReppointsHead: - # For the ssd_head: - # the cls_score's size: (1, 30, 20, 20), (1, 30, 10, 10), - # (1, 30, 5, 5), (1, 30, 3, 3), (1, 30, 2, 2), (1, 30, 1, 1) - # the bboxes's size: (1, 24, 20, 20), (1, 24, 10, 10), - # (1, 24, 5, 5), (1, 24, 3, 3), (1, 24, 2, 2), (1, 24, 1, 1) - feat_shape = [20, 10, 5, 3, 2, 1] - num_prior = 6 - seed_everything(1234) - cls_score = [ - torch.rand(1, 30, feat_shape[i], feat_shape[i]) - for i in range(num_prior) - ] - seed_everything(5678) - bboxes = [ - torch.rand(1, 24, feat_shape[i], feat_shape[i]) - for i in range(num_prior) - ] + @pytest.fixture(scope='class') + def dense_head(self): + """Reppoints Head Config.""" + test_cfg = mmcv.Config( + dict( + deploy_nms_pre=0, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) - # to get outputs of onnx model after rewrite - img_metas[0]['img_shape'] = [s, s] - wrapped_model = WrapModel( - ssd_head, 'get_bboxes', img_metas=img_metas, with_nms=True) - rewrite_inputs = { - 'cls_scores': cls_score, - 'bbox_preds': bboxes, - } - rewrite_outputs, is_backend_output = get_rewrite_outputs( - wrapped_model=wrapped_model, - model_inputs=rewrite_inputs, - deploy_cfg=deploy_cfg, - run_with_backend=False) + from mmdet.models.dense_heads import RepPointsHead + model = RepPointsHead(num_classes=4, in_channels=1, test_cfg=test_cfg) - # output should be of shape [1, N, 4] - assert rewrite_outputs[0].shape[-1] == 4 + model.requires_grad_(False) + model.cpu().eval() + return model + @pytest.mark.parametrize('backend_type, ir_type', + [(Backend.OPENVINO, 'onnx')]) + def test_reppoints_head_get_bboxes(self, backend_type: Backend, + ir_type: str, dense_head): + """Test get_bboxes rewrite of base dense head.""" + check_backend(backend_type) + s = 128 + img_metas = [{ + 'scale_factor': np.ones(4), + 'pad_shape': (s, s, 3), + 'img_shape': (s, s, 3) + }] -@pytest.mark.parametrize('backend_type, ir_type', [(Backend.OPENVINO, 'onnx')]) -def test_reppoints_head_get_bboxes(backend_type: Backend, ir_type: str): - """Test get_bboxes rewrite of base dense head.""" - check_backend(backend_type) - dense_head = get_reppoints_head_model() - 
dense_head.cpu().eval() - s = 128 - img_metas = [{ - 'scale_factor': np.ones(4), - 'pad_shape': (s, s, 3), - 'img_shape': (s, s, 3) - }] + deploy_cfg = get_deploy_cfg(backend_type, ir_type) + output_names = get_ir_config(deploy_cfg).get('output_names', None) - deploy_cfg = get_deploy_cfg(backend_type, ir_type) - output_names = get_ir_config(deploy_cfg).get('output_names', None) + # the cls_score's size: (1, 4, 32, 32), (1, 4, 16, 16), + # (1, 4, 8, 8), (1, 4, 4, 4), (1, 4, 2, 2). + # the bboxes's size: (1, 4, 32, 32), (1, 4, 16, 16), + # (1, 4, 8, 8), (1, 4, 4, 4), (1, 4, 2, 2) + cls_score = get_head_inputs(1234, 4, 5) + bboxes = get_head_inputs(5678, 4, 5) - # the cls_score's size: (1, 4, 32, 32), (1, 4, 16, 16), - # (1, 4, 8, 8), (1, 4, 4, 4), (1, 4, 2, 2). - # the bboxes's size: (1, 4, 32, 32), (1, 4, 16, 16), - # (1, 4, 8, 8), (1, 4, 4, 4), (1, 4, 2, 2) - seed_everything(1234) - cls_score = [ - torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(5, 0, -1) - ] - seed_everything(5678) - bboxes = [torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(5, 0, -1)] + # to get outputs of pytorch model + model_inputs = { + 'cls_scores': cls_score, + 'bbox_preds': bboxes, + 'img_metas': img_metas + } + model_outputs = get_model_outputs(dense_head, 'get_bboxes', + model_inputs) - # to get outputs of pytorch model - model_inputs = { - 'cls_scores': cls_score, - 'bbox_preds': bboxes, - 'img_metas': img_metas - } - model_outputs = get_model_outputs(dense_head, 'get_bboxes', model_inputs) + # to get outputs of onnx model after rewrite + img_metas[0]['img_shape'] = torch.Tensor([s, s]) + wrapped_model = WrapModel( + dense_head, 'get_bboxes', img_metas=img_metas, with_nms=True) + rewrite_inputs = { + 'cls_scores': cls_score, + 'bbox_preds': bboxes, + } + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg) - # to get outputs of onnx model after rewrite - img_metas[0]['img_shape'] = torch.Tensor([s, s]) - wrapped_model = WrapModel( - dense_head, 'get_bboxes', img_metas=img_metas, with_nms=True) - rewrite_inputs = { - 'cls_scores': cls_score, - 'bbox_preds': bboxes, - } - rewrite_outputs, is_backend_output = get_rewrite_outputs( - wrapped_model=wrapped_model, - model_inputs=rewrite_inputs, - deploy_cfg=deploy_cfg) + if is_backend_output: + if isinstance(rewrite_outputs, dict): + rewrite_outputs = convert_to_list(rewrite_outputs, + output_names) + for model_output, rewrite_output in zip(model_outputs[0], + rewrite_outputs): + model_output = model_output.squeeze().cpu().numpy() + rewrite_output = rewrite_output.squeeze() + # hard code to make two tensors with the same shape + # rewrite and original codes applied different nms strategy + assert np.allclose( + model_output[:rewrite_output.shape[0]], + rewrite_output, + rtol=1e-03, + atol=1e-05) + else: + assert rewrite_outputs is not None - if is_backend_output: - if isinstance(rewrite_outputs, dict): - rewrite_outputs = convert_to_list(rewrite_outputs, output_names) - for model_output, rewrite_output in zip(model_outputs[0], - rewrite_outputs): - model_output = model_output.squeeze().cpu().numpy() - rewrite_output = rewrite_output.squeeze() - # hard code to make two tensors with the same shape - # rewrite and original codes applied different nms strategy - assert np.allclose( - model_output[:rewrite_output.shape[0]], - rewrite_output, - rtol=1e-03, - atol=1e-05) - else: - assert rewrite_outputs is not None + @pytest.mark.parametrize('backend_type', 
[Backend.OPENVINO])
+    def test_reppoints_head_points2bbox(self, backend_type: Backend,
+                                        dense_head):
+        """Test points2bbox rewrite of RepPoints head."""
+        check_backend(backend_type)
+        output_names = ['output']
+        deploy_cfg = mmcv.Config(
+            dict(
+                backend_config=dict(type=backend_type.value),
+                onnx_config=dict(
+                    input_shape=None,
+                    input_names=['pts'],
+                    output_names=output_names)))
 
-@pytest.mark.parametrize('backend_type, ir_type', [(Backend.OPENVINO, 'onnx')])
-def test_reppoints_head_points2bbox(backend_type: Backend, ir_type: str):
-    """Test get_bboxes rewrite of base dense head."""
-    check_backend(backend_type)
-    dense_head = get_reppoints_head_model()
-    dense_head.cpu().eval()
-    output_names = ['output']
+        # the pts input has shape (1, 18, 16, 16):
+        # 9 offset points x 2 coordinates per spatial location
+        seed_everything(1234)
+        pts = torch.rand(1, 18, 16, 16)
 
-    deploy_cfg = mmcv.Config(
-        dict(
-            backend_config=dict(type=backend_type.value),
-            onnx_config=dict(
-                input_shape=None,
-                input_names=['pts'],
-                output_names=output_names)))
-
-    # the cls_score's size: (1, 4, 32, 32), (1, 4, 16, 16),
-    # (1, 4, 8, 8), (1, 4, 4, 4), (1, 4, 2, 2).
-    # the bboxes's size: (1, 4, 32, 32), (1, 4, 16, 16),
-    # (1, 4, 8, 8), (1, 4, 4, 4), (1, 4, 2, 2)
-    seed_everything(1234)
-    pts = torch.rand(1, 18, 16, 16)
-
-    # to get outputs of onnx model after rewrite
-    wrapped_model = WrapModel(dense_head, 'points2bbox', y_first=True)
-    rewrite_inputs = {'pts': pts}
-    _ = get_rewrite_outputs(
-        wrapped_model=wrapped_model,
-        model_inputs=rewrite_inputs,
-        deploy_cfg=deploy_cfg)
+        # to get outputs of onnx model after rewrite
+        wrapped_model = WrapModel(dense_head, 'points2bbox', y_first=True)
+        rewrite_inputs = {'pts': pts}
+        _ = get_rewrite_outputs(
+            wrapped_model=wrapped_model,
+            model_inputs=rewrite_inputs,
+            deploy_cfg=deploy_cfg)
 
 
 @pytest.mark.skipif(
@@ -1905,10 +1803,11 @@ def test_mlvl_point_generator__single_level_grid_priors__tensorrt(
 
 @pytest.mark.parametrize('backend_type, ir_type',
                          [(Backend.ONNXRUNTIME, 'onnx')])
-def test_detrhead_get_bboxes(backend_type: Backend, ir_type: str):
+def test_detrhead_get_bboxes(backend_type: Backend, ir_type: str,
+                             detrhead_model):
     """Test get_bboxes rewrite of base dense head."""
     check_backend(backend_type)
-    dense_head = get_detrhead_model()
+    dense_head = detrhead_model
    dense_head.cpu().eval()
    s = 128
    img_metas = [{
diff --git a/tests/test_codebase/test_mmdet/test_mmdet_utils.py b/tests/test_codebase/test_mmdet/test_mmdet_utils.py
index 3ab8a4c89..76c690de0 100644
--- a/tests/test_codebase/test_mmdet/test_mmdet_utils.py
+++ b/tests/test_codebase/test_mmdet/test_mmdet_utils.py
@@ -1,17 +1,8 @@
 # Copyright (c) OpenMMLab. All rights reserved. 
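+# NOTE: the module-level import_codebase(Codebase.MMDET) guard is removed
+# below; it is assumed to be handled once in a shared conftest so these
+# tests still skip cleanly when mmdet is not installed.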
import mmcv import numpy as np -import pytest import torch -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Codebase - -try: - import_codebase(Codebase.MMDET) -except ImportError: - pytest.skip(f'{Codebase.MMDET} is not installed.', allow_module_level=True) - from mmdeploy.codebase.mmdet.deploy import (clip_bboxes, get_post_processing_params, pad_with_value, @@ -45,22 +36,20 @@ def test_pad_with_value_if_necessary(): assert np.allclose(padded_x.sum(), x.sum(), rtol=1e-03, atol=1e-05) -config_with_mmdet_params = mmcv.Config( - dict( - codebase_config=dict( - type='mmdet', - task='ObjectDetection', - post_processing=dict( - score_threshold=0.05, - iou_threshold=0.5, - max_output_boxes_per_class=200, - pre_top_k=-1, - keep_top_k=100, - background_label_id=-1, - )))) - - def test_get_mmdet_params(): + config_with_mmdet_params = mmcv.Config( + dict( + codebase_config=dict( + type='mmdet', + task='ObjectDetection', + post_processing=dict( + score_threshold=0.05, + iou_threshold=0.5, + max_output_boxes_per_class=200, + pre_top_k=-1, + keep_top_k=100, + background_label_id=-1, + )))) assert get_post_processing_params(config_with_mmdet_params) == dict( score_threshold=0.05, iou_threshold=0.5, diff --git a/tests/test_codebase/test_mmdet/test_object_detection.py b/tests/test_codebase/test_mmdet/test_object_detection.py index d67e60116..24829971c 100644 --- a/tests/test_codebase/test_mmdet/test_object_detection.py +++ b/tests/test_codebase/test_mmdet/test_object_detection.py @@ -1,7 +1,6 @@ # Copyright (c) OpenMMLab. All rights reserved. import copy import os -from tempfile import NamedTemporaryFile, TemporaryDirectory from typing import Any import mmcv @@ -11,50 +10,62 @@ import torch from torch.utils.data import DataLoader from torch.utils.data.dataset import Dataset -import mmdeploy.backend.onnxruntime as ort_apis from mmdeploy.apis import build_task_processor -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Codebase, load_config +from mmdeploy.utils import load_config from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper -try: - import_codebase(Codebase.MMDET) -except ImportError: - pytest.skip(f'{Codebase.MMDET} is not installed.', allow_module_level=True) - model_cfg_path = 'tests/test_codebase/test_mmdet/data/model.py' -model_cfg = load_config(model_cfg_path)[0] -deploy_cfg = mmcv.Config( - dict( - backend_config=dict(type='onnxruntime'), - codebase_config=dict( - type='mmdet', - task='ObjectDetection', - post_processing=dict( - score_threshold=0.05, - confidence_threshold=0.005, # for YOLOv3 - iou_threshold=0.5, - max_output_boxes_per_class=200, - pre_top_k=5000, - keep_top_k=100, - background_label_id=-1, - )), - onnx_config=dict( - type='onnx', - export_params=True, - keep_initializers_as_inputs=False, - opset_version=11, - input_shape=None, - input_names=['input'], - output_names=['dets', 'labels']))) -onnx_file = NamedTemporaryFile(suffix='.onnx').name -task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu') -img_shape = (32, 32) -img = np.random.rand(*img_shape, 3) + + +@pytest.fixture(scope='module') +def model_cfg(): + return load_config(model_cfg_path)[0] + + +@pytest.fixture(scope='module') +def deploy_cfg(): + return mmcv.Config( + dict( + backend_config=dict(type='onnxruntime'), + codebase_config=dict( + type='mmdet', + task='ObjectDetection', + post_processing=dict( + score_threshold=0.05, + confidence_threshold=0.005, # for YOLOv3 + iou_threshold=0.5, + max_output_boxes_per_class=200, + pre_top_k=5000, + 
keep_top_k=100,
+                    background_label_id=-1,
+                )),
+            onnx_config=dict(
+                type='onnx',
+                export_params=True,
+                keep_initializers_as_inputs=False,
+                opset_version=11,
+                input_shape=None,
+                input_names=['input'],
+                output_names=['dets', 'labels'])))
+
+
+@pytest.fixture(scope='module')
+def task_processor(model_cfg, deploy_cfg):
+    return build_task_processor(model_cfg, deploy_cfg, 'cpu')
+
+
+@pytest.fixture(scope='module')
+def img_shape():
+    return (32, 32)
+
+
+@pytest.fixture(scope='module')
+def img(img_shape):
+    return np.random.rand(*img_shape, 3)
 
 
 @pytest.mark.parametrize('from_mmrazor', [True, False, '123', 0])
-def test_init_pytorch_model(from_mmrazor: Any):
+def test_init_pytorch_model(from_mmrazor: Any, deploy_cfg, task_processor):
     from mmdet.models import BaseDetector
     if from_mmrazor is False:
         _task_processor = task_processor
@@ -84,19 +95,16 @@
     assert isinstance(model, BaseDetector)
 
 
-@pytest.fixture
-def backend_model():
+@pytest.fixture(scope='module')
+def backend_model(task_processor):
     from mmdeploy.backend.onnxruntime import ORTWrapper
-    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
-    wrapper = SwitchBackendWrapper(ORTWrapper)
-    wrapper.set(outputs={
-        'dets': torch.rand(1, 10, 5),
-        'labels': torch.rand(1, 10)
-    })
+    with SwitchBackendWrapper(ORTWrapper) as wrapper:
+        wrapper.set(outputs={
+            'dets': torch.rand(1, 10, 5),
+            'labels': torch.rand(1, 10)
+        })
 
-    yield task_processor.init_backend_model([''])
-
-    wrapper.recover()
+        yield task_processor.init_backend_model([''])
 
 
 def test_init_backend_model(backend_model):
@@ -122,20 +130,25 @@
     f'did not match actual shape {actual_shape}.'
 
 
+@pytest.fixture(scope='module')
+def model_inputs(task_processor, img):
+    return task_processor.create_input(img, input_shape=img.shape[:2])
+
+
 @pytest.mark.parametrize('device', ['cpu', 'cuda:0'])
-def test_create_input(device):
+def test_create_input(device, task_processor, img):
     if device == 'cuda:0' and not torch.cuda.is_available():
         pytest.skip('cuda is not available')
     original_device = task_processor.device
     task_processor.device = device
-    inputs = task_processor.create_input(img, input_shape=img_shape)
+    # call create_input here rather than reusing the cached module-scoped
+    # fixture, so the parametrized device actually takes effect
+    inputs = task_processor.create_input(img, input_shape=img.shape[:2])
     assert len(inputs) == 2
     task_processor.device = original_device
 
 
-def test_run_inference(backend_model):
+def test_run_inference(backend_model, task_processor, model_inputs):
     torch_model = task_processor.init_pytorch_model(None)
-    input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
+    input_dict, _ = model_inputs
     torch_results = task_processor.run_inference(torch_model, input_dict)
     backend_results = task_processor.run_inference(backend_model, input_dict)
     assert torch_results is not None
@@ -143,18 +156,17 @@
     assert len(torch_results[0]) == len(backend_results[0])
 
 
-def test_visualize(backend_model):
-    input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
+def test_visualize(backend_model, task_processor, img, tmp_path, model_inputs):
+    input_dict, _ = model_inputs
     results = task_processor.run_inference(backend_model, input_dict)
-    with TemporaryDirectory() as dir:
-        filename = dir + 'tmp.jpg'
-        task_processor.visualize(backend_model, img, results[0], filename, '')
-        assert os.path.exists(filename)
+    filename = str(tmp_path / 'tmp.jpg')
+    task_processor.visualize(backend_model, img, results[0], filename, '')
+    assert os.path.exists(filename)
 
 
@pytest.mark.parametrize('partition_type', ['single_stage', 
'two_stage'])
 # Currently only mmdet implements get_partition_cfg
-def test_get_partition_cfg(partition_type):
+def test_get_partition_cfg(partition_type, task_processor):
     from mmdeploy.codebase.mmdet.deploy.model_partition_cfg import \
         MMDET_PARTITION_CFG
     partition_cfg = task_processor.get_partition_cfg(
@@ -162,13 +174,13 @@
     assert partition_cfg == MMDET_PARTITION_CFG[partition_type]
 
 
-def test_get_tensort_from_input():
+def test_get_tensor_from_input(task_processor):
     input_data = {'img': [torch.ones(3, 4, 5)]}
     inputs = task_processor.get_tensor_from_input(input_data)
     assert torch.equal(inputs, torch.ones(3, 4, 5))
 
 
-def test_build_dataset_and_dataloader():
+def test_build_dataset_and_dataloader(model_cfg, task_processor):
     dataset = task_processor.build_dataset(
         dataset_cfg=model_cfg, dataset_type='test')
     assert isinstance(dataset, Dataset), 'Failed to build dataset'
@@ -176,7 +188,7 @@
     assert isinstance(dataloader, DataLoader), 'Failed to build dataloader'
 
 
-def test_single_gpu_test_and_evaluate():
+def test_single_gpu_test_and_evaluate(model_cfg, task_processor, tmp_path):
     from mmcv.parallel import MMDataParallel
 
     class DummyDataset(Dataset):
@@ -203,6 +215,6 @@
     # Run test
     outputs = task_processor.single_gpu_test(model, dataloader)
     assert isinstance(outputs, list)
-    output_file = NamedTemporaryFile(suffix='.pkl').name
+    output_file = str(tmp_path / 'tmp.pkl')
     task_processor.evaluate_outputs(
         model_cfg, outputs, dataset, 'bbox', out=output_file, format_only=True)
diff --git a/tests/test_codebase/test_mmdet/test_object_detection_model.py b/tests/test_codebase/test_mmdet/test_object_detection_model.py
index 498de681f..191027b3d 100644
--- a/tests/test_codebase/test_mmdet/test_object_detection_model.py
+++ b/tests/test_codebase/test_mmdet/test_object_detection_model.py
@@ -1,6 +1,5 @@
 # Copyright (c) OpenMMLab. All rights reserved. 
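+# NOTE: the fixtures below use SwitchBackendWrapper as a context manager,
+# which is assumed to restore the original ORTWrapper on exit, replacing
+# the explicit setup_class/teardown_class pairs with wrapper.recover().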
import os.path as osp -from tempfile import NamedTemporaryFile from typing import Sequence import mmcv @@ -8,18 +7,10 @@ import numpy as np import pytest import torch -import mmdeploy.backend.ncnn as ncnn_apis import mmdeploy.backend.onnxruntime as ort_apis -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Backend, Codebase -from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker - -try: - import_codebase(Codebase.MMDET) -except ImportError: - pytest.skip(f'{Codebase.MMDET} is not installed.', allow_module_level=True) - from mmdeploy.codebase.mmdet.deploy.object_detection_model import End2EndModel +from mmdeploy.utils import Backend +from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker def assert_det_results(results, module_name: str = 'model'): @@ -43,35 +34,31 @@ def assert_forward_results(results, module_name: str = 'model'): @backend_checker(Backend.ONNXRUNTIME) class TestEnd2EndModel: - @classmethod - def setup_class(cls): + @pytest.fixture(scope='class') + def end2end_model(self): # force add backend wrapper regardless of plugins # make sure ONNXRuntimeDetector can use ORTWrapper inside itself from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) # simplify backend inference - cls.wrapper = SwitchBackendWrapper(ORTWrapper) - cls.outputs = { - 'dets': torch.rand(1, 10, 5), - 'labels': torch.rand(1, 10) - } - cls.wrapper.set(outputs=cls.outputs) - deploy_cfg = mmcv.Config( - {'onnx_config': { - 'output_names': ['dets', 'labels'] - }}) + with SwitchBackendWrapper(ORTWrapper) as wrapper: + outputs = { + 'dets': torch.rand(1, 10, 5), + 'labels': torch.rand(1, 10) + } + wrapper.set(outputs=outputs) + deploy_cfg = mmcv.Config( + {'onnx_config': { + 'output_names': ['dets', 'labels'] + }}) - from mmdeploy.codebase.mmdet.deploy.object_detection_model import \ - End2EndModel - cls.end2end_model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu', - ['' for i in range(80)], deploy_cfg) + from mmdeploy.codebase.mmdet.deploy.object_detection_model import \ + End2EndModel + model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu', + ['' for i in range(80)], deploy_cfg) + yield model - @classmethod - def teardown_class(cls): - cls.wrapper.recover() - - def test_forward(self): + def test_forward(self, end2end_model): imgs = [torch.rand(1, 3, 64, 64)] img_metas = [[{ 'ori_shape': [64, 64, 3], @@ -79,15 +66,15 @@ class TestEnd2EndModel: 'scale_factor': [1, 1, 1, 1], 'border': [0, 0, 0] }]] - results = self.end2end_model.forward(imgs, img_metas) + results = end2end_model.forward(imgs, img_metas) assert_forward_results(results, 'End2EndModel') - def test_show_result(self): + def test_show_result(self, end2end_model, tmp_path): input_img = np.zeros([64, 64, 3]) - img_path = NamedTemporaryFile(suffix='.jpg').name + img_path = str(tmp_path / 'tmp.jpg') result = (torch.rand(1, 10, 5), torch.rand(1, 10)) - self.end2end_model.show_result( + end2end_model.show_result( input_img, result, '', show=False, out_file=img_path) assert osp.exists(img_path) @@ -95,55 +82,52 @@ class TestEnd2EndModel: @backend_checker(Backend.ONNXRUNTIME) class TestMaskEnd2EndModel: - @classmethod - def setup_class(cls): + @pytest.fixture(scope='class') + def end2end_model(self): # force add backend wrapper regardless of plugins # make sure ONNXRuntimeDetector can use ORTWrapper inside itself from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) # simplify backend inference 
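+        # the 28x28 mask outputs faked below mirror the typical mask-head
+        # resolution (e.g. Mask R-CNN), so postprocessing sees realistic shapes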
num_classes = 80 num_dets = 10 - cls.wrapper = SwitchBackendWrapper(ORTWrapper) - cls.outputs = { - 'dets': torch.rand(1, num_dets, 5), - 'labels': torch.randint(num_classes, (1, num_dets)), - 'masks': torch.rand(1, num_dets, 28, 28) - } - cls.wrapper.set(outputs=cls.outputs) - deploy_cfg = mmcv.Config({ - 'onnx_config': { - 'output_names': ['dets', 'labels', 'masks'] - }, - 'codebase_config': { - 'post_processing': { - 'export_postprocess_mask': False - } + with SwitchBackendWrapper(ORTWrapper) as wrapper: + outputs = { + 'dets': torch.rand(1, num_dets, 5), + 'labels': torch.randint(num_classes, (1, num_dets)), + 'masks': torch.rand(1, num_dets, 28, 28) } - }) + wrapper.set(outputs=outputs) + deploy_cfg = mmcv.Config({ + 'onnx_config': { + 'output_names': ['dets', 'labels', 'masks'] + }, + 'codebase_config': { + 'post_processing': { + 'export_postprocess_mask': False + } + } + }) - from mmdeploy.codebase.mmdet.deploy.object_detection_model import \ - End2EndModel - cls.end2end_model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu', - ['' for i in range(80)], deploy_cfg) + from mmdeploy.codebase.mmdet.deploy.object_detection_model import \ + End2EndModel + model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu', + ['' for i in range(80)], deploy_cfg) + yield model - @classmethod - def teardown_class(cls): - cls.wrapper.recover() - - def test_forward(self): + def test_forward(self, end2end_model): imgs = [torch.rand(1, 3, 64, 64)] img_metas = [[{ 'ori_shape': [64, 64, 3], 'img_shape': [64, 64, 3], 'scale_factor': [1, 1, 1, 1], }]] - results = self.end2end_model.forward(imgs, img_metas) + results = end2end_model.forward(imgs, img_metas) assert_forward_results(results, 'mask End2EndModel') -def get_test_cfg_and_post_processing(): +@pytest.fixture(scope='module') +def cfg_and_post_processing(): test_cfg = { 'nms_pre': 100, 'min_bbox_size': 0, @@ -168,61 +152,57 @@ def get_test_cfg_and_post_processing(): @backend_checker(Backend.ONNXRUNTIME) class TestPartitionSingleStageModel: - @classmethod - def setup_class(cls): + @pytest.fixture(scope='class') + def model(self, cfg_and_post_processing): # force add backend wrapper regardless of plugins # make sure ONNXRuntimeDetector can use ORTWrapper inside itself from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) # simplify backend inference - cls.wrapper = SwitchBackendWrapper(ORTWrapper) - cls.outputs = { - 'scores': torch.rand(1, 10, 80), - 'boxes': torch.rand(1, 10, 4) - } - cls.wrapper.set(outputs=cls.outputs) + with SwitchBackendWrapper(ORTWrapper) as wrapper: + outputs = { + 'scores': torch.rand(1, 10, 80), + 'boxes': torch.rand(1, 10, 4) + } + wrapper.set(outputs=outputs) - test_cfg, post_processing = get_test_cfg_and_post_processing() - model_cfg = mmcv.Config(dict(model=dict(test_cfg=test_cfg))) - deploy_cfg = mmcv.Config( - dict(codebase_config=dict(post_processing=post_processing))) + test_cfg, post_processing = cfg_and_post_processing + model_cfg = mmcv.Config(dict(model=dict(test_cfg=test_cfg))) + deploy_cfg = mmcv.Config( + dict(codebase_config=dict(post_processing=post_processing))) - from mmdeploy.codebase.mmdet.deploy.object_detection_model import \ - PartitionSingleStageModel - cls.model = PartitionSingleStageModel( - Backend.ONNXRUNTIME, [''], - 'cpu', ['' for i in range(80)], - model_cfg=model_cfg, - deploy_cfg=deploy_cfg) + from mmdeploy.codebase.mmdet.deploy.object_detection_model import \ + PartitionSingleStageModel + model_ = PartitionSingleStageModel( + Backend.ONNXRUNTIME, 
[''], + 'cpu', ['' for i in range(80)], + model_cfg=model_cfg, + deploy_cfg=deploy_cfg) + yield model_ - @classmethod - def teardown_class(cls): - cls.wrapper.recover() - - def test_forward_test(self): + def test_forward_test(self, model): imgs = [torch.rand(1, 3, 64, 64)] img_metas = [[{ 'ori_shape': [64, 64, 3], 'img_shape': [64, 64, 3], 'scale_factor': [1, 1, 1, 1], }]] - results = self.model.forward_test(imgs, img_metas) + results = model.forward_test(imgs, img_metas) assert_det_results(results, 'PartitionSingleStageModel') - def test_postprocess(self): + def test_postprocess(self, model): scores = torch.rand(1, 120, 80) bboxes = torch.rand(1, 120, 4) - results = self.model.partition0_postprocess( - scores=scores, bboxes=bboxes) + results = model.partition0_postprocess(scores=scores, bboxes=bboxes) assert_det_results( results, '.partition0_postprocess of' 'PartitionSingleStageModel') -def prepare_model_deploy_cfgs(): - test_cfg, post_processing = get_test_cfg_and_post_processing() +@pytest.fixture(scope='module') +def model_deploy_cfgs(cfg_and_post_processing): + test_cfg, post_processing = cfg_and_post_processing bbox_roi_extractor = { 'type': 'SingleRoIExtractor', 'roi_layer': { @@ -282,58 +262,56 @@ class DummyWrapper(torch.nn.Module): @backend_checker(Backend.ONNXRUNTIME) class TestPartitionTwoStageModel: - @classmethod - def setup_class(cls): + @pytest.fixture(scope='class') + def model(self, model_deploy_cfgs): # force add backend wrapper regardless of plugins # make sure ONNXRuntimeDetector can use ORTWrapper inside itself from mmdeploy.backend.onnxruntime import ORTWrapper ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) # simplify backend inference - cls.wrapper = SwitchBackendWrapper(ORTWrapper) - outputs = [ - np.random.rand(1, 12, 80).astype(np.float32), - np.random.rand(1, 12, 4).astype(np.float32), - ] * 2 + with SwitchBackendWrapper(ORTWrapper) as wrapper: + outputs = [ + np.random.rand(1, 12, 80).astype(np.float32), + np.random.rand(1, 12, 4).astype(np.float32), + ] * 2 - model_cfg, deploy_cfg = prepare_model_deploy_cfgs() + model_cfg, deploy_cfg = model_deploy_cfgs - cls.wrapper.set( - outputs=outputs, model_cfg=model_cfg, deploy_cfg=deploy_cfg) + wrapper.set( + outputs=outputs, model_cfg=model_cfg, deploy_cfg=deploy_cfg) - # replace original function in PartitionTwoStageModel - from mmdeploy.codebase.mmdet.deploy.object_detection_model import \ - PartitionTwoStageModel + # replace original function in PartitionTwoStageModel + from mmdeploy.codebase.mmdet.deploy.object_detection_model import \ + PartitionTwoStageModel - cls.model = PartitionTwoStageModel( - Backend.ONNXRUNTIME, ['', ''], - 'cpu', ['' for i in range(80)], - model_cfg=model_cfg, - deploy_cfg=deploy_cfg) - feats = [torch.randn(1, 8, 14, 14) for i in range(5)] - scores = torch.rand(1, 10, 1) - bboxes = torch.rand(1, 10, 4) - bboxes[..., 2:4] = 2 * bboxes[..., :2] + model_ = PartitionTwoStageModel( + Backend.ONNXRUNTIME, ['', ''], + 'cpu', ['' for i in range(80)], + model_cfg=model_cfg, + deploy_cfg=deploy_cfg) + feats = [torch.randn(1, 8, 14, 14) for i in range(5)] + scores = torch.rand(1, 10, 1) + bboxes = torch.rand(1, 10, 4) + bboxes[..., 2:4] = 2 * bboxes[..., :2] - cls_score = torch.rand(10, 81) - bbox_pred = torch.rand(10, 320) + cls_score = torch.rand(10, 81) + bbox_pred = torch.rand(10, 320) - cls.model.device = 'cpu' - cls.model.CLASSES = ['' for i in range(80)] - cls.model.first_wrapper = DummyWrapper([*feats, scores, bboxes]) - cls.model.second_wrapper = DummyWrapper([cls_score, bbox_pred]) 
+ model_.device = 'cpu' + model_.CLASSES = ['' for i in range(80)] + model_.first_wrapper = DummyWrapper([*feats, scores, bboxes]) + model_.second_wrapper = DummyWrapper([cls_score, bbox_pred]) - @classmethod - def teardown_class(cls): - cls.wrapper.recover() + yield model_ - def test_postprocess(self): + def test_postprocess(self, model): feats = [torch.randn(1, 8, 14, 14) for i in range(5)] scores = torch.rand(1, 50, 1) bboxes = torch.rand(1, 50, 4) bboxes[..., 2:4] = 2 * bboxes[..., :2] - results = self.model.partition0_postprocess( + results = model.partition0_postprocess( x=feats, scores=scores, bboxes=bboxes) assert results is not None, 'failed to get output using '\ 'partition0_postprocess of PartitionTwoStageDetector' @@ -348,7 +326,7 @@ class TestPartitionTwoStageModel: 'img_shape': [32, 32, 3], 'scale_factor': [1, 1, 1, 1], }]] - results = self.model.partition1_postprocess( + results = model.partition1_postprocess( rois=rois, cls_score=cls_score, bbox_pred=bbox_pred, @@ -358,7 +336,7 @@ class TestPartitionTwoStageModel: assert isinstance(results, tuple) assert len(results) == 2 - def test_forward(self): + def test_forward(self, model): class DummyPTSDetector(torch.nn.Module): """A dummy wrapper for unit tests.""" @@ -373,12 +351,12 @@ class TestPartitionTwoStageModel: return self.outputs1 import types - self.model.partition0_postprocess = types.MethodType( - DummyPTSDetector.partition0_postprocess, self.model) - self.model.partition1_postprocess = types.MethodType( - DummyPTSDetector.partition1_postprocess, self.model) - self.model.outputs0 = [torch.rand(2, 3)] * 2 - self.model.outputs1 = [torch.rand(1, 9, 5), torch.rand(1, 9)] + model.partition0_postprocess = types.MethodType( + DummyPTSDetector.partition0_postprocess, model) + model.partition1_postprocess = types.MethodType( + DummyPTSDetector.partition1_postprocess, model) + model.outputs0 = [torch.rand(2, 3)] * 2 + model.outputs1 = [torch.rand(1, 9, 5), torch.rand(1, 9)] imgs = [torch.rand(1, 3, 32, 32)] img_metas = [[{ @@ -386,7 +364,7 @@ class TestPartitionTwoStageModel: 'img_shape': [32, 32, 3], 'scale_factor': [1, 1, 1, 1], }]] - results = self.model.forward(imgs, img_metas) + results = model.forward(imgs, img_metas) assert_forward_results(results, 'PartitionTwoStageModel') @@ -447,8 +425,8 @@ class TestGetClassesFromCfg: @backend_checker(Backend.ONNXRUNTIME) @pytest.mark.parametrize('partition_type', [None, 'end2end']) -def test_build_object_detection_model(partition_type): - _, post_processing = get_test_cfg_and_post_processing() +def test_build_object_detection_model(partition_type, cfg_and_post_processing): + _, post_processing = cfg_and_post_processing model_cfg = mmcv.Config(dict(data=dict(test={'type': 'CocoDataset'}))) deploy_cfg = mmcv.Config( dict( @@ -463,7 +441,6 @@ def test_build_object_detection_model(partition_type): partition_cfg=[dict(output_names=[])]) from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) # simplify backend inference with SwitchBackendWrapper(ORTWrapper) as wrapper: @@ -478,120 +455,114 @@ def test_build_object_detection_model(partition_type): @backend_checker(Backend.NCNN) class TestNCNNEnd2EndModel: - @classmethod - def setup_class(cls): + @pytest.fixture(scope='class', params=[10, 0]) + def end2end_model(self, request): # force add backend wrapper regardless of plugins from mmdeploy.backend.ncnn import NCNNWrapper - ncnn_apis.__dict__.update({'NCNNWrapper': NCNNWrapper}) # simplify backend inference - cls.wrapper = 
SwitchBackendWrapper(NCNNWrapper) - cls.outputs = { - 'output': torch.rand(1, 10, 6), - } - cls.wrapper.set(outputs=cls.outputs) - deploy_cfg = mmcv.Config({'onnx_config': {'output_names': ['output']}}) - model_cfg = mmcv.Config({}) + with SwitchBackendWrapper(NCNNWrapper) as wrapper: + param = request.param + outputs = { + 'output': torch.rand(1, param, 6), + } + wrapper.set(outputs=outputs) + deploy_cfg = mmcv.Config( + {'onnx_config': { + 'output_names': ['output'] + }}) + model_cfg = mmcv.Config({}) - from mmdeploy.codebase.mmdet.deploy.object_detection_model import \ - NCNNEnd2EndModel - cls.ncnn_end2end_model = NCNNEnd2EndModel(Backend.NCNN, ['', ''], + from mmdeploy.codebase.mmdet.deploy.object_detection_model import \ + NCNNEnd2EndModel + ncnn_end2end_model = NCNNEnd2EndModel(Backend.NCNN, ['', ''], 'cpu', ['' for i in range(80)], model_cfg, deploy_cfg) + yield ncnn_end2end_model - @classmethod - def teardown_class(cls): - cls.wrapper.recover() - - @pytest.mark.parametrize('num_det', [10, 0]) - def test_forward_test(self, num_det): - self.outputs = { - 'output': torch.rand(1, num_det, 6), - } + def test_forward_test(self, end2end_model): imgs = torch.rand(1, 3, 64, 64) - results = self.ncnn_end2end_model.forward_test(imgs) + results = end2end_model.forward_test(imgs) assert_det_results(results, 'NCNNEnd2EndModel') @backend_checker(Backend.RKNN) class TestRKNNModel: - @classmethod - def setup_class(cls): + @pytest.fixture(scope='class') + def end2end_model(self): # force add backend wrapper regardless of plugins - import mmdeploy.backend.rknn as rknn_apis from mmdeploy.backend.rknn import RKNNWrapper - rknn_apis.__dict__.update({'RKNNWrapper': RKNNWrapper}) # simplify backend inference - cls.wrapper = SwitchBackendWrapper(RKNNWrapper) - cls.outputs = [ - torch.rand(1, 255, 5, 5), - torch.rand(1, 255, 10, 10), - torch.rand(1, 255, 20, 20) - ] - cls.wrapper.set(outputs=cls.outputs) - deploy_cfg = mmcv.Config({ - 'onnx_config': { - 'output_names': ['output'] - }, - 'backend_config': { - 'common_config': {} - } - }) - model_cfg = mmcv.Config( - dict( - model=dict( - bbox_head=dict( - type='YOLOV3Head', - num_classes=80, - in_channels=[512, 256, 128], - out_channels=[1024, 512, 256], - anchor_generator=dict( - type='YOLOAnchorGenerator', - base_sizes=[[(116, 90), (156, 198), ( - 373, 326)], [(30, 61), (62, 45), ( - 59, 119)], [(10, 13), (16, 30), (33, 23)]], - strides=[32, 16, 8]), - bbox_coder=dict(type='YOLOBBoxCoder'), - featmap_strides=[32, 16, 8], - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0, - reduction='sum'), - loss_conf=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0, - reduction='sum'), - loss_xy=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=2.0, - reduction='sum'), - loss_wh=dict( - type='MSELoss', loss_weight=2.0, reduction='sum')), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - conf_thr=0.005, - nms=dict(type='nms', iou_threshold=0.45), - max_per_img=100)))) + with SwitchBackendWrapper(RKNNWrapper) as wrapper: + outputs = [ + torch.rand(1, 255, 5, 5), + torch.rand(1, 255, 10, 10), + torch.rand(1, 255, 20, 20) + ] + wrapper.set(outputs=outputs) + deploy_cfg = mmcv.Config({ + 'onnx_config': { + 'output_names': ['output'] + }, + 'backend_config': { + 'common_config': {} + } + }) + model_cfg = mmcv.Config( + dict( + model=dict( + bbox_head=dict( + type='YOLOV3Head', + num_classes=80, + in_channels=[512, 256, 128], + out_channels=[1024, 512, 256], + anchor_generator=dict( + 
+                            type='YOLOAnchorGenerator',
+                            base_sizes=[[(116, 90), (156, 198), (
+                                373, 326)], [(30, 61), (62, 45), (
+                                59,
+                                119)], [(10, 13), (16, 30), (33, 23)]],
+                            strides=[32, 16, 8]),
+                        bbox_coder=dict(type='YOLOBBoxCoder'),
+                        featmap_strides=[32, 16, 8],
+                        loss_cls=dict(
+                            type='CrossEntropyLoss',
+                            use_sigmoid=True,
+                            loss_weight=1.0,
+                            reduction='sum'),
+                        loss_conf=dict(
+                            type='CrossEntropyLoss',
+                            use_sigmoid=True,
+                            loss_weight=1.0,
+                            reduction='sum'),
+                        loss_xy=dict(
+                            type='CrossEntropyLoss',
+                            use_sigmoid=True,
+                            loss_weight=2.0,
+                            reduction='sum'),
+                        loss_wh=dict(
+                            type='MSELoss',
+                            loss_weight=2.0,
+                            reduction='sum')),
+                    test_cfg=dict(
+                        nms_pre=1000,
+                        min_bbox_size=0,
+                        score_thr=0.05,
+                        conf_thr=0.005,
+                        nms=dict(type='nms', iou_threshold=0.45),
+                        max_per_img=100))))

-        from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
-            RKNNModel
-        cls.rknn_model = RKNNModel(Backend.RKNN, ['', ''], 'cpu',
+            from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
+                RKNNModel
+            rknn_model = RKNNModel(Backend.RKNN, ['', ''], 'cpu',
                                    ['' for i in range(80)], model_cfg, deploy_cfg)
+            return rknn_model

-    @classmethod
-    def teardown_class(cls):
-        cls.wrapper.recover()
-
-    def test_forward_test(self):
+    def test_forward_test(self, end2end_model):
         imgs = torch.rand(1, 3, 64, 64)
-        results = self.rknn_model.forward_test(imgs)
+        results = end2end_model.forward_test(imgs)
         assert_det_results(results, 'RKNNWrapper')
diff --git a/tests/test_codebase/test_mmdet3d/conftest.py b/tests/test_codebase/test_mmdet3d/conftest.py
new file mode 100644
index 000000000..6258f3b00
--- /dev/null
+++ b/tests/test_codebase/test_mmdet3d/conftest.py
@@ -0,0 +1,19 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import pytest
+
+from mmdeploy.codebase import import_codebase
+from mmdeploy.utils import Codebase
+
+
+def pytest_ignore_collect(*args, **kwargs):
+    import importlib
+    return importlib.util.find_spec('mmdet3d') is None
+
+
+@pytest.fixture(autouse=True, scope='package')
+def import_all_modules():
+    codebase = Codebase.MMDET3D
+    try:
+        import_codebase(codebase)
+    except ImportError:
+        pytest.skip(f'{codebase} is not installed.', allow_module_level=True)
diff --git a/tests/test_codebase/test_mmdet3d/test_mmdet3d_models.py b/tests/test_codebase/test_mmdet3d/test_mmdet3d_models.py
index 697fe9029..a7d1c1c96 100644
--- a/tests/test_codebase/test_mmdet3d/test_mmdet3d_models.py
+++ b/tests/test_codebase/test_mmdet3d/test_mmdet3d_models.py
@@ -1,25 +1,28 @@
 # Copyright (c) OpenMMLab. All rights reserved.
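The new conftest.py above replaces the per-file `try: import_codebase(...) / except ImportError: pytest.skip(...)` prologues: `pytest_ignore_collect` keeps pytest from importing the package at all when the codebase is absent, and the package-scoped autouse fixture performs the import exactly once otherwise. A minimal self-contained sketch of the same two-part pattern, where `some_codebase` is a hypothetical optional dependency rather than a name from this PR:

# conftest.py (sketch)
import importlib.util

import pytest


def pytest_ignore_collect(*args, **kwargs):
    # Returning True stops pytest from collecting this directory at all,
    # so a missing optional package can no longer crash collection.
    return importlib.util.find_spec('some_codebase') is None


@pytest.fixture(autouse=True, scope='package')
def import_all_modules():
    # autouse + package scope: runs once before the first test in the
    # package; individual test files no longer need import guards.
    try:
        importlib.import_module('some_codebase')
    except ImportError:
        pytest.skip('some_codebase is not installed.')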
+import copy + import mmcv import numpy as np import pytest import torch -from mmdeploy.codebase import import_codebase from mmdeploy.utils import Backend, Codebase, Task, load_config from mmdeploy.utils.test import WrapModel, check_backend, get_rewrite_outputs -try: - import_codebase(Codebase.MMDET3D) -except ImportError: - pytest.skip( - f'{Codebase.MMDET3D} is not installed.', allow_module_level=True) -model_cfg = load_config( - 'tests/test_codebase/test_mmdet3d/data/model_cfg.py')[0] -monodet_model_cfg = load_config( - 'tests/test_codebase/test_mmdet3d/data/monodet_model_cfg.py')[0] + +@pytest.fixture(scope='module') +def model_cfg(): + return load_config('tests/test_codebase/test_mmdet3d/data/model_cfg.py')[0] -def get_pillar_encoder(): +@pytest.fixture(scope='module') +def monodet_model_cfg(): + return load_config( + 'tests/test_codebase/test_mmdet3d/data/monodet_model_cfg.py')[0] + + +@pytest.fixture +def pillar_encoder(): from mmdet3d.models.voxel_encoders import PillarFeatureNet model = PillarFeatureNet( in_channels=4, @@ -32,21 +35,23 @@ def get_pillar_encoder(): norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01), mode='max') model.requires_grad_(False) + model.cpu().eval() return model -def get_pointpillars_scatter(): +@pytest.fixture +def pointpillars_scatter(): from mmdet3d.models.middle_encoders import PointPillarsScatter model = PointPillarsScatter(in_channels=64, output_shape=(16, 16)) model.requires_grad_(False) + model.cpu().eval() return model @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) -def test_pillar_encoder(backend_type: Backend): +def test_pillar_encoder(backend_type: Backend, pillar_encoder): check_backend(backend_type, True) - model = get_pillar_encoder() - model.cpu().eval() + model = pillar_encoder deploy_cfg = mmcv.Config( dict( @@ -81,10 +86,9 @@ def test_pillar_encoder(backend_type: Backend): @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) -def test_pointpillars_scatter(backend_type: Backend): +def test_pointpillars_scatter(backend_type: Backend, pointpillars_scatter): check_backend(backend_type, True) - model = get_pointpillars_scatter() - model.cpu().eval() + model = pointpillars_scatter deploy_cfg = mmcv.Config( dict( @@ -113,30 +117,22 @@ def test_pointpillars_scatter(backend_type: Backend): model_output.shape, rewrite_output.shape, rtol=1e-03, atol=1e-03) -def get_centerpoint(): +@pytest.fixture +def centerpoint(model_cfg): from mmdet3d.models.detectors.centerpoint import CenterPoint model = CenterPoint(**model_cfg.centerpoint_model) model.requires_grad_(False) + model.cpu().eval() return model -def get_centerpoint_head(): - from mmdet3d.models import builder - model_cfg.centerpoint_model.pts_bbox_head.test_cfg = model_cfg.\ - centerpoint_model.test_cfg - head = builder.build_head(model_cfg.centerpoint_model.pts_bbox_head) - head.requires_grad_(False) - return head - - @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) -def test_centerpoint(backend_type: Backend): +def test_centerpoint(backend_type: Backend, model_cfg, centerpoint): from mmdeploy.codebase.mmdet3d.deploy.voxel_detection import VoxelDetection from mmdeploy.core import RewriterContext check_backend(backend_type, True) - model = get_centerpoint() - model.cpu().eval() + model = centerpoint deploy_cfg = mmcv.Config( dict( backend_config=dict(type=backend_type.value), @@ -159,21 +155,22 @@ def test_centerpoint(backend_type: Backend): assert rewrite_outputs is not None -def get_pointpillars_nus(): +@pytest.fixture +def 
pointpillars_nus(model_cfg): from mmdet3d.models.detectors import MVXFasterRCNN model = MVXFasterRCNN(**model_cfg.pointpillars_nus_model) model.requires_grad_(False) + model.cpu().eval() return model @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) -def test_pointpillars_nus(backend_type: Backend): +def test_pointpillars_nus(backend_type: Backend, model_cfg, pointpillars_nus): from mmdeploy.codebase.mmdet3d.deploy.voxel_detection import VoxelDetection from mmdeploy.core import RewriterContext check_backend(backend_type, True) - model = get_pointpillars_nus() - model.cpu().eval() + model = pointpillars_nus deploy_cfg = mmcv.Config( dict( backend_config=dict(type=backend_type.value), @@ -196,22 +193,24 @@ def test_pointpillars_nus(backend_type: Backend): assert outputs is not None -def get_fcos3d(): +@pytest.fixture +def fcos3d(monodet_model_cfg): from mmdet3d.models.detectors import FCOSMono3D - monodet_model_cfg.model.pop('type') - model = FCOSMono3D(**monodet_model_cfg.model) + cfg = copy.deepcopy(monodet_model_cfg) + cfg.model.pop('type') + model = FCOSMono3D(**cfg.model) model.requires_grad_(False) + model.cpu().eval() return model @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) -def test_fcos3d(backend_type: Backend): +def test_fcos3d(backend_type: Backend, monodet_model_cfg, fcos3d): from mmdeploy.codebase.mmdet3d.deploy.monocular_detection import \ MonocularDetection from mmdeploy.core import RewriterContext check_backend(backend_type, True) - model = get_fcos3d() - model.cpu().eval() + model = fcos3d deploy_cfg = mmcv.Config( dict( backend_config=dict(type=backend_type.value), diff --git a/tests/test_codebase/test_mmdet3d/test_monocular_detection.py b/tests/test_codebase/test_mmdet3d/test_monocular_detection.py index 9a861bcf3..b5229a6cc 100644 --- a/tests/test_codebase/test_mmdet3d/test_monocular_detection.py +++ b/tests/test_codebase/test_mmdet3d/test_monocular_detection.py @@ -1,6 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. 
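The `fcos3d` fixture above deep-copies the module-scoped config before `cfg.model.pop('type')`. Module-scoped fixtures are shared by every test in the file, so an in-place `pop` would leak into later tests; the copy keeps the shared object pristine. A small sketch of the hazard, with hypothetical names and a plain dict standing in for an mmcv Config:

import copy

import pytest


@pytest.fixture(scope='module')
def monodet_cfg():
    # Built once per module and shared by every test that requests it.
    return {'model': {'type': 'FCOSMono3D', 'num_classes': 10}}


@pytest.fixture
def fcos3d_init_kwargs(monodet_cfg):
    # pop() on the shared dict would corrupt it for later tests;
    # deep-copying first leaves the module-scoped fixture unchanged.
    cfg = copy.deepcopy(monodet_cfg)
    cfg['model'].pop('type')
    return cfg['model']


def test_kwargs(fcos3d_init_kwargs, monodet_cfg):
    assert 'type' not in fcos3d_init_kwargs
    assert 'type' in monodet_cfg['model']  # shared config is intact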
import os -from tempfile import NamedTemporaryFile, TemporaryDirectory import mmcv import pytest @@ -8,70 +7,85 @@ import torch from torch.utils.data import DataLoader from torch.utils.data.dataset import Dataset -import mmdeploy.backend.onnxruntime as ort_apis from mmdeploy.apis import build_task_processor -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Codebase, load_config +from mmdeploy.utils import load_config from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper -try: - import_codebase(Codebase.MMDET3D) -except ImportError: - pytest.skip( - f'{Codebase.MMDET3D} is not installed.', allow_module_level=True) -model_cfg_path = 'tests/test_codebase/test_mmdet3d/data/monodet_model_cfg.py' -img_path = 'tests/test_codebase/test_mmdet3d/data/nuscenes/' \ +@pytest.fixture(scope='module') +def model_cfg_path(): + return 'tests/test_codebase/test_mmdet3d/data/monodet_model_cfg.py' + + +@pytest.fixture(scope='module') +def img_path(): + return 'tests/test_codebase/test_mmdet3d/data/nuscenes/' \ 'n015-2018-07-24-11-22-45+0800__CAM_BACK__1532402927637525.jpg' -model_cfg = load_config(model_cfg_path)[0] -deploy_cfg = mmcv.Config( - dict( - backend_config=dict(type='onnxruntime'), - codebase_config=dict( - type='mmdet3d', - task='MonocularDetection', - ann_file='tests/test_codebase/test_mmdet3d/data/nuscenes/n015-2018' - '-07-24-11-22-45+0800__CAM_BACK__1532402927637525_mono3d.coco.json' - ), - onnx_config=dict( - type='onnx', - export_params=True, - keep_initializers_as_inputs=False, - opset_version=11, - input_shape=None, - input_names=['img', 'cam2img', 'cam2img_inverse'], - output_names=['bboxes', 'scores', 'labels', 'dir_scores', - 'attrs']))) -onnx_file = NamedTemporaryFile(suffix='.onnx').name -task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu') + + +@pytest.fixture(scope='module') +def model_cfg(model_cfg_path): + return load_config(model_cfg_path)[0] + + +@pytest.fixture(scope='module') +def deploy_cfg(): + return mmcv.Config( + dict( + backend_config=dict(type='onnxruntime'), + codebase_config=dict( + type='mmdet3d', + task='MonocularDetection', + ann_file='tests/test_codebase/test_mmdet3d/data' + + '/nuscenes/n015-2018' + + '-07-24-11-22-45+0800__CAM_BACK__1532402927637525_mono3d' + + '.coco.json'), + onnx_config=dict( + type='onnx', + export_params=True, + keep_initializers_as_inputs=False, + opset_version=11, + input_shape=None, + input_names=['img', 'cam2img', 'cam2img_inverse'], + output_names=[ + 'bboxes', 'scores', 'labels', 'dir_scores', 'attrs' + ]))) + + +@pytest.fixture(scope='module') +def task_processor(model_cfg, deploy_cfg): + return build_task_processor(model_cfg, deploy_cfg, 'cpu') + + num_classes = 10 num_attr = 5 num_dets = 20 -def test_init_pytorch_model(): +@pytest.fixture(scope='module') +def torch_model(task_processor): + return task_processor.init_pytorch_model(None) + + +def test_init_pytorch_model(torch_model): from mmdet3d.models import SingleStageMono3DDetector - model = task_processor.init_pytorch_model(None) - assert isinstance(model, SingleStageMono3DDetector) + assert isinstance(torch_model, SingleStageMono3DDetector) -@pytest.fixture -def backend_model(): +@pytest.fixture(scope='module') +def backend_model(task_processor): from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) - wrapper = SwitchBackendWrapper(ORTWrapper) - wrapper.set( - outputs={ - 'bboxes': torch.rand(1, num_dets, 9), - 'scores': torch.rand(1, num_dets), - 'labels': 
torch.randint(num_classes, (1, num_dets)), - 'dir_scores': torch.randint(2, (1, num_dets)), - 'attrs': torch.randint(num_attr, (1, num_dets)) - }) + with SwitchBackendWrapper(ORTWrapper) as wrapper: + wrapper.set( + outputs={ + 'bboxes': torch.rand(1, num_dets, 9), + 'scores': torch.rand(1, num_dets), + 'labels': torch.randint(num_classes, (1, num_dets)), + 'dir_scores': torch.randint(2, (1, num_dets)), + 'attrs': torch.randint(num_attr, (1, num_dets)) + }) - yield task_processor.init_backend_model(['']) - - wrapper.recover() + yield task_processor.init_backend_model(['']) def test_init_backend_model(backend_model): @@ -80,23 +94,28 @@ def test_init_backend_model(backend_model): assert isinstance(backend_model, MonocularDetectionModel) +@pytest.fixture(scope='module') +def model_inputs(task_processor, img_path): + return task_processor.create_input(img_path) + + @pytest.mark.parametrize('device', ['cpu', 'cuda:0']) -def test_create_input(device): +def test_create_input(device, task_processor, model_inputs): if device == 'cuda:0' and not torch.cuda.is_available(): pytest.skip('cuda is not available') original_device = task_processor.device task_processor.device = device - inputs = task_processor.create_input(img_path) + inputs = model_inputs assert len(inputs) == 2 task_processor.device = original_device @pytest.mark.skipif( reason='Only support GPU test', condition=not torch.cuda.is_available()) -def test_run_inference(backend_model): +def test_run_inference(backend_model, task_processor, torch_model, + model_inputs): task_processor.device = 'cuda:0' - torch_model = task_processor.init_pytorch_model(None) - input_dict, _ = task_processor.create_input(img_path) + input_dict, _ = model_inputs torch_results = task_processor.run_inference(torch_model, input_dict) backend_results = task_processor.run_inference(backend_model, input_dict) assert torch_results is not None @@ -107,20 +126,19 @@ def test_run_inference(backend_model): @pytest.mark.skipif( reason='Only support GPU test', condition=not torch.cuda.is_available()) -def test_visualize(): +def test_visualize(task_processor, torch_model, model_inputs, img_path, + tmp_path): task_processor.device = 'cuda:0' - input_dict, _ = task_processor.create_input(img_path) - torch_model = task_processor.init_pytorch_model(None) + input_dict, _ = model_inputs results = task_processor.run_inference(torch_model, input_dict) - with TemporaryDirectory() as dir: - filename = dir + 'tmp.bin' - task_processor.visualize(torch_model, img_path, results[0], filename, - 'test', False) - assert os.path.exists(filename) + filename = str(tmp_path / 'tmp.bin') + task_processor.visualize(torch_model, img_path, results[0], filename, + 'test', False) + assert os.path.exists(filename) task_processor.device = 'cpu' -def test_build_dataset_and_dataloader(): +def test_build_dataset_and_dataloader(task_processor, model_cfg): dataset = task_processor.build_dataset( dataset_cfg=model_cfg, dataset_type='test') assert isinstance(dataset, Dataset), 'Failed to build dataset' @@ -130,7 +148,7 @@ def test_build_dataset_and_dataloader(): @pytest.mark.skipif( reason='Only support GPU test', condition=not torch.cuda.is_available()) -def test_single_gpu_test_and_evaluate(): +def test_single_gpu_test_and_evaluate(task_processor, model_cfg, tmp_path): from mmcv.parallel import MMDataParallel task_processor.device = 'cuda:0' @@ -158,7 +176,7 @@ def test_single_gpu_test_and_evaluate(): # Run test outputs = task_processor.single_gpu_test(model, dataloader) assert isinstance(outputs, list) - 
output_file = NamedTemporaryFile(suffix='.pkl').name + output_file = str(tmp_path / 'tmp.pkl') task_processor.evaluate_outputs( model_cfg, outputs, dataset, 'bbox', out=output_file, format_only=True) task_processor.device = 'cpu' diff --git a/tests/test_codebase/test_mmdet3d/test_monocular_detection_model.py b/tests/test_codebase/test_mmdet3d/test_monocular_detection_model.py index fb142233d..63001b592 100644 --- a/tests/test_codebase/test_mmdet3d/test_monocular_detection_model.py +++ b/tests/test_codebase/test_mmdet3d/test_monocular_detection_model.py @@ -5,71 +5,59 @@ import mmcv import pytest import torch -import mmdeploy.backend.onnxruntime as ort_apis -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Backend, Codebase -from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker - -try: - import_codebase(Codebase.MMDET3D) -except ImportError: - pytest.skip( - f'{Codebase.MMDET3D} is not installed.', allow_module_level=True) from mmdeploy.codebase.mmdet3d.deploy.monocular_detection_model import ( MonocularDetectionModel, build_monocular_detection_model) +from mmdeploy.utils import Backend, Codebase +from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker @backend_checker(Backend.ONNXRUNTIME) class TestMonocularDetectionModel: - @classmethod - def setup_class(cls): + @pytest.fixture(scope='class') + def end2end_model(self): # force add backend wrapper regardless of plugins from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) # simplify backend inference num_classes = 10 num_attr = 5 num_dets = 20 - cls.wrapper = SwitchBackendWrapper(ORTWrapper) - cls.outputs = { - 'bboxes': torch.rand(1, num_dets, 9), - 'scores': torch.rand(1, num_dets), - 'labels': torch.randint(num_classes, (1, num_dets)), - 'dir_scores': torch.randint(2, (1, num_dets)), - 'attrs': torch.randint(num_attr, (1, num_dets)) - } - cls.wrapper.set(outputs=cls.outputs) - deploy_cfg = mmcv.Config({ - 'onnx_config': { - 'input_names': ['img', 'cam2img', 'cam2img_inverse'], - 'output_names': - ['bboxes', 'scores', 'labels', 'dir_scores', 'attrs'], - 'opset_version': - 11 - }, - 'backend_config': { - 'type': 'tensorrt' + with SwitchBackendWrapper(ORTWrapper) as wrapper: + outputs = { + 'bboxes': torch.rand(1, num_dets, 9), + 'scores': torch.rand(1, num_dets), + 'labels': torch.randint(num_classes, (1, num_dets)), + 'dir_scores': torch.randint(2, (1, num_dets)), + 'attrs': torch.randint(num_attr, (1, num_dets)) } - }) + wrapper.set(outputs=outputs) + deploy_cfg = mmcv.Config({ + 'onnx_config': { + 'input_names': ['img', 'cam2img', 'cam2img_inverse'], + 'output_names': + ['bboxes', 'scores', 'labels', 'dir_scores', 'attrs'], + 'opset_version': + 11 + }, + 'backend_config': { + 'type': 'tensorrt' + } + }) - cls.end2end_model = MonocularDetectionModel( - Backend.ONNXRUNTIME, - [''], - device='cuda', - model_cfg=['' for i in range(10)], - deploy_cfg=deploy_cfg, - ) - - @classmethod - def teardown_class(cls): - cls.wrapper.recover() + model = MonocularDetectionModel( + Backend.ONNXRUNTIME, + [''], + device='cuda', + model_cfg=['' for i in range(10)], + deploy_cfg=deploy_cfg, + ) + yield model @pytest.mark.skipif( reason='Only support GPU test', condition=not torch.cuda.is_available()) - def test_forward_and_show_result(self): + def test_forward_and_show_result(self, end2end_model, tmp_path): from mmdet3d.core import Box3DMode from mmdet3d.core.bbox.structures.box_3d_mode import \ CameraInstance3DBoxes @@ -87,16 +75,15 @@ class 
TestMonocularDetectionModel: Box3DMode.CAM, }]] data = dict(img=img, img_metas=img_metas) - results = self.end2end_model.forward(img, img_metas) + results = end2end_model.forward(img, img_metas) assert results is not None assert isinstance(results, list) assert len(results) == 1 # assert results[0]['img_bbox']['scores_3d'].shape == 4 - from tempfile import TemporaryDirectory - with TemporaryDirectory() as dir: - self.end2end_model.show_result(data, results, - osp.join(dir, 'backend_output')) - assert osp.exists(dir + '/backend_output') + dir = str(tmp_path) + end2end_model.show_result(data, results, + osp.join(dir, 'backend_output')) + assert osp.exists(dir + '/backend_output') @backend_checker(Backend.ONNXRUNTIME) @@ -112,7 +99,6 @@ def test_build_monocular_detection_model(): codebase_config=dict(type=Codebase.MMDET3D.value))) from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) # simplify backend inference with SwitchBackendWrapper(ORTWrapper) as wrapper: diff --git a/tests/test_codebase/test_mmdet3d/test_voxel_detection.py b/tests/test_codebase/test_mmdet3d/test_voxel_detection.py index 28d6a0811..f519dd138 100644 --- a/tests/test_codebase/test_mmdet3d/test_voxel_detection.py +++ b/tests/test_codebase/test_mmdet3d/test_voxel_detection.py @@ -1,6 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. import os -from tempfile import NamedTemporaryFile, TemporaryDirectory import mmcv import pytest @@ -8,58 +7,69 @@ import torch from torch.utils.data import DataLoader from torch.utils.data.dataset import Dataset -import mmdeploy.backend.onnxruntime as ort_apis from mmdeploy.apis import build_task_processor -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Codebase, load_config +from mmdeploy.utils import load_config from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper -try: - import_codebase(Codebase.MMDET3D) -except ImportError: - pytest.skip( - f'{Codebase.MMDET3D} is not installed.', allow_module_level=True) -model_cfg_path = 'tests/test_codebase/test_mmdet3d/data/model_cfg.py' -pcd_path = 'tests/test_codebase/test_mmdet3d/data/kitti/kitti_000008.bin' -model_cfg = load_config(model_cfg_path)[0] -deploy_cfg = mmcv.Config( - dict( - backend_config=dict(type='onnxruntime'), - codebase_config=dict(type='mmdet3d', task='VoxelDetection'), - onnx_config=dict( - type='onnx', - export_params=True, - keep_initializers_as_inputs=False, - opset_version=11, - input_shape=None, - input_names=['voxels', 'num_points', 'coors'], - output_names=['bboxes', 'scores', 'labels']))) -onnx_file = NamedTemporaryFile(suffix='.onnx').name -task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu') +@pytest.fixture(scope='module') +def model_cfg_path(): + return 'tests/test_codebase/test_mmdet3d/data/model_cfg.py' -def test_init_pytorch_model(): +@pytest.fixture(scope='module') +def pcd_path(): + return 'tests/test_codebase/test_mmdet3d/data/kitti/kitti_000008.bin' + + +@pytest.fixture(scope='module') +def model_cfg(model_cfg_path): + return load_config(model_cfg_path)[0] + + +@pytest.fixture(scope='module') +def deploy_cfg(): + return mmcv.Config( + dict( + backend_config=dict(type='onnxruntime'), + codebase_config=dict(type='mmdet3d', task='VoxelDetection'), + onnx_config=dict( + type='onnx', + export_params=True, + keep_initializers_as_inputs=False, + opset_version=11, + input_shape=None, + input_names=['voxels', 'num_points', 'coors'], + output_names=['bboxes', 'scores', 'labels']))) + + 
+@pytest.fixture(scope='module') +def task_processor(model_cfg, deploy_cfg): + return build_task_processor(model_cfg, deploy_cfg, 'cpu') + + +@pytest.fixture(scope='module') +def torch_model(task_processor): + return task_processor.init_pytorch_model(None) + + +def test_init_pytorch_model(torch_model): from mmdet3d.models import Base3DDetector - model = task_processor.init_pytorch_model(None) - assert isinstance(model, Base3DDetector) + assert isinstance(torch_model, Base3DDetector) -@pytest.fixture -def backend_model(): +@pytest.fixture(scope='module') +def backend_model(task_processor): from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) - wrapper = SwitchBackendWrapper(ORTWrapper) - wrapper.set( - outputs={ - 'bboxes': torch.rand(1, 50, 7), - 'scores': torch.rand(1, 50), - 'labels': torch.rand(1, 50) - }) + with SwitchBackendWrapper(ORTWrapper) as wrapper: + wrapper.set( + outputs={ + 'bboxes': torch.rand(1, 50, 7), + 'scores': torch.rand(1, 50), + 'labels': torch.rand(1, 50) + }) - yield task_processor.init_backend_model(['']) - - wrapper.recover() + yield task_processor.init_backend_model(['']) def test_init_backend_model(backend_model): @@ -68,23 +78,28 @@ def test_init_backend_model(backend_model): assert isinstance(backend_model, VoxelDetectionModel) +@pytest.fixture(scope='module') +def model_inputs(task_processor, pcd_path): + return task_processor.create_input(pcd_path) + + @pytest.mark.parametrize('device', ['cpu', 'cuda:0']) -def test_create_input(device): +def test_create_input(device, task_processor, model_inputs): if device == 'cuda:0' and not torch.cuda.is_available(): pytest.skip('cuda is not available') original_device = task_processor.device task_processor.device = device - inputs = task_processor.create_input(pcd_path) + inputs = model_inputs assert len(inputs) == 2 task_processor.device = original_device @pytest.mark.skipif( reason='Only support GPU test', condition=not torch.cuda.is_available()) -def test_run_inference(backend_model): +def test_run_inference(backend_model, task_processor, torch_model, + model_inputs): task_processor.device = 'cuda:0' - torch_model = task_processor.init_pytorch_model(None) - input_dict, _ = task_processor.create_input(pcd_path) + input_dict, _ = model_inputs torch_results = task_processor.run_inference(torch_model, input_dict) backend_results = task_processor.run_inference(backend_model, input_dict) assert torch_results is not None @@ -95,20 +110,19 @@ def test_run_inference(backend_model): @pytest.mark.skipif( reason='Only support GPU test', condition=not torch.cuda.is_available()) -def test_visualize(): +def test_visualize(pcd_path, task_processor, torch_model, tmp_path, + model_inputs): task_processor.device = 'cuda:0' - input_dict, _ = task_processor.create_input(pcd_path) - torch_model = task_processor.init_pytorch_model(None) + input_dict, _ = model_inputs results = task_processor.run_inference(torch_model, input_dict) - with TemporaryDirectory() as dir: - filename = dir + 'tmp.bin' - task_processor.visualize(torch_model, pcd_path, results[0], filename, - 'test', False) - assert os.path.exists(filename) + filename = str(tmp_path / 'tmp.bin') + task_processor.visualize(torch_model, pcd_path, results[0], filename, + 'test', False) + assert os.path.exists(filename) task_processor.device = 'cpu' -def test_build_dataset_and_dataloader(): +def test_build_dataset_and_dataloader(model_cfg, task_processor): dataset = task_processor.build_dataset( dataset_cfg=model_cfg, 
dataset_type='test') assert isinstance(dataset, Dataset), 'Failed to build dataset' @@ -118,7 +132,7 @@ def test_build_dataset_and_dataloader(): @pytest.mark.skipif( reason='Only support GPU test', condition=not torch.cuda.is_available()) -def test_single_gpu_test_and_evaluate(): +def test_single_gpu_test_and_evaluate(model_cfg, task_processor, tmp_path): from mmcv.parallel import MMDataParallel task_processor.device = 'cuda:0' @@ -146,7 +160,7 @@ def test_single_gpu_test_and_evaluate(): # Run test outputs = task_processor.single_gpu_test(model, dataloader) assert isinstance(outputs, list) - output_file = NamedTemporaryFile(suffix='.pkl').name + output_file = str(tmp_path / 'tmp.pkl') task_processor.evaluate_outputs( model_cfg, outputs, dataset, 'bbox', out=output_file, format_only=True) task_processor.device = 'cpu' diff --git a/tests/test_codebase/test_mmdet3d/test_voxel_detection_model.py b/tests/test_codebase/test_mmdet3d/test_voxel_detection_model.py index a75c8ce72..a8949fd31 100644 --- a/tests/test_codebase/test_mmdet3d/test_voxel_detection_model.py +++ b/tests/test_codebase/test_mmdet3d/test_voxel_detection_model.py @@ -5,18 +5,10 @@ import mmcv import pytest import torch -import mmdeploy.backend.onnxruntime as ort_apis -from mmdeploy.codebase import import_codebase +from mmdeploy.codebase.mmdet3d.deploy.voxel_detection import VoxelDetection from mmdeploy.utils import Backend, Codebase from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker -try: - import_codebase(Codebase.MMDET3D) -except ImportError: - pytest.skip( - f'{Codebase.MMDET3D} is not installed.', allow_module_level=True) -from mmdeploy.codebase.mmdet3d.deploy.voxel_detection import VoxelDetection - pcd_path = 'tests/test_codebase/test_mmdet3d/data/kitti/kitti_000008.bin' model_cfg = 'tests/test_codebase/test_mmdet3d/data/model_cfg.py' @@ -24,54 +16,55 @@ model_cfg = 'tests/test_codebase/test_mmdet3d/data/model_cfg.py' @backend_checker(Backend.ONNXRUNTIME) class TestVoxelDetectionModel: - @classmethod - def setup_class(cls): + @pytest.fixture(scope='class') + def end2end_model(self): # force add backend wrapper regardless of plugins from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) - - # simplify backend inference - cls.wrapper = SwitchBackendWrapper(ORTWrapper) - cls.outputs = { - 'bboxes': torch.rand(1, 50, 7), - 'scores': torch.rand(1, 50), - 'labels': torch.rand(1, 50) - } - cls.wrapper.set(outputs=cls.outputs) - deploy_cfg = mmcv.Config({ - 'onnx_config': { - 'input_names': ['voxels', 'num_points', 'coors'], - 'output_names': ['bboxes', 'scores', 'labels'], - 'opset_version': 11 - }, - 'backend_config': { - 'type': 'tensorrt' - } - }) - - from mmdeploy.utils import load_config - model_cfg_path = 'tests/test_codebase/test_mmdet3d/data/model_cfg.py' - model_cfg = load_config(model_cfg_path)[0] from mmdeploy.codebase.mmdet3d.deploy.voxel_detection_model import \ VoxelDetectionModel - cls.end2end_model = VoxelDetectionModel( - Backend.ONNXRUNTIME, [''], - device='cuda', - deploy_cfg=deploy_cfg, - model_cfg=model_cfg) + from mmdeploy.utils import load_config + + # simplify backend inference + with SwitchBackendWrapper(ORTWrapper) as wrapper: + outputs = { + 'bboxes': torch.rand(1, 50, 7), + 'scores': torch.rand(1, 50), + 'labels': torch.rand(1, 50) + } + wrapper.set(outputs=outputs) + deploy_cfg = mmcv.Config({ + 'onnx_config': { + 'input_names': ['voxels', 'num_points', 'coors'], + 'output_names': ['bboxes', 'scores', 'labels'], + 'opset_version': 
11 + }, + 'backend_config': { + 'type': 'tensorrt' + } + }) + + model_cfg_path = 'tests/test_codebase/test_mmdet3d/data' + \ + '/model_cfg.py' + model_cfg = load_config(model_cfg_path)[0] + + model = VoxelDetectionModel( + Backend.ONNXRUNTIME, [''], + device='cuda', + deploy_cfg=deploy_cfg, + model_cfg=model_cfg) + yield model @pytest.mark.skipif( reason='Only support GPU test', condition=not torch.cuda.is_available()) - def test_forward_and_show_result(self): + def test_forward_and_show_result(self, end2end_model, tmp_path): data = VoxelDetection.read_pcd_file(pcd_path, model_cfg, 'cuda') - results = self.end2end_model.forward(data['points'], data['img_metas']) + results = end2end_model.forward(data['points'], data['img_metas']) assert results is not None - from tempfile import TemporaryDirectory - with TemporaryDirectory() as dir: - self.end2end_model.show_result( - data, results, dir, 'backend_output.bin', show=False) - assert osp.exists(dir + '/backend_output.bin') + dir = str(tmp_path) + end2end_model.show_result( + data, results, dir, 'backend_output.bin', show=False) + assert osp.exists(dir + '/backend_output.bin') @backend_checker(Backend.ONNXRUNTIME) @@ -87,7 +80,6 @@ def test_build_voxel_detection_model(): codebase_config=dict(type=Codebase.MMDET3D.value))) from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) # simplify backend inference with SwitchBackendWrapper(ORTWrapper) as wrapper: diff --git a/tests/test_codebase/test_mmedit/conftest.py b/tests/test_codebase/test_mmedit/conftest.py new file mode 100644 index 000000000..359a0e7bf --- /dev/null +++ b/tests/test_codebase/test_mmedit/conftest.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest + +from mmdeploy.codebase import import_codebase +from mmdeploy.utils import Codebase + + +def pytest_ignore_collect(*args, **kwargs): + import importlib + return importlib.util.find_spec('mmedit') is None + + +@pytest.fixture(autouse=True, scope='package') +def import_all_modules(): + codebase = Codebase.MMEDIT + try: + import_codebase(codebase) + except ImportError: + pytest.skip(f'{codebase} is not installed.', allow_module_level=True) diff --git a/tests/test_codebase/test_mmedit/test_mmedit_models.py b/tests/test_codebase/test_mmedit/test_mmedit_models.py index d55e2cf44..ae6435f6e 100644 --- a/tests/test_codebase/test_mmedit/test_mmedit_models.py +++ b/tests/test_codebase/test_mmedit/test_mmedit_models.py @@ -1,6 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. 
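Throughout these files the `setup_class`/`teardown_class` pairs collapse into a single yield fixture whose body runs inside `with SwitchBackendWrapper(...)`: pytest resumes the fixture after the last test in the scope finishes, so the context manager's `__exit__` restores the backend even when a test fails mid-class. A schematic sketch with a stand-in context manager instead of the real `SwitchBackendWrapper`:

from contextlib import contextmanager

import pytest


@contextmanager
def switch_backend():
    # Stand-in for SwitchBackendWrapper(ORTWrapper): __enter__ patches
    # the backend, __exit__ restores it.
    state = {'patched': True}
    try:
        yield state
    finally:
        state['patched'] = False  # runs even if a test raised


@pytest.fixture(scope='class')
def end2end_model():
    with switch_backend() as backend:
        # Build the model while the backend is patched, then hand control
        # to the tests; pytest re-enters here, leaving the with-block,
        # only after the last test in the class has run.
        yield {'backend': backend}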
 import os.path as osp
-import tempfile
 from typing import Dict

 import mmcv
@@ -8,55 +7,54 @@ import onnx
 import pytest
 import torch

-from mmdeploy.codebase import import_codebase
 from mmdeploy.core import RewriterContext
-from mmdeploy.utils import Backend, Codebase, get_onnx_config
-
-try:
-    import_codebase(Codebase.MMEDIT)
-except ImportError:
-    pytest.skip(
-        f'{Codebase.MMEDIT} is not installed.', allow_module_level=True)
-
-img = torch.rand(1, 3, 4, 4)
-model_file = tempfile.NamedTemporaryFile(suffix='.onnx').name
-
-deploy_cfg = mmcv.Config(
-    dict(
-        codebase_config=dict(
-            type='mmedit',
-            task='SuperResolution',
-        ),
-        backend_config=dict(
-            type='tensorrt',
-            common_config=dict(fp16_mode=False, max_workspace_size=1 << 10),
-            model_inputs=[
-                dict(
-                    input_shapes=dict(
-                        input=dict(
-                            min_shape=[1, 3, 4, 4],
-                            opt_shape=[1, 3, 4, 4],
-                            max_shape=[1, 3, 4, 4])))
-            ]),
-        ir_config=dict(
-            type='onnx',
-            export_params=True,
-            keep_initializers_as_inputs=False,
-            opset_version=11,
-            save_file=model_file,
-            input_shape=None,
-            input_names=['input'],
-            output_names=['output'])))
+from mmdeploy.utils import Backend, get_onnx_config


-def test_srcnn():
+@pytest.fixture
+def img():
+    return torch.rand(1, 3, 4, 4)
+
+
+@pytest.fixture
+def deploy_cfg(tmp_path):
+    model_file = str(tmp_path / 'end2end.onnx')
+    return mmcv.Config(
+        dict(
+            codebase_config=dict(
+                type='mmedit',
+                task='SuperResolution',
+            ),
+            backend_config=dict(
+                type='tensorrt',
+                common_config=dict(
+                    fp16_mode=False, max_workspace_size=1 << 10),
+                model_inputs=[
+                    dict(
+                        input_shapes=dict(
+                            input=dict(
+                                min_shape=[1, 3, 4, 4],
+                                opt_shape=[1, 3, 4, 4],
+                                max_shape=[1, 3, 4, 4])))
+                ]),
+            ir_config=dict(
+                type='onnx',
+                export_params=True,
+                keep_initializers_as_inputs=False,
+                opset_version=11,
+                save_file=model_file,
+                input_shape=None,
+                input_names=['input'],
+                output_names=['output'])))
+
+
+def test_srcnn(img, deploy_cfg):
     from mmedit.models.backbones.sr_backbones import SRCNN
     pytorch_model = SRCNN()
-    model_inputs = {'x': img}
-    onnx_file_path = tempfile.NamedTemporaryFile(suffix='.onnx').name

     onnx_cfg = get_onnx_config(deploy_cfg)
-    input_names = [k for k, v in model_inputs.items() if k != 'ctx']
+    onnx_file_path = onnx_cfg['save_file']
+    input_names = ['x']

     dynamic_axes = onnx_cfg.get('dynamic_axes', None)
@@ -67,7 +65,7 @@ def test_srcnn():
             cfg=deploy_cfg, backend=Backend.TENSORRT.value), torch.no_grad():
         torch.onnx.export(
             pytorch_model,
-            tuple([v for k, v in model_inputs.items()]),
+            img,
             onnx_file_path,
             export_params=True,
             input_names=input_names,
@@ -82,7 +80,4 @@ def test_srcnn():

     model = onnx.load(onnx_file_path)
     assert model is not None
-    try:
-        onnx.checker.check_model(model)
-    except onnx.checker.ValidationError:
-        assert False
+    onnx.checker.check_model(model)
diff --git a/tests/test_codebase/test_mmedit/test_super_resolution.py b/tests/test_codebase/test_mmedit/test_super_resolution.py
index 0b6f87000..47fb949be 100644
--- a/tests/test_codebase/test_mmedit/test_super_resolution.py
+++ b/tests/test_codebase/test_mmedit/test_super_resolution.py
@@ -1,76 +1,82 @@
 # Copyright (c) OpenMMLab. All rights reserved.
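Above, `tempfile.NamedTemporaryFile(suffix='.onnx').name` gives way to a path under pytest's `tmp_path`. The old idiom drops the file handle immediately, so the temporary file is deleted as soon as the object is garbage-collected and whatever the test later writes to that path is never cleaned up (on Windows the still-open handle can even make the path unusable); `tmp_path` is a per-test `pathlib.Path` that pytest creates and prunes itself. A minimal sketch, with an illustrative file name and a byte write standing in for `torch.onnx.export`:

def test_export_to_tmp_path(tmp_path):
    # tmp_path is unique per test and removed by pytest afterwards.
    onnx_file = tmp_path / 'end2end.onnx'
    onnx_file.write_bytes(b'\x08\x01')  # stand-in for torch.onnx.export
    assert onnx_file.exists()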
import os import tempfile -from tempfile import NamedTemporaryFile import mmcv import numpy as np import pytest import torch -import mmdeploy.apis.onnxruntime as ort_apis from mmdeploy.apis import build_task_processor -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Codebase, load_config +from mmdeploy.utils import load_config from mmdeploy.utils.test import SwitchBackendWrapper -try: - import_codebase(Codebase.MMEDIT) -except ImportError: - pytest.skip( - f'{Codebase.MMEDIT} is not installed.', allow_module_level=True) -model_cfg = 'tests/test_codebase/test_mmedit/data/model.py' -model_cfg = load_config(model_cfg)[0] -deploy_cfg = mmcv.Config( - dict( - backend_config=dict(type='onnxruntime'), - codebase_config=dict(type='mmedit', task='SuperResolution'), - onnx_config=dict( - type='onnx', - export_params=True, - keep_initializers_as_inputs=False, - opset_version=11, - input_shape=None, - input_names=['input'], - output_names=['output']))) -input_img = np.random.rand(32, 32, 3) -img_shape = [32, 32] -input = {'lq': input_img} -onnx_file = NamedTemporaryFile(suffix='.onnx').name -task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu') +@pytest.fixture(scope='module') +def model_cfg(): + cfg = 'tests/test_codebase/test_mmedit/data/model.py' + return load_config(cfg)[0] -def test_init_pytorch_model(): +@pytest.fixture(scope='module') +def deploy_cfg(): + return mmcv.Config( + dict( + backend_config=dict(type='onnxruntime'), + codebase_config=dict(type='mmedit', task='SuperResolution'), + onnx_config=dict( + type='onnx', + export_params=True, + keep_initializers_as_inputs=False, + opset_version=11, + input_shape=None, + input_names=['input'], + output_names=['output']))) + + +@pytest.fixture(scope='module') +def input_img(): + return np.random.rand(32, 32, 3) + + +@pytest.fixture(scope='module') +def model_input(input_img): + return {'lq': input_img} + + +@pytest.fixture(scope='module') +def task_processor(model_cfg, deploy_cfg): + return build_task_processor(model_cfg, deploy_cfg, 'cpu') + + +def test_init_pytorch_model(task_processor): torch_model = task_processor.init_pytorch_model(None) assert torch_model is not None -@pytest.fixture -def backend_model(): +@pytest.fixture(scope='module') +def backend_model(task_processor): from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) - wrapper = SwitchBackendWrapper(ORTWrapper) - wrapper.set(outputs={ - 'output': torch.rand(3, 50, 50), - }) + with SwitchBackendWrapper(ORTWrapper) as wrapper: + wrapper.set(outputs={ + 'output': torch.rand(3, 50, 50), + }) - yield task_processor.init_backend_model(['']) - - wrapper.recover() + yield task_processor.init_backend_model(['']) def test_init_backend_model(backend_model): assert backend_model is not None -def test_create_input(): - inputs = task_processor.create_input(input_img, img_shape=img_shape) +def test_create_input(task_processor, input_img): + inputs = task_processor.create_input( + input_img, img_shape=input_img.shape[:2]) assert inputs is not None -def test_visualize(backend_model): - result = task_processor.run_inference(backend_model, input) +def test_visualize(backend_model, task_processor, model_input, input_img): + result = task_processor.run_inference(backend_model, model_input) with tempfile.TemporaryDirectory() as dir: filename = dir + 'tmp.jpg' task_processor.visualize(backend_model, input_img, result[0], filename, @@ -78,21 +84,21 @@ def test_visualize(backend_model): assert 
os.path.exists(filename) -def test_run_inference(backend_model): - results = task_processor.run_inference(backend_model, input) +def test_run_inference(backend_model, task_processor, model_input): + results = task_processor.run_inference(backend_model, model_input) assert results is not None -def test_get_tensor_from_input(): - assert type(task_processor.get_tensor_from_input(input)) is not dict +def test_get_tensor_from_input(task_processor, model_input): + assert type(task_processor.get_tensor_from_input(model_input)) is not dict -def test_get_partition_cfg(): +def test_get_partition_cfg(task_processor): with pytest.raises(NotImplementedError): task_processor.get_partition_cfg(None) -def test_build_dataset(): +def test_build_dataset(task_processor): data = dict( test={ 'type': 'SRFolderDataset', @@ -114,7 +120,7 @@ def test_build_dataset(): assert dataloader is not None, 'Failed to build dataloader' -def test_single_gpu_test(backend_model): +def test_single_gpu_test(backend_model, model_cfg, task_processor): from mmcv.parallel import MMDataParallel dataset = task_processor.build_dataset(model_cfg, dataset_type='test') assert dataset is not None, 'Failed to build dataset' diff --git a/tests/test_codebase/test_mmedit/test_super_resolution_model.py b/tests/test_codebase/test_mmedit/test_super_resolution_model.py index aff765f84..1ab02bddb 100644 --- a/tests/test_codebase/test_mmedit/test_super_resolution_model.py +++ b/tests/test_codebase/test_mmedit/test_super_resolution_model.py @@ -4,55 +4,43 @@ import numpy as np import pytest import torch -import mmdeploy.backend.onnxruntime as ort_apis -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Backend, Codebase, load_config +from mmdeploy.utils import Backend, load_config from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker -try: - import_codebase(Codebase.MMEDIT) -except ImportError: - pytest.skip( - f'{Codebase.MMEDIT} is not installed.', allow_module_level=True) - @backend_checker(Backend.ONNXRUNTIME) class TestEnd2EndModel: - @classmethod - def setup_class(cls): + @pytest.fixture(scope='class') + def end2end_model(self): # force add backend wrapper regardless of plugins # make sure ONNXRuntimeEditor can use ORTWrapper inside itself from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) - - # simplify backend inference - cls.wrapper = SwitchBackendWrapper(ORTWrapper) - cls.outputs = { - 'outputs': torch.rand(3, 64, 64), - } - cls.wrapper.set(outputs=cls.outputs) - deploy_cfg = mmcv.Config( - {'onnx_config': { - 'output_names': ['outputs'] - }}) - model_cfg = 'tests/test_codebase/test_mmedit/data/model.py' - model_cfg = load_config(model_cfg)[0] from mmdeploy.codebase.mmedit.deploy.super_resolution_model import \ End2EndModel - cls.end2end_model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu', - model_cfg, deploy_cfg) - @classmethod - def teardown_class(cls): - cls.wrapper.recover() + # simplify backend inference + with SwitchBackendWrapper(ORTWrapper) as wrapper: + outputs = { + 'outputs': torch.rand(3, 64, 64), + } + wrapper.set(outputs=outputs) + deploy_cfg = mmcv.Config( + {'onnx_config': { + 'output_names': ['outputs'] + }}) + model_cfg = 'tests/test_codebase/test_mmedit/data/model.py' + model_cfg = load_config(model_cfg)[0] + model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu', model_cfg, + deploy_cfg) + yield model - def test_forward(self): + def test_forward(self, end2end_model): input_img = np.random.rand(3, 32, 32) - results = 
self.end2end_model.forward(input_img, test_mode=False) + results = end2end_model.forward(input_img, test_mode=False) assert results is not None - results = self.end2end_model.forward( + results = end2end_model.forward( input_img, test_mode=True, gt=torch.tensor(results[0])) assert results is not None diff --git a/tests/test_codebase/test_mmocr/conftest.py b/tests/test_codebase/test_mmocr/conftest.py new file mode 100644 index 000000000..2ea63d2b1 --- /dev/null +++ b/tests/test_codebase/test_mmocr/conftest.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest + +from mmdeploy.codebase import import_codebase +from mmdeploy.utils import Codebase + + +def pytest_ignore_collect(*args, **kwargs): + import importlib + return importlib.util.find_spec('mmocr') is None + + +@pytest.fixture(autouse=True, scope='package') +def import_all_modules(): + codebase = Codebase.MMOCR + try: + import_codebase(codebase) + except ImportError: + pytest.skip(f'{codebase} is not installed.', allow_module_level=True) diff --git a/tests/test_codebase/test_mmocr/test_mmocr_models.py b/tests/test_codebase/test_mmocr/test_mmocr_models.py index 5bfddd785..52986aded 100644 --- a/tests/test_codebase/test_mmocr/test_mmocr_models.py +++ b/tests/test_codebase/test_mmocr/test_mmocr_models.py @@ -1,24 +1,15 @@ # Copyright (c) OpenMMLab. All rights reserved. -import tempfile - import mmcv import numpy as np import pytest import torch +from mmocr.models.textdet.necks import FPNC -from mmdeploy.codebase import import_codebase from mmdeploy.core import RewriterContext, patch_model -from mmdeploy.utils import Backend, Codebase +from mmdeploy.utils import Backend from mmdeploy.utils.test import (WrapModel, check_backend, get_model_outputs, get_rewrite_outputs) -try: - import_codebase(Codebase.MMOCR) -except ImportError: - pytest.skip(f'{Codebase.MMOCR} is not installed.', allow_module_level=True) - -from mmocr.models.textdet.necks import FPNC - class FPNCNeckModel(FPNC): @@ -35,7 +26,8 @@ class FPNCNeckModel(FPNC): return output -def get_bidirectionallstm_model(): +@pytest.fixture +def bidirectionallstm_model(): from mmocr.models.textrecog.layers.lstm_layer import BidirectionalLSTM model = BidirectionalLSTM(32, 16, 16) @@ -43,7 +35,8 @@ def get_bidirectionallstm_model(): return model -def get_single_stage_text_detector_model(): +@pytest.fixture +def single_stage_text_detector(): from mmocr.models.textdet import SingleStageTextDetector backbone = dict( type='mmdet.ResNet', @@ -71,7 +64,8 @@ def get_single_stage_text_detector_model(): return model -def get_encode_decode_recognizer_model(): +@pytest.fixture +def encode_decode_recognizer(): from mmocr.models.textrecog import EncodeDecodeRecognizer cfg = dict( @@ -97,7 +91,9 @@ def get_encode_decode_recognizer_model(): return model -def get_crnn_decoder_model(rnn_flag): +@pytest.fixture(params=[True, False]) +def crnn_decoder_model(request): + rnn_flag = request.param from mmocr.models.textrecog.decoders import CRNNDecoder model = CRNNDecoder(32, 4, rnn_flag=rnn_flag) @@ -105,14 +101,16 @@ def get_crnn_decoder_model(rnn_flag): return model -def get_fpnc_neck_model(): +@pytest.fixture +def fpnc_neck_model(): model = FPNCNeckModel([2, 4, 8, 16]) model.requires_grad_(False) return model -def get_base_recognizer_model(): +@pytest.fixture +def base_recognizer(): from mmocr.models.textrecog import CRNNNet cfg = dict( @@ -138,10 +136,10 @@ def get_base_recognizer_model(): @pytest.mark.parametrize('backend', [Backend.NCNN]) -def test_bidirectionallstm(backend: 
Backend): +def test_bidirectionallstm(backend: Backend, bidirectionallstm_model): """Test forward rewrite of bidirectionallstm.""" check_backend(backend) - bilstm = get_bidirectionallstm_model() + bilstm = bidirectionallstm_model bilstm.cpu().eval() deploy_cfg = mmcv.Config( @@ -178,10 +176,10 @@ def test_bidirectionallstm(backend: Backend): @pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME]) -def test_simple_test_of_single_stage_text_detector(backend: Backend): +def test_simple_test_of_single_stage_text_detector(backend: Backend, + single_stage_text_detector): """Test simple_test single_stage_text_detector.""" check_backend(backend) - single_stage_text_detector = get_single_stage_text_detector_model() single_stage_text_detector.eval() deploy_cfg = mmcv.Config( @@ -214,11 +212,10 @@ def test_simple_test_of_single_stage_text_detector(backend: Backend): @pytest.mark.parametrize('backend', [Backend.NCNN]) -@pytest.mark.parametrize('rnn_flag', [True, False]) -def test_crnndecoder(backend: Backend, rnn_flag: bool): +def test_crnndecoder(backend: Backend, crnn_decoder_model): """Test forward rewrite of crnndecoder.""" check_backend(backend) - crnn_decoder = get_crnn_decoder_model(rnn_flag) + crnn_decoder = crnn_decoder_model crnn_decoder.cpu().eval() deploy_cfg = mmcv.Config( @@ -277,10 +274,10 @@ def test_crnndecoder(backend: Backend, rnn_flag: bool): 'valid_ratio': 1.0 }]]]) @pytest.mark.parametrize('is_dynamic', [True, False]) -def test_forward_of_base_recognizer(img_metas, is_dynamic, backend): +def test_forward_of_base_recognizer(img_metas, is_dynamic, backend, + base_recognizer): """Test forward base_recognizer.""" check_backend(backend) - base_recognizer = get_base_recognizer_model() base_recognizer.eval() if not is_dynamic: @@ -342,10 +339,10 @@ def test_forward_of_base_recognizer(img_metas, is_dynamic, backend): @pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME]) -def test_simple_test_of_encode_decode_recognizer(backend): +def test_simple_test_of_encode_decode_recognizer(backend, + encode_decode_recognizer): """Test simple_test encode_decode_recognizer.""" check_backend(backend) - encode_decode_recognizer = get_encode_decode_recognizer_model() encode_decode_recognizer.eval() deploy_cfg = mmcv.Config( @@ -383,10 +380,10 @@ def test_simple_test_of_encode_decode_recognizer(backend): @pytest.mark.parametrize('backend', [Backend.TENSORRT]) -def test_forward_of_fpnc(backend: Backend): +def test_forward_of_fpnc(backend: Backend, fpnc_neck_model): """Test forward rewrite of fpnc.""" check_backend(backend) - fpnc = get_fpnc_neck_model().cuda() + fpnc = fpnc_neck_model.cuda() fpnc.eval() input = torch.rand(1, 1, 64, 64).cuda() deploy_cfg = mmcv.Config( @@ -483,7 +480,7 @@ def get_sar_model_cfg(decoder_type: str): @pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME]) @pytest.mark.parametrize('decoder_type', ['SequentialSARDecoder', 'ParallelSARDecoder']) -def test_sar_model(backend: Backend, decoder_type): +def test_sar_model(backend: Backend, decoder_type, tmp_path): check_backend(backend) import os.path as osp @@ -506,7 +503,7 @@ def test_sar_model(backend: Backend, decoder_type): pytorch_model.cfg = sar_cfg patched_model = patch_model( pytorch_model, cfg=deploy_cfg, backend=backend.value) - onnx_file_path = tempfile.NamedTemporaryFile(suffix='.onnx').name + onnx_file_path = str(tmp_path / 'tmp.onnx') input_names = [k for k, v in model_inputs.items() if k != 'ctx'] with RewriterContext( cfg=deploy_cfg, backend=backend.value), torch.no_grad(): diff --git 
a/tests/test_codebase/test_mmocr/test_text_detection.py b/tests/test_codebase/test_mmocr/test_text_detection.py index d894727d9..12dec00f2 100644 --- a/tests/test_codebase/test_mmocr/test_text_detection.py +++ b/tests/test_codebase/test_mmocr/test_text_detection.py @@ -1,6 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. import os -from tempfile import NamedTemporaryFile, TemporaryDirectory import mmcv import numpy as np @@ -8,95 +7,104 @@ import pytest import torch from torch.utils.data import DataLoader -import mmdeploy.backend.onnxruntime as ort_apis from mmdeploy.apis import build_task_processor -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Codebase, load_config +from mmdeploy.utils import load_config from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper -try: - import_codebase(Codebase.MMOCR) -except ImportError: - pytest.skip(f'{Codebase.MMOCR} is not installed.', allow_module_level=True) - model_cfg_path = 'tests/test_codebase/test_mmocr/data/dbnet.py' -model_cfg = load_config(model_cfg_path)[0] -deploy_cfg = mmcv.Config( - dict( - backend_config=dict(type='onnxruntime'), - codebase_config=dict(type='mmocr', task='TextDetection'), - onnx_config=dict( - type='onnx', - export_params=True, - keep_initializers_as_inputs=False, - opset_version=11, - input_shape=None, - input_names=['input'], - output_names=['output']))) -onnx_file = NamedTemporaryFile(suffix='.onnx').name -task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu') + +@pytest.fixture(scope='module') +def model_cfg(): + return load_config(model_cfg_path)[0] + + +@pytest.fixture(scope='module') +def deploy_cfg(): + return mmcv.Config( + dict( + backend_config=dict(type='onnxruntime'), + codebase_config=dict(type='mmocr', task='TextDetection'), + onnx_config=dict( + type='onnx', + export_params=True, + keep_initializers_as_inputs=False, + opset_version=11, + input_shape=None, + input_names=['input'], + output_names=['output']))) + + +@pytest.fixture(scope='module') +def task_processor(model_cfg, deploy_cfg): + return build_task_processor(model_cfg, deploy_cfg, 'cpu') + + img_shape = (32, 32) -img = np.random.rand(*img_shape, 3).astype(np.uint8) -def test_init_pytorch_model(): +@pytest.fixture(scope='module') +def img(): + return np.random.rand(*img_shape, 3).astype(np.uint8) + + +def test_init_pytorch_model(task_processor): from mmocr.models.textdet.detectors.single_stage_text_detector import \ SingleStageDetector model = task_processor.init_pytorch_model(None) assert isinstance(model, SingleStageDetector) -@pytest.fixture -def backend_model(): +@pytest.fixture(scope='module') +def backend_model(task_processor): from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) - wrapper = SwitchBackendWrapper(ORTWrapper) - wrapper.set(outputs={ - 'output': torch.rand(1, 3, *img_shape), - }) + with SwitchBackendWrapper(ORTWrapper) as wrapper: + wrapper.set(outputs={ + 'output': torch.rand(1, 3, *img_shape), + }) - yield task_processor.init_backend_model(['']) - - wrapper.recover() + yield task_processor.init_backend_model(['']) def test_init_backend_model(backend_model): assert isinstance(backend_model, torch.nn.Module) -def test_create_input(): - inputs = task_processor.create_input(img, input_shape=img_shape) - assert isinstance(inputs, tuple) and len(inputs) == 2 +@pytest.fixture(scope='module') +def model_inputs(task_processor, img): + return task_processor.create_input(img, input_shape=img_shape) -def 
test_run_inference(backend_model): - input_dict, _ = task_processor.create_input(img, input_shape=img_shape) +def test_create_input(model_inputs): + assert isinstance(model_inputs, tuple) and len(model_inputs) == 2 + + +def test_run_inference(backend_model, task_processor, model_inputs): + input_dict, _ = model_inputs results = task_processor.run_inference(backend_model, input_dict) assert results is not None -def test_visualize(backend_model): - input_dict, _ = task_processor.create_input(img, input_shape=img_shape) +def test_visualize(backend_model, task_processor, model_inputs, img, tmp_path): + input_dict, _ = model_inputs results = task_processor.run_inference(backend_model, input_dict) - with TemporaryDirectory() as dir: - filename = dir + 'tmp.jpg' - task_processor.visualize(backend_model, img, results[0], filename, '') - assert os.path.exists(filename) + filename = str(tmp_path / 'tmp.jpg') + task_processor.visualize(backend_model, img, results[0], filename, '') + assert os.path.exists(filename) -def test_get_tensort_from_input(): +def test_get_tensor_from_input(task_processor): input_data = {'img': [torch.ones(3, 4, 5)]} inputs = task_processor.get_tensor_from_input(input_data) assert torch.equal(inputs, torch.ones(3, 4, 5)) -def test_get_partition_cfg(): +def test_get_partition_cfg(task_processor): with pytest.raises(NotImplementedError): _ = task_processor.get_partition_cfg(partition_type='') -def test_build_dataset_and_dataloader(): +def test_build_dataset_and_dataloader(model_cfg, task_processor): from torch.utils.data import DataLoader, Dataset dataset = task_processor.build_dataset( dataset_cfg=model_cfg, dataset_type='test') @@ -105,7 +113,7 @@ def test_build_dataset_and_dataloader(): assert isinstance(dataloader, DataLoader), 'Failed to build dataloader' -def test_single_gpu_test_and_evaluate(): +def test_single_gpu_test_and_evaluate(model_cfg, task_processor): from mmcv.parallel import MMDataParallel # Prepare dataloader diff --git a/tests/test_codebase/test_mmocr/test_text_detection_models.py b/tests/test_codebase/test_mmocr/test_text_detection_models.py index 819658ba3..0b6c4a112 100644 --- a/tests/test_codebase/test_mmocr/test_text_detection_models.py +++ b/tests/test_codebase/test_mmocr/test_text_detection_models.py @@ -1,63 +1,51 @@ # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp -from tempfile import NamedTemporaryFile import mmcv import numpy as np import pytest import torch -import mmdeploy.backend.onnxruntime as ort_apis -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Backend, Codebase, load_config +from mmdeploy.utils import Backend, load_config from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker -try: - import_codebase(Codebase.MMOCR) -except ImportError: - pytest.skip(f'{Codebase.MMOCR} is not installed.', allow_module_level=True) - IMAGE_SIZE = 32 @backend_checker(Backend.ONNXRUNTIME) class TestEnd2EndModel: - @classmethod - def setup_class(cls): + @pytest.fixture(scope='class') + def end2end_model(self): # force add backend wrapper regardless of plugins from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) # simplify backend inference - cls.wrapper = SwitchBackendWrapper(ORTWrapper) - cls.outputs = { - 'outputs': torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE), - } - cls.wrapper.set(outputs=cls.outputs) - deploy_cfg = mmcv.Config( - {'onnx_config': { - 'output_names': ['outputs'] - }}) - model_cfg_path = 'tests/test_codebase/test_mmocr/data/dbnet.py' - model_cfg = load_config(model_cfg_path)[0] + with SwitchBackendWrapper(ORTWrapper) as wrapper: + outputs = { + 'outputs': torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE), + } + wrapper.set(outputs=outputs) + deploy_cfg = mmcv.Config( + {'onnx_config': { + 'output_names': ['outputs'] + }}) + model_cfg_path = 'tests/test_codebase/test_mmocr/data/dbnet.py' + model_cfg = load_config(model_cfg_path)[0] - from mmdeploy.codebase.mmocr.deploy.text_detection_model import \ - End2EndModel - cls.end2end_model = End2EndModel( - Backend.ONNXRUNTIME, [''], - device='cpu', - deploy_cfg=deploy_cfg, - model_cfg=model_cfg) - - @classmethod - def teardown_class(cls): - cls.wrapper.recover() + from mmdeploy.codebase.mmocr.deploy.text_detection_model import \ + End2EndModel + model = End2EndModel( + Backend.ONNXRUNTIME, [''], + device='cpu', + deploy_cfg=deploy_cfg, + model_cfg=model_cfg) + yield model @pytest.mark.parametrize( 'ori_shape', [[IMAGE_SIZE, IMAGE_SIZE, 3], [2 * IMAGE_SIZE, 2 * IMAGE_SIZE, 3]]) - def test_forward(self, ori_shape): + def test_forward(self, ori_shape, end2end_model): imgs = [torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE)] img_metas = [[{ 'ori_shape': ori_shape, @@ -65,21 +53,21 @@ class TestEnd2EndModel: 'scale_factor': [1., 1., 1., 1.], 'filename': '' }]] - results = self.end2end_model.forward(imgs, img_metas) + results = end2end_model.forward(imgs, img_metas) assert results is not None, 'failed to get output using '\ 'End2EndModel' - def test_forward_test(self): + def test_forward_test(self, end2end_model): imgs = torch.rand(2, 3, IMAGE_SIZE, IMAGE_SIZE) - results = self.end2end_model.forward_test(imgs) + results = end2end_model.forward_test(imgs) assert isinstance(results[0], torch.Tensor) - def test_show_result(self): + def test_show_result(self, end2end_model, tmp_path): input_img = np.zeros([IMAGE_SIZE, IMAGE_SIZE, 3]) - img_path = NamedTemporaryFile(suffix='.jpg').name + img_path = str(tmp_path / 'tmp.jpg') result = {'boundary_result': [[1, 2, 3, 4, 5], [2, 2, 0, 4, 5]]} - self.end2end_model.show_result( + end2end_model.show_result( input_img, result, '', show=False, out_file=img_path) assert osp.exists(img_path), 'Fails to create drawn image.' 
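The hunk above is the conversion applied to every TestEnd2EndModel class in this patch: setup_class/teardown_class become a class-scoped fixture whose with block guarantees the backend wrapper is recovered even when a test inside the class raises. A minimal, self-contained sketch of the shape (Patcher is a hypothetical stand-in for SwitchBackendWrapper, not part of this patch):

import pytest


class Patcher:
    """Hypothetical stand-in for SwitchBackendWrapper."""

    def __enter__(self):
        self.patched = True  # imagine swapping the real backend for a stub
        return self

    def __exit__(self, *exc_info):
        self.patched = False  # imagine wrapper.recover(); runs even if a test failed


class TestSketch:

    @pytest.fixture(scope='class')
    def end2end_model(self):
        with Patcher() as patcher:
            yield patcher  # built once, shared by every test in the class

    def test_patched(self, end2end_model):
        assert end2end_model.patched

The old setup_class/teardown_class pair only restored the wrapper if teardown_class was reached; the with plus yield form ties recovery to fixture finalization, which pytest always runs.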
@@ -95,7 +83,6 @@ def test_build_text_detection_model(): codebase_config=dict(type='mmocr'))) from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) # simplify backend inference with SwitchBackendWrapper(ORTWrapper) as wrapper: diff --git a/tests/test_codebase/test_mmocr/test_text_recognition.py b/tests/test_codebase/test_mmocr/test_text_recognition.py index 5913ae844..da7048eae 100644 --- a/tests/test_codebase/test_mmocr/test_text_recognition.py +++ b/tests/test_codebase/test_mmocr/test_text_recognition.py @@ -1,6 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. import os -from tempfile import NamedTemporaryFile, TemporaryDirectory import mmcv import numpy as np @@ -8,96 +7,103 @@ import pytest import torch from torch.utils.data import DataLoader -import mmdeploy.backend.onnxruntime as ort_apis from mmdeploy.apis import build_task_processor -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Codebase, load_config +from mmdeploy.utils import load_config from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper -try: - import_codebase(Codebase.MMOCR) -except ImportError: - pytest.skip(f'{Codebase.MMOCR} is not installed.', allow_module_level=True) - model_cfg_path = 'tests/test_codebase/test_mmocr/data/crnn.py' -model_cfg = load_config(model_cfg_path)[0] -deploy_cfg = mmcv.Config( - dict( - backend_config=dict(type='onnxruntime'), - codebase_config=dict(type='mmocr', task='TextRecognition'), - onnx_config=dict( - type='onnx', - export_params=True, - keep_initializers_as_inputs=False, - opset_version=11, - input_shape=None, - input_names=['input'], - output_names=['output']))) -onnx_file = NamedTemporaryFile(suffix='.onnx').name -task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu') + +@pytest.fixture(scope='module') +def model_cfg(): + return load_config(model_cfg_path)[0] + + +@pytest.fixture(scope='module') +def deploy_cfg(): + return mmcv.Config( + dict( + backend_config=dict(type='onnxruntime'), + codebase_config=dict(type='mmocr', task='TextRecognition'), + onnx_config=dict( + type='onnx', + export_params=True, + keep_initializers_as_inputs=False, + opset_version=11, + input_shape=None, + input_names=['input'], + output_names=['output']))) + + +@pytest.fixture(scope='module') +def task_processor(model_cfg, deploy_cfg): + return build_task_processor(model_cfg, deploy_cfg, 'cpu') + + img_shape = (32, 32) -img = np.random.rand(*img_shape, 3).astype(np.uint8) -def test_init_pytorch_model(): +@pytest.fixture(scope='module') +def img(): + return np.random.rand(*img_shape, 3).astype(np.uint8) + + +def test_init_pytorch_model(task_processor): from mmocr.models.textrecog.recognizer import BaseRecognizer model = task_processor.init_pytorch_model(None) assert isinstance(model, BaseRecognizer) -@pytest.fixture -def backend_model(): +@pytest.fixture(scope='module') +def backend_model(task_processor): from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) - wrapper = SwitchBackendWrapper(ORTWrapper) - wrapper.set(outputs={ - 'output': torch.rand(1, 9, 37), - }) + with SwitchBackendWrapper(ORTWrapper) as wrapper: + wrapper.set(outputs={ + 'output': torch.rand(1, 9, 37), + }) - yield task_processor.init_backend_model(['']) - - wrapper.recover() + yield task_processor.init_backend_model(['']) def test_init_backend_model(backend_model): assert isinstance(backend_model, torch.nn.Module) -def test_create_input(): - inputs = 
task_processor.create_input(img, input_shape=img_shape) - assert isinstance(inputs, tuple) and len(inputs) == 2 +@pytest.fixture(scope='module') +def model_inputs(task_processor, img): + return task_processor.create_input(img, input_shape=img_shape) -def test_run_inference(backend_model): - input_dict, _ = task_processor.create_input(img, input_shape=img_shape) +def test_create_input(model_inputs): + assert isinstance(model_inputs, tuple) and len(model_inputs) == 2 + + +def test_run_inference(backend_model, task_processor, model_inputs): + input_dict, _ = model_inputs results = task_processor.run_inference(backend_model, input_dict) assert results is not None -def test_visualize(backend_model): - input_dict, _ = task_processor.create_input(img, input_shape=img_shape) +def test_visualize(backend_model, task_processor, model_inputs, img, tmp_path): + input_dict, _ = model_inputs results = task_processor.run_inference(backend_model, input_dict) - with TemporaryDirectory() as dir: - filename = dir + 'tmp.jpg' - task_processor.visualize(backend_model, img, results[0], filename, '') - assert os.path.exists(filename) + filename = str(tmp_path / 'tmp.jpg') + task_processor.visualize(backend_model, img, results[0], filename, '') + assert os.path.exists(filename) -def test_get_tensort_from_input(): +def test_get_tensor_from_input(task_processor): input_data = {'img': [torch.ones(3, 4, 5)]} inputs = task_processor.get_tensor_from_input(input_data) assert torch.equal(inputs, torch.ones(3, 4, 5)) -def test_get_partition_cfg(): - try: +def test_get_partition_cfg(task_processor): + with pytest.raises(NotImplementedError): _ = task_processor.get_partition_cfg(partition_type='') - except NotImplementedError: - pass -def test_build_dataset_and_dataloader(): +def test_build_dataset_and_dataloader(task_processor, model_cfg): from torch.utils.data import DataLoader, Dataset dataset = task_processor.build_dataset( dataset_cfg=model_cfg, dataset_type='test') @@ -106,7 +112,7 @@ def test_build_dataset_and_dataloader(): assert isinstance(dataloader, DataLoader), 'Failed to build dataloader' -def test_single_gpu_test_and_evaluate(): +def test_single_gpu_test_and_evaluate(task_processor, model_cfg): from mmcv.parallel import MMDataParallel # Prepare dataloader diff --git a/tests/test_codebase/test_mmocr/test_text_recognition_models.py b/tests/test_codebase/test_mmocr/test_text_recognition_models.py index 572f02dc7..5d00caf26 100644 --- a/tests/test_codebase/test_mmocr/test_text_recognition_models.py +++ b/tests/test_codebase/test_mmocr/test_text_recognition_models.py @@ -1,82 +1,73 @@ # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp -from tempfile import NamedTemporaryFile import mmcv import numpy as np import pytest import torch -import mmdeploy.backend.onnxruntime as ort_apis -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Backend, Codebase, load_config +from mmdeploy.utils import Backend, load_config from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker -try: - import_codebase(Codebase.MMOCR) -except ImportError: - pytest.skip(f'{Codebase.MMOCR} is not installed.', allow_module_level=True) - IMAGE_SIZE = 32 @backend_checker(Backend.ONNXRUNTIME) class TestEnd2EndModel: - @classmethod - def setup_class(cls): + @pytest.fixture(scope='class') + def end2end_model(self): # force add backend wrapper regardless of plugins from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) # simplify backend inference - cls.wrapper = SwitchBackendWrapper(ORTWrapper) - cls.outputs = { - 'output': torch.rand(1, 9, 37), - } - cls.wrapper.set(outputs=cls.outputs) - deploy_cfg = mmcv.Config({'onnx_config': {'output_names': ['output']}}) - model_cfg_path = 'tests/test_codebase/test_mmocr/data/crnn.py' - model_cfg = load_config(model_cfg_path)[0] + with SwitchBackendWrapper(ORTWrapper) as wrapper: + outputs = { + 'output': torch.rand(1, 9, 37), + } + wrapper.set(outputs=outputs) + deploy_cfg = mmcv.Config( + {'onnx_config': { + 'output_names': ['output'] + }}) + model_cfg_path = 'tests/test_codebase/test_mmocr/data/crnn.py' + model_cfg = load_config(model_cfg_path)[0] - from mmdeploy.codebase.mmocr.deploy.text_recognition_model import \ - End2EndModel - cls.end2end_model = End2EndModel( - Backend.ONNXRUNTIME, [''], - device='cpu', - deploy_cfg=deploy_cfg, - model_cfg=model_cfg) - - @classmethod - def teardown_class(cls): - cls.wrapper.recover() + from mmdeploy.codebase.mmocr.deploy.text_recognition_model import \ + End2EndModel + model = End2EndModel( + Backend.ONNXRUNTIME, [''], + device='cpu', + deploy_cfg=deploy_cfg, + model_cfg=model_cfg) + yield model @pytest.mark.parametrize( 'ori_shape', [[IMAGE_SIZE, IMAGE_SIZE, 3], [2 * IMAGE_SIZE, 2 * IMAGE_SIZE, 3]]) - def test_forward(self, ori_shape): + def test_forward(self, ori_shape, end2end_model): imgs = [torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE)] img_metas = [[{ 'ori_shape': ori_shape, 'img_shape': [IMAGE_SIZE, IMAGE_SIZE, 3], 'scale_factor': [1., 1., 1., 1.], }]] - results = self.end2end_model.forward(imgs, img_metas) + results = end2end_model.forward(imgs, img_metas) assert results is not None, 'failed to get output using '\ 'End2EndModel' - def test_forward_test(self): + def test_forward_test(self, end2end_model): imgs = torch.rand(2, 3, IMAGE_SIZE, IMAGE_SIZE) img_metas = [{}] - results = self.end2end_model.forward_test(imgs, img_metas) + results = end2end_model.forward_test(imgs, img_metas) assert isinstance(results[0], dict) - def test_show_result(self): + def test_show_result(self, end2end_model, tmp_path): input_img = np.zeros([IMAGE_SIZE, IMAGE_SIZE, 3]) - img_path = NamedTemporaryFile(suffix='.jpg').name + img_path = str(tmp_path / 'tmp.jpg') result = {'text': 'sier', 'score': [0.29, 0.62, 0.25, 0.54]} - self.end2end_model.show_result( + end2end_model.show_result( input_img, result, '', show=False, out_file=img_path) assert osp.exists(img_path), 'Fails to create drawn image.' 
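test_show_result above also shows the second recurring swap: NamedTemporaryFile(suffix='.jpg').name is replaced by pytest's built-in tmp_path fixture. The old idiom is fragile, since the unreferenced NamedTemporaryFile object is garbage-collected at once, deleting the file and leaving a stale path that the test repopulates but never cleans up; on Windows, a file still held open by NamedTemporaryFile cannot be reopened by another writer at all. tmp_path hands each test a fresh pathlib.Path directory that pytest creates and manages. A minimal sketch:

import os.path as osp


def test_show_result_sketch(tmp_path):
    # tmp_path is a unique per-test directory provided by pytest
    img_path = str(tmp_path / 'tmp.jpg')
    with open(img_path, 'w') as f:  # any writer may create the file normally
        f.write('fake image bytes')
    assert osp.exists(img_path)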
@@ -92,7 +83,6 @@ def test_build_text_recognition_model(): codebase_config=dict(type='mmocr'))) from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) # simplify backend inference with SwitchBackendWrapper(ORTWrapper) as wrapper: diff --git a/tests/test_codebase/test_mmpose/conftest.py b/tests/test_codebase/test_mmpose/conftest.py new file mode 100644 index 000000000..461b815bf --- /dev/null +++ b/tests/test_codebase/test_mmpose/conftest.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest + +from mmdeploy.codebase import import_codebase +from mmdeploy.utils import Codebase + + +def pytest_ignore_collect(*args, **kwargs): + import importlib + return importlib.util.find_spec('mmpose') is None + + +@pytest.fixture(autouse=True, scope='package') +def import_all_modules(): + codebase = Codebase.MMPOSE + try: + import_codebase(codebase) + except ImportError: + pytest.skip(f'{codebase} is not installed.', allow_module_level=True) diff --git a/tests/test_codebase/test_mmpose/test_mmpose_models.py b/tests/test_codebase/test_mmpose/test_mmpose_models.py index 34c4b862a..0f039bf2b 100644 --- a/tests/test_codebase/test_mmpose/test_mmpose_models.py +++ b/tests/test_codebase/test_mmpose/test_mmpose_models.py @@ -4,18 +4,12 @@ import numpy as np import pytest import torch -from mmdeploy.codebase import import_codebase from mmdeploy.utils import Backend, Codebase, Task from mmdeploy.utils.test import WrapModel, check_backend, get_rewrite_outputs -try: - import_codebase(Codebase.MMPOSE) -except ImportError: - pytest.skip( - f'{Codebase.MMPOSE} is not installed.', allow_module_level=True) - -def get_top_down_heatmap_simple_head_model(): +@pytest.fixture +def top_down_heatmap_simple_head_model(): from mmpose.models.heads import TopdownHeatmapSimpleHead model = TopdownHeatmapSimpleHead( 2, @@ -28,9 +22,10 @@ def get_top_down_heatmap_simple_head_model(): @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME, Backend.TENSORRT]) -def test_top_down_heatmap_simple_head_inference_model(backend_type: Backend): +def test_top_down_heatmap_simple_head_inference_model( + backend_type: Backend, top_down_heatmap_simple_head_model): check_backend(backend_type, True) - model = get_top_down_heatmap_simple_head_model() + model = top_down_heatmap_simple_head_model model.cpu().eval() if backend_type == Backend.TENSORRT: deploy_cfg = mmcv.Config( @@ -76,7 +71,8 @@ def test_top_down_heatmap_simple_head_inference_model(backend_type: Backend): model_output, rewrite_output, rtol=1e-03, atol=1e-05) -def get_top_down_heatmap_msmu_head_model(): +@pytest.fixture +def top_down_heatmap_msmu_head_model(): class DummyMSMUHead(torch.nn.Module): @@ -104,9 +100,10 @@ def get_top_down_heatmap_msmu_head_model(): @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME, Backend.TENSORRT]) -def test_top_down_heatmap_msmu_head_inference_model(backend_type: Backend): +def test_top_down_heatmap_msmu_head_inference_model( + backend_type: Backend, top_down_heatmap_msmu_head_model): check_backend(backend_type, True) - model = get_top_down_heatmap_msmu_head_model() + model = top_down_heatmap_msmu_head_model model.cpu().eval() if backend_type == Backend.TENSORRT: deploy_cfg = mmcv.Config( @@ -152,7 +149,8 @@ def test_top_down_heatmap_msmu_head_inference_model(backend_type: Backend): model_output, rewrite_output, rtol=1e-03, atol=1e-05) -def get_cross_resolution_weighting_model(): +@pytest.fixture +def cross_resolution_weighting_model(): from 
mmpose.models.backbones.litehrnet import CrossResolutionWeighting class DummyModel(torch.nn.Module): @@ -171,9 +169,10 @@ def get_cross_resolution_weighting_model(): @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME, Backend.NCNN]) -def test_cross_resolution_weighting_forward(backend_type: Backend): +def test_cross_resolution_weighting_forward(backend_type: Backend, + cross_resolution_weighting_model): check_backend(backend_type, True) - model = get_cross_resolution_weighting_model() + model = cross_resolution_weighting_model model.cpu().eval() imgs = torch.rand(1, 16, 16, 16) @@ -210,7 +209,8 @@ def test_cross_resolution_weighting_forward(backend_type: Backend): model_output, rewrite_output, rtol=1e-03, atol=1e-05) -def get_top_down_model(): +@pytest.fixture +def top_down_model(): from mmpose.models.detectors.top_down import TopDown model_cfg = dict( type='TopDown', @@ -237,9 +237,9 @@ def get_top_down_model(): @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME, Backend.TENSORRT]) -def test_top_down_forward(backend_type: Backend): +def test_top_down_forward(backend_type: Backend, top_down_model): check_backend(backend_type, True) - model = get_top_down_model() + model = top_down_model model.cpu().eval() if backend_type == Backend.TENSORRT: deploy_cfg = mmcv.Config( diff --git a/tests/test_codebase/test_mmpose/test_pose_detection.py b/tests/test_codebase/test_mmpose/test_pose_detection.py index 4a8085a63..9a5597660 100644 --- a/tests/test_codebase/test_mmpose/test_pose_detection.py +++ b/tests/test_codebase/test_mmpose/test_pose_detection.py @@ -1,56 +1,29 @@ # Copyright (c) OpenMMLab. All rights reserved. import os -from tempfile import NamedTemporaryFile, TemporaryDirectory import mmcv import numpy as np import pytest import torch -import mmdeploy.backend.onnxruntime as ort_apis from mmdeploy.apis import build_task_processor -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Backend, Codebase, Task, load_config +from mmdeploy.utils import load_config from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper -try: - import_codebase(Codebase.MMPOSE) -except ImportError: - pytest.skip( - f'{Codebase.MMPOSE.value} is not installed.', allow_module_level=True) - model_cfg_path = 'tests/test_codebase/test_mmpose/data/model.py' -model_cfg = load_config(model_cfg_path)[0] -deploy_cfg = mmcv.Config( - dict( - backend_config=dict(type='onnxruntime'), - codebase_config=dict(type='mmpose', task='PoseDetection'), - onnx_config=dict( - type='onnx', - export_params=True, - keep_initializers_as_inputs=False, - opset_version=11, - save_file='end2end.onnx', - input_names=['input'], - output_names=['output'], - input_shape=None))) - -onnx_file = NamedTemporaryFile(suffix='.onnx').name -task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu') -img_shape = (192, 256) -heatmap_shape = (48, 64) -# mmpose.apis.inference.LoadImage uses opencv, needs float32 in -# cv2.cvtColor. 
-img = np.random.rand(*img_shape, 3).astype(np.float32) -num_output_channels = model_cfg['data_cfg']['num_output_channels'] -def test_create_input(): - deploy_cfg = mmcv.Config( +@pytest.fixture(scope='module') +def model_cfg(): + return load_config(model_cfg_path)[0] + + +@pytest.fixture(scope='module') +def deploy_cfg(): + return mmcv.Config( dict( - backend_config=dict(type=Backend.ONNXRUNTIME.value), - codebase_config=dict( - type=Codebase.MMPOSE.value, task=Task.POSE_DETECTION.value), + backend_config=dict(type='onnxruntime'), + codebase_config=dict(type='mmpose', task='PoseDetection'), onnx_config=dict( type='onnx', export_params=True, @@ -60,69 +33,87 @@ def test_create_input(): input_names=['input'], output_names=['output'], input_shape=None))) - task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu') - inputs = task_processor.create_input(img, input_shape=img_shape) - assert isinstance(inputs, tuple) and len(inputs) == 2 -def test_init_pytorch_model(): +@pytest.fixture(scope='module') +def task_processor(model_cfg, deploy_cfg): + return build_task_processor(model_cfg, deploy_cfg, 'cpu') + + +img_shape = (192, 256) +heatmap_shape = (48, 64) + + +# mmpose.apis.inference.LoadImage uses opencv, needs float32 in +# cv2.cvtColor. +@pytest.fixture(scope='module') +def img(): + return np.random.rand(*img_shape, 3).astype(np.float32) + + +@pytest.fixture(scope='module') +def model_inputs(task_processor, img): + return task_processor.create_input(img, input_shape=img_shape) + + +def test_create_input(model_inputs): + assert isinstance(model_inputs, tuple) and len(model_inputs) == 2 + + +def test_init_pytorch_model(task_processor): from mmpose.models.detectors.base import BasePose model = task_processor.init_pytorch_model(None) assert isinstance(model, BasePose) -@pytest.fixture -def backend_model(): +@pytest.fixture(scope='module') +def backend_model(task_processor, model_cfg): from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) - wrapper = SwitchBackendWrapper(ORTWrapper) - wrapper.set(outputs={ - 'output': torch.rand(1, num_output_channels, *heatmap_shape), - }) + with SwitchBackendWrapper(ORTWrapper) as wrapper: + num_output_channels = model_cfg['data_cfg']['num_output_channels'] + wrapper.set( + outputs={ + 'output': torch.rand(1, num_output_channels, *heatmap_shape), + }) - yield task_processor.init_backend_model(['']) - - wrapper.recover() + yield task_processor.init_backend_model(['']) def test_init_backend_model(backend_model): assert isinstance(backend_model, torch.nn.Module) -def test_run_inference(backend_model): - input_dict, _ = task_processor.create_input(img, input_shape=img_shape) +def test_run_inference(backend_model, task_processor, model_inputs): + input_dict, _ = model_inputs results = task_processor.run_inference(backend_model, input_dict) assert results is not None -def test_visualize(backend_model): - input_dict, _ = task_processor.create_input(img, input_shape=img_shape) +def test_visualize(backend_model, task_processor, model_inputs, img, tmp_path): + input_dict, _ = model_inputs results = task_processor.run_inference(backend_model, input_dict) - with TemporaryDirectory() as dir: - filename = dir + 'tmp.jpg' - task_processor.visualize(backend_model, img, results[0], filename, '') - assert os.path.exists(filename) + filename = str(tmp_path / 'tmp.jpg') + task_processor.visualize(backend_model, img, results[0], filename, '') + assert os.path.exists(filename) -def test_get_tensor_from_input(): +def 
test_get_tensor_from_input(task_processor): input_data = {'img': torch.ones(3, 4, 5)} inputs = task_processor.get_tensor_from_input(input_data) assert torch.equal(inputs, torch.ones(3, 4, 5)) -def test_get_partition_cfg(): - try: +def test_get_partition_cfg(task_processor): + with pytest.raises(NotImplementedError): _ = task_processor.get_partition_cfg(partition_type='') - except NotImplementedError: - pass -def test_get_model_name(): +def test_get_model_name(task_processor): model_name = task_processor.get_model_name() assert isinstance(model_name, str) and model_name is not None -def test_build_dataset_and_dataloader(): +def test_build_dataset_and_dataloader(task_processor, model_cfg): from torch.utils.data import DataLoader, Dataset dataset = task_processor.build_dataset( dataset_cfg=model_cfg, dataset_type='test') @@ -131,7 +122,7 @@ def test_build_dataset_and_dataloader(): assert isinstance(dataloader, DataLoader), 'Failed to build dataloader' -def test_single_gpu_test_and_evaluate(): +def test_single_gpu_test_and_evaluate(task_processor, model_cfg): from mmcv.parallel import MMDataParallel dataset = task_processor.build_dataset( dataset_cfg=model_cfg, dataset_type='test') diff --git a/tests/test_codebase/test_mmpose/test_pose_detection_model.py b/tests/test_codebase/test_mmpose/test_pose_detection_model.py index 740dc2f04..c0b72c8e4 100644 --- a/tests/test_codebase/test_mmpose/test_pose_detection_model.py +++ b/tests/test_codebase/test_mmpose/test_pose_detection_model.py @@ -1,63 +1,50 @@ # Copyright (c) OpenMMLab. All rights reserved. import os.path as osp -from tempfile import NamedTemporaryFile import mmcv import numpy as np import pytest import torch -import mmdeploy.backend.onnxruntime as ort_apis -from mmdeploy.codebase import import_codebase from mmdeploy.utils import Backend, Codebase from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker IMAGE_H = 192 IMAGE_W = 256 -try: - import_codebase(Codebase.MMPOSE) -except ImportError: - pytest.skip( - f'{Codebase.MMPOSE} is not installed.', allow_module_level=True) - @backend_checker(Backend.ONNXRUNTIME) class TestEnd2EndModel: - @classmethod - def setup_class(cls): + @pytest.fixture(scope='class') + def end2end_model(self): # force add backend wrapper regardless of plugins from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) # simplify backend inference - cls.wrapper = SwitchBackendWrapper(ORTWrapper) - cls.outputs = { - 'outputs': torch.rand(1, 1, IMAGE_H, IMAGE_W), - } - cls.wrapper.set(outputs=cls.outputs) - deploy_cfg = mmcv.Config( - {'onnx_config': { - 'output_names': ['outputs'] - }}) + with SwitchBackendWrapper(ORTWrapper) as wrapper: + outputs = { + 'outputs': torch.rand(1, 1, IMAGE_H, IMAGE_W), + } + wrapper.set(outputs=outputs) + deploy_cfg = mmcv.Config( + {'onnx_config': { + 'output_names': ['outputs'] + }}) - from mmdeploy.utils import load_config - model_cfg_path = 'tests/test_codebase/test_mmpose/data/model.py' - model_cfg = load_config(model_cfg_path)[0] - from mmdeploy.codebase.mmpose.deploy.pose_detection_model import \ - End2EndModel - cls.end2end_model = End2EndModel( - Backend.ONNXRUNTIME, [''], - device='cpu', - deploy_cfg=deploy_cfg, - model_cfg=model_cfg) + from mmdeploy.utils import load_config + model_cfg_path = 'tests/test_codebase/test_mmpose/data/model.py' + model_cfg = load_config(model_cfg_path)[0] + from mmdeploy.codebase.mmpose.deploy.pose_detection_model import \ + End2EndModel + model = End2EndModel( + Backend.ONNXRUNTIME, [''], 
+ device='cpu', + deploy_cfg=deploy_cfg, + model_cfg=model_cfg) + yield model - @classmethod - def teardown_class(cls): - cls.wrapper.recover() - - def test_forward(self): + def test_forward(self, end2end_model): img = torch.rand(1, 3, IMAGE_H, IMAGE_W) img_metas = [{ 'image_file': @@ -67,23 +54,23 @@ class TestEnd2EndModel: 'location': torch.tensor([0.5, 0.5]), 'bbox_score': 0.5 }] - results = self.end2end_model.forward(img, img_metas) + results = end2end_model.forward(img, img_metas) assert results is not None, 'failed to get output using '\ 'End2EndModel' - def test_forward_test(self): + def test_forward_test(self, end2end_model): imgs = torch.rand(2, 3, IMAGE_H, IMAGE_W) - results = self.end2end_model.forward_test(imgs) + results = end2end_model.forward_test(imgs) assert isinstance(results[0], np.ndarray) - def test_show_result(self): + def test_show_result(self, end2end_model, tmp_path): input_img = np.zeros([IMAGE_H, IMAGE_W, 3]) - img_path = NamedTemporaryFile(suffix='.jpg').name + img_path = str(tmp_path / 'tmp.jpg') pred_bbox = torch.rand(1, 5) pred_keypoint = torch.rand((1, 10, 2)) result = [{'bbox': pred_bbox, 'keypoints': pred_keypoint}] - self.end2end_model.show_result( + end2end_model.show_result( input_img, result, '', show=False, out_file=img_path) assert osp.exists(img_path), 'Fails to create drawn image.' @@ -100,7 +87,6 @@ def test_build_pose_detection_model(): codebase_config=dict(type=Codebase.MMPOSE.value))) from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) # simplify backend inference with SwitchBackendWrapper(ORTWrapper) as wrapper: diff --git a/tests/test_codebase/test_mmrotate/conftest.py b/tests/test_codebase/test_mmrotate/conftest.py new file mode 100644 index 000000000..c44def355 --- /dev/null +++ b/tests/test_codebase/test_mmrotate/conftest.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest + +from mmdeploy.codebase import import_codebase +from mmdeploy.utils import Codebase + + +def pytest_ignore_collect(*args, **kwargs): + import importlib + return importlib.util.find_spec('mmrotate') is None + + +@pytest.fixture(autouse=True, scope='package') +def import_all_modules(): + codebase = Codebase.MMROTATE + try: + import_codebase(codebase) + except ImportError: + pytest.skip(f'{codebase} is not installed.', allow_module_level=True) diff --git a/tests/test_codebase/test_mmrotate/test_mmrotate_core.py b/tests/test_codebase/test_mmrotate/test_mmrotate_core.py index 174b888cc..a63da6b26 100644 --- a/tests/test_codebase/test_mmrotate/test_mmrotate_core.py +++ b/tests/test_codebase/test_mmrotate/test_mmrotate_core.py @@ -4,18 +4,11 @@ import numpy as np import pytest import torch -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Backend, Codebase +from mmdeploy.utils import Backend from mmdeploy.utils.test import (WrapFunction, WrapModel, backend_checker, check_backend, get_onnx_model, get_rewrite_outputs) -try: - import_codebase(Codebase.MMROTATE) -except ImportError: - pytest.skip( - f'{Codebase.MMROTATE} is not installed.', allow_module_level=True) - @backend_checker(Backend.ONNXRUNTIME) def test_multiclass_nms_rotated(): diff --git a/tests/test_codebase/test_mmrotate/test_mmrotate_models.py b/tests/test_codebase/test_mmrotate/test_mmrotate_models.py index 491832655..9d1465287 100644 --- a/tests/test_codebase/test_mmrotate/test_mmrotate_models.py +++ b/tests/test_codebase/test_mmrotate/test_mmrotate_models.py @@ -9,18 +9,11 @@ import numpy as np import pytest import torch -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Backend, Codebase +from mmdeploy.utils import Backend from mmdeploy.utils.config_utils import get_ir_config from mmdeploy.utils.test import (WrapModel, check_backend, get_model_outputs, get_rewrite_outputs) -try: - import_codebase(Codebase.MMROTATE) -except ImportError: - pytest.skip( - f'{Codebase.MMROTATE} is not installed.', allow_module_level=True) - def seed_everything(seed=1029): random.seed(seed) @@ -47,7 +40,17 @@ def convert_to_list(rewrite_output: Dict, output_names: List[str]) -> List: return outputs -def get_anchor_head_model(): +def get_head_inputs(seed, channels, num_inputs): + """Generate inputs for the head.""" + seed_everything(seed) + return [ + torch.rand(1, channels, pow(2, i), pow(2, i)) + for i in range(num_inputs, 0, -1) + ] + + +@pytest.fixture +def anchor_head(): """AnchorHead Config.""" test_cfg = mmcv.Config( dict( @@ -137,10 +140,10 @@ def get_deploy_cfg(backend_type: Backend, ir_type: str): @pytest.mark.parametrize('backend_type, ir_type', [(Backend.ONNXRUNTIME, 'onnx')]) -def test_base_dense_head_get_bboxes(backend_type: Backend, ir_type: str): +def test_base_dense_head_get_bboxes(backend_type: Backend, ir_type: str, + anchor_head): """Test get_bboxes rewrite of base dense head.""" check_backend(backend_type) - anchor_head = get_anchor_head_model() anchor_head.cpu().eval() s = 128 img_metas = [{ @@ -156,12 +159,8 @@ def test_base_dense_head_get_bboxes(backend_type: Backend, ir_type: str): # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2). 
# the bboxes's size: (1, 45, 32, 32), (1, 45, 16, 16), # (1, 45, 8, 8), (1, 45, 4, 4), (1, 45, 2, 2) - seed_everything(1234) - cls_score = [ - torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1) - ] - seed_everything(5678) - bboxes = [torch.rand(1, 45, pow(2, i), pow(2, i)) for i in range(5, 0, -1)] + cls_score = get_head_inputs(1234, 36, 5) + bboxes = get_head_inputs(5678, 45, 5) # to get outputs of pytorch model model_inputs = { @@ -202,7 +201,8 @@ def test_base_dense_head_get_bboxes(backend_type: Backend, ir_type: str): assert rewrite_outputs is not None -def get_single_roi_extractor(): +@pytest.fixture +def single_roi_extractor(): """SingleRoIExtractor Config.""" from mmrotate.models.roi_heads import RotatedSingleRoIExtractor roi_layer = dict( @@ -216,10 +216,10 @@ def get_single_roi_extractor(): @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) -def test_rotated_single_roi_extractor(backend_type: Backend): +def test_rotated_single_roi_extractor(backend_type: Backend, + single_roi_extractor): check_backend(backend_type) - single_roi_extractor = get_single_roi_extractor() output_names = ['roi_feat'] deploy_cfg = mmcv.Config( dict( @@ -262,7 +262,8 @@ def test_rotated_single_roi_extractor(backend_type: Backend): model_output, backend_output, rtol=1e-03, atol=1e-05) -def get_oriented_rpn_head_model(): +@pytest.fixture +def oriented_rpn_head_model(): """Oriented RPN Head Config.""" test_cfg = mmcv.Config( dict( @@ -283,9 +284,10 @@ def get_oriented_rpn_head_model(): @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) -def test_get_bboxes_of_oriented_rpn_head(backend_type: Backend): +def test_get_bboxes_of_oriented_rpn_head(backend_type: Backend, + oriented_rpn_head_model): check_backend(backend_type) - head = get_oriented_rpn_head_model() + head = oriented_rpn_head_model head.cpu().eval() s = 128 img_metas = [{ @@ -312,12 +314,8 @@ def test_get_bboxes_of_oriented_rpn_head(backend_type: Backend): # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2). # the bboxes's size: (1, 54, 32, 32), (1, 54, 16, 16), # (1, 54, 8, 8), (1, 54, 4, 4), (1, 54, 2, 2) - seed_everything(1234) - cls_score = [ - torch.rand(1, 9, pow(2, i), pow(2, i)) for i in range(5, 0, -1) - ] - seed_everything(5678) - bboxes = [torch.rand(1, 54, pow(2, i), pow(2, i)) for i in range(5, 0, -1)] + cls_score = get_head_inputs(1234, 9, 5) + bboxes = get_head_inputs(5678, 54, 5) # to get outputs of onnx model after rewrite img_metas[0]['img_shape'] = torch.Tensor([s, s]) @@ -334,7 +332,8 @@ def test_get_bboxes_of_oriented_rpn_head(backend_type: Backend): assert rewrite_outputs is not None -def get_rotated_rpn_head_model(): +@pytest.fixture +def rotated_rpn_head_model(): """Oriented RPN Head Config.""" test_cfg = mmcv.Config( dict( @@ -364,9 +363,10 @@ def get_rotated_rpn_head_model(): @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) -def test_get_bboxes_of_rotated_rpn_head(backend_type: Backend): +def test_get_bboxes_of_rotated_rpn_head(backend_type: Backend, + rotated_rpn_head_model): check_backend(backend_type) - head = get_rotated_rpn_head_model() + head = rotated_rpn_head_model head.cpu().eval() s = 128 img_metas = [{ @@ -393,12 +393,8 @@ def test_get_bboxes_of_rotated_rpn_head(backend_type: Backend): # (1, 3, 8, 8), (1, 3, 4, 4), (1, 3, 2, 2). 
# the bboxes's size: (1, 18, 32, 32), (1, 18, 16, 16), # (1, 18, 8, 8), (1, 18, 4, 4), (1, 18, 2, 2) - seed_everything(1234) - cls_score = [ - torch.rand(1, 3, pow(2, i), pow(2, i)) for i in range(5, 0, -1) - ] - seed_everything(5678) - bboxes = [torch.rand(1, 18, pow(2, i), pow(2, i)) for i in range(5, 0, -1)] + cls_score = get_head_inputs(1234, 3, 5) + bboxes = get_head_inputs(5678, 18, 5) # to get outputs of onnx model after rewrite img_metas[0]['img_shape'] = torch.Tensor([s, s]) @@ -468,8 +464,7 @@ def test_rotate_standard_roi_head__simple_test(backend_type: Backend): test_cfg=test_cfg) head.cpu().eval() - seed_everything(1234) - x = [torch.rand(1, 3, pow(2, i), pow(2, i)) for i in range(4, 0, -1)] + x = get_head_inputs(1234, 3, 4) proposals = [torch.rand(1, 100, 6), torch.randint(0, 10, (1, 100))] img_metas = [{'img_shape': torch.tensor([224, 224])}] @@ -536,7 +531,7 @@ def test_gv_ratio_roi_head__simple_test(backend_type: Backend): head.cpu().eval() seed_everything(1234) - x = [torch.rand(1, 3, pow(2, i), pow(2, i)) for i in range(4, 0, -1)] + x = get_head_inputs(1234, 3, 4) bboxes = torch.rand(1, 100, 2) bboxes = torch.cat( [bboxes, bboxes + torch.rand(1, 100, 2) + torch.rand(1, 100, 1)], @@ -554,7 +549,8 @@ def test_gv_ratio_roi_head__simple_test(backend_type: Backend): assert rewrite_outputs is not None -def get_roi_trans_roi_head_model(): +@pytest.fixture +def roi_trans_roi_head_model(): """Oriented RPN Head Config.""" angle_version = 'le90' @@ -631,11 +627,12 @@ def get_roi_trans_roi_head_model(): return model -@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) -def test_simple_test_of_roi_trans_roi_head(backend_type: Backend): +@pytest.mark.parametrize('backend_type', [Backend.TENSORRT]) +def test_simple_test_of_roi_trans_roi_head(backend_type: Backend, + roi_trans_roi_head_model): check_backend(backend_type) - roi_head = get_roi_trans_roi_head_model() + roi_head = roi_trans_roi_head_model roi_head.cpu() seed_everything(1234) @@ -661,7 +658,9 @@ def test_simple_test_of_roi_trans_roi_head(backend_type: Backend): output_names = ['det_bboxes', 'det_labels'] deploy_cfg = mmcv.Config( dict( - backend_config=dict(type=backend_type.value), + backend_config=dict( + type=backend_type.value, + common_config=dict(max_workspace_size=1 << 30)), onnx_config=dict(output_names=output_names, input_shape=None), codebase_config=dict( type='mmrotate', diff --git a/tests/test_codebase/test_mmrotate/test_rotated_detection.py b/tests/test_codebase/test_mmrotate/test_rotated_detection.py index 34b921a73..4f568736a 100644 --- a/tests/test_codebase/test_mmrotate/test_rotated_detection.py +++ b/tests/test_codebase/test_mmrotate/test_rotated_detection.py @@ -1,6 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. 
import os -from tempfile import NamedTemporaryFile, TemporaryDirectory import mmcv import numpy as np @@ -9,64 +8,74 @@ import torch from torch.utils.data import DataLoader from torch.utils.data.dataset import Dataset -import mmdeploy.backend.onnxruntime as ort_apis from mmdeploy.apis import build_task_processor -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Codebase, load_config +from mmdeploy.utils import load_config from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper -try: - import_codebase(Codebase.MMROTATE) -except ImportError: - pytest.skip( - f'{Codebase.MMROTATE} is not installed.', allow_module_level=True) - model_cfg_path = 'tests/test_codebase/test_mmrotate/data/model.py' -model_cfg = load_config(model_cfg_path)[0] -deploy_cfg = mmcv.Config( - dict( - backend_config=dict(type='onnxruntime'), - codebase_config=dict( - type='mmrotate', - task='RotatedDetection', - post_processing=dict( - score_threshold=0.05, - iou_threshold=0.1, - pre_top_k=2000, - keep_top_k=2000)), - onnx_config=dict( - type='onnx', - export_params=True, - keep_initializers_as_inputs=False, - opset_version=11, - input_shape=None, - input_names=['input'], - output_names=['dets', 'labels']))) -onnx_file = NamedTemporaryFile(suffix='.onnx').name -task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu') + + +@pytest.fixture(scope='module') +def model_cfg(): + return load_config(model_cfg_path)[0] + + +@pytest.fixture(scope='module') +def deploy_cfg(): + return mmcv.Config( + dict( + backend_config=dict(type='onnxruntime'), + codebase_config=dict( + type='mmrotate', + task='RotatedDetection', + post_processing=dict( + score_threshold=0.05, + iou_threshold=0.1, + pre_top_k=2000, + keep_top_k=2000)), + onnx_config=dict( + type='onnx', + export_params=True, + keep_initializers_as_inputs=False, + opset_version=11, + input_shape=None, + input_names=['input'], + output_names=['dets', 'labels']))) + + +@pytest.fixture(scope='module') +def task_processor(model_cfg, deploy_cfg): + return build_task_processor(model_cfg, deploy_cfg, 'cpu') + + img_shape = (32, 32) -img = np.random.rand(*img_shape, 3) -def test_init_pytorch_model(): +@pytest.fixture(scope='module') +def img(): + return np.random.rand(*img_shape, 3) + + +@pytest.fixture(scope='module') +def torch_model(task_processor): + return task_processor.init_pytorch_model(None) + + +def test_init_pytorch_model(torch_model): from mmrotate.models import RotatedBaseDetector - model = task_processor.init_pytorch_model(None) - assert isinstance(model, RotatedBaseDetector) + assert isinstance(torch_model, RotatedBaseDetector) -@pytest.fixture -def backend_model(): +@pytest.fixture(scope='module') +def backend_model(task_processor): from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) - wrapper = SwitchBackendWrapper(ORTWrapper) - wrapper.set(outputs={ - 'dets': torch.rand(1, 10, 6), - 'labels': torch.rand(1, 10) - }) + with SwitchBackendWrapper(ORTWrapper) as wrapper: + wrapper.set(outputs={ + 'dets': torch.rand(1, 10, 6), + 'labels': torch.rand(1, 10) + }) - yield task_processor.init_backend_model(['']) - - wrapper.recover() + yield task_processor.init_backend_model(['']) def test_init_backend_model(backend_model): @@ -75,18 +84,22 @@ def test_init_backend_model(backend_model): assert isinstance(backend_model, End2EndModel) +@pytest.fixture(scope='module') +def model_inputs(task_processor, img): + return task_processor.create_input(img, input_shape=img_shape) + + 
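The hunks above, like their counterparts in the other codebases, move module-level globals (model_cfg, deploy_cfg, task_processor, img, model_inputs) into scope='module' fixtures. The expensive objects are still built only once, because pytest caches a module-scoped fixture's return value, yet nothing executes at import time anymore, so collection no longer pays the cost, or risks the failure, of a missing codebase. A self-contained sketch of the caching behaviour; the remaining hunks below continue the same conversion:

import pytest

BUILD_COUNT = {'n': 0}


@pytest.fixture(scope='module')
def expensive_resource():
    # imagine build_task_processor(model_cfg, deploy_cfg, 'cpu') here
    BUILD_COUNT['n'] += 1
    return object()


def test_first_use(expensive_resource):
    assert BUILD_COUNT['n'] == 1


def test_second_use(expensive_resource):
    # same cached instance; the fixture body did not run again
    assert BUILD_COUNT['n'] == 1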
@pytest.mark.parametrize('device', ['cpu']) -def test_create_input(device): +def test_create_input(device, task_processor, model_inputs): original_device = task_processor.device task_processor.device = device - inputs = task_processor.create_input(img, input_shape=img_shape) - assert len(inputs) == 2 + assert len(model_inputs) == 2 task_processor.device = original_device -def test_run_inference(backend_model): - torch_model = task_processor.init_pytorch_model(None) - input_dict, _ = task_processor.create_input(img, input_shape=img_shape) +def test_run_inference(backend_model, task_processor, torch_model, + model_inputs): + input_dict, _ = model_inputs torch_results = task_processor.run_inference(torch_model, input_dict) backend_results = task_processor.run_inference(backend_model, input_dict) assert torch_results is not None @@ -94,21 +107,20 @@ def test_run_inference(backend_model): assert len(torch_results[0]) == len(backend_results[0]) -def test_visualize(backend_model): - input_dict, _ = task_processor.create_input(img, input_shape=img_shape) +def test_visualize(backend_model, task_processor, model_inputs, img, tmp_path): + input_dict, _ = model_inputs results = task_processor.run_inference(backend_model, input_dict) - with TemporaryDirectory() as dir: - filename = dir + 'tmp.jpg' - task_processor.visualize(backend_model, img, results[0], filename, '') - assert os.path.exists(filename) + filename = str(tmp_path / 'tmp.jpg') + task_processor.visualize(backend_model, img, results[0], filename, '') + assert os.path.exists(filename) -def test_get_partition_cfg(): +def test_get_partition_cfg(task_processor): with pytest.raises(NotImplementedError): _ = task_processor.get_partition_cfg(partition_type='') -def test_build_dataset_and_dataloader(): +def test_build_dataset_and_dataloader(task_processor, model_cfg): dataset = task_processor.build_dataset( dataset_cfg=model_cfg, dataset_type='test') assert isinstance(dataset, Dataset), 'Failed to build dataset' @@ -116,7 +128,7 @@ def test_build_dataset_and_dataloader(): assert isinstance(dataloader, DataLoader), 'Failed to build dataloader' -def test_single_gpu_test_and_evaluate(): +def test_single_gpu_test_and_evaluate(task_processor, model_cfg, tmp_path): from mmcv.parallel import MMDataParallel class DummyDataset(Dataset): @@ -143,6 +155,6 @@ def test_single_gpu_test_and_evaluate(): # Run test outputs = task_processor.single_gpu_test(model, dataloader) assert isinstance(outputs, list) - output_file = NamedTemporaryFile(suffix='.pkl').name + output_file = str(tmp_path / 'tmp.pkl') task_processor.evaluate_outputs( model_cfg, outputs, dataset, 'bbox', out=output_file, format_only=True) diff --git a/tests/test_codebase/test_mmrotate/test_rotated_detection_model.py b/tests/test_codebase/test_mmrotate/test_rotated_detection_model.py index d13617488..c1c91233b 100644 --- a/tests/test_codebase/test_mmrotate/test_rotated_detection_model.py +++ b/tests/test_codebase/test_mmrotate/test_rotated_detection_model.py @@ -1,65 +1,52 @@ # Copyright (c) OpenMMLab. All rights reserved. 
import os.path as osp -from tempfile import NamedTemporaryFile import mmcv import numpy as np import pytest import torch -import mmdeploy.backend.onnxruntime as ort_apis -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Backend, Codebase, load_config +from mmdeploy.utils import Backend, load_config from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker -try: - import_codebase(Codebase.MMROTATE) -except ImportError: - pytest.skip( - f'{Codebase.MMROTATE} is not installed.', allow_module_level=True) - IMAGE_SIZE = 32 @backend_checker(Backend.ONNXRUNTIME) class TestEnd2EndModel: - @classmethod - def setup_class(cls): + @pytest.fixture(scope='class') + def end2end_model(self): # force add backend wrapper regardless of plugins from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) - - # simplify backend inference - cls.wrapper = SwitchBackendWrapper(ORTWrapper) - cls.outputs = { - 'dets': torch.rand(1, 10, 6), - 'labels': torch.rand(1, 10) - } - cls.wrapper.set(outputs=cls.outputs) - deploy_cfg = mmcv.Config( - {'onnx_config': { - 'output_names': ['dets', 'labels'] - }}) - model_cfg_path = 'tests/test_codebase/test_mmrotate/data/model.py' - model_cfg = load_config(model_cfg_path)[0] - from mmdeploy.codebase.mmrotate.deploy.rotated_detection_model import \ End2EndModel - cls.end2end_model = End2EndModel( - Backend.ONNXRUNTIME, [''], ['' for i in range(15)], - device='cpu', - deploy_cfg=deploy_cfg, - model_cfg=model_cfg) - @classmethod - def teardown_class(cls): - cls.wrapper.recover() + # simplify backend inference + with SwitchBackendWrapper(ORTWrapper) as wrapper: + outputs = { + 'dets': torch.rand(1, 10, 6), + 'labels': torch.rand(1, 10) + } + wrapper.set(outputs=outputs) + deploy_cfg = mmcv.Config( + {'onnx_config': { + 'output_names': ['dets', 'labels'] + }}) + model_cfg_path = 'tests/test_codebase/test_mmrotate/data/model.py' + model_cfg = load_config(model_cfg_path)[0] + + model = End2EndModel( + Backend.ONNXRUNTIME, [''], ['' for i in range(15)], + device='cpu', + deploy_cfg=deploy_cfg, + model_cfg=model_cfg) + yield model @pytest.mark.parametrize( 'ori_shape', [[IMAGE_SIZE, IMAGE_SIZE, 3], [2 * IMAGE_SIZE, 2 * IMAGE_SIZE, 3]]) - def test_forward(self, ori_shape): + def test_forward(self, ori_shape, end2end_model): imgs = [torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE)] img_metas = [[{ 'ori_shape': ori_shape, @@ -67,21 +54,21 @@ class TestEnd2EndModel: 'scale_factor': [1., 1., 1., 1.], 'filename': '' }]] - results = self.end2end_model.forward(imgs, img_metas) + results = end2end_model.forward(imgs, img_metas) assert results is not None, 'failed to get output using '\ 'End2EndModel' - def test_forward_test(self): + def test_forward_test(self, end2end_model): imgs = torch.rand(2, 3, IMAGE_SIZE, IMAGE_SIZE) - results = self.end2end_model.forward_test(imgs) + results = end2end_model.forward_test(imgs) assert isinstance(results[0], torch.Tensor) - def test_show_result(self): + def test_show_result(self, end2end_model, tmp_path): input_img = np.zeros([IMAGE_SIZE, IMAGE_SIZE, 3]) - img_path = NamedTemporaryFile(suffix='.jpg').name + img_path = str(tmp_path / 'tmp.jpg') result = torch.rand(1, 10, 6) - self.end2end_model.show_result( + end2end_model.show_result( input_img, result, '', show=False, out_file=img_path) assert osp.exists(img_path) @@ -97,7 +84,6 @@ def test_build_rotated_detection_model(): codebase_config=dict(type='mmrotate'))) from mmdeploy.backend.onnxruntime import ORTWrapper - 
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) # simplify backend inference with SwitchBackendWrapper(ORTWrapper) as wrapper: diff --git a/tests/test_codebase/test_mmseg/conftest.py b/tests/test_codebase/test_mmseg/conftest.py new file mode 100644 index 000000000..1c2fcc0e3 --- /dev/null +++ b/tests/test_codebase/test_mmseg/conftest.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest + +from mmdeploy.codebase import import_codebase +from mmdeploy.utils import Codebase + + +def pytest_ignore_collect(*args, **kwargs): + import importlib + return importlib.util.find_spec('mmseg') is None + + +@pytest.fixture(autouse=True, scope='package') +def import_all_modules(): + codebase = Codebase.MMSEG + try: + import_codebase(codebase) + except ImportError: + pytest.skip(f'{codebase} is not installed.', allow_module_level=True) diff --git a/tests/test_codebase/test_mmseg/test_mmseg_models.py b/tests/test_codebase/test_mmseg/test_mmseg_models.py index 29a24b2a6..4d8a7cbbc 100644 --- a/tests/test_codebase/test_mmseg/test_mmseg_models.py +++ b/tests/test_codebase/test_mmseg/test_mmseg_models.py @@ -5,20 +5,13 @@ import pytest import torch import torch.nn as nn from mmcv import ConfigDict +from mmseg.models import BACKBONES, HEADS +from mmseg.models.decode_heads.decode_head import BaseDecodeHead -from mmdeploy.codebase import import_codebase from mmdeploy.utils import Backend, Codebase, Task from mmdeploy.utils.test import (WrapModel, check_backend, get_model_outputs, get_rewrite_outputs) -try: - import_codebase(Codebase.MMSEG) -except ImportError: - pytest.skip(f'{Codebase.MMSEG} is not installed.', allow_module_level=True) - -from mmseg.models import BACKBONES, HEADS -from mmseg.models.decode_heads.decode_head import BaseDecodeHead - @BACKBONES.register_module() class ExampleBackbone(nn.Module): diff --git a/tests/test_codebase/test_mmseg/test_segmentation.py b/tests/test_codebase/test_mmseg/test_segmentation.py index 8e89df869..e7bd3517d 100644 --- a/tests/test_codebase/test_mmseg/test_segmentation.py +++ b/tests/test_codebase/test_mmseg/test_segmentation.py @@ -1,7 +1,6 @@ # Copyright (c) OpenMMLab. All rights reserved. 
import copy import os -from tempfile import NamedTemporaryFile, TemporaryDirectory from typing import Any import mmcv @@ -10,40 +9,49 @@ import pytest import torch from torch.utils.data import DataLoader -import mmdeploy.backend.onnxruntime as ort_apis from mmdeploy.apis import build_task_processor -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Codebase, load_config +from mmdeploy.utils import load_config from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper -try: - import_codebase(Codebase.MMSEG) -except ImportError: - pytest.skip(f'{Codebase.MMSEG} is not installed.', allow_module_level=True) - model_cfg_path = 'tests/test_codebase/test_mmseg/data/model.py' -model_cfg = load_config(model_cfg_path)[0] -deploy_cfg = mmcv.Config( - dict( - backend_config=dict(type='onnxruntime'), - codebase_config=dict(type='mmseg', task='Segmentation'), - onnx_config=dict( - type='onnx', - export_params=True, - keep_initializers_as_inputs=False, - opset_version=11, - input_shape=None, - input_names=['input'], - output_names=['output']))) -onnx_file = NamedTemporaryFile(suffix='.onnx').name -task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu') + +@pytest.fixture(scope='module') +def model_cfg(): + return load_config(model_cfg_path)[0] + + +@pytest.fixture(scope='module') +def deploy_cfg(): + return mmcv.Config( + dict( + backend_config=dict(type='onnxruntime'), + codebase_config=dict(type='mmseg', task='Segmentation'), + onnx_config=dict( + type='onnx', + export_params=True, + keep_initializers_as_inputs=False, + opset_version=11, + input_shape=None, + input_names=['input'], + output_names=['output']))) + + +@pytest.fixture(scope='module') +def task_processor(model_cfg, deploy_cfg): + return build_task_processor(model_cfg, deploy_cfg, 'cpu') + + img_shape = (32, 32) -img = np.random.rand(*img_shape, 3) + + +@pytest.fixture(scope='module') +def img(): + return np.random.rand(*img_shape, 3) @pytest.mark.parametrize('from_mmrazor', [True, False, '123', 0]) -def test_init_pytorch_model(from_mmrazor: Any): +def test_init_pytorch_model(from_mmrazor: Any, task_processor, deploy_cfg): from mmseg.models.segmentors.base import BaseSegmentor if from_mmrazor is False: _task_processor = task_processor @@ -72,58 +80,56 @@ def test_init_pytorch_model(from_mmrazor: Any): assert isinstance(model, BaseSegmentor) -@pytest.fixture -def backend_model(): +@pytest.fixture(scope='module') +def backend_model(task_processor): from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) - wrapper = SwitchBackendWrapper(ORTWrapper) - wrapper.set(outputs={ - 'output': torch.rand(1, 1, *img_shape), - }) + with SwitchBackendWrapper(ORTWrapper) as wrapper: + wrapper.set(outputs={ + 'output': torch.rand(1, 1, *img_shape), + }) - yield task_processor.init_backend_model(['']) - - wrapper.recover() + yield task_processor.init_backend_model(['']) def test_init_backend_model(backend_model): assert isinstance(backend_model, torch.nn.Module) -def test_create_input(): - inputs = task_processor.create_input(img, input_shape=img_shape) - assert isinstance(inputs, tuple) and len(inputs) == 2 +@pytest.fixture(scope='module') +def model_inputs(task_processor, img): + return task_processor.create_input(img, input_shape=img_shape) -def test_run_inference(backend_model): +def test_create_input(model_inputs): + assert isinstance(model_inputs, tuple) and len(model_inputs) == 2 + + +def test_run_inference(backend_model, task_processor, img): input_dict, _ = 
task_processor.create_input(img, input_shape=img_shape) results = task_processor.run_inference(backend_model, input_dict) assert results is not None -def test_visualize(backend_model): - input_dict, _ = task_processor.create_input(img, input_shape=img_shape) +def test_visualize(backend_model, task_processor, model_inputs, img, tmp_path): + input_dict, _ = model_inputs results = task_processor.run_inference(backend_model, input_dict) - with TemporaryDirectory() as dir: - filename = dir + 'tmp.jpg' - task_processor.visualize(backend_model, img, results[0], filename, '') - assert os.path.exists(filename) + filename = str(tmp_path / 'tmp.jpg') + task_processor.visualize(backend_model, img, results[0], filename, '') + assert os.path.exists(filename) -def test_get_tensort_from_input(): +def test_get_tensor_from_input(task_processor): input_data = {'img': [torch.ones(3, 4, 5)]} inputs = task_processor.get_tensor_from_input(input_data) assert torch.equal(inputs, torch.ones(3, 4, 5)) -def test_get_partition_cfg(): - try: +def test_get_partition_cfg(task_processor): + with pytest.raises(NotImplementedError): _ = task_processor.get_partition_cfg(partition_type='') - except NotImplementedError: - pass -def test_build_dataset_and_dataloader(): +def test_build_dataset_and_dataloader(task_processor, model_cfg): from torch.utils.data import DataLoader, Dataset dataset = task_processor.build_dataset( dataset_cfg=model_cfg, dataset_type='test') @@ -132,7 +138,7 @@ assert isinstance(dataloader, DataLoader), 'Failed to build dataloader' -def test_single_gpu_test_and_evaluate(): +def test_single_gpu_test_and_evaluate(task_processor, model_cfg): from mmcv.parallel import MMDataParallel # Prepare dataloader diff --git a/tests/test_codebase/test_mmseg/test_segmentation_model.py b/tests/test_codebase/test_mmseg/test_segmentation_model.py index b28214f76..a6a48347c 100644 --- a/tests/test_codebase/test_mmseg/test_segmentation_model.py +++ b/tests/test_codebase/test_mmseg/test_segmentation_model.py @@ -1,22 +1,14 @@ # Copyright (c) OpenMMLab. All rights reserved.
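Two pytest idioms drive the test_segmentation.py refactor above: module-level globals (model_cfg, deploy_cfg, task_processor, img) become module-scoped fixtures, so configs are parsed lazily at test time and cached per module instead of being built at collection time; and the old try/except-pass around get_partition_cfg becomes pytest.raises, which actually fails when the expected exception is not raised. A self-contained sketch of both behaviours, under standard pytest semantics:

import pytest

calls = []


@pytest.fixture(scope='module')
def task_resource():
    # Built lazily, once per test module, then cached for every test in it.
    calls.append('built')
    return {'ready': True}


def test_uses_resource(task_resource):
    assert task_resource['ready'] and len(calls) == 1


def test_reuses_resource(task_resource):
    # The fixture body did not run a second time.
    assert len(calls) == 1


def test_unsupported_partition():
    # Unlike try/except-pass, this fails if nothing is raised.
    with pytest.raises(NotImplementedError):
        raise NotImplementedError('partition is not supported')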
import os.path as osp -from tempfile import NamedTemporaryFile import mmcv import numpy as np import pytest import torch -import mmdeploy.backend.onnxruntime as ort_apis -from mmdeploy.codebase import import_codebase -from mmdeploy.utils import Backend, Codebase +from mmdeploy.utils import Backend from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker -try: - import_codebase(Codebase.MMSEG) -except ImportError: - pytest.skip(f'{Codebase.MMSEG} is not installed.', allow_module_level=True) - NUM_CLASS = 19 IMAGE_SIZE = 32 @@ -24,63 +16,59 @@ IMAGE_SIZE = 32 @backend_checker(Backend.ONNXRUNTIME) class TestEnd2EndModel: - @classmethod - def setup_class(cls): + @pytest.fixture(scope='class') + def end2end_model(self): # force add backend wrapper regardless of plugins from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) # simplify backend inference - cls.wrapper = SwitchBackendWrapper(ORTWrapper) - cls.outputs = { - 'outputs': torch.rand(1, 1, IMAGE_SIZE, IMAGE_SIZE), - } - cls.wrapper.set(outputs=cls.outputs) - deploy_cfg = mmcv.Config( - {'onnx_config': { - 'output_names': ['outputs'] - }}) + with SwitchBackendWrapper(ORTWrapper) as wrapper: + outputs = { + 'outputs': torch.rand(1, 1, IMAGE_SIZE, IMAGE_SIZE), + } + wrapper.set(outputs=outputs) + deploy_cfg = mmcv.Config( + {'onnx_config': { + 'output_names': ['outputs'] + }}) - from mmdeploy.codebase.mmseg.deploy.segmentation_model import \ - End2EndModel - class_names = ['' for i in range(NUM_CLASS)] - palette = np.random.randint(0, 255, size=(NUM_CLASS, 3)) - cls.end2end_model = End2EndModel( - Backend.ONNXRUNTIME, [''], - device='cpu', - class_names=class_names, - palette=palette, - deploy_cfg=deploy_cfg) - - @classmethod - def teardown_class(cls): - cls.wrapper.recover() + from mmdeploy.codebase.mmseg.deploy.segmentation_model import \ + End2EndModel + class_names = ['' for i in range(NUM_CLASS)] + palette = np.random.randint(0, 255, size=(NUM_CLASS, 3)) + model = End2EndModel( + Backend.ONNXRUNTIME, [''], + device='cpu', + class_names=class_names, + palette=palette, + deploy_cfg=deploy_cfg) + yield model @pytest.mark.parametrize( 'ori_shape', [[IMAGE_SIZE, IMAGE_SIZE, 3], [2 * IMAGE_SIZE, 2 * IMAGE_SIZE, 3]]) - def test_forward(self, ori_shape): + def test_forward(self, ori_shape, end2end_model): imgs = [torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE)] img_metas = [[{ 'ori_shape': ori_shape, 'img_shape': [IMAGE_SIZE, IMAGE_SIZE, 3], 'scale_factor': [1., 1., 1., 1.], }]] - results = self.end2end_model.forward(imgs, img_metas) + results = end2end_model.forward(imgs, img_metas) assert results is not None, 'failed to get output using '\ 'End2EndModel' - def test_forward_test(self): + def test_forward_test(self, end2end_model): imgs = torch.rand(2, 3, IMAGE_SIZE, IMAGE_SIZE) - results = self.end2end_model.forward_test(imgs) + results = end2end_model.forward_test(imgs) assert isinstance(results[0], np.ndarray) - def test_show_result(self): + def test_show_result(self, end2end_model, tmp_path): input_img = np.zeros([IMAGE_SIZE, IMAGE_SIZE, 3]) - img_path = NamedTemporaryFile(suffix='.jpg').name + img_path = str(tmp_path / 'tmp.jpg') result = [torch.rand(IMAGE_SIZE, IMAGE_SIZE)] - self.end2end_model.show_result( + end2end_model.show_result( input_img, result, '', show=False, out_file=img_path) assert osp.exists(img_path), 'Fails to create drawn image.' 
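The TestEnd2EndModel rewrite above is the recurring shape of this patch: a setup_class/teardown_class pair becomes one class-scoped fixture, SwitchBackendWrapper is entered as a context manager so its exit path replaces the explicit wrapper.recover(), and NamedTemporaryFile gives way to pytest's auto-cleaned tmp_path. A generic sketch of the fixture-plus-context-manager shape (patched_backend is a hypothetical stand-in, not an mmdeploy API):

import pytest
from contextlib import contextmanager


@contextmanager
def patched_backend():
    # Hypothetical stand-in for a wrapper that patches state on enter
    # and restores it on exit, like SwitchBackendWrapper does.
    state = {'patched': True}
    try:
        yield state
    finally:
        state['patched'] = False


class TestWithSharedModel:

    @pytest.fixture(scope='class')
    def model(self):
        with patched_backend() as state:
            # Yielding inside the with-block keeps the patch alive for
            # every test in the class; cleanup runs at fixture teardown.
            yield state

    def test_first(self, model):
        assert model['patched']

    def test_second(self, model):
        assert model['patched']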
@@ -88,45 +76,44 @@ class TestEnd2EndModel: @backend_checker(Backend.RKNN) class TestRKNNModel: - @classmethod - def setup_class(cls): + @pytest.fixture(scope='class') + def end2end_model(self): # force add backend wrapper regardless of plugins - import mmdeploy.backend.rknn as rknn_apis from mmdeploy.backend.rknn import RKNNWrapper - rknn_apis.__dict__.update({'RKNNWrapper': RKNNWrapper}) + from mmdeploy.codebase.mmseg.deploy.segmentation_model import RKNNModel # simplify backend inference - cls.wrapper = SwitchBackendWrapper(RKNNWrapper) - cls.outputs = [torch.rand(1, 19, IMAGE_SIZE, IMAGE_SIZE)] - cls.wrapper.set(outputs=cls.outputs) - deploy_cfg = mmcv.Config({ - 'onnx_config': { - 'output_names': ['outputs'] - }, - 'backend_config': { - 'common_config': {} - } - }) + with SwitchBackendWrapper(RKNNWrapper) as wrapper: + outputs = [torch.rand(1, 19, IMAGE_SIZE, IMAGE_SIZE)] + wrapper.set(outputs=outputs) + deploy_cfg = mmcv.Config({ + 'onnx_config': { + 'output_names': ['outputs'] + }, + 'backend_config': { + 'common_config': {} + } + }) - from mmdeploy.codebase.mmseg.deploy.segmentation_model import RKNNModel - class_names = ['' for i in range(NUM_CLASS)] - palette = np.random.randint(0, 255, size=(NUM_CLASS, 3)) - cls.rknn_model = RKNNModel( - Backend.RKNN, [''], - device='cpu', - class_names=class_names, - palette=palette, - deploy_cfg=deploy_cfg) + class_names = ['' for i in range(NUM_CLASS)] + palette = np.random.randint(0, 255, size=(NUM_CLASS, 3)) + model = RKNNModel( + Backend.RKNN, [''], + device='cpu', + class_names=class_names, + palette=palette, + deploy_cfg=deploy_cfg) + yield model - def test_forward_test(self): + def test_forward_test(self, end2end_model): imgs = torch.rand(2, 3, IMAGE_SIZE, IMAGE_SIZE) - results = self.rknn_model.forward_test(imgs) + results = end2end_model.forward_test(imgs) assert isinstance(results[0], np.ndarray) @pytest.mark.parametrize('from_file', [True, False]) @pytest.mark.parametrize('data_type', ['train', 'val', 'test']) -def test_get_classes_palette_from_config(from_file, data_type): +def test_get_classes_palette_from_config(from_file, data_type, tmp_path): from mmseg.datasets import DATASETS from mmdeploy.codebase.mmseg.deploy.segmentation_model import \ @@ -145,7 +132,7 @@ def test_get_classes_palette_from_config(from_file, data_type): }) if from_file: - config_path = NamedTemporaryFile(suffix='.py').name + config_path = str(tmp_path / 'tmp_cfg.py') with open(config_path, 'w') as file: file.write(data_cfg.pretty_text) data_cfg = config_path @@ -169,7 +156,6 @@ def test_build_segmentation_model(): codebase_config=dict(type='mmseg'))) from mmdeploy.backend.onnxruntime import ORTWrapper - ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) # simplify backend inference with SwitchBackendWrapper(ORTWrapper) as wrapper: diff --git a/tests/test_core/test_function_rewriter.py b/tests/test_core/test_function_rewriter.py index ca7a681c3..0963fee00 100644 --- a/tests/test_core/test_function_rewriter.py +++ b/tests/test_core/test_function_rewriter.py @@ -1,4 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. 
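The test_function_rewriter.py hunk below moves rewriter registration into a module-scoped fixture whose teardown removes the registered records, so test-only rewrites no longer leak into later test modules. A stripped-down sketch of that lifecycle (the real fixture below additionally pops FUNCTION_REWRITER._origin_functions):

import pytest

from mmdeploy.core import FUNCTION_REWRITER


@pytest.fixture(scope='module')
def temporary_rewriter():

    @FUNCTION_REWRITER.register_rewriter(func_name='torch.add')
    def add_rewrite(ctx, x, y):
        # ctx.origin_func is the unpatched torch.add.
        return ctx.origin_func(x, y)

    yield

    # Teardown: drop the record so the global registry stays clean.
    FUNCTION_REWRITER._registry.remove_record(add_rewrite)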
+import pytest import torch from mmdeploy.core import FUNCTION_REWRITER, RewriterContext @@ -6,73 +7,89 @@ from mmdeploy.core.rewriters.function_rewriter import FunctionRewriter from mmdeploy.core.rewriters.rewriter_utils import collect_env from mmdeploy.utils.constants import IR, Backend +try: + from torch.testing import assert_close as torch_assert_close +except Exception: + from torch.testing import assert_allclose as torch_assert_close -def test_function_rewriter(): - x = torch.tensor([1, 2, 3, 4, 5]) - y = torch.tensor([2, 4, 6, 8, 10]) +@pytest.fixture(scope='module') +def register_test_rewriter(): @FUNCTION_REWRITER.register_rewriter( func_name='torch.mul', backend='tensorrt') @FUNCTION_REWRITER.register_rewriter( func_name='torch.add', backend='tensorrt') - def sub_func(rewriter, x, y): - assert hasattr(rewriter, 'cfg') - assert hasattr(rewriter, 'origin_func') + def sub_func(ctx, x, y): + assert hasattr(ctx, 'cfg') + assert hasattr(ctx, 'origin_func') return x - y + # test different config + @FUNCTION_REWRITER.register_rewriter( + func_name='torch.Tensor.div', backend='default') + def mul_func_class(ctx, x, y): + return x * y + + # test origin_func + @FUNCTION_REWRITER.register_rewriter( + func_name='torch.sub', backend='default') + def origin_sub_func(ctx, x, y, **kwargs): + return ctx.origin_func(x, y, **kwargs) + 1 + + yield + + del FUNCTION_REWRITER._origin_functions[-1] + FUNCTION_REWRITER._registry.remove_record(sub_func) + FUNCTION_REWRITER._registry.remove_record(mul_func_class) + FUNCTION_REWRITER._registry.remove_record(origin_sub_func) + + +@pytest.mark.usefixtures('register_test_rewriter') +def test_function_rewriter(): + + x = torch.tensor([1, 2, 3, 4, 5]) + y = torch.tensor([2, 4, 6, 8, 10]) + cfg = dict() with RewriterContext(cfg, backend='tensorrt'): result = torch.add(x, y) # replace add with sub - torch.testing.assert_allclose(result, x - y) + torch_assert_close(result, x - y) result = torch.mul(x, y) # replace add with sub - torch.testing.assert_allclose(result, x - y) + torch_assert_close(result, x - y) result = torch.add(x, y) # recovery origin function - torch.testing.assert_allclose(result, x + y) + torch_assert_close(result, x + y) with RewriterContext(cfg): result = torch.add(x, y) # replace should not happen with wrong backend - torch.testing.assert_allclose(result, x + y) - - # test different config - @FUNCTION_REWRITER.register_rewriter( - func_name='torch.Tensor.add', backend='default') - def mul_func_class(rewriter, x, y): - return x * y + torch_assert_close(result, x + y) with RewriterContext(cfg, backend='tensorrt'): - result = x.add(y) - # replace add with multi - torch.testing.assert_allclose(result, x * y) + result = x.div(y) + # replace div with multi + torch_assert_close(result, x * y) result = x.add(y) # recovery origin function - torch.testing.assert_allclose(result, x + y) + torch_assert_close(result, x + y) with RewriterContext(cfg): - result = x.add(y) - # replace add with multi - torch.testing.assert_allclose(result, x * y) - - # test origin_func - @FUNCTION_REWRITER.register_rewriter( - func_name='torch.add', backend='default') - def origin_add_func(rewriter, x, y, **kwargs): - return rewriter.origin_func(x, y, **kwargs) + 1 + result = x.div(y) + # replace div with multi + torch_assert_close(result, x * y) with RewriterContext(cfg): - result = torch.add(x, y) + result = torch.sub(x, y) # replace with origin + 1 - torch.testing.assert_allclose(result, x + y + 1) + torch_assert_close(result, x - y + 1) # remove torch.add - del 
FUNCTION_REWRITER._origin_functions[-1] - torch.testing.assert_allclose(torch.add(x, y), x + y) + torch_assert_close(torch.sub(x, y), x - y) def test_rewrite_empty_function(): diff --git a/tests/test_core/test_module_rewriter.py b/tests/test_core/test_module_rewriter.py index 001756d18..3c9780064 100644 --- a/tests/test_core/test_module_rewriter.py +++ b/tests/test_core/test_module_rewriter.py @@ -3,6 +3,11 @@ import torch from mmdeploy.core import MODULE_REWRITER, patch_model +try: + from torch.testing import assert_close as torch_assert_close +except Exception: + from torch.testing import assert_allclose as torch_assert_close + def test_module_rewriter(): from torchvision.models.resnet import resnet50 @@ -29,7 +34,7 @@ def test_module_rewriter(): rewritten_model = patch_model(model, cfg=cfg, backend='tensorrt') rewritten_bottle_nect = rewritten_model.layer1[0] rewritten_result = rewritten_bottle_nect(x) - torch.testing.assert_allclose(rewritten_result, result * 2) + torch_assert_close(rewritten_result, result * 2) # wrong backend should not be rewritten model = resnet50().eval() @@ -38,7 +43,7 @@ def test_module_rewriter(): rewritten_model = patch_model(model, cfg=cfg) rewritten_bottle_nect = rewritten_model.layer1[0] rewritten_result = rewritten_bottle_nect(x) - torch.testing.assert_allclose(rewritten_result, result) + torch_assert_close(rewritten_result, result) def test_pass_redundant_args_to_model(): diff --git a/tests/test_core/test_symbolic_register.py b/tests/test_core/test_symbolic_register.py index b012f6a8b..e9210be2c 100644 --- a/tests/test_core/test_symbolic_register.py +++ b/tests/test_core/test_symbolic_register.py @@ -35,8 +35,8 @@ def create_custom_module(): del mmdeploy.TestFunc -def test_symbolic_rewriter(): - test_func = mmdeploy.TestFunc.apply +@pytest.fixture(scope='module') +def register_custom_rewriter(): @SYMBOLIC_REWRITER.register_symbolic('mmdeploy.TestFunc', backend='ncnn') @SYMBOLIC_REWRITER.register_symbolic('mmdeploy.TestFunc') @@ -54,6 +54,17 @@ def test_symbolic_rewriter(): def symbolic_cummax(symbolic_wrapper, g, input, dim): return g.op('mmdeploy::cummax_default', input, dim_i=dim, outputs=2) + yield + + SYMBOLIC_REWRITER._registry.remove_record(symbolic_testfunc_default) + SYMBOLIC_REWRITER._registry.remove_record(symbolic_testfunc_tensorrt) + SYMBOLIC_REWRITER._registry.remove_record(symbolic_cummax) + + +@pytest.mark.usefixtures('register_custom_rewriter') +def test_symbolic_rewriter(): + test_func = mmdeploy.TestFunc.apply + class TestModel(torch.nn.Module): def __init__(self): @@ -99,18 +110,10 @@ def test_symbolic_rewriter(): assert nodes[1].domain == 'mmdeploy' +@pytest.mark.usefixtures('register_custom_rewriter') def test_unregister(): test_func = mmdeploy.TestFunc.apply - @SYMBOLIC_REWRITER.register_symbolic('mmdeploy.TestFunc') - def symbolic_testfunc_default(symbolic_wrapper, g, x, val): - return g.op('mmdeploy::symbolic_testfunc_default', x, val_i=val) - - @SYMBOLIC_REWRITER.register_symbolic( - 'cummax', is_pytorch=True, arg_descriptors=['v', 'i']) - def symbolic_cummax(symbolic_wrapper, g, input, dim): - return g.op('mmdeploy::cummax_default', input, dim_i=dim, outputs=2) - class TestModel(torch.nn.Module): def __init__(self): diff --git a/tests/test_mmcv/test_mmcv_cnn.py b/tests/test_mmcv/test_mmcv_cnn.py index df109069f..ef2f4e8d9 100644 --- a/tests/test_mmcv/test_mmcv_cnn.py +++ b/tests/test_mmcv/test_mmcv_cnn.py @@ -13,6 +13,7 @@ def test_multiheadattention_ncnn(): from mmcv.cnn.bricks.transformer import MultiheadAttention embed_dims, 
num_heads = 12, 2 model = MultiheadAttention(embed_dims, num_heads, batch_first=True) + model.eval() query = torch.rand(1, 3, embed_dims) deploy_cfg = mmcv.Config( diff --git a/tests/test_mmcv/test_mmcv_ops.py b/tests/test_mmcv/test_mmcv_ops.py index bd3df9d26..aaff78eb7 100644 --- a/tests/test_mmcv/test_mmcv_ops.py +++ b/tests/test_mmcv/test_mmcv_ops.py @@ -9,6 +9,11 @@ from mmdeploy.core import RewriterContext from mmdeploy.utils import Backend from mmdeploy.utils.test import WrapFunction, check_backend +try: + from torch.testing import assert_close as torch_assert_close +except Exception: + from torch.testing import assert_allclose as torch_assert_close + @pytest.mark.parametrize( 'iou_threshold, score_threshold,max_output_boxes_per_class', @@ -135,4 +140,4 @@ def test_modulated_deform_conv(): out = model(x) jit_out = jit_model(x) - torch.testing.assert_allclose(out, jit_out) + torch_assert_close(out, jit_out) diff --git a/tests/test_ops/__init__.py b/tests/test_ops/__init__.py index b319885bb..ef101fec6 100644 --- a/tests/test_ops/__init__.py +++ b/tests/test_ops/__init__.py @@ -1,4 +1 @@ # Copyright (c) OpenMMLab. All rights reserved. -from .utils import TestNCNNExporter, TestOnnxRTExporter, TestTensorRTExporter - -__all__ = ['TestTensorRTExporter', 'TestOnnxRTExporter', 'TestNCNNExporter'] diff --git a/tests/test_ops/test_ops.py b/tests/test_ops/test_ops.py index e8822da3f..dd23e45af 100644 --- a/tests/test_ops/test_ops.py +++ b/tests/test_ops/test_ops.py @@ -11,77 +11,10 @@ from mmdeploy.core import RewriterContext from mmdeploy.utils.test import WrapFunction, assert_allclose from .utils import TestNCNNExporter, TestOnnxRTExporter, TestTensorRTExporter -TEST_ONNXRT = TestOnnxRTExporter() -TEST_TENSORRT = TestTensorRTExporter() -TEST_NCNN = TestNCNNExporter() +def _test_grid_sample(backend, mode, padding_mode, align_corners, tmp_path): -@pytest.fixture(scope='function') -def disable_cudnn(): - cudnn_enable = torch.backends.cudnn.enabled - torch.backends.cudnn.enabled = False - - yield - torch.backends.cudnn.enabled = cudnn_enable - - -@pytest.mark.parametrize('backend', [TEST_TENSORRT]) -@pytest.mark.parametrize('pool_h,pool_w,spatial_scale,sampling_ratio', - [(2, 2, 1.0, 2), (4, 4, 2.0, 4)]) -def test_roi_align(backend, - pool_h, - pool_w, - spatial_scale, - sampling_ratio, - input_list=None, - save_dir=None): - backend.check_env() - - if input_list is None: - input = torch.rand(1, 1, 16, 16, dtype=torch.float32) - single_roi = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32) - else: - input = torch.tensor(input_list[0], dtype=torch.float32) - single_roi = torch.tensor(input_list[1], dtype=torch.float32) - - from mmcv.ops import roi_align - - def wrapped_function(torch_input, torch_rois): - return roi_align(torch_input, torch_rois, (pool_w, pool_h), - spatial_scale, sampling_ratio, 'avg', True) - - wrapped_model = WrapFunction(wrapped_function).eval() - - with RewriterContext( - Config({'backend_config': { - 'type': backend.backend_name - }}), - backend=backend.backend_name, - opset=11): - backend.run_and_validate( - wrapped_model, [input, single_roi], - 'roi_align', - input_names=['input', 'rois'], - output_names=['roi_feat'], - save_dir=save_dir) - - -@pytest.mark.parametrize('backend', [TEST_TENSORRT, TEST_ONNXRT]) -@pytest.mark.parametrize('mode', ['bilinear', 'nearest']) -@pytest.mark.parametrize('padding_mode', ['zeros', 'border', 'reflection']) -@pytest.mark.parametrize('align_corners', [True, False]) -def test_grid_sample(backend, - mode, - padding_mode, - align_corners, 
- input_list=None, - save_dir=None): - backend.check_env() - - if input_list is None: - input = torch.rand(1, 1, 10, 10) - else: - input = torch.tensor(input_list[0]) + input = torch.rand(1, 1, 10, 10) grid = torch.Tensor([[[1, 0, 0], [0, 1, 0]]]) grid = nn.functional.affine_grid( grid, (1, 1, input.shape[2] * 2, input.shape[3] * 2)).type_as(input) @@ -107,95 +40,15 @@ def test_grid_sample(backend, 'grid_sampler', input_names=['input', 'grid'], output_names=['output'], - save_dir=save_dir) + save_dir=str(tmp_path)) -@pytest.mark.parametrize('backend', [TEST_TENSORRT]) -@pytest.mark.parametrize('dynamic_export', [True, False]) -@pytest.mark.parametrize('mode', ['bicubic', 'nearest']) -@pytest.mark.parametrize('align_corners', [True, False]) -@pytest.mark.parametrize('output_size', [[10, 20], None]) -@pytest.mark.parametrize('scale_factor', [2]) -@pytest.mark.parametrize('n, c, h, w', [(2, 3, 5, 10)]) -def test_bicubic_interpolate(backend, - dynamic_export, - mode, - align_corners, - output_size, - scale_factor, - n, - c, - h, - w, - input_list=None, - save_dir=None): - backend.check_env() +def _test_modulated_deform_conv(backend, in_channels, out_channels, stride, + padding, dilation, groups, deform_groups, + kernel_size, bias, tmp_path): - if input_list is None: - input = torch.randn(n, c, h, w) - if dynamic_export: - dynamic_axes = { - 'input': { - 0: 'n', - 2: 'h', - 3: 'w', - }, - 'output': { - 0: 'n', - 2: 'h', - 3: 'w', - }, - } - else: - dynamic_axes = None - - if mode == 'nearest': - align_corners = None - if output_size is None: - resize = nn.Upsample( - scale_factor=scale_factor, mode=mode, align_corners=align_corners) - else: - resize = nn.Upsample( - size=output_size, mode=mode, align_corners=align_corners) - expected_result = resize(input).cuda() - wrapped_model = WrapFunction(resize).eval() - - with RewriterContext(cfg={}, backend=backend.backend_name, opset=11): - backend.run_and_validate( - wrapped_model, [input], - 'bicubic_interpolate', - input_names=['input'], - dynamic_axes=dynamic_axes, - output_names=['output'], - save_dir=save_dir, - expected_result=expected_result) - - -@pytest.mark.parametrize('backend', [TEST_TENSORRT, TEST_ONNXRT]) -@pytest.mark.parametrize('in_channels,out_channels,stride,padding,' - 'dilation,groups,deform_groups,kernel_size', - [(3, 64, 1, 0, 1, 1, 1, 3), - (1, 32, 3, 2, 1, 1, 1, 3)]) -@pytest.mark.parametrize('bias', [True, False]) -def test_modulated_deform_conv(backend, - in_channels, - out_channels, - stride, - padding, - dilation, - groups, - deform_groups, - kernel_size, - bias, - input_list=None, - save_dir=None): - backend.check_env() - - if input_list is None: - input = torch.rand( - 1, in_channels, 28, 28, requires_grad=False) # (n, c, h, w) - else: - input = torch.tensor(input_list[0]) + input = torch.rand( + 1, in_channels, 28, 28, requires_grad=False) # (n, c, h, w) conv_offset = nn.Conv2d( in_channels=in_channels, out_channels=deform_groups * 3 * kernel_size * kernel_size, @@ -220,117 +73,198 @@ def test_modulated_deform_conv(backend, 'modulated_deform_conv', input_names=['input', 'offset', 'mask'], output_names=['output'], - save_dir=save_dir) + save_dir=str(tmp_path)) -@pytest.mark.parametrize('backend', [TEST_TENSORRT]) -@pytest.mark.parametrize('in_channels,out_channels,stride,padding,' - 'dilation,groups,deform_groups,kernel_size', - [(3, 64, 1, 0, 1, 1, 1, 3), - (1, 32, 3, 2, 1, 1, 1, 3)]) -def test_deform_conv(backend, - in_channels, - out_channels, - stride, - padding, - dilation, - groups, - deform_groups, - kernel_size, 
- input_list=None, - save_dir=None): - backend.check_env() +class TestTensorRTOps: - if input_list is None: + @pytest.fixture(scope='class') + def backend(self): + return TestTensorRTExporter() + + @pytest.fixture(autouse=True, scope='class') + def check_env(self, backend): + backend.check_env() + + @pytest.fixture(scope='function') + def disable_cudnn(self): + cudnn_enable = torch.backends.cudnn.enabled + torch.backends.cudnn.enabled = False + + yield + torch.backends.cudnn.enabled = cudnn_enable + + @pytest.mark.parametrize('pool_h,pool_w,spatial_scale,sampling_ratio', + [(2, 2, 1.0, 2), (4, 4, 2.0, 4)]) + def test_roi_align(self, backend, pool_h, pool_w, spatial_scale, + sampling_ratio, tmp_path): + + input = torch.rand(1, 1, 16, 16, dtype=torch.float32) + single_roi = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32) + + from mmcv.ops import roi_align + + def wrapped_function(torch_input, torch_rois): + return roi_align(torch_input, torch_rois, (pool_w, pool_h), + spatial_scale, sampling_ratio, 'avg', True) + + wrapped_model = WrapFunction(wrapped_function).eval() + + save_dir = str(tmp_path) + with RewriterContext( + Config({'backend_config': { + 'type': backend.backend_name + }}), + backend=backend.backend_name, + opset=11): + backend.run_and_validate( + wrapped_model, [input, single_roi], + 'roi_align', + input_names=['input', 'rois'], + output_names=['roi_feat'], + save_dir=save_dir) + + @pytest.mark.parametrize('mode', ['bilinear', 'nearest']) + @pytest.mark.parametrize('padding_mode', ['zeros', 'border', 'reflection']) + @pytest.mark.parametrize('align_corners', [True, False]) + def test_grid_sample(self, backend, mode, padding_mode, align_corners, + tmp_path): + _test_grid_sample(backend, mode, padding_mode, align_corners, tmp_path) + + @pytest.mark.parametrize('dynamic_export', [True, False]) + @pytest.mark.parametrize('mode', ['bicubic', 'nearest']) + @pytest.mark.parametrize('align_corners', [True, False]) + @pytest.mark.parametrize('output_size', [[10, 20], None]) + @pytest.mark.parametrize('scale_factor', [2]) + @pytest.mark.parametrize('n, c, h, w', [(2, 3, 5, 10)]) + def test_bicubic_interpolate(self, backend, dynamic_export, mode, + align_corners, output_size, scale_factor, n, + c, h, w, tmp_path): + + input = torch.randn(n, c, h, w) + if dynamic_export: + dynamic_axes = { + 'input': { + 0: 'n', + 2: 'h', + 3: 'w', + }, + 'output': { + 0: 'n', + 2: 'h', + 3: 'w', + }, + } + else: + dynamic_axes = None + + if mode == 'nearest': + align_corners = None + if output_size is None: + resize = nn.Upsample( + scale_factor=scale_factor, + mode=mode, + align_corners=align_corners) + else: + resize = nn.Upsample( + size=output_size, mode=mode, align_corners=align_corners) + expected_result = resize(input).cuda() + wrapped_model = WrapFunction(resize).eval() + + with RewriterContext(cfg={}, backend=backend.backend_name, opset=11): + backend.run_and_validate( + wrapped_model, [input], + 'bicubic_interpolate', + input_names=['input'], + dynamic_axes=dynamic_axes, + output_names=['output'], + save_dir=str(tmp_path), + expected_result=expected_result) + + @pytest.mark.parametrize('in_channels,out_channels,stride,padding,' + 'dilation,groups,deform_groups,kernel_size', + [(3, 64, 1, 0, 1, 1, 1, 3), + (1, 32, 3, 2, 1, 1, 1, 3)]) + @pytest.mark.parametrize('bias', [True, False]) + def test_modulated_deform_conv(self, backend, in_channels, out_channels, + stride, padding, dilation, groups, + deform_groups, kernel_size, bias, tmp_path): + _test_modulated_deform_conv(backend, in_channels, 
out_channels, stride, + padding, dilation, groups, deform_groups, + kernel_size, bias, tmp_path) + + @pytest.mark.parametrize('in_channels,out_channels,stride,padding,' + 'dilation,groups,deform_groups,kernel_size', + [(3, 64, 1, 0, 1, 1, 1, 3), + (1, 32, 3, 2, 1, 1, 1, 3)]) + def test_deform_conv(self, backend, in_channels, out_channels, stride, + padding, dilation, groups, deform_groups, kernel_size, + tmp_path): input = torch.rand( 1, in_channels, 28, 28, requires_grad=False) # (n, c, h, w) - else: - input = torch.tensor(input_list[0]) - conv_offset = nn.Conv2d( - in_channels=in_channels, - out_channels=deform_groups * 2 * kernel_size * kernel_size, - kernel_size=kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - bias=True) - offset = conv_offset(input) + conv_offset = nn.Conv2d( + in_channels=in_channels, + out_channels=deform_groups * 2 * kernel_size * kernel_size, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=True) + offset = conv_offset(input) - from mmcv.ops import DeformConv2d - model = DeformConv2d(in_channels, out_channels, kernel_size, stride, - padding, dilation, groups, deform_groups).eval() + from mmcv.ops import DeformConv2d + model = DeformConv2d(in_channels, out_channels, kernel_size, stride, + padding, dilation, groups, deform_groups).eval() - with RewriterContext(cfg={}, backend=backend.backend_name, opset=11): - backend.run_and_validate( - model, [input, offset], - 'deform_conv', - input_names=['input', 'offset'], - output_names=['output'], - save_dir=save_dir) + with RewriterContext(cfg={}, backend=backend.backend_name, opset=11): + backend.run_and_validate( + model, [input, offset], + 'deform_conv', + input_names=['input', 'offset'], + output_names=['output'], + save_dir=str(tmp_path)) + @pytest.mark.usefixtures('disable_cudnn') + @pytest.mark.parametrize('dynamic_export', [True, False]) + @pytest.mark.parametrize('fp16_mode', [True, False]) + @pytest.mark.parametrize('n, c, h, w', [(2, 3, 10, 10)]) + def test_instance_norm(self, backend, dynamic_export, fp16_mode, n, c, h, + w, tmp_path): -@pytest.mark.parametrize('backend', [TEST_TENSORRT]) -@pytest.mark.parametrize('dynamic_export', [True, False]) -@pytest.mark.parametrize('fp16_mode', [True, False]) -@pytest.mark.parametrize('n, c, h, w', [(2, 3, 10, 10)]) -def test_instance_norm(disable_cudnn, - backend, - dynamic_export, - fp16_mode, - n, - c, - h, - w, - input_list=None, - save_dir=None): - backend.check_env() - - if input_list is None: input = torch.randn(n, c, h, w) - if dynamic_export: - dynamic_axes = { - 'input': { - 0: 'n', - 2: 'h', - 3: 'w', - }, - 'output': { - 0: 'n', - 2: 'h', - 3: 'w', - }, - } - else: - dynamic_axes = None + if dynamic_export: + dynamic_axes = { + 'input': { + 0: 'n', + 2: 'h', + 3: 'w', + }, + 'output': { + 0: 'n', + 2: 'h', + 3: 'w', + }, + } + else: + dynamic_axes = None - wrapped_model = nn.InstanceNorm2d(c, affine=True).eval().cuda() + wrapped_model = nn.InstanceNorm2d(c, affine=True).eval().cuda() - with RewriterContext(cfg={}, backend=backend.backend_name, opset=11): - backend.run_and_validate( - wrapped_model, [input], - 'instance_norm', - input_names=['input'], - dynamic_axes=dynamic_axes, - output_names=['output'], - save_dir=save_dir) + with RewriterContext(cfg={}, backend=backend.backend_name, opset=11): + backend.run_and_validate( + wrapped_model, [input], + 'instance_norm', + input_names=['input'], + dynamic_axes=dynamic_axes, + output_names=['output'], + save_dir=str(tmp_path)) - 
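test_instance_norm above opts into the class's disable_cudnn fixture through usefixtures; that fixture is a plain save/flip/restore of torch.backends.cudnn.enabled around a single test. pytest's built-in monkeypatch can express the same save-and-restore, shown here only as an equivalent sketch rather than what the patch uses:

import torch


def test_without_cudnn(monkeypatch):
    # monkeypatch records the old attribute value and restores it on teardown.
    monkeypatch.setattr(torch.backends.cudnn, 'enabled', False)
    assert torch.backends.cudnn.enabled is False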
-@pytest.mark.parametrize('backend', [TEST_TENSORRT]) -@pytest.mark.parametrize('num_classes,pre_topk,after_topk,iou_threshold,' - 'score_threshold,background_label_id', - [(5, 6, 3, 0.7, 0.1, -1)]) -def test_batched_nms(backend, - num_classes, - pre_topk, - after_topk, - iou_threshold, - score_threshold, - background_label_id, - input_list=None, - save_dir=None): - backend.check_env() - - if input_list is None: + @pytest.mark.parametrize('num_classes,pre_topk,after_topk,iou_threshold,' + 'score_threshold,background_label_id', + [(5, 6, 3, 0.7, 0.1, -1)]) + def test_batched_nms(self, backend, num_classes, pre_topk, after_topk, + iou_threshold, score_threshold, background_label_id, + tmp_path): nms_boxes = torch.tensor([[[291.1746, 316.2263, 343.5029, 347.7312], [288.4846, 315.0447, 343.7267, 346.5630], [288.5307, 318.1989, 341.6425, 349.7222], @@ -351,60 +285,48 @@ [0.7144, 0.1066, 0.4125, 0.4041, 0.8819], [0.4963, 0.7891, 0.6908, 0.1499, 0.5584], [0.4385, 0.6035, 0.0508, 0.0662, 0.5938]]]) - else: - nms_boxes = torch.tensor(input_list[0], dtype=torch.float32) - scores = torch.tensor(input_list[1], dtype=torch.float32) - from mmdeploy.codebase.mmdet.core.post_processing import _multiclass_nms - expected_result = _multiclass_nms( - nms_boxes, - scores, - iou_threshold=iou_threshold, - score_threshold=score_threshold, - pre_top_k=pre_topk + 1, - keep_top_k=after_topk + 1) - expected_result = (expected_result[0][:, - 0:-1, :], expected_result[1][:, - 0:-1]) + from mmdeploy.codebase.mmdet.core.post_processing.bbox_nms import \ + _multiclass_nms + expected_result = _multiclass_nms( + nms_boxes, + scores, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + pre_top_k=pre_topk + 1, + keep_top_k=after_topk + 1) + expected_result = (expected_result[0][:, 0:-1, :], + expected_result[1][:, 0:-1]) - boxes = nms_boxes.unsqueeze(2).tile(num_classes, 1) + boxes = nms_boxes.unsqueeze(2).tile(num_classes, 1) - from mmdeploy.mmcv.ops.nms import TRTBatchedNMSop - batched_nms = TRTBatchedNMSop.apply + from mmdeploy.mmcv.ops.nms import TRTBatchedNMSop + batched_nms = TRTBatchedNMSop.apply - def wrapped_function(boxes, scores): - return batched_nms(boxes, scores, num_classes, pre_topk, after_topk, - iou_threshold, score_threshold, background_label_id) + def wrapped_function(boxes, scores): + return batched_nms(boxes, scores, num_classes, pre_topk, + after_topk, iou_threshold, score_threshold, + background_label_id) - wrapped_model = WrapFunction(wrapped_function) + wrapped_model = WrapFunction(wrapped_function) - with RewriterContext(cfg={}, backend=backend.backend_name, opset=11): - backend.run_and_validate( - wrapped_model, [boxes, scores], - 'batched_nms', - input_names=['boxes', 'scores'], - output_names=['batched_nms_bboxes', 'inds'], - expected_result=expected_result, - save_dir=save_dir) + with RewriterContext(cfg={}, backend=backend.backend_name, opset=11): + backend.run_and_validate( + wrapped_model, [boxes, scores], + 'batched_nms', + input_names=['boxes', 'scores'], + output_names=['batched_nms_bboxes', 'inds'], + expected_result=expected_result, + save_dir=str(tmp_path)) + @pytest.mark.parametrize('num_classes,pre_topk,after_topk,iou_threshold,' + 'score_threshold,background_label_id', + [(5, 6, 3, 0.7, 0.1, -1)]) + def test_batched_rotated_nms(self, backend, num_classes, pre_topk, + after_topk, iou_threshold, score_threshold, + background_label_id, tmp_path): + pytest.importorskip('mmrotate', reason='mmrotate is not installed.')
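pytest.importorskip above replaces the old in-body guard: it imports mmrotate at call time, returns the module on success, and skips just this test (instead of erroring) when the import fails. In miniature:

import pytest


def test_needs_optional_dependency():
    # Returns the imported module, or skips this test on ImportError.
    mmrotate = pytest.importorskip('mmrotate', reason='mmrotate is not installed.')
    assert mmrotate is not None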
-@pytest.mark.parametrize('backend', [TEST_TENSORRT]) -@pytest.mark.parametrize('num_classes,pre_topk,after_topk,iou_threshold,' - 'score_threshold,background_label_id', - [(5, 6, 3, 0.7, 0.1, -1)]) -def test_batched_rotated_nms(backend, - num_classes, - pre_topk, - after_topk, - iou_threshold, - score_threshold, - background_label_id, - input_list=None, - save_dir=None): - backend.check_env() - pytest.importorskip('mmrotate', reason='mmrorate is not installed.') - - if input_list is None: nms_boxes = torch.tensor( [[[291.1746, 316.2263, 343.5029, 347.7312, 1.], [288.4846, 315.0447, 343.7267, 346.5630, 2.], @@ -426,64 +348,49 @@ def test_batched_rotated_nms(backend, [0.7144, 0.1066, 0.4125, 0.4041, 0.8819], [0.4963, 0.7891, 0.6908, 0.1499, 0.5584], [0.4385, 0.6035, 0.0508, 0.0662, 0.5938]]]) - else: - nms_boxes = torch.tensor(input_list[0], dtype=torch.float32) - scores = torch.tensor(input_list[1], dtype=torch.float32) - from mmdeploy.codebase.mmrotate.core.post_processing.bbox_nms import \ - _multiclass_nms_rotated - expected_result = _multiclass_nms_rotated( - nms_boxes, - scores, - iou_threshold=iou_threshold, - score_threshold=score_threshold, - pre_top_k=pre_topk + 1, - keep_top_k=after_topk + 1) - expected_result = (expected_result[0][:, - 0:-1, :], expected_result[1][:, - 0:-1]) + from mmdeploy.codebase.mmrotate.core.post_processing.bbox_nms import \ + _multiclass_nms_rotated + expected_result = _multiclass_nms_rotated( + nms_boxes, + scores, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + pre_top_k=pre_topk + 1, + keep_top_k=after_topk + 1) + expected_result = (expected_result[0][:, 0:-1, :], + expected_result[1][:, 0:-1]) - boxes = nms_boxes.unsqueeze(2).tile(num_classes, 1) + boxes = nms_boxes.unsqueeze(2).tile(num_classes, 1) - from mmdeploy.mmcv.ops.nms_rotated import TRTBatchedRotatedNMSop - batched_rotated_nms = TRTBatchedRotatedNMSop.apply + from mmdeploy.mmcv.ops.nms_rotated import TRTBatchedRotatedNMSop + batched_rotated_nms = TRTBatchedRotatedNMSop.apply - def wrapped_function(boxes, scores): - return batched_rotated_nms(boxes, scores, num_classes, pre_topk, - after_topk, iou_threshold, score_threshold, - background_label_id) + def wrapped_function(boxes, scores): + return batched_rotated_nms(boxes, scores, num_classes, pre_topk, + after_topk, iou_threshold, + score_threshold, background_label_id) - wrapped_model = WrapFunction(wrapped_function) + wrapped_model = WrapFunction(wrapped_function) - with RewriterContext(cfg={}, backend=backend.backend_name, opset=11): - backend.run_and_validate( - wrapped_model, [boxes, scores], - 'batched_rotated_nms', - input_names=['boxes', 'scores'], - output_names=['batched_rotated_nms_bboxes', 'inds'], - expected_result=expected_result, - save_dir=save_dir) + with RewriterContext(cfg={}, backend=backend.backend_name, opset=11): + backend.run_and_validate( + wrapped_model, [boxes, scores], + 'batched_rotated_nms', + input_names=['boxes', 'scores'], + output_names=['batched_rotated_nms_bboxes', 'inds'], + expected_result=expected_result, + save_dir=str(tmp_path)) - -@pytest.mark.parametrize('backend', [TEST_TENSORRT]) -@pytest.mark.parametrize( - 'out_size, pool_mode, sampling_ratio,roi_scale_factor,' - ' finest_scale,featmap_strides, aligned', - [(tuple([2, 2]), 0, 2, 1.0, 2, list([2.0, 4.0]), 1), - (tuple([2, 2]), 1, 2, 1.0, 2, list([2.0, 4.0]), 1)]) -def test_multi_level_roi_align(backend, - out_size, - pool_mode, - sampling_ratio, - roi_scale_factor, - finest_scale, - featmap_strides, - aligned, - input_list=None, 
- save_dir=None): - backend.check_env() - - if input_list is None: + @pytest.mark.parametrize( + 'out_size, pool_mode, sampling_ratio,roi_scale_factor,' + ' finest_scale,featmap_strides, aligned', + [(tuple([2, 2]), 0, 2, 1.0, 2, list([2.0, 4.0]), 1), + (tuple([2, 2]), 1, 2, 1.0, 2, list([2.0, 4.0]), 1)]) + def test_multi_level_roi_align(self, backend, out_size, pool_mode, + sampling_ratio, roi_scale_factor, + finest_scale, featmap_strides, aligned, + tmp_path): input = [ torch.tensor([[[[0.3014, 0.7334, 0.6502, 0.1689], [0.3031, 0.3735, 0.6032, 0.1644], @@ -526,440 +433,62 @@ def test_multi_level_roi_align(backend, [[0.1542, 0.2849], [0.2370, 0.3053]]]]) - else: - input = input_list[0] - rois = input_list[1] - expected_result = input_list[2] - input_name = [('input_' + str(i)) for i in range(len(featmap_strides))] - input_name.insert(0, 'rois') + input_name = [('input_' + str(i)) for i in range(len(featmap_strides))] + input_name.insert(0, 'rois') - inputs = [ - onnx.helper.make_tensor_value_info( - input_name[i + 1], onnx.TensorProto.FLOAT, shape=input[i].shape) - for i in range(len(input_name) - 1) - ] - inputs.append( - onnx.helper.make_tensor_value_info( - 'rois', onnx.TensorProto.FLOAT, shape=rois.shape)) - outputs = [ - onnx.helper.make_tensor_value_info( - 'bbox_feats', onnx.TensorProto.FLOAT, shape=expected_result.shape) - ] - node = onnx.helper.make_node( - 'MMCVMultiLevelRoiAlign', - input_name, ['bbox_feats'], - 'MMCVMultiLevelRoiAlign_0', - None, - 'mmdeploy', - pool_mode=pool_mode, - aligned=aligned, - featmap_strides=featmap_strides, - finest_scale=finest_scale, - output_height=out_size[0], - output_width=out_size[1], - roi_scale_factor=roi_scale_factor, - sampling_ratio=sampling_ratio) - graph = onnx.helper.make_graph([node], 'torch-jit-export', inputs, outputs) - onnx_model = onnx.helper.make_model( - graph, producer_name='pytorch', producer_version='1.8') - onnx_model.opset_import[0].version = 11 - onnx_model.opset_import.append( - onnx.onnx_ml_pb2.OperatorSetIdProto(domain='mmdeploy', version=1)) + inputs = [ + onnx.helper.make_tensor_value_info( + input_name[i + 1], + onnx.TensorProto.FLOAT, + shape=input[i].shape) for i in range(len(input_name) - 1) + ] + inputs.append( + onnx.helper.make_tensor_value_info( + 'rois', onnx.TensorProto.FLOAT, shape=rois.shape)) + outputs = [ + onnx.helper.make_tensor_value_info( + 'bbox_feats', + onnx.TensorProto.FLOAT, + shape=expected_result.shape) + ] + node = onnx.helper.make_node( + 'MMCVMultiLevelRoiAlign', + input_name, ['bbox_feats'], + 'MMCVMultiLevelRoiAlign_0', + None, + 'mmdeploy', + pool_mode=pool_mode, + aligned=aligned, + featmap_strides=featmap_strides, + finest_scale=finest_scale, + output_height=out_size[0], + output_width=out_size[1], + roi_scale_factor=roi_scale_factor, + sampling_ratio=sampling_ratio) + graph = onnx.helper.make_graph([node], 'torch-jit-export', inputs, + outputs) + onnx_model = onnx.helper.make_model( + graph, producer_name='pytorch', producer_version='1.8') + onnx_model.opset_import[0].version = 11 + onnx_model.opset_import.append( + onnx.onnx_ml_pb2.OperatorSetIdProto(domain='mmdeploy', version=1)) - backend.run_and_validate( - onnx_model, [rois, *input], - 'multi_level_roi_align', - input_names=input_name, - output_names=['bbox_feats'], - expected_result=expected_result, - save_dir=save_dir) - - -@pytest.mark.parametrize('backend', [TEST_NCNN]) -@pytest.mark.parametrize('k', [1, 3, 5]) -@pytest.mark.parametrize('dim', [1, 2, 3]) -@pytest.mark.parametrize('largest', [True, False]) 
-@pytest.mark.parametrize('sorted', [True, False]) -def test_topk(backend, - k, - dim, - largest, - sorted, - input_list=None, - save_dir=None): - backend.check_env() - - if input_list is None: - input = torch.rand(1, 8, 12, 17) - else: - input = input_list[0] - assert input.shape[0] == 1, ('ncnn batch must be 1, ' - f'but got {input.shape[0]}') - - def topk_function(inputs): - return torch.Tensor.topk(inputs, k, dim, largest, sorted) - - wrapped_model = WrapFunction(topk_function) - - # when the 'sorted' attribute is False, pytorch will return - # a hard to expect result, which only features that the topk - # number is right. So the Topk unittest only check whether the - # topk elements are right, all the possible order will be accepted. - with RewriterContext(cfg={}, backend=backend.backend_name, opset=11): - if not sorted: - backend.run_and_validate( - wrapped_model, [input.float()], - 'topk' + f'_no_sorted_dim_{dim}', - input_names=['inputs'], - output_names=['data', 'index'], - save_dir=save_dir) - else: - backend.run_and_validate( - wrapped_model, [input.float()], - 'topk', - input_names=['inputs'], - output_names=['data', 'index'], - save_dir=save_dir) - - -@pytest.mark.parametrize('backend', [TEST_NCNN]) -@pytest.mark.parametrize('dim, n, c, h, w', [(1, 1, 1, 1, 8), (2, 1, 1, 5, 7), - (3, 1, 3, 10, 15)]) -def test_shape(backend, - dim, - n, - c, - h, - w, - input_names=['input'], - output_names=['output'], - tolerate_small_mismatch=False, - input_list=None, - save_dir=None): - backend.check_env() - - orig_shape = (n, c, h, w)[-dim - 1:] - if input_list is None: - input = torch.rand(orig_shape) - else: - input = input_list[0] - assert input.dim() == dim + 1, 'input.dim() must equal to dim + 1' - assert tuple(input.shape) == orig_shape, ('input.shape must the ' - 'same as orig_shape') - - assert input.shape[0] == 1, ('ncnn batch must be 1, ' - f'but got {input.shape[0]}') - - shape_node = make_node('Shape', input_names, output_names) - assert len(input_names) == 1, 'length of input_names must be 1' - assert len(output_names) == 1, 'length of output_names must be 1' - shape_graph = make_graph([shape_node], 'shape_graph', [ - make_tensor_value_info(input_names[0], onnx.TensorProto.FLOAT, - orig_shape) - ], [ - make_tensor_value_info(output_names[0], onnx.TensorProto.FLOAT, - (dim + 1, )) - ]) - shape_model = make_model(shape_graph) - - with RewriterContext(cfg={}, backend=backend.backend_name, opset=11): - ncnn_model = backend.onnx2ncnn(shape_model, 'shape', output_names, - save_dir) - - # ncnn mat has implicit batch for mat, the ncnn_output is a mat, - # so the ncnn_outputs has 2 dimensions, not 1. 
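The deleted comment above records an ncnn quirk that outlives this test: ncnn's Mat carries an implicit leading batch axis, so a conceptually 1-D output comes back from the wrapper as 2-D, and the reference tensor has to be unsqueezed before comparison, as the removed assertion did:

import torch

# A shape tuple as a 1-D tensor, unsqueezed to 2-D to match the implicit
# batch axis that the ncnn wrapper adds to its output Mats.
orig_shape = (1, 3, 10, 15)
model_output = torch.tensor(orig_shape).unsqueeze(0).float()
assert model_output.shape == (1, 4)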
- model_outputs = [torch.tensor(orig_shape).unsqueeze(0).float()] - ncnn_outputs = ncnn_model(dict(zip(input_names, [input]))) - ncnn_outputs = [ncnn_outputs[name] for name in output_names] - assert_allclose(model_outputs, ncnn_outputs, tolerate_small_mismatch) - - -@pytest.mark.parametrize('backend', [TEST_NCNN]) -@pytest.mark.parametrize('dim, n, c, h, w', [(1, 1, 1, 1, 8), (2, 1, 1, 5, 7), - (3, 1, 3, 10, 15)]) -@pytest.mark.parametrize('val', [0., 1., -3, 4.25]) -def test_constantofshape(backend, - dim, - n, - c, - h, - w, - val, - input_names=['input'], - output_names=['output'], - tolerate_small_mismatch=False, - input_list=None, - save_dir=None): - backend.check_env() - if input_list is None: - input = torch.tensor((n, c, h, w)[-dim - 1:]).unsqueeze(0) - else: - input = input_list[0] - assert input.dim() == dim + 1, 'input.dim() must equal to dim + 1' - assert tuple(input.shape) == (n, c, h, - w)[-dim - 1:], ('input.shape must the ' - 'same as orig_shape') - - assert input.shape[0] == 1, ('ncnn input batch must be 1, ' - f'got {input.shape[0]}') - assert input[0][0] == 1, ('ncnn output mat batch must be 1, ' - f'got {input[0][0]}') - - constantofshape_node = make_node( - 'ConstantOfShape', input_names, output_names, value=float(val)) - assert len(input_names) == 1, 'length of input_names must be 1' - assert len(output_names) == 1, 'length of output_names must be 1' - constantofshape_graph = make_graph( - [constantofshape_node], 'constantofshape_graph', [ - make_tensor_value_info(input_names[0], onnx.TensorProto.FLOAT, - input.shape) - ], [ - make_tensor_value_info(output_names[0], onnx.TensorProto.FLOAT, - torch.Size(input[0])) - ]) - constantofshape_model = make_model(constantofshape_graph) - with RewriterContext(cfg={}, backend=backend.backend_name, opset=11): - ncnn_model = backend.onnx2ncnn(constantofshape_model, - 'constantofshape', output_names, - save_dir) - - # ncnn mat has implicit batch for mat, the ncnn_output is a mat, - # so the ncnn_outputs has 2 dimensions, not 1. 
- model_outputs = [torch.fill_(torch.rand(tuple(input[0])), val)] - ncnn_outputs = ncnn_model(dict(zip(input_names, [input.float()]))) - ncnn_outputs = [ncnn_outputs[name] for name in output_names] - assert_allclose(model_outputs, ncnn_outputs, tolerate_small_mismatch) - - -@pytest.mark.parametrize('backend', [TEST_NCNN]) -@pytest.mark.parametrize('axis, data_dims, indice_dims', [(0, 1, 1), (0, 2, 1), - (1, 2, 1), (0, 3, 1), - (1, 3, 1), - (2, 3, 1)]) -def test_gather(backend, - axis, - data_dims, - indice_dims, - input_names=['input', 'indices'], - output_names=['output'], - tolerate_small_mismatch=False, - input_list=None, - save_dir=None): - backend.check_env() - - if input_list is None: - # the real data dims is data_dims + 1 - data = torch.rand((8, 12, 17)[-data_dims:]).unsqueeze(0) - indice = torch.randint(0, 8, (3, 4, 5)[-indice_dims:]).unsqueeze(0) - else: - data = input_list[0] - indice = input_list[1] - assert data.shape[0] == 1, ('ncnn batch must be 1, ' - f'but got {data.shape[0]}') - assert indice.shape[0] == 1, ('ncnn batch must be 1, ' - f'but got {indice.shape[0]}') - - gather_node = make_node('Gather', input_names, output_names, axis=axis + 1) - gather_graph = make_graph([gather_node], 'gather_graph', [ - make_tensor_value_info(input_names[0], onnx.TensorProto.FLOAT, None), - make_tensor_value_info(input_names[1], onnx.TensorProto.INT64, None) - ], [make_tensor_value_info(output_names[0], onnx.TensorProto.FLOAT, None)]) - gather_model = make_model(gather_graph) - - with RewriterContext(cfg={}, backend=backend.backend_name, opset=11): - ncnn_model = backend.onnx2ncnn(gather_model, 'gather', output_names, - save_dir) - - # ncnn mat has implicit batch for mat, the ncnn_output is a mat, - # so the ncnn_outputs has 2 dimensions, not 1. - import importlib - - if importlib.util.find_spec('onnxruntime') is None: - pytest.skip('onnxruntime not installed.') - - import numpy as np - import onnxruntime - session = onnxruntime.InferenceSession(gather_model.SerializeToString()) - model_outputs = session.run( - output_names, - dict( - zip(input_names, [ - np.array(data, dtype=np.float32), - np.array(indice[0], dtype=np.int64) - ]))) - model_outputs = [model_output for model_output in model_outputs] - ncnn_outputs = ncnn_model( - dict(zip(input_names, [data.float(), indice.float()]))) - ncnn_outputs = [ncnn_outputs[name] for name in output_names] - assert_allclose(model_outputs, ncnn_outputs, tolerate_small_mismatch) - - -@pytest.mark.parametrize('backend', [TEST_NCNN]) -@pytest.mark.parametrize('dim', [1, 2, 3]) -def test_tensorslice(backend, dim, input_list=None, save_dir=None): - backend.check_env() - - if input_list is None: - input = torch.rand((8, 12, 17)[-dim:]).unsqueeze(0) - else: - input = input_list[0] - assert input.dim() == dim + 1, ('input.dim() must equal to ' - f'dim + 1, expected: {dim + 1}, ' - f'got: {input.dim()}') - - assert input.shape[0] == 1, ('ncnn batch must be 1, ' - f'but got {input.shape[0]}') - - def tensorslice_function(inputs): - if dim == 1: - return inputs[:, 2:17:7] - if dim == 2: - return inputs[:, 3:12:4, 2:15:3] - if dim == 3: - return inputs[:, 0:8:2, 2:12:4, 2:17:7] - - wrapped_model = WrapFunction(tensorslice_function) - - with RewriterContext(cfg={}, backend=backend.backend_name, opset=11): backend.run_and_validate( - wrapped_model, [input.float()], - 'tensorslice', - input_names=['inputs'], - output_names=['outputs'], - save_dir=save_dir) + onnx_model, [rois, *input], + 'multi_level_roi_align', + input_names=input_name, + 
output_names=['bbox_feats'], + expected_result=expected_result, + save_dir=str(tmp_path)) - -@pytest.mark.parametrize('backend', [TEST_NCNN]) -@pytest.mark.parametrize('input_dim, output_dim', [(1, 1), (1, 2), (1, 3), - (2, 2), (2, 3), (3, 3)]) -def test_expand(backend, - input_dim, - output_dim, - input_list=None, - save_dir=None): - backend.check_env() - if input_list is None: - input = torch.rand((1, 12, 1)[-input_dim:]).unsqueeze(0) - target = torch.rand((8, 12, 17)[-output_dim:]).unsqueeze(0) - else: - input = input_list[0] - target = input_list[1] - assert input.shape[0] == 1, (f'ncnn batch must be 1, ' - f'but not {input.shape[0]}') - assert target.shape[0] == 1, (f'ncnn batch must be 1, ' - f'but not {target.shape[0]}') - - def expand_function(input, target): - return input.expand_as(target) - - wrapped_model = WrapFunction(expand_function) - with RewriterContext(cfg={}, backend=backend.backend_name, opset=11): - backend.run_and_validate( - wrapped_model, [input.float(), target.float()], - 'expand', - input_names=['input', 'shape'], - output_names=['output'], - save_dir=save_dir) - - -@pytest.mark.parametrize('backend', [TEST_ONNXRT]) -@pytest.mark.parametrize('iou_threshold', [0.1, 0.3]) -@pytest.mark.parametrize('score_threshold', [0., 0.1]) -def test_nms_rotated(backend, iou_threshold, score_threshold, save_dir=None): - backend.check_env() - - boxes = torch.tensor( - [[[60, 75, 20, 50, 0], [65, 80, 10, 40, 0], [30, 30, 40, 40, 0]], - [[60, 75, 20, 50, 0], [65, 80, 10, 40, 0], [30, 30, 40, 40, 0]]], - dtype=torch.float32) - scores = torch.tensor( - [[[0.5, 0.1, 0.1], [0.1, 0.6, 0.1], [0.1, 0.1, 0.7], [0.1, 0.1, 0.1]], - [[0.1, 0.1, 0.1], [0.7, 0.1, 0.1], [0.1, 0.6, 0.1], [0.1, 0.1, 0.5]]], - dtype=torch.float32) - - from mmdeploy.mmcv.ops import ONNXNMSRotatedOp - - def wrapped_function(torch_boxes, torch_scores): - return ONNXNMSRotatedOp.apply(torch_boxes, torch_scores, iou_threshold, - score_threshold) - - wrapped_model = WrapFunction(wrapped_function).eval() - - with RewriterContext( - Config({'backend_config': { - 'type': backend.backend_name - }}), - backend=backend.backend_name, - opset=11): - backend.run_and_validate( - wrapped_model, [boxes, scores], - 'nms_rotated', - input_names=['boxes', 'scores'], - output_names=['keep_inds'], - save_dir=save_dir) - - -@pytest.mark.parametrize('backend', [TEST_ONNXRT]) -@pytest.mark.parametrize('pool_h,pool_w,spatial_scale,sampling_ratio', - [(2, 2, 1.0, 2), (4, 4, 2.0, 4)]) -def test_roi_align_rotated(backend, - pool_h, - pool_w, - spatial_scale, - sampling_ratio, - input_list=None, - save_dir=None): - backend.check_env() - - if input_list is None: - # input = torch.rand(1, 1, 16, 16, dtype=torch.float32) - input = torch.tensor([[[[1., 2.], [3., 4.]]]], dtype=torch.float32) - single_roi = torch.tensor([[0., 0.5, 0.5, 1., 1., 0]], - dtype=torch.float32) - else: - input = torch.tensor(input_list[0], dtype=torch.float32) - single_roi = torch.tensor(input_list[1], dtype=torch.float32) - - from mmcv.ops import roi_align_rotated - - def wrapped_function(torch_input, torch_rois): - return roi_align_rotated(torch_input, torch_rois, (pool_w, pool_h), - spatial_scale, sampling_ratio, True, False) - - wrapped_model = WrapFunction(wrapped_function).eval() - - with RewriterContext( - Config({'backend_config': { - 'type': backend.backend_name - }}), - backend=backend.backend_name, - opset=11): - backend.run_and_validate( - wrapped_model, [input, single_roi], - 'roi_align_rotated', - input_names=['input', 'rois'], - output_names=['roi_feat'], - 
save_dir=save_dir) - - -@pytest.mark.parametrize('backend', [TEST_TENSORRT]) -@pytest.mark.parametrize( - 'out_size, clockwise, sampling_ratio, roi_scale_factor,' - ' finest_scale, featmap_strides, aligned', - [(tuple([2, 2]), False, 2, 1.0, 2, list([1.0]), 1)]) -def test_multi_level_rotated_roi_align(backend, - out_size, - clockwise, - sampling_ratio, - roi_scale_factor, - finest_scale, - featmap_strides, - aligned, - input_list=None, - save_dir=None): - backend.check_env() - - if input_list is None: + @pytest.mark.parametrize( + 'out_size, clockwise, sampling_ratio, roi_scale_factor,' + ' finest_scale, featmap_strides, aligned', + [(tuple([2, 2]), False, 2, 1.0, 2, list([1.0]), 1)]) + def test_multi_level_rotated_roi_align(self, backend, out_size, clockwise, + sampling_ratio, roi_scale_factor, + finest_scale, featmap_strides, + aligned, tmp_path): import numpy as np input = [ torch.tensor([[[[1., 2., 5., 6.], [3., 4., 7., 8.], @@ -967,61 +496,56 @@ def test_multi_level_rotated_roi_align(backend, ] rois = torch.tensor([[0., 1.5, 1.5, 3., 3., np.pi / 2]]) expected_result = torch.tensor([[[[7.5625, 1.9375], [10.375, 4.75]]]]) - else: - input = input_list[0] - rois = input_list[1] - expected_result = input_list[2] - input_name = [('input_' + str(i)) for i in range(len(featmap_strides))] - input_name.insert(0, 'rois') + input_name = [('input_' + str(i)) for i in range(len(featmap_strides))] + input_name.insert(0, 'rois') - inputs = [ - onnx.helper.make_tensor_value_info( - input_name[i + 1], onnx.TensorProto.FLOAT, shape=input[i].shape) - for i in range(len(input_name) - 1) - ] - inputs.append( - onnx.helper.make_tensor_value_info( - 'rois', onnx.TensorProto.FLOAT, shape=rois.shape)) - outputs = [ - onnx.helper.make_tensor_value_info( - 'bbox_feats', onnx.TensorProto.FLOAT, shape=expected_result.shape) - ] - node = onnx.helper.make_node( - 'MMCVMultiLevelRotatedRoiAlign', - input_name, ['bbox_feats'], - 'MMCVMultiLevelRotatedRoiAlign_0', - None, - 'mmdeploy', - featmap_strides=featmap_strides, - finest_scale=finest_scale, - output_height=out_size[0], - output_width=out_size[1], - clockwise=clockwise, - roi_scale_factor=roi_scale_factor, - sampling_ratio=sampling_ratio, - aligned=aligned) - graph = onnx.helper.make_graph([node], 'torch-jit-export', inputs, outputs) - onnx_model = onnx.helper.make_model( - graph, producer_name='pytorch', producer_version='1.8') - onnx_model.opset_import[0].version = 11 - onnx_model.opset_import.append( - onnx.onnx_ml_pb2.OperatorSetIdProto(domain='mmdeploy', version=1)) + inputs = [ + onnx.helper.make_tensor_value_info( + input_name[i + 1], + onnx.TensorProto.FLOAT, + shape=input[i].shape) for i in range(len(input_name) - 1) + ] + inputs.append( + onnx.helper.make_tensor_value_info( + 'rois', onnx.TensorProto.FLOAT, shape=rois.shape)) + outputs = [ + onnx.helper.make_tensor_value_info( + 'bbox_feats', + onnx.TensorProto.FLOAT, + shape=expected_result.shape) + ] + node = onnx.helper.make_node( + 'MMCVMultiLevelRotatedRoiAlign', + input_name, ['bbox_feats'], + 'MMCVMultiLevelRotatedRoiAlign_0', + None, + 'mmdeploy', + featmap_strides=featmap_strides, + finest_scale=finest_scale, + output_height=out_size[0], + output_width=out_size[1], + clockwise=clockwise, + roi_scale_factor=roi_scale_factor, + sampling_ratio=sampling_ratio, + aligned=aligned) + graph = onnx.helper.make_graph([node], 'torch-jit-export', inputs, + outputs) + onnx_model = onnx.helper.make_model( + graph, producer_name='pytorch', producer_version='1.8') + onnx_model.opset_import[0].version = 11 + 
onnx_model.opset_import.append( + onnx.onnx_ml_pb2.OperatorSetIdProto(domain='mmdeploy', version=1)) - backend.run_and_validate( - onnx_model, [rois, *input], - 'multi_level_rotated_roi_align', - input_names=input_name, - output_names=['bbox_feats'], - expected_result=expected_result, - save_dir=save_dir) + backend.run_and_validate( + onnx_model, [rois, *input], + 'multi_level_rotated_roi_align', + input_names=input_name, + output_names=['bbox_feats'], + expected_result=expected_result, + save_dir=str(tmp_path)) - -@pytest.mark.parametrize('backend', [TEST_TENSORRT]) -@pytest.mark.parametrize('strides', [(4, 4)]) -def test_trt_grid_priors(backend, strides, input_list=None, save_dir=None): - backend.check_env() - - if input_list is None: + @pytest.mark.parametrize('strides', [(4, 4)]) + def test_trt_grid_priors(self, backend, strides, tmp_path): input = torch.rand(1, 3, 2, 2) base_anchors = torch.tensor([[-22.6274, -11.3137, 22.6274, 11.3137], [-16.0000, -16.0000, 16.0000, 16.0000], @@ -1039,133 +563,419 @@ def test_trt_grid_priors(backend, strides, input_list=None, save_dir=None): [-18.6274, -7.3137, 26.6274, 15.3137], [-12.0000, -12.0000, 20.0000, 20.0000], [-7.3137, -18.6274, 15.3137, 26.6274]]) - else: - input = input_list[0] - base_anchors = input_list[1] - expected_result = input_list[2] - input_name = ['input'] - output_name = ['output'] + input_name = ['input'] + output_name = ['output'] - class GridPriorsTestOps(torch.autograd.Function): + class GridPriorsTestOps(torch.autograd.Function): - @staticmethod - def forward(ctx, base_anchor, feat_h, feat_w, stride_h: int, - stride_w: int): - a = base_anchor.shape[0] - return base_anchor.new_empty(feat_h * feat_w * a, 4) + @staticmethod + def forward(ctx, base_anchor, feat_h, feat_w, stride_h: int, + stride_w: int): + a = base_anchor.shape[0] + return base_anchor.new_empty(feat_h * feat_w * a, 4) - @staticmethod - def symbolic(g, base_anchor, feat_h, feat_w, stride_h: int, - stride_w: int): - from torch.onnx import symbolic_helper - feat_h = symbolic_helper._unsqueeze_helper(g, feat_h, [0]) - feat_w = symbolic_helper._unsqueeze_helper(g, feat_w, [0]) - zero_h = g.op( - 'ConstantOfShape', - feat_h, - value_t=torch.tensor([0], dtype=torch.long), - ) - zero_w = g.op( - 'ConstantOfShape', - feat_w, - value_t=torch.tensor([0], dtype=torch.long), - ) - return g.op( - 'mmdeploy::GridPriorsTRT', - base_anchor, - zero_h, - zero_w, - stride_h_i=stride_h, - stride_w_i=stride_w) + @staticmethod + def symbolic(g, base_anchor, feat_h, feat_w, stride_h: int, + stride_w: int): + from torch.onnx import symbolic_helper + feat_h = symbolic_helper._unsqueeze_helper(g, feat_h, [0]) + feat_w = symbolic_helper._unsqueeze_helper(g, feat_w, [0]) + zero_h = g.op( + 'ConstantOfShape', + feat_h, + value_t=torch.tensor([0], dtype=torch.long), + ) + zero_w = g.op( + 'ConstantOfShape', + feat_w, + value_t=torch.tensor([0], dtype=torch.long), + ) + return g.op( + 'mmdeploy::GridPriorsTRT', + base_anchor, + zero_h, + zero_w, + stride_h_i=stride_h, + stride_w_i=stride_w) - class GridPriorsTestModel(torch.nn.Module): + class GridPriorsTestModel(torch.nn.Module): - def __init__(self, strides, base_anchors=base_anchors) -> None: - super().__init__() - self.strides = strides - self.base_anchors = base_anchors + def __init__(self, strides, base_anchors=base_anchors) -> None: + super().__init__() + self.strides = strides + self.base_anchors = base_anchors - def forward(self, x): - base_anchors = self.base_anchors - h, w = x.shape[2:] - strides = self.strides - return 
-
-
-@pytest.mark.parametrize('backend', [TEST_TENSORRT])
-def test_dot_product_attention(backend, save_dir=None):
-    backend.check_env()
-
-    B = 2
-    Nt = 4
-    Ns = 4
-    E = 2
-    query = torch.rand(B, Nt, E).cuda()
-    key = torch.rand(B, Ns, E).cuda()
-    value = torch.rand(B, Ns, E).cuda()
-
-    model = torch.nn.MultiheadAttention(E, 2).cuda()
-
-    with RewriterContext(
-            Config({'backend_config': {
-                'type': backend.backend_name
-            }}),
-            backend=backend.backend_name,
-            opset=11):
-        backend.run_and_validate(
-            model, [query, key, value],
-            'dot_product_attention',
-            input_names=['query', 'key', 'value'],
-            output_names=['out', 'attn'],
-            save_dir=save_dir)
+    def test_dot_product_attention(self, backend, tmp_path):
+        B = 2
+        Nt = 4
+        Ns = 4
+        E = 2
+        query = torch.rand(B, Nt, E).cuda()
+        key = torch.rand(B, Ns, E).cuda()
+        value = torch.rand(B, Ns, E).cuda()
+
+        model = torch.nn.MultiheadAttention(E, 2).cuda()
+
+        with RewriterContext(
+                Config({'backend_config': {
+                    'type': backend.backend_name
+                }}),
+                backend=backend.backend_name,
+                opset=11):
+            backend.run_and_validate(
+                model, [query, key, value],
+                'dot_product_attention',
+                input_names=['query', 'key', 'value'],
+                output_names=['out', 'attn'],
+                save_dir=str(tmp_path))
 
-
-@pytest.mark.parametrize('backend', [TEST_TENSORRT])
-def test_gather_topk(backend, save_dir=None):
-    backend.check_env()
-    from mmdeploy.codebase.mmdet.deploy.utils import gather_topk
-
-    x = torch.rand(2, 10, 4).cuda()
-
-    class TestModel(torch.nn.Module):
-
-        def __init__(self) -> None:
-            super().__init__()
-
-        def forward(self, x):
-            batch_size = x.size(0)
-            max_x, _ = x.max(-1)
-            _, inds = max_x.topk(4)
-
-            new_x = gather_topk(x, inds=inds, batch_size=batch_size)
-            return new_x
-
-    model = TestModel().cuda()
-
-    with RewriterContext(
-            Config({'backend_config': {
-                'type': backend.backend_name
-            }}),
-            backend=backend.backend_name,
-            opset=11):
-        backend.run_and_validate(
-            model, [x],
-            'gather_topk',
-            input_names=['x'],
-            output_names=['out'],
-            save_dir=save_dir)
+    def test_gather_topk(self, backend, tmp_path):
+        from mmdeploy.codebase.mmdet.deploy.utils import gather_topk
+
+        x = torch.rand(2, 10, 4).cuda()
+
+        class TestModel(torch.nn.Module):
+
+            def __init__(self) -> None:
+                super().__init__()
+
+            def forward(self, x):
+                batch_size = x.size(0)
+                max_x, _ = x.max(-1)
+                _, inds = max_x.topk(4)
+
+                new_x = gather_topk(x, inds=inds, batch_size=batch_size)
+                return new_x
+
+        model = TestModel().cuda()
+
+        with RewriterContext(
+                Config({'backend_config': {
+                    'type': backend.backend_name
+                }}),
+                backend=backend.backend_name,
+                opset=11):
+            backend.run_and_validate(
+                model, [x],
+                'gather_topk',
+                input_names=['x'],
+                output_names=['out'],
+                save_dir=str(tmp_path))
+
+
+class TestONNXRuntimeOps:
+
+    @pytest.fixture(scope='class')
+    def backend(self):
+        return TestOnnxRTExporter()
+
+    @pytest.fixture(autouse=True, scope='class')
+    def check_env(self, backend):
+        backend.check_env()
+
+    @pytest.mark.parametrize('mode', ['bilinear', 'nearest'])
+    @pytest.mark.parametrize('padding_mode', ['zeros', 'border', 'reflection'])
+    @pytest.mark.parametrize('align_corners', [True, False])
+    def test_grid_sample(self, backend, mode, padding_mode, align_corners,
+                         tmp_path):
+        _test_grid_sample(backend, mode, padding_mode, align_corners, tmp_path)
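Note: `TestONNXRuntimeOps` above introduces the class layout this diff uses per backend: a class-scoped `backend` fixture builds the exporter once, and a class-scoped autouse `check_env` fixture runs the availability check before any test in the class executes. A minimal sketch of that layout, assuming (as the exporter classes do) that the check calls `pytest.skip` when the backend is missing:

import pytest


class TestBackendOps:

    @pytest.fixture(scope='class')
    def backend(self):
        return object()  # stands in for e.g. TestOnnxRTExporter()

    @pytest.fixture(autouse=True, scope='class')
    def check_env(self, backend):
        # In the real suite this calls backend.check_env(), which invokes
        # pytest.skip() when the backend is not installed.
        pass

    def test_something(self, backend):
        assert backend is not None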
+
+    @pytest.mark.parametrize('in_channels,out_channels,stride,padding,'
+                             'dilation,groups,deform_groups,kernel_size',
+                             [(3, 64, 1, 0, 1, 1, 1, 3),
+                              (1, 32, 3, 2, 1, 1, 1, 3)])
+    @pytest.mark.parametrize('bias', [True, False])
+    def test_modulated_deform_conv(self, backend, in_channels, out_channels,
+                                   stride, padding, dilation, groups,
+                                   deform_groups, kernel_size, bias, tmp_path):
+        _test_modulated_deform_conv(backend, in_channels, out_channels, stride,
+                                    padding, dilation, groups, deform_groups,
+                                    kernel_size, bias, tmp_path)
+
+    @pytest.mark.parametrize('iou_threshold', [0.1, 0.3])
+    @pytest.mark.parametrize('score_threshold', [0., 0.1])
+    def test_nms_rotated(self, backend, iou_threshold, score_threshold,
+                         tmp_path):
+        boxes = torch.tensor(
+            [[[60, 75, 20, 50, 0], [65, 80, 10, 40, 0], [30, 30, 40, 40, 0]],
+             [[60, 75, 20, 50, 0], [65, 80, 10, 40, 0], [30, 30, 40, 40, 0]]],
+            dtype=torch.float32)
+        scores = torch.tensor([[[0.5, 0.1, 0.1], [0.1, 0.6, 0.1],
+                                [0.1, 0.1, 0.7], [0.1, 0.1, 0.1]],
+                               [[0.1, 0.1, 0.1], [0.7, 0.1, 0.1],
+                                [0.1, 0.6, 0.1], [0.1, 0.1, 0.5]]],
+                              dtype=torch.float32)
+
+        from mmdeploy.mmcv.ops import ONNXNMSRotatedOp
+
+        def wrapped_function(torch_boxes, torch_scores):
+            return ONNXNMSRotatedOp.apply(torch_boxes, torch_scores,
+                                          iou_threshold, score_threshold)
+
+        wrapped_model = WrapFunction(wrapped_function).eval()
+
+        with RewriterContext(
+                Config({'backend_config': {
+                    'type': backend.backend_name
+                }}),
+                backend=backend.backend_name,
+                opset=11):
+            backend.run_and_validate(
+                wrapped_model, [boxes, scores],
+                'nms_rotated',
+                input_names=['boxes', 'scores'],
+                output_names=['keep_inds'],
+                save_dir=str(tmp_path))
+
+    @pytest.mark.parametrize('pool_h,pool_w,spatial_scale,sampling_ratio',
+                             [(2, 2, 1.0, 2), (4, 4, 2.0, 4)])
+    def test_roi_align_rotated(self, backend, pool_h, pool_w, spatial_scale,
+                               sampling_ratio, tmp_path):
+        input = torch.tensor([[[[1., 2.], [3., 4.]]]], dtype=torch.float32)
+        single_roi = torch.tensor([[0., 0.5, 0.5, 1., 1., 0]],
+                                  dtype=torch.float32)
+
+        from mmcv.ops import roi_align_rotated
+
+        def wrapped_function(torch_input, torch_rois):
+            return roi_align_rotated(torch_input, torch_rois, (pool_w, pool_h),
+                                     spatial_scale, sampling_ratio, True,
+                                     False)
+
+        wrapped_model = WrapFunction(wrapped_function).eval()
+
+        with RewriterContext(
+                Config({'backend_config': {
+                    'type': backend.backend_name
+                }}),
+                backend=backend.backend_name,
+                opset=11):
+            backend.run_and_validate(
+                wrapped_model, [input, single_roi],
+                'roi_align_rotated',
+                input_names=['input', 'rois'],
+                output_names=['roi_feat'],
+                save_dir=str(tmp_path))
+
+
+class TestNCNNOps:
+
+    @pytest.fixture(scope='class')
+    def backend(self):
+        return TestNCNNExporter()
+
+    @pytest.fixture(autouse=True, scope='class')
+    def check_env(self, backend):
+        backend.check_env()
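Note: several tests in this file wrap a bare function in `WrapFunction` so it can be exported and validated like a model. `WrapFunction` is provided by `mmdeploy.utils.test`; a rough sketch of what such a helper does (not the actual implementation) is:

import torch


class WrapFunctionSketch(torch.nn.Module):
    """Wrap a plain callable in an nn.Module so tracing/export can treat it
    like a model."""

    def __init__(self, func, **kwargs):
        super().__init__()
        self.func = func
        self.kwargs = kwargs

    def forward(self, *args):
        return self.func(*args, **self.kwargs)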
+
+    @pytest.mark.parametrize('k', [1, 3, 5])
+    @pytest.mark.parametrize('dim', [1, 2, 3])
+    @pytest.mark.parametrize('largest', [True, False])
+    @pytest.mark.parametrize('sorted', [True, False])
+    def test_topk(self, backend, k, dim, largest, sorted, tmp_path):
+        input = torch.rand(1, 8, 12, 17)
+
+        def topk_function(inputs):
+            return torch.Tensor.topk(inputs, k, dim, largest, sorted)
+
+        wrapped_model = WrapFunction(topk_function)
+
+        # When 'sorted' is False, PyTorch returns the top-k values in an
+        # unspecified order, so this unittest only checks that the returned
+        # elements form the correct top-k set; any ordering is accepted.
+        with RewriterContext(cfg={}, backend=backend.backend_name, opset=11):
+            if not sorted:
+                backend.run_and_validate(
+                    wrapped_model, [input.float()],
+                    'topk' + f'_no_sorted_dim_{dim}',
+                    input_names=['inputs'],
+                    output_names=['data', 'index'],
+                    save_dir=str(tmp_path))
+            else:
+                backend.run_and_validate(
+                    wrapped_model, [input.float()],
+                    'topk',
+                    input_names=['inputs'],
+                    output_names=['data', 'index'],
+                    save_dir=str(tmp_path))
+
+    @pytest.mark.parametrize('dim, n, c, h, w', [(1, 1, 1, 1, 8),
+                                                 (2, 1, 1, 5, 7),
+                                                 (3, 1, 3, 10, 15)])
+    def test_shape(self, backend, dim, n, c, h, w, tmp_path):
+        input_names = ['input']
+        output_names = ['output']
+
+        orig_shape = (n, c, h, w)[-dim - 1:]
+        input = torch.rand(orig_shape)
+
+        shape_node = make_node('Shape', input_names, output_names)
+        shape_graph = make_graph([shape_node], 'shape_graph', [
+            make_tensor_value_info(input_names[0], onnx.TensorProto.FLOAT,
+                                   orig_shape)
+        ], [
+            make_tensor_value_info(output_names[0], onnx.TensorProto.FLOAT,
+                                   (dim + 1, ))
+        ])
+        shape_model = make_model(shape_graph)
+
+        with RewriterContext(cfg={}, backend=backend.backend_name, opset=11):
+            ncnn_model = backend.onnx2ncnn(shape_model, 'shape', output_names,
+                                           str(tmp_path))
+
+        # An ncnn Mat carries an implicit batch dimension, so the ncnn
+        # output has 2 dimensions here, not 1.
+        model_outputs = [torch.tensor(orig_shape).unsqueeze(0).float()]
+        ncnn_outputs = ncnn_model(dict(zip(input_names, [input])))
+        ncnn_outputs = [ncnn_outputs[name] for name in output_names]
+        assert_allclose(model_outputs, ncnn_outputs, False)
+
+    @pytest.mark.parametrize('dim, n, c, h, w', [(1, 1, 1, 1, 8),
+                                                 (2, 1, 1, 5, 7),
+                                                 (3, 1, 3, 10, 15)])
+    @pytest.mark.parametrize('val', [0., 1., -3, 4.25])
+    def test_constantofshape(self, backend, dim, n, c, h, w, val, tmp_path):
+        input = torch.tensor((n, c, h, w)[-dim - 1:]).unsqueeze(0)
+
+        input_names = ['input']
+        output_names = ['output']
+        constantofshape_node = make_node(
+            'ConstantOfShape', input_names, output_names, value=float(val))
+        constantofshape_graph = make_graph(
+            [constantofshape_node], 'constantofshape_graph', [
+                make_tensor_value_info(input_names[0], onnx.TensorProto.FLOAT,
+                                       input.shape)
+            ], [
+                make_tensor_value_info(output_names[0], onnx.TensorProto.FLOAT,
+                                       torch.Size(input[0]))
+            ])
+        constantofshape_model = make_model(constantofshape_graph)
+
+        with RewriterContext(cfg={}, backend=backend.backend_name, opset=11):
+            ncnn_model = backend.onnx2ncnn(constantofshape_model,
+                                           'constantofshape', output_names,
+                                           str(tmp_path))
+
+        # An ncnn Mat carries an implicit batch dimension, so the ncnn
+        # output has 2 dimensions here, not 1.
+        model_outputs = [torch.fill_(torch.rand(tuple(input[0])), val)]
+        ncnn_outputs = ncnn_model(dict(zip(input_names, [input.float()])))
+        ncnn_outputs = [ncnn_outputs[name] for name in output_names]
+        assert_allclose(model_outputs, ncnn_outputs, False)
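Note: as the comment in `test_topk` says, `sorted=False` leaves the ordering of the returned top-k elements unspecified, so validation has to be order-insensitive. One way to express such a check (a sketch only; the real comparison happens inside `backend.run_and_validate`) is to sort both sides before comparing:

import torch


def topk_equal_any_order(expected: torch.Tensor,
                         actual: torch.Tensor) -> bool:
    # Compare the multisets of values along the last dimension.
    return torch.allclose(
        expected.sort(dim=-1).values, actual.sort(dim=-1).values)


assert topk_equal_any_order(
    torch.tensor([[3., 1., 2.]]), torch.tensor([[1., 2., 3.]]))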
+
+    @pytest.mark.parametrize('axis, data_dims, indice_dims', [(0, 1, 1),
+                                                              (0, 2, 1),
+                                                              (1, 2, 1),
+                                                              (0, 3, 1),
+                                                              (1, 3, 1),
+                                                              (2, 3, 1)])
+    def test_gather(self, backend, axis, data_dims, indice_dims, tmp_path):
+        input_names = ['input', 'indices']
+        output_names = ['output']
+        data = torch.rand((8, 12, 17)[-data_dims:]).unsqueeze(0)
+        indice = torch.randint(0, 8, (3, 4, 5)[-indice_dims:]).unsqueeze(0)
+
+        gather_node = make_node(
+            'Gather', input_names, output_names, axis=axis + 1)
+        gather_graph = make_graph([gather_node], 'gather_graph', [
+            make_tensor_value_info(input_names[0], onnx.TensorProto.FLOAT,
+                                   None),
+            make_tensor_value_info(input_names[1], onnx.TensorProto.INT64,
+                                   None)
+        ], [
+            make_tensor_value_info(output_names[0], onnx.TensorProto.FLOAT,
+                                   None)
+        ])
+        gather_model = make_model(gather_graph)
+
+        with RewriterContext(cfg={}, backend=backend.backend_name, opset=11):
+            ncnn_model = backend.onnx2ncnn(gather_model, 'gather',
+                                           output_names, str(tmp_path))
+
+        # Use onnxruntime as the reference implementation for Gather,
+        # since no expected result is hard-coded for this op.
+        import importlib
+
+        if importlib.util.find_spec('onnxruntime') is None:
+            pytest.skip('onnxruntime not installed.')
+
+        import numpy as np
+        import onnxruntime
+        session = onnxruntime.InferenceSession(
+            gather_model.SerializeToString(),
+            providers=['CPUExecutionProvider'])
+        model_outputs = session.run(
+            output_names,
+            dict(
+                zip(input_names, [
+                    np.array(data, dtype=np.float32),
+                    np.array(indice[0], dtype=np.int64)
+                ])))
+        model_outputs = [model_output for model_output in model_outputs]
+
+        # As above, the ncnn Mat outputs carry an implicit batch dimension.
+        ncnn_outputs = ncnn_model(
+            dict(zip(input_names, [data.float(), indice.float()])))
+        ncnn_outputs = [ncnn_outputs[name] for name in output_names]
+        assert_allclose(model_outputs, ncnn_outputs, False)
+
+    @pytest.mark.parametrize('dim', [1, 2, 3])
+    def test_tensorslice(self, backend, dim, tmp_path):
+        input = torch.rand((8, 12, 17)[-dim:]).unsqueeze(0)
+
+        def tensorslice_function(inputs):
+            if dim == 1:
+                return inputs[:, 2:17:7]
+            if dim == 2:
+                return inputs[:, 3:12:4, 2:15:3]
+            if dim == 3:
+                return inputs[:, 0:8:2, 2:12:4, 2:17:7]
+
+        wrapped_model = WrapFunction(tensorslice_function)
+
+        with RewriterContext(cfg={}, backend=backend.backend_name, opset=11):
+            backend.run_and_validate(
+                wrapped_model, [input.float()],
+                'tensorslice',
+                input_names=['inputs'],
+                output_names=['outputs'],
+                save_dir=str(tmp_path))
+
+    @pytest.mark.parametrize('input_dim, output_dim', [(1, 1), (1, 2), (1, 3),
+                                                       (2, 2), (2, 3), (3, 3)])
+    def test_expand(self, backend, input_dim, output_dim, tmp_path):
+        input = torch.rand((1, 12, 1)[-input_dim:]).unsqueeze(0)
+        target = torch.rand((8, 12, 17)[-output_dim:]).unsqueeze(0)
+
+        def expand_function(input, target):
+            return input.expand_as(target)
+
+        wrapped_model = WrapFunction(expand_function)
+        with RewriterContext(cfg={}, backend=backend.backend_name, opset=11):
+            backend.run_and_validate(
+                wrapped_model, [input.float(), target.float()],
+                'expand',
+                input_names=['input', 'shape'],
+                output_names=['output'],
+                save_dir=str(tmp_path))
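Note: the `tests/test_ops/utils.py` diff below replaces `@pytest.mark.skip` on the exporter helpers with the `__test__ = False` class attribute. pytest does not collect classes whose `__test__` attribute is falsy, so the `Test*`-named helpers stop appearing as skipped tests in reports. A minimal sketch of that behavior (hypothetical class):

class TestHelperNotATest:
    __test__ = False  # matched by name, but excluded from collection

    def run_and_validate(self, *args, **kwargs):
        pass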
diff --git a/tests/test_ops/utils.py b/tests/test_ops/utils.py
index 52e563a37..d052d180c 100644
--- a/tests/test_ops/utils.py
+++ b/tests/test_ops/utils.py
@@ -5,7 +5,6 @@ import tempfile
 
 import mmcv
 import onnx
-import pytest
 import torch
 
 import mmdeploy.apis.tensorrt as trt_apis
@@ -13,8 +12,8 @@ from mmdeploy.utils import Backend
 from mmdeploy.utils.test import assert_allclose, check_backend
 
 
-@pytest.mark.skip(reason='This a not test class but a utility class.')
 class TestOnnxRTExporter:
+    __test__ = False
 
     def __init__(self):
         self.backend_name = 'onnxruntime'
@@ -70,8 +69,8 @@ class TestOnnxRTExporter:
         assert_allclose(model_outputs, onnx_outputs, tolerate_small_mismatch)
 
 
-@pytest.mark.skip(reason='This a not test class but a utility class.')
 class TestTensorRTExporter:
+    __test__ = False
 
     def __init__(self):
         self.backend_name = 'tensorrt'
@@ -158,8 +157,8 @@ class TestTensorRTExporter:
         assert_allclose(model_outputs, trt_outputs, tolerate_small_mismatch)
 
 
-@pytest.mark.skip(reason='This a not test class but a utility class.')
 class TestNCNNExporter:
+    __test__ = False
 
     def __init__(self):
         self.backend_name = 'ncnn'
diff --git a/tests/test_pytorch/test_pytorch_functions.py b/tests/test_pytorch/test_pytorch_functions.py
index 70ac507e4..6f55d1c62 100644
--- a/tests/test_pytorch/test_pytorch_functions.py
+++ b/tests/test_pytorch/test_pytorch_functions.py
@@ -12,11 +12,15 @@ from mmdeploy.utils import Backend
 from mmdeploy.utils.test import (WrapFunction, backend_checker,
                                  get_rewrite_outputs)
 
-deploy_cfg_ncnn = mmcv.Config(
-    dict(
-        onnx_config=dict(input_shape=None),
-        backend_config=dict(type='ncnn', model_inputs=None, use_vulkan=False),
-        codebase_config=dict(type='mmdet', task='ObjectDetection')))
+
+@pytest.fixture(scope='module')
+def deploy_cfg_ncnn():
+    return mmcv.Config(
+        dict(
+            onnx_config=dict(input_shape=None),
+            backend_config=dict(
+                type='ncnn', model_inputs=None, use_vulkan=False),
+            codebase_config=dict(type='mmdet', task='ObjectDetection')))
 
 
 def get_trt_config(output_names, shape):
@@ -40,7 +44,7 @@ def get_trt_config(output_names, shape):
 
 
 @backend_checker(Backend.NCNN)
-def test_get_attribute():
+def test_get_attribute(deploy_cfg_ncnn):
 
     def model_func(tensor):
         x = tensor.size()
@@ -60,7 +64,7 @@ def test_get_attribute():
 
 
 @backend_checker(Backend.NCNN)
-def test_group_norm_ncnn():
+def test_group_norm_ncnn(deploy_cfg_ncnn):
     input = torch.rand([1, 2, 2, 2])
     weight = torch.rand([2])
     bias = torch.rand([2])
@@ -80,7 +84,7 @@ def test_group_norm_ncnn():
 
 
 @backend_checker(Backend.NCNN)
-def test_chunk_ncnn():
+def test_chunk_ncnn(deploy_cfg_ncnn):
     input = torch.rand(1, 16, 16, 16)
 
     model_output = input.chunk(2, dim=1)
@@ -102,7 +106,7 @@ def test_chunk_ncnn():
 
 
 @backend_checker(Backend.NCNN)
-def test_interpolate_static():
+def test_interpolate_static(deploy_cfg_ncnn):
     input = torch.rand([1, 2, 2, 2])
 
     model_output = F.interpolate(input, scale_factor=[2, 2])
@@ -144,7 +148,7 @@ def test_interpolate__rknn():
 
 
 @backend_checker(Backend.NCNN)
-def test_linear_ncnn():
+def test_linear_ncnn(deploy_cfg_ncnn):
     input = torch.rand([1, 2, 2])
     weight = torch.rand([2, 2])
     bias = torch.rand([2])
@@ -189,7 +193,7 @@ def test_repeat_static():
 
 
 @backend_checker(Backend.NCNN)
-def test_size_of_tensor_static():
+def test_size_of_tensor_static(deploy_cfg_ncnn):
 
     def model_func(input):
         x = torch.Tensor.size(input)
@@ -241,7 +245,7 @@ class TestTopk:
     @backend_checker(Backend.NCNN)
     @pytest.mark.parametrize('k', [1, 3, 4])
     @pytest.mark.parametrize('dim', [1, 2, 3])
-    def test_topk_ncnn(self, dim, k):
+    def test_topk_ncnn(self, dim, k, deploy_cfg_ncnn):
 
         model_output = torch.Tensor.topk(TestTopk.input, k, dim).values
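Note: the `deploy_cfg_ncnn` change above converts a module-level constant into a module-scoped fixture: the config is built lazily, once per module, and each test opts in by naming the fixture as a parameter. A minimal sketch of the pattern (a stand-in dict instead of `mmcv.Config`):

import pytest


@pytest.fixture(scope='module')
def deploy_cfg():
    # Built once per test module, and only if some test requests it.
    return {'backend_config': {'type': 'ncnn'}}


def test_uses_cfg(deploy_cfg):
    assert deploy_cfg['backend_config']['type'] == 'ncnn'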
@@ -318,7 +322,7 @@ def test_triu_trt(shape, diagonal):
     'input', [torch.rand(1, 16, 16), torch.rand(1, 3, 16, 16)])
 @pytest.mark.parametrize('dim', [1, 2])
-def test_normalize_ncnn(input, dim):
+def test_normalize_ncnn(input, dim, deploy_cfg_ncnn):
     import mmdeploy.apis.ncnn as ncnn_apis
     from mmdeploy.utils.test import get_onnx_model
diff --git a/tests/test_pytorch/test_pytorch_ops.py b/tests/test_pytorch/test_pytorch_ops.py
index 241247241..d2faf1e5e 100644
--- a/tests/test_pytorch/test_pytorch_ops.py
+++ b/tests/test_pytorch/test_pytorch_ops.py
@@ -13,39 +13,31 @@ onnx_file = tempfile.NamedTemporaryFile(suffix='onnx').name
 
 @pytest.fixture(autouse=False, scope='function')
 def prepare_symbolics():
-    context = RewriterContext(
-        Config(
-            dict(
-                onnx_config=dict(
-                    type='onnx',
-                    export_params=True,
-                    keep_initializers_as_inputs=False,
-                    opset_version=11,
-                    save_file='end2end.onnx',
-                    input_names=['input'],
-                    output_names=['output'],
-                    input_shape=None),
-                backend_config=dict(type='tensorrt'))),
-        'tensorrt',
-        opset=11)
-    context.enter()
-
-    yield
-
-    context.exit()
+    with RewriterContext(
+            Config(
+                dict(
+                    onnx_config=dict(
+                        type='onnx',
+                        export_params=True,
+                        keep_initializers_as_inputs=False,
+                        opset_version=11,
+                        save_file='end2end.onnx',
+                        input_names=['input'],
+                        output_names=['output'],
+                        input_shape=None),
+                    backend_config=dict(type='tensorrt'))),
+            'tensorrt',
+            opset=11):
+        yield
 
 
 @pytest.fixture(autouse=False, scope='function')
 def prepare_symbolics_ncnn():
-    context = RewriterContext(
-        Config({'backend_config': {
-            'type': 'ncnn'
-        }}), 'ncnn', opset=11)
-    context.enter()
-
-    yield
-
-    context.exit()
+    with RewriterContext(
+            Config({'backend_config': {
+                'type': 'ncnn'
+            }}), 'ncnn', opset=11):
+        yield
 
 
 class OpModel(torch.nn.Module):
@@ -116,7 +108,6 @@ def test_instance_norm():
 class TestLinear:
 
     def check(self, nodes):
-        print(nodes)
         exist = False
         for node in nodes:
             if node.op_type in ['Gemm', 'MatMul']:
diff --git a/tests/test_utils/test_util.py b/tests/test_utils/test_util.py
index b572b8566..58807d40f 100644
--- a/tests/test_utils/test_util.py
+++ b/tests/test_utils/test_util.py
@@ -417,15 +417,15 @@ def test_AdvancedEnum():
 
 @pytest.mark.skipif(
     not importlib.util.find_spec('mmedit'), reason='requires mmedit')
-def test_export_info():
-    with tempfile.TemporaryDirectory() as dir:
-        export2SDK(correct_deploy_cfg, correct_model_cfg, dir, '', 'cpu')
-        deploy_json = os.path.join(dir, 'deploy.json')
-        pipeline_json = os.path.join(dir, 'pipeline.json')
-        detail_json = os.path.join(dir, 'detail.json')
-        assert os.path.exists(pipeline_json)
-        assert os.path.exists(detail_json)
-        assert os.path.exists(deploy_json)
+def test_export_info(tmp_path):
+    dir = str(tmp_path)
+    export2SDK(correct_deploy_cfg, correct_model_cfg, dir, '', 'cpu')
+    deploy_json = os.path.join(dir, 'deploy.json')
+    pipeline_json = os.path.join(dir, 'pipeline.json')
+    detail_json = os.path.join(dir, 'detail.json')
+    assert os.path.exists(pipeline_json)
+    assert os.path.exists(detail_json)
+    assert os.path.exists(deploy_json)
 
 
 def wrap_target():
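Note: the `prepare_symbolics` fixtures above rely on `RewriterContext` supporting the context-manager protocol, which replaces the manual `enter()`/`exit()` calls and guarantees teardown even when the test body raises. A minimal sketch of that protocol (not mmdeploy code):

class SketchContext:

    def __enter__(self):
        # patch functions / register symbolics here
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Teardown runs even if the block raised, unlike a bare
        # enter()/.../exit() sequence, which an exception can skip.
        return False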