From 10c4ef4203887449c9936cd24f30bd402cd941cf Mon Sep 17 00:00:00 2001
From: Yifan Zhou
Date: Fri, 15 Oct 2021 10:25:48 +0800
Subject: [PATCH] [Unittest]: MMEdit unittest (#124)

* add mmedit test
* Solve unittest bug
* lint
* Remove debug code
* add data
* Refine code
* Fix ci
* Fix ci
* follow changes in mmdet test
* try to remove pytest skip
* remove redundant code
* rename
* Fix type hint
* Fix lint
* Refine SwitchBackendWrapper. Fix type hint
* update docstring
---
 mmdeploy/apis/utils.py                  |   6 +-
 mmdeploy/mmedit/export/prepare_input.py |  79 +++--
 mmdeploy/utils/test.py                  |  40 ++-
 tests/test_apis/test_calibration.py     |  28 +-
 tests/test_mmdet/test_mmdet_apis.py     | 394 ++++++++++++------------
 tests/test_mmedit/data/imgs/blank.jpg   | Bin 0 -> 691 bytes
 tests/test_mmedit/data/model.py         | 110 +++++++
 tests/test_mmedit/test_mmedit_apis.py   | 219 +++++++++++++
 tests/test_mmedit/test_mmedit_export.py |  97 ++++++
 tests/test_mmedit/test_mmedit_models.py |  72 +++++
 tests/test_ops/utils.py                 |   2 +-
 11 files changed, 793 insertions(+), 254 deletions(-)
 create mode 100644 tests/test_mmedit/data/imgs/blank.jpg
 create mode 100644 tests/test_mmedit/data/model.py
 create mode 100644 tests/test_mmedit/test_mmedit_apis.py
 create mode 100644 tests/test_mmedit/test_mmedit_export.py
 create mode 100644 tests/test_mmedit/test_mmedit_models.py

diff --git a/mmdeploy/apis/utils.py b/mmdeploy/apis/utils.py
index 540c0b144..09d4618d4 100644
--- a/mmdeploy/apis/utils.py
+++ b/mmdeploy/apis/utils.py
@@ -57,7 +57,7 @@ def init_pytorch_model(codebase: Codebase,
 def create_input(codebase: Codebase,
                  task: Task,
                  model_cfg: Union[str, mmcv.Config],
-                 imgs: Any,
+                 imgs: Union[str, np.ndarray],
                  input_shape: Sequence[int] = None,
                  device: str = 'cuda:0',
                  **kwargs):
@@ -68,8 +68,8 @@ def create_input(codebase: Codebase,
         task (Task): Specifying task type.
         model_cfg (str | mmcv.Config): Model config file or loaded Config
             object.
-        imgs (Any): Input image(s), accpeted data type are `str`,
-            `np.ndarray`, `torch.Tensor`.
+        imgs (str | np.ndarray): Input image(s), accepted data types are
+            `str` and `np.ndarray`.
         input_shape (list[int]): Input shape of image in (width, height)
             format, defaults to `None`.
         device (str): A string specifying device type, defaults to 'cuda:0'.
diff --git a/mmdeploy/mmedit/export/prepare_input.py b/mmdeploy/mmedit/export/prepare_input.py
index 99dfff9dc..c7293eca6 100644
--- a/mmdeploy/mmedit/export/prepare_input.py
+++ b/mmdeploy/mmedit/export/prepare_input.py
@@ -11,32 +11,65 @@ from torch.utils.data.dataset import Dataset
 from mmdeploy.utils import Task, load_config
 
 
-def _preprocess_cfg(config: Union[str, mmcv.Config]):
+def _preprocess_cfg(config: Union[str, mmcv.Config], task: Task,
+                    load_from_file: bool, is_static_cfg: bool,
+                    input_shape: Sequence[int]):
     """Remove unnecessary information in config.
 
     Args:
         model_cfg (str | mmcv.Config): The input model config.
+        task (Task): Specifying editing task type.
+        load_from_file (bool): Whether the input is an image filename rather
+            than a loaded numpy array. If False, the loading step is removed.
+        is_static_cfg (bool): Whether the config specifies a static export.
+            If this variable is True, the input image will be resized to a
+            fixed resolution.
+        input_shape (Sequence[int]): A list of two integers in (width, height)
+            format specifying input shape. Defaults to `None`.
     """
     # TODO: Differentiate the editing tasks (e.g. restorers and mattors
     # preprocess the data in differenet ways)
-    keys_to_remove = ['gt', 'gt_path']
+    if task == Task.SUPER_RESOLUTION:
+        keys_to_remove = ['gt', 'gt_path']
+    else:
+        raise NotImplementedError(f'Unknown task type: {task.value}')
+
+    # MMEdit doesn't support LoadImageFromWebcam.
+    # Remove "LoadImageFromFile" and related metakeys.
+    if not load_from_file:
+        config.test_pipeline.pop(0)
+        if task == Task.SUPER_RESOLUTION:
+            keys_to_remove.append('lq_path')
+
+    # Fix the input shape by inserting a 'Resize' transform
+    if is_static_cfg:
+        if task == Task.SUPER_RESOLUTION:
+            resize = {
+                'type': 'Resize',
+                'scale': (input_shape[0], input_shape[1]),
+                'keys': ['lq']
+            }
+            config.test_pipeline.insert(1, resize)
+
     for key in keys_to_remove:
         for pipeline in list(config.test_pipeline):
             if 'key' in pipeline and key == pipeline['key']:
                 config.test_pipeline.remove(pipeline)
-            if 'keys' in pipeline and key in pipeline['keys']:
-                pipeline['keys'].remove(key)
+            if 'keys' in pipeline:
+                while key in pipeline['keys']:
+                    pipeline['keys'].remove(key)
                 if len(pipeline['keys']) == 0:
                     config.test_pipeline.remove(pipeline)
-            if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
-                pipeline['meta_keys'].remove(key)
+            if 'meta_keys' in pipeline:
+                while key in pipeline['meta_keys']:
+                    pipeline['meta_keys'].remove(key)
 
 
 def create_input(task: Task,
                  model_cfg: Union[str, mmcv.Config],
-                 imgs: Union[str, mmcv.Config],
+                 imgs: Union[str, np.ndarray],
                  input_shape: Optional[Sequence[int]] = None,
                  device: Optional[str] = 'cuda:0'):
     """Create input for editing processor.
@@ -61,38 +94,30 @@ def create_input(task: Task,
         raise AssertionError('imgs must be strings or numpy arrays')
 
     cfg = load_config(model_cfg)[0].copy()
-    _preprocess_cfg(cfg)
-    if isinstance(imgs[0], np.ndarray):
-        cfg = cfg.copy()
-        # set loading pipeline type
-        cfg.test_pipeline[0].type = 'LoadImageFromWebcam'
-
-    # for static exporting
-    if input_shape is not None:
-        if task == Task.SUPER_RESOLUTION:
-            resize = {
-                'type': 'Resize',
-                'scale': (input_shape[0], input_shape[1]),
-                'keys': ['lq']
-            }
-            cfg.test_pipeline.insert(1, resize)
-        else:
-            raise NotImplementedError(f'Unknown task type: {task.value}')
+    _preprocess_cfg(
+        cfg,
+        task=task,
+        load_from_file=isinstance(imgs[0], str),
+        is_static_cfg=input_shape is not None,
+        input_shape=input_shape)
 
     test_pipeline = Compose(cfg.test_pipeline)
     data_arr = []
     for img in imgs:
-        # TODO: This is only for restore. Add condiction statement
-        data = dict(lq_path=img)
+        # TODO: This is only for restore. Add condition statement.
+        if isinstance(img, np.ndarray):
+            data = dict(lq=img)
+        else:
+            data = dict(lq_path=img)
+
         data = test_pipeline(data)
         data_arr.append(data)
 
     data = collate(data_arr, samples_per_gpu=len(imgs))
 
-    # TODO: This is only for restore. Add condiction statement
+    # TODO: This is only for restore. Add condition statement.
     data['img'] = data['lq']
 
     if device != 'cpu':
diff --git a/mmdeploy/utils/test.py b/mmdeploy/utils/test.py
index 049b1e3e6..ce51c6422 100644
--- a/mmdeploy/utils/test.py
+++ b/mmdeploy/utils/test.py
@@ -59,13 +59,14 @@ class WrapModel(nn.Module):
 
 class SwitchBackendWrapper:
     """A switcher for backend wrapper for unit tests.
-
     Examples:
         >>> from mmdeploy.utils.test import SwitchBackendWrapper
         >>> from mmdeploy.apis.onnxruntime.onnxruntime_utils import ORTWrapper
-        >>> SwitchBackendWrapper.set(ORTWrapper, outputs=outputs)
+        >>> with SwitchBackendWrapper(ORTWrapper) as wrapper:
+        >>>     wrapper.set(outputs=outputs)
+        >>>     ...
+ >>> # ORTWrapper will recover when exiting context >>> ... - >>> SwitchBackendWrapper.recover(ORTWrapper) """ init = None forward = None @@ -83,26 +84,35 @@ class SwitchBackendWrapper: def __call__(self, *args, **kwds): return self.forward(*args, **kwds) - @staticmethod - def set(obj, **kwargs): + def __init__(self, recover_class): + self._recover_class = recover_class + + def __enter__(self): + return self + + def __exit__(self, type, value, trace): + self.recover() + + def set(self, **kwargs): """Replace attributes in backend wrappers with dummy items.""" - SwitchBackendWrapper.init = obj.__init__ - SwitchBackendWrapper.forward = obj.forward - SwitchBackendWrapper.call = obj.__call__ + obj = self._recover_class + self.init = obj.__init__ + self.forward = obj.forward + self.call = obj.__call__ obj.__init__ = SwitchBackendWrapper.BackendWrapper.__init__ obj.forward = SwitchBackendWrapper.BackendWrapper.forward obj.__call__ = SwitchBackendWrapper.BackendWrapper.__call__ for k, v in kwargs.items(): setattr(obj, k, v) - @staticmethod - def recover(obj): - assert SwitchBackendWrapper.init is not None and \ - SwitchBackendWrapper.forward is not None,\ + def recover(self): + assert self.init is not None and \ + self.forward is not None,\ 'recover method must be called after exchange' - obj.__init__ = SwitchBackendWrapper.init - obj.forward = SwitchBackendWrapper.forward - obj.__call__ = SwitchBackendWrapper.call + obj = self._recover_class + obj.__init__ = self.init + obj.forward = self.forward + obj.__call__ = self.call def assert_allclose(expected: List[Union[torch.Tensor, np.ndarray]], diff --git a/tests/test_apis/test_calibration.py b/tests/test_apis/test_calibration.py index 26b1594a9..17f6f3689 100644 --- a/tests/test_apis/test_calibration.py +++ b/tests/test_apis/test_calibration.py @@ -1,5 +1,6 @@ import os.path as osp import tempfile +from multiprocessing import Process import h5py import mmcv @@ -7,7 +8,6 @@ import mmcv from mmdeploy.apis import create_calib_table calib_file = tempfile.NamedTemporaryFile(suffix='.h5').name -data_prefix = 'tests/data/tiger' ann_file = 'tests/data/annotation.json' @@ -71,7 +71,7 @@ def get_model_cfg(): dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', - img_scale=(1333, 800), + img_scale=(1, 1), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), @@ -169,7 +169,7 @@ def get_model_cfg(): return model_cfg -def test_create_calib_end2end(): +def run_test_create_calib_end2end(): model_cfg = get_model_cfg() deploy_cfg = get_end2end_deploy_cfg() create_calib_table( @@ -189,7 +189,19 @@ def test_create_calib_end2end(): assert calibrator['calib_data']['end2end']['input']['0'] is not None -def test_create_calib_parittion(): +# Because Faster-RCNN needs too much memory on GPU, we need to run tests in a +# new process. 
+ + +def test_create_calib_end2end(): + p = Process(target=run_test_create_calib_end2end) + try: + p.start() + finally: + p.join() + + +def run_test_create_calib_parittion(): model_cfg = get_model_cfg() deploy_cfg = get_partition_deploy_cfg() create_calib_table( @@ -211,3 +223,11 @@ def test_create_calib_parittion(): assert calib_data[partition_name] is not None assert calib_data[partition_name][input_names[i]] is not None assert calib_data[partition_name][input_names[i]]['0'] is not None + + +def test_create_calib_parittion(): + p = Process(target=run_test_create_calib_parittion) + try: + p.start() + finally: + p.join() diff --git a/tests/test_mmdet/test_mmdet_apis.py b/tests/test_mmdet/test_mmdet_apis.py index 03079291c..cc4770df5 100644 --- a/tests/test_mmdet/test_mmdet_apis.py +++ b/tests/test_mmdet/test_mmdet_apis.py @@ -26,20 +26,21 @@ def test_TensorRTDetector(): 'dets': torch.rand(1, 100, 5).cuda(), 'labels': torch.rand(1, 100).cuda() } - SwitchBackendWrapper.set(TRTWrapper, outputs=outputs) + with SwitchBackendWrapper(TRTWrapper) as wrapper: + wrapper.set(outputs=outputs) - from mmdeploy.mmdet.apis.inference import TensorRTDetector - trt_detector = TensorRTDetector('', ['' for i in range(80)], 0) - imgs = [torch.rand(1, 3, 64, 64).cuda()] - img_metas = [[{ - 'ori_shape': [64, 64, 3], - 'img_shape': [64, 64, 3], - 'scale_factor': [2.09, 1.87, 2.09, 1.87], - }]] + from mmdeploy.mmdet.apis.inference import TensorRTDetector + trt_detector = TensorRTDetector('', ['' for i in range(80)], 0) + imgs = [torch.rand(1, 3, 64, 64).cuda()] + img_metas = [[{ + 'ori_shape': [64, 64, 3], + 'img_shape': [64, 64, 3], + 'scale_factor': [2.09, 1.87, 2.09, 1.87], + }]] - results = trt_detector.forward(imgs, img_metas) - assert results is not None, 'failed to get output using TensorRTDetector' - SwitchBackendWrapper.recover(TRTWrapper) + results = trt_detector.forward(imgs, img_metas) + assert results is not None, ('failed to get output using ' + 'TensorRTDetector') @pytest.mark.skipif( @@ -52,21 +53,21 @@ def test_ONNXRuntimeDetector(): # simplify backend inference outputs = (torch.rand(1, 100, 5), torch.rand(1, 100)) - SwitchBackendWrapper.set(ORTWrapper, outputs=outputs) + with SwitchBackendWrapper(ORTWrapper) as wrapper: + wrapper.set(outputs=outputs) - from mmdeploy.mmdet.apis.inference import ONNXRuntimeDetector - ort_detector = ONNXRuntimeDetector('', ['' for i in range(80)], 0) - imgs = [torch.rand(1, 3, 64, 64)] - img_metas = [[{ - 'ori_shape': [64, 64, 3], - 'img_shape': [64, 64, 3], - 'scale_factor': [2.09, 1.87, 2.09, 1.87], - }]] + from mmdeploy.mmdet.apis.inference import ONNXRuntimeDetector + ort_detector = ONNXRuntimeDetector('', ['' for i in range(80)], 0) + imgs = [torch.rand(1, 3, 64, 64)] + img_metas = [[{ + 'ori_shape': [64, 64, 3], + 'img_shape': [64, 64, 3], + 'scale_factor': [2.09, 1.87, 2.09, 1.87], + }]] - results = ort_detector.forward(imgs, img_metas) - assert results is not None, 'failed to get output using '\ - 'ONNXRuntimeDetector' - SwitchBackendWrapper.recover(ORTWrapper) + results = ort_detector.forward(imgs, img_metas) + assert results is not None, 'failed to get output using '\ + 'ONNXRuntimeDetector' @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda') @@ -80,20 +81,20 @@ def test_PPLDetector(): # simplify backend inference outputs = (torch.rand(1, 100, 5), torch.rand(1, 100)) - SwitchBackendWrapper.set(PPLWrapper, outputs=outputs) + with SwitchBackendWrapper(PPLWrapper) as wrapper: + wrapper.set(outputs=outputs) - from 
mmdeploy.mmdet.apis.inference import PPLDetector - ppl_detector = PPLDetector('', ['' for i in range(80)], 0) - imgs = [torch.rand(1, 3, 64, 64)] - img_metas = [[{ - 'ori_shape': [64, 64, 3], - 'img_shape': [64, 64, 3], - 'scale_factor': [2.09, 1.87, 2.09, 1.87], - }]] + from mmdeploy.mmdet.apis.inference import PPLDetector + ppl_detector = PPLDetector('', ['' for i in range(80)], 0) + imgs = [torch.rand(1, 3, 64, 64)] + img_metas = [[{ + 'ori_shape': [64, 64, 3], + 'img_shape': [64, 64, 3], + 'scale_factor': [2.09, 1.87, 2.09, 1.87], + }]] - results = ppl_detector.forward(imgs, img_metas) - assert results is not None, 'failed to get output using PPLDetector' - SwitchBackendWrapper.recover(PPLWrapper) + results = ppl_detector.forward(imgs, img_metas) + assert results is not None, 'failed to get output using PPLDetector' def get_test_cfg_and_post_processing(): @@ -155,28 +156,26 @@ def test_NCNNPSSDetector(): 'scores': torch.rand(1, 120, 80), 'boxes': torch.rand(1, 120, 4) } - SwitchBackendWrapper.set( - NCNNWrapper, - outputs=outputs, - model_cfg=model_cfg, - deploy_cfg=deploy_cfg) + with SwitchBackendWrapper(NCNNWrapper) as wrapper: + wrapper.set( + outputs=outputs, model_cfg=model_cfg, deploy_cfg=deploy_cfg) - from mmdeploy.mmdet.apis.inference import NCNNPSSDetector + from mmdeploy.mmdet.apis.inference import NCNNPSSDetector - ncnn_pss_detector = NCNNPSSDetector(['', ''], ['' for i in range(80)], - model_cfg=model_cfg, - deploy_cfg=deploy_cfg, - device_id=0) - imgs = [torch.rand(1, 3, 32, 32)] - img_metas = [[{ - 'ori_shape': [32, 32, 3], - 'img_shape': [32, 32, 3], - 'scale_factor': [2.09, 1.87, 2.09, 1.87], - }]] + ncnn_pss_detector = NCNNPSSDetector(['', ''], ['' for i in range(80)], + model_cfg=model_cfg, + deploy_cfg=deploy_cfg, + device_id=0) + imgs = [torch.rand(1, 3, 32, 32)] + img_metas = [[{ + 'ori_shape': [32, 32, 3], + 'img_shape': [32, 32, 3], + 'scale_factor': [2.09, 1.87, 2.09, 1.87], + }]] - results = ncnn_pss_detector.forward(imgs, img_metas) - assert results is not None, 'failed to get output using NCNNPSSDetector' - SwitchBackendWrapper.recover(NCNNWrapper) + results = ncnn_pss_detector.forward(imgs, img_metas) + assert results is not None, ('failed to get output using ' + 'NCNNPSSDetector') @pytest.mark.skipif( @@ -197,30 +196,27 @@ def test_ONNXRuntimePSSDetector(): np.random.rand(1, 120, 80).astype(np.float32), np.random.rand(1, 120, 4).astype(np.float32) ] - SwitchBackendWrapper.set( - ORTWrapper, - outputs=outputs, - model_cfg=model_cfg, - deploy_cfg=deploy_cfg) + with SwitchBackendWrapper(ORTWrapper) as wrapper: + wrapper.set( + outputs=outputs, model_cfg=model_cfg, deploy_cfg=deploy_cfg) - from mmdeploy.mmdet.apis.inference import ONNXRuntimePSSDetector + from mmdeploy.mmdet.apis.inference import ONNXRuntimePSSDetector - ort_pss_detector = ONNXRuntimePSSDetector( - '', ['' for i in range(80)], - model_cfg=model_cfg, - deploy_cfg=deploy_cfg, - device_id=0) - imgs = [torch.rand(1, 3, 32, 32)] - img_metas = [[{ - 'ori_shape': [32, 32, 3], - 'img_shape': [32, 32, 3], - 'scale_factor': [2.09, 1.87, 2.09, 1.87], - }]] + ort_pss_detector = ONNXRuntimePSSDetector( + '', ['' for i in range(80)], + model_cfg=model_cfg, + deploy_cfg=deploy_cfg, + device_id=0) + imgs = [torch.rand(1, 3, 32, 32)] + img_metas = [[{ + 'ori_shape': [32, 32, 3], + 'img_shape': [32, 32, 3], + 'scale_factor': [2.09, 1.87, 2.09, 1.87], + }]] - results = ort_pss_detector.forward(imgs, img_metas) - assert results is not None, 'failed to get output using ' - 'ONNXRuntimePSSDetector' - 
SwitchBackendWrapper.recover(ORTWrapper) + results = ort_pss_detector.forward(imgs, img_metas) + assert results is not None, 'failed to get output using ' + 'ONNXRuntimePSSDetector' @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda') @@ -242,30 +238,27 @@ def test_TensorRTPSSDetector(): 'scores': torch.rand(1, 120, 80).cuda(), 'boxes': torch.rand(1, 120, 4).cuda() } - SwitchBackendWrapper.set( - TRTWrapper, - outputs=outputs, - model_cfg=model_cfg, - deploy_cfg=deploy_cfg) + with SwitchBackendWrapper(TRTWrapper) as wrapper: + wrapper.set( + outputs=outputs, model_cfg=model_cfg, deploy_cfg=deploy_cfg) - from mmdeploy.mmdet.apis.inference import TensorRTPSSDetector + from mmdeploy.mmdet.apis.inference import TensorRTPSSDetector - trt_pss_detector = TensorRTPSSDetector( - '', ['' for i in range(80)], - model_cfg=model_cfg, - deploy_cfg=deploy_cfg, - device_id=0) - imgs = [torch.rand(1, 3, 32, 32).cuda()] - img_metas = [[{ - 'ori_shape': [32, 32, 3], - 'img_shape': [32, 32, 3], - 'scale_factor': [2.09, 1.87, 2.09, 1.87], - }]] + trt_pss_detector = TensorRTPSSDetector( + '', ['' for i in range(80)], + model_cfg=model_cfg, + deploy_cfg=deploy_cfg, + device_id=0) + imgs = [torch.rand(1, 3, 32, 32).cuda()] + img_metas = [[{ + 'ori_shape': [32, 32, 3], + 'img_shape': [32, 32, 3], + 'scale_factor': [2.09, 1.87, 2.09, 1.87], + }]] - results = trt_pss_detector.forward(imgs, img_metas) - assert results is not None, 'failed to get output using ' - 'TensorRTPSSDetector' - SwitchBackendWrapper.recover(TRTWrapper) + results = trt_pss_detector.forward(imgs, img_metas) + assert results is not None, 'failed to get output using ' + 'TensorRTPSSDetector' def prepare_model_deploy_cfgs(): @@ -377,41 +370,41 @@ def test_TensorRTPTSDetector(): 'cls_score': torch.rand(1, 12, 80).cuda(), 'bbox_pred': torch.rand(1, 12, 4).cuda() } - SwitchBackendWrapper.set(TRTWrapper, outputs=outputs) - TRTWrapper.model_cfg = model_cfg - TRTWrapper.deploy_cfg = deploy_cfg + with SwitchBackendWrapper(TRTWrapper) as wrapper: + wrapper.set( + outputs=outputs, model_cfg=model_cfg, deploy_cfg=deploy_cfg) - # replace original function in PartitionTwoStageDetector - from mmdeploy.mmdet.apis.inference import PartitionTwoStageDetector - PartitionTwoStageDetector.__init__ = DummyPTSDetector.__init__ - PartitionTwoStageDetector.partition0_postprocess = \ - DummyPTSDetector.partition0_postprocess - PartitionTwoStageDetector.partition1_postprocess = \ - DummyPTSDetector.partition1_postprocess - PartitionTwoStageDetector.outputs0 = [torch.rand(2, 3).cuda()] * 2 - PartitionTwoStageDetector.outputs1 = [ - torch.rand(1, 9, 5).cuda(), - torch.rand(1, 9).cuda() - ] - PartitionTwoStageDetector.device_id = 0 - PartitionTwoStageDetector.CLASSES = ['' for i in range(80)] + # replace original function in PartitionTwoStageDetector + from mmdeploy.mmdet.apis.inference import PartitionTwoStageDetector + PartitionTwoStageDetector.__init__ = DummyPTSDetector.__init__ + PartitionTwoStageDetector.partition0_postprocess = \ + DummyPTSDetector.partition0_postprocess + PartitionTwoStageDetector.partition1_postprocess = \ + DummyPTSDetector.partition1_postprocess + PartitionTwoStageDetector.outputs0 = [torch.rand(2, 3).cuda()] * 2 + PartitionTwoStageDetector.outputs1 = [ + torch.rand(1, 9, 5).cuda(), + torch.rand(1, 9).cuda() + ] + PartitionTwoStageDetector.device_id = 0 + PartitionTwoStageDetector.CLASSES = ['' for i in range(80)] - from mmdeploy.mmdet.apis.inference import TensorRTPTSDetector - trt_pts_detector = TensorRTPTSDetector(['', ''], 
['' for i in range(80)], - model_cfg=model_cfg, - deploy_cfg=deploy_cfg, - device_id=0) + from mmdeploy.mmdet.apis.inference import TensorRTPTSDetector + trt_pts_detector = TensorRTPTSDetector(['', ''], + ['' for i in range(80)], + model_cfg=model_cfg, + deploy_cfg=deploy_cfg, + device_id=0) - imgs = [torch.rand(1, 3, 32, 32).cuda()] - img_metas = [[{ - 'ori_shape': [32, 32, 3], - 'img_shape': [32, 32, 3], - 'scale_factor': [2.09, 1.87, 2.09, 1.87], - }]] - results = trt_pts_detector.forward(imgs, img_metas) - assert results is not None, 'failed to get output using ' - 'TensorRTPTSDetector' - SwitchBackendWrapper.recover(TRTWrapper) + imgs = [torch.rand(1, 3, 32, 32).cuda()] + img_metas = [[{ + 'ori_shape': [32, 32, 3], + 'img_shape': [32, 32, 3], + 'scale_factor': [2.09, 1.87, 2.09, 1.87], + }]] + results = trt_pts_detector.forward(imgs, img_metas) + assert results is not None, 'failed to get output using ' + 'TensorRTPTSDetector' @pytest.mark.skipif( @@ -429,43 +422,40 @@ def test_ONNXRuntimePTSDetector(): np.random.rand(1, 12, 80).astype(np.float32), np.random.rand(1, 12, 4).astype(np.float32), ] * 2 - SwitchBackendWrapper.set( - ORTWrapper, - outputs=outputs, - model_cfg=model_cfg, - deploy_cfg=deploy_cfg) + with SwitchBackendWrapper(ORTWrapper) as wrapper: + wrapper.set( + outputs=outputs, model_cfg=model_cfg, deploy_cfg=deploy_cfg) - # replace original function in PartitionTwoStageDetector - from mmdeploy.mmdet.apis.inference import PartitionTwoStageDetector - PartitionTwoStageDetector.__init__ = DummyPTSDetector.__init__ - PartitionTwoStageDetector.partition0_postprocess = \ - DummyPTSDetector.partition0_postprocess - PartitionTwoStageDetector.partition1_postprocess = \ - DummyPTSDetector.partition1_postprocess - PartitionTwoStageDetector.outputs0 = [torch.rand(2, 3)] * 2 - PartitionTwoStageDetector.outputs1 = [ - torch.rand(1, 9, 5), torch.rand(1, 9) - ] - PartitionTwoStageDetector.device_id = -1 - PartitionTwoStageDetector.CLASSES = ['' for i in range(80)] + # replace original function in PartitionTwoStageDetector + from mmdeploy.mmdet.apis.inference import PartitionTwoStageDetector + PartitionTwoStageDetector.__init__ = DummyPTSDetector.__init__ + PartitionTwoStageDetector.partition0_postprocess = \ + DummyPTSDetector.partition0_postprocess + PartitionTwoStageDetector.partition1_postprocess = \ + DummyPTSDetector.partition1_postprocess + PartitionTwoStageDetector.outputs0 = [torch.rand(2, 3)] * 2 + PartitionTwoStageDetector.outputs1 = [ + torch.rand(1, 9, 5), torch.rand(1, 9) + ] + PartitionTwoStageDetector.device_id = -1 + PartitionTwoStageDetector.CLASSES = ['' for i in range(80)] - from mmdeploy.mmdet.apis.inference import ONNXRuntimePTSDetector - ort_pts_detector = ONNXRuntimePTSDetector(['', ''], - ['' for i in range(80)], - model_cfg=model_cfg, - deploy_cfg=deploy_cfg, - device_id=0) + from mmdeploy.mmdet.apis.inference import ONNXRuntimePTSDetector + ort_pts_detector = ONNXRuntimePTSDetector(['', ''], + ['' for i in range(80)], + model_cfg=model_cfg, + deploy_cfg=deploy_cfg, + device_id=0) - imgs = [torch.rand(1, 3, 32, 32)] - img_metas = [[{ - 'ori_shape': [32, 32, 3], - 'img_shape': [32, 32, 3], - 'scale_factor': [2.09, 1.87, 2.09, 1.87], - }]] - results = ort_pts_detector.forward(imgs, img_metas) - assert results is not None, 'failed to get output using ' - 'ONNXRuntimePTSDetector' - SwitchBackendWrapper.recover(ORTWrapper) + imgs = [torch.rand(1, 3, 32, 32)] + img_metas = [[{ + 'ori_shape': [32, 32, 3], + 'img_shape': [32, 32, 3], + 'scale_factor': [2.09, 1.87, 2.09, 
1.87], + }]] + results = ort_pts_detector.forward(imgs, img_metas) + assert results is not None, 'failed to get output using ' + 'ONNXRuntimePTSDetector' @pytest.mark.skipif( @@ -487,43 +477,40 @@ def test_NCNNPTSDetector(): 'cls_score': torch.rand(1, 12, 80), 'bbox_pred': torch.rand(1, 12, 4) } - SwitchBackendWrapper.set( - NCNNWrapper, - outputs=outputs, - model_cfg=model_cfg, - deploy_cfg=deploy_cfg) + with SwitchBackendWrapper(NCNNWrapper) as wrapper: + wrapper.set( + outputs=outputs, model_cfg=model_cfg, deploy_cfg=deploy_cfg) - # replace original function in PartitionTwoStageDetector - from mmdeploy.mmdet.apis.inference import PartitionTwoStageDetector - PartitionTwoStageDetector.__init__ = DummyPTSDetector.__init__ - PartitionTwoStageDetector.partition0_postprocess = \ - DummyPTSDetector.partition0_postprocess - PartitionTwoStageDetector.partition1_postprocess = \ - DummyPTSDetector.partition1_postprocess - PartitionTwoStageDetector.outputs0 = [torch.rand(2, 3)] * 2 - PartitionTwoStageDetector.outputs1 = [ - torch.rand(1, 9, 5), torch.rand(1, 9) - ] - PartitionTwoStageDetector.device_id = -1 - PartitionTwoStageDetector.CLASSES = ['' for i in range(80)] + # replace original function in PartitionTwoStageDetector + from mmdeploy.mmdet.apis.inference import PartitionTwoStageDetector + PartitionTwoStageDetector.__init__ = DummyPTSDetector.__init__ + PartitionTwoStageDetector.partition0_postprocess = \ + DummyPTSDetector.partition0_postprocess + PartitionTwoStageDetector.partition1_postprocess = \ + DummyPTSDetector.partition1_postprocess + PartitionTwoStageDetector.outputs0 = [torch.rand(2, 3)] * 2 + PartitionTwoStageDetector.outputs1 = [ + torch.rand(1, 9, 5), torch.rand(1, 9) + ] + PartitionTwoStageDetector.device_id = -1 + PartitionTwoStageDetector.CLASSES = ['' for i in range(80)] - from mmdeploy.mmdet.apis.inference import NCNNPTSDetector - ncnn_pts_detector = NCNNPTSDetector( - [''] * 4, [''] * 80, - model_cfg=model_cfg, - deploy_cfg=deploy_cfg, - device_id=0) + from mmdeploy.mmdet.apis.inference import NCNNPTSDetector + ncnn_pts_detector = NCNNPTSDetector( + [''] * 4, [''] * 80, + model_cfg=model_cfg, + deploy_cfg=deploy_cfg, + device_id=0) - imgs = [torch.rand(1, 3, 32, 32)] - img_metas = [[{ - 'ori_shape': [32, 32, 3], - 'img_shape': [32, 32, 3], - 'scale_factor': [2.09, 1.87, 2.09, 1.87], - }]] - results = ncnn_pts_detector.forward(imgs, img_metas) - assert results is not None, 'failed to get output using ' - 'NCNNPTSDetector' - SwitchBackendWrapper.recover(NCNNWrapper) + imgs = [torch.rand(1, 3, 32, 32)] + img_metas = [[{ + 'ori_shape': [32, 32, 3], + 'img_shape': [32, 32, 3], + 'scale_factor': [2.09, 1.87, 2.09, 1.87], + }]] + results = ncnn_pts_detector.forward(imgs, img_metas) + assert results is not None, 'failed to get output using ' + 'NCNNPTSDetector' @pytest.mark.skipif( @@ -541,9 +528,8 @@ def test_build_detector(): ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) # simplify backend inference - SwitchBackendWrapper.set( - ORTWrapper, model_cfg=model_cfg, deploy_cfg=deploy_cfg) - from mmdeploy.apis.utils import init_backend_model - detector = init_backend_model([''], model_cfg, deploy_cfg, -1) - assert detector is not None - SwitchBackendWrapper.recover(ORTWrapper) + with SwitchBackendWrapper(ORTWrapper) as wrapper: + wrapper.set(model_cfg=model_cfg, deploy_cfg=deploy_cfg) + from mmdeploy.apis.utils import init_backend_model + detector = init_backend_model([''], model_cfg, deploy_cfg, -1) + assert detector is not None diff --git 
a/tests/test_mmedit/data/imgs/blank.jpg b/tests/test_mmedit/data/imgs/blank.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ac446f47d90543976ba5a6a45740267b096f57d9
GIT binary patch
literal 691
[base85-encoded payload of the 691-byte blank.jpg test image omitted]
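A minimal usage sketch of the reworked SwitchBackendWrapper context manager introduced above, following the docstring and the updated detector tests in this patch; the ORTWrapper import path and the fake `outputs` tuple come from those tests and are illustrative assumptions, not a prescribed recipe.

    import torch

    from mmdeploy.apis.onnxruntime.onnxruntime_utils import ORTWrapper
    from mmdeploy.utils.test import SwitchBackendWrapper

    # Fake backend outputs in the (dets, labels) layout the detector tests expect.
    outputs = (torch.rand(1, 100, 5), torch.rand(1, 100))

    with SwitchBackendWrapper(ORTWrapper) as wrapper:
        # Swap ORTWrapper's __init__/forward/__call__ for dummies that simply
        # return `outputs`, so no real ONNX Runtime session is created.
        wrapper.set(outputs=outputs)
        ...  # build the backend model and run assertions here
    # Leaving the `with` block calls recover(), restoring the original ORTWrapper.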