diff --git a/configs/mmrotate/rotated-detection_onnxruntime_static.py b/configs/mmrotate/rotated-detection_onnxruntime_static.py
index 662608bbe..924b204dd 100644
--- a/configs/mmrotate/rotated-detection_onnxruntime_static.py
+++ b/configs/mmrotate/rotated-detection_onnxruntime_static.py
@@ -1,3 +1,3 @@
 _base_ = ['./rotated-detection_static.py', '../_base_/backends/onnxruntime.py']

-onnx_config = dict(output_names=['dets', 'labels'], input_shape=None)
+onnx_config = dict(output_names=['dets', 'labels'], input_shape=[1024, 1024])
diff --git a/mmdeploy/codebase/mmrotate/deploy/rotated_detection.py b/mmdeploy/codebase/mmrotate/deploy/rotated_detection.py
index 94b6da3f9..757f01947 100644
--- a/mmdeploy/codebase/mmrotate/deploy/rotated_detection.py
+++ b/mmdeploy/codebase/mmrotate/deploy/rotated_detection.py
@@ -85,12 +85,12 @@ def process_model_config(model_cfg: Config,
         cfg.test_pipeline[0].type = 'mmdet.LoadImageFromNDArray'

     pipeline = cfg.test_pipeline
-
-    for i, transform in enumerate(pipeline):
-        # for static exporting
-        if input_shape is not None and transform.type == 'Resize':
-            pipeline[i].keep_ratio = False
-            pipeline[i].scale = tuple(input_shape)
+    # for static exporting
+    if input_shape is not None:
+        for i, transform in enumerate(pipeline):
+            if transform.type in ['Resize', 'mmdet.Resize']:
+                pipeline[i].keep_ratio = False
+                pipeline[i].scale = tuple(input_shape)

     pipeline = [
         transform for transform in pipeline
@@ -209,15 +209,13 @@ class RotatedDetection(BaseTask):
         cfg = process_model_config(self.model_cfg, imgs, input_shape)
         pipeline = cfg.test_pipeline

+        # for static exporting
         if not dynamic_flag:
-            transform = pipeline[1]
-            if 'transforms' in transform:
-                transform_list = transform['transforms']
-                for i, step in enumerate(transform_list):
-                    if step['type'] == 'Pad' and 'pad_to_square' in step \
-                            and step['pad_to_square']:
-                        transform_list.pop(i)
-                        break
+            for i, trans in enumerate(pipeline):
+                if trans['type'] == 'Pad' and 'pad_to_square' in trans \
+                        and trans['pad_to_square']:
+                    pipeline.pop(i)
+                    break

         test_pipeline = Compose(pipeline)
         data = []
@@ -261,6 +258,7 @@ class RotatedDetection(BaseTask):
         input_shape = get_input_shape(self.deploy_cfg)
         model_cfg = process_model_config(self.model_cfg, [''], input_shape)
         pipeline = model_cfg.test_pipeline
+        pipeline = replace_RResize(pipeline)
         meta_keys = [
             'filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape',
             'scale_factor', 'flip', 'flip_direction', 'img_norm_cfg',
diff --git a/tests/regression/mmpose.yml b/tests/regression/mmpose.yml
index c0d32d390..6e93db1c7 100644
--- a/tests/regression/mmpose.yml
+++ b/tests/regression/mmpose.yml
@@ -51,7 +51,7 @@ openvino:
     deploy_config: configs/mmpose/pose-detection_openvino_static-256x192.py
   pipeline_openvino_static_fp32_256x256: &pipeline_openvino_static_fp32_256x256
     convert_image: *convert_image
-    backend_test: *default_backend_test
+    backend_test: False
     deploy_config: configs/mmpose/pose-detection_openvino_static-256x256.py

 ncnn:
@@ -74,7 +74,6 @@ torchscript:
   pipeline_ts_static_fp32: &pipeline_ts_fp32
     convert_image: *convert_image
     backend_test: *default_backend_test
-    sdk_config: *sdk_static
     deploy_config: configs/mmpose/pose-detection_torchscript.py

 models:
diff --git a/tools/regression_test.py b/tools/regression_test.py
index 69c53259c..f47a6dce6 100644
--- a/tools/regression_test.py
+++ b/tools/regression_test.py
@@ -332,7 +332,7 @@ def get_pytorch_result(model_name: str, meta_info: dict, checkpoint_path: Path,
     }

     # get pytorch fps value
-    fps_info = model_info.get('Metadata').get('inference time (ms/im)')
+    fps_info = model_info.get('Metadata', {}).get('inference time (ms/im)')
     if fps_info is None:
         fps = '-'
     elif isinstance(fps_info, list):
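A minimal sketch of why the tools/regression_test.py hunk is needed (the model_info dict below is hypothetical; the real one is presumably parsed from the codebase metafiles):

    # A metafile entry with no 'Metadata' block:
    model_info = {'Name': 'example-model'}

    # Before the fix, model_info.get('Metadata') returns None, so the chained
    # .get() raises AttributeError: 'NoneType' object has no attribute 'get'.
    # With an empty dict as the default, the lookup degrades gracefully:
    fps_info = model_info.get('Metadata', {}).get('inference time (ms/im)')
    assert fps_info is None  # handled by the existing `if fps_info is None` branch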