[Fix] Fix unit test in dev1.x (#1790)

* fix errors in ut

* use nms_rotated ops instead of ext

* fix ut

* fix super-resolution

* update super_resolution
q.yao 2023-03-08 14:32:50 +08:00 committed by GitHub
parent 84a289fd88
commit 91a0a9af0b
13 changed files with 158 additions and 183 deletions


@@ -140,7 +140,7 @@ label: 65, score: 0.95
 - MMDet models.
-  YOLOV3 & YOLOX: you may paste the following partition configuration into [detection_rknn_static-320x320.py](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_rknn_static-320x320.py):
+  YOLOV3 & YOLOX: you may paste the following partition configuration into [detection_rknn_static-320x320.py](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_rknn-int8_static-320x320.py):
 ```python
 # yolov3, yolox for rknn-toolkit and rknn-toolkit2
@@ -172,7 +172,7 @@ label: 65, score: 0.95
 ])
 ```
-RetinaNet & SSD & FSAF with rknn-toolkit2, you may paste the following partition configuration into [detection_rknn_static-320x320.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmdet/detection/detection_rknn_static-320x320.py). Users with rknn-toolkit can directly use default config.
+RetinaNet & SSD & FSAF with rknn-toolkit2, you may paste the following partition configuration into [detection_rknn_static-320x320.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/configs/mmdet/detection/detection_rknn-int8_static-320x320.py). Users with rknn-toolkit can directly use default config.
 ```python
 # retinanet, ssd for rknn-toolkit2
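
For reference, the `partition_config` these docs ask you to paste follows mmdeploy's partition schema. A minimal sketch of the YOLO variant, assuming the mark names and output names used in the surrounding documentation (illustrative, not verified against this exact revision):

```python
# Sketch of an RKNN partition config (mark and output names are illustrative).
partition_config = dict(
    type='rknn',  # the partition policy name
    apply_marks=True,  # must be True so the partition marks take effect
    partition_cfg=[
        dict(
            save_file='model.onnx',  # file to save the partitioned model
            start=['detector_forward:input'],  # [mark_name:input, ...]
            end=['yolo_head:input'],  # [mark_name:output, ...]
            output_names=[f'pred_maps.{i}' for i in range(3)])
    ])
```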


@@ -105,7 +105,7 @@ python tools/deploy.py \
 - YOLOV3 & YOLOX
-  Write the following model partition configuration into [detection_rknn_static.py](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_rknn_static-320x320.py)
+  Write the following model partition configuration into [detection_rknn_static.py](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_rknn-int8_static-320x320.py)
 ```python
 # yolov3, yolox for rknn-toolkit and rknn-toolkit2
@@ -154,7 +154,7 @@ partition_config = dict(
 - RetinaNet & SSD & FSAF with rknn-toolkit2
-  Write the following model partition configuration into [detection_rknn_static.py](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_rknn_static-320x320.py). Users with rknn-toolkit do not need this.
+  Write the following model partition configuration into [detection_rknn_static.py](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_rknn-int8_static-320x320.py). Users with rknn-toolkit do not need this.
 ```python
 # retinanet, ssd and fsaf for rknn-toolkit2


@@ -46,7 +46,7 @@ def detrhead__predict_by_feat__default(self,
     bbox_preds = all_bbox_preds_list[-1][-1]
     img_shape = batch_img_metas[0]['img_shape']
-    max_per_img = self.test_cfg.get('max_per_img', self.num_query)
+    max_per_img = self.test_cfg.get('max_per_img', len(cls_scores[0]))
     batch_size = cls_scores.size(0)
     # `batch_index_offset` is used for the gather of concatenated tensor
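
The fallback change works because, in this rewriter, `cls_scores` has shape `(batch_size, num_queries, num_classes)` (the hunk itself reads `batch_size` from `cls_scores.size(0)`), so the query count can be recovered from the tensor instead of the removed `self.num_query` attribute. A tiny sketch of that relationship, with illustrative sizes:

```python
# Why len(cls_scores[0]) recovers the query count (sizes are illustrative).
import torch

cls_scores = torch.rand(2, 100, 80)  # (batch_size, num_queries, num_classes)
assert len(cls_scores[0]) == cls_scores.size(1) == 100  # queries per image
```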


@@ -3,7 +3,7 @@ from typing import List, Optional, Sequence, Union
 import mmengine
 import torch
-from mmedit.structures import EditDataSample, PixelData
+from mmedit.structures import EditDataSample
 from mmengine import Config
 from mmengine.model.base_model.data_preprocessor import BaseDataPreprocessor
 from mmengine.registry import Registry
@@ -64,6 +64,35 @@ class End2EndModel(BaseBackendModel):
             deploy_cfg=self.deploy_cfg,
             **kwargs)

+    def convert_to_datasample(
+            self, predictions: EditDataSample, data_samples: EditDataSample,
+            inputs: Optional[torch.Tensor]) -> List[EditDataSample]:
+        """Add predictions and destructed inputs (if passed) to data samples.
+
+        Args:
+            predictions (EditDataSample): The predictions of the model.
+            data_samples (EditDataSample): The data samples loaded from
+                dataloader.
+            inputs (Optional[torch.Tensor]): The input of model. Defaults to
+                None.
+
+        Returns:
+            List[EditDataSample]: Modified data samples.
+        """
+        if inputs is not None:
+            destructed_input = self.data_preprocessor.destruct(
+                inputs, data_samples, 'img')
+            data_samples.set_tensor_data({'input': destructed_input})
+
+        # split to list of data samples
+        data_samples = data_samples.split()
+        predictions = predictions.split()
+
+        for data_sample, pred in zip(data_samples, predictions):
+            data_sample.output = pred
+
+        return data_samples
+
     def forward(self,
                 inputs: torch.Tensor,
                 data_samples: Optional[List[BaseDataElement]] = None,
@@ -94,20 +123,15 @@ class End2EndModel(BaseBackendModel):
         lq = lq.to(self.device)
         batch_outputs = self.wrapper({self.input_name:
                                       lq})[self.output_names[0]].to('cpu')
-        if hasattr(self.data_preprocessor, 'destructor'):
-            batch_outputs = self.data_preprocessor.destructor(
-                batch_outputs.to(self.data_preprocessor.outputs_std.device))
-        predictions = []
-        for sr_pred, data_sample in zip(batch_outputs, data_samples):
-            pred = EditDataSample()
-            pred.set_data(dict(pred_img=PixelData(**dict(data=sr_pred))))
-            data_sample.set_data(dict(output=pred))
-            '''
-            data_sample.set_data(
-                dict(pred_img=PixelData(**dict(data=sr_pred))))
-            '''
-            predictions.append(data_sample)
+        assert hasattr(self.data_preprocessor, 'destruct')
+        batch_outputs = self.data_preprocessor.destruct(
+            batch_outputs, data_samples)
+
+        # create a stacked data sample here
+        predictions = EditDataSample(pred_img=batch_outputs.cpu())
+
+        predictions = self.convert_to_datasample(predictions, data_samples,
+                                                 inputs)
         return predictions
@@ -118,6 +142,35 @@ class SDKEnd2EndModel(End2EndModel):
     def __init__(self, *args, **kwargs):
         super(SDKEnd2EndModel, self).__init__(*args, **kwargs)

+    def convert_to_datasample(
+            self, predictions: EditDataSample, data_samples: EditDataSample,
+            inputs: Optional[torch.Tensor]) -> List[EditDataSample]:
+        """Add predictions and destructed inputs (if passed) to data samples.
+
+        Args:
+            predictions (EditDataSample): The predictions of the model.
+            data_samples (EditDataSample): The data samples loaded from
+                dataloader.
+            inputs (Optional[torch.Tensor]): The input of model. Defaults to
+                None.
+
+        Returns:
+            List[EditDataSample]: Modified data samples.
+        """
+        if inputs is not None:
+            destructed_input = self.data_preprocessor.destruct(
+                inputs, data_samples, 'img')
+            data_samples.set_tensor_data({'input': destructed_input})
+
+        # split to list of data samples
+        data_samples = data_samples.split()
+        predictions = predictions.split()
+
+        for data_sample, pred in zip(data_samples, predictions):
+            data_sample.output = pred
+
+        return data_samples
+
     def forward(self,
                 inputs: torch.Tensor,
                 data_samples: Optional[List[BaseDataElement]] = None,
@@ -151,14 +204,14 @@ class SDKEnd2EndModel(End2EndModel):
             outputs.append(
                 torch.from_numpy(output).permute(2, 0, 1).contiguous())
         outputs = torch.stack(outputs, 0) / 255.
-        if hasattr(self.data_preprocessor, 'destructor'):
-            outputs = self.data_preprocessor.destructor(
-                outputs.to(self.data_preprocessor.outputs_std.device))
-        for i, sr_pred in enumerate(outputs):
-            pred = EditDataSample()
-            pred.set_data(dict(pred_img=PixelData(**dict(data=sr_pred))))
-            data_samples[i].set_data(dict(output=pred))
+        assert hasattr(self.data_preprocessor, 'destruct')
+        outputs = self.data_preprocessor.destruct(outputs, data_samples)
+
+        # create a stacked data sample here
+        predictions = EditDataSample(pred_img=outputs.cpu())
+
+        predictions = self.convert_to_datasample(predictions, data_samples,
+                                                 inputs)
         return data_samples
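
The new post-processing relies on the stacked `EditDataSample` that these hunks introduce: predictions for the whole batch live in one sample, and `split()` fans them back out per image while `destruct` undoes the preprocessor's normalization. A rough sketch of that flow as exercised by this diff (shapes illustrative; whether it runs depends on the mmedit 1.x release you have installed):

```python
# Rough sketch of the stacked-sample flow used in the diff above.
import torch
from mmedit.structures import EditDataSample

batch_outputs = torch.rand(2, 3, 64, 64)  # backend output for two images
predictions = EditDataSample(pred_img=batch_outputs)  # one stacked sample
per_image = predictions.split()  # -> list of two EditDataSamples
assert len(per_image) == 2
```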


@@ -26,8 +26,7 @@ class ONNXNMSRotatedOp(torch.autograd.Function):
         Returns:
             Tensor: Selected indices of boxes.
         """
-        from mmcv.utils import ext_loader
-        ext_module = ext_loader.load_ext('_ext', ['nms_rotated'])
+        from mmcv.ops import nms_rotated
         batch_size, num_class, _ = scores.shape

         indices = []
@ -42,11 +41,8 @@ class ONNXNMSRotatedOp(torch.autograd.Function):
continue continue
valid_inds = torch.nonzero( valid_inds = torch.nonzero(
valid_mask, as_tuple=False).squeeze(dim=1) valid_mask, as_tuple=False).squeeze(dim=1)
_, order = _scores.sort(0, descending=True) _, box_inds = nms_rotated(
dets_sorted = _boxes.index_select(0, order) _boxes, _scores, iou_threshold=iou_threshold)
box_inds = ext_module.nms_rotated(_boxes, _scores, order,
dets_sorted, iou_threshold,
0)
box_inds = valid_inds[box_inds] box_inds = valid_inds[box_inds]
batch_inds = torch.zeros_like(box_inds) + batch_id batch_inds = torch.zeros_like(box_inds) + batch_id
cls_inds = torch.zeros_like(box_inds) + cls_id cls_inds = torch.zeros_like(box_inds) + cls_id
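
This rewrite drops the hand-rolled sort plus raw `_ext.nms_rotated` extension call in favor of the public `mmcv.ops.nms_rotated` wrapper, which handles sorting internally. A minimal usage sketch, assuming an mmcv build with the rotated ops compiled; boxes are `(cx, cy, w, h, theta)` and the values are illustrative:

```python
# Minimal sketch of the high-level rotated-NMS API the diff switches to.
import torch
from mmcv.ops import nms_rotated

boxes = torch.tensor([[50., 50., 20., 10., 0.00],
                      [50., 50., 20., 10., 0.05]])  # nearly identical boxes
scores = torch.tensor([0.9, 0.8])
# Returns kept detections (boxes with scores appended) and the kept indices.
dets, keep_inds = nms_rotated(boxes, scores, iou_threshold=0.5)
```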


@@ -42,12 +42,11 @@ def test_torch2torchscript(input_name, output_name):
     import numpy as np
     deploy_cfg = get_deploy_cfg(input_name, output_name)
     torch2torchscript(
-        np.random.rand(8, 8, 3),
+        np.random.randint(0, 255, (8, 8, 3)),
         '',
         ts_file,
         deploy_cfg,
         model_cfg=get_model_cfg(),
         device='cpu')
-    print(ts_file)
     assert osp.exists(ts_file)
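
A plausible reading of the input change: `torch2torchscript` feeds the array through the image preprocessing pipeline, which expects 0-255 pixel data, while `np.random.rand` produces floats in `[0, 1)`. A short illustration of the difference:

```python
# Old vs. new dummy input: floats in [0, 1) vs. integer pixel values.
import numpy as np

float_img = np.random.rand(8, 8, 3)               # floats in [0, 1)
pixel_img = np.random.randint(0, 255, (8, 8, 3))  # ints in [0, 255)
assert float_img.max() < 1.0 and pixel_img.dtype.kind == 'i'
```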


@@ -29,7 +29,7 @@ def test_forward_of_base_recognizer(model_cfg_path, backend):
             keep_initializers_as_inputs=False,
             opset_version=11,
             input_shape=None,
-            input_names=['input'],
+            input_names=['inputs'],
             output_names=['output'])))
     model_cfg = load_config(model_cfg_path)[0]


@@ -29,7 +29,7 @@ def get_invertedresidual_model():
     return model


-def get_vit_model():
+def get_vit_backbone():
     from mmcls.models.classifiers.image import ImageClassifier
     model = ImageClassifier(
         backbone={
@@ -60,7 +60,7 @@ def get_vit_model():
             },
             'topk': (1, 5)
         },
-    )
+    ).backbone
     model.requires_grad_(False)
     return model
@@ -151,39 +151,30 @@ def test_shufflenetv2_backbone__forward(backend_type: Backend):
 @pytest.mark.parametrize('backend_type', [Backend.NCNN])
 def test_vision_transformer_backbone__forward(backend_type: Backend):
-    from mmcls.structures import ClsDataSample
-
-    from mmdeploy.core import patch_model
     import_codebase(Codebase.MMCLS)
     check_backend(backend_type, True)
-    model = get_vit_model()
+    model = get_vit_backbone()
     model.eval()

     deploy_cfg = Config(
         dict(
             backend_config=dict(type=backend_type.value),
-            onnx_config=dict(input_shape=None, output_names=['output']),
+            onnx_config=dict(input_shape=None, output_names=['out0', 'out1']),
             codebase_config=dict(type='mmcls', task='Classification')))

     imgs = torch.rand((1, 3, 384, 384))
-    data_sample = ClsDataSample(
-        metainfo=dict(
-            scale_factor=(1, 1),
-            ori_shape=imgs.shape[2:],
-            img_shape=imgs.shape[2:]))
-    model = patch_model(
-        model, {}, backend=backend_type.value, data_samples=[data_sample])
-    model_outputs = model.forward(imgs)
+    model_outputs = model.forward(imgs)[0]
     wrapped_model = WrapModel(model, 'forward')
-    rewrite_inputs = {'batch_inputs': imgs}
+    rewrite_inputs = {'x': imgs}
     rewrite_outputs, is_backend_output = get_rewrite_outputs(
         wrapped_model=wrapped_model,
         model_inputs=rewrite_inputs,
         deploy_cfg=deploy_cfg)

     if isinstance(rewrite_outputs, dict):
-        rewrite_outputs = rewrite_outputs['output']
+        rewrite_outputs = [
+            rewrite_outputs[out_name] for out_name in ['out0', 'out1']
+        ]
     for model_output, rewrite_output in zip(model_outputs, rewrite_outputs):
         if isinstance(rewrite_output, torch.Tensor):
             rewrite_output = rewrite_output.cpu().numpy()


@@ -406,54 +406,15 @@ def get_detrhead_model():
         dict(
             type='DETRHead',
             num_classes=4,
-            in_channels=1,
-            transformer=dict(
-                type='Transformer',
-                encoder=dict(
-                    type='DetrTransformerEncoder',
-                    num_layers=1,
-                    transformerlayers=dict(
-                        type='BaseTransformerLayer',
-                        attn_cfgs=[
-                            dict(
-                                type='MultiheadAttention',
-                                embed_dims=4,
-                                num_heads=1)
-                        ],
-                        ffn_cfgs=dict(
-                            type='FFN',
-                            embed_dims=4,
-                            feedforward_channels=32,
-                            num_fcs=2,
-                            ffn_drop=0.,
-                            act_cfg=dict(type='ReLU', inplace=True),
-                        ),
-                        operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
-                decoder=dict(
-                    type='DetrTransformerDecoder',
-                    return_intermediate=True,
-                    num_layers=1,
-                    transformerlayers=dict(
-                        type='DetrTransformerDecoderLayer',
-                        attn_cfgs=dict(
-                            type='MultiheadAttention',
-                            embed_dims=4,
-                            num_heads=1),
-                        ffn_cfgs=dict(
-                            type='FFN',
-                            embed_dims=4,
-                            feedforward_channels=32,
-                            num_fcs=2,
-                            ffn_drop=0.,
-                            act_cfg=dict(type='ReLU', inplace=True),
-                        ),
-                        feedforward_channels=32,
-                        operation_order=('self_attn', 'norm', 'cross_attn',
-                                         'norm', 'ffn', 'norm')),
-                )),
-            positional_encoding=dict(
-                type='SinePositionalEncoding', num_feats=2, normalize=True),
-            test_cfg=dict(max_per_img=100)))
+            embed_dims=4,
+            loss_cls=dict(
+                type='CrossEntropyLoss',
+                bg_cls_weight=0.1,
+                use_sigmoid=False,
+                loss_weight=1.0,
+                class_weight=1.0),
+            loss_bbox=dict(type='L1Loss', loss_weight=5.0),
+            loss_iou=dict(type='GIoULoss', loss_weight=2.0)))
     model.requires_grad_(False)
     return model
@@ -715,7 +676,7 @@ def test_forward_of_base_detector(model_cfg_path, backend):
     model_cfg = Config(dict(model=mmengine.load(model_cfg_path)))
     model_cfg.model = _replace_r50_with_r18(model_cfg.model)
     from mmdet.apis import init_detector
-    model = init_detector(model_cfg, None, device='cpu')
+    model = init_detector(model_cfg, None, device='cpu', palette='coco')
     img = torch.randn(1, 3, 64, 64)
     from mmdet.structures import DetDataSample


@@ -74,7 +74,6 @@ train_pipeline = [
     dict(
         type='Flip', keys=['img', 'gt'], flip_ratio=0.5, direction='vertical'),
     dict(type='RandomTransposeHW', keys=['img', 'gt'], transpose_ratio=0.5),
-    dict(type='ToTensor', keys=['img', 'gt']),
     dict(type='PackEditInputs')
 ]
 val_pipeline = [
@@ -90,7 +89,6 @@ val_pipeline = [
         color_type='color',
         channel_order='rgb',
         imdecode_backend='cv2'),
-    dict(type='ToTensor', keys=['img', 'gt']),
     dict(type='PackEditInputs')
 ]
@@ -100,6 +98,7 @@ data_root = 'data'
 train_dataloader = dict(
     num_workers=4,
+    batch_size=16,
     persistent_workers=False,
     sampler=dict(type='InfiniteSampler', shuffle=True),
     dataset=dict(
@@ -124,15 +123,17 @@ val_dataloader = dict(
         data_prefix=dict(img='LRbicx4', gt='GTmod12'),
         pipeline=val_pipeline))

-val_evaluator = [
-    dict(type='MAE'),
-    dict(type='PSNR', crop_border=scale),
-    dict(type='SSIM', crop_border=scale),
-]
+val_evaluator = dict(
+    type='EditEvaluator',
+    metrics=[
+        dict(type='MAE'),
+        dict(type='PSNR', crop_border=scale),
+        dict(type='SSIM', crop_border=scale),
+    ])

 train_cfg = dict(
     type='IterBasedTrainLoop', max_iters=1000000, val_interval=5000)
-val_cfg = dict(type='ValLoop')
+val_cfg = dict(type='EditValLoop')
# optimizer # optimizer
optim_wrapper = dict( optim_wrapper = dict(
@@ -175,7 +176,6 @@ test_pipeline = [
         color_type='color',
         channel_order='rgb',
         imdecode_backend='cv2'),
-    dict(type='ToTensor', keys=['img', 'gt']),
     dict(type='PackEditInputs')
 ]
@@ -190,12 +190,14 @@ set5_dataloader = dict(
         type='BasicImageDataset',
         metainfo=dict(dataset_type='set5', task_name='sisr'),
         data_root=set5_data_root,
-        data_prefix=dict(img='LRbicx4', gt='GTmod12'),
+        data_prefix=dict(img='imgs', gt='imgs'),
         pipeline=test_pipeline))
-set5_evaluator = [
-    dict(type='PSNR', crop_border=2, prefix='Set5'),
-    dict(type='SSIM', crop_border=2, prefix='Set5'),
-]
+set5_evaluator = dict(
+    type='EditEvaluator',
+    metrics=[
+        dict(type='PSNR', crop_border=4, prefix='Set5'),
+        dict(type='SSIM', crop_border=4, prefix='Set5'),
+    ])

 set14_data_root = 'data/Set14'
 set14_dataloader = dict(
@@ -207,12 +209,14 @@ set14_dataloader = dict(
         type='BasicImageDataset',
         metainfo=dict(dataset_type='set14', task_name='sisr'),
         data_root=set5_data_root,
-        data_prefix=dict(img='LRbicx4', gt='GTmod12'),
+        data_prefix=dict(img='imgs', gt='imgs'),
         pipeline=test_pipeline))
-set14_evaluator = [
-    dict(type='PSNR', crop_border=2, prefix='Set14'),
-    dict(type='SSIM', crop_border=2, prefix='Set14'),
-]
+set14_evaluator = dict(
+    type='EditEvaluator',
+    metrics=[
+        dict(type='PSNR', crop_border=4, prefix='Set14'),
+        dict(type='SSIM', crop_border=4, prefix='Set14'),
+    ])

 ut_data_root = 'tests/test_codebase/test_mmedit/data'
 ut_dataloader = dict(
@@ -227,28 +231,7 @@ ut_dataloader = dict(
         data_prefix=dict(img='imgs', gt='imgs'),
         pipeline=test_pipeline))

-# test config for DIV2K
-div2k_data_root = 'data/DIV2K'
-div2k_dataloader = dict(
-    num_workers=4,
-    persistent_workers=False,
-    drop_last=False,
-    sampler=dict(type='DefaultSampler', shuffle=False),
-    dataset=dict(
-        type='BasicImageDataset',
-        ann_file='meta_info_DIV2K100sub_GT.txt',
-        metainfo=dict(dataset_type='div2k', task_name='sisr'),
-        data_root=div2k_data_root,
-        data_prefix=dict(
-            img='DIV2K_train_LR_bicubic/X4_sub', gt='DIV2K_train_HR_sub'),
-        # filename_tmpl=dict(img='{}_x4', gt='{}'),
-        pipeline=test_pipeline))
-div2k_evaluator = [
-    dict(type='PSNR', crop_border=2, prefix='DIV2K'),
-    dict(type='SSIM', crop_border=2, prefix='DIV2K'),
-]
-
 # test config
-test_cfg = dict(type='MultiTestLoop')
+test_cfg = dict(type='EditTestLoop')
 test_dataloader = [ut_dataloader, ut_dataloader]
 test_evaluator = [set5_evaluator, set14_evaluator]


@@ -55,7 +55,7 @@ def backend_model():
     ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
     wrapper = SwitchBackendWrapper(ORTWrapper)
     wrapper.set(outputs={
-        'output': torch.rand(3, 50, 50),
+        'output': torch.rand(1, 3, 50, 50),
     })
     yield task_processor.build_backend_model([''])
@@ -65,17 +65,14 @@ def backend_model():

 def test_build_test_runner():
     # Prepare dummy model
-    from mmedit.structures import EditDataSample, PixelData
-    data_sample = EditDataSample()
-    img_meta = dict(img_shape=(800, 1196, 3))
-    img = torch.rand((3, 800, 1196))
-    gt_img = PixelData(data=img, metainfo=img_meta)
-    data_sample.gt_img = gt_img
-    pred_img = PixelData(data=img, metainfo=img_meta)
-    data_sample.set_data(dict(output=pred_img))
-    # data_sample.output.pred_img = pred_img
+    from mmedit.structures import EditDataSample
+    img_meta = dict(ori_img_shape=(32, 32, 3))
+    img = torch.rand(3, 32, 32)
+    data_sample = EditDataSample(gt_img=img, metainfo=img_meta)
+    data_sample.set_data(
+        dict(output=EditDataSample(pred_img=img, metainfo=img_meta)))
+    data_sample.set_data(dict(input=img))
     outputs = [data_sample]
     model = DummyModel(outputs=outputs)
     assert model is not None
@@ -104,9 +101,7 @@ def test_create_input():

 def test_visualize(backend_model):
-    data_preprocessor = task_processor.build_data_preprocessor()
-    input_dict, _ = task_processor.create_input(input_img, img_shape,
-                                                data_preprocessor)
+    input_dict, _ = task_processor.create_input(input_img, img_shape)
     with torch.no_grad():
         results = backend_model.test_step(input_dict)[0]


@@ -2,9 +2,7 @@
 import pytest
 import torch
 from mmengine import Config
-from mmengine.structures import BaseDataElement

-import mmdeploy.backend.onnxruntime as ort_apis
 from mmdeploy.codebase import import_codebase
 from mmdeploy.utils import Backend, Codebase, load_config
 from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker
@@ -19,33 +17,34 @@ except ImportError:
 @backend_checker(Backend.ONNXRUNTIME)
 class TestEnd2EndModel:

-    @classmethod
-    def setup_class(cls):
+    @pytest.fixture(scope='class')
+    def end2end_model(self):
         # force add backend wrapper regardless of plugins
         # make sure ONNXRuntimeEditor can use ORTWrapper inside itself
         from mmdeploy.backend.onnxruntime import ORTWrapper
-        ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
-        # simplify backend inference
-        cls.wrapper = SwitchBackendWrapper(ORTWrapper)
-        cls.outputs = {
-            'outputs': torch.rand(3, 64, 64),
-        }
-        cls.wrapper.set(outputs=cls.outputs)
-        deploy_cfg = Config({'onnx_config': {'output_names': ['outputs']}})
-        model_cfg = 'tests/test_codebase/test_mmedit/data/model.py'
-        model_cfg = load_config(model_cfg)[0]
         from mmdeploy.codebase.mmedit.deploy.super_resolution_model import \
             End2EndModel
-        cls.end2end_model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu',
-                                         model_cfg, deploy_cfg)

-    @classmethod
-    def teardown_class(cls):
-        cls.wrapper.recover()
+        # simplify backend inference
+        with SwitchBackendWrapper(ORTWrapper) as wrapper:
+            outputs = {
+                'outputs': torch.rand(3, 64, 64),
+            }
+            wrapper.set(outputs=outputs)
+            deploy_cfg = Config({'onnx_config': {'output_names': ['outputs']}})
+            model_cfg = 'tests/test_codebase/test_mmedit/data/model.py'
+            model_cfg = load_config(model_cfg)[0]
+            model = End2EndModel(
+                Backend.ONNXRUNTIME, [''],
+                'cpu',
+                model_cfg,
+                deploy_cfg,
+                data_preprocessor=model_cfg.model.data_preprocessor)
+            yield model

-    def test_forward(self):
+    def test_forward(self, end2end_model):
         input_img = torch.rand(1, 3, 32, 32)
-        img_metas = [BaseDataElement(metainfo={'ori_img_shape': [3, 32, 32]})]
-        results = self.end2end_model.forward(input_img, img_metas)
+        from mmedit.structures import EditDataSample
+        img_metas = EditDataSample(metainfo={'ori_img_shape': [(32, 32, 3)]})
+        results = end2end_model.forward(input_img, img_metas)
         assert results is not None
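
The refactor above swaps `setup_class`/`teardown_class` for a class-scoped pytest fixture that uses `SwitchBackendWrapper` as a context manager, so the backend patch is undone automatically even when a test fails. A generic sketch of the pattern (the resource class here is hypothetical, not an mmdeploy API):

```python
# Generic setup/teardown -> fixture pattern, as applied in the diff above.
import pytest


class FakeResource:
    """Hypothetical stand-in for SwitchBackendWrapper."""

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        pass  # cleanup runs here, even if a test raised


class TestWithFixture:

    @pytest.fixture(scope='class')
    def resource(self):
        with FakeResource() as res:
            yield res  # tests execute while the context is open

    def test_uses_resource(self, resource):
        assert resource is not None
```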


@@ -34,7 +34,6 @@ val_dataloader = dict(
         data_root=data_root,
         ann_file=ann_file,
         data_prefix=dict(img_path='trainval/images/'),
-        img_shape=(1024, 1024),
         test_mode=True,
         pipeline=[
             dict(
@@ -62,7 +61,6 @@ test_dataloader = dict(
         data_root=data_root,
         ann_file=ann_file,
         data_prefix=dict(img_path='trainval/images/'),
-        img_shape=(1024, 1024),
         test_mode=True,
         pipeline=[
             dict(