[Fix] Sync codebase 2.0 (#979)

* remove mmcv.Config

* fix mmdet with new mmengine

* fix ut

* fix mmdet ut

* fix sdk cpp pipeline

* fix mmcls

* fix mmcls ut

* adapt for new config

* fix cls models

* fix mmcls sdk

* fix yapf

* fix lint

* fix torchscript

* fix torchscript cpu

* fix check_env

* add test data

* fix export_info

* fix blank.jpg

* fix mmdet torchscript

* fix test.py

* fix sdk forward

* fix yapf
hanrui1sensetime 2022-09-15 21:54:57 +08:00 committed by GitHub
parent 06028d6a21
commit b0b502cba0
24 changed files with 156 additions and 129 deletions
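
The change that runs through most of these files is dropping mmcv.Config in favour of mmengine's Config. A minimal sketch of the replacement pattern, assuming an mmengine install (the file path in the comment is hypothetical):

from mmengine import Config

# in-memory configs built from plain dicts keep attribute-style access
deploy_cfg = Config(dict(backend_config=dict(type='onnxruntime')))
print(deploy_cfg.backend_config.type)          # -> 'onnxruntime'

# file-based configs keep the same call shape as mmcv:
#   cfg = Config.fromfile('configs/example.py')   # hypothetical path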

View File

@ -40,7 +40,6 @@ def torch2torchscript(img: Any,
from mmdeploy.apis import build_task_processor
task_processor = build_task_processor(model_cfg, deploy_cfg, device)
torch_model = task_processor.build_pytorch_model(model_checkpoint)
_, model_inputs = task_processor.create_input(
img,
@ -48,11 +47,11 @@ def torch2torchscript(img: Any,
data_preprocessor=getattr(torch_model, 'data_preprocessor', None))
if not isinstance(model_inputs, torch.Tensor):
model_inputs = model_inputs[0]
context_info = dict(deploy_cfg=deploy_cfg)
backend = get_backend(deploy_cfg).value
output_prefix = osp.join(work_dir, osp.splitext(save_file)[0])
if model_inputs.device != device:
model_inputs = model_inputs.to(device)
with no_mp():
trace(
torch_model,

View File

@ -217,11 +217,26 @@ def get_preprocess(deploy_cfg: mmengine.Config, model_cfg: mmengine.Config):
transform['keys'] = ['img']
if 'key' in transform and transform['key'] == 'lq':
transform['key'] = 'img'
if transform['type'] == 'Resize':
transform['size'] = transform['scale']
del transform['scale']
if transform['type'] == 'ResizeEdge':
transform['type'] = 'Resize'
transform['keep_ratio'] = True
# The classification SDK currently has a bug here: ResizeEdge is not
# implemented in the SDK, so approximate it with a plain Resize.
transform['size'] = (transform['scale'], transform['scale'])
if transform['type'] == 'PackTextDetInputs':
meta_keys += transform[
'meta_keys'] if 'meta_keys' in transform else []
transform['meta_keys'] = list(set(meta_keys))
transforms[i]['type'] = 'Collect'
if transform['type'] == 'PackDetInputs' or \
transform['type'] == 'PackClsInputs':
transforms.insert(i, dict(type='DefaultFormatBundle'))
transform['type'] = 'Collect'
if 'keys' not in transform:
transform['keys'] = ['img']
assert transforms[0]['type'] == 'LoadImageFromFile', 'The first item type'\
' of pipeline should be LoadImageFromFile'
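
A rough, standalone sketch of the mapping the hunk above applies, using a made-up classification pipeline and plain dicts instead of the real config objects:

pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='ResizeEdge', scale=256, edge='short'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackClsInputs'),
]

sdk_pipeline = []
for transform in pipeline:
    transform = dict(transform)
    if transform['type'] == 'ResizeEdge':
        # ResizeEdge is not implemented in the SDK; approximate it with a
        # keep-ratio Resize to (scale, scale)
        scale = transform.pop('scale')
        transform = dict(type='Resize', keep_ratio=True, size=(scale, scale))
    elif transform['type'] == 'Resize' and 'scale' in transform:
        transform['size'] = transform.pop('scale')
    if transform['type'] in ('PackClsInputs', 'PackDetInputs'):
        # the SDK still expects the 1.x DefaultFormatBundle + Collect pair
        sdk_pipeline.append(dict(type='DefaultFormatBundle'))
        transform = dict(type='Collect', keys=['img'])
    sdk_pipeline.append(transform)

print(sdk_pipeline)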

View File

@ -47,6 +47,9 @@ def process_model_config(model_cfg: Config,
if cfg.test_pipeline[0]['type'] == 'LoadImageFromFile':
cfg.test_pipeline.pop(0)
# check whether input_shape is valid
if 'data_preprocessor' in cfg:
cfg.test_pipeline.insert(
3, dict(type='Normalize', **cfg['data_preprocessor']))
if input_shape is not None:
if 'crop_size' in cfg.test_pipeline[2]:
crop_size = cfg.test_pipeline[2]['crop_size']
@ -166,12 +169,11 @@ class Classification(BaseTask):
tuple: (data, img), meta information for the input image and input.
"""
model_cfg = self.model_cfg
model_cfg = process_model_config(self.model_cfg, imgs, input_shape)
assert 'test_pipeline' in model_cfg, \
f'test_pipeline not found in {model_cfg}.'
from mmengine.dataset import Compose
pipeline = deepcopy(model_cfg.test_pipeline)
if isinstance(imgs, str):
if pipeline[0]['type'] != 'LoadImageFromFile':
pipeline.insert(0, dict(type='LoadImageFromFile'))
@ -247,7 +249,7 @@ class Classification(BaseTask):
"""
input_shape = get_input_shape(self.deploy_cfg)
cfg = process_model_config(self.model_cfg, '', input_shape)
preprocess = cfg.data.test.pipeline
preprocess = cfg.test_pipeline
return preprocess
def get_postprocess(self) -> Dict:
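
For context: 2.0-style mmcls configs no longer expose data.test.pipeline; the test pipeline sits at the top level and normalization moves into the model's data_preprocessor, which is why the export path above re-inserts a Normalize step and why get_preprocess now reads cfg.test_pipeline. A hedged sketch of the relevant config fragment (values are illustrative, not taken from a real config):

# old 1.x layout:  cfg.data.test.pipeline
# new 2.0 layout:  cfg.test_pipeline + normalization in data_preprocessor
data_preprocessor = dict(
    mean=[123.675, 116.28, 103.53],   # illustrative ImageNet statistics
    std=[58.395, 57.12, 57.375],
    to_rgb=True,
)
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='ResizeEdge', scale=256, edge='short'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackClsInputs'),
]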

View File

@ -5,6 +5,7 @@ from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from mmengine import Config
from mmengine.dataset import pseudo_collate
from mmengine.model import BaseDataPreprocessor
from mmengine.registry import Registry
@ -43,23 +44,21 @@ def process_model_config(model_cfg: Config,
if isinstance(imgs[0], np.ndarray):
cfg = cfg.copy()
# set loading pipeline type
cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'
cfg.test_pipeline[0].type = 'LoadImageFromNDArray'
# for static exporting
if input_shape is not None:
pipeline = cfg.test_dataloader.dataset.pipeline
print(f'debugging pipeline: {pipeline}')
pipeline[1]['scale'] = tuple(input_shape)
'''
transforms = pipeline[1]['transforms']
for trans in transforms:
trans_type = trans['type']
if trans_type == 'Resize':
trans['keep_ratio'] = False
elif trans_type == 'Pad':
trans['size_divisor'] = 1
'''
pipeline = cfg.test_pipeline
for i, transform in enumerate(pipeline):
# for static exporting
if input_shape is not None and transform.type == 'Resize':
pipeline[i].keep_ratio = False
pipeline[i].scale = tuple(input_shape)
pipeline = [
transform for transform in pipeline
if transform.type != 'LoadAnnotations'
]
cfg.test_pipeline = pipeline
return cfg
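
A standalone sketch of what the static-shape branch above does to a 2.0-style detection test pipeline (plain dicts, illustrative values):

input_shape = (1344, 800)   # illustrative static export size
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='PackDetInputs'),
]

for transform in test_pipeline:
    if input_shape is not None and transform['type'] == 'Resize':
        # force a fixed output size when exporting with a static shape
        transform['keep_ratio'] = False
        transform['scale'] = tuple(input_shape)

# annotations are not needed at deployment time, so drop the loader
test_pipeline = [t for t in test_pipeline if t['type'] != 'LoadAnnotations']
print(test_pipeline)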
@ -161,7 +160,7 @@ class ObjectDetection(BaseTask):
# Drop pad_to_square when the shape is static, because a static shape
# must already be fixed before the image is fed to the model.
pipeline = cfg.test_dataloader.dataset.pipeline
pipeline = cfg.test_pipeline
if not dynamic_flag:
transform = pipeline[1]
if 'transforms' in transform:
@ -185,10 +184,10 @@ class ObjectDetection(BaseTask):
data_ = test_pipeline(data_)
data.append(data_)
data = data[0]
data = pseudo_collate(data)
if data_preprocessor is not None:
data = data_preprocessor([data], False)
return data, data[0]
data = data_preprocessor(data, False)
return data, data['inputs']
else:
return data, BaseTask.get_tensor_from_input(data)
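
The create_input change above leans on mmengine's pseudo_collate, which batches a list of per-sample dicts without stacking tensors; stacking and normalization are left to the data_preprocessor. A minimal sketch, assuming mmengine is installed (the sample dicts are made up):

import torch
from mmengine.dataset import pseudo_collate

samples = [
    dict(inputs=torch.rand(3, 224, 224), data_samples='meta-0'),
    dict(inputs=torch.rand(3, 224, 224), data_samples='meta-1'),
]
batch = pseudo_collate(samples)
# per-sample entries stay in lists; nothing is stacked into a batch tensor
print(type(batch['inputs']), len(batch['inputs']))   # <class 'list'> 2
print(batch['data_samples'])                         # ['meta-0', 'meta-1']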

View File

@ -5,7 +5,6 @@ from typing import Any, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from mmdet.models.detectors import BaseDetector
from mmengine import Config
from mmengine.model.base_model.data_preprocessor import BaseDataPreprocessor
from mmengine.registry import Registry
@ -168,24 +167,23 @@ class End2EndModel(BaseBackendModel):
return result_masks.squeeze(0)
def forward(self,
batch_inputs: torch.Tensor,
inputs: torch.Tensor,
data_samples: Optional[List[BaseDataElement]] = None,
mode: str = 'predict',
**kwargs) -> Any:
assert mode == 'predict', 'Deploy model only allow mode=="predict".'
batch_inputs = batch_inputs.contiguous()
outputs = self.predict(batch_inputs)
inputs = inputs.contiguous()
outputs = self.predict(inputs)
outputs = End2EndModel.__clear_outputs(outputs)
batch_dets, batch_labels = outputs[:2]
batch_masks = outputs[2] if len(outputs) == 3 else None
batch_size = batch_inputs.shape[0]
batch_size = inputs.shape[0]
img_metas = [data_sample.metainfo for data_sample in data_samples]
results = [InstanceData() for _ in range(batch_size)]
results = []
rescale = kwargs.get('rescale', True)
for i in range(batch_size):
dets, labels = batch_dets[i], batch_labels[i]
result = results[i]
result = InstanceData()
bboxes = dets[:, :4]
scores = dets[:, 4]
@ -243,8 +241,8 @@ class End2EndModel(BaseBackendModel):
# aligned with mmdet to easily convert to numpy
masks = masks.cpu()
result.masks = masks
results = BaseDetector.convert_to_datasample(None, results)
data_samples[i].pred_instances = result
results.append(data_samples[i])
return results
def predict(self, imgs: Tensor) -> Tuple[np.ndarray, np.ndarray]:
@ -584,11 +582,13 @@ class SDKEnd2EndModel(End2EndModel):
"""SDK inference class, converts SDK output to mmdet format."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.has_mask = self.deploy_cfg.codebase_config.get('has_mask', False)
kwargs['data_preprocessor'] = None
super(SDKEnd2EndModel, self).__init__(*args, **kwargs)
def forward(self, img: Sequence[Tensor], img_metas: Sequence[dict], *args,
**kwargs):
def forward(self,
inputs: torch.Tensor,
data_samples: Optional[List[BaseDataElement]] = None,
mode: str = 'predict'):
"""Run forward inference.
Args:
@ -601,24 +601,29 @@ class SDKEnd2EndModel(End2EndModel):
Returns:
list: A list containing the predictions.
"""
from mmdet.core import bbox2result
if isinstance(inputs, list):
inputs = inputs[0]
# inputs are (C, H, W); the SDK expects (H, W, C)
inputs = inputs.permute(1, 2, 0)
dets, labels, masks = self.wrapper.invoke(
img[0].contiguous().detach().cpu().numpy())
det_results = bbox2result(dets[np.newaxis, ...], labels[np.newaxis,
...],
len(self.CLASSES))
if self.has_mask:
segm_results = [[] for _ in range(len(self.CLASSES))]
ori_h, ori_w = img_metas[0]['ori_shape'][:2]
for bbox, label, mask in zip(dets, labels, masks):
img_mask = np.zeros((ori_h, ori_w), dtype=np.uint8)
left = int(max(np.floor(bbox[0]) - 1, 0))
top = int(max(np.floor(bbox[1]) - 1, 0))
img_mask[top:top + mask.shape[0],
left:left + mask.shape[1]] = mask
segm_results[label].append(img_mask)
return [(det_results, segm_results)]
return [det_results]
inputs.contiguous().detach().cpu().numpy())
dets = torch.from_numpy(dets).to(self.device).unsqueeze(0)
labels = torch.from_numpy(labels).to(torch.int64).to(
self.device).unsqueeze(0)
predictions = []
masks = np.concatenate(masks, 0)
for det, label, mask, data_sample in zip(dets, labels, masks,
data_samples):
pred_instances = InstanceData()
pred_instances.scores = det[..., 4]
pred_instances.bboxes = det[..., :4]
pred_instances.labels = label
pred_instances.masks = torch.from_numpy(mask).\
to(self.device).unsqueeze(0)
data_sample.pred_instances = pred_instances
predictions.append(data_sample)
return predictions
def build_object_detection_model(
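
Both forward rewrites above follow the same 2.0 convention: predictions are packed into an InstanceData and attached to the incoming DetDataSample as pred_instances. A minimal sketch with dummy tensors, assuming the 2.0-style mmdet and mmengine packages:

import torch
from mmdet.structures import DetDataSample
from mmengine.structures import InstanceData

data_sample = DetDataSample(
    metainfo=dict(img_shape=(320, 320), ori_shape=(427, 640)))

dets = torch.rand(5, 5)                  # dummy x1, y1, x2, y2, score rows
pred_instances = InstanceData()
pred_instances.bboxes = dets[:, :4]
pred_instances.scores = dets[:, 4]
pred_instances.labels = torch.randint(0, 80, (5, ))

data_sample.pred_instances = pred_instances   # returned to the caller as-is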

View File

@ -42,7 +42,7 @@ def gfl_head__predict_by_feat(ctx,
(batch_size, num_priors * 1, H, W). Default None.
batch_img_metas (list[dict]): Meta information of the image, e.g.,
image size, scaling factor, etc.
cfg (mmcv.Config | None): Test / postprocessing configuration,
cfg (Config | None): Test / postprocessing configuration,
if None, test_cfg would be used. Default: None.
rescale (bool): If True, return boxes in original image space.
Default: False.

View File

@ -3,6 +3,7 @@ import copy
import torch
from mmdet.models.detectors.base import ForwardResults
from mmdet.structures import DetDataSample
from mmdet.structures.det_data_sample import OptSampleList
from mmdeploy.core import FUNCTION_REWRITER
@ -39,6 +40,8 @@ def single_stage_detector__forward(ctx,
(num_instances, ).
"""
data_samples = copy.deepcopy(data_samples)
if data_samples is None:
data_samples = [DetDataSample()]
deploy_cfg = ctx.cfg
# get origin input shape as tensor to support onnx dynamic shape

View File

@ -4,7 +4,7 @@ import tempfile
from multiprocessing import Process
import h5py
import mmcv
from mmengine import Config
from mmdeploy.apis import create_calib_input_data
@ -13,7 +13,7 @@ ann_file = 'tests/data/annotation.json'
def get_end2end_deploy_cfg():
deploy_cfg = mmcv.Config(
deploy_cfg = Config(
dict(
onnx_config=dict(
dynamic_axes={
@ -84,7 +84,7 @@ def get_model_cfg():
])
]
model_cfg = mmcv.Config(
model_cfg = Config(
dict(
data=dict(
samples_per_gpu=1,

View File

@ -3,11 +3,11 @@ import os
import os.path as osp
import tempfile
import mmcv
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmengine import Config
from mmdeploy.utils import Backend
from mmdeploy.utils.test import backend_checker, get_random_name
@ -61,12 +61,12 @@ def get_outputs(pytorch_model, openvino_model_path, input, input_name,
def get_base_deploy_cfg():
deploy_cfg = mmcv.Config(dict(backend_config=dict(type='openvino')))
deploy_cfg = Config(dict(backend_config=dict(type='openvino')))
return deploy_cfg
def get_deploy_cfg_with_mo_args():
deploy_cfg = mmcv.Config(
deploy_cfg = Config(
dict(
backend_config=dict(
type='openvino',
@ -131,7 +131,7 @@ def test_get_input_info_from_cfg():
from mmdeploy.apis.openvino import get_input_info_from_cfg
# Test 1
deploy_cfg = mmcv.Config()
deploy_cfg = Config()
with pytest.raises(KeyError):
get_input_info_from_cfg(deploy_cfg)
@ -140,7 +140,7 @@ def test_get_input_info_from_cfg():
height, width = 600, 1000
shape = [1, 3, height, width]
expected_input_info = {input_name: shape}
deploy_cfg = mmcv.Config({
deploy_cfg = Config({
'backend_config': {
'model_inputs': [{
'opt_shapes': expected_input_info

View File

@ -3,8 +3,8 @@ import importlib
import os.path as osp
import tempfile
import mmcv
import pytest
from mmengine import Config
from mmdeploy.apis import torch2torchscript
from mmdeploy.utils import IR, Backend
@ -16,7 +16,7 @@ output_name = get_random_name()
def get_deploy_cfg(input_name, output_name):
return mmcv.Config(
return Config(
dict(
ir_config=dict(
type=IR.TORCHSCRIPT.value,
@ -28,7 +28,7 @@ def get_deploy_cfg(input_name, output_name):
def get_model_cfg():
return mmcv.Config(
return Config(
dict(
model=dict(
pretrained=None,

View File

@ -48,10 +48,10 @@ def generate_onnx_file():
@pytest.fixture(autouse=True, scope='module')
def generate_torchscript_file():
import mmcv
from mmengine import Config
backend = Backend.TORCHSCRIPT.value
deploy_cfg = mmcv.Config({'backend_config': dict(type=backend)})
deploy_cfg = Config({'backend_config': dict(type=backend)})
from mmdeploy.apis.torch_jit import trace
context_info = dict(deploy_cfg=deploy_cfg)

Binary image file (not shown): 691 B after change.

View File

@ -95,10 +95,10 @@ def test_create_input():
def test_visualize(backend_model):
input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
results = backend_model.test_step([input_dict])
results = backend_model.test_step(input_dict)[0]
with TemporaryDirectory() as dir:
filename = dir + '/tmp.jpg'
task_processor.visualize(img, results[0], filename, 'window')
task_processor.visualize(img, results, filename, 'window')
assert os.path.exists(filename)
@ -127,8 +127,8 @@ def test_build_dataset_and_dataloader():
def test_build_test_runner():
# Prepare dummy model
from mmcls.core import ClsDataSample
from mmengine.data import LabelData
from mmcls.structures import ClsDataSample
from mmengine.structures import LabelData
label = LabelData(
label=torch.tensor([0]),
score=torch.rand(10),
@ -137,6 +137,8 @@ def test_build_test_runner():
ClsDataSample(
pred_label=label,
_pred_label=label,
gt_label=label,
_gt_label=label,
metainfo=dict(
img_shape=(224, 224),
img_path='',
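
The structure imports moved in mmcls 1.x: ClsDataSample now lives in mmcls.structures and LabelData in mmengine.structures. A hedged construction sketch along the lines of the test above:

import torch
from mmcls.structures import ClsDataSample
from mmengine.structures import LabelData

label = LabelData(label=torch.tensor([0]), score=torch.rand(10))
data_sample = ClsDataSample(
    metainfo=dict(img_shape=(224, 224), img_path=''))
data_sample.gt_label = label     # the property setters accept a LabelData
data_sample.pred_label = label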

View File

@ -41,7 +41,7 @@ class TestEnd2EndModel:
def test_forward(self):
imgs = torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE)
from mmcls.core import ClsDataSample
from mmcls.structures import ClsDataSample
data_sample = ClsDataSample(
metainfo=dict(
scale_factor=(1, 1),

View File

@ -148,7 +148,7 @@ def test_shufflenetv2_backbone__forward(backend_type: Backend):
@pytest.mark.parametrize('backend_type', [Backend.NCNN])
def test_vision_transformer_backbone__forward(backend_type: Backend):
from mmcls.core import ClsDataSample
from mmcls.structures import ClsDataSample
from mmdeploy.core import patch_model
import_codebase(Codebase.MMCLS)

View File

@ -5,7 +5,7 @@ import random
import tempfile
from typing import Dict, List
import mmcv
import mmengine
import numpy as np
import pytest
import torch
@ -784,15 +784,14 @@ def test_forward_of_base_detector(model_cfg_path, backend):
keep_top_k=100,
background_label_id=-1,
))))
model_cfg = Config(dict(model=mmcv.load(model_cfg_path)))
model_cfg = Config(dict(model=mmengine.load(model_cfg_path)))
model_cfg.model = _replace_r50_with_r18(model_cfg.model)
from mmdet.apis import init_detector
model = init_detector(model_cfg, None, device='cpu')
img = torch.randn(1, 3, 64, 64)
from mmdet.structures import DetDataSample
from mmengine import InstanceData
from mmengine.structures import InstanceData
data_sample = DetDataSample()
img_meta = dict(img_shape=(800, 1216, 3))
gt_instances = InstanceData(metainfo=img_meta)
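
mmengine also takes over mmcv's file I/O helpers, so mmcv.load(model_cfg_path) becomes mmengine.load(...); the suffix-based dispatch (json/yaml/pickle) is unchanged. A tiny self-contained sketch (the dict contents are made up):

import mmengine

cfg_dict = dict(type='RetinaNet', backbone=dict(type='ResNet', depth=18))
mmengine.dump(cfg_dict, '/tmp/model.json')    # mmengine.dump mirrors mmcv.dump
print(mmengine.load('/tmp/model.json'))       # handler is picked from the suffix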

View File

@ -21,6 +21,9 @@ import_codebase(Codebase.MMDET)
model_cfg_path = 'tests/test_codebase/test_mmdet/data/model.py'
model_cfg = load_config(model_cfg_path)[0]
model_cfg.test_dataloader.dataset.data_root = \
'tests/test_codebase/test_mmdet/data'
model_cfg.test_dataloader.dataset.ann_file = 'coco_sample.json'
deploy_cfg = Config(
dict(
backend_config=dict(type='onnxruntime'),
@ -53,7 +56,7 @@ img = np.random.rand(*img_shape, 3)
def test_build_test_runner():
# Prepare dummy model
from mmdet.structures import DetDataSample
from mmengine.data import InstanceData
from mmengine.structures import InstanceData
data_sample = DetDataSample()
img_meta = dict(img_shape=(800, 1216, 3))
@ -66,6 +69,8 @@ def test_build_test_runner():
pred_instances.scores = torch.rand((5, ))
pred_instances.labels = torch.randint(0, 10, (5, ))
data_sample.pred_instances = pred_instances
data_sample.img_id = 139
data_sample.ori_shape = (800, 1216)
outputs = [data_sample]
model = DummyModel(outputs=outputs)
assert model is not None
@ -158,7 +163,7 @@ def test_create_input(device):
def test_visualize(backend_model):
input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
results = backend_model.test_step([input_dict])
results = backend_model.test_step(input_dict)[0]
with TemporaryDirectory() as dir:
filename = dir + 'tmp.jpg'
task_processor.visualize(img, results, filename, 'window')

View File

@ -1,7 +1,8 @@
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmengine import BaseDataElement, Config, InstanceData
from mmengine import Config
from mmengine.structures import BaseDataElement, InstanceData
import mmdeploy.backend.ncnn as ncnn_apis
import mmdeploy.backend.onnxruntime as ort_apis

View File

@ -1,6 +1,6 @@
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmengine import Config
from mmdeploy.utils import Backend
from mmdeploy.utils.test import check_backend, get_rewrite_outputs
@ -13,7 +13,7 @@ def test_multiheadattention_ncnn():
model = MultiheadAttention(embed_dims, num_heads, batch_first=True)
query = torch.rand(1, 3, embed_dims)
deploy_cfg = mmcv.Config(
deploy_cfg = Config(
dict(
onnx_config=dict(input_shape=None),
backend_config=dict(type=Backend.NCNN.value),

View File

@ -3,10 +3,10 @@ import os
import subprocess
import tempfile
import mmcv
import onnx
import pytest
import torch
from mmengine import Config
import mmdeploy.apis.tensorrt as trt_apis
from mmdeploy.utils import Backend
@ -114,7 +114,7 @@ class TestTensorRTExporter:
dynamic_axes=dynamic_axes,
opset_version=11)
deploy_cfg = mmcv.Config(
deploy_cfg = Config(
dict(
backend_config=dict(
type='tensorrt',

View File

@ -1,18 +1,18 @@
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import mmcv
import numpy as np
import pytest
import torch
import torch.nn.functional as F
from mmengine import Config
from packaging.version import parse
from mmdeploy.utils import Backend
from mmdeploy.utils.test import (WrapFunction, backend_checker,
get_rewrite_outputs)
deploy_cfg_ncnn = mmcv.Config(
deploy_cfg_ncnn = Config(
dict(
onnx_config=dict(input_shape=None),
backend_config=dict(type='ncnn', model_inputs=None, use_vulkan=False),
@ -20,7 +20,7 @@ deploy_cfg_ncnn = mmcv.Config(
def get_trt_config(output_names, shape):
deploy_cfg_tensorrt = mmcv.Config(
deploy_cfg_tensorrt = Config(
dict(
onnx_config=dict(input_shape=None, output_names=output_names),
backend_config=dict(
@ -297,7 +297,7 @@ def test_masked_fill_onnxruntime(input):
def masked_fill_caller(*arg, **kwargs):
return torch.masked_fill(*arg, **kwargs)
deploy_cfg_ort = mmcv.Config(
deploy_cfg_ort = Config(
dict(
onnx_config=dict(input_shape=None),
backend_config=dict(type='onnxruntime'),
@ -330,7 +330,7 @@ def test_tensor_setitem(x, y):
wrapped_func = WrapFunction(setitem_slice)
model_inputs = {'x': x, 'y': y}
deploy_cfg = mmcv.Config(
deploy_cfg = Config(
dict(
onnx_config=dict(input_shape=None),
backend_config=dict(type='onnxruntime'),

View File

@ -5,9 +5,9 @@ import os
import tempfile
from functools import partial
import mmcv
import pytest
import torch.multiprocessing as mp
from mmengine import Config
import mmdeploy.utils as util
from mmdeploy.backend.sdk.export_info import export2SDK
@ -16,9 +16,9 @@ from mmdeploy.utils.constants import Backend, Codebase, Task
from mmdeploy.utils.test import get_random_name
correct_model_path = 'tests/data/srgan.py'
correct_model_cfg = mmcv.Config.fromfile(correct_model_path)
correct_model_cfg = Config.fromfile(correct_model_path)
correct_deploy_path = 'tests/data/super-resolution.py'
correct_deploy_cfg = mmcv.Config.fromfile(correct_deploy_path)
correct_deploy_cfg = Config.fromfile(correct_deploy_path)
empty_file_path = tempfile.NamedTemporaryFile(suffix='.py').name
empty_path = './a.py'
@ -58,7 +58,7 @@ class TestLoadConfig:
configs = util.load_config(*args)
for v in zip(configs, args):
if isinstance(v[1], str):
cfg = mmcv.Config.fromfile(v[1])
cfg = Config.fromfile(v[1])
else:
cfg = v[1]
assert v[0]._cfg_dict == cfg._cfg_dict
@ -67,7 +67,7 @@ class TestLoadConfig:
class TestGetCodebaseConfig:
def test_get_codebase_config_empty(self):
assert util.get_codebase_config(mmcv.Config(dict())) == {}
assert util.get_codebase_config(Config(dict())) == {}
def test_get_codebase_config(self):
codebase_config = util.get_codebase_config(correct_deploy_path)
@ -78,7 +78,7 @@ class TestGetTaskType:
def test_get_task_type_none(self):
with pytest.raises(AssertionError):
util.get_task_type(mmcv.Config(dict()))
util.get_task_type(Config(dict()))
def test_get_task_type(self):
assert util.get_task_type(correct_deploy_path) == Task.SUPER_RESOLUTION
@ -88,7 +88,7 @@ class TestGetCodebase:
def test_get_codebase_none(self):
with pytest.raises(AssertionError):
util.get_codebase(mmcv.Config(dict()))
util.get_codebase(Config(dict()))
def test_get_codebase(self):
assert util.get_codebase(correct_deploy_path) == Codebase.MMEDIT
@ -97,7 +97,7 @@ class TestGetCodebase:
class TestGetBackendConfig:
def test_get_backend_config_empty(self):
assert util.get_backend_config(mmcv.Config(dict())) == {}
assert util.get_backend_config(Config(dict())) == {}
def test_get_backend_config(self):
backend_config = util.get_backend_config(correct_deploy_path)
@ -108,7 +108,7 @@ class TestGetBackend:
def test_get_backend_none(self):
with pytest.raises(AssertionError):
util.get_backend(mmcv.Config(dict()))
util.get_backend(Config(dict()))
def test_get_backend(self):
assert util.get_backend(correct_deploy_path) == Backend.ONNXRUNTIME
@ -117,7 +117,7 @@ class TestGetBackend:
class TestGetOnnxConfig:
def test_get_onnx_config_empty(self):
assert util.get_onnx_config(mmcv.Config(dict())) == {}
assert util.get_onnx_config(Config(dict())) == {}
def test_get_onnx_config(self):
onnx_config = dict(
@ -146,10 +146,10 @@ class TestGetOnnxConfig:
class TestIsDynamic:
config_with_onnx_config = mmcv.Config(
config_with_onnx_config = Config(
dict(onnx_config=dict(), backend_config=dict(type='default')))
config_with_dynamic_axes = mmcv.Config(
config_with_dynamic_axes = Config(
dict(
onnx_config=dict(
type='onnx',
@ -160,7 +160,7 @@ class TestIsDynamic:
}}),
backend_config=dict(type='default')))
config_with_dynamic_axes_and_input_names = mmcv.Config(
config_with_dynamic_axes_and_input_names = Config(
dict(
onnx_config=dict(
type='onnx',
@ -172,7 +172,7 @@ class TestIsDynamic:
}}),
backend_config=dict(type='default')))
config_with_dynamic_axes_list = mmcv.Config(
config_with_dynamic_axes_list = Config(
dict(
onnx_config=dict(
type='onnx', input_names=['image'], dynamic_axes=[[0, 2, 3]]),
@ -223,11 +223,11 @@ class TestIsDynamic:
class TestGetInputShape:
config_without_input_shape = mmcv.Config(
config_without_input_shape = Config(
dict(onnx_config=dict(input_shape=None)))
config_with_input_shape = mmcv.Config(
config_with_input_shape = Config(
dict(onnx_config=dict(input_shape=[1, 1])))
config_with_error_shape = mmcv.Config(
config_with_error_shape = Config(
dict(onnx_config=dict(input_shape=[1, 1, 1])))
def test_get_input_shape_none(self):
@ -245,11 +245,10 @@ class TestGetInputShape:
class TestCfgApplyMark:
config_with_mask = mmcv.Config(
dict(partition_config=dict(apply_marks=True)))
config_with_mask = Config(dict(partition_config=dict(apply_marks=True)))
def test_cfg_apply_marks_none(self):
assert util.cfg_apply_marks(mmcv.Config(dict())) is None
assert util.cfg_apply_marks(Config(dict())) is None
def test_cfg_apply_marks(self):
assert util.cfg_apply_marks(TestCfgApplyMark.config_with_mask) is True
@ -257,13 +256,12 @@ class TestCfgApplyMark:
class TestGetPartitionConfig:
config_with_mask = mmcv.Config(
dict(partition_config=dict(apply_marks=True)))
config_without_mask = mmcv.Config(
config_with_mask = Config(dict(partition_config=dict(apply_marks=True)))
config_without_mask = Config(
dict(partition_config=dict(apply_marks=False)))
def test_get_partition_config_none(self):
assert util.get_partition_config(mmcv.Config(dict())) is None
assert util.get_partition_config(Config(dict())) is None
def test_get_partition_config_without_mask(self):
assert util.get_partition_config(
@ -275,10 +273,10 @@ class TestGetPartitionConfig:
class TestGetCalib:
config_with_calib = mmcv.Config(
config_with_calib = Config(
dict(calib_config=dict(create_calib=True, calib_file='calib_data.h5')))
config_without_calib = mmcv.Config(
config_without_calib = Config(
dict(
calib_config=dict(create_calib=False, calib_file='calib_data.h5')))
@ -287,7 +285,7 @@ class TestGetCalib:
create_calib=True, calib_file='calib_data.h5')
def test_get_calib_filename_none(self):
assert util.get_calib_filename(mmcv.Config(dict())) is None
assert util.get_calib_filename(Config(dict())) is None
def test_get_calib_filename_false(self):
assert util.get_calib_filename(
@ -299,7 +297,7 @@ class TestGetCalib:
class TestGetCommonConfig:
config_with_common_config = mmcv.Config(
config_with_common_config = Config(
dict(
backend_config=dict(
type='tensorrt', common_config=dict(fp16_mode=False))))
@ -312,7 +310,7 @@ class TestGetCommonConfig:
class TestGetModelInputs:
config_with_model_inputs = mmcv.Config(
config_with_model_inputs = Config(
dict(backend_config=dict(model_inputs=[dict(input_shapes=None)])))
def test_model_inputs(self):
@ -327,7 +325,7 @@ class TestGetDynamicAxes:
input_name = get_random_name()
def test_with_empty_cfg(self):
deploy_cfg = mmcv.Config()
deploy_cfg = Config()
with pytest.raises(KeyError):
util.get_dynamic_axes(deploy_cfg)
@ -339,14 +337,14 @@ class TestGetDynamicAxes:
3: 'width'
}
}
deploy_cfg = mmcv.Config(
deploy_cfg = Config(
dict(onnx_config=dict(dynamic_axes=expected_dynamic_axes)))
dynamic_axes = util.get_dynamic_axes(deploy_cfg)
assert expected_dynamic_axes == dynamic_axes
def test_can_not_get_axes_from_list_without_names(self):
axes = [[0, 2, 3]]
deploy_cfg = mmcv.Config(dict(onnx_config=dict(dynamic_axes=axes)))
deploy_cfg = Config(dict(onnx_config=dict(dynamic_axes=axes)))
with pytest.raises(KeyError):
util.get_dynamic_axes(deploy_cfg)
@ -354,7 +352,7 @@ class TestGetDynamicAxes:
axes = [[0, 2, 3]]
expected_dynamic_axes = {self.input_name: axes[0]}
axes_names = [self.input_name]
deploy_cfg = mmcv.Config(dict(onnx_config=dict(dynamic_axes=axes)))
deploy_cfg = Config(dict(onnx_config=dict(dynamic_axes=axes)))
dynamic_axes = util.get_dynamic_axes(deploy_cfg, axes_names)
assert expected_dynamic_axes == dynamic_axes
@ -365,7 +363,7 @@ class TestGetDynamicAxes:
self.input_name: axes[0],
output_name: axes[1]
}
deploy_cfg = mmcv.Config(
deploy_cfg = Config(
dict(
onnx_config=dict(
input_names=[self.input_name],

View File

@ -1,7 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import collect_env as collect_base_env
from mmcv.utils import get_git_hash
from mmengine.utils import get_git_hash
import mmdeploy
from mmdeploy.utils import (get_backend_version, get_codebase_version,
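
get_git_hash moved from mmcv.utils to mmengine.utils; the call itself is unchanged. A one-line sketch:

from mmengine.utils import get_git_hash

# git hash of the current repo, truncated to 7 digits; falls back to 'unknown'
print(get_git_hash(fallback='unknown', digits=7))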

View File

@ -132,8 +132,7 @@ def main():
warmup=args.warmup,
log_interval=args.log_interval,
with_sync=with_sync,
file=args.log2file,
logger=runner.logger):
file=args.log2file):
runner.test()
else:
runner.test()