Mirror of https://github.com/open-mmlab/mmdeploy.git, synced 2025-01-14 08:09:43 +08:00
* Add support for inpainting models
* Add configs
* Add comment
* Refactor
* Add test code for inpainting task
* Fix
* Fix
* Update
* Fix
* Fix
* Update docs
* Update
* Fix visualization
* Handle case without Resize
49 lines
1.7 KiB
Python
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import pytest
import torch

from mmdeploy.utils import Backend, load_config
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker


@backend_checker(Backend.ONNXRUNTIME)
class TestEnd2EndModel:

    @pytest.fixture(scope='class')
    def end2end_model(self):
        # force add backend wrapper regardless of plugins
        # make sure ONNXRuntimeEditor can use ORTWrapper inside itself
        from mmdeploy.backend.onnxruntime import ORTWrapper
        from mmdeploy.codebase.mmedit.deploy.inpainting_model import \
            End2EndModel

        # simplify backend inference
        with SwitchBackendWrapper(ORTWrapper) as wrapper:
            wrapper.set(outputs=dict(fake_img=torch.rand(3, 32, 32)))
            deploy_cfg = mmcv.Config(
                dict(
                    onnx_config=dict(
                        input_names=['masked_img', 'mask'],
                        output_names=['fake_img'])))
            model_cfg = load_config(
                'tests/test_codebase/test_mmedit/data/inpainting_model.py')[0]
            model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu', model_cfg,
                                 deploy_cfg)
            yield model

    def test_forward(self, end2end_model):
        masked_img = np.random.rand(3, 32, 32)
        mask = np.random.randint(0, 2, (1, 32, 32))

        results = end2end_model.forward(masked_img, mask, test_mode=False)
        assert results is not None

        results = end2end_model.forward(
            masked_img,
            torch.tensor(mask),
            test_mode=True,
            gt_img=torch.tensor(results[0]))
        assert results is not None
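Because the class is decorated with backend_checker(Backend.ONNXRUNTIME), it is skipped when ONNX Runtime is not installed; the SwitchBackendWrapper fixture stubs out real inference, so no .onnx file is needed. A minimal sketch for running just this class from a Python script is shown below; the test file path is an assumption, so adjust it to wherever this file actually lives in the repository.

    import pytest

    # NOTE: the path below is assumed, not taken from the file itself.
    pytest.main([
        '-q',
        'tests/test_codebase/test_mmedit/test_mmedit_models.py::TestEnd2EndModel',
    ])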