[Refactor] refactor is_available, check_env (#1540)

* refactor is_available

* remove try/except in apis

* fix trt check env

* fix ops_info

* update default value

* remove backend list

* optimize pycuda

* update requirement, check env for rknn
q.yao 2022-12-23 12:06:32 +08:00 committed by GitHub
parent 5285caf30a
commit d113a5f1c7
50 changed files with 723 additions and 393 deletions

View File

@@ -87,4 +87,4 @@ jobs:
          python3 tools/scripts/build_ubuntu_x64_ncnn.py
          python3 -m pip install torch==1.8.2 torchvision==0.9.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cpu
          python3 -m pip install mmcv-full==1.5.1 -f https://download.openmmlab.com/mmcv/dist/cpu/torch1.8.0/index.html
-         python3 -c 'import mmdeploy.apis.ncnn as ncnn_api; assert ncnn_api.is_available() and ncnn_api.is_custom_ops_available()'
+         python3 -c 'import mmdeploy.apis.ncnn as ncnn_api; assert ncnn_api.is_available(with_custom_ops=True)'

View File

@@ -36,7 +36,7 @@ jobs:
          python3 tools/scripts/build_ubuntu_x64_ort.py
          python3 -m pip install torch==1.8.2 torchvision==0.9.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cpu
          python3 -m pip install mmcv-full==1.5.1 -f https://download.openmmlab.com/mmcv/dist/cpu/torch1.8.0/index.html
-         python3 -c 'import mmdeploy.apis.onnxruntime as ort_api; assert ort_api.is_available() and ort_api.is_custom_ops_available()'
+         python3 -c 'import mmdeploy.apis.onnxruntime as ort_api; assert ort_api.is_available(with_custom_ops=True)'
      - name: test mmcls full pipeline
        run: |
          pip install openmim

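Both CI asserts above collapse the old two-call check into one call; a minimal sketch of the difference, assuming an environment built with this patch:

    import mmdeploy.apis.onnxruntime as ort_api

    # Old API (removed by this commit):
    #   ort_api.is_available() and ort_api.is_custom_ops_available()
    # New API: the custom-ops requirement is folded in as a flag.
    assert ort_api.is_available(with_custom_ops=True)
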
View File

@@ -31,3 +31,6 @@ mmdeploy_export(${PROJECT_NAME}_obj)
mmdeploy_add_module(${PROJECT_NAME} MODULE EXCLUDE "")
target_link_libraries(${PROJECT_NAME} PUBLIC ${PROJECT_NAME}_obj)
add_library(mmdeploy::torchscript_ops ALIAS ${PROJECT_NAME})
+
+set(_TORCHJIT_OPS_DIR ${CMAKE_SOURCE_DIR}/mmdeploy/lib)
+install(TARGETS ${PROJECT_NAME} DESTINATION ${_TORCHJIT_OPS_DIR})

View File

@ -1,7 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved. # Copyright (c) OpenMMLab. All rights reserved.
# mmcv dependency
try:
from .calibration import create_calib_input_data from .calibration import create_calib_input_data
from .extract_model import extract_model from .extract_model import extract_model
from .inference import inference_model from .inference import inference_model
@ -15,5 +12,3 @@ try:
'torch2onnx', 'torch2torchscript', 'build_task_processor', 'torch2onnx', 'torch2torchscript', 'build_task_processor',
'get_predefined_partition_cfg', 'visualize_model' 'get_predefined_partition_cfg', 'visualize_model'
] ]
except Exception:
pass

View File

@@ -2,14 +2,8 @@
from typing import Optional, Union

import mmcv
-import torch
-from mmcv.parallel import MMDataParallel
-
-from mmdeploy.core import patch_model
-from mmdeploy.utils import (IR, cfg_apply_marks, get_backend, get_ir_config,
-                            load_config)
from .core import PIPELINE_MANAGER, no_mp
-from .utils import create_calib_input_data as create_calib_input_data_impl


@PIPELINE_MANAGER.register_pipeline()
@@ -36,6 +30,13 @@ def create_calib_input_data(calib_file: str,
        dataset_type (str, optional): The dataset type. Defaults to 'val'.
        device (str, optional): Device to create dataset. Defaults to 'cpu'.
    """
+    import torch
+    from mmcv.parallel import MMDataParallel
+
+    from mmdeploy.core import patch_model
+    from mmdeploy.utils import (IR, cfg_apply_marks, get_backend,
+                                get_ir_config, load_config)
+    from .utils import create_calib_input_data as create_calib_input_data_impl
    with no_mp():
        if dataset_cfg is None:
            dataset_cfg = model_cfg

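The hunk above is the template for most files in this commit: heavy module-level imports (torch, mmcv, mmdeploy internals) move into the function body, so importing mmdeploy.apis stays cheap and no longer fails when an optional dependency is missing. A minimal standalone sketch of the pattern (names here are illustrative, not from the repo):

    def convert(*args, **kwargs):
        # Heavy dependencies are resolved on first call,
        # not when the package is imported.
        import torch
        return torch.__version__
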
View File

@@ -5,7 +5,6 @@ from typing import Dict, Iterable, Optional, Union
import onnx

from .core import PIPELINE_MANAGER
-from .onnx import extract_partition


@PIPELINE_MANAGER.register_pipeline()
@@ -62,6 +61,7 @@ def extract_model(model: Union[str, onnx.ModelProto],
    Returns:
        onnx.ModelProto: The extracted model.
    """
+    from .onnx import extract_partition
    return extract_partition(model, start_marker, end_marker, start_name_map,
                             end_name_map, dynamic_axes, save_file)

View File

@@ -3,9 +3,6 @@ from typing import Any, Sequence, Union
import mmcv
import numpy as np
-import torch
-
-from mmdeploy.utils import get_input_shape, load_config


def inference_model(model_cfg: Union[str, mmcv.Config],
@@ -37,6 +34,10 @@ def inference_model(model_cfg: Union[str, mmcv.Config],
    Returns:
        Any: The inference results
    """
+    import torch
+
+    from mmdeploy.utils import get_input_shape, load_config
    deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)

    from mmdeploy.apis.utils import build_task_processor

View File

@@ -1,11 +1,11 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmdeploy.backend.ncnn import from_onnx as _from_onnx
-from mmdeploy.backend.ncnn import is_available, is_custom_ops_available
+from mmdeploy.backend.ncnn import is_available
from ..core import PIPELINE_MANAGER

from_onnx = PIPELINE_MANAGER.register_pipeline()(_from_onnx)

-__all__ = ['is_available', 'is_custom_ops_available', 'from_onnx']
+__all__ = ['is_available', 'from_onnx']

if is_available():
    try:

View File

@@ -1,4 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
-from mmdeploy.backend.onnxruntime import is_available, is_custom_ops_available
+from mmdeploy.backend.onnxruntime import is_available

-__all__ = ['is_available', 'is_custom_ops_available']
+__all__ = ['is_available']

View File

@@ -3,13 +3,8 @@ import os.path as osp
from typing import Any, Optional, Union

import mmcv
-import torch
-
-from mmdeploy.apis.core.pipeline_manager import no_mp
-from mmdeploy.utils import (Backend, get_backend, get_dynamic_axes,
-                            get_input_shape, get_onnx_config, load_config)
from .core import PIPELINE_MANAGER
-from .onnx import export


@PIPELINE_MANAGER.register_pipeline()
@@ -49,6 +44,13 @@ def torch2onnx(img: Any,
            defaults to `None`.
        device (str): A string specifying device type, defaults to 'cuda:0'.
    """
+    import torch
+
+    from mmdeploy.apis.core.pipeline_manager import no_mp
+    from mmdeploy.utils import (Backend, get_backend, get_dynamic_axes,
+                                get_input_shape, get_onnx_config, load_config)
+    from .onnx import export
    # load deploy_cfg if necessary
    deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
    mmcv.mkdir_or_exist(osp.abspath(work_dir))

View File

@@ -3,11 +3,8 @@ import os.path as osp
from typing import Any, Optional, Union

import mmcv
-import torch
-
from mmdeploy.apis.core.pipeline_manager import PIPELINE_MANAGER, no_mp
-from mmdeploy.utils import get_backend, get_input_shape, load_config
-from .torch_jit import trace


@PIPELINE_MANAGER.register_pipeline()
@@ -32,6 +29,11 @@ def torch2torchscript(img: Any,
            defaults to `None`.
        device (str): A string specifying device type, defaults to 'cuda:0'.
    """
+    import torch
+
+    from mmdeploy.utils import get_backend, get_input_shape, load_config
+    from .torch_jit import trace
    # load deploy_cfg if necessary
    deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
    mmcv.mkdir_or_exist(osp.abspath(work_dir))

View File

@@ -1,8 +1,8 @@
# Copyright (c) OpenMMLab. All rights reserved.
-from mmdeploy.backend.tensorrt import is_available, is_custom_ops_available
+from mmdeploy.backend.tensorrt import is_available
from ..core import PIPELINE_MANAGER

-__all__ = ['is_available', 'is_custom_ops_available']
+__all__ = ['is_available']

if is_available():
    from mmdeploy.backend.tensorrt import from_onnx as _from_onnx

View File

@@ -2,12 +2,9 @@
from copy import deepcopy
from typing import Callable, Dict, Optional

-import h5py
import torch
-import tqdm
from torch.utils.data import DataLoader

-from mmdeploy.core import RewriterContext, reset_mark_function_count
from ..core import PIPELINE_MANAGER

@@ -46,6 +43,10 @@ def create_calib_input_data(calib_file: str,
            'val', defaults to 'val'.
        device (str): Specifying the device to run on, defaults to 'cpu'.
    """
+    import h5py
+    import tqdm
+
+    from mmdeploy.core import RewriterContext, reset_mark_function_count
    backend = 'default'

View File

@@ -5,13 +5,12 @@ import mmcv
import numpy as np
import torch

-from mmdeploy.codebase import BaseTask
from mmdeploy.utils import Backend, get_backend, get_input_shape, load_config


def visualize_model(model_cfg: Union[str, mmcv.Config],
                    deploy_cfg: Union[str, mmcv.Config],
-                    model: Union[str, Sequence[str], BaseTask],
+                    model: Union[str, Sequence[str]],
                    img: Union[str, np.ndarray],
                    device: str,
                    backend: Optional[Backend] = None,

View File

@@ -1,18 +1,11 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import importlib
-
from .backend_manager import AscendManager
from .utils import update_sdk_pipeline

+_BackendManager = AscendManager

-def is_available():
-    """Check whether acl is installed.
-
-    Returns:
-        bool: True if acl package is installed.
-    """
-    return importlib.util.find_spec('acl') is not None
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper

__all__ = ['update_sdk_pipeline', 'AscendManager']

View File

@@ -32,6 +32,31 @@ class AscendManager(BaseBackendManager):
        from .wrapper import AscendWrapper
        return AscendWrapper(model=backend_files[0], device=device)

+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check custom ops exists.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+        return importlib.util.find_spec('acl') is not None
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                return pkg_resources.get_distribution('acl').version
+            except Exception:
+                return 'None'
+
    @classmethod
    def to_backend(cls,
                   ir_files: Sequence[str],
@@ -51,7 +76,7 @@ class AscendManager(BaseBackendManager):
            device (str, optional): The device type. Defaults to 'cpu'.
        Returns:
-            Seqeuence[str]: Backend files.
+            Sequence[str]: Backend files.
        """
        from mmdeploy.utils import get_model_inputs
        from .onnx2ascend import from_onnx

View File

@@ -2,7 +2,7 @@
import importlib
import logging
from abc import ABCMeta
-from typing import Any, Optional, Sequence
+from typing import Any, Callable, Optional, Sequence


class BaseBackendManager(metaclass=ABCMeta):
@@ -29,7 +29,50 @@ class BaseBackendManager(metaclass=ABCMeta):
                to None.
        """
        raise NotImplementedError(
-            f'build_wrapper has not been implemented for `{cls.__name__}`')
+            f'build_wrapper has not been implemented for "{cls.__name__}"')

+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check custom ops exists.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        raise NotImplementedError(
+            f'is_available has not been implemented for "{cls.__name__}"')
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        raise NotImplementedError(
+            f'get_version has not been implemented for "{cls.__name__}"')
+
+    @classmethod
+    def check_env(cls, log_callback: Callable = lambda _: _) -> str:
+        """Check current environment.
+
+        Returns:
+            str: Info about the environment.
+        """
+        try:
+            available = cls.is_available()
+            if available:
+                try:
+                    backend_version = cls.get_version()
+                except NotImplementedError:
+                    backend_version = 'Unknown'
+            else:
+                backend_version = 'None'
+
+            info = f'{cls.backend_name}:\t{backend_version}'
+        except Exception:
+            info = f'{cls.backend_name}:\tCheckFailed'
+
+        log_callback(info)
+        return info
+
    @classmethod
    def to_backend(cls,
@@ -92,6 +135,8 @@ class BackendManagerRegistry:

            self._module_dict[name] = cls

+            cls.backend_name = name
+
            return cls

        return wrap_manager

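The three new hooks make each backend manager self-describing. A minimal sketch of a hypothetical subclass built on them, assuming this patch is installed; DummyManager, the 'dummy' name, and the numpy probe are illustrative only:

    from mmdeploy.backend.base import BACKEND_MANAGERS, BaseBackendManager

    @BACKEND_MANAGERS.register('dummy')  # registration also sets cls.backend_name
    class DummyManager(BaseBackendManager):

        @classmethod
        def is_available(cls, with_custom_ops: bool = False) -> bool:
            import importlib
            return importlib.util.find_spec('numpy') is not None

        @classmethod
        def get_version(cls) -> str:
            import pkg_resources
            return pkg_resources.get_distribution('numpy').version

    # check_env ties the hooks together and reports through the callback.
    DummyManager.check_env(log_callback=print)  # e.g. prints 'dummy:   1.21.6'
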
View File

@@ -1,18 +1,10 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import importlib
-
from .backend_manager import CoreMLManager

+_BackendManager = CoreMLManager

-def is_available():
-    """Check whether coremltools is installed.
-
-    Returns:
-        bool: True if coremltools package is installed.
-    """
-    return importlib.util.find_spec('coremltools') is not None
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper

__all__ = ['CoreMLManager']

View File

@@ -32,6 +32,31 @@ class CoreMLManager(BaseBackendManager):
        from .wrapper import CoreMLWrapper
        return CoreMLWrapper(model_file=backend_files[0])

+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check custom ops exists.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+        return importlib.util.find_spec('coremltools') is not None
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                return pkg_resources.get_distribution('coremltools').version
+            except Exception:
+                return 'None'
+
    @classmethod
    def to_backend(cls,
                   ir_files: Sequence[str],

View File

@@ -1,37 +1,11 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import importlib
-import os.path as osp
-
from .backend_manager import NCNNManager
-from .init_plugins import get_onnx2ncnn_path, get_ops_path
from .onnx2ncnn import from_onnx

+_BackendManager = NCNNManager

-def is_available():
-    """Check whether ncnn and onnx2ncnn tool are installed.
-
-    Returns:
-        bool: True if ncnn and onnx2ncnn tool are installed.
-    """
-    has_pyncnn = importlib.util.find_spec('ncnn') is not None
-    onnx2ncnn = get_onnx2ncnn_path()
-    return has_pyncnn and osp.exists(onnx2ncnn)
-
-
-def is_custom_ops_available():
-    """Check whether ncnn extension and custom ops are installed.
-
-    Returns:
-        bool: True if ncnn extension and custom ops are compiled.
-    """
-    has_pyncnn_ext = importlib.util.find_spec(
-        'mmdeploy.backend.ncnn.ncnn_ext') is not None
-    ncnn_ops_path = get_ops_path()
-    return has_pyncnn_ext and osp.exists(ncnn_ops_path)
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper

__all__ = ['NCNNManager', 'from_onnx']

View File

@@ -2,7 +2,7 @@
import logging
import os.path as osp
import sys
-from typing import Any, Optional, Sequence
+from typing import Any, Callable, Optional, Sequence

from mmdeploy.utils import get_backend_config, get_root_logger
from ..base import BACKEND_MANAGERS, BaseBackendManager
@@ -46,6 +46,63 @@ class NCNNManager(BaseBackendManager):
            output_names=output_names,
            use_vulkan=use_vulkan)

+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check custom ops exists.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+
+        from .init_plugins import get_onnx2ncnn_path, get_ops_path
+        has_pyncnn = importlib.util.find_spec('ncnn') is not None
+        onnx2ncnn = get_onnx2ncnn_path()
+        ret = has_pyncnn and (onnx2ncnn is not None)
+
+        if ret and with_custom_ops:
+            has_pyncnn_ext = importlib.util.find_spec(
+                'mmdeploy.backend.ncnn.ncnn_ext') is not None
+            op_path = get_ops_path()
+            custom_ops_exist = osp.exists(op_path)
+            ret = ret and has_pyncnn_ext and custom_ops_exist
+
+        return ret
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                return pkg_resources.get_distribution('ncnn').version
+            except Exception:
+                return 'None'
+
+    @classmethod
+    def check_env(cls, log_callback: Callable = lambda _: _) -> str:
+        """Check current environment.
+
+        Returns:
+            str: Info about the environment.
+        """
+        info = super().check_env(log_callback=log_callback)
+        available = cls.is_available()
+        ops_available = cls.is_available(with_custom_ops=True)
+        ops_available = 'Available' if ops_available else 'NotAvailable'
+
+        if available:
+            ops_info = f'ncnn custom ops:\t{ops_available}'
+            log_callback(ops_info)
+            info = f'{info}\n{ops_info}'
+
+        return info
+
    @classmethod
    def to_backend(cls,
                   ir_files: Sequence[str],

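For reference, a sketch of the report the check_env override produces when ncnn and the extension are both present (version string illustrative):

    from mmdeploy.backend.ncnn import NCNNManager

    NCNNManager.check_env(log_callback=print)
    # ncnn:            1.0.20221128
    # ncnn custom ops: Available
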
View File

@@ -1,30 +1,10 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import importlib
-import os.path as osp
-
from .backend_manager import ONNXRuntimeManager
-from .init_plugins import get_ops_path

+_BackendManager = ONNXRuntimeManager

-def is_available():
-    """Check whether ONNX Runtime package is installed.
-
-    Returns:
-        bool: True if ONNX Runtime package is installed.
-    """
-    return importlib.util.find_spec('onnxruntime') is not None
-
-
-def is_custom_ops_available():
-    """Check whether ONNX Runtime custom ops are installed.
-
-    Returns:
-        bool: True if ONNX Runtime custom ops are compiled.
-    """
-    onnxruntime_op_path = get_ops_path()
-    return osp.exists(onnxruntime_op_path)
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper

__all__ = ['ONNXRuntimeManager']

View File

@@ -1,6 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
import logging
-from typing import Any, Optional, Sequence
+import os.path as osp
+from typing import Any, Callable, Optional, Sequence

from ..base import BACKEND_MANAGERS, BaseBackendManager
@@ -35,6 +36,92 @@ class ONNXRuntimeManager(BaseBackendManager):
            device=device,
            output_names=output_names)

+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check custom ops exists.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+        ret = importlib.util.find_spec('onnxruntime') is not None
+
+        if ret and with_custom_ops:
+            from .init_plugins import get_ops_path
+            ops_path = get_ops_path()
+            custom_ops_exist = osp.exists(ops_path)
+            ret = ret and custom_ops_exist
+
+        return ret
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                ort_version = pkg_resources.get_distribution(
+                    'onnxruntime').version
+            except Exception:
+                ort_version = 'None'
+            try:
+                ort_gpu_version = pkg_resources.get_distribution(
+                    'onnxruntime-gpu').version
+            except Exception:
+                ort_gpu_version = 'None'
+
+            if ort_gpu_version != 'None':
+                return ort_gpu_version
+            else:
+                return ort_version
+
+    @classmethod
+    def check_env(cls, log_callback: Callable = lambda _: _) -> str:
+        """Check current environment.
+
+        Returns:
+            str: Info about the environment.
+        """
+        import pkg_resources
+        try:
+            if cls.is_available():
+                ops_available = cls.is_available(with_custom_ops=True)
+                ops_available = 'Available' \
+                    if ops_available else 'NotAvailable'
+
+                try:
+                    ort_version = pkg_resources.get_distribution(
+                        'onnxruntime').version
+                except Exception:
+                    ort_version = 'None'
+                try:
+                    ort_gpu_version = pkg_resources.get_distribution(
+                        'onnxruntime-gpu').version
+                except Exception:
+                    ort_gpu_version = 'None'
+
+                ort_info = f'ONNXRuntime:\t{ort_version}'
+                log_callback(ort_info)
+                ort_gpu_info = f'ONNXRuntime-gpu:\t{ort_gpu_version}'
+                log_callback(ort_gpu_info)
+                ort_ops_info = f'ONNXRuntime custom ops:\t{ops_available}'
+                log_callback(ort_ops_info)
+
+                info = f'{ort_info}\n{ort_gpu_info}\n{ort_ops_info}'
+            else:
+                info = 'ONNXRuntime:\tNone'
+                log_callback(info)
+        except Exception:
+            info = f'{cls.backend_name}:\tCheckFailed'
+            log_callback(info)
+
+        return info
+
    @classmethod
    def to_backend(cls,
                   ir_files: Sequence[str],

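The version lookup above prefers the GPU wheel when both onnxruntime distributions are installed; a usage sketch (outputs depend on the local install):

    from mmdeploy.backend.onnxruntime import ONNXRuntimeManager

    print(ONNXRuntimeManager.is_available())                      # wheel importable?
    print(ONNXRuntimeManager.is_available(with_custom_ops=True))  # plus compiled ops
    print(ONNXRuntimeManager.get_version())  # onnxruntime-gpu wins if present
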
View File

@@ -1,17 +1,10 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import importlib
-
from .backend_manager import OpenVINOManager

+_BackendManager = OpenVINOManager

-def is_available() -> bool:
-    """Checking if OpenVINO is installed.
-
-    Returns:
-        bool: True if OpenVINO is installed.
-    """
-    return importlib.util.find_spec('openvino') is not None
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper

__all__ = ['OpenVINOManager']

View File

@@ -33,6 +33,33 @@ class OpenVINOManager(BaseBackendManager):
        return OpenVINOWrapper(
            ir_model_file=backend_files[0], output_names=output_names)

+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check custom ops exists.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+        ret = importlib.util.find_spec('openvino') is not None
+        return ret
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                return pkg_resources.get_distribution('openvino').version
+            except Exception:
+                return 'None'
+
    @classmethod
    def to_backend(cls,
                   ir_files: Sequence[str],

View File

@@ -1,17 +1,9 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import importlib
-
from .backend_manager import PPLNNManager

+_BackendManager = PPLNNManager

-def is_available():
-    """Check whether pplnn is installed.
-
-    Returns:
-        bool: True if pplnn package is installed.
-    """
-    return importlib.util.find_spec('pyppl') is not None
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper

__all__ = ['PPLNNManager']

View File

@@ -36,6 +36,33 @@ class PPLNNManager(BaseBackendManager):
            device=device,
            output_names=output_names)

+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check custom ops exists.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+        ret = importlib.util.find_spec('pyppl') is not None
+        return ret
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                return pkg_resources.get_distribution('pyppl').version
+            except Exception:
+                return 'None'
+
    @classmethod
    def to_backend(cls,
                   ir_files: Sequence[str],

View File

@@ -1,18 +1,12 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import importlib
import re
import subprocess

from .backend_manager import RKNNManager

+_BackendManager = RKNNManager

-def is_available():
-    """Check whether rknn is installed.
-
-    Returns:
-        bool: True if rknn package is installed.
-    """
-    return importlib.util.find_spec('rknn') is not None
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper


def device_available():

View File

@@ -1,7 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import os.path as osp
-from typing import Any, Optional, Sequence
+from typing import Any, Callable, Optional, Sequence

from mmdeploy.utils import get_common_config
from ..base import BACKEND_MANAGERS, BaseBackendManager
@@ -38,6 +38,87 @@ class RKNNManager(BaseBackendManager):
            common_config=common_config,
            output_names=output_names)

+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check custom ops exists.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+
+        # importlib resolves *module* names; both the rknn-toolkit and
+        # rknn-toolkit2 distributions install the `rknn` module, so one
+        # probe covers either install.
+        try:
+            return importlib.util.find_spec('rknn') is not None
+        except Exception:
+            return False
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            rknn_version = None
+            rknn2_version = None
+            try:
+                rknn_version = pkg_resources.get_distribution(
+                    'rknn-toolkit').version
+            except Exception:
+                pass
+            try:
+                rknn2_version = pkg_resources.get_distribution(
+                    'rknn-toolkit2').version
+            except Exception:
+                pass
+            if rknn2_version is not None:
+                return rknn2_version
+            elif rknn_version is not None:
+                return rknn_version
+            return 'None'
+
+    @classmethod
+    def check_env(cls, log_callback: Callable = lambda _: _) -> str:
+        """Check current environment.
+
+        Returns:
+            str: Info about the environment.
+        """
+        import pkg_resources
+        try:
+            rknn_version = 'None'
+            rknn2_version = 'None'
+            try:
+                rknn_version = pkg_resources.get_distribution(
+                    'rknn-toolkit').version
+            except Exception:
+                pass
+            try:
+                rknn2_version = pkg_resources.get_distribution(
+                    'rknn-toolkit2').version
+            except Exception:
+                pass
+            rknn_info = f'rknn-toolkit:\t{rknn_version}'
+            rknn2_info = f'rknn-toolkit2:\t{rknn2_version}'
+            log_callback(rknn_info)
+            log_callback(rknn2_info)
+
+            info = '\n'.join([rknn_info, rknn2_info])
+        except Exception:
+            info = f'{cls.backend_name}:\tCheckFailed'
+            log_callback(info)
+
+        return info
+
    @classmethod
    def to_backend(cls,
                   ir_files: Sequence[str],

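A note on the probe above: importlib.util.find_spec resolves importable module names, while names like 'rknn-toolkit2' are pip distribution names and go through pkg_resources instead. A sketch of the distinction:

    import importlib.util
    import pkg_resources

    print(importlib.util.find_spec('rknn') is not None)  # module probe
    try:
        # distribution probe, succeeds only if the wheel is installed
        print(pkg_resources.get_distribution('rknn-toolkit2').version)
    except Exception:
        print('rknn-toolkit2 not installed')
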
View File

@@ -1,33 +1,9 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import importlib
-import os
-import sys
-
-from mmdeploy.utils import get_file_path
from .backend_manager import SDKManager

-_is_available = False
-
-module_name = 'mmdeploy_python'
-
-candidates = [
-    f'../../../build/lib/{module_name}.*.so',
-    f'../../../build/bin/*/{module_name}.*.pyd'
-]
-
-lib_path = get_file_path(os.path.dirname(__file__), candidates)
-
-if lib_path:
-    lib_dir = os.path.dirname(lib_path)
-    sys.path.append(lib_dir)
-
-if importlib.util.find_spec(module_name) is not None:
-    _is_available = True
-
-
-def is_available() -> bool:
-    return _is_available
+_BackendManager = SDKManager
+
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper

__all__ = ['SDKManager']

View File

@@ -1,10 +1,30 @@
# Copyright (c) OpenMMLab. All rights reserved.
+import importlib
+import os.path as osp
+import sys
from typing import Any, Optional, Sequence

-from mmdeploy.utils import SDK_TASK_MAP, get_task_type
+from mmdeploy.utils import get_file_path
from ..base import BACKEND_MANAGERS, BaseBackendManager

+_is_available = False
+
+module_name = 'mmdeploy_python'
+
+candidates = [
+    f'../../../build/lib/{module_name}.*.so',
+    f'../../../build/bin/*/{module_name}.*.pyd'
+]
+
+lib_path = get_file_path(osp.dirname(__file__), candidates)
+
+if lib_path:
+    lib_dir = osp.dirname(lib_path)
+    sys.path.append(lib_dir)
+
+if importlib.util.find_spec(module_name) is not None:
+    _is_available = True
+

@BACKEND_MANAGERS.register('sdk')
class SDKManager(BaseBackendManager):
@@ -32,6 +52,33 @@ class SDKManager(BaseBackendManager):
        assert deploy_cfg is not None, \
            'Building SDKWrapper requires deploy_cfg'
        from mmdeploy.backend.sdk import SDKWrapper
+        from mmdeploy.utils import SDK_TASK_MAP, get_task_type
        task_name = SDK_TASK_MAP[get_task_type(deploy_cfg)]['cls_name']
        return SDKWrapper(
            model_file=backend_files[0], task_name=task_name, device=device)

+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check custom ops exists.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        global _is_available
+        return _is_available
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                return pkg_resources.get_distribution('mmdeploy').version
+            except Exception:
+                return 'None'

View File

@@ -1,23 +1,10 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import os.path as osp
-
from .backend_manager import SNPEManager
-from .init_plugins import get_onnx2dlc_path
from .onnx2dlc import from_onnx

+_BackendManager = SNPEManager

-def is_available():
-    """Check whether ncnn and snpe-onnx-to-dlc tool are installed.
-
-    Returns:
-        bool: True if snpe-onnx-to-dlc tool are installed.
-    """
-    onnx2dlc = get_onnx2dlc_path()
-    if onnx2dlc is None:
-        return False
-    return osp.exists(onnx2dlc)
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper

__all__ = ['from_onnx', 'SNPEManager']

View File

@@ -38,6 +38,22 @@ class SNPEManager(BaseBackendManager):
        return SNPEWrapper(
            dlc_file=backend_files[0], uri=uri, output_names=output_names)

+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check custom ops exists.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        from .onnx2dlc import get_onnx2dlc_path
+        onnx2dlc = get_onnx2dlc_path()
+        if onnx2dlc is None:
+            return False
+        return osp.exists(onnx2dlc)
+
    @classmethod
    def to_backend(cls,
                   ir_files: Sequence[str],

View File

@@ -1,31 +1,11 @@
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
-import importlib
-import os.path as osp
-
from .backend_manager import TensorRTManager
-from .init_plugins import get_ops_path, load_tensorrt_plugin
+from .init_plugins import load_tensorrt_plugin

-
-def is_available():
-    """Check whether TensorRT package is installed and cuda is available.
-
-    Returns:
-        bool: True if TensorRT package is installed and cuda is available.
-    """
-    return importlib.util.find_spec('tensorrt') is not None
-
-
-def is_custom_ops_available():
-    """Check whether TensorRT custom ops are installed.
-
-    Returns:
-        bool: True if TensorRT custom ops are compiled.
-    """
-    tensorrt_op_path = get_ops_path()
-    return osp.exists(tensorrt_op_path)
+_BackendManager = TensorRTManager
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper

__all__ = ['TensorRTManager']

View File

@@ -1,7 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
import logging
-from typing import Any, Optional, Sequence
+import os.path as osp
+from typing import Any, Callable, Optional, Sequence

from ..base import BACKEND_MANAGERS, BaseBackendManager
@@ -33,6 +33,57 @@ class TensorRTManager(BaseBackendManager):
        from .wrapper import TRTWrapper
        return TRTWrapper(engine=backend_files[0], output_names=output_names)

+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check custom ops exists.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+        ret = importlib.util.find_spec('tensorrt') is not None
+
+        if ret and with_custom_ops:
+            from .init_plugins import get_ops_path
+            ops_path = get_ops_path()
+            custom_ops_exist = osp.exists(ops_path)
+            ret = ret and custom_ops_exist
+
+        return ret
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                return pkg_resources.get_distribution('tensorrt').version
+            except Exception:
+                return 'None'
+
+    @classmethod
+    def check_env(cls, log_callback: Callable = lambda _: _) -> str:
+        """Check current environment.
+
+        Returns:
+            str: Info about the environment.
+        """
+        info = super().check_env(log_callback=log_callback)
+        available = cls.is_available()
+        ops_available = cls.is_available(with_custom_ops=True)
+        ops_available = 'Available' if ops_available else 'NotAvailable'
+
+        if available:
+            ops_info = f'tensorrt custom ops:\t{ops_available}'
+            log_callback(ops_info)
+            info = f'{info}\n{ops_info}'
+
+        return info
+
    @classmethod
    def to_backend(cls,
                   ir_files: Sequence[str],

View File

@@ -1,7 +1,6 @@
# Copyright (c) OpenMMLab. All rights reserved.
-from typing import Dict, Sequence, Union
+from typing import Any, Dict, Sequence, Union

-import h5py
import numpy as np
import pycuda.autoinit  # noqa:F401
import pycuda.driver as cuda
@@ -25,13 +24,14 @@ class HDF5Calibrator(trt.IInt8Calibrator):

    def __init__(
            self,
-            calib_file: Union[str, h5py.File],
+            calib_file: Union[str, Any],
            input_shapes: Dict[str, Sequence[int]],
            model_type: str = 'end2end',
            device_id: int = 0,
            algorithm: trt.CalibrationAlgoType = DEFAULT_CALIBRATION_ALGORITHM,
            **kwargs):
        super().__init__()
+        import h5py
        if isinstance(calib_file, str):
            calib_file = h5py.File(calib_file, mode='r')

View File

@@ -139,6 +139,7 @@ def from_onnx(onnx_model: Union[str, onnx.ModelProto],
        >>> })
    """
+    if device_id != 0:
        import os
        old_cuda_device = os.environ.get('CUDA_DEVICE', None)
        os.environ['CUDA_DEVICE'] = str(device_id)

View File

@@ -3,15 +3,9 @@
from .backend_manager import TorchScriptManager
from .init_plugins import get_ops_path, ops_available

+_BackendManager = TorchScriptManager

-def is_available():
-    """Torchscript available.
-
-    Returns:
-        bool: Always True.
-    """
-    return True
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper

__all__ = ['get_ops_path', 'ops_available', 'TorchScriptManager']

View File

@@ -1,7 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
import logging
-from typing import Any, Optional, Sequence
+from typing import Any, Callable, Optional, Sequence

from ..base import BACKEND_MANAGERS, BaseBackendManager
@@ -35,6 +35,56 @@ class TorchScriptManager(BaseBackendManager):
            input_names=input_names,
            output_names=output_names)

+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check custom ops exists.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+        ret = importlib.util.find_spec('torch') is not None
+
+        if ret and with_custom_ops:
+            from .init_plugins import ops_available
+            ret = ret and ops_available()
+
+        return ret
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                return pkg_resources.get_distribution('torch').version
+            except Exception:
+                return 'None'
+
+    @classmethod
+    def check_env(cls, log_callback: Callable = lambda _: _) -> str:
+        """Check current environment.
+
+        Returns:
+            str: Info about the environment.
+        """
+        info = super().check_env(log_callback=log_callback)
+        available = cls.is_available()
+        ops_available = cls.is_available(with_custom_ops=True)
+        ops_available = 'Available' if ops_available else 'NotAvailable'
+
+        if available:
+            ops_info = f'torchscript custom ops:\t{ops_available}'
+            log_callback(ops_info)
+            info = f'{info}\n{ops_info}'
+
+        return info
+
    @classmethod
    def to_backend(cls,
                   ir_files: Sequence[str],

View File

@@ -1,5 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import glob
import os.path as osp
@@ -9,14 +8,14 @@ def get_ops_path() -> str:
    Returns:
        str: A path of the torchscript extension library.
    """
-    wildcard = osp.abspath(
-        osp.join(
-            osp.dirname(__file__),
-            '../../../build/lib/libmmdeploy_torchscript_ops.so'))
-
-    paths = glob.glob(wildcard)
-    lib_path = paths[0] if len(paths) > 0 else ''
-    return lib_path
+    from mmdeploy.utils import get_file_path
+    candidates = [
+        '../../lib/libmmdeploy_torchscript_ops.so',
+        '../../lib/mmdeploy_torchscript_ops.dll',
+        '../../../build/lib/libmmdeploy_torchscript_ops.so',
+        '../../../build/bin/*/mmdeploy_torchscript_ops.dll'
+    ]
+    return get_file_path(osp.dirname(__file__), candidates)


def ops_available() -> bool:

View File

@@ -1,18 +1,11 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import importlib
import sys

from .backend_manager import TVMManager

+_BackendManager = TVMManager

-def is_available() -> bool:
-    """Check whether tvm package is installed.
-
-    Returns:
-        bool: True if tvm package is installed.
-    """
-    return importlib.util.find_spec('tvm') is not None
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper


def get_library_ext() -> str:

View File

@@ -38,6 +38,33 @@ class TVMManager(BaseBackendManager):
            output_names=output_names,
            device=device)

+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check custom ops exists.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+        ret = importlib.util.find_spec('tvm') is not None
+        return ret
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                return pkg_resources.get_distribution('tvm').version
+            except Exception:
+                return 'None'
+
    @classmethod
    def to_backend(cls,
                   ir_files: Sequence[str],

View File

@@ -18,7 +18,10 @@ def get_library_version(lib):
    except Exception:
        version = None
    else:
-        version = lib.__version__
+        if hasattr(lib, '__version__'):
+            version = lib.__version__
+        else:
+            version = None

    return version

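The guard above in miniature (standalone, illustrative):

    import types

    mod = types.ModuleType('no_version')
    version = mod.__version__ if hasattr(mod, '__version__') else None
    print(version)  # None: a module without __version__ no longer raises
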
View File

@@ -4,7 +4,6 @@ import os.path as osp
import random
import string
import tempfile
-import warnings
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import mmcv
@@ -29,47 +28,13 @@ def backend_checker(backend: Backend, require_plugin: bool = False):
            will also check if the backend plugin has been compiled. Default
            to `False`.
    """
-    is_custom_ops_available = None
-    if backend == Backend.ONNXRUNTIME:
-        from mmdeploy.apis.onnxruntime import is_available
-        if require_plugin:
-            from mmdeploy.apis.onnxruntime import is_custom_ops_available
-    elif backend == Backend.TENSORRT:
-        from mmdeploy.apis.tensorrt import is_available
-        if require_plugin:
-            from mmdeploy.apis.tensorrt import is_custom_ops_available
-    elif backend == Backend.PPLNN:
-        from mmdeploy.apis.pplnn import is_available
-    elif backend == Backend.NCNN:
-        from mmdeploy.apis.ncnn import is_available
-        if require_plugin:
-            from mmdeploy.apis.ncnn import is_custom_ops_available
-    elif backend == Backend.OPENVINO:
-        from mmdeploy.apis.openvino import is_available
-    elif backend == Backend.RKNN:
-        # device not require as backend is not really running
-        from mmdeploy.apis.rknn import is_available
-    elif backend == Backend.ASCEND:
-        from mmdeploy.apis.ascend import is_available
-    elif backend == Backend.TVM:
-        from mmdeploy.apis.tvm import is_available
-    else:
-        warnings.warn('The backend checker is not available')
-        return
+    from mmdeploy.backend.base import get_backend_manager

+    backend_mgr = get_backend_manager(backend.value)
+    result = backend_mgr.is_available(with_custom_ops=require_plugin)

    checker = pytest.mark.skipif(
-        not is_available(), reason=f'{backend.value} package is not available')
+        not result, reason=f'{backend.value} package is not available')
-    if require_plugin and is_custom_ops_available is not None:
-        plugin_checker = pytest.mark.skipif(
-            not is_custom_ops_available(),
-            reason=f'{backend.value} plugin is not available')
-
-        def double_checker(func):
-            func = checker(func)
-            func = plugin_checker(func)
-            return func
-
-        return double_checker
-
    return checker
@@ -84,47 +49,18 @@ def check_backend(backend: Backend, require_plugin: bool = False):
            will also check if the backend plugin has been compiled. Default
            to `False`.
    """
-    is_custom_ops_available = None
-    if backend == Backend.ONNXRUNTIME:
-        from mmdeploy.apis.onnxruntime import is_available
-        if require_plugin:
-            from mmdeploy.apis.onnxruntime import is_custom_ops_available
-    elif backend == Backend.TENSORRT:
-        from mmdeploy.apis.tensorrt import is_available
-        if require_plugin:
-            from mmdeploy.apis.tensorrt import is_custom_ops_available
-    elif backend == Backend.PPLNN:
-        from mmdeploy.apis.pplnn import is_available
-    elif backend == Backend.NCNN:
-        from mmdeploy.apis.ncnn import is_available
-        if require_plugin:
-            from mmdeploy.apis.ncnn import is_custom_ops_available
-    elif backend == Backend.OPENVINO:
-        from mmdeploy.apis.openvino import is_available
-    elif backend == Backend.TORCHSCRIPT:
-        from mmdeploy.backend.torchscript import ops_available as is_available
-    elif backend == Backend.RKNN:
-        from mmdeploy.backend.rknn import is_available
-        if not is_available():
-            # skip CI in github
-            pytest.skip(f'{backend.value} package is not available')
-        # device required
-        from mmdeploy.backend.rknn import device_available as is_available
-    elif backend == Backend.ASCEND:
-        from mmdeploy.backend.ascend import is_available
-    elif backend == Backend.TVM:
-        from mmdeploy.backend.tvm import is_available
-    elif backend == Backend.COREML:
-        from mmdeploy.backend.coreml import is_available
-    else:
-        warnings.warn('The backend checker is not available')
-        return
+    from mmdeploy.backend.base import get_backend_manager

-    if not is_available():
+    backend_mgr = get_backend_manager(backend.value)
+    result = backend_mgr.is_available(with_custom_ops=require_plugin)
+    if backend == Backend.RKNN:
+        # device required
+        from mmdeploy.backend.rknn import device_available
+        result = result and device_available()
+
+    if not result:
        pytest.skip(f'{backend.value} package is not available')
-    if require_plugin and is_custom_ops_available is not None:
-        if not is_custom_ops_available():
-            pytest.skip(f'{backend.value} plugin is not available')


class WrapFunction(nn.Module):
View File
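With the per-backend branches gone, a test opts in through the manager-backed decorator; a usage sketch (the test body is illustrative):

    from mmdeploy.utils import Backend
    from mmdeploy.utils.test import backend_checker

    @backend_checker(Backend.ONNXRUNTIME, require_plugin=True)
    def test_custom_op_runs():
        # skipped automatically unless onnxruntime and its custom ops exist
        assert True
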

@@ -1,3 +1,4 @@
+h5py
mmcls>=0.21.0,<=0.23.0
mmdet>=2.19.0,<=2.20.0
mmedit
@@ -7,3 +8,4 @@ mmrazor>=0.3.0
mmsegmentation
onnxruntime>=1.8.0
openvino-dev
+tqdm

View File

@ -1,5 +1,4 @@
grpcio grpcio
h5py
matplotlib matplotlib
multiprocess multiprocess
numpy numpy
@ -7,4 +6,3 @@ onnx>=1.8.0
protobuf<=3.20.1 protobuf<=3.20.1
six six
terminaltables terminaltables
tqdm

View File

@@ -3,7 +3,6 @@ import os.path as osp
import tempfile
from multiprocessing import Process

-import h5py
import mmcv

from mmdeploy.apis import create_calib_input_data
@@ -171,6 +170,7 @@ def get_model_cfg():


def run_test_create_calib_end2end():
+    import h5py
    model_cfg = get_model_cfg()
    deploy_cfg = get_end2end_deploy_cfg()
    create_calib_input_data(
@@ -203,6 +203,7 @@ def test_create_calib_end2end():


def run_test_create_calib_parittion():
+    import h5py
    model_cfg = get_model_cfg()
    deploy_cfg = get_partition_deploy_cfg()
    create_calib_input_data(

View File

@@ -177,11 +177,10 @@ def run_wrapper(backend, wrapper, input):
    return results


-ALL_BACKEND = [
-    Backend.TENSORRT, Backend.ONNXRUNTIME, Backend.PPLNN, Backend.NCNN,
-    Backend.OPENVINO, Backend.TORCHSCRIPT, Backend.ASCEND, Backend.RKNN,
-    Backend.COREML, Backend.TVM
-]
+ALL_BACKEND = list(Backend)
+ALL_BACKEND.remove(Backend.DEFAULT)
+ALL_BACKEND.remove(Backend.PYTORCH)
+ALL_BACKEND.remove(Backend.SDK)


@pytest.mark.parametrize('backend', ALL_BACKEND)

View File

@@ -32,7 +32,7 @@ def assert_det_results(results, module_name: str = 'model'):
def assert_forward_results(results, module_name: str = 'model'):
    assert results is not None, f'failed to get output using {module_name}'
-    assert isinstance(results, list)
+    assert isinstance(results, Sequence)
    assert len(results) == 1
    if isinstance(results[0], tuple):  # mask
        assert len(results[0][0]) == 80

View File

@@ -4,8 +4,7 @@ from mmcv.utils import collect_env as collect_base_env
from mmcv.utils import get_git_hash

import mmdeploy
-from mmdeploy.utils import (get_backend_version, get_codebase_version,
-                            get_root_logger)
+from mmdeploy.utils import get_codebase_version, get_root_logger


def collect_env():
@@ -17,41 +16,16 @@ def collect_env():


def check_backend():
-    backend_versions = get_backend_version()
-    ort_version = backend_versions['onnxruntime']
-    trt_version = backend_versions['tensorrt']
-    ncnn_version = backend_versions['ncnn']
-    tvm_version = backend_versions['tvm']
-
-    import mmdeploy.apis.onnxruntime as ort_apis
+    from mmdeploy.backend.base import get_backend_manager
+    from mmdeploy.utils import Backend
+    exclude_backend_lists = [Backend.DEFAULT, Backend.PYTORCH, Backend.SDK]
+    backend_lists = [
+        backend for backend in Backend if backend not in exclude_backend_lists
+    ]
+
    logger = get_root_logger()
-    logger.info(f'onnxruntime: {ort_version}\tops_is_avaliable : '
-                f'{ort_apis.is_custom_ops_available()}')
-
-    import mmdeploy.apis.tensorrt as trt_apis
-    logger.info(f'tensorrt: {trt_version}\tops_is_avaliable : '
-                f'{trt_apis.is_custom_ops_available()}')
-
-    import mmdeploy.apis.ncnn as ncnn_apis
-    logger.info(f'ncnn: {ncnn_version}\tops_is_avaliable : '
-                f'{ncnn_apis.is_custom_ops_available()}')
-
-    logger.info(f'tvm: {tvm_version}')
-
-    import mmdeploy.apis.pplnn as pplnn_apis
-    logger.info(f'pplnn_is_avaliable: {pplnn_apis.is_available()}')
-
-    import mmdeploy.apis.openvino as openvino_apis
-    logger.info(f'openvino_is_avaliable: {openvino_apis.is_available()}')
-
-    import mmdeploy.apis.snpe as snpe_apis
-    logger.info(f'snpe_is_available: {snpe_apis.is_available()}')
-
-    import mmdeploy.apis.ascend as ascend_apis
-    logger.info(f'ascend_is_available: {ascend_apis.is_available()}')
-
-    import mmdeploy.apis.coreml as coreml_apis
-    logger.info(f'coreml_is_available: {coreml_apis.is_available()}')
+    for backend in backend_lists:
+        backend_mgr = get_backend_manager(backend.value)
+        backend_mgr.check_env(logger.info)


def check_codebase():
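
The per-backend log block collapses into that loop; equivalently, from a Python shell (the output format comes from each manager's check_env, versions are illustrative):

    from mmdeploy.backend.base import get_backend_manager
    from mmdeploy.utils import Backend

    for backend in (Backend.ONNXRUNTIME, Backend.TENSORRT, Backend.NCNN):
        get_backend_manager(backend.value).check_env(print)
    # ONNXRuntime:            1.8.1
    # ONNXRuntime-gpu:        None
    # ONNXRuntime custom ops: Available
    # tensorrt:               None
    # ncnn:                   None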