From d6fdb3e860e9ae75c8522edb87f77a9197980fef Mon Sep 17 00:00:00 2001
From: "q.yao" <streetyao@live.com>
Date: Wed, 28 Dec 2022 11:38:01 +0800
Subject: [PATCH] [Refactor] Add backend manager for 1.x (#1515)

* backend manager 1.x

* update pplnn init

* rename file

* add to backend

* add check env and misc

* fix action

* fix ut

* fix comment
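
After this refactor, each backend exposes a single manager class (registered via
`BACKEND_MANAGERS.register`) that bundles availability checks, environment checks,
model conversion and wrapper construction. A minimal usage sketch under the new API
(the file name and output names below are placeholders, not part of this patch):

```Python
from mmdeploy.backend.base import get_backend_manager

# Look up the manager registered under the backend name.
backend_mgr = get_backend_manager('onnxruntime')

if backend_mgr.is_available():
    # ONNX Runtime consumes the ONNX file directly, so its to_backend()
    # simply returns the IR files it was given.
    backend_files = backend_mgr.to_backend(
        ir_files=['end2end.onnx'], work_dir='work_dir')
    # Build an inference wrapper around the backend files.
    wrapper = backend_mgr.build_wrapper(
        backend_files=backend_files,
        device='cpu',
        output_names=['output'])
```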
---
 .github/workflows/backend-ncnn.yml            |   2 +-
 .github/workflows/backend-ort.yml             |   2 +-
 .../torchscript/ops/CMakeLists.txt            |   3 +
 .../07-developer-guide/support_new_backend.md |  61 ++--
 .../07-developer-guide/support_new_backend.md |  60 ++--
 mmdeploy/apis/__init__.py                     |  29 +-
 mmdeploy/apis/calibration.py                  |   9 +-
 mmdeploy/apis/extract_model.py                |   2 +-
 mmdeploy/apis/inference.py                    |   6 +-
 mmdeploy/apis/ncnn/__init__.py                |   4 +-
 mmdeploy/apis/onnxruntime/__init__.py         |   4 +-
 mmdeploy/apis/pytorch2onnx.py                 |  10 +-
 mmdeploy/apis/pytorch2torchscript.py          |   8 +-
 mmdeploy/apis/tensorrt/__init__.py            |   4 +-
 mmdeploy/apis/utils/__init__.py               |   5 +-
 mmdeploy/apis/utils/calibration.py            |   6 +-
 mmdeploy/apis/utils/utils.py                  |  36 ++
 mmdeploy/apis/visualize.py                    |   3 +-
 mmdeploy/backend/ascend/__init__.py           |  16 +-
 mmdeploy/backend/ascend/backend_manager.py    |  91 +++++
 mmdeploy/backend/base/__init__.py             |   3 +
 mmdeploy/backend/base/backend_manager.py      | 173 ++++++++++
 mmdeploy/backend/coreml/__init__.py           |  16 +-
 mmdeploy/backend/coreml/backend_manager.py    |  89 +++++
 mmdeploy/backend/ncnn/__init__.py             |  35 +-
 mmdeploy/backend/ncnn/backend_manager.py      | 145 ++++++++
 mmdeploy/backend/onnxruntime/__init__.py      |  29 +-
 .../backend/onnxruntime/backend_manager.py    | 142 ++++++++
 mmdeploy/backend/onnxruntime/wrapper.py       |   8 +-
 mmdeploy/backend/openvino/__init__.py         |  16 +-
 mmdeploy/backend/openvino/backend_manager.py  | 101 ++++++
 mmdeploy/backend/pplnn/__init__.py            |  16 +-
 mmdeploy/backend/pplnn/backend_manager.py     | 108 ++++++
 mmdeploy/backend/rknn/__init__.py             |  16 +-
 mmdeploy/backend/rknn/backend_manager.py      | 155 +++++++++
 mmdeploy/backend/rknn/onnx2rknn.py            |   4 +-
 mmdeploy/backend/sdk/__init__.py              |  33 +-
 mmdeploy/backend/sdk/backend_manager.py       |  83 +++++
 mmdeploy/backend/sdk/export_info.py           |   6 +-
 mmdeploy/backend/snpe/__init__.py             |  23 +-
 mmdeploy/backend/snpe/backend_manager.py      |  99 ++++++
 mmdeploy/backend/tensorrt/__init__.py         |  31 +-
 mmdeploy/backend/tensorrt/backend_manager.py  | 138 ++++++++
 mmdeploy/backend/tensorrt/calib_utils.py      |   6 +-
 mmdeploy/backend/tensorrt/utils.py            |  17 +-
 mmdeploy/backend/torchscript/__init__.py      |  15 +-
 .../backend/torchscript/backend_manager.py    | 104 ++++++
 mmdeploy/backend/torchscript/init_plugins.py  |  17 +-
 mmdeploy/backend/tvm/__init__.py              |  17 +-
 mmdeploy/backend/tvm/backend_manager.py       | 135 ++++++++
 mmdeploy/codebase/base/backend_model.py       |  92 +----
 .../mmdet/deploy/object_detection_model.py    |   4 +-
 .../mmdet/models/roi_heads/fcn_mask_head.py   |   3 +-
 mmdeploy/utils/env.py                         |   5 +-
 mmdeploy/utils/test.py                        | 196 ++---------
 requirements/optional.txt                     |   2 +
 requirements/runtime.txt                      |   2 -
 tests/test_apis/test_calibration.py           |   3 +-
 tests/test_backend/test_wrapper.py            | 108 ++----
 .../test_mmdet/test_mmdet_models.py           |   1 +
 .../test_mmdet/test_object_detection_model.py |   6 +-
 .../test_mmrotate/test_mmrotate_models.py     |   6 +-
 .../test_mmseg/test_mmseg_models.py           |   2 +-
 tests/test_codebase/test_mmseg/utils.py       |   2 +-
 tools/check_env.py                            |  49 +--
 tools/deploy.py                               | 318 ++++--------------
 66 files changed, 1959 insertions(+), 981 deletions(-)
 create mode 100644 mmdeploy/backend/ascend/backend_manager.py
 create mode 100644 mmdeploy/backend/base/backend_manager.py
 create mode 100644 mmdeploy/backend/coreml/backend_manager.py
 create mode 100644 mmdeploy/backend/ncnn/backend_manager.py
 create mode 100644 mmdeploy/backend/onnxruntime/backend_manager.py
 create mode 100644 mmdeploy/backend/openvino/backend_manager.py
 create mode 100644 mmdeploy/backend/pplnn/backend_manager.py
 create mode 100644 mmdeploy/backend/rknn/backend_manager.py
 create mode 100644 mmdeploy/backend/sdk/backend_manager.py
 create mode 100644 mmdeploy/backend/snpe/backend_manager.py
 create mode 100644 mmdeploy/backend/tensorrt/backend_manager.py
 create mode 100644 mmdeploy/backend/torchscript/backend_manager.py
 create mode 100644 mmdeploy/backend/tvm/backend_manager.py

diff --git a/.github/workflows/backend-ncnn.yml b/.github/workflows/backend-ncnn.yml
index 884c830bc..15b1a503a 100644
--- a/.github/workflows/backend-ncnn.yml
+++ b/.github/workflows/backend-ncnn.yml
@@ -79,4 +79,4 @@ jobs:
           python -m pip install torch==1.8.2 torchvision==0.9.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cpu
           python -m pip install mmcv-lite
           python tools/scripts/build_ubuntu_x64_ncnn.py
-          python -c 'import mmdeploy.apis.ncnn as ncnn_api; assert ncnn_api.is_available() and ncnn_api.is_custom_ops_available()'
+          python -c 'import mmdeploy.apis.ncnn as ncnn_api; assert ncnn_api.is_available(with_custom_ops=True)'
diff --git a/.github/workflows/backend-ort.yml b/.github/workflows/backend-ort.yml
index ec8dcc589..ccf3af51f 100644
--- a/.github/workflows/backend-ort.yml
+++ b/.github/workflows/backend-ort.yml
@@ -36,7 +36,7 @@ jobs:
           python -m pip install torch==1.8.2 torchvision==0.9.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cpu
           python -m pip install mmcv-lite openmim
           python tools/scripts/build_ubuntu_x64_ort.py
-          python -c 'import mmdeploy.apis.onnxruntime as ort_api; assert ort_api.is_available() and ort_api.is_custom_ops_available()'
+          python -c 'import mmdeploy.apis.onnxruntime as ort_api; assert ort_api.is_available(with_custom_ops=True)'
       - name: test mmcls full pipeline
         run: |
           python -m mim install $(cat requirements/codebases.txt | grep mmcls)
diff --git a/csrc/mmdeploy/backend_ops/torchscript/ops/CMakeLists.txt b/csrc/mmdeploy/backend_ops/torchscript/ops/CMakeLists.txt
index d53ca9527..18a2b6f03 100644
--- a/csrc/mmdeploy/backend_ops/torchscript/ops/CMakeLists.txt
+++ b/csrc/mmdeploy/backend_ops/torchscript/ops/CMakeLists.txt
@@ -31,3 +31,6 @@ mmdeploy_export(${PROJECT_NAME}_obj)
 mmdeploy_add_module(${PROJECT_NAME} MODULE EXCLUDE "")
 target_link_libraries(${PROJECT_NAME} PUBLIC ${PROJECT_NAME}_obj)
 add_library(mmdeploy::torchscript_ops ALIAS ${PROJECT_NAME})
+
+set(_TORCHJIT_OPS_DIR ${CMAKE_SOURCE_DIR}/mmdeploy/lib)
+install(TARGETS ${PROJECT_NAME} DESTINATION ${_TORCHJIT_OPS_DIR})
diff --git a/docs/en/07-developer-guide/support_new_backend.md b/docs/en/07-developer-guide/support_new_backend.md
index 7872334a8..1513ca42c 100644
--- a/docs/en/07-developer-guide/support_new_backend.md
+++ b/docs/en/07-developer-guide/support_new_backend.md
@@ -123,32 +123,20 @@ The backends in MMDeploy must support the ONNX. The backend loads the ".onnx" fi
        __all__ += ['onnx2ncnn', 'get_output_model_file']
    ```
 
-   Then add the codes about conversion to `tools/deploy.py` using these APIs if necessary.
+   Create a backend manager class derived from `BaseBackendManager` and implement its `to_backend` class method.
 
    **Example:**
 
    ```Python
-   # tools/deploy.py
-   # ...
-       elif backend == Backend.NCNN:
-           from mmdeploy.apis.ncnn import is_available as is_available_ncnn
-
-           if not is_available_ncnn():
-               logging.error('ncnn support is not available.')
-               exit(-1)
-
-           from mmdeploy.apis.ncnn import onnx2ncnn, get_output_model_file
-
-           backend_files = []
-           for onnx_path in onnx_files:
-               create_process(
-                   f'onnx2ncnn with {onnx_path}',
-                   target=onnx2ncnn,
-                   args=(onnx_path, args.work_dir),
-                   kwargs=dict(),
-                   ret_value=ret_value)
-               backend_files += get_output_model_file(onnx_path, args.work_dir)
-   # ...
+    @classmethod
+    def to_backend(cls,
+                   ir_files: Sequence[str],
+                   deploy_cfg: Any,
+                   work_dir: str,
+                   log_level: int = logging.INFO,
+                   device: str = 'cpu',
+                   **kwargs) -> Sequence[str]:
+        return ir_files
    ```
 
 6. Convert the models of OpenMMLab to backends (if necessary) and inference on backend engine. If you find some incompatible operators when testing, you can try to rewrite the original model for the backend following the [rewriter tutorial](support_new_model.md) or add custom operators.
@@ -209,23 +197,26 @@ Although the backend engines are usually implemented in C/C++, it is convenient
            self.sess.run_with_iobinding(io_binding)
    ```
 
-4. Add a default initialization method for the new wrapper in `mmdeploy/codebase/base/backend_model.py`
+4. Create a backend manager class derived from `BaseBackendManager` and implement its `build_wrapper` class method.
 
    **Example:**
 
    ```Python
-       @staticmethod
-       def _build_wrapper(backend: Backend,
-                          backend_files: Sequence[str],
-                          device: str,
-                          input_names: Optional[Sequence[str]] = None,
-                          output_names: Optional[Sequence[str]] = None):
-           if backend == Backend.ONNXRUNTIME:
-               from mmdeploy.backend.onnxruntime import ORTWrapper
-               return ORTWrapper(
-                   onnx_file=backend_files[0],
-                   device=device,
-                   output_names=output_names)
+        @BACKEND_MANAGERS.register('onnxruntime')
+        class ONNXRuntimeManager(BaseBackendManager):
+            @classmethod
+            def build_wrapper(cls,
+                              backend_files: Sequence[str],
+                              device: str = 'cpu',
+                              input_names: Optional[Sequence[str]] = None,
+                              output_names: Optional[Sequence[str]] = None,
+                              deploy_cfg: Optional[Any] = None,
+                              **kwargs):
+                from .wrapper import ORTWrapper
+                return ORTWrapper(
+                    onnx_file=backend_files[0],
+                    device=device,
+                    output_names=output_names)
    ```
 
 5. Add docstring and unit tests for new code :).
diff --git a/docs/zh_cn/07-developer-guide/support_new_backend.md b/docs/zh_cn/07-developer-guide/support_new_backend.md
index d881bfc49..91b1cf339 100644
--- a/docs/zh_cn/07-developer-guide/support_new_backend.md
+++ b/docs/zh_cn/07-developer-guide/support_new_backend.md
@@ -123,32 +123,20 @@ MMDeploy 中的后端必须支持 ONNX,因此后端能直接加载“.onnx”
        __all__ += ['onnx2ncnn', 'get_output_model_file']
    ```
 
-   然后根据需要使用这些 APIs 为 `tools/deploy.py` 添加相关转换代码
+   从 BaseBackendManager 派生类,实现 `to_backend` 类方法。
 
    **例子**
 
    ```Python
-   # tools/deploy.py
-   # ...
-       elif backend == Backend.NCNN:
-           from mmdeploy.apis.ncnn import is_available as is_available_ncnn
-
-           if not is_available_ncnn():
-               logging.error('ncnn support is not available.')
-               exit(-1)
-
-           from mmdeploy.apis.ncnn import onnx2ncnn, get_output_model_file
-
-           backend_files = []
-           for onnx_path in onnx_files:
-               create_process(
-                   f'mmdeploy_onnx2ncnn with {onnx_path}',
-                   target=onnx2ncnn,
-                   args=(onnx_path, args.work_dir),
-                   kwargs=dict(),
-                   ret_value=ret_value)
-               backend_files += get_output_model_file(onnx_path, args.work_dir)
-   # ...
+    @classmethod
+    def to_backend(cls,
+                   ir_files: Sequence[str],
+                   deploy_cfg: Any,
+                   work_dir: str,
+                   log_level: int = logging.INFO,
+                   device: str = 'cpu',
+                   **kwargs) -> Sequence[str]:
+        return ir_files
    ```
 
 6. 将 OpenMMLab 的模型转换后(如有必要)并在后端引擎上进行推理。如果在测试时发现一些不兼容的算子,可以尝试按照[重写器教程](support_new_model.md)为后端重写原始模型或添加自定义算子。
@@ -210,22 +198,26 @@ MMDeploy 中的后端必须支持 ONNX,因此后端能直接加载“.onnx”
            self.sess.run_with_iobinding(io_binding)
    ```
 
-4. 为新封装装器添加默认初始化方法 `mmdeploy/codebase/base/backend_model.py`
+4. 从 `BaseBackendManager` 派生接口类,实现 `build_wrapper` 类方法
 
    **例子**
 
    ```Python
-       @staticmethod
-       def _build_wrapper(backend: Backend,
-                          backend_files: Sequence[str],
-                          device: str,
-                          output_names: Optional[Sequence[str]] = None):
-           if backend == Backend.ONNXRUNTIME:
-               from mmdeploy.backend.onnxruntime import ORTWrapper
-               return ORTWrapper(
-                   onnx_file=backend_files[0],
-                   device=device,
-                   output_names=output_names)
+        @BACKEND_MANAGERS.register('onnxruntime')
+        class ONNXRuntimeManager(BaseBackendManager):
+            @classmethod
+            def build_wrapper(cls,
+                              backend_files: Sequence[str],
+                              device: str = 'cpu',
+                              input_names: Optional[Sequence[str]] = None,
+                              output_names: Optional[Sequence[str]] = None,
+                              deploy_cfg: Optional[Any] = None,
+                              **kwargs):
+                from .wrapper import ORTWrapper
+                return ORTWrapper(
+                    onnx_file=backend_files[0],
+                    device=device,
+                    output_names=output_names)
    ```
 
 5. 为新后端引擎代码添加相关注释和单元测试 :).
diff --git a/mmdeploy/apis/__init__.py b/mmdeploy/apis/__init__.py
index 0b3a7d20a..132d2ce40 100644
--- a/mmdeploy/apis/__init__.py
+++ b/mmdeploy/apis/__init__.py
@@ -1,19 +1,14 @@
 # Copyright (c) OpenMMLab. All rights reserved.
+from .calibration import create_calib_input_data
+from .extract_model import extract_model
+from .inference import inference_model
+from .pytorch2onnx import torch2onnx
+from .pytorch2torchscript import torch2torchscript
+from .utils import build_task_processor, get_predefined_partition_cfg
+from .visualize import visualize_model
 
-# mmcv & mmengine dependency
-try:
-    from .calibration import create_calib_input_data
-    from .extract_model import extract_model
-    from .inference import inference_model
-    from .pytorch2onnx import torch2onnx
-    from .pytorch2torchscript import torch2torchscript
-    from .utils import build_task_processor, get_predefined_partition_cfg
-    from .visualize import visualize_model
-
-    __all__ = [
-        'create_calib_input_data', 'extract_model', 'inference_model',
-        'torch2onnx', 'torch2torchscript', 'build_task_processor',
-        'get_predefined_partition_cfg', 'visualize_model'
-    ]
-except Exception:
-    pass
+__all__ = [
+    'create_calib_input_data', 'extract_model', 'inference_model',
+    'torch2onnx', 'torch2torchscript', 'build_task_processor',
+    'get_predefined_partition_cfg', 'visualize_model'
+]
diff --git a/mmdeploy/apis/calibration.py b/mmdeploy/apis/calibration.py
index 2b99626e5..1ed2be392 100644
--- a/mmdeploy/apis/calibration.py
+++ b/mmdeploy/apis/calibration.py
@@ -4,11 +4,7 @@ from typing import Optional, Union
 
 from mmengine import Config
 
-from mmdeploy.core import patch_model
-from mmdeploy.utils import (IR, cfg_apply_marks, get_backend, get_ir_config,
-                            load_config)
 from .core import PIPELINE_MANAGER, no_mp
-from .utils import create_calib_input_data as create_calib_input_data_impl
 
 
 @PIPELINE_MANAGER.register_pipeline()
@@ -34,6 +30,11 @@ def create_calib_input_data(calib_file: str,
         dataset_type (str, optional): The dataset type. Defaults to 'val'.
         device (str, optional): Device to create dataset. Defaults to 'cpu'.
     """
+
+    from mmdeploy.core import patch_model
+    from mmdeploy.utils import (IR, cfg_apply_marks, get_backend,
+                                get_ir_config, load_config)
+    from .utils import create_calib_input_data as create_calib_input_data_impl
     with no_mp():
         if dataset_cfg is None:
             dataset_cfg = model_cfg
diff --git a/mmdeploy/apis/extract_model.py b/mmdeploy/apis/extract_model.py
index 62b53b185..a8ed6ee11 100644
--- a/mmdeploy/apis/extract_model.py
+++ b/mmdeploy/apis/extract_model.py
@@ -5,7 +5,6 @@ from typing import Dict, Iterable, Optional, Union
 import onnx
 
 from .core import PIPELINE_MANAGER
-from .onnx import extract_partition
 
 
 @PIPELINE_MANAGER.register_pipeline()
@@ -63,5 +62,6 @@ def extract_model(model: Union[str, onnx.ModelProto],
         onnx.ModelProto: The extracted model.
     """
 
+    from .onnx import extract_partition
     return extract_partition(model, start_marker, end_marker, start_name_map,
                              end_name_map, dynamic_axes, save_file)
diff --git a/mmdeploy/apis/inference.py b/mmdeploy/apis/inference.py
index 3f02caea8..a56d1803c 100644
--- a/mmdeploy/apis/inference.py
+++ b/mmdeploy/apis/inference.py
@@ -3,9 +3,6 @@ from typing import Any, Sequence, Union
 
 import mmengine
 import numpy as np
-import torch
-
-from mmdeploy.utils import get_input_shape, load_config
 
 
 def inference_model(model_cfg: Union[str, mmengine.Config],
@@ -37,6 +34,9 @@ def inference_model(model_cfg: Union[str, mmengine.Config],
     Returns:
         Any: The inference results
     """
+    import torch
+
+    from mmdeploy.utils import get_input_shape, load_config
     deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
 
     from mmdeploy.apis.utils import build_task_processor
diff --git a/mmdeploy/apis/ncnn/__init__.py b/mmdeploy/apis/ncnn/__init__.py
index 196e7f2bf..1657e633b 100644
--- a/mmdeploy/apis/ncnn/__init__.py
+++ b/mmdeploy/apis/ncnn/__init__.py
@@ -1,11 +1,11 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from mmdeploy.backend.ncnn import from_onnx as _from_onnx
-from mmdeploy.backend.ncnn import is_available, is_custom_ops_available
+from mmdeploy.backend.ncnn import is_available
 from ..core import PIPELINE_MANAGER
 
 from_onnx = PIPELINE_MANAGER.register_pipeline()(_from_onnx)
 
-__all__ = ['is_available', 'is_custom_ops_available', 'from_onnx']
+__all__ = ['is_available', 'from_onnx']
 
 if is_available():
     try:
diff --git a/mmdeploy/apis/onnxruntime/__init__.py b/mmdeploy/apis/onnxruntime/__init__.py
index 63ef448d5..187a3ca64 100644
--- a/mmdeploy/apis/onnxruntime/__init__.py
+++ b/mmdeploy/apis/onnxruntime/__init__.py
@@ -1,4 +1,4 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-from mmdeploy.backend.onnxruntime import is_available, is_custom_ops_available
+from mmdeploy.backend.onnxruntime import is_available
 
-__all__ = ['is_available', 'is_custom_ops_available']
+__all__ = ['is_available']
diff --git a/mmdeploy/apis/pytorch2onnx.py b/mmdeploy/apis/pytorch2onnx.py
index 247ee2066..9f8e91f36 100644
--- a/mmdeploy/apis/pytorch2onnx.py
+++ b/mmdeploy/apis/pytorch2onnx.py
@@ -4,11 +4,7 @@ from typing import Any, Optional, Union
 
 import mmengine
 
-from mmdeploy.apis.core.pipeline_manager import no_mp
-from mmdeploy.utils import (Backend, get_backend, get_dynamic_axes,
-                            get_input_shape, get_onnx_config, load_config)
 from .core import PIPELINE_MANAGER
-from .onnx import export
 
 
 @PIPELINE_MANAGER.register_pipeline()
@@ -48,6 +44,12 @@ def torch2onnx(img: Any,
             defaults to `None`.
         device (str): A string specifying device type, defaults to 'cuda:0'.
     """
+
+    from mmdeploy.apis.core.pipeline_manager import no_mp
+    from mmdeploy.utils import (Backend, get_backend, get_dynamic_axes,
+                                get_input_shape, get_onnx_config, load_config)
+    from .onnx import export
+
     # load deploy_cfg if necessary
     deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
     mmengine.mkdir_or_exist(osp.abspath(work_dir))
diff --git a/mmdeploy/apis/pytorch2torchscript.py b/mmdeploy/apis/pytorch2torchscript.py
index 075a9a6ae..0ab097f47 100644
--- a/mmdeploy/apis/pytorch2torchscript.py
+++ b/mmdeploy/apis/pytorch2torchscript.py
@@ -3,11 +3,8 @@ import os.path as osp
 from typing import Any, Optional, Union
 
 import mmengine
-import torch
 
 from mmdeploy.apis.core.pipeline_manager import PIPELINE_MANAGER, no_mp
-from mmdeploy.utils import get_backend, get_input_shape, load_config
-from .torch_jit import trace
 
 
 @PIPELINE_MANAGER.register_pipeline()
@@ -32,6 +29,11 @@ def torch2torchscript(img: Any,
             defaults to `None`.
         device (str): A string specifying device type, defaults to 'cuda:0'.
     """
+    import torch
+
+    from mmdeploy.utils import get_backend, get_input_shape, load_config
+    from .torch_jit import trace
+
     # load deploy_cfg if necessary
     deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
     mmengine.mkdir_or_exist(osp.abspath(work_dir))
diff --git a/mmdeploy/apis/tensorrt/__init__.py b/mmdeploy/apis/tensorrt/__init__.py
index cad955bc6..d3010b37a 100644
--- a/mmdeploy/apis/tensorrt/__init__.py
+++ b/mmdeploy/apis/tensorrt/__init__.py
@@ -1,8 +1,8 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-from mmdeploy.backend.tensorrt import is_available, is_custom_ops_available
+from mmdeploy.backend.tensorrt import is_available
 from ..core import PIPELINE_MANAGER
 
-__all__ = ['is_available', 'is_custom_ops_available']
+__all__ = ['is_available']
 
 if is_available():
     from mmdeploy.backend.tensorrt import from_onnx as _from_onnx
diff --git a/mmdeploy/apis/utils/__init__.py b/mmdeploy/apis/utils/__init__.py
index 27740b5e9..f9b97caec 100644
--- a/mmdeploy/apis/utils/__init__.py
+++ b/mmdeploy/apis/utils/__init__.py
@@ -1,8 +1,9 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from .calibration import create_calib_input_data
-from .utils import build_task_processor, get_predefined_partition_cfg
+from .utils import (build_task_processor, get_predefined_partition_cfg,
+                    to_backend)
 
 __all__ = [
     'create_calib_input_data', 'build_task_processor',
-    'get_predefined_partition_cfg'
+    'get_predefined_partition_cfg', 'to_backend'
 ]
diff --git a/mmdeploy/apis/utils/calibration.py b/mmdeploy/apis/utils/calibration.py
index 24353b601..3dc6a7095 100644
--- a/mmdeploy/apis/utils/calibration.py
+++ b/mmdeploy/apis/utils/calibration.py
@@ -2,12 +2,9 @@
 from copy import deepcopy
 from typing import Callable, Dict, Optional
 
-import h5py
 import torch
-import tqdm
 from torch.utils.data import DataLoader
 
-from mmdeploy.core import RewriterContext, reset_mark_function_count
 from ..core import PIPELINE_MANAGER
 
 
@@ -46,7 +43,10 @@ def create_calib_input_data(calib_file: str,
             'val', defaults to 'val'.
         device (str): Specifying the device to run on, defaults to 'cpu'.
     """
+    import h5py
+    import tqdm
 
+    from mmdeploy.core import RewriterContext, reset_mark_function_count
     backend = 'default'
 
     with h5py.File(calib_file, mode='w') as file:
diff --git a/mmdeploy/apis/utils/utils.py b/mmdeploy/apis/utils/utils.py
index d1e0a1b78..d7630e663 100644
--- a/mmdeploy/apis/utils/utils.py
+++ b/mmdeploy/apis/utils/utils.py
@@ -1,10 +1,14 @@
 # Copyright (c) OpenMMLab. All rights reserved.
+import logging
+from typing import Any, Optional, Sequence
+
 import mmengine
 
 from mmdeploy.codebase import BaseTask, get_codebase_class, import_codebase
 from mmdeploy.utils import (get_backend, get_codebase, get_task_type,
                             parse_device_id)
 from mmdeploy.utils.config_utils import get_codebase_external_module
+from ..core import PIPELINE_MANAGER
 
 
 def check_backend_device(deploy_cfg: mmengine.Config, device: str):
@@ -66,3 +70,35 @@ def get_predefined_partition_cfg(deploy_cfg: mmengine.Config,
     codebase = get_codebase_class(codebase_type)
     task_processor_class = codebase.get_task_class(task)
     return task_processor_class.get_partition_cfg(partition_type)
+
+
+@PIPELINE_MANAGER.register_pipeline()
+def to_backend(backend_name: str,
+               ir_files: Sequence[str],
+               work_dir: str,
+               deploy_cfg: Optional[Any] = None,
+               log_level: int = logging.INFO,
+               device: str = 'cpu',
+               **kwargs) -> Sequence[str]:
+    """Convert intermediate representation to given backend.
+
+    Args:
+        backend_name (str): The name of the backend.
+        ir_files (Sequence[str]): The intermediate representation files.
+        work_dir (str): The work directory, backend files and logs should
+            be saved in this directory.
+        deploy_cfg (Any): The deploy config.
+        log_level (int, optional): The log level. Defaults to logging.INFO.
+        device (str, optional): The device type. Defaults to 'cpu'.
+    Returns:
+        Sequence[str]: Backend files.
+    """
+    from mmdeploy.backend.base import get_backend_manager
+    backend_mgr = get_backend_manager(backend_name)
+    return backend_mgr.to_backend(
+        ir_files=ir_files,
+        work_dir=work_dir,
+        deploy_cfg=deploy_cfg,
+        log_level=log_level,
+        device=device,
+        **kwargs)
diff --git a/mmdeploy/apis/visualize.py b/mmdeploy/apis/visualize.py
index 8476e2537..593cfce32 100644
--- a/mmdeploy/apis/visualize.py
+++ b/mmdeploy/apis/visualize.py
@@ -5,13 +5,12 @@ import mmengine
 import numpy as np
 import torch
 
-from mmdeploy.codebase import BaseTask
 from mmdeploy.utils import Backend, get_backend, get_input_shape, load_config
 
 
 def visualize_model(model_cfg: Union[str, mmengine.Config],
                     deploy_cfg: Union[str, mmengine.Config],
-                    model: Union[str, Sequence[str], BaseTask],
+                    model: Union[str, Sequence[str]],
                     img: Union[str, np.ndarray],
                     device: str,
                     backend: Optional[Backend] = None,
diff --git a/mmdeploy/backend/ascend/__init__.py b/mmdeploy/backend/ascend/__init__.py
index 9bbfadabe..5b70bf8d8 100644
--- a/mmdeploy/backend/ascend/__init__.py
+++ b/mmdeploy/backend/ascend/__init__.py
@@ -1,19 +1,13 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import importlib
-
+from .backend_manager import AscendManager
 from .utils import update_sdk_pipeline
 
+_BackendManager = AscendManager
 
-def is_available():
-    """Check whether acl is installed.
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper
 
-    Returns:
-        bool: True if acl package is installed.
-    """
-    return importlib.util.find_spec('acl') is not None
-
-
-__all__ = ['update_sdk_pipeline']
+__all__ = ['update_sdk_pipeline', 'AscendManager']
 
 if is_available():
     from .wrapper import AscendWrapper, Error
diff --git a/mmdeploy/backend/ascend/backend_manager.py b/mmdeploy/backend/ascend/backend_manager.py
new file mode 100644
index 000000000..165e3ba58
--- /dev/null
+++ b/mmdeploy/backend/ascend/backend_manager.py
@@ -0,0 +1,91 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import logging
+import os.path as osp
+from typing import Any, Optional, Sequence
+
+from ..base import BACKEND_MANAGERS, BaseBackendManager
+
+
+@BACKEND_MANAGERS.register('ascend')
+class AscendManager(BaseBackendManager):
+
+    @classmethod
+    def build_wrapper(cls,
+                      backend_files: Sequence[str],
+                      device: str = 'cpu',
+                      input_names: Optional[Sequence[str]] = None,
+                      output_names: Optional[Sequence[str]] = None,
+                      deploy_cfg: Optional[Any] = None,
+                      **kwargs):
+        """Build the wrapper for the backend model.
+
+        Args:
+            backend_files (Sequence[str]): Backend files.
+            device (str, optional): The device info. Defaults to 'cpu'.
+            input_names (Optional[Sequence[str]], optional): input names.
+                Defaults to None.
+            output_names (Optional[Sequence[str]], optional): output names.
+                Defaults to None.
+            deploy_cfg (Optional[Any], optional): The deploy config. Defaults
+                to None.
+        """
+        from .wrapper import AscendWrapper
+        return AscendWrapper(model=backend_files[0], device=device)
+
+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): Check whether custom ops exist.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+        return importlib.util.find_spec('acl') is not None
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                return pkg_resources.get_distribution('acl').version
+            except Exception:
+                return 'None'
+
+    @classmethod
+    def to_backend(cls,
+                   ir_files: Sequence[str],
+                   work_dir: str,
+                   deploy_cfg: Any,
+                   log_level: int = logging.INFO,
+                   device: str = 'cpu',
+                   **kwargs) -> Sequence[str]:
+        """Convert intermediate representation to given backend.
+
+        Args:
+            ir_files (Sequence[str]): The intermediate representation files.
+            work_dir (str): The work directory, backend files and logs should
+                be saved in this directory.
+            deploy_cfg (Any): The deploy config.
+            log_level (int, optional): The log level. Defaults to logging.INFO.
+            device (str, optional): The device type. Defaults to 'cpu'.
+        Returns:
+            Sequence[str]: Backend files.
+        """
+        from mmdeploy.utils import get_model_inputs
+        from .onnx2ascend import from_onnx
+
+        model_inputs = get_model_inputs(deploy_cfg)
+
+        om_files = []
+        for model_id, onnx_path in enumerate(ir_files):
+            om_path = osp.splitext(onnx_path)[0] + '.om'
+            from_onnx(onnx_path, work_dir, model_inputs[model_id])
+            om_files.append(om_path)
+        backend_files = om_files
+
+        return backend_files
diff --git a/mmdeploy/backend/base/__init__.py b/mmdeploy/backend/base/__init__.py
index cfbec5424..840c11391 100644
--- a/mmdeploy/backend/base/__init__.py
+++ b/mmdeploy/backend/base/__init__.py
@@ -1,9 +1,12 @@
 # Copyright (c) OpenMMLab. All rights reserved.
+from .backend_manager import (BACKEND_MANAGERS, BaseBackendManager,
+                              get_backend_manager)
 from .backend_wrapper_registry import (BACKEND_WRAPPER, get_backend_file_count,
                                        get_backend_wrapper_class)
 from .base_wrapper import BaseWrapper
 
 __all__ = [
+    'BACKEND_MANAGERS', 'BaseBackendManager', 'get_backend_manager',
     'BaseWrapper', 'BACKEND_WRAPPER', 'get_backend_wrapper_class',
     'get_backend_file_count'
 ]
diff --git a/mmdeploy/backend/base/backend_manager.py b/mmdeploy/backend/base/backend_manager.py
new file mode 100644
index 000000000..28546ab97
--- /dev/null
+++ b/mmdeploy/backend/base/backend_manager.py
@@ -0,0 +1,173 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import importlib
+import logging
+from abc import ABCMeta
+from typing import Any, Callable, Optional, Sequence
+
+
+class BaseBackendManager(metaclass=ABCMeta):
+    """Abstract interface of backend manager."""
+
+    @classmethod
+    def build_wrapper(cls,
+                      backend_files: Sequence[str],
+                      device: str = 'cpu',
+                      input_names: Optional[Sequence[str]] = None,
+                      output_names: Optional[Sequence[str]] = None,
+                      deploy_cfg: Optional[Any] = None,
+                      **kwargs):
+        """Build the wrapper for the backend model.
+
+        Args:
+            backend_files (Sequence[str]): Backend files.
+            device (str, optional): The device info. Defaults to 'cpu'.
+            input_names (Optional[Sequence[str]], optional): input names.
+                Defaults to None.
+            output_names (Optional[Sequence[str]], optional): output names.
+                Defaults to None.
+            deploy_cfg (Optional[Any], optional): The deploy config. Defaults
+                to None.
+        """
+        raise NotImplementedError(
+            f'build_wrapper has not been implemented for `{cls.__name__}`')
+
+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): Check whether custom ops exist.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        raise NotImplementedError(
+            f'is_available has not been implemented for "{cls.__name__}"')
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        raise NotImplementedError(
+            f'get_version has not been implemented for "{cls.__name__}"')
+
+    @classmethod
+    def check_env(cls, log_callback: Callable = lambda _: _) -> str:
+        """Check current environment.
+
+        Returns:
+            str: Info about the environment.
+        """
+        try:
+            available = cls.is_available()
+            if available:
+                try:
+                    backend_version = cls.get_version()
+                except NotImplementedError:
+                    backend_version = 'Unknown'
+            else:
+                backend_version = 'None'
+
+            info = f'{cls.backend_name}:\t{backend_version}'
+        except Exception:
+            info = f'{cls.backend_name}:\tCheckFailed'
+
+        log_callback(info)
+
+        return info
+
+    @classmethod
+    def to_backend(cls,
+                   ir_files: Sequence[str],
+                   work_dir: str,
+                   deploy_cfg: Any,
+                   log_level: int = logging.INFO,
+                   device: str = 'cpu',
+                   **kwargs) -> Sequence[str]:
+        """Convert intermediate representation to given backend.
+
+        Args:
+            ir_files (Sequence[str]): The intermediate representation files.
+            work_dir (str): The work directory, backend files and logs should
+                be saved in this directory.
+            deploy_cfg (Any): The deploy config.
+            log_level (int, optional): The log level. Defaults to logging.INFO.
+            device (str, optional): The device type. Defaults to 'cpu'.
+        Returns:
+            Sequence[str]: Backend files.
+        """
+        raise NotImplementedError(
+            f'to_backend has not been implemented for `{cls.__name__}`')
+
+
+class BackendManagerRegistry:
+    """backend manager registry."""
+
+    def __init__(self):
+        self._module_dict = {}
+
+    def register(self, name: str, enum_name: Optional[str] = None):
+        """register backend manager.
+
+        Args:
+            name (str): name of the backend
+            enum_name (Optional[str], optional): enum name of the backend.
+                If not given, the upper case of the name will be used.
+        """
+        from mmdeploy.utils import get_root_logger
+        logger = get_root_logger()
+
+        if enum_name is None:
+            enum_name = name.upper()
+
+        def wrap_manager(cls):
+
+            from mmdeploy.utils import Backend
+
+            if not hasattr(Backend, enum_name):
+                from aenum import extend_enum
+                extend_enum(Backend, enum_name, name)
+                logger.info(f'Registered new backend: {enum_name} = {name}.')
+
+            if name in self._module_dict:
+                logger.info(
+                    f'Backend manager of `{name}` has already been registered.'
+                )
+
+            self._module_dict[name] = cls
+
+            cls.backend_name = name
+
+            return cls
+
+        return wrap_manager
+
+    def find(self, name: str) -> BaseBackendManager:
+        """Find the backend manager with name.
+
+        Args:
+            name (str): backend name.
+        Returns:
+            BaseBackendManager: backend manager of the given backend.
+        """
+        # try to import the backend module if it lives in `mmdeploy.backend`
+        try:
+            importlib.import_module('mmdeploy.backend.' + name)
+        except Exception:
+            pass
+        return self._module_dict.get(name, None)
+
+
+BACKEND_MANAGERS = BackendManagerRegistry()
+
+
+def get_backend_manager(name: str) -> BaseBackendManager:
+    """Get backend manager.
+
+    Args:
+        name (str): name of the backend.
+    Returns:
+        BaseBackendManager: The backend manager of given name
+    """
+    from enum import Enum
+    if isinstance(name, Enum):
+        name = name.value
+    return BACKEND_MANAGERS.find(name)
diff --git a/mmdeploy/backend/coreml/__init__.py b/mmdeploy/backend/coreml/__init__.py
index 0ebce3e06..b44447de9 100644
--- a/mmdeploy/backend/coreml/__init__.py
+++ b/mmdeploy/backend/coreml/__init__.py
@@ -1,18 +1,12 @@
 # Copyright (c) OpenMMLab. All rights reserved.
+from .backend_manager import CoreMLManager
 
-import importlib
+_BackendManager = CoreMLManager
 
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper
 
-def is_available():
-    """Check whether coremltools is installed.
-
-    Returns:
-        bool: True if coremltools package is installed.
-    """
-    return importlib.util.find_spec('coremltools') is not None
-
-
-__all__ = []
+__all__ = ['CoreMLManager']
 
 if is_available():
     from . import ops
diff --git a/mmdeploy/backend/coreml/backend_manager.py b/mmdeploy/backend/coreml/backend_manager.py
new file mode 100644
index 000000000..2d7906315
--- /dev/null
+++ b/mmdeploy/backend/coreml/backend_manager.py
@@ -0,0 +1,89 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import logging
+import os.path as osp
+from typing import Any, Optional, Sequence
+
+from ..base import BACKEND_MANAGERS, BaseBackendManager
+
+
+@BACKEND_MANAGERS.register('coreml')
+class CoreMLManager(BaseBackendManager):
+
+    @classmethod
+    def build_wrapper(cls,
+                      backend_files: Sequence[str],
+                      device: str = 'cpu',
+                      input_names: Optional[Sequence[str]] = None,
+                      output_names: Optional[Sequence[str]] = None,
+                      deploy_cfg: Optional[Any] = None,
+                      **kwargs):
+        """Build the wrapper for the backend model.
+
+        Args:
+            backend_files (Sequence[str]): Backend files.
+            device (str, optional): The device info. Defaults to 'cpu'.
+            input_names (Optional[Sequence[str]], optional): input names.
+                Defaults to None.
+            output_names (Optional[Sequence[str]], optional): output names.
+                Defaults to None.
+            deploy_cfg (Optional[Any], optional): The deploy config. Defaults
+                to None.
+        """
+        from .wrapper import CoreMLWrapper
+        return CoreMLWrapper(model_file=backend_files[0])
+
+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): Check whether custom ops exist.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+        return importlib.util.find_spec('coremltools') is not None
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                return pkg_resources.get_distribution('coremltools').version
+            except Exception:
+                return 'None'
+
+    @classmethod
+    def to_backend(cls,
+                   ir_files: Sequence[str],
+                   work_dir: str,
+                   deploy_cfg: Any,
+                   log_level: int = logging.INFO,
+                   device: str = 'cpu',
+                   **kwargs) -> Sequence[str]:
+        """Convert intermediate representation to given backend.
+
+        Args:
+            ir_files (Sequence[str]): The intermediate representation files.
+            work_dir (str): The work directory, backend files and logs should
+                be saved in this directory.
+            deploy_cfg (Any): The deploy config.
+            log_level (int, optional): The log level. Defaults to logging.INFO.
+            device (str, optional): The device type. Defaults to 'cpu'.
+        Returns:
+            Sequence[str]: Backend files.
+        """
+        from .torchscript2coreml import from_torchscript
+
+        coreml_files = []
+        for model_id, torchscript_path in enumerate(ir_files):
+            torchscript_name = osp.splitext(osp.split(torchscript_path)[1])[0]
+            output_file_prefix = osp.join(work_dir, torchscript_name)
+
+            from_torchscript(model_id, torchscript_path, output_file_prefix,
+                             deploy_cfg, coreml_files)
+
+        return coreml_files
diff --git a/mmdeploy/backend/ncnn/__init__.py b/mmdeploy/backend/ncnn/__init__.py
index 134493242..9e3f65f35 100644
--- a/mmdeploy/backend/ncnn/__init__.py
+++ b/mmdeploy/backend/ncnn/__init__.py
@@ -1,38 +1,13 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import importlib
-import os.path as osp
-
-from .init_plugins import get_onnx2ncnn_path, get_ops_path
+from .backend_manager import NCNNManager
 from .onnx2ncnn import from_onnx
 
+_BackendManager = NCNNManager
 
-def is_available():
-    """Check whether ncnn and onnx2ncnn tool are installed.
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper
 
-    Returns:
-        bool: True if ncnn and onnx2ncnn tool are installed.
-    """
-
-    has_pyncnn = importlib.util.find_spec('ncnn') is not None
-
-    onnx2ncnn = get_onnx2ncnn_path()
-
-    return has_pyncnn and osp.exists(onnx2ncnn)
-
-
-def is_custom_ops_available():
-    """Check whether ncnn extension and custom ops are installed.
-
-    Returns:
-        bool: True if ncnn extension and custom ops are compiled.
-    """
-    has_pyncnn_ext = importlib.util.find_spec(
-        'mmdeploy.backend.ncnn.ncnn_ext') is not None
-    ncnn_ops_path = get_ops_path()
-    return has_pyncnn_ext and osp.exists(ncnn_ops_path)
-
-
-__all__ = ['from_onnx']
+__all__ = ['NCNNManager', 'from_onnx']
 
 if is_available():
     try:
diff --git a/mmdeploy/backend/ncnn/backend_manager.py b/mmdeploy/backend/ncnn/backend_manager.py
new file mode 100644
index 000000000..6b8d29b69
--- /dev/null
+++ b/mmdeploy/backend/ncnn/backend_manager.py
@@ -0,0 +1,145 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import logging
+import os.path as osp
+import sys
+from typing import Any, Callable, Optional, Sequence
+
+from mmdeploy.utils import get_backend_config, get_root_logger
+from ..base import BACKEND_MANAGERS, BaseBackendManager
+
+
+@BACKEND_MANAGERS.register('ncnn')
+class NCNNManager(BaseBackendManager):
+
+    @classmethod
+    def build_wrapper(cls,
+                      backend_files: Sequence[str],
+                      device: str = 'cpu',
+                      input_names: Optional[Sequence[str]] = None,
+                      output_names: Optional[Sequence[str]] = None,
+                      deploy_cfg: Optional[Any] = None,
+                      **kwargs):
+        """Build the wrapper for the backend model.
+
+        Args:
+            backend_files (Sequence[str]): Backend files.
+            device (str, optional): The device info. Defaults to 'cpu'.
+            input_names (Optional[Sequence[str]], optional): input names.
+                Defaults to None.
+            output_names (Optional[Sequence[str]], optional): output names.
+                Defaults to None.
+            deploy_cfg (Optional[Any], optional): The deploy config. Defaults
+                to None.
+        """
+        from .wrapper import NCNNWrapper
+
+        # For unit tests, deploy_cfg will not be passed into the
+        # build_wrapper function.
+        if deploy_cfg:
+            backend_config = get_backend_config(deploy_cfg)
+            use_vulkan = backend_config.get('use_vulkan', False)
+        else:
+            use_vulkan = False
+        return NCNNWrapper(
+            param_file=backend_files[0],
+            bin_file=backend_files[1],
+            output_names=output_names,
+            use_vulkan=use_vulkan)
+
+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): Check whether custom ops exist.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+
+        from .init_plugins import get_onnx2ncnn_path, get_ops_path
+        has_pyncnn = importlib.util.find_spec('ncnn') is not None
+        onnx2ncnn = get_onnx2ncnn_path()
+        ret = has_pyncnn and (onnx2ncnn is not None)
+
+        if ret and with_custom_ops:
+            has_pyncnn_ext = importlib.util.find_spec(
+                'mmdeploy.backend.ncnn.ncnn_ext') is not None
+            op_path = get_ops_path()
+            custom_ops_exist = osp.exists(op_path)
+            ret = ret and has_pyncnn_ext and custom_ops_exist
+
+        return ret
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                return pkg_resources.get_distribution('ncnn').version
+            except Exception:
+                return 'None'
+
+    @classmethod
+    def check_env(cls, log_callback: Callable = lambda _: _) -> str:
+        """Check current environment.
+
+        Returns:
+            str: Info about the environment.
+        """
+        info = super().check_env(log_callback=log_callback)
+        available = cls.is_available()
+        ops_available = cls.is_available(with_custom_ops=True)
+        ops_available = 'Available' if ops_available else 'NotAvailable'
+
+        if available:
+            ops_info = f'ncnn custom ops:\t{ops_available}'
+            log_callback(ops_info)
+            info = f'{info}\n{ops_info}'
+
+        return info
+
+    @classmethod
+    def to_backend(cls,
+                   ir_files: Sequence[str],
+                   work_dir: str,
+                   log_level: int = logging.INFO,
+                   device: str = 'cpu',
+                   **kwargs) -> Sequence[str]:
+        """Convert intermediate representation to given backend.
+
+        Args:
+            ir_files (Sequence[str]): The intermediate representation files.
+            work_dir (str): The work directory, backend files and logs should
+                be saved in this directory.
+            log_level (int, optional): The log level. Defaults to logging.INFO.
+            device (str, optional): The device type. Defaults to 'cpu'.
+        Returns:
+            Sequence[str]: Backend files.
+        """
+        logger = get_root_logger()
+
+        from . import is_available
+
+        if not is_available():
+            logger.error('ncnn support is not available, please make sure:\n'
+                         '1) `mmdeploy_onnx2ncnn` exists in `PATH`\n'
+                         '2) `import ncnn` succeeds in Python')
+            sys.exit(1)
+
+        from mmdeploy.apis.ncnn import get_output_model_file
+        from .onnx2ncnn import from_onnx
+
+        backend_files = []
+        for onnx_path in ir_files:
+            model_param_path, model_bin_path = get_output_model_file(
+                onnx_path, work_dir)
+            onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]
+            from_onnx(onnx_path, osp.join(work_dir, onnx_name))
+
+            backend_files += [model_param_path, model_bin_path]
+
+        return backend_files
diff --git a/mmdeploy/backend/onnxruntime/__init__.py b/mmdeploy/backend/onnxruntime/__init__.py
index e808311bc..8dd174c2d 100644
--- a/mmdeploy/backend/onnxruntime/__init__.py
+++ b/mmdeploy/backend/onnxruntime/__init__.py
@@ -1,34 +1,17 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import importlib
-import os.path as osp
+from .backend_manager import ONNXRuntimeManager
 
-from .init_plugins import get_ops_path
+_BackendManager = ONNXRuntimeManager
 
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper
 
-def is_available():
-    """Check whether ONNX Runtime package is installed.
-
-    Returns:
-        bool: True if ONNX Runtime package is installed.
-    """
-
-    return importlib.util.find_spec('onnxruntime') is not None
-
-
-def is_custom_ops_available():
-    """Check whether ONNX Runtime custom ops are installed.
-
-    Returns:
-        bool: True if ONNX Runtime custom ops are compiled.
-    """
-    onnxruntime_op_path = get_ops_path()
-    return osp.exists(onnxruntime_op_path)
-
+__all__ = ['ONNXRuntimeManager']
 
 if is_available():
     try:
         # import wrapper if pytorch is available
         from .wrapper import ORTWrapper
-        __all__ = ['ORTWrapper']
+        __all__ += ['ORTWrapper']
     except Exception:
         pass
diff --git a/mmdeploy/backend/onnxruntime/backend_manager.py b/mmdeploy/backend/onnxruntime/backend_manager.py
new file mode 100644
index 000000000..7410f381c
--- /dev/null
+++ b/mmdeploy/backend/onnxruntime/backend_manager.py
@@ -0,0 +1,142 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import logging
+import os.path as osp
+from typing import Any, Callable, Optional, Sequence
+
+from ..base import BACKEND_MANAGERS, BaseBackendManager
+
+
+@BACKEND_MANAGERS.register('onnxruntime')
+class ONNXRuntimeManager(BaseBackendManager):
+
+    @classmethod
+    def build_wrapper(cls,
+                      backend_files: Sequence[str],
+                      device: str = 'cpu',
+                      input_names: Optional[Sequence[str]] = None,
+                      output_names: Optional[Sequence[str]] = None,
+                      deploy_cfg: Optional[Any] = None,
+                      **kwargs):
+        """Build the wrapper for the backend model.
+
+        Args:
+            backend_files (Sequence[str]): Backend files.
+            device (str, optional): The device info. Defaults to 'cpu'.
+            input_names (Optional[Sequence[str]], optional): input names.
+                Defaults to None.
+            output_names (Optional[Sequence[str]], optional): output names.
+                Defaults to None.
+            deploy_cfg (Optional[Any], optional): The deploy config. Defaults
+                to None.
+        """
+
+        from .wrapper import ORTWrapper
+        return ORTWrapper(
+            onnx_file=backend_files[0],
+            device=device,
+            output_names=output_names)
+
+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): Check whether custom ops exist.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+        ret = importlib.util.find_spec('onnxruntime') is not None
+
+        if ret and with_custom_ops:
+            from .init_plugins import get_ops_path
+            ops_path = get_ops_path()
+            custom_ops_exist = osp.exists(ops_path)
+            ret = ret and custom_ops_exist
+
+        return ret
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                ort_version = pkg_resources.get_distribution(
+                    'onnxruntime').version
+            except Exception:
+                ort_version = 'None'
+            try:
+                ort_gpu_version = pkg_resources.get_distribution(
+                    'onnxruntime-gpu').version
+            except Exception:
+                ort_gpu_version = 'None'
+
+            if ort_gpu_version != 'None':
+                return ort_gpu_version
+            else:
+                return ort_version
+
+    @classmethod
+    def check_env(cls, log_callback: Callable = lambda _: _) -> str:
+        """Check current environment.
+
+        Returns:
+            str: Info about the environment.
+        """
+        import pkg_resources
+
+        try:
+            if cls.is_available():
+                ops_available = cls.is_available(with_custom_ops=True)
+                ops_available = 'Available' \
+                    if ops_available else 'NotAvailable'
+
+                try:
+                    ort_version = pkg_resources.get_distribution(
+                        'onnxruntime').version
+                except Exception:
+                    ort_version = 'None'
+                try:
+                    ort_gpu_version = pkg_resources.get_distribution(
+                        'onnxruntime-gpu').version
+                except Exception:
+                    ort_gpu_version = 'None'
+
+                ort_info = f'ONNXRuntime:\t{ort_version}'
+                log_callback(ort_info)
+                ort_gpu_info = f'ONNXRuntime-gpu:\t{ort_gpu_version}'
+                log_callback(ort_gpu_info)
+                ort_ops_info = f'ONNXRuntime custom ops:\t{ops_available}'
+                log_callback(ort_ops_info)
+
+                info = f'{ort_info}\n{ort_gpu_info}\n{ort_ops_info}'
+            else:
+                info = 'ONNXRuntime:\tNone'
+                log_callback(info)
+        except Exception:
+            info = f'{cls.backend_name}:\tCheckFailed'
+            log_callback(info)
+        return info
+
+    @classmethod
+    def to_backend(cls,
+                   ir_files: Sequence[str],
+                   work_dir: str,
+                   log_level: int = logging.INFO,
+                   device: str = 'cpu',
+                   **kwargs) -> Sequence[str]:
+        """Convert intermediate representation to given backend.
+
+        Args:
+            ir_files (Sequence[str]): The intermediate representation files.
+            work_dir (str): The work directory, backend files and logs should
+                be saved in this directory.
+            log_level (int, optional): The log level. Defaults to logging.INFO.
+            device (str, optional): The device type. Defaults to 'cpu'.
+        Returns:
+            Sequence[str]: Backend files.
+        """
+        return ir_files
diff --git a/mmdeploy/backend/onnxruntime/wrapper.py b/mmdeploy/backend/onnxruntime/wrapper.py
index 0ce23553f..a620ce913 100644
--- a/mmdeploy/backend/onnxruntime/wrapper.py
+++ b/mmdeploy/backend/onnxruntime/wrapper.py
@@ -44,11 +44,11 @@ class ORTWrapper(BaseWrapper):
         logger = get_root_logger()
         if osp.exists(ort_custom_op_path):
             session_options.register_custom_ops_library(ort_custom_op_path)
-            logger.info(f'Successfully loaded onnxruntime custom ops from \
-            {ort_custom_op_path}')
+            logger.info('Successfully loaded onnxruntime custom ops from '
+                        f'{ort_custom_op_path}')
         else:
-            logger.warning(f'The library of onnxruntime custom ops does \
-            not exist: {ort_custom_op_path}')
+            logger.warning('The library of onnxruntime custom ops does '
+                           f'not exist: {ort_custom_op_path}')
         device_id = parse_device_id(device)
         providers = ['CPUExecutionProvider'] \
             if device == 'cpu' else \
diff --git a/mmdeploy/backend/openvino/__init__.py b/mmdeploy/backend/openvino/__init__.py
index 7314e48df..f17ae36c2 100644
--- a/mmdeploy/backend/openvino/__init__.py
+++ b/mmdeploy/backend/openvino/__init__.py
@@ -1,20 +1,16 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import importlib
+from .backend_manager import OpenVINOManager
 
+_BackendManager = OpenVINOManager
 
-def is_available() -> bool:
-    """Checking if OpenVINO is installed.
-
-    Returns:
-        bool: True if OpenVINO is installed.
-    """
-    return importlib.util.find_spec('openvino') is not None
-
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper
 
+__all__ = ['OpenVINOManager']
 if is_available():
     from .onnx2openvino import get_output_model_file
     from .utils import ModelOptimizerOptions
     from .wrapper import OpenVINOWrapper
-    __all__ = [
+    __all__ += [
         'OpenVINOWrapper', 'get_output_model_file', 'ModelOptimizerOptions'
     ]
diff --git a/mmdeploy/backend/openvino/backend_manager.py b/mmdeploy/backend/openvino/backend_manager.py
new file mode 100644
index 000000000..63ebc9286
--- /dev/null
+++ b/mmdeploy/backend/openvino/backend_manager.py
@@ -0,0 +1,101 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import logging
+from typing import Any, Optional, Sequence
+
+from ..base import BACKEND_MANAGERS, BaseBackendManager
+
+
+@BACKEND_MANAGERS.register('openvino')
+class OpenVINOManager(BaseBackendManager):
+
+    @classmethod
+    def build_wrapper(cls,
+                      backend_files: Sequence[str],
+                      device: str = 'cpu',
+                      input_names: Optional[Sequence[str]] = None,
+                      output_names: Optional[Sequence[str]] = None,
+                      deploy_cfg: Optional[Any] = None,
+                      **kwargs):
+        """Build the wrapper for the backend model.
+
+        Args:
+            backend_files (Sequence[str]): Backend files.
+            device (str, optional): The device info. Defaults to 'cpu'.
+            input_names (Optional[Sequence[str]], optional): input names.
+                Defaults to None.
+            output_names (Optional[Sequence[str]], optional): output names.
+                Defaults to None.
+            deploy_cfg (Optional[Any], optional): The deploy config. Defaults
+                to None.
+        """
+        from .wrapper import OpenVINOWrapper
+        return OpenVINOWrapper(
+            ir_model_file=backend_files[0], output_names=output_names)
+
+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check whether custom ops exist.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+        ret = importlib.util.find_spec('openvino') is not None
+
+        return ret
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                return pkg_resources.get_distribution('openvino').version
+            except Exception:
+                return 'None'
+
+    @classmethod
+    def to_backend(cls,
+                   ir_files: Sequence[str],
+                   work_dir: str,
+                   deploy_cfg: Any,
+                   log_level: int = logging.INFO,
+                   device: str = 'cpu',
+                   **kwargs) -> Sequence[str]:
+        """Convert intermediate representation to given backend.
+
+        Args:
+            ir_files (Sequence[str]): The intermediate representation files.
+            work_dir (str): The work directory, backend files and logs should
+                be saved in this directory.
+            deploy_cfg (Any): The deploy config.
+            log_level (int, optional): The log level. Defaults to logging.INFO.
+            device (str, optional): The device type. Defaults to 'cpu'.
+        Returns:
+            Sequence[str]: Backend files.
+        """
+        from . import is_available
+        assert is_available(), \
+            'OpenVINO is not available, please install OpenVINO first.'
+
+        from mmdeploy.apis.openvino import (get_input_info_from_cfg,
+                                            get_mo_options_from_cfg,
+                                            get_output_model_file)
+        from mmdeploy.utils import get_ir_config
+        from .onnx2openvino import from_onnx
+
+        openvino_files = []
+        for onnx_path in ir_files:
+            model_xml_path = get_output_model_file(onnx_path, work_dir)
+            input_info = get_input_info_from_cfg(deploy_cfg)
+            output_names = get_ir_config(deploy_cfg).output_names
+            mo_options = get_mo_options_from_cfg(deploy_cfg)
+            from_onnx(onnx_path, work_dir, input_info, output_names,
+                      mo_options)
+            openvino_files.append(model_xml_path)
+
+        return openvino_files
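
As an illustration, a hedged sketch of calling the manager directly; the deploy config path and file names below are placeholders:

from mmengine import Config

from mmdeploy.backend.openvino import OpenVINOManager

deploy_cfg = Config.fromfile('deploy_cfg.py')  # placeholder deploy config
# Converts each ONNX file and returns the generated OpenVINO .xml model paths.
xml_files = OpenVINOManager.to_backend(
    ir_files=['work_dir/end2end.onnx'],
    work_dir='work_dir',
    deploy_cfg=deploy_cfg)
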
diff --git a/mmdeploy/backend/pplnn/__init__.py b/mmdeploy/backend/pplnn/__init__.py
index 35b28b819..8ed610118 100644
--- a/mmdeploy/backend/pplnn/__init__.py
+++ b/mmdeploy/backend/pplnn/__init__.py
@@ -1,17 +1,11 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import importlib
+from .backend_manager import PPLNNManager
 
+_BackendManager = PPLNNManager
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper
 
-def is_available():
-    """Check whether pplnn is installed.
-
-    Returns:
-        bool: True if pplnn package is installed.
-    """
-    return importlib.util.find_spec('pyppl') is not None
-
-
-__all__ = []
+__all__ = ['PPLNNManager']
 
 if is_available():
     from .utils import register_engines
diff --git a/mmdeploy/backend/pplnn/backend_manager.py b/mmdeploy/backend/pplnn/backend_manager.py
new file mode 100644
index 000000000..fc331d304
--- /dev/null
+++ b/mmdeploy/backend/pplnn/backend_manager.py
@@ -0,0 +1,108 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import logging
+import os.path as osp
+from typing import Any, Optional, Sequence
+
+from ..base import BACKEND_MANAGERS, BaseBackendManager
+
+
+@BACKEND_MANAGERS.register('pplnn')
+class PPLNNManager(BaseBackendManager):
+
+    @classmethod
+    def build_wrapper(cls,
+                      backend_files: Sequence[str],
+                      device: str = 'cpu',
+                      input_names: Optional[Sequence[str]] = None,
+                      output_names: Optional[Sequence[str]] = None,
+                      deploy_cfg: Optional[Any] = None,
+                      **kwargs):
+        """Build the wrapper for the backend model.
+
+        Args:
+            backend_files (Sequence[str]): Backend files.
+            device (str, optional): The device info. Defaults to 'cpu'.
+            input_names (Optional[Sequence[str]], optional): input names.
+                Defaults to None.
+            output_names (Optional[Sequence[str]], optional): output names.
+                Defaults to None.
+            deploy_cfg (Optional[Any], optional): The deploy config. Defaults
+                to None.
+        """
+        from .wrapper import PPLNNWrapper
+        return PPLNNWrapper(
+            onnx_file=backend_files[0],
+            algo_file=backend_files[1] if len(backend_files) > 1 else None,
+            device=device,
+            output_names=output_names)
+
+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check whether custom ops exist.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+        ret = importlib.util.find_spec('pyppl') is not None
+
+        return ret
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                return pkg_resources.get_distribution('pyppl').version
+            except Exception:
+                return 'None'
+
+    @classmethod
+    def to_backend(cls,
+                   ir_files: Sequence[str],
+                   work_dir: str,
+                   deploy_cfg: Any,
+                   log_level: int = logging.INFO,
+                   device: str = 'cpu',
+                   **kwargs) -> Sequence[str]:
+        """Convert intermediate representation to given backend.
+
+        Args:
+            ir_files (Sequence[str]): The intermediate representation files.
+            work_dir (str): The work directory, backend files and logs should
+                be saved in this directory.
+            deploy_cfg (Any): The deploy config.
+            log_level (int, optional): The log level. Defaults to logging.INFO.
+            device (str, optional): The device type. Defaults to 'cpu'.
+        Returns:
+            Sequence[str]: Backend files.
+        """
+        from mmdeploy.utils import get_model_inputs
+        from . import is_available
+        from .onnx2pplnn import from_onnx
+        assert is_available(), \
+            'PPLNN is not available, please install PPLNN first.'
+
+        pplnn_files = []
+        for onnx_path in ir_files:
+            algo_file = onnx_path.replace('.onnx', '.json')
+            model_inputs = get_model_inputs(deploy_cfg)
+            assert 'opt_shape' in model_inputs, 'Expect opt_shape ' \
+                'in deploy config for PPLNN'
+            # PPLNN accepts only one input shape for optimization;
+            # this may change in the future.
+            input_shapes = [model_inputs.opt_shape]
+            algo_prefix = osp.splitext(algo_file)[0]
+            from_onnx(
+                onnx_path,
+                algo_prefix,
+                device=device,
+                input_shapes=input_shapes)
+            pplnn_files += [onnx_path, algo_file]
+
+        return pplnn_files
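
A small usage sketch for the wrapper side; the file names and output names are placeholders:

from mmdeploy.backend.pplnn import PPLNNManager

# The second (algo) file is optional; omit it to run without a tuned algo file.
wrapper = PPLNNManager.build_wrapper(
    backend_files=['work_dir/end2end.onnx', 'work_dir/end2end.json'],
    device='cpu',
    output_names=['dets', 'labels'])  # placeholder output names
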
diff --git a/mmdeploy/backend/rknn/__init__.py b/mmdeploy/backend/rknn/__init__.py
index 0b7ecbd9b..b0a84a090 100644
--- a/mmdeploy/backend/rknn/__init__.py
+++ b/mmdeploy/backend/rknn/__init__.py
@@ -1,17 +1,11 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import importlib
+from .backend_manager import RKNNManager
 
+_BackendManager = RKNNManager
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper
 
-def is_available():
-    """Check whether rknn is installed.
-
-    Returns:
-        bool: True if rknn package is installed.
-    """
-    return importlib.util.find_spec('rknn') is not None
-
-
-__all__ = []
+__all__ = ['RKNNManager']
 
 if is_available():
     from .wrapper import RKNNWrapper
diff --git a/mmdeploy/backend/rknn/backend_manager.py b/mmdeploy/backend/rknn/backend_manager.py
new file mode 100644
index 000000000..406f2f1d8
--- /dev/null
+++ b/mmdeploy/backend/rknn/backend_manager.py
@@ -0,0 +1,155 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import logging
+import os.path as osp
+from typing import Any, Callable, Optional, Sequence
+
+from mmdeploy.utils import get_common_config
+from ..base import BACKEND_MANAGERS, BaseBackendManager
+
+
+@BACKEND_MANAGERS.register('rknn')
+class RKNNManager(BaseBackendManager):
+
+    @classmethod
+    def build_wrapper(cls,
+                      backend_files: Sequence[str],
+                      device: str = 'cpu',
+                      input_names: Optional[Sequence[str]] = None,
+                      output_names: Optional[Sequence[str]] = None,
+                      deploy_cfg: Optional[Any] = None,
+                      **kwargs):
+        """Build the wrapper for the backend model.
+
+        Args:
+            backend_files (Sequence[str]): Backend files.
+            device (str, optional): The device info. Defaults to 'cpu'.
+            input_names (Optional[Sequence[str]], optional): input names.
+                Defaults to None.
+            output_names (Optional[Sequence[str]], optional): output names.
+                Defaults to None.
+            deploy_cfg (Optional[Any], optional): The deploy config. Defaults
+                to None.
+        """
+
+        from .wrapper import RKNNWrapper
+        common_config = get_common_config(deploy_cfg)
+        return RKNNWrapper(
+            model=backend_files[0],
+            common_config=common_config,
+            input_names=input_names,
+            output_names=output_names)
+
+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check whether custom ops exist.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+        ret = False
+        try:
+            ret = importlib.util.find_spec('rknn-toolkit2') is not None
+        except Exception:
+            pass
+        if not ret:
+            try:
+                ret = importlib.util.find_spec('rknn-toolkit') is not None
+            except Exception:
+                pass
+        return ret
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            rknn_version = None
+            rknn2_version = None
+            try:
+                rknn_version = pkg_resources.get_distribution(
+                    'rknn-toolkit').version
+            except Exception:
+                pass
+            try:
+                rknn2_version = pkg_resources.get_distribution(
+                    'rknn-toolkit2').version
+            except Exception:
+                pass
+            if rknn2_version is not None:
+                return rknn2_version
+            elif rknn_version is not None:
+                return rknn_version
+            return 'None'
+
+    @classmethod
+    def check_env(cls, log_callback: Callable = lambda _: _) -> str:
+        """Check current environment.
+
+        Returns:
+            str: Info about the environment.
+        """
+        import pkg_resources
+        try:
+            rknn_version = 'None'
+            rknn2_version = 'None'
+            try:
+                rknn_version = pkg_resources.get_distribution(
+                    'rknn-toolkit').version
+            except Exception:
+                pass
+            try:
+                rknn2_version = pkg_resources.get_distribution(
+                    'rknn-toolkit2').version
+            except Exception:
+                pass
+
+            rknn_info = f'rknn-toolkit:\t{rknn_version}'
+            rknn2_info = f'rknn-toolkit2:\t{rknn2_version}'
+            log_callback(rknn_info)
+            log_callback(rknn2_info)
+
+            info = '\n'.join([rknn_info, rknn2_info])
+
+        except Exception:
+            info = f'{cls.backend_name}:\tCheckFailed'
+            log_callback(info)
+        return info
+
+    @classmethod
+    def to_backend(cls,
+                   ir_files: Sequence[str],
+                   work_dir: str,
+                   deploy_cfg: Any,
+                   log_level: int = logging.INFO,
+                   device: str = 'cpu',
+                   **kwargs) -> Sequence[str]:
+        """Convert intermediate representation to given backend.
+
+        Args:
+            ir_files (Sequence[str]): The intermediate representation files.
+            work_dir (str): The work directory, backend files and logs should
+                be saved in this directory.
+            deploy_cfg (Any): The deploy config.
+            log_level (int, optional): The log level. Defaults to logging.INFO.
+            device (str, optional): The device type. Defaults to 'cpu'.
+        Returns:
+            Sequence[str]: Backend files.
+        """
+        from . import is_available
+        assert is_available(
+        ), 'RKNN is not available, please install RKNN first.'
+
+        from .onnx2rknn import onnx2rknn
+
+        backend_files = []
+        for onnx_path in ir_files:
+            prefix_name = osp.splitext(osp.split(onnx_path)[1])[0]
+            output_path = osp.join(work_dir, prefix_name + '.rknn')
+            onnx2rknn(onnx_path, output_path, deploy_cfg)
+            backend_files.append(output_path)
+
+        return backend_files
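
A hedged sketch of the conversion entry point; the deploy config path is a placeholder:

from mmengine import Config

from mmdeploy.backend.rknn import RKNNManager

deploy_cfg = Config.fromfile('deploy_cfg.py')  # placeholder deploy config
# Writes one <name>.rknn file into work_dir for every input ONNX file.
rknn_files = RKNNManager.to_backend(
    ir_files=['work_dir/end2end.onnx'],
    work_dir='work_dir',
    deploy_cfg=deploy_cfg)
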
diff --git a/mmdeploy/backend/rknn/onnx2rknn.py b/mmdeploy/backend/rknn/onnx2rknn.py
index 63230d7e5..a0d058356 100644
--- a/mmdeploy/backend/rknn/onnx2rknn.py
+++ b/mmdeploy/backend/rknn/onnx2rknn.py
@@ -25,7 +25,7 @@ def rknn_package_info():
 def onnx2rknn(onnx_model: str,
               output_path: str,
               deploy_cfg: Union[str, mmengine.Config],
-              model_cfg: Union[str, mmengine.Config],
+              model_cfg: Optional[Union[str, mmengine.Config]] = None,
               dataset_file: Optional[str] = None,
               **kwargs):
     """Convert ONNX to RKNN.
@@ -55,7 +55,7 @@ def onnx2rknn(onnx_model: str,
     input_size_list = get_backend_config(deploy_cfg).get(
         'input_size_list', None)
     # update norm value
-    if get_rknn_quantization(deploy_cfg) is True:
+    if get_rknn_quantization(deploy_cfg) is True and model_cfg is not None:
         transform = get_normalization(model_cfg)
         common_params.update(
             dict(
diff --git a/mmdeploy/backend/sdk/__init__.py b/mmdeploy/backend/sdk/__init__.py
index e64379dd7..46c750c40 100644
--- a/mmdeploy/backend/sdk/__init__.py
+++ b/mmdeploy/backend/sdk/__init__.py
@@ -1,37 +1,16 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import importlib
-import os
-import sys
+from .backend_manager import SDKManager
 
-from mmdeploy.utils import get_file_path
-
-_is_available = False
-
-module_name = 'mmdeploy_python'
-
-candidates = [
-    f'../../../build/lib/{module_name}.*.so',
-    f'../../../build/bin/*/{module_name}.*.pyd'
-]
-
-lib_path = get_file_path(os.path.dirname(__file__), candidates)
-
-if lib_path:
-    lib_dir = os.path.dirname(lib_path)
-    sys.path.append(lib_dir)
-
-if importlib.util.find_spec(module_name) is not None:
-    _is_available = True
-
-
-def is_available() -> bool:
-    return _is_available
+_BackendManager = SDKManager
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper
 
+__all__ = ['SDKManager']
 
 if is_available():
 
     try:
         from .wrapper import SDKWrapper
-        __all__ = ['SDKWrapper']
+        __all__ += ['SDKWrapper']
     except Exception:
         pass
diff --git a/mmdeploy/backend/sdk/backend_manager.py b/mmdeploy/backend/sdk/backend_manager.py
new file mode 100644
index 000000000..e37762823
--- /dev/null
+++ b/mmdeploy/backend/sdk/backend_manager.py
@@ -0,0 +1,83 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import importlib
+import os.path as osp
+import sys
+from typing import Any, Optional, Sequence
+
+from mmdeploy.utils import get_file_path
+from ..base import BACKEND_MANAGERS, BaseBackendManager
+
+_is_available = False
+
+module_name = 'mmdeploy_python'
+
+candidates = [
+    f'../../../build/lib/{module_name}.*.so',
+    f'../../../build/bin/*/{module_name}.*.pyd'
+]
+
+lib_path = get_file_path(osp.dirname(__file__), candidates)
+
+if lib_path:
+    lib_dir = osp.dirname(lib_path)
+    sys.path.append(lib_dir)
+
+if importlib.util.find_spec(module_name) is not None:
+    _is_available = True
+
+
+@BACKEND_MANAGERS.register('sdk')
+class SDKManager(BaseBackendManager):
+
+    @classmethod
+    def build_wrapper(cls,
+                      backend_files: Sequence[str],
+                      device: str = 'cpu',
+                      input_names: Optional[Sequence[str]] = None,
+                      output_names: Optional[Sequence[str]] = None,
+                      deploy_cfg: Optional[Any] = None,
+                      **kwargs):
+        """Build the wrapper for the backend model.
+
+        Args:
+            backend_files (Sequence[str]): Backend files.
+            device (str, optional): The device info. Defaults to 'cpu'.
+            input_names (Optional[Sequence[str]], optional): input names.
+                Defaults to None.
+            output_names (Optional[Sequence[str]], optional): output names.
+                Defaults to None.
+            deploy_cfg (Optional[Any], optional): The deploy config. Defaults
+                to None.
+        """
+        assert deploy_cfg is not None, \
+            'Building SDKWrapper requires deploy_cfg'
+        from mmdeploy.backend.sdk import SDKWrapper
+        from mmdeploy.utils import SDK_TASK_MAP, get_task_type
+        task_name = SDK_TASK_MAP[get_task_type(deploy_cfg)]['cls_name']
+        return SDKWrapper(
+            model_file=backend_files[0], task_name=task_name, device=device)
+
+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check whether custom ops exist.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        return _is_available
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                return pkg_resources.get_distribution('mmdeploy').version
+            except Exception:
+                return 'None'
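
For illustration, a sketch of building an SDK wrapper; the model path and config are placeholders, and `deploy_cfg` is required so the SDK task name can be resolved:

from mmengine import Config

from mmdeploy.backend.sdk import SDKManager

deploy_cfg = Config.fromfile('deploy_cfg.py')  # placeholder deploy config
if SDKManager.is_available():
    wrapper = SDKManager.build_wrapper(
        backend_files=['work_dir'],  # placeholder SDK model path
        device='cpu',
        deploy_cfg=deploy_cfg)
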
diff --git a/mmdeploy/backend/sdk/export_info.py b/mmdeploy/backend/sdk/export_info.py
index 59ea514b8..c21d23627 100644
--- a/mmdeploy/backend/sdk/export_info.py
+++ b/mmdeploy/backend/sdk/export_info.py
@@ -14,7 +14,7 @@ from mmdeploy.utils.config_utils import get_backend_config
 from mmdeploy.utils.constants import SDK_TASK_MAP as task_map
 
 
-def get_mmdpeloy_version() -> str:
+def get_mmdeploy_version() -> str:
     """Return the version of MMDeploy."""
     import mmdeploy
     version = mmdeploy.__version__
@@ -261,7 +261,7 @@ def get_deploy(deploy_cfg: mmengine.Config, model_cfg: mmengine.Config,
     cls_name = task_map[task]['cls_name']
     _, customs = get_model_name_customs(
         deploy_cfg, model_cfg, work_dir=work_dir, device=device)
-    version = get_mmdpeloy_version()
+    version = get_mmdeploy_version()
     models = get_models(deploy_cfg, model_cfg, work_dir, device)
     return dict(version=version, task=cls_name, models=models, customs=customs)
 
@@ -312,7 +312,7 @@ def get_detail(deploy_cfg: mmengine.Config, model_cfg: mmengine.Config,
         dict: Composed of version, codebase, codebase_config, onnx_config,
             backend_config and calib_config.
     """
-    version = get_mmdpeloy_version()
+    version = get_mmdeploy_version()
     codebase = get_task(deploy_cfg)
     codebase['pth'] = pth
     codebase['config'] = model_cfg.filename
diff --git a/mmdeploy/backend/snpe/__init__.py b/mmdeploy/backend/snpe/__init__.py
index 961b75dc7..5398d0e94 100644
--- a/mmdeploy/backend/snpe/__init__.py
+++ b/mmdeploy/backend/snpe/__init__.py
@@ -1,25 +1,12 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import os.path as osp
-
-from .init_plugins import get_onnx2dlc_path
+from .backend_manager import SNPEManager
 from .onnx2dlc import from_onnx
 
+_BackendManager = SNPEManager
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper
 
-def is_available():
-    """Check whether ncnn and snpe-onnx-to-dlc tool are installed.
-
-    Returns:
-        bool: True if snpe-onnx-to-dlc tool are installed.
-    """
-
-    onnx2dlc = get_onnx2dlc_path()
-    if onnx2dlc is None:
-        return False
-    return osp.exists(onnx2dlc)
-
-
-__all__ = ['from_onnx']
-
+__all__ = ['from_onnx', 'SNPEManager']
 if is_available():
     try:
         from .wrapper import SNPEWrapper
diff --git a/mmdeploy/backend/snpe/backend_manager.py b/mmdeploy/backend/snpe/backend_manager.py
new file mode 100644
index 000000000..b0ddf2dee
--- /dev/null
+++ b/mmdeploy/backend/snpe/backend_manager.py
@@ -0,0 +1,99 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import logging
+import os
+import os.path as osp
+import sys
+from typing import Any, Optional, Sequence
+
+from mmdeploy.utils import get_root_logger
+from ..base import BACKEND_MANAGERS, BaseBackendManager
+
+
+@BACKEND_MANAGERS.register('snpe')
+class SNPEManager(BaseBackendManager):
+
+    @classmethod
+    def build_wrapper(cls,
+                      backend_files: Sequence[str],
+                      device: str = 'cpu',
+                      input_names: Optional[Sequence[str]] = None,
+                      output_names: Optional[Sequence[str]] = None,
+                      deploy_cfg: Optional[Any] = None,
+                      **kwargs):
+        """Build the wrapper for the backend model.
+
+        Args:
+            backend_files (Sequence[str]): Backend files.
+            device (str, optional): The device info. Defaults to 'cpu'.
+            input_names (Optional[Sequence[str]], optional): input names.
+                Defaults to None.
+            output_names (Optional[Sequence[str]], optional): output names.
+                Defaults to None.
+            deploy_cfg (Optional[Any], optional): The deploy config. Defaults
+                to None.
+        """
+        from .wrapper import SNPEWrapper
+        uri = kwargs.get('uri', None)
+        return SNPEWrapper(
+            dlc_file=backend_files[0], uri=uri, output_names=output_names)
+
+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check whether custom ops exist.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        from .onnx2dlc import get_onnx2dlc_path
+        onnx2dlc = get_onnx2dlc_path()
+        if onnx2dlc is None:
+            return False
+        return osp.exists(onnx2dlc)
+
+    @classmethod
+    def to_backend(cls,
+                   ir_files: Sequence[str],
+                   work_dir: str,
+                   log_level: int = logging.INFO,
+                   device: str = 'cpu',
+                   uri: str = '',
+                   **kwargs) -> Sequence[str]:
+        """Convert intermediate representation to given backend.
+
+        Args:
+            ir_files (Sequence[str]): The intermediate representation files.
+            work_dir (str): The work directory, backend files and logs should
+                be saved in this directory.
+            log_level (int, optional): The log level. Defaults to logging.INFO.
+            device (str, optional): The device type. Defaults to 'cpu'.
+        Returns:
+            Sequence[str]: Backend files.
+        """
+        from . import is_available
+        logger = get_root_logger()
+
+        if not is_available():
+            logger.error('snpe support is not available, please check\n'
+                         '1) `snpe-onnx-to-dlc` exists in `PATH`\n'
+                         '2) SNPE only supports\n'
+                         'Ubuntu 18.04')
+            sys.exit(1)
+
+        from mmdeploy.apis.snpe import get_env_key, get_output_model_file
+        from .onnx2dlc import from_onnx
+
+        if get_env_key() not in os.environ:
+            os.environ[get_env_key()] = uri
+
+        backend_files = []
+        for onnx_path in ir_files:
+            dlc_path = get_output_model_file(onnx_path, work_dir)
+            onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]
+            from_onnx(onnx_path, osp.join(work_dir, onnx_name))
+            backend_files += [dlc_path]
+
+        return backend_files
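
A hedged sketch of converting ONNX files to DLC; the URI is a placeholder for a remote inference device and may be left empty for local conversion:

from mmdeploy.backend.snpe import SNPEManager

dlc_files = SNPEManager.to_backend(
    ir_files=['work_dir/end2end.onnx'],
    work_dir='work_dir',
    uri='')  # placeholder; set to the remote device URI if one is used
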
diff --git a/mmdeploy/backend/tensorrt/__init__.py b/mmdeploy/backend/tensorrt/__init__.py
index c08412846..6587a3ff8 100644
--- a/mmdeploy/backend/tensorrt/__init__.py
+++ b/mmdeploy/backend/tensorrt/__init__.py
@@ -1,35 +1,18 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # flake8: noqa
-import importlib
-import os.path as osp
+from .backend_manager import TensorRTManager
+from .init_plugins import load_tensorrt_plugin
 
-from .init_plugins import get_ops_path, load_tensorrt_plugin
-
-
-def is_available():
-    """Check whether TensorRT package is installed and cuda is available.
-
-    Returns:
-        bool: True if TensorRT package is installed and cuda is available.
-    """
-
-    return importlib.util.find_spec('tensorrt') is not None
-
-
-def is_custom_ops_available():
-    """Check whether TensorRT custom ops are installed.
-
-    Returns:
-        bool: True if TensorRT custom ops are compiled.
-    """
-    tensorrt_op_path = get_ops_path()
-    return osp.exists(tensorrt_op_path)
+_BackendManager = TensorRTManager
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper
 
+__all__ = ['TensorRTManager']
 
 if is_available():
     from .utils import from_onnx, load, save
 
-    __all__ = ['from_onnx', 'save', 'load', 'load_tensorrt_plugin']
+    __all__ += ['from_onnx', 'save', 'load', 'load_tensorrt_plugin']
 
     try:
         # import wrapper if pytorch is available
diff --git a/mmdeploy/backend/tensorrt/backend_manager.py b/mmdeploy/backend/tensorrt/backend_manager.py
new file mode 100644
index 000000000..912d9cf4f
--- /dev/null
+++ b/mmdeploy/backend/tensorrt/backend_manager.py
@@ -0,0 +1,138 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import logging
+import os.path as osp
+from typing import Any, Callable, Optional, Sequence
+
+from ..base import BACKEND_MANAGERS, BaseBackendManager
+
+
+@BACKEND_MANAGERS.register('tensorrt')
+class TensorRTManager(BaseBackendManager):
+
+    @classmethod
+    def build_wrapper(cls,
+                      backend_files: Sequence[str],
+                      device: str = 'cpu',
+                      input_names: Optional[Sequence[str]] = None,
+                      output_names: Optional[Sequence[str]] = None,
+                      deploy_cfg: Optional[Any] = None,
+                      **kwargs):
+        """Build the wrapper for the backend model.
+
+        Args:
+            backend_files (Sequence[str]): Backend files.
+            device (str, optional): The device info. Defaults to 'cpu'.
+            input_names (Optional[Sequence[str]], optional): input names.
+                Defaults to None.
+            output_names (Optional[Sequence[str]], optional): output names.
+                Defaults to None.
+            deploy_cfg (Optional[Any], optional): The deploy config. Defaults
+                to None.
+        """
+
+        from .wrapper import TRTWrapper
+        return TRTWrapper(engine=backend_files[0], output_names=output_names)
+
+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check whether custom ops exist.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+        ret = importlib.util.find_spec('tensorrt') is not None
+
+        if ret and with_custom_ops:
+            from .init_plugins import get_ops_path
+            ops_path = get_ops_path()
+            custom_ops_exist = osp.exists(ops_path)
+            ret = ret and custom_ops_exist
+
+        return ret
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                return pkg_resources.get_distribution('tensorrt').version
+            except Exception:
+                return 'None'
+
+    @classmethod
+    def check_env(cls, log_callback: Callable = lambda _: _) -> str:
+        """Check current environment.
+
+        Returns:
+            str: Info about the environment.
+        """
+        info = super().check_env(log_callback=log_callback)
+        available = cls.is_available()
+        ops_available = cls.is_available(with_custom_ops=True)
+        ops_available = 'Available' if ops_available else 'NotAvailable'
+
+        if available:
+            ops_info = f'tensorrt custom ops:\t{ops_available}'
+            log_callback(ops_info)
+            info = f'{info}\n{ops_info}'
+        return info
+
+    @classmethod
+    def to_backend(cls,
+                   ir_files: Sequence[str],
+                   work_dir: str,
+                   deploy_cfg: Any,
+                   log_level: int = logging.INFO,
+                   device: str = 'cpu',
+                   **kwargs) -> Sequence[str]:
+        """Convert intermediate representation to given backend.
+
+        Args:
+            ir_files (Sequence[str]): The intermediate representation files.
+            work_dir (str): The work directory, backend files and logs should
+                be saved in this directory.
+            deploy_cfg (Any): The deploy config.
+            log_level (int, optional): The log level. Defaults to logging.INFO.
+            device (str, optional): The device type. Defaults to 'cpu'.
+        Returns:
+            Sequence[str]: Backend files.
+        """
+
+        from mmdeploy.utils import get_model_inputs, get_partition_config
+        model_params = get_model_inputs(deploy_cfg)
+        partition_cfgs = get_partition_config(deploy_cfg)
+        assert len(model_params) == len(ir_files)
+
+        from . import is_available
+        assert is_available(), (
+            'TensorRT is not available,'
+            ' please install TensorRT and build TensorRT custom ops first.')
+
+        from .onnx2tensorrt import onnx2tensorrt
+        backend_files = []
+        for model_id, model_param, onnx_path in zip(
+                range(len(ir_files)), model_params, ir_files):
+            onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]
+            save_file = model_param.get('save_file', onnx_name + '.engine')
+
+            partition_type = 'end2end' if partition_cfgs is None \
+                else onnx_name
+            onnx2tensorrt(
+                work_dir,
+                save_file,
+                model_id,
+                deploy_cfg,
+                onnx_path,
+                device=device,
+                partition_type=partition_type)
+
+            backend_files.append(osp.join(work_dir, save_file))
+
+        return backend_files
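
A usage sketch under the assumption that TensorRT and its custom ops are installed; the paths are placeholders:

from mmengine import Config

from mmdeploy.backend.tensorrt import TensorRTManager

deploy_cfg = Config.fromfile('deploy_cfg.py')  # placeholder deploy config
engine_files = TensorRTManager.to_backend(
    ir_files=['work_dir/end2end.onnx'],
    work_dir='work_dir',
    deploy_cfg=deploy_cfg,
    device='cuda:0')
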
diff --git a/mmdeploy/backend/tensorrt/calib_utils.py b/mmdeploy/backend/tensorrt/calib_utils.py
index fdd1fda18..1c89366fa 100644
--- a/mmdeploy/backend/tensorrt/calib_utils.py
+++ b/mmdeploy/backend/tensorrt/calib_utils.py
@@ -1,7 +1,6 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-from typing import Dict, Sequence, Union
+from typing import Any, Dict, Sequence, Union
 
-import h5py
 import numpy as np
 import pycuda.autoinit  # noqa:F401
 import pycuda.driver as cuda
@@ -25,13 +24,14 @@ class HDF5Calibrator(trt.IInt8Calibrator):
 
     def __init__(
             self,
-            calib_file: Union[str, h5py.File],
+            calib_file: Union[str, Any],
             input_shapes: Dict[str, Sequence[int]],
             model_type: str = 'end2end',
             device_id: int = 0,
             algorithm: trt.CalibrationAlgoType = DEFAULT_CALIBRATION_ALGORITHM,
             **kwargs):
         super().__init__()
+        import h5py
 
         if isinstance(calib_file, str):
             calib_file = h5py.File(calib_file, mode='r')
diff --git a/mmdeploy/backend/tensorrt/utils.py b/mmdeploy/backend/tensorrt/utils.py
index 088abaf6c..7ad190428 100644
--- a/mmdeploy/backend/tensorrt/utils.py
+++ b/mmdeploy/backend/tensorrt/utils.py
@@ -140,14 +140,15 @@ def from_onnx(onnx_model: Union[str, onnx.ModelProto],
         >>>             })
     """
 
-    import os
-    old_cuda_device = os.environ.get('CUDA_DEVICE', None)
-    os.environ['CUDA_DEVICE'] = str(device_id)
-    import pycuda.autoinit  # noqa:F401
-    if old_cuda_device is not None:
-        os.environ['CUDA_DEVICE'] = old_cuda_device
-    else:
-        os.environ.pop('CUDA_DEVICE')
+    if device_id != 0:
+        import os
+        old_cuda_device = os.environ.get('CUDA_DEVICE', None)
+        os.environ['CUDA_DEVICE'] = str(device_id)
+        import pycuda.autoinit  # noqa:F401
+        if old_cuda_device is not None:
+            os.environ['CUDA_DEVICE'] = old_cuda_device
+        else:
+            os.environ.pop('CUDA_DEVICE')
 
     load_tensorrt_plugin()
     # create builder and network
diff --git a/mmdeploy/backend/torchscript/__init__.py b/mmdeploy/backend/torchscript/__init__.py
index 9179ef3da..7cb34c4a6 100644
--- a/mmdeploy/backend/torchscript/__init__.py
+++ b/mmdeploy/backend/torchscript/__init__.py
@@ -1,18 +1,13 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # flake8: noqa
+from .backend_manager import TorchScriptManager
 from .init_plugins import get_ops_path, ops_available
 
+_BackendManager = TorchScriptManager
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper
 
-def is_available():
-    """Torchscript available.
-
-    Returns:
-        bool: Always True.
-    """
-    return True
-
-
-__all__ = ['get_ops_path', 'ops_available']
+__all__ = ['get_ops_path', 'ops_available', 'TorchScriptManager']
 
 if is_available():
     from .wrapper import TorchscriptWrapper
diff --git a/mmdeploy/backend/torchscript/backend_manager.py b/mmdeploy/backend/torchscript/backend_manager.py
new file mode 100644
index 000000000..91572f48d
--- /dev/null
+++ b/mmdeploy/backend/torchscript/backend_manager.py
@@ -0,0 +1,104 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import logging
+from typing import Any, Callable, Optional, Sequence
+
+from ..base import BACKEND_MANAGERS, BaseBackendManager
+
+
+@BACKEND_MANAGERS.register('torchscript')
+class TorchScriptManager(BaseBackendManager):
+
+    @classmethod
+    def build_wrapper(cls,
+                      backend_files: Sequence[str],
+                      device: str = 'cpu',
+                      input_names: Optional[Sequence[str]] = None,
+                      output_names: Optional[Sequence[str]] = None,
+                      deploy_cfg: Optional[Any] = None,
+                      **kwargs):
+        """Build the wrapper for the backend model.
+
+        Args:
+            backend_files (Sequence[str]): Backend files.
+            device (str, optional): The device info. Defaults to 'cpu'.
+            input_names (Optional[Sequence[str]], optional): input names.
+                Defaults to None.
+            output_names (Optional[Sequence[str]], optional): output names.
+                Defaults to None.
+            deploy_cfg (Optional[Any], optional): The deploy config. Defaults
+                to None.
+        """
+        from .wrapper import TorchscriptWrapper
+        return TorchscriptWrapper(
+            model=backend_files[0],
+            input_names=input_names,
+            output_names=output_names)
+
+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check whether custom ops exist.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+        ret = importlib.util.find_spec('torch') is not None
+
+        if ret and with_custom_ops:
+            from .init_plugins import ops_available
+            ret = ret and ops_available()
+
+        return ret
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                return pkg_resources.get_distribution('torch').version
+            except Exception:
+                return 'None'
+
+    @classmethod
+    def check_env(cls, log_callback: Callable = lambda _: _) -> str:
+        """Check current environment.
+
+        Returns:
+            str: Info about the environment.
+        """
+        info = super().check_env(log_callback=log_callback)
+        available = cls.is_available()
+        ops_available = cls.is_available(with_custom_ops=True)
+        ops_available = 'Available' if ops_available else 'NotAvailable'
+
+        if available:
+            ops_info = f'torchscript custom ops:\t{ops_available}'
+            log_callback(ops_info)
+            info = f'{info}\n{ops_info}'
+
+        return info
+
+    @classmethod
+    def to_backend(cls,
+                   ir_files: Sequence[str],
+                   work_dir: str,
+                   log_level: int = logging.INFO,
+                   device: str = 'cpu',
+                   **kwargs) -> Sequence[str]:
+        """Convert intermediate representation to given backend.
+
+        Args:
+            ir_files (Sequence[str]): The intermediate representation files.
+            work_dir (str): The work directory, backend files and logs should
+                be saved in this directory.
+            log_level (int, optional): The log level. Defaults to logging.INFO.
+            device (str, optional): The device type. Defaults to 'cpu'.
+        Returns:
+            Sequence[str]: Backend files.
+        """
+        return ir_files
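
A short sketch; since TorchScript files need no conversion, `to_backend` is a pass-through and the wrapper is built from the .pt file directly (file and tensor names are placeholders):

from mmdeploy.backend.torchscript import TorchScriptManager

ts_files = TorchScriptManager.to_backend(
    ir_files=['work_dir/end2end.pt'], work_dir='work_dir')
wrapper = TorchScriptManager.build_wrapper(
    backend_files=ts_files,
    input_names=['input'],    # placeholder names
    output_names=['output'])
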
diff --git a/mmdeploy/backend/torchscript/init_plugins.py b/mmdeploy/backend/torchscript/init_plugins.py
index ec0371b59..3a55a8894 100644
--- a/mmdeploy/backend/torchscript/init_plugins.py
+++ b/mmdeploy/backend/torchscript/init_plugins.py
@@ -1,5 +1,4 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import glob
 import os.path as osp
 
 
@@ -9,14 +8,14 @@ def get_ops_path() -> str:
     Returns:
         str: A path of the torchscript extension library.
     """
-    wildcard = osp.abspath(
-        osp.join(
-            osp.dirname(__file__),
-            '../../../build/lib/libmmdeploy_torchscript_ops.so'))
-
-    paths = glob.glob(wildcard)
-    lib_path = paths[0] if len(paths) > 0 else ''
-    return lib_path
+    from mmdeploy.utils import get_file_path
+    candidates = [
+        '../../lib/libmmdeploy_torchscript_ops.so',
+        '../../lib/mmdeploy_torchscript_ops.dll',
+        '../../../build/lib/libmmdeploy_torchscript_ops.so',
+        '../../../build/bin/*/mmdeploy_torchscript_ops.dll'
+    ]
+    return get_file_path(osp.dirname(__file__), candidates)
 
 
 def ops_available() -> bool:
diff --git a/mmdeploy/backend/tvm/__init__.py b/mmdeploy/backend/tvm/__init__.py
index 24e7eccdf..8389b527a 100644
--- a/mmdeploy/backend/tvm/__init__.py
+++ b/mmdeploy/backend/tvm/__init__.py
@@ -1,16 +1,11 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import importlib
 import sys
 
+from .backend_manager import TVMManager
 
-def is_available() -> bool:
-    """Check whether tvm package is installed.
-
-    Returns:
-        bool: True if tvm package is installed.
-    """
-
-    return importlib.util.find_spec('tvm') is not None
+_BackendManager = TVMManager
+is_available = _BackendManager.is_available
+build_wrapper = _BackendManager.build_wrapper
 
 
 def get_library_ext() -> str:
@@ -26,12 +21,14 @@ def get_library_ext() -> str:
         return '.so'
 
 
+__all__ = ['TVMManager']
+
 if is_available():
     from .onnx2tvm import from_onnx
     from .quantize import HDF5Dataset
     from .tuner import build_tvm_tuner
 
-    __all__ = ['from_onnx', 'build_tvm_tuner', 'HDF5Dataset', 'TVMManager']
+    __all__ += ['from_onnx', 'build_tvm_tuner', 'HDF5Dataset']
 
     try:
         # import wrapper if pytorch is available
diff --git a/mmdeploy/backend/tvm/backend_manager.py b/mmdeploy/backend/tvm/backend_manager.py
new file mode 100644
index 000000000..f9e99f18f
--- /dev/null
+++ b/mmdeploy/backend/tvm/backend_manager.py
@@ -0,0 +1,135 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import logging
+import os.path as osp
+from typing import Any, Optional, Sequence
+
+from ..base import BACKEND_MANAGERS, BaseBackendManager
+
+
+@BACKEND_MANAGERS.register('tvm')
+class TVMManager(BaseBackendManager):
+
+    @classmethod
+    def build_wrapper(cls,
+                      backend_files: Sequence[str],
+                      device: str = 'cpu',
+                      input_names: Optional[Sequence[str]] = None,
+                      output_names: Optional[Sequence[str]] = None,
+                      deploy_cfg: Optional[Any] = None,
+                      **kwargs):
+        """Build the wrapper for the backend model.
+
+        Args:
+            backend_files (Sequence[str]): Backend files.
+            device (str, optional): The device info. Defaults to 'cpu'.
+            input_names (Optional[Sequence[str]], optional): input names.
+                Defaults to None.
+            output_names (Optional[Sequence[str]], optional): output names.
+                Defaults to None.
+            deploy_cfg (Optional[Any], optional): The deploy config. Defaults
+                to None.
+        """
+        from .wrapper import TVMWrapper
+        bytecode = None if len(backend_files) <= 1 else backend_files[1]
+        return TVMWrapper(
+            backend_files[0],
+            bytecode=bytecode,
+            output_names=output_names,
+            device=device)
+
+    @classmethod
+    def is_available(cls, with_custom_ops: bool = False) -> bool:
+        """Check whether backend is installed.
+
+        Args:
+            with_custom_ops (bool): check whether custom ops exist.
+        Returns:
+            bool: True if backend package is installed.
+        """
+        import importlib
+        ret = importlib.util.find_spec('tvm') is not None
+
+        return ret
+
+    @classmethod
+    def get_version(cls) -> str:
+        """Get the version of the backend."""
+        if not cls.is_available():
+            return 'None'
+        else:
+            import pkg_resources
+            try:
+                return pkg_resources.get_distribution('tvm').version
+            except Exception:
+                return 'None'
+
+    @classmethod
+    def to_backend(cls,
+                   ir_files: Sequence[str],
+                   work_dir: str,
+                   deploy_cfg: Any,
+                   log_level: int = logging.INFO,
+                   device: str = 'cpu',
+                   **kwargs) -> Sequence[str]:
+        """Convert intermediate representation to given backend.
+
+        Args:
+            ir_files (Sequence[str]): The intermediate representation files.
+            work_dir (str): The work directory, backend files and logs should
+                be saved in this directory.
+            deploy_cfg (Any): The deploy config.
+            log_level (int, optional): The log level. Defaults to logging.INFO.
+            device (str, optional): The device type. Defaults to 'cpu'.
+        Returns:
+            Sequence[str]: Backend files.
+        """
+
+        import copy
+
+        from mmdeploy.apis.tvm import get_library_ext
+        from mmdeploy.utils import (get_calib_filename, get_model_inputs,
+                                    get_partition_config)
+        from .onnx2tvm import from_onnx
+        model_inputs = get_model_inputs(deploy_cfg)
+
+        if device.startswith('cuda'):
+            target = 'cuda'
+        else:
+            target = 'llvm'
+
+        lib_ext = get_library_ext()
+
+        tvm_files = []
+        for model_id, onnx_path in enumerate(ir_files):
+            model_input = copy.deepcopy(model_inputs[model_id])
+            use_vm = model_input.get('use_vm', False)
+            if 'target' not in model_input['tuner']:
+                model_input['tuner']['target'] = target
+            lib_path = osp.splitext(onnx_path)[0] + lib_ext
+            code_path = osp.splitext(
+                onnx_path)[0] + '.code' if use_vm else None
+            model_input['output_file'] = lib_path
+            model_input['onnx_model'] = onnx_path
+            model_input['bytecode_file'] = code_path
+
+            # create calibration dataset
+            if 'qconfig' in model_input:
+                from .quantize import HDF5Dataset
+                calib_filename = get_calib_filename(deploy_cfg)
+                calib_path = osp.join(work_dir, calib_filename)
+                partition_cfgs = get_partition_config(deploy_cfg)
+                onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]
+                partition_type = 'end2end' if partition_cfgs is None \
+                    else onnx_name
+                dataset = HDF5Dataset(
+                    calib_path,
+                    model_input['shape'],
+                    model_type=partition_type,
+                    device=target)
+                model_input['dataset'] = dataset()
+
+            from_onnx(**model_input)
+
+            tvm_files += [lib_path, code_path]
+
+        return tvm_files
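
A hedged sketch of invoking the TVM conversion; the deploy config (expected to carry per-model `shape`/`tuner` entries) is a placeholder:

from mmengine import Config

from mmdeploy.backend.tvm import TVMManager

deploy_cfg = Config.fromfile('deploy_cfg.py')  # placeholder deploy config
tvm_files = TVMManager.to_backend(
    ir_files=['work_dir/end2end.onnx'],
    work_dir='work_dir',
    deploy_cfg=deploy_cfg,
    device='cuda')
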
diff --git a/mmdeploy/codebase/base/backend_model.py b/mmdeploy/codebase/base/backend_model.py
index 49a49bf3c..327621eff 100644
--- a/mmdeploy/codebase/base/backend_model.py
+++ b/mmdeploy/codebase/base/backend_model.py
@@ -6,8 +6,7 @@ import mmengine
 from mmengine.model import BaseModel
 from torch import nn
 
-from mmdeploy.utils import (SDK_TASK_MAP, Backend, get_backend_config,
-                            get_common_config, get_ir_config, get_task_type)
+from mmdeploy.utils import Backend, get_ir_config
 
 
 class BaseBackendModel(BaseModel, metaclass=ABCMeta):
@@ -58,88 +57,13 @@ class BaseBackendModel(BaseModel, metaclass=ABCMeta):
                 names from the model.
             deploy_cfg: Deployment config file.
         """
-        if backend == Backend.ONNXRUNTIME:
-            from mmdeploy.backend.onnxruntime import ORTWrapper
-            return ORTWrapper(
-                onnx_file=backend_files[0],
-                device=device,
-                output_names=output_names)
-        elif backend == Backend.TENSORRT:
-            from mmdeploy.backend.tensorrt import TRTWrapper
-            return TRTWrapper(
-                engine=backend_files[0], output_names=output_names)
-        elif backend == Backend.PPLNN:
-            from mmdeploy.backend.pplnn import PPLNNWrapper
-            return PPLNNWrapper(
-                onnx_file=backend_files[0],
-                algo_file=backend_files[1] if len(backend_files) > 1 else None,
-                device=device,
-                output_names=output_names)
-        elif backend == Backend.NCNN:
-            from mmdeploy.backend.ncnn import NCNNWrapper
-
-            # For unittest deploy_config will not pass into _build_wrapper
-            # function.
-            if deploy_cfg:
-                backend_config = get_backend_config(deploy_cfg)
-                use_vulkan = backend_config.get('use_vulkan', False)
-            else:
-                use_vulkan = False
-            return NCNNWrapper(
-                param_file=backend_files[0],
-                bin_file=backend_files[1],
-                output_names=output_names,
-                use_vulkan=use_vulkan)
-        elif backend == Backend.OPENVINO:
-            from mmdeploy.backend.openvino import OpenVINOWrapper
-            return OpenVINOWrapper(
-                ir_model_file=backend_files[0], output_names=output_names)
-        elif backend == Backend.SDK:
-            assert deploy_cfg is not None, \
-                'Building SDKWrapper requires deploy_cfg'
-            from mmdeploy.backend.sdk.wrapper import SDKWrapper
-            task_name = SDK_TASK_MAP[get_task_type(deploy_cfg)]['cls_name']
-            return SDKWrapper(
-                model_file=backend_files[0],
-                task_name=task_name,
-                device=device)
-        elif backend == Backend.TORCHSCRIPT:
-            from mmdeploy.backend.torchscript import TorchscriptWrapper
-            return TorchscriptWrapper(
-                model=backend_files[0],
-                input_names=input_names,
-                output_names=output_names)
-        elif backend == Backend.RKNN:
-            from mmdeploy.backend.rknn import RKNNWrapper
-            common_config = get_common_config(deploy_cfg)
-            return RKNNWrapper(
-                model=backend_files[0],
-                common_config=common_config,
-                input_names=input_names,
-                output_names=output_names)
-        elif backend == Backend.ASCEND:
-            from mmdeploy.backend.ascend import AscendWrapper
-            return AscendWrapper(model=backend_files[0], device=device)
-        elif backend == Backend.SNPE:
-            from mmdeploy.backend.snpe import SNPEWrapper
-            uri = None
-            if 'uri' in kwargs:
-                uri = kwargs['uri']
-            return SNPEWrapper(
-                dlc_file=backend_files[0], uri=uri, output_names=output_names)
-        elif backend == Backend.COREML:
-            from mmdeploy.backend.coreml import CoreMLWrapper
-            return CoreMLWrapper(model_file=backend_files[0])
-        elif backend == Backend.TVM:
-            from mmdeploy.backend.tvm import TVMWrapper
-            bytecode = None if len(backend_files) == 1 else backend_files[1]
-            return TVMWrapper(
-                lib=backend_files[0],
-                output_names=output_names,
-                bytecode=bytecode,
-                device=device)
-        else:
-            raise NotImplementedError(f'Unknown backend type: {backend.value}')
+        from mmdeploy.backend.base import get_backend_manager
+        backend_mgr = get_backend_manager(backend.value)
+        if backend_mgr is None:
+            raise NotImplementedError(
+                f'Unsupported backend type: {backend.value}')
+        return backend_mgr.build_wrapper(backend_files, device, input_names,
+                                         output_names, deploy_cfg, **kwargs)
 
     def destroy(self):
         if hasattr(self, 'wrapper') and hasattr(self.wrapper, 'destroy'):
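
To make the dispatch above concrete, a minimal sketch of the registration pattern a new backend would follow; 'mybackend' and the class body are made up for illustration:

from mmdeploy.backend.base import BACKEND_MANAGERS, BaseBackendManager


@BACKEND_MANAGERS.register('mybackend')  # hypothetical backend name
class MyBackendManager(BaseBackendManager):

    @classmethod
    def build_wrapper(cls, backend_files, device='cpu', input_names=None,
                      output_names=None, deploy_cfg=None, **kwargs):
        # Return a wrapper built from backend_files here.
        raise NotImplementedError

    @classmethod
    def is_available(cls, with_custom_ops=False) -> bool:
        return False  # report whether the backend package is installed
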
diff --git a/mmdeploy/codebase/mmdet/deploy/object_detection_model.py b/mmdeploy/codebase/mmdet/deploy/object_detection_model.py
index b0883475a..d0de435bf 100644
--- a/mmdeploy/codebase/mmdet/deploy/object_detection_model.py
+++ b/mmdeploy/codebase/mmdet/deploy/object_detection_model.py
@@ -239,14 +239,14 @@ class End2EndModel(BaseBackendModel):
                 masks = batch_masks[i]
                 img_h, img_w = img_metas[i]['img_shape'][:2]
                 ori_h, ori_w = img_metas[i]['ori_shape'][:2]
-                export_postprocess_mask = True
+                export_postprocess_mask = False
                 if self.deploy_cfg is not None:
 
                     mmdet_deploy_cfg = get_post_processing_params(
                         self.deploy_cfg)
                     # this flag enable postprocess when export.
                     export_postprocess_mask = mmdet_deploy_cfg.get(
-                        'export_postprocess_mask', True)
+                        'export_postprocess_mask', False)
                 if not export_postprocess_mask:
                     masks = End2EndModel.postprocessing_masks(
                         dets[:, :4], masks, ori_w, ori_h, self.device)
diff --git a/mmdeploy/codebase/mmdet/models/roi_heads/fcn_mask_head.py b/mmdeploy/codebase/mmdet/models/roi_heads/fcn_mask_head.py
index 9371ff552..74eb731c8 100644
--- a/mmdeploy/codebase/mmdet/models/roi_heads/fcn_mask_head.py
+++ b/mmdeploy/codebase/mmdet/models/roi_heads/fcn_mask_head.py
@@ -64,7 +64,8 @@ def fcn_mask_head__predict_by_feat(self,
     # grid sample is not supported by most engine
     # so we add a flag to disable it.
     mmdet_params = get_post_processing_params(ctx.cfg)
-    export_postprocess_mask = mmdet_params.get('export_postprocess_mask', True)
+    export_postprocess_mask = mmdet_params.get('export_postprocess_mask',
+                                               False)
     if not export_postprocess_mask:
         return mask_pred
 
diff --git a/mmdeploy/utils/env.py b/mmdeploy/utils/env.py
index 8e75c1b45..0d56ef048 100644
--- a/mmdeploy/utils/env.py
+++ b/mmdeploy/utils/env.py
@@ -15,7 +15,10 @@ def get_library_version(lib):
     """
     try:
         lib = importlib.import_module(lib)
-        version = lib.__version__
+        if hasattr(lib, '__version__'):
+            version = lib.__version__
+        else:
+            version = None
     except Exception:
         version = None
 
diff --git a/mmdeploy/utils/test.py b/mmdeploy/utils/test.py
index b6a943b8e..835f5b8b5 100644
--- a/mmdeploy/utils/test.py
+++ b/mmdeploy/utils/test.py
@@ -4,7 +4,6 @@ import os.path as osp
 import random
 import string
 import tempfile
-import warnings
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
 
 import numpy as np
@@ -35,47 +34,13 @@ def backend_checker(backend: Backend, require_plugin: bool = False):
             will also check if the backend plugin has been compiled. Default
             to `False`.
     """
-    is_custom_ops_available = None
-    if backend == Backend.ONNXRUNTIME:
-        from mmdeploy.apis.onnxruntime import is_available
-        if require_plugin:
-            from mmdeploy.apis.onnxruntime import is_custom_ops_available
-    elif backend == Backend.TENSORRT:
-        from mmdeploy.apis.tensorrt import is_available
-        if require_plugin:
-            from mmdeploy.apis.tensorrt import is_custom_ops_available
-    elif backend == Backend.PPLNN:
-        from mmdeploy.apis.pplnn import is_available
-    elif backend == Backend.NCNN:
-        from mmdeploy.apis.ncnn import is_available
-        if require_plugin:
-            from mmdeploy.apis.ncnn import is_custom_ops_available
-    elif backend == Backend.OPENVINO:
-        from mmdeploy.apis.openvino import is_available
-    elif backend == Backend.RKNN:
-        # device not require as backend is not really running
-        from mmdeploy.apis.rknn import is_available
-    elif backend == Backend.ASCEND:
-        from mmdeploy.apis.ascend import is_available
-    elif backend == Backend.TVM:
-        from mmdeploy.apis.ascend import is_available
-    else:
-        warnings.warn('The backend checker is not available')
-        return
+    from mmdeploy.backend.base import get_backend_manager
+
+    backend_mgr = get_backend_manager(backend.value)
+    result = backend_mgr.is_available(with_custom_ops=require_plugin)
 
     checker = pytest.mark.skipif(
-        not is_available(), reason=f'{backend.value} package is not available')
-    if require_plugin and is_custom_ops_available is not None:
-        plugin_checker = pytest.mark.skipif(
-            not is_custom_ops_available(),
-            reason=f'{backend.value} plugin is not available')
-
-        def double_checker(func):
-            func = checker(func)
-            func = plugin_checker(func)
-            return func
-
-        return double_checker
+        not result, reason=f'{backend.value} package is not available')
 
     return checker
 
@@ -90,40 +55,13 @@ def check_backend(backend: Backend, require_plugin: bool = False):
             will also check if the backend plugin has been compiled. Default
             to `False`.
     """
-    is_custom_ops_available = None
-    if backend == Backend.ONNXRUNTIME:
-        from mmdeploy.apis.onnxruntime import is_available
-        if require_plugin:
-            from mmdeploy.apis.onnxruntime import is_custom_ops_available
-    elif backend == Backend.TENSORRT:
-        from mmdeploy.apis.tensorrt import is_available
-        if require_plugin:
-            from mmdeploy.apis.tensorrt import is_custom_ops_available
-    elif backend == Backend.PPLNN:
-        from mmdeploy.apis.pplnn import is_available
-    elif backend == Backend.NCNN:
-        from mmdeploy.apis.ncnn import is_available
-        if require_plugin:
-            from mmdeploy.apis.ncnn import is_custom_ops_available
-    elif backend == Backend.OPENVINO:
-        from mmdeploy.apis.openvino import is_available
-    elif backend == Backend.TORCHSCRIPT:
-        from mmdeploy.backend.torchscript import ops_available as is_available
-    elif backend == Backend.RKNN:
-        from mmdeploy.backend.rknn import is_available
-    elif backend == Backend.ASCEND:
-        from mmdeploy.backend.ascend import is_available
-    elif backend == Backend.TVM:
-        from mmdeploy.backend.ascend import is_available
-    else:
-        warnings.warn('The backend checker is not available')
-        return
+    from mmdeploy.backend.base import get_backend_manager
 
-    if not is_available():
+    backend_mgr = get_backend_manager(backend.value)
+    result = backend_mgr.is_available(with_custom_ops=require_plugin)
+
+    if not result:
         pytest.skip(f'{backend.value} package is not available')
-    if require_plugin and is_custom_ops_available is not None:
-        if not is_custom_ops_available():
-            pytest.skip(f'{backend.value} plugin is not available')
 
 
 class WrapFunction(nn.Module):
@@ -455,6 +393,7 @@ def get_backend_outputs(ir_file_path: str,
             If the backend specified in 'deploy_cfg' is not available,
             then None will be returned.
     """
+    from mmdeploy.apis.utils import to_backend
     backend = get_backend(deploy_cfg)
     flatten_model_inputs = get_flatten_inputs(model_inputs)
     ir_config = get_ir_config(deploy_cfg)
@@ -465,109 +404,32 @@ def get_backend_outputs(ir_file_path: str,
             k for k, v in flatten_model_inputs.items() if k != 'ctx'
         ]
 
-    # prepare backend model and input features
+    work_dir = tempfile.TemporaryDirectory().name
+    device = 'cpu'
+
+    # TODO: Try to wrap this code into the backend manager
+    if backend != Backend.TORCHSCRIPT:
+        model_inputs = flatten_model_inputs
     if backend == Backend.TENSORRT:
-        # convert to engine
-        import mmdeploy.apis.tensorrt as trt_apis
-        if not (trt_apis.is_available()
-                and trt_apis.is_custom_ops_available()):
-            return None
-        trt_file_path = tempfile.NamedTemporaryFile(suffix='.engine').name
-        trt_apis.onnx2tensorrt(
-            '',
-            trt_file_path,
-            0,
-            deploy_cfg=deploy_cfg,
-            onnx_model=ir_file_path)
-        backend_files = [trt_file_path]
-        for k, v in model_inputs.items():
-            model_inputs[k] = model_inputs[k].cuda()
-
-        backend_feats = model_inputs
-        device = 'cuda:0'
-    elif backend == Backend.ONNXRUNTIME:
-        import mmdeploy.apis.onnxruntime as ort_apis
-        if not (ort_apis.is_available()
-                and ort_apis.is_custom_ops_available()):
-            return None
-        feature_list = []
-        backend_feats = {}
-        for k, item in model_inputs.items():
-            if type(item) is torch.Tensor:
-                feature_list.append(item)
-            elif type(item) is tuple or list:
-                for i in item:
-                    assert type(i) is torch.Tensor, 'model_inputs contains '
-                    'nested sequence of torch.Tensor'
-                    feature_list.append(i)
-            else:
-                raise TypeError('values of model_inputs are expected to be '
-                                'torch.Tensor or its sequence, '
-                                f'but got {type(model_inputs)}')
-
-        # for onnx file generated with list[torch.Tensor] input,
-        # the input dict keys are just numbers if not specified
-        for i in range(len(feature_list)):
-            if i < len(input_names):
-                backend_feats[input_names[i]] = feature_list[i]
-            else:
-                backend_feats[str(i)] = feature_list[i]
-        backend_files = [ir_file_path]
-        device = 'cpu'
-    elif backend == Backend.NCNN:
-        import mmdeploy.apis.ncnn as ncnn_apis
-        if not (ncnn_apis.is_available()
-                and ncnn_apis.is_custom_ops_available()):
-            return None
-        param_path, bin_path = ncnn_apis.get_output_model_file(ir_file_path)
-        ncnn_files_prefix = osp.splitext(ir_file_path)[0]
-        ncnn_apis.from_onnx(ir_file_path, ncnn_files_prefix)
-        backend_files = [param_path, bin_path]
-        backend_feats = flatten_model_inputs
-        device = 'cpu'
-
+        device = 'cuda'
+        model_inputs = dict((k, v.cuda()) for k, v in model_inputs.items())
     elif backend == Backend.OPENVINO:
-        import mmdeploy.apis.openvino as openvino_apis
-        if not openvino_apis.is_available():
-            return None
-        from mmdeploy.apis.openvino import get_mo_options_from_cfg
-        openvino_work_dir = tempfile.TemporaryDirectory().name
-        openvino_file_path = openvino_apis.get_output_model_file(
-            ir_file_path, openvino_work_dir)
         input_info = {
             name: value.shape
             for name, value in flatten_model_inputs.items()
         }
-        mo_options = get_mo_options_from_cfg(deploy_cfg)
-        openvino_apis.from_onnx(ir_file_path, openvino_work_dir, input_info,
-                                output_names, mo_options)
-        backend_files = [openvino_file_path]
-        backend_feats = flatten_model_inputs
-        device = 'cpu'
+        deploy_cfg['backend_config']['model_inputs'] = [
+            dict(opt_shapes=input_info)
+        ]
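+    # Convert the IR file into backend model files through the unified
+    # to_backend API instead of per-backend conversion branches.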
+    backend_files = to_backend(
+        backend.value, [ir_file_path],
+        work_dir=work_dir,
+        deploy_cfg=deploy_cfg,
+        device=device)
+    backend_feats = model_inputs
 
-    elif backend == Backend.DEFAULT:
-        return None
-    elif backend == Backend.TORCHSCRIPT:
-        backend_files = [ir_file_path]
-        device = 'cpu'
+    if backend == Backend.TORCHSCRIPT:
         backend_feats = [v for _, v in model_inputs.items()]
-    elif backend == Backend.ASCEND:
-        # Ascend model conversion
-        import mmdeploy.apis.ascend as ascend_apis
-        from mmdeploy.utils import get_model_inputs
-        if not ascend_apis.is_available():
-            return None
-        work_dir = osp.split(ir_file_path)[0]
-        # convert model
-        convert_args = get_model_inputs(deploy_cfg)
-        ascend_apis.from_onnx(ir_file_path, work_dir, convert_args[0])
-        om_file_name = osp.splitext(osp.split(ir_file_path)[1])[0]
-        backend_files = [osp.join(work_dir, om_file_name + '.om')]
-        backend_feats = flatten_model_inputs
-        device = 'cpu'
-    else:
-        raise NotImplementedError(
-            f'Unimplemented backend type: {backend.value}')
 
     from mmdeploy.codebase.base import BaseBackendModel
     backend_model = BaseBackendModel._build_wrapper(
diff --git a/requirements/optional.txt b/requirements/optional.txt
index 85931d7d2..9c445a253 100644
--- a/requirements/optional.txt
+++ b/requirements/optional.txt
@@ -1,2 +1,4 @@
 -r requirements/codebases.txt
 -r requirements/backends.txt
+h5py
+tqdm
diff --git a/requirements/runtime.txt b/requirements/runtime.txt
index 5279f7d4c..5a40d3d23 100644
--- a/requirements/runtime.txt
+++ b/requirements/runtime.txt
@@ -1,6 +1,5 @@
 aenum
 grpcio
-h5py
 matplotlib
 mmengine
 multiprocess
@@ -10,4 +9,3 @@ prettytable
 protobuf<=3.20.1
 six
 terminaltables
-tqdm
diff --git a/tests/test_apis/test_calibration.py b/tests/test_apis/test_calibration.py
index 222193d56..f2a241421 100644
--- a/tests/test_apis/test_calibration.py
+++ b/tests/test_apis/test_calibration.py
@@ -3,7 +3,6 @@ import os.path as osp
 import tempfile
 from multiprocessing import Process
 
-import h5py
 from mmengine import Config
 
 from mmdeploy.apis import create_calib_input_data
@@ -171,6 +170,7 @@ def get_model_cfg():
 
 
 def run_test_create_calib_end2end():
+    import h5py
     model_cfg = get_model_cfg()
     deploy_cfg = get_end2end_deploy_cfg()
     create_calib_input_data(
@@ -203,6 +203,7 @@ def test_create_calib_end2end():
 
 
 def run_test_create_calib_parittion():
+    import h5py
     model_cfg = get_model_cfg()
     deploy_cfg = get_partition_deploy_cfg()
     create_calib_input_data(
diff --git a/tests/test_backend/test_wrapper.py b/tests/test_backend/test_wrapper.py
index 45a114f23..f6de134ac 100644
--- a/tests/test_backend/test_wrapper.py
+++ b/tests/test_backend/test_wrapper.py
@@ -145,96 +145,40 @@ def onnx2backend(backend, onnx_file):
 
 
 def create_wrapper(backend, model_files):
-    if backend == Backend.TENSORRT:
-        from mmdeploy.backend.tensorrt import TRTWrapper
-        trt_model = TRTWrapper(model_files, output_names)
-        return trt_model
-    elif backend == Backend.ONNXRUNTIME:
-        from mmdeploy.backend.onnxruntime import ORTWrapper
-        ort_model = ORTWrapper(model_files, 'cpu', output_names)
-        return ort_model
-    elif backend == Backend.PPLNN:
-        from mmdeploy.backend.pplnn import PPLNNWrapper
-        onnx_file, algo_file = model_files
-        pplnn_model = PPLNNWrapper(onnx_file, algo_file, 'cpu', output_names)
-        return pplnn_model
-    elif backend == Backend.NCNN:
-        from mmdeploy.backend.ncnn import NCNNWrapper
-        param_file, bin_file = model_files
-        ncnn_model = NCNNWrapper(param_file, bin_file, output_names)
-        return ncnn_model
-    elif backend == Backend.OPENVINO:
-        from mmdeploy.backend.openvino import OpenVINOWrapper
-        openvino_model = OpenVINOWrapper(model_files, output_names)
-        return openvino_model
-    elif backend == Backend.TORCHSCRIPT:
-        from mmdeploy.backend.torchscript import TorchscriptWrapper
-        torchscript_model = TorchscriptWrapper(
-            model_files, input_names=input_names, output_names=output_names)
-        return torchscript_model
+    from mmdeploy.backend.base import get_backend_manager
+    backend_mgr = get_backend_manager(backend.value)
+    deploy_cfg = None
+    if isinstance(model_files, str):
+        model_files = [model_files]
-    elif backend == Backend.RKNN:
+    if backend == Backend.RKNN:
-        from mmdeploy.backend.rknn import RKNNWrapper
-        rknn_model = RKNNWrapper(
-            model_files,
-            common_config=dict(target_platform=target_platform),
-            input_names=input_names,
-            output_names=output_names)
-        return rknn_model
-    elif backend == Backend.ASCEND:
-        from mmdeploy.backend.ascend import AscendWrapper
-        ascend_model = AscendWrapper(model_files)
-        return ascend_model
-    elif backend == Backend.TVM:
-        from mmdeploy.backend.tvm import TVMWrapper
-        tvm_model = TVMWrapper(model_files, output_names=output_names)
-        return tvm_model
-    else:
-        raise NotImplementedError(f'Unknown backend type: {backend.value}')
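+        # RKNN wrappers need the target platform, passed through the
+        # deploy config rather than as a keyword argument.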
+        deploy_cfg = dict(
+            backend_config=dict(
+                common_config=dict(target_platform=target_platform)))
+    return backend_mgr.build_wrapper(
+        model_files,
+        input_names=input_names,
+        output_names=output_names,
+        deploy_cfg=deploy_cfg)
 
 
 def run_wrapper(backend, wrapper, input):
     if backend == Backend.TENSORRT:
         input = input.cuda()
-        results = wrapper({'input': input})['output']
-        results = results.detach().cpu()
-        return results
-    elif backend == Backend.ONNXRUNTIME:
-        results = wrapper({'input': input})['output']
-        results = results.detach().cpu()
-        return results
-    elif backend == Backend.PPLNN:
-        results = wrapper({'input': input})['output']
-        results = results.detach().cpu()
-        return results
-    elif backend == Backend.NCNN:
-        input = input.float()
-        results = wrapper({'input': input})['output']
-        results = results.detach().cpu()
-        return results
-    elif backend == Backend.OPENVINO:
-        results = wrapper({'input': input})['output']
-        results = results.detach().cpu()
-        return results
-    elif backend == Backend.TORCHSCRIPT:
-        results = wrapper({'input': input})['output']
-        return results
-    elif backend == Backend.RKNN:
-        results = wrapper({'input': input})
-    elif backend == Backend.ASCEND:
-        results = wrapper({'input': input})['output']
-        return results
-    elif backend == Backend.TVM:
-        results = wrapper({'input': input})['output']
-        return results
-    else:
-        raise NotImplementedError(f'Unknown backend type: {backend.value}')
+
+    results = wrapper({'input': input})
+
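+    # RKNN wrapper outputs are not keyed by output name, so skip the lookup.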
+    if backend != Backend.RKNN:
+        results = results['output']
+
+    results = results.detach().cpu()
+
+    return results
 
 
-ALL_BACKEND = [
-    Backend.TENSORRT, Backend.ONNXRUNTIME, Backend.PPLNN, Backend.NCNN,
-    Backend.OPENVINO, Backend.TORCHSCRIPT, Backend.ASCEND, Backend.RKNN,
-    Backend.TVM
-]
+ALL_BACKEND = list(Backend)
+ALL_BACKEND.remove(Backend.DEFAULT)
+ALL_BACKEND.remove(Backend.PYTORCH)
+ALL_BACKEND.remove(Backend.SDK)
 
 
 @pytest.mark.parametrize('backend', ALL_BACKEND)
diff --git a/tests/test_codebase/test_mmdet/test_mmdet_models.py b/tests/test_codebase/test_mmdet/test_mmdet_models.py
index 90c4b900b..5d70b0bef 100644
--- a/tests/test_codebase/test_mmdet/test_mmdet_models.py
+++ b/tests/test_codebase/test_mmdet/test_mmdet_models.py
@@ -710,6 +710,7 @@ def test_forward_of_base_detector(model_cfg_path, backend):
                     pre_top_k=-1,
                     keep_top_k=100,
                     background_label_id=-1,
+                    export_postprocess_mask=False,
                 ))))
     model_cfg = Config(dict(model=mmengine.load(model_cfg_path)))
     model_cfg.model = _replace_r50_with_r18(model_cfg.model)
diff --git a/tests/test_codebase/test_mmdet/test_object_detection_model.py b/tests/test_codebase/test_mmdet/test_object_detection_model.py
index a5e9745c6..0091e610e 100644
--- a/tests/test_codebase/test_mmdet/test_object_detection_model.py
+++ b/tests/test_codebase/test_mmdet/test_object_detection_model.py
@@ -1,4 +1,6 @@
 # Copyright (c) OpenMMLab. All rights reserved.
+from typing import Sequence
+
 import pytest
 import torch
 from mmengine import Config
@@ -20,7 +22,7 @@ from mmdeploy.codebase.mmdet.deploy.object_detection_model import End2EndModel
 
 def assert_det_results(results, module_name: str = 'model'):
     assert results is not None, f'failed to get output using {module_name}'
-    assert isinstance(results, tuple)
+    assert isinstance(results, Sequence)
     assert len(results) == 2
     assert results[0].shape[0] == results[1].shape[0]
     assert results[0].shape[1] == results[1].shape[1]
@@ -28,7 +30,7 @@ def assert_det_results(results, module_name: str = 'model'):
 
 def assert_forward_results(results, module_name: str = 'model'):
     assert results is not None, f'failed to get output using {module_name}'
-    assert isinstance(results, list)
+    assert isinstance(results, Sequence)
     assert len(results) == 1
     assert isinstance(results[0].pred_instances, InstanceData)
     assert results[0].pred_instances.bboxes.shape[-1] == 4
diff --git a/tests/test_codebase/test_mmrotate/test_mmrotate_models.py b/tests/test_codebase/test_mmrotate/test_mmrotate_models.py
index 3c5862679..c31038b59 100644
--- a/tests/test_codebase/test_mmrotate/test_mmrotate_models.py
+++ b/tests/test_codebase/test_mmrotate/test_mmrotate_models.py
@@ -96,7 +96,7 @@ def get_single_roi_extractor():
 
 @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
 def test_rotated_single_roi_extractor(backend_type: Backend):
-    check_backend(backend_type)
+    check_backend(backend_type, require_plugin=True)
 
     single_roi_extractor = get_single_roi_extractor()
     output_names = ['roi_feat']
@@ -226,7 +226,7 @@ def test_oriented_rpn_head__predict_by_feat(backend_type: Backend):
 
 @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
 def test_gv_ratio_roi_head__predict_bbox(backend_type: Backend):
-    check_backend(backend_type)
+    check_backend(backend_type, require_plugin=True)
     from mmrotate.models.roi_heads import GVRatioRoIHead
     output_names = ['dets', 'labels']
     deploy_cfg = Config(
@@ -369,7 +369,7 @@ def get_rotated_rtmdet_head_model():
 @pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
 def test_rotated_rtmdet_head_predict_by_feat(backend_type: Backend):
     """Test predict_by_feat rewrite of RTMDet-R."""
-    check_backend(backend_type)
+    check_backend(backend_type, require_plugin=True)
     rtm_r_head = get_rotated_rtmdet_head_model()
     rtm_r_head.cpu().eval()
     s = 128
diff --git a/tests/test_codebase/test_mmseg/test_mmseg_models.py b/tests/test_codebase/test_mmseg/test_mmseg_models.py
index c1fcf105a..b5704b822 100644
--- a/tests/test_codebase/test_mmseg/test_mmseg_models.py
+++ b/tests/test_codebase/test_mmseg/test_mmseg_models.py
@@ -119,7 +119,7 @@ def test_upconvblock_forward(backend, is_dynamic_shape):
         dict(
             backend_config=dict(type=backend.value),
             onnx_config=dict(
-                input_names=['skip', 'x'],
+                input_names=['x', 'skip'],
                 output_names=['output'],
                 dynamic_axes=dynamic_axes),
             codebase_config=dict(
diff --git a/tests/test_codebase/test_mmseg/utils.py b/tests/test_codebase/test_mmseg/utils.py
index 154518ba8..7da63d7a4 100644
--- a/tests/test_codebase/test_mmseg/utils.py
+++ b/tests/test_codebase/test_mmseg/utils.py
@@ -31,7 +31,7 @@ def generate_mmseg_deploy_config(backend='onnxruntime'):
                 keep_initializers_as_inputs=False,
                 opset_version=11,
                 input_shape=None,
-                input_names=['input'],
+                input_names=['inputs'],
                 output_names=['output'])))
     return deploy_cfg
 
diff --git a/tools/check_env.py b/tools/check_env.py
index d2d826903..537c1e1a0 100644
--- a/tools/check_env.py
+++ b/tools/check_env.py
@@ -4,8 +4,7 @@ from mmcv.utils import collect_env as collect_base_env
 from mmengine.utils import get_git_hash
 
 import mmdeploy
-from mmdeploy.utils import (get_backend_version, get_codebase_version,
-                            get_root_logger)
+from mmdeploy.utils import get_codebase_version, get_root_logger
 
 
 def collect_env():
@@ -17,44 +16,16 @@ def collect_env():
 
 
 def check_backend():
-    backend_versions = get_backend_version()
-    ort_version = backend_versions['onnxruntime']
-    trt_version = backend_versions['tensorrt']
-    ncnn_version = backend_versions['ncnn']
-    tvm_version = backend_versions['tvm']
+    from mmdeploy.backend.base import get_backend_manager
+    from mmdeploy.utils import Backend
+    exclude_backend_lists = [Backend.DEFAULT, Backend.PYTORCH, Backend.SDK]
+    backend_lists = [
+        backend for backend in Backend if backend not in exclude_backend_lists
+    ]
 
-    import mmdeploy.apis.onnxruntime as ort_apis
-    logger = get_root_logger()
-    logger.info(f'onnxruntime: {ort_version}\tops_is_avaliable : '
-                f'{ort_apis.is_custom_ops_available()}')
-
-    import mmdeploy.apis.tensorrt as trt_apis
-    logger.info(f'tensorrt: {trt_version}\tops_is_avaliable : '
-                f'{trt_apis.is_custom_ops_available()}')
-
-    import mmdeploy.apis.ncnn as ncnn_apis
-    logger.info(f'ncnn: {ncnn_version}\tops_is_avaliable : '
-                f'{ncnn_apis.is_custom_ops_available()}')
-
-    logger.info(f'tvm: {tvm_version}')
-
-    import mmdeploy.apis.pplnn as pplnn_apis
-    logger.info(f'pplnn_is_avaliable: {pplnn_apis.is_available()}')
-
-    import mmdeploy.apis.openvino as openvino_apis
-    logger.info(f'openvino_is_avaliable: {openvino_apis.is_available()}')
-
-    import mmdeploy.apis.snpe as snpe_apis
-    logger.info(f'snpe_is_available: {snpe_apis.is_available()}')
-
-    import mmdeploy.apis.ascend as ascend_apis
-    logger.info(f'ascend_is_available: {ascend_apis.is_available()}')
-
-    import mmdeploy.apis.coreml as coreml_apis
-    logger.info(f'coreml_is_available: {coreml_apis.is_available()}')
-
-    import mmdeploy.apis.rknn as rknn_apis
-    logger.info(f'rknn_is_avaliable: {rknn_apis.is_available()}')
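+    # Each backend manager reports its own availability, version and
+    # custom-ops status instead of hard-coded per-backend logging.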
+    logger = get_root_logger()
+    for backend in backend_lists:
+        backend_mgr = get_backend_manager(backend.value)
+        backend_mgr.check_env(logger.info)
 
 
 def check_codebase():
diff --git a/tools/deploy.py b/tools/deploy.py
index 3a4a2793c..7b077ecd7 100644
--- a/tools/deploy.py
+++ b/tools/deploy.py
@@ -13,11 +13,11 @@ from mmdeploy.apis import (create_calib_input_data, extract_model,
                            get_predefined_partition_cfg, torch2onnx,
                            torch2torchscript, visualize_model)
 from mmdeploy.apis.core import PIPELINE_MANAGER
+from mmdeploy.apis.utils import to_backend
 from mmdeploy.backend.sdk.export_info import export2SDK
 from mmdeploy.utils import (IR, Backend, get_backend, get_calib_filename,
-                            get_ir_config, get_model_inputs,
-                            get_partition_config, get_root_logger, load_config,
-                            target_wrapper)
+                            get_ir_config, get_partition_config,
+                            get_root_logger, load_config, target_wrapper)
 
 
 def parse_args():
@@ -193,269 +193,79 @@ def main():
     backend_files = ir_files
     # convert backend
     backend = get_backend(deploy_cfg)
-    if backend == Backend.TENSORRT:
-        model_params = get_model_inputs(deploy_cfg)
-        assert len(model_params) == len(ir_files)
 
-        from mmdeploy.apis.tensorrt import is_available as trt_is_available
-        assert trt_is_available(
-        ), 'TensorRT is not available,' \
-            + ' please install TensorRT and build TensorRT custom ops first.'
+    # preprocess deploy_cfg
+    if backend == Backend.RKNN:
+        # TODO: Add this to task_processor in the future
+        import tempfile
 
-        from mmdeploy.apis.tensorrt import onnx2tensorrt
-        PIPELINE_MANAGER.enable_multiprocess(True, [onnx2tensorrt])
-        PIPELINE_MANAGER.set_log_level(log_level, [onnx2tensorrt])
-
-        backend_files = []
-        for model_id, model_param, onnx_path in zip(
-                range(len(ir_files)), model_params, ir_files):
-            onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]
-            save_file = model_param.get('save_file', onnx_name + '.engine')
-
-            partition_type = 'end2end' if partition_cfgs is None \
-                else onnx_name
-            onnx2tensorrt(
-                args.work_dir,
-                save_file,
-                model_id,
-                deploy_cfg_path,
-                onnx_path,
-                device=args.device,
-                partition_type=partition_type)
-
-            backend_files.append(osp.join(args.work_dir, save_file))
-
-    elif backend == Backend.NCNN:
-        from mmdeploy.apis.ncnn import is_available as is_available_ncnn
-
-        if not is_available_ncnn():
-            logger.error('ncnn support is not available, please make sure \
-                1) `mmdeploy_onnx2ncnn` existed in `PATH` \
-                2) python import ncnn success')
-            exit(1)
-
-        import mmdeploy.apis.ncnn as ncnn_api
-        from mmdeploy.apis.ncnn import get_output_model_file
-
-        PIPELINE_MANAGER.set_log_level(log_level, [ncnn_api.from_onnx])
-
-        backend_files = []
-        for onnx_path in ir_files:
-            model_param_path, model_bin_path = get_output_model_file(
-                onnx_path, args.work_dir)
-            onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]
-            ncnn_api.from_onnx(onnx_path, osp.join(args.work_dir, onnx_name))
-
-            if quant:
-                from onnx2ncnn_quant_table import get_table
-
-                from mmdeploy.apis.ncnn import get_quant_model_file, ncnn2int8
-
-                deploy_cfg, model_cfg = load_config(deploy_cfg_path,
-                                                    model_cfg_path)
-                quant_onnx, quant_table, quant_param, quant_bin = get_quant_model_file(  # noqa: E501
-                    onnx_path, args.work_dir)
-
-                create_process(
-                    'ncnn quant table',
-                    target=get_table,
-                    args=(onnx_path, deploy_cfg, model_cfg, quant_onnx,
-                          quant_table, quant_image_dir, args.device),
-                    kwargs=dict(),
-                    ret_value=ret_value)
-
-                create_process(
-                    'ncnn_int8',
-                    target=ncnn2int8,
-                    args=(model_param_path, model_bin_path, quant_table,
-                          quant_param, quant_bin),
-                    kwargs=dict(),
-                    ret_value=ret_value)
-                backend_files += [quant_param, quant_bin]
-            else:
-                backend_files += [model_param_path, model_bin_path]
-
-    elif backend == Backend.SNPE:
-        from mmdeploy.apis.snpe import is_available as is_available
-
-        if not is_available():
-            logger.error('snpe support is not available, please check \
-                1) `snpe-onnx-to-dlc` existed in `PATH` 2) snpe only support \
-                    ubuntu18.04')
-            exit(1)
-
-        import mmdeploy.apis.snpe as snpe_api
-        from mmdeploy.apis.snpe import get_env_key, get_output_model_file
-
-        if get_env_key() not in os.environ:
-            os.environ[get_env_key()] = args.uri
-
-        PIPELINE_MANAGER.set_log_level(log_level, [snpe_api.from_onnx])
-
-        backend_files = []
-        for onnx_path in ir_files:
-            dlc_path = get_output_model_file(onnx_path, args.work_dir)
-            onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]
-            snpe_api.from_onnx(onnx_path, osp.join(args.work_dir, onnx_name))
-            backend_files = [dlc_path]
-
-    elif backend == Backend.OPENVINO:
-        from mmdeploy.apis.openvino import \
-            is_available as is_available_openvino
-        assert is_available_openvino(), \
-            'OpenVINO is not available, please install OpenVINO first.'
-
-        import mmdeploy.apis.openvino as openvino_api
-        from mmdeploy.apis.openvino import (get_input_info_from_cfg,
-                                            get_mo_options_from_cfg,
-                                            get_output_model_file)
-
-        PIPELINE_MANAGER.set_log_level(log_level, [openvino_api.from_onnx])
-
-        openvino_files = []
-        for onnx_path in ir_files:
-            model_xml_path = get_output_model_file(onnx_path, args.work_dir)
-            input_info = get_input_info_from_cfg(deploy_cfg)
-            output_names = get_ir_config(deploy_cfg).output_names
-            mo_options = get_mo_options_from_cfg(deploy_cfg)
-            openvino_api.from_onnx(onnx_path, args.work_dir, input_info,
-                                   output_names, mo_options)
-            openvino_files.append(model_xml_path)
-        backend_files = openvino_files
-
-    elif backend == Backend.PPLNN:
-        from mmdeploy.apis.pplnn import is_available as is_available_pplnn
-        assert is_available_pplnn(), \
-            'PPLNN is not available, please install PPLNN first.'
-
-        from mmdeploy.apis.pplnn import from_onnx
-
-        pplnn_pipeline_funcs = [from_onnx]
-        PIPELINE_MANAGER.set_log_level(log_level, pplnn_pipeline_funcs)
-
-        pplnn_files = []
-        for onnx_path in ir_files:
-            algo_file = onnx_path.replace('.onnx', '.json')
-            model_inputs = get_model_inputs(deploy_cfg)
-            assert 'opt_shape' in model_inputs, 'Expect opt_shape ' \
-                'in deploy config for PPLNN'
-            # PPLNN accepts only 1 input shape for optimization,
-            # may get changed in the future
-            input_shapes = [model_inputs.opt_shape]
-            algo_prefix = osp.splitext(algo_file)[0]
-            from_onnx(
-                onnx_path,
-                algo_prefix,
-                device=args.device,
-                input_shapes=input_shapes)
-            pplnn_files += [onnx_path, algo_file]
-        backend_files = pplnn_files
-
-    elif backend == Backend.RKNN:
-        from mmdeploy.apis.rknn import is_available as rknn_is_available
-        assert rknn_is_available(
-        ), 'RKNN is not available, please install RKNN first.'
-
-        from mmdeploy.apis.rknn import onnx2rknn
-        PIPELINE_MANAGER.enable_multiprocess(True, [onnx2rknn])
-        PIPELINE_MANAGER.set_log_level(logging.INFO, [onnx2rknn])
-
-        backend_files = []
-        for model_id, onnx_path in zip(range(len(ir_files)), ir_files):
-            pre_fix_name = osp.splitext(osp.split(onnx_path)[1])[0]
-            output_path = osp.join(args.work_dir, pre_fix_name + '.rknn')
-            import tempfile
-            dataset_file = tempfile.NamedTemporaryFile(suffix='.txt').name
-            with open(dataset_file, 'w') as f:
-                f.writelines([osp.abspath(args.img)])
-            onnx2rknn(
-                onnx_path,
-                output_path,
-                deploy_cfg_path,
-                model_cfg_path,
-                dataset_file=dataset_file)
-
-            backend_files.append(output_path)
-    elif backend == Backend.ASCEND:
-        from mmdeploy.apis.ascend import from_onnx
-
-        ascend_pipeline_funcs = [from_onnx]
-        PIPELINE_MANAGER.set_log_level(log_level, ascend_pipeline_funcs)
-
-        model_inputs = get_model_inputs(deploy_cfg)
-
-        om_files = []
-        for model_id, onnx_path in enumerate(ir_files):
-            om_path = osp.splitext(onnx_path)[0] + '.om'
-            from_onnx(onnx_path, args.work_dir, model_inputs[model_id])
-            om_files.append(om_path)
-        backend_files = om_files
+        from mmdeploy.utils import (get_common_config, get_normalization,
+                                    get_quantization_config,
+                                    get_rknn_quantization)
+        quantization_cfg = get_quantization_config(deploy_cfg)
+        common_params = get_common_config(deploy_cfg)
+        if get_rknn_quantization(deploy_cfg) is True:
+            transform = get_normalization(model_cfg)
+            common_params.update(
+                dict(
+                    mean_values=[transform['mean']],
+                    std_values=[transform['std']]))
 
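+        # Use the input image as a one-item calibration dataset for RKNN
+        # quantization if no dataset is configured.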
+        dataset_file = tempfile.NamedTemporaryFile(suffix='.txt').name
+        with open(dataset_file, 'w') as f:
+            f.writelines([osp.abspath(args.img)])
+        quantization_cfg.setdefault('dataset', dataset_file)
+    if backend == Backend.ASCEND:
+        # TODO: Add this to backend manager in the future
         if args.dump_info:
             from mmdeploy.backend.ascend import update_sdk_pipeline
             update_sdk_pipeline(args.work_dir)
 
-    elif backend == Backend.COREML:
-        from mmdeploy.apis.coreml import from_torchscript, get_model_suffix
-        coreml_pipeline_funcs = [from_torchscript]
-        PIPELINE_MANAGER.set_log_level(log_level, coreml_pipeline_funcs)
-        model_inputs = get_model_inputs(deploy_cfg)
-        coreml_files = []
-        for model_id, torchscript_path in enumerate(ir_files):
-            torchscript_name = osp.splitext(osp.split(torchscript_path)[1])[0]
-            output_file_prefix = osp.join(args.work_dir, torchscript_name)
-            convert_to = deploy_cfg.backend_config.convert_to
-            from_torchscript(torchscript_path, output_file_prefix,
-                             ir_config.input_names, ir_config.output_names,
-                             model_inputs[model_id].input_shapes, convert_to)
-            suffix = get_model_suffix(convert_to)
-            coreml_files.append(output_file_prefix + suffix)
-        backend_files = coreml_files
-    elif backend == Backend.TVM:
-        import copy
+    # convert to backend
+    PIPELINE_MANAGER.set_log_level(log_level, [to_backend])
+    if backend == Backend.TENSORRT:
+        PIPELINE_MANAGER.enable_multiprocess(True, [to_backend])
+    backend_files = to_backend(
+        backend,
+        ir_files,
+        work_dir=args.work_dir,
+        deploy_cfg=deploy_cfg,
+        log_level=log_level,
+        device=args.device,
+        uri=args.uri)
 
-        from mmdeploy.apis.tvm import from_onnx, get_library_ext
-        PIPELINE_MANAGER.set_log_level(log_level, [from_onnx])
-        model_inputs = get_model_inputs(deploy_cfg)
+    # ncnn quantization
+    if backend == Backend.NCNN and quant:
+        from onnx2ncnn_quant_table import get_table
 
-        if args.device.startswith('cuda'):
-            target = 'cuda'
-        else:
-            target = 'llvm'
+        from mmdeploy.apis.ncnn import get_quant_model_file, ncnn2int8
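+        # to_backend emits alternating (.param, .bin) files, one pair per
+        # model; split them back into pairs for int8 conversion.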
+        model_param_paths = backend_files[::2]
+        model_bin_paths = backend_files[1::2]
+        backend_files = []
+        for onnx_path, model_param_path, model_bin_path in zip(
+                ir_files, model_param_paths, model_bin_paths):
 
-        lib_ext = get_library_ext()
+            deploy_cfg, model_cfg = load_config(deploy_cfg_path,
+                                                model_cfg_path)
+            quant_onnx, quant_table, quant_param, quant_bin = get_quant_model_file(  # noqa: E501
+                onnx_path, args.work_dir)
 
-        tvm_files = []
-        for model_id, onnx_path in enumerate(ir_files):
-            model_input = copy.deepcopy(model_inputs[model_id])
-            use_vm = model_input.get('use_vm', False)
-            if 'target' not in model_input['tuner']:
-                model_input['tuner']['target'] = target
-            lib_path = osp.splitext(onnx_path)[0] + lib_ext
-            code_path = osp.splitext(
-                onnx_path)[0] + '.code' if use_vm else None
-            model_input['output_file'] = lib_path
-            model_input['onnx_model'] = onnx_path
-            model_input['bytecode_file'] = code_path
+            create_process(
+                'ncnn quant table',
+                target=get_table,
+                args=(onnx_path, deploy_cfg, model_cfg, quant_onnx,
+                      quant_table, quant_image_dir, args.device),
+                kwargs=dict(),
+                ret_value=ret_value)
 
-            # create calibration dataset
-            if 'qconfig' in model_input:
-                calib_path = osp.join(args.work_dir, calib_filename)
-                from mmdeploy.backend.tvm import HDF5Dataset
-                partition_type = 'end2end' if partition_cfgs is None \
-                    else onnx_name
-                dataset = HDF5Dataset(
-                    calib_path,
-                    model_input['shape'],
-                    model_type=partition_type,
-                    device=target)
-                model_input['dataset'] = dataset()
-
-            from_onnx(**model_input)
-
-            tvm_files += [lib_path, code_path]
-
-        backend_files = tvm_files
+            create_process(
+                'ncnn_int8',
+                target=ncnn2int8,
+                args=(model_param_path, model_bin_path, quant_table,
+                      quant_param, quant_bin),
+                kwargs=dict(),
+                ret_value=ret_value)
+            backend_files += [quant_param, quant_bin]
 
     if args.test_img is None:
         args.test_img = args.img