Mirror of https://github.com/open-mmlab/mmdeploy.git (synced 2025-01-14 08:09:43 +08:00)
[Refactor] Ease rewriter import for dev-1.x (#1170)

* Update rewriter import
* Remove root import
* Add interface

This commit is contained in:
parent c5edb85550
commit 6420e20445
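Note: this change replaces the eager `from .x import *` re-exports in each codebase package with an explicit registration interface on MMCodebase. `register_deploy_modules()` imports the rewriter modules (their decorators register them as an import side effect), and `register_all_modules()` additionally registers the upstream codebase. A minimal sketch of the resulting pattern, assuming MMCodebase is importable from mmdeploy.codebase.base; the subclass name is illustrative, while the method bodies mirror the MMACTION hunk further down:

# Sketch only; names marked illustrative are not part of this commit.
from mmdeploy.codebase.base import MMCodebase  # assumed import path


class SomeCodebase(MMCodebase):  # illustrative subclass

    @classmethod
    def register_deploy_modules(cls):
        # Import the rewriter package so its decorators run and register
        # every rewriter it contains.
        import mmdeploy.codebase.mmaction.models  # noqa: F401

    @classmethod
    def register_all_modules(cls):
        # Register the deploy rewriters plus the upstream codebase modules.
        from mmaction.utils.setup_env import register_all_modules
        cls.register_deploy_modules()
        register_all_modules(True)


# Callers keep going through import_codebase(), as the updated tests below do:
#   from mmdeploy.codebase import import_codebase
#   import_codebase(Codebase.MMDET)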
@@ -49,9 +49,15 @@ class MMCodebase(metaclass=ABCMeta):
                deploy_cfg=deploy_cfg,
                device=device))

    @classmethod
    def register_deploy_modules(cls):
        """register deploy module."""
        raise NotImplementedError('register_deploy_modules not implemented.')

    @classmethod
    def register_all_modules(cls):
        pass
        """register codebase module."""
        raise NotImplementedError('register_all_modules not implemented.')

    # Note that the build function returns the class instead of its instance.
@@ -1,4 +1,3 @@
# Copyright (c) OpenMMLab. All rights reserved.

from .deploy import *  # noqa: F401,F403
from .models import *  # noqa: F401,F403
@@ -13,7 +13,12 @@ class MMACTION(MMCodebase):

    task_registry = MMACTION_TASK

    @classmethod
    def register_deploy_modules(cls):
        import mmdeploy.codebase.mmaction.models  # noqa: F401

    @classmethod
    def register_all_modules(cls):
        from mmaction.utils.setup_env import register_all_modules
        cls.register_deploy_modules()
        register_all_modules(True)
@@ -1,3 +1,3 @@
# Copyright (c) OpenMMLab. All rights reserved.

from .recognizers import *  # noqa: F401,F403
from . import recognizers  # noqa: F401,F403
@@ -1,5 +1,3 @@
# Copyright (c) OpenMMLab. All rights reserved.

from .base import base_recognizer__forward

__all__ = ['base_recognizer__forward']
from . import base  # noqa: F401,F403
@@ -1,3 +1,2 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .deploy import *  # noqa: F401,F403
from .models import *  # noqa: F401,F403
@@ -1,5 +1,5 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import *  # noqa: F401,F403
from .classifiers import *  # noqa: F401,F403
from .necks import *  # noqa: F401,F403
from .utils import *  # noqa: F401,F403
from . import backbones  # noqa: F401,F403
from . import classifiers  # noqa: F401,F403
from . import necks  # noqa: F401,F403
from . import utils  # noqa: F401,F403
@@ -1,8 +1,3 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .shufflenet_v2 import shufflenetv2_backbone__forward__default
from .vision_transformer import visiontransformer__forward__ncnn

__all__ = [
    'shufflenetv2_backbone__forward__default',
    'visiontransformer__forward__ncnn'
]
from . import shufflenet_v2  # noqa: F401,F403
from . import vision_transformer  # noqa: F401,F403
@@ -1,4 +1,2 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .base import base_classifier__forward

__all__ = ['base_classifier__forward']
from . import base  # noqa: F401,F403
@@ -1,5 +1,2 @@
# Copyright (c) OpenMMLab. All rights reserved.

from .gap import gap__forward

__all__ = ['gap__forward']
from . import gap  # noqa: F401,F403
@@ -1,10 +1,2 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .attention import (multiheadattention__forward__ncnn,
                        shift_window_msa__forward__default,
                        shift_window_msa__get_attn_mask__default)

__all__ = [
    'multiheadattention__forward__ncnn',
    'shift_window_msa__get_attn_mask__default',
    'shift_window_msa__forward__default'
]
from . import attention  # noqa: F401,F403
@@ -2,9 +2,6 @@
from .deploy import (ObjectDetection, clip_bboxes, gather_topk,
                     get_post_processing_params, pad_with_value,
                     pad_with_value_if_necessary)
from .models import *  # noqa: F401,F403
from .ops import *  # noqa: F401,F403
from .structures import *  # noqa: F401, F403

__all__ = [
    'get_post_processing_params', 'clip_bboxes', 'pad_with_value',
@@ -13,7 +13,8 @@ from torch import Tensor, nn

from mmdeploy.backend.base import get_backend_file_count
from mmdeploy.codebase.base import BaseBackendModel
from mmdeploy.codebase.mmdet import get_post_processing_params, multiclass_nms
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.codebase.mmdet.models.layers import multiclass_nms
from mmdeploy.utils import (Backend, get_backend, get_codebase_config,
                            get_partition_config, load_config)
@@ -1,9 +1,9 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import *  # noqa: F401, F403
from .dense_heads import *  # noqa: F401,F403
from .detectors import *  # noqa: F401,F403
from .layers import *  # noqa: F401,F403
from .necks import *  # noqa: F401,F403
from .roi_heads import *  # noqa: F401,F403
from .task_modules import *  # noqa: F401,F403
from .transformer import *  # noqa: F401,F403
from . import backbones  # noqa: F401, F403
from . import dense_heads  # noqa: F401,F403
from . import detectors  # noqa: F401,F403
from . import layers  # noqa: F401,F403
from . import necks  # noqa: F401,F403
from . import roi_heads  # noqa: F401,F403
from . import task_modules  # noqa: F401,F403
from . import transformer  # noqa: F401,F403
@@ -1,23 +1,10 @@
# Copyright (c) OpenMMLab. All rights reserved.
from . import detr_head
from .base_dense_head import (base_dense_head__predict_by_feat,
                              base_dense_head__predict_by_feat__ncnn)
from .fovea_head import fovea_head__predict_by_feat
from .gfl_head import gfl_head__predict_by_feat
from .reppoints_head import reppoints_head__predict_by_feat
from .rpn_head import rpn_head__get_bboxes__ncnn, rpn_head__predict_by_feat
from .rtmdet_head import rtmdet_head__predict_by_feat
from .yolo_head import (yolov3_head__predict_by_feat,
                        yolov3_head__predict_by_feat__ncnn)
from .yolox_head import (yolox_head__predict_by_feat,
                         yolox_head__predict_by_feat__ncnn)

__all__ = [
    'rpn_head__predict_by_feat', 'rpn_head__get_bboxes__ncnn',
    'yolov3_head__predict_by_feat', 'yolov3_head__predict_by_feat__ncnn',
    'yolox_head__predict_by_feat', 'base_dense_head__predict_by_feat',
    'fovea_head__predict_by_feat', 'base_dense_head__predict_by_feat__ncnn',
    'yolox_head__predict_by_feat__ncnn', 'gfl_head__predict_by_feat',
    'reppoints_head__predict_by_feat', 'detr_head',
    'rtmdet_head__predict_by_feat'
]
from . import base_dense_head  # noqa: F401,F403
from . import detr_head  # noqa: F401,F403
from . import fovea_head  # noqa: F401,F403
from . import gfl_head  # noqa: F401,F403
from . import reppoints_head  # noqa: F401,F403
from . import rpn_head  # noqa: F401,F403
from . import rtmdet_head  # noqa: F401,F403
from . import yolo_head  # noqa: F401,F403
from . import yolox_head  # noqa: F401,F403
@@ -10,8 +10,9 @@ from mmdet.structures.bbox.transforms import distance2bbox
from mmengine import ConfigDict
from torch import Tensor

from mmdeploy.codebase.mmdet import (gather_topk, get_post_processing_params,
                                     pad_with_value_if_necessary)
from mmdeploy.codebase.mmdet.deploy import (gather_topk,
                                            get_post_processing_params,
                                            pad_with_value_if_necessary)
from mmdeploy.codebase.mmdet.models.layers import multiclass_nms
from mmdeploy.codebase.mmdet.ops import ncnn_detection_output_forward
from mmdeploy.core import FUNCTION_REWRITER, mark
@@ -6,7 +6,7 @@ from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor

from mmdeploy.codebase.mmdet import get_post_processing_params
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.codebase.mmdet.models.layers import multiclass_nms
from mmdeploy.core import FUNCTION_REWRITER
@@ -7,8 +7,9 @@ from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor

from mmdeploy.codebase.mmdet import (gather_topk, get_post_processing_params,
                                     pad_with_value)
from mmdeploy.codebase.mmdet.deploy import (gather_topk,
                                            get_post_processing_params,
                                            pad_with_value)
from mmdeploy.codebase.mmdet.models.layers import multiclass_nms
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import Backend, get_backend, is_dynamic_shape
@@ -6,8 +6,9 @@ from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor

from mmdeploy.codebase.mmdet import (gather_topk, get_post_processing_params,
                                     pad_with_value_if_necessary)
from mmdeploy.codebase.mmdet.deploy import (gather_topk,
                                            get_post_processing_params,
                                            pad_with_value_if_necessary)
from mmdeploy.codebase.mmdet.models.layers import multiclass_nms
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import is_dynamic_shape
@@ -5,8 +5,9 @@ import torch
from mmengine import ConfigDict
from torch import Tensor

from mmdeploy.codebase.mmdet import (gather_topk, get_post_processing_params,
                                     pad_with_value_if_necessary)
from mmdeploy.codebase.mmdet.deploy import (gather_topk,
                                            get_post_processing_params,
                                            pad_with_value_if_necessary)
from mmdeploy.codebase.mmdet.models.layers import multiclass_nms
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import Backend, is_dynamic_shape
@@ -1,7 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from mmdeploy.codebase.mmdet import get_post_processing_params
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.codebase.mmdet.ops import (ncnn_detection_output_forward,
                                         ncnn_prior_box_forward)
from mmdeploy.core import FUNCTION_REWRITER
@@ -6,8 +6,8 @@ import torch
from mmdet.utils.typing import OptConfigType
from torch import Tensor

from mmdeploy.codebase.mmdet import (get_post_processing_params,
                                     pad_with_value_if_necessary)
from mmdeploy.codebase.mmdet.deploy import (get_post_processing_params,
                                            pad_with_value_if_necessary)
from mmdeploy.codebase.mmdet.models.layers import multiclass_nms
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.utils import Backend, is_dynamic_shape
@@ -6,7 +6,7 @@ from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor

from mmdeploy.codebase.mmdet import get_post_processing_params
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.codebase.mmdet.models.layers import multiclass_nms
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.utils import Backend
@@ -1,4 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_nms import _multiclass_nms, multiclass_nms
from .bbox_nms import multiclass_nms

__all__ = ['multiclass_nms', '_multiclass_nms']
__all__ = ['multiclass_nms']
@@ -2,7 +2,6 @@
import torch
from torch import Tensor

import mmdeploy
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.mmcv.ops import ONNXNMSop, TRTBatchedNMSop
from mmdeploy.utils import IR, is_dynamic_batch
@@ -166,7 +165,7 @@ def _multiclass_nms_single(boxes: Tensor,


@FUNCTION_REWRITER.register_rewriter(
    func_name='mmdeploy.codebase.mmdet.models.layers._multiclass_nms')
    func_name='mmdeploy.codebase.mmdet.models.layers.bbox_nms._multiclass_nms')
def multiclass_nms__default(ctx,
                            boxes: Tensor,
                            scores: Tensor,
@@ -223,7 +222,7 @@ def multiclass_nms__default(ctx,


@FUNCTION_REWRITER.register_rewriter(
    func_name='mmdeploy.codebase.mmdet.models.layers._multiclass_nms',
    func_name='mmdeploy.codebase.mmdet.models.layers.bbox_nms._multiclass_nms',
    backend='tensorrt')
def multiclass_nms_static(ctx,
                          boxes: Tensor,
@@ -274,12 +273,11 @@ def multiclass_nms_static(ctx,
@mark('multiclass_nms', inputs=['boxes', 'scores'], outputs=['dets', 'labels'])
def multiclass_nms(*args, **kwargs):
    """Wrapper function for `_multiclass_nms`."""
    return mmdeploy.codebase.mmdet.models.layers._multiclass_nms(
        *args, **kwargs)
    return _multiclass_nms(*args, **kwargs)


@FUNCTION_REWRITER.register_rewriter(
    func_name='mmdeploy.codebase.mmdet.models.layers._multiclass_nms',
    func_name='mmdeploy.codebase.mmdet.models.layers.bbox_nms._multiclass_nms',
    backend=Backend.COREML.value)
def multiclass_nms__coreml(ctx,
                           boxes: Tensor,
@@ -340,7 +338,7 @@ def multiclass_nms__coreml(ctx,


@FUNCTION_REWRITER.register_rewriter(
    func_name='mmdeploy.codebase.mmdet.models.layers._multiclass_nms',
    func_name='mmdeploy.codebase.mmdet.models.layers.bbox_nms._multiclass_nms',
    ir=IR.TORCHSCRIPT)
def multiclass_nms__torchscript(ctx,
                                boxes: Tensor,
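Note: because mmdeploy.codebase.mmdet.models.layers no longer re-exports _multiclass_nms, every rewriter above now targets the defining module, ...models.layers.bbox_nms, instead of the package path. A hedged sketch of that registration pattern (the rewriter name and fallback body are illustrative; ctx.origin_func is assumed to be the original-function handle provided by mmdeploy's rewriter context):

from torch import Tensor

from mmdeploy.core import FUNCTION_REWRITER


# Register against the module that defines the function; the shorter
# package-level path would no longer resolve after this refactor.
@FUNCTION_REWRITER.register_rewriter(
    func_name='mmdeploy.codebase.mmdet.models.layers.bbox_nms._multiclass_nms')
def multiclass_nms__sketch(ctx, boxes: Tensor, scores: Tensor, **kwargs):
    # Illustrative body: defer to the original implementation.
    return ctx.origin_func(boxes, scores, **kwargs)

The TensorRT, CoreML and TorchScript variants above use the same fully-qualified path; only the backend/ir keyword changes.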
@@ -1,19 +1,6 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_head import bbox_head__forward, bbox_head__predict_by_feat
from .cascade_roi_head import (cascade_roi_head__predict_bbox,
                               cascade_roi_head__predict_mask)
from .fcn_mask_head import fcn_mask_head__predict_by_feat
from .single_level_roi_extractor import (
    single_roi_extractor__forward, single_roi_extractor__forward__openvino,
    single_roi_extractor__forward__tensorrt)
from .standard_roi_head import (standard_roi_head__predict_bbox,
                                standard_roi_head__predict_mask)

__all__ = [
    'bbox_head__predict_by_feat', 'bbox_head__forward',
    'cascade_roi_head__predict_bbox', 'cascade_roi_head__predict_mask',
    'fcn_mask_head__predict_by_feat', 'single_roi_extractor__forward',
    'single_roi_extractor__forward__openvino',
    'single_roi_extractor__forward__tensorrt',
    'standard_roi_head__predict_bbox', 'standard_roi_head__predict_mask'
]
from . import bbox_head  # noqa: F401,F403
from . import cascade_roi_head  # noqa: F401,F403
from . import fcn_mask_head  # noqa: F401,F403
from . import single_level_roi_extractor  # noqa: F401,F403
from . import standard_roi_head  # noqa: F401,F403
@@ -6,7 +6,7 @@ import torch.nn.functional as F
from mmengine import ConfigDict
from torch import Tensor

from mmdeploy.codebase.mmdet import get_post_processing_params
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.codebase.mmdet.models.layers import multiclass_nms
from mmdeploy.core import FUNCTION_REWRITER, mark
@@ -6,7 +6,7 @@ import torch.nn.functional as F
from mmengine import ConfigDict
from torch import Tensor

from mmdeploy.codebase.mmdet import get_post_processing_params
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import Backend, get_backend
@@ -1,3 +1,3 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .coders import *  # noqa: F401,F403
from .prior_generators import *  # noqa: F401,F403
from . import coders  # noqa: F401,F403
from . import prior_generators  # noqa: F401,F403
@@ -1,4 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .delta_xywh_bbox_coder import *  # noqa: F401,F403
from .distance_point_bbox_coder import *  # noqa: F401,F403
from .tblr_bbox_coder import *  # noqa: F401,F403
from . import delta_xywh_bbox_coder  # noqa: F401,F403
from . import distance_point_bbox_coder  # noqa: F401,F403
from . import tblr_bbox_coder  # noqa: F401,F403
@@ -1,3 +1,3 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor import *  # noqa: F401,F403
from .point_generator import *  # noqa: F401,F403
from . import anchor  # noqa: F401,F403
from . import point_generator  # noqa: F401,F403
@@ -1,2 +1,2 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox import *  # noqa: F401,F403
from . import bbox  # noqa: F401,F403
@@ -1,2 +1,2 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .transforms import *  # noqa: F401,F403
from . import transforms  # noqa: F401,F403
@@ -1,5 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .deploy import MMDetection3d, VoxelDetection
from .models import *  # noqa: F401,F403

__all__ = ['MMDetection3d', 'VoxelDetection']
@@ -1,5 +1,5 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .base import *  # noqa: F401,F403
from .mvx_two_stage import *  # noqa: F401,F403
from .pillar_encode import *  # noqa: F401,F403
from .pillar_scatter import *  # noqa: F401,F403
from . import base  # noqa: F401,F403
from . import mvx_two_stage  # noqa: F401,F403
from . import pillar_encode  # noqa: F401,F403
from . import pillar_scatter  # noqa: F401,F403
@@ -1,5 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .deploy import MMEditing, SuperResolution
from .models import base_edit_model__forward

__all__ = ['MMEditing', 'SuperResolution', 'base_edit_model__forward']
__all__ = ['MMEditing', 'SuperResolution']
@@ -1,2 +1,2 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .base_models import *  # noqa F401, F403
from . import base_models  # noqa F401, F403
@@ -1,4 +1,2 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .base_edit_model import base_edit_model__forward

__all__ = ['base_edit_model__forward']
from . import base_edit_model  # noqa: F401,F403
@@ -1,3 +1,2 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .deploy import *  # noqa: F401,F403
from .models import *  # noqa: F401,F403
@@ -1,3 +1,3 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .text_detection import *  # noqa: F401,F403
from .text_recognition import *  # noqa: F401,F403
from . import text_detection  # noqa: F401,F403
from . import text_recognition  # noqa: F401,F403
@@ -1,9 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .fpn_cat import fpnc__forward__tensorrt
from .heads import base_text_det_head__predict, db_head__predict
from .single_stage_text_detector import single_stage_text_detector__forward

__all__ = [
    'fpnc__forward__tensorrt', 'base_text_det_head__predict',
    'single_stage_text_detector__forward', 'db_head__predict'
]
from . import fpn_cat  # noqa: F401,F403
from . import heads  # noqa: F401,F403
from . import single_stage_text_detector  # noqa: F401,F403
@@ -1,14 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
# from .base import base_recognizer__forward
from .base_decoder import base_decoder__forward
from .crnn_decoder import crnndecoder__forward_train__ncnn
from .encoder_decoder_recognizer import encoder_decoder_recognizer__forward
from .lstm_layer import bidirectionallstm__forward__ncnn
from .sar_decoder import *  # noqa: F401,F403
from .sar_encoder import sar_encoder__forward

__all__ = [
    'base_decoder__forward', 'crnndecoder__forward_train__ncnn',
    'encoder_decoder_recognizer__forward', 'bidirectionallstm__forward__ncnn',
    'sar_encoder__forward'
]
from . import base_decoder  # noqa: F401,F403
from . import crnn_decoder  # noqa: F401,F403
from . import encoder_decoder_recognizer  # noqa: F401,F403
from . import lstm_layer  # noqa: F401,F403
from . import sar_decoder  # noqa: F401,F403
from . import sar_encoder  # noqa: F401,F403
@@ -1,5 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .deploy import PoseDetection
from .models import *  # noqa: F401,F403

__all__ = ['PoseDetection']
@@ -1,4 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.

from .heads import *  # noqa: F401,F403
from .pose_estimators import *  # noqa: F401,F403
from . import heads  # noqa: F401,F403
from . import pose_estimators  # noqa: F401,F403
@@ -1,8 +1,8 @@
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from mmdeploy.codebase.mmdet import (get_post_processing_params,
                                     pad_with_value_if_necessary)
from mmdeploy.codebase.mmdet.deploy import (get_post_processing_params,
                                            pad_with_value_if_necessary)
from mmdeploy.codebase.mmrotate.core.post_processing import \
    fake_multiclass_nms_rotated
from mmdeploy.core import FUNCTION_REWRITER
@@ -1,8 +1,8 @@
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from mmdeploy.codebase.mmdet import (get_post_processing_params,
                                     pad_with_value_if_necessary)
from mmdeploy.codebase.mmdet.deploy import (get_post_processing_params,
                                            pad_with_value_if_necessary)
from mmdeploy.codebase.mmrotate.core.post_processing import \
    multiclass_nms_rotated
from mmdeploy.core import FUNCTION_REWRITER
@@ -1,9 +1,9 @@
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from mmdeploy.codebase.mmdet import (get_post_processing_params,
                                     pad_with_value_if_necessary)
from mmdeploy.codebase.mmdet.core.post_processing import multiclass_nms
from mmdeploy.codebase.mmdet.deploy import (get_post_processing_params,
                                            pad_with_value_if_necessary)
from mmdeploy.codebase.mmdet.models.layers import multiclass_nms
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import is_dynamic_shape
@@ -1,7 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F

from mmdeploy.codebase.mmdet import get_post_processing_params
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.codebase.mmrotate.core.post_processing import \
    multiclass_nms_rotated
from mmdeploy.core import FUNCTION_REWRITER
@@ -2,7 +2,7 @@
import torch
import torch.nn.functional as F

from mmdeploy.codebase.mmdet import get_post_processing_params
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.codebase.mmrotate.core.post_processing import \
    multiclass_nms_rotated
from mmdeploy.core import FUNCTION_REWRITER
@@ -1,3 +1,2 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .deploy import *  # noqa: F401,F403
from .models import *  # noqa: F401,F403
@@ -1,4 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .decode_heads import *  # noqa: F401,F403
from .segmentors import *  # noqa: F401,F403
from .utils import *  # noqa: F401,F403
from . import decode_heads  # noqa: F401,F403
from . import segmentors  # noqa: F401,F403
from . import utils  # noqa: F401,F403
@@ -1,4 +1,2 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .up_conv_block import up_conv_block__forward

__all__ = ['up_conv_block__forward']
from . import up_conv_block  # noqa: F401,F403
@@ -1,3 +1,3 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .cnn import *  # noqa: F401,F403
from .ops import *  # noqa: F401,F403
from . import cnn  # noqa: F401,F403
from . import ops  # noqa: F401,F403
@@ -2,4 +2,4 @@
from . import conv2d_adaptive_padding  # noqa: F401,F403
from .transformer import MultiHeadAttentionop

__all__ = ['conv2d_adaptive_padding', 'MultiHeadAttentionop']
__all__ = ['MultiHeadAttentionop']
@@ -1,15 +1,14 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .deform_conv import deform_conv_openvino
from .modulated_deform_conv import modulated_deform_conv_default
from .nms import *  # noqa: F401,F403
from .nms_rotated import *  # noqa: F401,F403
from .point_sample import *  # noqa: F401,F403
from .roi_align import roi_align_default
from .roi_align_rotated import roi_align_rotated_default
from .transformer import patch_embed__forward__ncnn
from . import deform_conv  # noqa: F401,F403
from . import modulated_deform_conv  # noqa: F401,F403
from . import point_sample  # noqa: F401,F403
from . import roi_align  # noqa: F401,F403
from . import roi_align_rotated  # noqa: F401,F403
from . import transformer  # noqa: F401,F403
from .nms import ONNXNMSop, TRTBatchedNMSop
from .nms_rotated import ONNXNMSRotatedOp, TRTBatchedRotatedNMSop

__all__ = [
    'roi_align_default', 'modulated_deform_conv_default',
    'deform_conv_openvino', 'roi_align_rotated_default',
    'patch_embed__forward__ncnn'
    'ONNXNMSop', 'TRTBatchedNMSop', 'TRTBatchedRotatedNMSop',
    'ONNXNMSRotatedOp'
]
@@ -1,3 +1,3 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .functions import *  # noqa: F401,F403
from .ops import *  # noqa: F401,F403
from . import functions  # noqa: F401,F403
from . import symbolics  # noqa: F401,F403
@@ -1,35 +1,22 @@
# Copyright (c) OpenMMLab. All rights reserved.
from . import multi_head_attention_forward
from .adaptive_pool import (adaptive_avg_pool2d__default,
                            adaptive_avg_pool2d__ncnn)
from .atan2 import atan2__default
from .chunk import chunk__ncnn, chunk__torchscript
from .clip import clip__coreml
from .expand import expand__ncnn
from .flatten import flatten__coreml
from .getattribute import tensor__getattribute__ncnn
from .group_norm import group_norm__ncnn
from .interpolate import interpolate__ncnn, interpolate__tensorrt
from .linear import linear__ncnn
from .masked_fill import masked_fill__onnxruntime
from .mod import mod__tensorrt
from .normalize import normalize__ncnn
from .pad import _prepare_onnx_paddings__tensorrt
from .repeat import tensor__repeat__tensorrt
from .size import tensor__size__ncnn
from .tensor_getitem import tensor__getitem__ascend
from .tensor_setitem import tensor__setitem__default
from .topk import topk__dynamic, topk__tensorrt
from .triu import triu__default

__all__ = [
    'tensor__getattribute__ncnn', 'group_norm__ncnn', 'interpolate__ncnn',
    'interpolate__tensorrt', 'linear__ncnn', 'tensor__repeat__tensorrt',
    'tensor__size__ncnn', 'topk__dynamic', 'topk__tensorrt', 'chunk__ncnn',
    'triu__default', 'atan2__default', 'normalize__ncnn', 'expand__ncnn',
    'chunk__torchscript', 'masked_fill__onnxruntime',
    'tensor__setitem__default', 'tensor__getitem__ascend',
    'adaptive_avg_pool2d__default', 'adaptive_avg_pool2d__ncnn',
    'multi_head_attention_forward', 'flatten__coreml', 'clip__coreml',
    'mod__tensorrt', '_prepare_onnx_paddings__tensorrt'
]
from . import adaptive_pool  # noqa: F401,F403
from . import atan2  # noqa: F401,F403
from . import chunk  # noqa: F401,F403
from . import clip  # noqa: F401,F403
from . import expand  # noqa: F401,F403
from . import flatten  # noqa: F401,F403
from . import getattribute  # noqa: F401,F403
from . import group_norm  # noqa: F401,F403
from . import interpolate  # noqa: F401,F403
from . import linear  # noqa: F401,F403
from . import masked_fill  # noqa: F401,F403
from . import mod  # noqa: F401,F403
from . import multi_head_attention_forward  # noqa: F401,F403
from . import normalize  # noqa: F401,F403
from . import pad  # noqa: F401,F403
from . import repeat  # noqa: F401,F403
from . import size  # noqa: F401,F403
from . import tensor_getitem  # noqa: F401,F403
from . import tensor_setitem  # noqa: F401,F403
from . import topk  # noqa: F401,F403
from . import triu  # noqa: F401,F403
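Note: after this refactor the __init__.py files rely purely on import side effects: each submodule registers its rewriters at import time through decorators, so `from . import xxx  # noqa: F401,F403` is all a package needs, with no re-exports or __all__ bookkeeping. A minimal sketch of the pattern with hypothetical module and function names:

# Hypothetical file mmdeploy/pytorch/functions/flip.py (not part of this commit).
from mmdeploy.core import FUNCTION_REWRITER


@FUNCTION_REWRITER.register_rewriter(func_name='torch.Tensor.flip')
def tensor__flip__sketch(ctx, input, *dims):
    # The decorator records this rewriter when the module is imported;
    # the body simply falls back to the original function.
    return ctx.origin_func(input, *dims)

# Hypothetical matching line in mmdeploy/pytorch/functions/__init__.py:
#   from . import flip  # noqa: F401,F403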
@@ -1,17 +0,0 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .adaptive_pool import adaptive_avg_pool2d__ncnn
from .gelu import gelu__ncnn
from .grid_sampler import grid_sampler__default
from .hardsigmoid import hardsigmoid__default
from .instance_norm import instance_norm__tensorrt
from .layer_norm import layer_norm__ncnn
from .linear import linear__ncnn
from .lstm import generic_rnn__ncnn
from .roll import roll_default
from .squeeze import squeeze__default

__all__ = [
    'grid_sampler__default', 'hardsigmoid__default', 'instance_norm__tensorrt',
    'generic_rnn__ncnn', 'squeeze__default', 'adaptive_avg_pool2d__ncnn',
    'gelu__ncnn', 'layer_norm__ncnn', 'linear__ncnn', 'roll_default'
]
mmdeploy/pytorch/symbolics/__init__.py (new file, 11 lines)
@@ -0,0 +1,11 @@
# Copyright (c) OpenMMLab. All rights reserved.
from . import adaptive_pool  # noqa: F401,F403
from . import gelu  # noqa: F401,F403
from . import grid_sampler  # noqa: F401,F403
from . import hardsigmoid  # noqa: F401,F403
from . import instance_norm  # noqa: F401,F403
from . import layer_norm  # noqa: F401,F403
from . import linear  # noqa: F401,F403
from . import lstm  # noqa: F401,F403
from . import roll  # noqa: F401,F403
from . import squeeze  # noqa: F401,F403
@@ -12,9 +12,10 @@ try:
except ImportError:
    pytest.skip(f'{Codebase.MMDET} is not installed.', allow_module_level=True)

from mmdeploy.codebase.mmdet import (clip_bboxes, get_post_processing_params,
                                     pad_with_value,
                                     pad_with_value_if_necessary)
from mmdeploy.codebase.mmdet.deploy import (clip_bboxes,
                                            get_post_processing_params,
                                            pad_with_value,
                                            pad_with_value_if_necessary)


def test_clip_bboxes():
@@ -9,6 +9,7 @@ from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase, Task, load_config
from mmdeploy.utils.test import WrapModel, check_backend, get_rewrite_outputs

import_codebase(Codebase.MMDET3D)
try:
    import_codebase(Codebase.MMDET3D)
except ImportError:
@@ -60,6 +61,7 @@ def test_pillar_encoder(backend_type: Backend):
    num_points = torch.randint(0, 32, (3945, ), dtype=torch.int32)
    coors = torch.randint(0, 10, (3945, 4), dtype=torch.int32)
    model_outputs = model.forward(features, num_points, coors)
    model_outputs = [model_outputs]
    wrapped_model = WrapModel(model, 'forward')
    rewrite_inputs = {
        'features': features,
@@ -97,6 +99,7 @@ def test_pointpillars_scatter(backend_type: Backend):
    voxel_features = torch.rand(16 * 16, 64) * 100
    coors = torch.randint(0, 10, (16 * 16, 4), dtype=torch.int32)
    model_outputs = model.forward_batch(voxel_features, coors, 1)
    model_outputs = [model_outputs]
    wrapped_model = WrapModel(model, 'forward_batch')
    rewrite_inputs = {'voxel_features': voxel_features, 'coors': coors}
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
@@ -1,8 +1,8 @@
# Copyright (c) OpenMMLab. All rights reserved.
import mmengine
import numpy as np
import pytest
import torch
from mmengine import Config

from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase
@@ -20,7 +20,7 @@ except ImportError:
@backend_checker(Backend.ONNXRUNTIME)
def test_multiclass_nms_rotated():
    from mmdeploy.codebase.mmrotate.core import multiclass_nms_rotated
    deploy_cfg = mmengine.Config(
    deploy_cfg = Config(
        dict(
            onnx_config=dict(output_names=None, input_shape=None),
            backend_config=dict(
@@ -72,7 +72,7 @@ def test_multiclass_nms_rotated_with_keep_top_k(pre_top_k):

    from mmdeploy.codebase.mmrotate.core import multiclass_nms_rotated
    keep_top_k = 15
    deploy_cfg = mmengine.Config(
    deploy_cfg = Config(
        dict(
            onnx_config=dict(
                output_names=None,
@@ -140,7 +140,7 @@ def test_delta_xywha_rbbox_coder_delta2bbox(backend_type: Backend,
                                            max_shape: tuple, proj_xy: bool,
                                            edge_swap: bool):
    check_backend(backend_type)
    deploy_cfg = mmengine.Config(
    deploy_cfg = Config(
        dict(
            onnx_config=dict(output_names=None, input_shape=None),
            backend_config=dict(type=backend_type.value, model_inputs=None),
@@ -189,7 +189,7 @@ def test_delta_xywha_rbbox_coder_delta2bbox(backend_type: Backend,
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_delta_midpointoffset_rbbox_delta2bbox(backend_type: Backend):
    check_backend(backend_type)
    deploy_cfg = mmengine.Config(
    deploy_cfg = Config(
        dict(
            onnx_config=dict(output_names=None, input_shape=None),
            backend_config=dict(type=backend_type.value, model_inputs=None),
@@ -227,7 +227,7 @@ def test_delta_midpointoffset_rbbox_delta2bbox(backend_type: Backend):
@backend_checker(Backend.ONNXRUNTIME)
def test_fake_multiclass_nms_rotated():
    from mmdeploy.codebase.mmrotate.core import fake_multiclass_nms_rotated
    deploy_cfg = mmengine.Config(
    deploy_cfg = Config(
        dict(
            onnx_config=dict(output_names=None, input_shape=None),
            backend_config=dict(
@@ -277,7 +277,7 @@ def test_fake_multiclass_nms_rotated():
def test_poly2obb_le90(backend_type: Backend):
    check_backend(backend_type)
    polys = torch.rand(1, 10, 8)
    deploy_cfg = mmengine.Config(
    deploy_cfg = Config(
        dict(
            onnx_config=dict(output_names=None, input_shape=None),
            backend_config=dict(
@@ -316,7 +316,7 @@ def test_poly2obb_le90(backend_type: Backend):
def test_poly2obb_le135(backend_type: Backend):
    check_backend(backend_type)
    polys = torch.rand(1, 10, 8)
    deploy_cfg = mmengine.Config(
    deploy_cfg = Config(
        dict(
            onnx_config=dict(output_names=None, input_shape=None),
            backend_config=dict(
@@ -351,7 +351,7 @@ def test_poly2obb_le135(backend_type: Backend):
def test_obb2poly_le135(backend_type: Backend):
    check_backend(backend_type)
    rboxes = torch.rand(1, 10, 5)
    deploy_cfg = mmengine.Config(
    deploy_cfg = Config(
        dict(
            onnx_config=dict(output_names=None, input_shape=None),
            backend_config=dict(
@@ -386,7 +386,7 @@ def test_obb2poly_le135(backend_type: Backend):
def test_gvfixcoder__decode(backend_type: Backend):
    check_backend(backend_type)

    deploy_cfg = mmengine.Config(
    deploy_cfg = Config(
        dict(
            onnx_config=dict(output_names=['output'], input_shape=None),
            backend_config=dict(type=backend_type.value),
@@ -4,11 +4,11 @@ import os
import random
from typing import Dict, List

import mmcv
import mmengine
import numpy as np
import pytest
import torch
from mmengine import Config

from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase
@@ -50,7 +50,7 @@ def convert_to_list(rewrite_output: Dict, output_names: List[str]) -> List:

def get_anchor_head_model():
    """AnchorHead Config."""
    test_cfg = mmengine.Config(
    test_cfg = Config(
        dict(
            nms_pre=2000,
            min_bbox_size=0,
@@ -81,7 +81,7 @@ def _replace_r50_with_r18(model):
    ['tests/test_codebase/test_mmrotate/data/single_stage_model.json'])
def test_forward_of_base_detector(model_cfg_path, backend):
    check_backend(backend)
    deploy_cfg = mmengine.Config(
    deploy_cfg = Config(
        dict(
            backend_config=dict(type=backend.value),
            onnx_config=dict(
@@ -96,7 +96,7 @@ def test_forward_of_base_detector(model_cfg_path, backend):
                    keep_top_k=100,
                ))))

    model_cfg = mmengine.Config(dict(model=mmcv.load(model_cfg_path)))
    model_cfg = Config(dict(model=mmengine.load(model_cfg_path)))
    model_cfg.model = _replace_r50_with_r18(model_cfg.model)

    from mmrotate.models import build_detector
@@ -118,7 +118,7 @@ def test_forward_of_base_detector(model_cfg_path, backend):


def get_deploy_cfg(backend_type: Backend, ir_type: str):
    return mmengine.Config(
    return Config(
        dict(
            backend_config=dict(type=backend_type.value),
            onnx_config=dict(
@@ -222,7 +222,7 @@ def test_rotated_single_roi_extractor(backend_type: Backend):

    single_roi_extractor = get_single_roi_extractor()
    output_names = ['roi_feat']
    deploy_cfg = mmengine.Config(
    deploy_cfg = Config(
        dict(
            backend_config=dict(type=backend_type.value),
            onnx_config=dict(output_names=output_names, input_shape=None),
@@ -265,7 +265,7 @@ def test_rotated_single_roi_extractor(backend_type: Backend):

def get_oriented_rpn_head_model():
    """Oriented RPN Head Config."""
    test_cfg = mmengine.Config(
    test_cfg = Config(
        dict(
            nms_pre=2000,
            min_bbox_size=0,
@@ -296,7 +296,7 @@ def test_get_bboxes_of_oriented_rpn_head(backend_type: Backend):
    }]

    output_names = ['dets', 'labels']
    deploy_cfg = mmengine.Config(
    deploy_cfg = Config(
        dict(
            backend_config=dict(type=backend_type.value),
            onnx_config=dict(output_names=output_names, input_shape=None),
@@ -337,7 +337,7 @@ def test_get_bboxes_of_oriented_rpn_head(backend_type: Backend):

def get_rotated_rpn_head_model():
    """Oriented RPN Head Config."""
    test_cfg = mmengine.Config(
    test_cfg = Config(
        dict(
            nms_pre=2000,
            min_bbox_size=0,
@@ -377,7 +377,7 @@ def test_get_bboxes_of_rotated_rpn_head(backend_type: Backend):
    }]

    output_names = ['dets', 'labels']
    deploy_cfg = mmengine.Config(
    deploy_cfg = Config(
        dict(
            backend_config=dict(type=backend_type.value),
            onnx_config=dict(output_names=output_names, input_shape=None),
@@ -421,7 +421,7 @@ def test_rotate_standard_roi_head__simple_test(backend_type: Backend):
    check_backend(backend_type)
    from mmrotate.models.roi_heads import OrientedStandardRoIHead
    output_names = ['dets', 'labels']
    deploy_cfg = mmengine.Config(
    deploy_cfg = Config(
        dict(
            backend_config=dict(type=backend_type.value),
            onnx_config=dict(output_names=output_names, input_shape=None),
@@ -434,7 +434,7 @@ def test_rotate_standard_roi_head__simple_test(backend_type: Backend):
                    pre_top_k=2000,
                    keep_top_k=2000))))
    angle_version = 'le90'
    test_cfg = mmengine.Config(
    test_cfg = Config(
        dict(
            nms_pre=2000,
            min_bbox_size=0,
@@ -489,7 +489,7 @@ def test_gv_ratio_roi_head__simple_test(backend_type: Backend):
    check_backend(backend_type)
    from mmrotate.models.roi_heads import GVRatioRoIHead
    output_names = ['dets', 'labels']
    deploy_cfg = mmengine.Config(
    deploy_cfg = Config(
        dict(
            backend_config=dict(type=backend_type.value),
            onnx_config=dict(output_names=output_names, input_shape=None),
@@ -503,7 +503,7 @@ def test_gv_ratio_roi_head__simple_test(backend_type: Backend):
                    keep_top_k=2000,
                    max_output_boxes_per_class=1000))))
    angle_version = 'le90'
    test_cfg = mmengine.Config(
    test_cfg = Config(
        dict(
            nms_pre=2000,
            min_bbox_size=0,
@@ -616,7 +616,7 @@ def get_roi_trans_roi_head_model():
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
    ]
    test_cfg = mmengine.Config(
    test_cfg = Config(
        dict(
            nms_pre=2000,
            min_bbox_size=0,
@@ -660,7 +660,7 @@ def test_simple_test_of_roi_trans_roi_head(backend_type: Backend):
    }

    output_names = ['det_bboxes', 'det_labels']
    deploy_cfg = mmengine.Config(
    deploy_cfg = Config(
        dict(
            backend_config=dict(type=backend_type.value),
            onnx_config=dict(output_names=output_names, input_shape=None),
@@ -2,10 +2,10 @@
import os
from tempfile import NamedTemporaryFile, TemporaryDirectory

import mmengine
import numpy as np
import pytest
import torch
from mmengine import Config
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
@@ -23,7 +23,7 @@ except ImportError:

model_cfg_path = 'tests/test_codebase/test_mmrotate/data/model.py'
model_cfg = load_config(model_cfg_path)[0]
deploy_cfg = mmengine.Config(
deploy_cfg = Config(
    dict(
        backend_config=dict(type='onnxruntime'),
        codebase_config=dict(
@@ -2,10 +2,10 @@
import os.path as osp
from tempfile import NamedTemporaryFile

import mmengine
import numpy as np
import pytest
import torch
from mmengine import Config

import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.codebase import import_codebase
@@ -37,7 +37,7 @@ class TestEnd2EndModel:
            'labels': torch.rand(1, 10)
        }
        cls.wrapper.set(outputs=cls.outputs)
        deploy_cfg = mmengine.Config(
        deploy_cfg = Config(
            {'onnx_config': {
                'output_names': ['dets', 'labels']
            }})
@@ -90,7 +90,7 @@ class TestEnd2EndModel:
def test_build_rotated_detection_model():
    model_cfg_path = 'tests/test_codebase/test_mmrotate/data/model.py'
    model_cfg = load_config(model_cfg_path)[0]
    deploy_cfg = mmengine.Config(
    deploy_cfg = Config(
        dict(
            backend_config=dict(type='onnxruntime'),
            onnx_config=dict(output_names=['dets', 'labels']),