Mirror of https://github.com/open-mmlab/mmdeploy.git
Reformat multi-line logs and docstrings (#1489)

parent 7b3c3bc223
commit 4046e13146
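The pattern this commit removes is the same almost everywhere below: a trailing backslash placed inside a string literal escapes the newline, so the literal continues onto the next source line and swallows all of that line's indentation. The replacement is implicit concatenation of adjacent literals, with the continuation backslash (or enclosing parentheses) kept outside the quotes. A minimal, self-contained sketch of the difference:

# Backslash inside the literal: the next line's indentation leaks
# into the message.
msg_old = 'All elements in the \
        list should be str'
assert '        list' in msg_old  # eight stray spaces

# Backslash outside the literal: adjacent string literals are joined
# at compile time, and whitespace between them is ignored.
msg_new = 'All elements in the ' \
          'list should be str'
assert msg_new == 'All elements in the list should be str'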
@@ -30,7 +30,7 @@ def visualize_model(model_cfg: Union[str, mmcv.Config],
 >>> img = 'demo.jpg'
 >>> device = 'cpu'
 >>> visualize_model(model_cfg, deploy_cfg, model, \
-img, device, show_result=True)
+>>> img, device, show_result=True)

 Args:
 model_cfg (str | mmcv.Config): Model config file or Config object.
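The change above touches a doctest-style example: doctest treats unprompted lines after a `>>>` line as expected output, so the bare continuation read as output rather than as part of the call. Prefixing it with `>>>` at least keeps it reading as input; the canonical runnable form would use the `... ` continuation prompt, as in this sketch (where `add` is a stand-in, not mmdeploy API):

def add(a, b):
    """Add two ints.

    >>> add(1,
    ...     2)
    3
    """
    return a + b

if __name__ == '__main__':
    import doctest
    doctest.testmod()  # passes; the '...' line continues the call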
@@ -59,8 +59,8 @@ def visualize_model(model_cfg: Union[str, mmcv.Config],

 if isinstance(model, (list, tuple)):
 assert len(model) > 0, 'Model should have at least one element.'
-assert all([isinstance(m, str) for m in model]), 'All elements in the \
-list should be str'
+assert all([isinstance(m, str) for m in model]), \
+'All elements in the list should be str'

 if backend == Backend.PYTORCH:
 model = task_processor.init_pytorch_model(model[0])
@@ -54,9 +54,8 @@ class BaseWrapper(torch.nn.Module, metaclass=ABCMeta):
 """Set the output names."""
 self._output_names = value

-def output_to_list(self, output_dict: Dict[str,
-torch.Tensor]) -> \
-List[torch.Tensor]:
+def output_to_list(
+self, output_dict: Dict[str, torch.Tensor]) -> List[torch.Tensor]:
 """Convert the output dict of forward() to a tensor list.

 Args:
@@ -44,11 +44,11 @@ class ORTWrapper(BaseWrapper):
 logger = get_root_logger()
 if osp.exists(ort_custom_op_path):
 session_options.register_custom_ops_library(ort_custom_op_path)
-logger.info(f'Successfully loaded onnxruntime custom ops from \
-{ort_custom_op_path}')
+logger.info('Successfully loaded onnxruntime custom ops from '
+f'{ort_custom_op_path}')
 else:
-logger.warning(f'The library of onnxruntime custom ops does \
-not exist: {ort_custom_op_path}')
+logger.warning('The library of onnxruntime custom ops does '
+f'not exist: {ort_custom_op_path}')
 device_id = parse_device_id(device)
 providers = ['CPUExecutionProvider'] \
 if device == 'cpu' else \
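Besides dropping the in-string backslash, the logging rewrites narrow the `f` prefix to the fragment that actually interpolates, and rely on the call's open parenthesis so no continuation backslash is needed at all. A hedged sketch using the stdlib logger in place of mmdeploy's `get_root_logger()` and a hypothetical library path:

import logging

logger = logging.getLogger(__name__)  # stand-in for get_root_logger()
ort_custom_op_path = '/tmp/libmmdeploy_ort_ops.so'  # hypothetical path

# Adjacent literals inside the parentheses need no backslash; only the
# fragment containing a placeholder carries the f prefix.
logger.info('Successfully loaded onnxruntime custom ops from '
            f'{ort_custom_op_path}')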
@@ -34,6 +34,6 @@ def load_tensorrt_plugin() -> bool:
 logger.info(f'Successfully loaded tensorrt plugins from {lib_path}')
 success = True
 else:
-logger.warning(f'Could not load the library of tensorrt plugins. \
-Because the file does not exist: {lib_path}')
+logger.warning(f'Could not load the library of tensorrt plugins.'
+f'Because the file does not exist: {lib_path}')
 return success
@@ -68,8 +68,8 @@ def onnx2tensorrt(work_dir: str,
 if not isinstance(input_shapes, Dict):
 input_shapes = dict(zip(input_names, input_shapes))

-assert device.startswith('cuda'), f'TensorRT requires cuda device, \
-but given: {device}'
+assert device.startswith('cuda'), 'TensorRT requires cuda device,' \
+f'but given: {device}'

 device_id = parse_device_id(device)
 assert save_file.endswith(
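One subtlety of the concatenation idiom used in this hunk: Python joins adjacent literals with nothing in between, so any separating space must be written into one of the fragments. A quick check of the joined message:

device = 'cpu'
msg = 'TensorRT requires cuda device,' \
      f'but given: {device}'
print(msg)  # TensorRT requires cuda device,but given: cpu
# To keep a space, end the first fragment with '..., ' or start the
# second with ' but given'.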
@@ -84,8 +84,8 @@ class TRTWrapper(BaseWrapper):
 self.engine = load(engine)

 if not isinstance(self.engine, trt.ICudaEngine):
-raise TypeError(f'`engine` should be str or trt.ICudaEngine, \
-but given: {type(self.engine)}')
+raise TypeError('`engine` should be str or trt.ICudaEngine,'
+f'but given: {type(self.engine)}')

 self._register_state_dict_hook(TRTWrapper.__on_state_dict)
 self.context = self.engine.create_execution_context()
@@ -300,8 +300,8 @@ class Classification(BaseTask):
 if 'topk' not in postprocess:
 topk = (1, )
 logger = get_root_logger()
-logger.warning('no topk in postprocess config, using default \
-topk value.')
+logger.warning('no topk in postprocess config, using default'
+'topk value.')
 else:
 topk = postprocess.topk
 postprocess.topk = max(topk)
@@ -178,8 +178,8 @@ def get_classes_from_config(model_cfg: Union[str, mmcv.Config]):

 if class_names is None:
 logger = get_root_logger()
-logger.warning(f'Use generated class names, because \
-it failed to parse CLASSES from config: {data_cfg}')
+logger.warning(f'Use generated class names, because '
+f'it failed to parse CLASSES from config: {data_cfg}')
 num_classes = model_cfg.model.head.num_classes
 class_names = [str(i) for i in range(num_classes)]
 return class_names
@@ -583,8 +583,8 @@ class NCNNEnd2EndModel(End2EndModel):
 device: str, class_names: Sequence[str],
 model_cfg: Union[str, mmcv.Config],
 deploy_cfg: Union[str, mmcv.Config], **kwargs):
-assert backend == Backend.NCNN, f'only supported ncnn, but give \
-{backend.value}'
+assert backend == Backend.NCNN, 'only supported ncnn, but give' \
+f'{backend.value}'

 super(NCNNEnd2EndModel,
 self).__init__(backend, backend_files, device, class_names,
@@ -669,8 +669,8 @@ class RKNNModel(End2EndModel):
 device: str, class_names: Sequence[str],
 model_cfg: Union[str, mmcv.Config],
 deploy_cfg: Union[str, mmcv.Config], **kwargs):
-assert backend == Backend.RKNN, f'only supported RKNN, but give \
-{backend.value}'
+assert backend == Backend.RKNN, 'only supported RKNN, but give' \
+f'{backend.value}'

 super(RKNNModel, self).__init__(backend, backend_files, device,
 class_names, deploy_cfg, **kwargs)
@@ -41,8 +41,8 @@ def focus__forward__ncnn(ctx, self, x):
 x (Tensor): The calculated tensor with shape (N, 4*C, H//2, W//2).
 """
 batch_size, c, h, w = x.shape
-assert h % 2 == 0 and w % 2 == 0, f'focus for yolox needs even feature\
-height and width, got {(h, w)}.'
+assert h % 2 == 0 and w % 2 == 0, 'focus for yolox needs even feature' \
+f'height and width, got {(h, w)}.'

 x = x.reshape(batch_size, c * h, 1, w)
 _b, _c, _h, _w = x.shape
@@ -356,8 +356,8 @@ def base_dense_head__get_bboxes__ncnn(ctx,
 """
 assert len(cls_scores) == len(bbox_preds)
 deploy_cfg = ctx.cfg
-assert not is_dynamic_shape(deploy_cfg), 'base_dense_head for ncnn\
-only supports static shape.'
+assert not is_dynamic_shape(deploy_cfg), 'base_dense_head for ncnn' \
+'only supports static shape.'

 if score_factors is None:
 # e.g. Retina, FreeAnchor, Foveabox, etc.
@@ -367,8 +367,8 @@ def base_dense_head__get_bboxes__ncnn(ctx,
 with_score_factors = True
 assert len(cls_scores) == len(score_factors)
 batch_size = cls_scores[0].shape[0]
-assert batch_size == 1, f'ncnn deployment requires batch size 1, \
-got {batch_size}.'
+assert batch_size == 1, 'ncnn deployment requires batch size 1,' \
+f'got {batch_size}.'

 num_levels = len(cls_scores)
 if with_score_factors:
@@ -384,13 +384,15 @@ def base_dense_head__get_bboxes__ncnn(ctx,
 vars = torch.tensor([normalizer, normalizer, 1, 1],
 dtype=torch.float32)
 else:
-assert len(normalizer) == 4, f'normalizer of tblr must be 4,\
-got {len(normalizer)}'
+assert len(normalizer) == 4, 'normalizer of tblr must be 4,' \
+f' got {len(normalizer)}'

-assert (normalizer[0] == normalizer[1] and normalizer[2]
-== normalizer[3]), 'normalizer between top \
-and bottom, left and right must be the same value, or \
-we can not transform it to delta_xywh format.'
+assert (
+normalizer[0] == normalizer[1]
+and normalizer[2] == normalizer[3]
+), 'normalizer between top and bottom,' \
+'left and right must be the same value, or' \
+'we can not transform it to delta_xywh format.'

 vars = torch.tensor([normalizer[0], normalizer[2], 1, 1],
 dtype=torch.float32)
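This hunk reshapes the boolean expression as well as the message: the whole condition is parenthesized so each clause gets its own line, instead of the expression wrapping at an arbitrary point. A self-contained sketch with a hypothetical normalizer value:

normalizer = [4.0, 4.0, 8.0, 8.0]  # hypothetical tblr normalizer

assert (
    normalizer[0] == normalizer[1]
    and normalizer[2] == normalizer[3]
), 'normalizer between top and bottom, ' \
   'left and right must be the same value, or ' \
   'we can not transform it to delta_xywh format.'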
@@ -80,7 +80,7 @@ def yolov3_head__get_bboxes(ctx,
 pred_map = pred_map.permute(0, 2, 3,
 1).reshape(batch_size, -1, self.num_attrib)
 # Inplace operation like
-# ```pred_map[..., :2] = \torch.sigmoid(pred_map[..., :2])```
+# ```pred_map[..., :2] = torch.sigmoid(pred_map[..., :2])```
 # would create constant tensor when exporting to onnx
 pred_map_conf = torch.sigmoid(pred_map[..., :2])
 pred_map_rest = pred_map[..., 2:]
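The backslash removed above sat in a comment, where it was merely confusing. Inside a string, the same sequence would be an escape: `\t` before `torch` is a tab character, not a backslash followed by `t`. Two lines make the point:

s = '= \torch.sigmoid(x)'          # '\t' is the tab escape
assert s == '= ' + '\t' + 'orch.sigmoid(x)'
assert '\\torch' not in s          # no literal backslash survives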
@@ -188,8 +188,9 @@ def yolox_head__get_bboxes__ncnn(ctx,
 batch_mlvl_scores = torch.cat([dummy_cls_scores, cls_scores], dim=2)
 score_factor = torch.cat(flatten_objectness, dim=1).sigmoid()
 flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
-assert flatten_priors.shape[-1] == 4, f'yolox needs (B, N, 4) priors, got\
-(B, N, {flatten_priors.shape[-1]})'
+assert flatten_priors.shape[-1] == 4, \
+'yolox needs (B, N, 4) priors, got ' \
+f'(B, N, {flatten_priors.shape[-1]})'
 prior_box_x1 = (flatten_priors[:, :, 0:1] - flatten_priors[:, :, 2:3] / 2)\
 / img_width
 prior_box_y1 = (flatten_priors[:, :, 1:2] - flatten_priors[:, :, 3:4] / 2)\
@@ -53,8 +53,8 @@ def process_model_config(model_cfg: mmcv.Config,
 test_pipeline[1].transforms[0].height = input_shape[1]
 test_pipeline[1].transforms[0].max_width = input_shape[0]
 else:
-raise ValueError(f'Transforms[0] should be ResizeOCR, but got\
-{test_pipeline[1].transforms[0].type}')
+raise ValueError('Transforms[0] should be ResizeOCR, but got'
+f'{test_pipeline[1].transforms[0].type}')
 else:
 test_pipeline[1].update(resize)
 model_cfg.data.test.pipeline = test_pipeline
@@ -25,11 +25,11 @@ def gv_ratio_roi_head__simple_test_bboxes(ctx,
 Default: False.

 Returns:
-tuple[list[Tensor], list[Tensor]]: The first list contains \
-the boxes of the corresponding image in a batch, each \
-tensor has the shape (num_boxes, 6) and last dimension \
-6 represent (x, y, w, h, theta, score). Each Tensor \
-in the second list is the labels with shape (num_boxes, ). \
+tuple[list[Tensor], list[Tensor]]: The first list contains
+the boxes of the corresponding image in a batch, each
+tensor has the shape (num_boxes, 6) and last dimension
+6 represent (x, y, w, h, theta, score). Each Tensor
+in the second list is the labels with shape (num_boxes, ).
 The length of both lists should be equal to batch_size.
 """
@@ -57,11 +57,11 @@ def oriented_standard_roi_head__simple_test_bboxes(ctx,
 Default: False.

 Returns:
-tuple[list[Tensor], list[Tensor]]: The first list contains \
-the boxes of the corresponding image in a batch, each \
-tensor has the shape (num_boxes, 6) and last dimension \
-6 represent (x, y, w, h, theta, score). Each Tensor \
-in the second list is the labels with shape (num_boxes, ). \
+tuple[list[Tensor], list[Tensor]]: The first list contains
+the boxes of the corresponding image in a batch, each
+tensor has the shape (num_boxes, 6) and last dimension
+6 represent (x, y, w, h, theta, score). Each Tensor
+in the second list is the labels with shape (num_boxes, ).
 The length of both lists should be equal to batch_size.
 """
@@ -204,9 +204,9 @@ def mark(func_name: Optional[str] = None,

 Args:
 func_name (str): The name of the function where marks come from.
-inputs (Sequence[str]): The input names of the marks. The final name \
+inputs (Sequence[str]): The input names of the marks. The final name
 might have suffix if inputs is list or dictionary.
-outputs (Sequence[str]): The output names of the marks. The final \
+outputs (Sequence[str]): The output names of the marks. The final
 name might have suffix if outputs is list or dictionary.

 Returns:
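In the docstring hunks above, the backslashes are dropped rather than converted: inside a (non-raw) docstring the escaped newline fuses wrapped lines into one long physical line, leaked indentation included, which is exactly what `help()` and rendered API docs then show. A throwaway function demonstrates the fusing:

def wrapped():
    """The first list contains \
        the boxes of the corresponding image.
    """

# The escaped newline fused both lines, indentation and all: the first
# physical line of __doc__ runs all the way to the period.
first_line = wrapped.__doc__.splitlines()[0]
assert first_line.endswith('the boxes of the corresponding image.')
assert '    the boxes' in first_line  # leaked indentation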
@@ -19,8 +19,7 @@ class SymbolicRewriter:
 recover after exiting the RewriteContext.

 Examples:
->>> @SYMBOLIC_REWRITER.register_symbolic('squeeze', \
->>> is_pytorch=True)
+>>> @SYMBOLIC_REWRITER.register_symbolic('squeeze', is_pytorch=True)
 >>> def squeeze_default(ctx, g, self, dim=None):
 >>> if dim is None:
 >>> dims = []
@@ -49,9 +48,9 @@ class SymbolicRewriter:
 Args:
 func_name (str): The function name/path to override the symbolic.
 backend (str): The rewriter will be activated on which backend.
-is_pytorch (bool): Enable this flag if func_name is the name of \
+is_pytorch (bool): Enable this flag if func_name is the name of
 a pytorch builtin function.
-arg_descriptors (Sequence[str]): The argument descriptors of the \
+arg_descriptors (Sequence[str]): The argument descriptors of the
 symbol.
 ir (IR): The rewriter will be activated on which IR.
 extra_checkers (Checker | List[Checker] | None): Other requirements
@@ -584,8 +584,8 @@ def test_topk(backend,
 input = torch.rand(1, 8, 12, 17)
 else:
 input = input_list[0]
-assert input.shape[0] == 1, (f'ncnn batch must be 1, \
-but got {input.shape[0]}')
+assert input.shape[0] == 1, ('ncnn batch must be 1, '
+f'but got {input.shape[0]}')

 def topk_function(inputs):
 return torch.Tensor.topk(inputs, k, dim, largest, sorted)
@@ -635,11 +635,11 @@ def test_shape(backend,
 else:
 input = input_list[0]
 assert input.dim() == dim + 1, 'input.dim() must equal to dim + 1'
-assert tuple(input.shape) == orig_shape, 'input.shape must the \
-same as orig_shape'
+assert tuple(input.shape) == orig_shape, ('input.shape must the '
+'same as orig_shape')

-assert input.shape[0] == 1, (f'ncnn batch must be 1, \
-but got {input.shape[0]}')
+assert input.shape[0] == 1, ('ncnn batch must be 1, '
+f'but got {input.shape[0]}')

 shape_node = make_node('Shape', input_names, output_names)
 assert len(input_names) == 1, 'length of input_names must be 1'
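The test hunks settle on a third spelling: parenthesize the assert message so the fragments stack on separate lines with no backslashes anywhere. A sketch (torch is the project's existing dependency):

import torch

input = torch.rand(1, 8, 12, 17)

# Parentheses around the message allow plain stacking of fragments.
assert input.shape[0] == 1, ('ncnn batch must be 1, '
                             f'but got {input.shape[0]}')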
@@ -688,13 +688,13 @@ def test_constantofshape(backend,
 input = input_list[0]
 assert input.dim() == dim + 1, 'input.dim() must equal to dim + 1'
 assert tuple(input.shape) == (n, c, h,
-w)[-dim - 1:], 'input.shape must the \
-same as orig_shape'
+w)[-dim - 1:], ('input.shape must the '
+'same as orig_shape')

-assert input.shape[0] == 1, (f'ncnn input batch must be 1, \
-got {input.shape[0]}')
-assert input[0][0] == 1, (f'ncnn output mat batch must be 1, \
-got {input[0][0]}')
+assert input.shape[0] == 1, ('ncnn input batch must be 1, '
+f'got {input.shape[0]}')
+assert input[0][0] == 1, ('ncnn output mat batch must be 1, '
+f'got {input[0][0]}')

 constantofshape_node = make_node(
 'ConstantOfShape', input_names, output_names, value=float(val))
@@ -745,10 +745,10 @@ def test_gather(backend,
 else:
 data = input_list[0]
 indice = input_list[1]
-assert data.shape[0] == 1, (f'ncnn batch must be 1, \
-but got {data.shape[0]}')
-assert indice.shape[0] == 1, (f'ncnn batch must be 1, \
-but got {indice.shape[0]}')
+assert data.shape[0] == 1, ('ncnn batch must be 1, '
+f'but got {data.shape[0]}')
+assert indice.shape[0] == 1, ('ncnn batch must be 1, '
+f'but got {indice.shape[0]}')

 gather_node = make_node('Gather', input_names, output_names, axis=axis + 1)
 gather_graph = make_graph([gather_node], 'gather_graph', [
@@ -766,8 +766,8 @@ def test_gather(backend,
 import importlib

 import onnxruntime
-assert importlib.util.find_spec('onnxruntime') is not None, 'onnxruntime \
-not installed.'
+assert importlib.util.find_spec('onnxruntime') is not None, \
+'onnxruntime not installed.'

 import numpy as np
 session = onnxruntime.InferenceSession(gather_model.SerializeToString())
@@ -795,11 +795,12 @@ def test_tensorslice(backend, dim, input_list=None, save_dir=None):
 input = torch.rand((8, 12, 17)[-dim:]).unsqueeze(0)
 else:
 input = input_list[0]
-assert input.dim() == dim + 1, f'input.dim() must equal to \
-dim + 1, expected: {dim + 1}, got: {input.dim()}'
+assert input.dim() == dim + 1, ('input.dim() must equal to '
+f'dim + 1, expected: {dim + 1}, '
+f'got: {input.dim()}')

-assert input.shape[0] == 1, (f'ncnn batch must be 1, \
-but got {input.shape[0]}')
+assert input.shape[0] == 1, ('ncnn batch must be 1, '
+f'but got {input.shape[0]}')

 def tensorslice_function(inputs):
 if dim == 1:
@@ -835,10 +836,10 @@ def test_expand(backend,
 else:
 input = input_list[0]
 target = input_list[1]
-assert input.shape[0] == 1, (f'ncnn batch must be 1, \
-but not {input.shape[0]}')
-assert target.shape[0] == 1, (f'ncnn batch must be 1, \
-but not {target.shape[0]}')
+assert input.shape[0] == 1, (f'ncnn batch must be 1, '
+f'but not {input.shape[0]}')
+assert target.shape[0] == 1, (f'ncnn batch must be 1, '
+f'but not {target.shape[0]}')

 def expand_function(input, target):
 return input.expand_as(target)
@@ -34,8 +34,8 @@ def parse_args():
 help='the dir to save logs and models')
 parser.add_argument(
 '--calib-dataset-cfg',
-help='dataset config path used to calibrate in int8 mode. If not \
-specified, it will use "val" dataset in model config instead.',
+help=('dataset config path used to calibrate in int8 mode. If not '
+'specified, it will use "val" dataset in model config instead.'),
 default=None)
 parser.add_argument(
 '--device', help='device used for conversion', default='cpu')
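The same parenthesized concatenation works for argparse help strings, as in the hunk above; this stdlib-only sketch shows the wrapped help rendering as one sentence:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    '--calib-dataset-cfg',
    help=('dataset config path used to calibrate in int8 mode. If not '
          'specified, it will use "val" dataset in model config instead.'),
    default=None)

print(parser.format_help())  # the two fragments read as one sentence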
@@ -198,9 +198,9 @@ def main():
 assert len(model_params) == len(ir_files)

 from mmdeploy.apis.tensorrt import is_available as trt_is_available
-assert trt_is_available(
-), 'TensorRT is not available,' \
-+ ' please install TensorRT and build TensorRT custom ops first.'
+assert trt_is_available(), (
+'TensorRT is not available,'
+' please install TensorRT and build TensorRT custom ops first.')

 from mmdeploy.apis.tensorrt import onnx2tensorrt
 PIPELINE_MANAGER.enable_multiprocess(True, [onnx2tensorrt])
@@ -229,9 +229,9 @@ def main():
 from mmdeploy.apis.ncnn import is_available as is_available_ncnn

 if not is_available_ncnn():
-logger.error('ncnn support is not available, please make sure \
-1) `mmdeploy_onnx2ncnn` existed in `PATH` \
-2) python import ncnn success')
+logger.error('ncnn support is not available, please make sure:\n'
+'1) `mmdeploy_onnx2ncnn` existed in `PATH`\n'
+'2) python import ncnn success')
 exit(1)

 import mmdeploy.apis.ncnn as ncnn_api
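Here and in the snpe hunk that follows, the commit goes one step further than joining fragments: explicit `\n` escapes make the numbered requirements print one per line instead of as a space-padded run. A sketch with the stdlib logger standing in for mmdeploy's:

import logging

logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(__name__)  # stand-in for mmdeploy's logger

logger.error('ncnn support is not available, please make sure:\n'
             '1) `mmdeploy_onnx2ncnn` existed in `PATH`\n'
             '2) python import ncnn success')
# ncnn support is not available, please make sure:
# 1) `mmdeploy_onnx2ncnn` existed in `PATH`
# 2) python import ncnn success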
@@ -279,9 +279,10 @@ def main():
 from mmdeploy.apis.snpe import is_available as is_available

 if not is_available():
-logger.error('snpe support is not available, please check \
-1) `snpe-onnx-to-dlc` existed in `PATH` 2) snpe only support \
-ubuntu18.04')
+logger.error('snpe support is not available, please check\n'
+'1) `snpe-onnx-to-dlc` existed in `PATH`\n'
+'2) snpe only support\n'
+'ubuntu18.04')
 exit(1)

 import mmdeploy.apis.snpe as snpe_api