From acf1dc5d8844b31ddd6bb361b10d86fb512cc81e Mon Sep 17 00:00:00 2001
From: "q.yao"
Date: Wed, 17 Nov 2021 14:20:29 +0800
Subject: [PATCH] [Enhancement] Update function name and docstring in mmdeploy.pytorch (#191)

* Update function name and docstring in mmdeploy.pytorch

* remove in docstring
---
 mmdeploy/pytorch/functions/__init__.py     | 20 ++++++++--------
 mmdeploy/pytorch/functions/getattribute.py |  8 +++++--
 mmdeploy/pytorch/functions/group_norm.py   |  8 +++++--
 mmdeploy/pytorch/functions/interpolate.py  | 26 ++++++++++++++-------
 mmdeploy/pytorch/functions/linear.py       | 13 +++++++----
 mmdeploy/pytorch/functions/repeat.py       | 13 +++++++++--
 mmdeploy/pytorch/functions/size.py         |  8 +++++--
 mmdeploy/pytorch/functions/topk.py         | 27 ++++++++++++++++++----
 mmdeploy/pytorch/ops/__init__.py           | 17 +++++++-------
 mmdeploy/pytorch/ops/adaptive_avg_pool.py  | 22 +++++++++++++-----
 mmdeploy/pytorch/ops/grid_sampler.py       | 14 ++++++++---
 mmdeploy/pytorch/ops/instance_norm.py      |  3 ++-
 mmdeploy/pytorch/ops/squeeze.py            |  8 +++++--
 13 files changed, 133 insertions(+), 54 deletions(-)

diff --git a/mmdeploy/pytorch/functions/__init__.py b/mmdeploy/pytorch/functions/__init__.py
index 33990196e..c21756dbc 100644
--- a/mmdeploy/pytorch/functions/__init__.py
+++ b/mmdeploy/pytorch/functions/__init__.py
@@ -1,13 +1,13 @@
-from .getattribute import getattribute_static
-from .group_norm import group_norm_ncnn
-from .interpolate import interpolate_static
-from .linear import linear_ncnn
-from .repeat import repeat_static
-from .size import size_of_tensor_static
-from .topk import topk_dynamic, topk_static
+from .getattribute import tensor__getattribute__ncnn
+from .group_norm import group_norm__ncnn
+from .interpolate import interpolate__ncnn
+from .linear import linear__ncnn
+from .repeat import tensor__repeat__tensorrt
+from .size import tensor__size__ncnn
+from .topk import topk__dynamic, topk__tensorrt
 
 __all__ = [
-    'getattribute_static', 'group_norm_ncnn', 'interpolate_static',
-    'linear_ncnn', 'repeat_static', 'size_of_tensor_static', 'topk_static',
-    'topk_dynamic'
+    'tensor__getattribute__ncnn', 'group_norm__ncnn', 'interpolate__ncnn',
+    'linear__ncnn', 'tensor__repeat__tensorrt', 'tensor__size__ncnn',
+    'topk__dynamic', 'topk__tensorrt'
 ]
diff --git a/mmdeploy/pytorch/functions/getattribute.py b/mmdeploy/pytorch/functions/getattribute.py
index 5f8e5cef4..d975c9f23 100644
--- a/mmdeploy/pytorch/functions/getattribute.py
+++ b/mmdeploy/pytorch/functions/getattribute.py
@@ -5,8 +5,12 @@ from mmdeploy.core import FUNCTION_REWRITER
 
 @FUNCTION_REWRITER.register_rewriter(
     func_name='torch.Tensor.__getattribute__', backend='ncnn')
-def getattribute_static(ctx, self, name):
-    """Rewrite `__getattribute__` for NCNN backend."""
+def tensor__getattribute__ncnn(ctx, self: torch.Tensor, name: str):
+    """Rewrite `__getattribute__` of `torch.Tensor` for NCNN backend.
+
+    The ONNX Shape node is not supported by ncnn. This function transforms
+    the dynamic shape into a constant shape.
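+
+    A minimal sketch of the intended effect during a traced export
+    (illustrative values only):
+
+    Example::
+
+        >>> x = torch.rand(2, 3)
+        >>> x.shape  # entries folded to Python ints, so no ONNX Shape node
+        torch.Size([2, 3])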
+    """
     ret = ctx.origin_func(self, name)
 
     if name == 'shape':
diff --git a/mmdeploy/pytorch/functions/group_norm.py b/mmdeploy/pytorch/functions/group_norm.py
index 5e5c9be15..536210a6e 100644
--- a/mmdeploy/pytorch/functions/group_norm.py
+++ b/mmdeploy/pytorch/functions/group_norm.py
@@ -7,7 +7,7 @@ from mmdeploy.core import FUNCTION_REWRITER
 
 @FUNCTION_REWRITER.register_rewriter(
     func_name='torch.nn.functional.group_norm', backend='ncnn')
-def group_norm_ncnn(
+def group_norm__ncnn(
     ctx,
     input: torch.Tensor,
     num_groups: int,
@@ -15,7 +15,11 @@
     bias: Union[torch.Tensor, torch.NoneType] = None,
     eps: float = 1e-05,
 ) -> torch.Tensor:
-    """Rewrite `group_norm` for NCNN backend."""
+    """Rewrite `group_norm` for NCNN backend.
+
+    InstanceNorm in ncnn requires the input to have shape [C, H, W], so we
+    have to reshape the input tensor before applying it.
+    """
     input_shape = input.shape
     batch_size = input_shape[0]
     # We cannot use input.reshape(batch_size, num_groups, -1, 1)
diff --git a/mmdeploy/pytorch/functions/interpolate.py b/mmdeploy/pytorch/functions/interpolate.py
index 716873fcc..53627753c 100644
--- a/mmdeploy/pytorch/functions/interpolate.py
+++ b/mmdeploy/pytorch/functions/interpolate.py
@@ -1,16 +1,26 @@
+from typing import Optional, Tuple, Union
+
+import torch
+
 from mmdeploy.core import FUNCTION_REWRITER
 
 
 @FUNCTION_REWRITER.register_rewriter(
     func_name='torch.nn.functional.interpolate', backend='ncnn')
-def interpolate_static(ctx,
-                       input,
-                       size=None,
-                       scale_factor=None,
-                       mode='nearest',
-                       align_corners=None,
-                       recompute_scale_factor=None):
-    """Rewrite `interpolate` for NCNN backend."""
+def interpolate__ncnn(ctx,
+                      input: torch.Tensor,
+                      size: Optional[Union[int, Tuple[int], Tuple[int, int],
+                                           Tuple[int, int, int]]] = None,
+                      scale_factor: Optional[Union[float,
+                                                   Tuple[float]]] = None,
+                      mode: str = 'nearest',
+                      align_corners: Optional[bool] = None,
+                      recompute_scale_factor: Optional[bool] = None):
+    """Rewrite `interpolate` for NCNN backend.
+
+    ncnn requires `size` in the ONNX node to be constant. We use
+    `scale_factor` instead of `size` to avoid dynamic sizes.
+    """
 
     input_size = input.shape
     if scale_factor is None:
diff --git a/mmdeploy/pytorch/functions/linear.py b/mmdeploy/pytorch/functions/linear.py
index 4e6c423ba..c713c45f3 100644
--- a/mmdeploy/pytorch/functions/linear.py
+++ b/mmdeploy/pytorch/functions/linear.py
@@ -1,4 +1,4 @@
-from typing import Union
+from typing import Optional, Union
 
 import torch
 
@@ -7,13 +7,18 @@ from mmdeploy.core import FUNCTION_REWRITER
 
 @FUNCTION_REWRITER.register_rewriter(
     func_name='torch.nn.functional.linear', backend='ncnn')
-def linear_ncnn(
+def linear__ncnn(
     ctx,
     input: torch.Tensor,
     weight: torch.Tensor,
-    bias: Union[torch.Tensor, torch.NoneType] = None,
+    bias: Optional[Union[torch.Tensor, torch.NoneType]] = None,
 ):
-    """Rewrite `linear` for NCNN backend."""
+    """Rewrite `linear` for NCNN backend.
+
+    The broadcast rules differ between ncnn and PyTorch. This function adds
+    extra reshape and transpose ops to support linear operations on inputs
+    of different shapes.
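+
+    A minimal sketch of a case this enables, with illustrative shapes:
+
+    Example::
+
+        >>> x = torch.rand(1, 8, 16)  # 3-dim input needs the extra reshape
+        >>> w = torch.rand(32, 16)
+        >>> torch.nn.functional.linear(x, w).shape
+        torch.Size([1, 8, 32])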
+    """
 
     origin_func = ctx.origin_func
diff --git a/mmdeploy/pytorch/functions/repeat.py b/mmdeploy/pytorch/functions/repeat.py
index 58e9f55d4..4e9e0d975 100644
--- a/mmdeploy/pytorch/functions/repeat.py
+++ b/mmdeploy/pytorch/functions/repeat.py
@@ -1,10 +1,19 @@
+from typing import Sequence, Union
+
+import torch
+
 from mmdeploy.core import FUNCTION_REWRITER
 
 
 @FUNCTION_REWRITER.register_rewriter(
     func_name='torch.Tensor.repeat', backend='tensorrt')
-def repeat_static(ctx, input, *size):
-    """Rewrite `repeat` for NCNN backend."""
+def tensor__repeat__tensorrt(ctx, input: torch.Tensor,
+                             *size: Union[torch.Size, Sequence[int]]):
+    """Rewrite `repeat` for TensorRT backend.
+
+    Some layers in TensorRT cannot be applied to the batch axis. Add an
+    extra axis before the operation and remove it afterward.
+    """
     origin_func = ctx.origin_func
 
     if input.dim() == 1 and len(size) == 1:
diff --git a/mmdeploy/pytorch/functions/size.py b/mmdeploy/pytorch/functions/size.py
index 601611762..5df9a877e 100644
--- a/mmdeploy/pytorch/functions/size.py
+++ b/mmdeploy/pytorch/functions/size.py
@@ -5,8 +5,12 @@ from mmdeploy.core import FUNCTION_REWRITER
 
 @FUNCTION_REWRITER.register_rewriter(
     func_name='torch.Tensor.size', backend='ncnn')
-def size_of_tensor_static(ctx, self, *args):
-    """Rewrite `size` for NCNN backend."""
+def tensor__size__ncnn(ctx, self, *args):
+    """Rewrite `size` for NCNN backend.
+
+    The ONNX Shape node is not supported in ncnn. This function returns
+    integers instead of torch.Size to avoid the Shape node.
+    """
     ret = ctx.origin_func(self, *args)
 
     if isinstance(ret, torch.Tensor):
diff --git a/mmdeploy/pytorch/functions/topk.py b/mmdeploy/pytorch/functions/topk.py
index 4f469b0e6..2a6cb87fb 100644
--- a/mmdeploy/pytorch/functions/topk.py
+++ b/mmdeploy/pytorch/functions/topk.py
@@ -1,3 +1,5 @@
+from typing import Optional
+
 import torch
 
 from mmdeploy.core import FUNCTION_REWRITER
@@ -6,8 +8,16 @@ from mmdeploy.core import FUNCTION_REWRITER
 
 @FUNCTION_REWRITER.register_rewriter(func_name='torch.topk', backend='default')
 @FUNCTION_REWRITER.register_rewriter(
     func_name='torch.Tensor.topk', backend='default')
-def topk_dynamic(ctx, input, k, dim=None, largest=True, sorted=True):
-    """Rewrite `topk` for default backend."""
+def topk__dynamic(ctx,
+                  input: torch.Tensor,
+                  k: int,
+                  dim: Optional[int] = None,
+                  largest: bool = True,
+                  sorted: bool = True):
+    """Rewrite `topk` for default backend.
+
+    Cast k to a tensor and make sure k is smaller than input.shape[dim].
+    """
     if dim is None:
         dim = int(input.ndim - 1)
@@ -25,8 +35,17 @@ def topk_dynamic(ctx, input, k, dim=None, largest=True, sorted=True):
     func_name='torch.topk', backend='tensorrt')
 @FUNCTION_REWRITER.register_rewriter(
     func_name='torch.Tensor.topk', backend='tensorrt')
-def topk_static(ctx, input, k, dim=None, largest=True, sorted=True):
-    """Rewrite `topk` for TensorRT backend."""
+def topk__tensorrt(ctx,
+                   input: torch.Tensor,
+                   k: int,
+                   dim: Optional[int] = None,
+                   largest: bool = True,
+                   sorted: bool = True):
+    """Rewrite `topk` for TensorRT backend.
+
+    TensorRT does not support topk with a dynamic k. This function casts k
+    to a constant integer.
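+
+    A minimal sketch of the intended behavior (illustrative values only):
+
+    Example::
+
+        >>> scores = torch.rand(1, 100)
+        >>> # k becomes a constant int before the ONNX TopK node is emitted
+        >>> scores.topk(k=10, dim=1)[0].shape
+        torch.Size([1, 10])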
+    """
     if dim is None:
         dim = int(input.ndim - 1)
diff --git a/mmdeploy/pytorch/ops/__init__.py b/mmdeploy/pytorch/ops/__init__.py
index f713d5e62..a588e4087 100644
--- a/mmdeploy/pytorch/ops/__init__.py
+++ b/mmdeploy/pytorch/ops/__init__.py
@@ -1,11 +1,12 @@
-from .adaptive_avg_pool import (adaptive_avg_pool1d_op, adaptive_avg_pool2d_op,
-                                adaptive_avg_pool3d_op)
-from .grid_sampler import grid_sampler_default
-from .instance_norm import instance_norm_trt
-from .squeeze import squeeze_default
+from .adaptive_avg_pool import (adaptive_avg_pool1d__default,
+                                adaptive_avg_pool2d__default,
+                                adaptive_avg_pool3d__default)
+from .grid_sampler import grid_sampler__default
+from .instance_norm import instance_norm__tensorrt
+from .squeeze import squeeze__default
 
 __all__ = [
-    'adaptive_avg_pool1d_op', 'adaptive_avg_pool2d_op',
-    'adaptive_avg_pool3d_op', 'grid_sampler_default', 'instance_norm_trt',
-    'squeeze_default'
+    'adaptive_avg_pool1d__default', 'adaptive_avg_pool2d__default',
+    'adaptive_avg_pool3d__default', 'grid_sampler__default',
+    'instance_norm__tensorrt', 'squeeze__default'
 ]
diff --git a/mmdeploy/pytorch/ops/adaptive_avg_pool.py b/mmdeploy/pytorch/ops/adaptive_avg_pool.py
index 5f8df752c..9a27aaeb9 100644
--- a/mmdeploy/pytorch/ops/adaptive_avg_pool.py
+++ b/mmdeploy/pytorch/ops/adaptive_avg_pool.py
@@ -1,3 +1,4 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
 # Modified from:
 # https://github.com/pytorch/pytorch/blob/9ade03959392e5a90b74261012de1d806cab2253/torch/onnx/symbolic_opset9.py
 
@@ -53,18 +54,27 @@ adaptive_avg_pool3d = _adaptive_pool('adaptive_avg_pool3d', 'AveragePool',
 
 @SYMBOLIC_REWRITER.register_symbolic('adaptive_avg_pool1d', is_pytorch=True)
-def adaptive_avg_pool1d_op(ctx, *args):
-    """Register default symbolic function for `adaptive_avg_pool1d`."""
+def adaptive_avg_pool1d__default(ctx, *args):
+    """Register default symbolic function for `adaptive_avg_pool1d`.
+
+    Align the symbolic function of adaptive_pool across torch versions.
+    """
     return adaptive_avg_pool1d(*args)
 
 
 @SYMBOLIC_REWRITER.register_symbolic('adaptive_avg_pool2d', is_pytorch=True)
-def adaptive_avg_pool2d_op(ctx, *args):
-    """Register default symbolic function for `adaptive_avg_pool2d`."""
+def adaptive_avg_pool2d__default(ctx, *args):
+    """Register default symbolic function for `adaptive_avg_pool2d`.
+
+    Align the symbolic function of adaptive_pool across torch versions.
+    """
     return adaptive_avg_pool2d(*args)
 
 
 @SYMBOLIC_REWRITER.register_symbolic('adaptive_avg_pool3d', is_pytorch=True)
-def adaptive_avg_pool3d_op(ctx, *args):
-    """Register default symbolic function for `adaptive_avg_pool3d`."""
+def adaptive_avg_pool3d__default(ctx, *args):
+    """Register default symbolic function for `adaptive_avg_pool3d`.
+
+    Align the symbolic function of adaptive_pool across torch versions.
+    """
     return adaptive_avg_pool3d(*args)
diff --git a/mmdeploy/pytorch/ops/grid_sampler.py b/mmdeploy/pytorch/ops/grid_sampler.py
index 2a665b5a1..aa0dca829 100644
--- a/mmdeploy/pytorch/ops/grid_sampler.py
+++ b/mmdeploy/pytorch/ops/grid_sampler.py
@@ -10,7 +10,12 @@ def grid_sampler(g,
                  interpolation_mode,
                  padding_mode,
                  align_corners=False):
-    """Symbolic function for `grid_sampler`."""
+    """Symbolic function for `grid_sampler`.
+
+    PyTorch does not support exporting grid_sampler to ONNX by default. We
+    add the support here.
+    `grid_sampler` will be exported as the ONNX node 'mmcv::grid_sampler'.
+    """
     return g.op(
         'mmcv::grid_sampler',
         input,
@@ -21,6 +26,9 @@
 
 
 @SYMBOLIC_REWRITER.register_symbolic('grid_sampler', is_pytorch=True)
-def grid_sampler_default(ctx, *args):
-    """Register default symbolic function for `grid_sampler`."""
+def grid_sampler__default(ctx, *args):
+    """Register default symbolic function for `grid_sampler`.
+
+    Add support for exporting grid_sampler to ONNX.
+    """
     return grid_sampler(*args)
diff --git a/mmdeploy/pytorch/ops/instance_norm.py b/mmdeploy/pytorch/ops/instance_norm.py
index 116f36da4..bd2c25eb9 100644
--- a/mmdeploy/pytorch/ops/instance_norm.py
+++ b/mmdeploy/pytorch/ops/instance_norm.py
@@ -1,3 +1,4 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
 # Modified from:
 # https://github.com/pytorch/pytorch/blob/9ade03959392e5a90b74261012de1d806cab2253/torch/onnx/symbolic_opset9.py
 
@@ -63,7 +64,7 @@ def instance_norm(g, input, num_groups, weight, bias, eps, cudnn_enabled):
 
 @SYMBOLIC_REWRITER.register_symbolic(
     'group_norm', backend='tensorrt', is_pytorch=True)
-def instance_norm_trt(ctx, *args):
+def instance_norm__tensorrt(ctx, *args):
     """Register symbolic function for TensorRT backend.
 
     Notes:
diff --git a/mmdeploy/pytorch/ops/squeeze.py b/mmdeploy/pytorch/ops/squeeze.py
index a066472b5..4c28ac412 100644
--- a/mmdeploy/pytorch/ops/squeeze.py
+++ b/mmdeploy/pytorch/ops/squeeze.py
@@ -4,8 +4,12 @@ from mmdeploy.core import SYMBOLIC_REWRITER
 
 @SYMBOLIC_REWRITER.register_symbolic('squeeze', is_pytorch=True)
-def squeeze_default(ctx, g, self, dim=None):
-    """Register default symbolic function for `squeeze`."""
+def squeeze__default(ctx, g, self, dim=None):
+    """Register default symbolic function for `squeeze`.
+
+    squeeze might be exported with an If node in ONNX, which is not
+    supported by many backends.
+    """
     if dim is None:
         dims = []
         for i, size in enumerate(self.type().sizes()):