[Fix] Fix CI ()

* [Fix] Fix pre-commit hook for Python 3.12

* [Fix] Fix clang lint

* Use ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION

* Add TODO
pull/3122/merge
Mashiro · 2024-11-04 12:00:16 +08:00 · committed by GitHub
commit 139325726b (parent c46684c30f)
46 changed files with 224 additions and 211 deletions
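
Before the per-file hunks, here is a minimal sketch of the least self-explanatory item above, the Node-version override. It condenses the pr_stage_test hunk shown below and assumes nothing beyond it except the illustrative trigger layout and comments: the variable is set at the workflow level so that actions still bundled with an end-of-life Node runtime (such as the @v2 checkout/setup actions kept in lint.yml) are not blocked by the runner while they await migration. This is a sketch of the pattern, not the complete workflow file.

# Sketch only: condensed from the pr_stage_test hunk below, not a full workflow.
name: pr_stage_test
env:
  # Opt out of the runner's Node-version enforcement so older Node-based
  # actions keep executing while they are migrated to newer releases.
  ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true
on:
  pull_request:   # the original paths-ignore entries are elided in the diff below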

View File

@ -11,10 +11,10 @@ jobs:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.7
- name: Set up Python 3.10.15
uses: actions/setup-python@v2
with:
python-version: 3.7
python-version: '3.10.15'
- name: Install pre-commit hook
run: |
pip install pre-commit
@ -22,7 +22,7 @@ jobs:
- name: Linting
run: pre-commit run --all-files
- name: Format c/cuda codes with clang-format
uses: DoozyX/clang-format-lint-action@v0.11
uses: DoozyX/clang-format-lint-action@v0.18
with:
source: mmcv/ops/csrc
extensions: h,c,cpp,hpp,cu,cuh

View File

@ -1,5 +1,8 @@
name: pr_stage_test
env:
ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true
on:
pull_request:
paths-ignore:

View File

@ -1,7 +1,11 @@
exclude: ^tests/data/
repos:
- repo: https://gitee.com/openmmlab/mirrors-flake8
rev: 5.0.4
- repo: https://github.com/pre-commit/pre-commit
rev: v4.0.0
hooks:
- id: validate_manifest
- repo: https://github.com/PyCQA/flake8
rev: 7.1.1
hooks:
- id: flake8
- repo: https://gitee.com/openmmlab/mirrors-isort
@ -13,7 +17,7 @@ repos:
hooks:
- id: yapf
- repo: https://gitee.com/openmmlab/mirrors-pre-commit-hooks
rev: v4.3.0
rev: v5.0.0
hooks:
- id: trailing-whitespace
- id: check-yaml
@ -39,6 +43,7 @@ repos:
- mdformat_frontmatter
- linkify-it-py
- repo: https://gitee.com/openmmlab/mirrors-docformatter
# TODO:https://github.com/PyCQA/docformatter/issues/289
rev: v1.3.1
hooks:
- id: docformatter
@ -53,8 +58,8 @@ repos:
hooks:
- id: check-copyright
args: ["mmcv", "tests", "--excludes", "mmcv/ops"]
- repo: https://gitee.com/openmmlab/mirrors-mypy
rev: v0.812
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.2.0
hooks:
- id: mypy
exclude: |-
@ -62,6 +67,7 @@ repos:
^test
| ^docs
)
additional_dependencies: ["types-setuptools", "types-requests"]
# - repo: local
# hooks:
# - id: clang-format

View File

@ -1,7 +1,11 @@
exclude: ^tests/data/
repos:
- repo: https://github.com/pre-commit/pre-commit
rev: v4.0.0
hooks:
- id: validate_manifest
- repo: https://github.com/PyCQA/flake8
rev: 5.0.4
rev: 7.1.1
hooks:
- id: flake8
- repo: https://github.com/PyCQA/isort
@ -13,7 +17,7 @@ repos:
hooks:
- id: yapf
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.3.0
rev: v5.0.0
hooks:
- id: trailing-whitespace
- id: check-yaml
@ -39,7 +43,7 @@ repos:
- mdformat_frontmatter
- linkify-it-py
- repo: https://github.com/myint/docformatter
rev: v1.3.1
rev: 06907d0
hooks:
- id: docformatter
args: ["--in-place", "--wrap-descriptions", "79"]
@ -54,7 +58,7 @@ repos:
- id: check-copyright
args: ["mmcv", "tests", "--excludes", "mmcv/ops"]
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v0.812
rev: v1.2.0
hooks:
- id: mypy
exclude: |-
@ -62,6 +66,7 @@ repos:
^test
| ^docs
)
additional_dependencies: ["types-setuptools", "types-requests"]
# - repo: local
# hooks:
# - id: clang-format

View File

@ -13,7 +13,8 @@ def drop_path(x: torch.Tensor,
residual blocks).
We follow the implementation
https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py # noqa: E501
https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py
# noqa: E501
"""
if drop_prob == 0. or not training:
return x

View File

@ -71,7 +71,7 @@ class RFSearchHook(Hook):
self.by_epoch = by_epoch
def init_model(self, model: nn.Module):
"""init model with search ability.
"""Init model with search ability.
Args:
model (nn.Module): pytorch model
@ -132,7 +132,7 @@ class RFSearchHook(Hook):
)
def estimate_and_expand(self, model: nn.Module) -> None:
"""estimate and search for RFConvOp.
"""Estimate and search for RFConvOp.
Args:
model (nn.Module): pytorch model
@ -146,7 +146,7 @@ class RFSearchHook(Hook):
model: nn.Module,
search_op: str = 'Conv2d',
prefix: str = '') -> None:
"""wrap model to support searchable conv op.
"""Wrap model to support searchable conv op.
Args:
model (nn.Module): pytorch model
@ -187,7 +187,7 @@ class RFSearchHook(Hook):
search_op: str = 'Conv2d',
init_rates: Optional[int] = None,
prefix: str = '') -> None:
"""set model based on config.
"""Set model based on config.
Args:
model (nn.Module): pytorch model

View File

@ -4,7 +4,7 @@ import numpy as np
def write_to_json(config: dict, filename: str):
"""save config to json file.
"""Save config to json file.
Args:
config (dict): Config to be saved.
@ -16,7 +16,7 @@ def write_to_json(config: dict, filename: str):
def expand_rates(dilation: tuple, config: dict) -> list:
"""expand dilation rate according to config.
"""Expand dilation rate according to config.
Args:
dilation (int): _description_

View File

@ -16,9 +16,9 @@ class ActiveRotatedFilterFunction(Function):
"""Encoding the orientation information and generating orientation-
sensitive features.
The details are described in the paper `Align Deep Features for Oriented
Object Detection <https://arxiv.org/abs/2008.09397>_`.
"""
The details are described in the paper
`Align Deep Features for Oriented Object Detection <https://arxiv.org/abs/2008.09397>_`.
""" # noqa: E501
@staticmethod
def forward(ctx, input: torch.Tensor,

View File

@ -51,7 +51,7 @@ class CrissCrossAttention(nn.Module):
self.in_channels = in_channels
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""forward function of Criss-Cross Attention.
"""Forward function of Criss-Cross Attention.
Args:
x (torch.Tensor): Input feature with the shape of

View File

@ -16,7 +16,8 @@ class ChamferDistanceFunction(Function):
"""This is an implementation of the 2D Chamfer Distance.
It has been used in the paper `Oriented RepPoints for Aerial Object
Detection (CVPR 2022) <https://arxiv.org/abs/2105.11111>_`.
Detection (CVPR 2022)
<https://arxiv.org/abs/2105.11111>_`.
"""
@staticmethod

View File

@ -116,7 +116,7 @@ class CorrelationFunction(Function):
class Correlation(nn.Module):
r"""Correlation operator
r"""Correlation operator.
This correlation operator works for optical flow correlation computation.

View File

@ -222,9 +222,9 @@ __global__ void bezier_align_backward_cuda_kernel(
atomicAdd(offset_bottom_diff + y_high * width + x_high,
static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // BezierAlignBackward
#endif // BEZIER_ALIGN_CUDA_KERNEL_CUH

View File

@ -234,9 +234,9 @@ __global__ void riroi_align_rotated_backward_cuda_kernel(
g4 * l_var);
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RiRoIAlignBackward
#endif // RIROI_ALIGN_ROTATED_CUDA_KERNEL_CUH

View File

@ -194,9 +194,9 @@ __global__ void roi_align_rotated_backward_cuda_kernel(
atomicAdd(offset_bottom_diff + y_high * width + x_low, g3);
atomicAdd(offset_bottom_diff + y_high * width + x_high, g4);
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
#endif // ROI_ALIGN_ROTATED_CUDA_KERNEL_CUH

View File

@ -33,7 +33,7 @@
#define PAD_DOWN(x, y) (((x) / (y)) * (y))
#endif
#define CEIL_ALIGN(x, y) (((x) + (y)-1) / (y) * (y))
#define CEIL_ALIGN(x, y) (((x) + (y) - 1) / (y) * (y))
template <typename scalar_t>
__mlu_func__ inline scalar_t min(scalar_t a, scalar_t b) {

View File

@ -21,9 +21,9 @@
#define PAD_DOWN(x, y) (((x) / (y)) * (y))
#define CEIL_DIV(x, y) (((x) + (y)-1) / (y))
#define CEIL_DIV(x, y) (((x) + (y) - 1) / (y))
#define CEIL_ALIGN(x, y) (((x) + (y)-1) / (y) * (y))
#define CEIL_ALIGN(x, y) (((x) + (y) - 1) / (y) * (y))
inline int32_t getJobLimitCapability() {
CNcontext drv_ctx;

View File

@ -468,7 +468,7 @@ void ReleaseConvertTypes(Tuple &t) {
}
template <typename... Ts>
constexpr auto ConvertTypes(Ts &... args) {
constexpr auto ConvertTypes(Ts &...args) {
return std::make_tuple(ConvertType(args)...);
}
@ -506,7 +506,7 @@ void AddParamToBuf(const string &);
void AddParamToBuf();
template <typename T, typename... Args>
void AddParamToBuf(const T &arg, Args &... args) {
void AddParamToBuf(const T &arg, Args &...args) {
AddParamToBuf(arg);
AddParamToBuf(args...);
}

View File

@ -40,7 +40,7 @@ void assigner(TT &src, std::vector<int> counter, std::vector<scalar_t> &arg) {
template <int Idx, class TT, class scalar_t, class... TArgs>
void assigner(TT &src, std::vector<int> counter, std::vector<scalar_t> &arg,
std::vector<TArgs> &... args) {
std::vector<TArgs> &...args) {
std::get<Idx>(src) = arg[counter[Idx]];
assigner<Idx + 1>(src, counter, args...);
}

View File

@ -108,7 +108,7 @@ struct CPU {};
template <typename scalar_t, size_t MaxDim = TV_MAX_DIM>
struct SimpleVector {
public:
TV_HOST_DEVICE_INLINE SimpleVector(){};
TV_HOST_DEVICE_INLINE SimpleVector() {};
TV_HOST_DEVICE_INLINE SimpleVector(std::initializer_list<scalar_t> q) {
TV_ASSERT(q.size() <= MaxDim);
mSize = 0;
@ -315,7 +315,7 @@ struct Slice {
template <size_t MaxDim = TV_MAX_DIM>
struct ShapeBase : public SimpleVector<int, MaxDim> {
TV_HOST_DEVICE_INLINE ShapeBase() : SimpleVector<int, MaxDim>(){};
TV_HOST_DEVICE_INLINE ShapeBase() : SimpleVector<int, MaxDim>() {};
TV_HOST_DEVICE_INLINE ShapeBase(std::initializer_list<int> shape)
: SimpleVector<int, MaxDim>(shape) {}

View File

@ -220,9 +220,9 @@ void BezierAlignForward(const int nthreads, const T *input, const T *rois,
output[index] = output_val;
} // for pw
} // for ph
} // for c
} // for n
} // for ph
} // for c
} // for n
}
template <typename T>
@ -381,9 +381,9 @@ void BezierAlignBackward(const int nthreads, const T *grad_output,
add(offset_grad_input + y_high * width + x_low, static_cast<T>(g3));
add(offset_grad_input + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // for
} // ix
} // iy
} // for
} // BezierAlignBackward
void BezierAlignForwardCPULauncher(Tensor input, Tensor rois, Tensor output,

View File

@ -207,10 +207,10 @@ void ROIAlignForward(const int nthreads, const T* input, const T* rois,
// We do average (integral) pooling inside a bin
output[index] = output_val / count;
} // if
} // for pw
} // for ph
} // for c
} // for n
} // for pw
} // for ph
} // for c
} // for n
}
template <typename T>
@ -334,7 +334,7 @@ void ROIAlignBackward(const int nthreads, const T* grad_output, const T* rois,
add(offset_grad_input + y_high * width + x_low, static_cast<T>(g3));
add(offset_grad_input + y_high * width + x_high, static_cast<T>(g4));
} // if
} // mode
} // mode
} else if (pool_mode == 1) {
// We do average (integral) pooling inside a bin
// We use roi_bin_grid to sample the grid and mimic integral
@ -375,10 +375,10 @@ void ROIAlignBackward(const int nthreads, const T* grad_output, const T* rois,
add(offset_grad_input + y_high * width + x_high,
static_cast<T>(g4));
} // if
} // ix
} // iy
} // mode
} // for
} // ix
} // iy
} // mode
} // for
} // ROIAlignBackward
void ROIAlignForwardCPULauncher(Tensor input, Tensor rois, Tensor output,

View File

@ -206,9 +206,9 @@ void ROIAlignRotatedForward(const int nthreads, const T* input,
output[index] = output_val;
} // for pw
} // for ph
} // for c
} // for n
} // for ph
} // for c
} // for n
}
template <typename T>
@ -366,9 +366,9 @@ void ROIAlignRotatedBackward(
add(offset_grad_input + y_high * width + x_low, static_cast<T>(g3));
add(offset_grad_input + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // for
} // ix
} // iy
} // for
} // ROIAlignRotatedBackward
void ROIAlignRotatedForwardCPULauncher(Tensor input, Tensor rois, Tensor output,

View File

@ -100,8 +100,9 @@ void *choose_filtered_lrelu_act_kernel(void);
//------------------------------------------------------------------------
// Helpers.
enum // Filter modes.
{ MODE_SUSD = 0, // Separable upsampling, separable downsampling.
enum // Filter modes.
{
MODE_SUSD = 0, // Separable upsampling, separable downsampling.
MODE_FUSD = 1, // Full upsampling, separable downsampling.
MODE_SUFD = 2, // Separable upsampling, full downsampling.
MODE_FUFD = 3, // Full upsampling, full downsampling.
@ -157,12 +158,11 @@ struct InternalType<c10::Half> {
#define MIN(A, B) ((A) < (B) ? (A) : (B))
#define MAX(A, B) ((A) > (B) ? (A) : (B))
#define CEIL_DIV(A, B) \
(((B) == 1) \
? (A) \
: ((B) == 2) ? ((int)((A) + 1) >> 1) \
: ((B) == 4) ? ((int)((A) + 3) >> 2) \
: (((A) + ((A) > 0 ? (B)-1 : 0)) / (B)))
#define CEIL_DIV(A, B) \
(((B) == 1) ? (A) \
: ((B) == 2) ? ((int)((A) + 1) >> 1) \
: ((B) == 4) ? ((int)((A) + 3) >> 2) \
: (((A) + ((A) > 0 ? (B) - 1 : 0)) / (B)))
// This works only up to blocks of size 256 x 256 and for all N that are powers
// of two.
@ -333,22 +333,16 @@ static __global__ void filtered_lrelu_kernel(filtered_lrelu_kernel_params p) {
const int szDownX = tileUpH * tileOutW;
// Sizes for shared memory arrays.
const int s_buf0_size_base =
(filterMode == MODE_SUSD)
? MAX(szIn, szUpXY)
: (filterMode == MODE_FUSD)
? MAX(szIn, szDownX)
: (filterMode == MODE_SUFD)
? MAX(szIn, szUpXY)
: (filterMode == MODE_FUFD) ? szIn : -1;
const int s_buf1_size_base =
(filterMode == MODE_SUSD)
? MAX(szUpX, szDownX)
: (filterMode == MODE_FUSD)
? szUpXY
: (filterMode == MODE_SUFD)
? szUpX
: (filterMode == MODE_FUFD) ? szUpXY : -1;
const int s_buf0_size_base = (filterMode == MODE_SUSD) ? MAX(szIn, szUpXY)
: (filterMode == MODE_FUSD) ? MAX(szIn, szDownX)
: (filterMode == MODE_SUFD) ? MAX(szIn, szUpXY)
: (filterMode == MODE_FUFD) ? szIn
: -1;
const int s_buf1_size_base = (filterMode == MODE_SUSD) ? MAX(szUpX, szDownX)
: (filterMode == MODE_FUSD) ? szUpXY
: (filterMode == MODE_SUFD) ? szUpX
: (filterMode == MODE_FUFD) ? szUpXY
: -1;
// Ensure U128 alignment.
const int s_buf0_size = (s_buf0_size_base + 3) & ~3;
@ -980,17 +974,17 @@ static __global__ void filtered_lrelu_kernel(filtered_lrelu_kernel_params p) {
#define X_LOOP(TAPY, PX) \
for (int sx = 0; sx < fuSize / up; sx++) { \
v.x += a * (scalar_t)c_fu[(sx * up + (((PX)-0) & (up - 1))) + \
v.x += a * (scalar_t)c_fu[(sx * up + (((PX) - 0) & (up - 1))) + \
(sy * up + (TAPY)) * MAX_FILTER_SIZE]; \
v.z += b * (scalar_t)c_fu[(sx * up + (((PX)-0) & (up - 1))) + \
v.z += b * (scalar_t)c_fu[(sx * up + (((PX) - 0) & (up - 1))) + \
(sy * up + (TAPY)) * MAX_FILTER_SIZE]; \
if ((PX) == 0) { \
a = b; \
b = s_tileIn[src0 + 2 + sx + sy * tileInW]; \
} \
v.y += a * (scalar_t)c_fu[(sx * up + (((PX)-1) & (up - 1))) + \
v.y += a * (scalar_t)c_fu[(sx * up + (((PX) - 1) & (up - 1))) + \
(sy * up + (TAPY)) * MAX_FILTER_SIZE]; \
v.w += b * (scalar_t)c_fu[(sx * up + (((PX)-1) & (up - 1))) + \
v.w += b * (scalar_t)c_fu[(sx * up + (((PX) - 1) & (up - 1))) + \
(sy * up + (TAPY)) * MAX_FILTER_SIZE]; \
if ((PX) == 1) { \
a = b; \
@ -1447,7 +1441,7 @@ static __global__ void filtered_lrelu_act_kernel(
s |= __shfl_xor(s, 4);
s |= __shfl_xor(s, 8);
#else
s |= __shfl_xor_sync(m, s, 1); // Distribute.
s |= __shfl_xor_sync(m, s, 1); // Distribute.
s |= __shfl_xor_sync(m, s, 2);
s |= __shfl_xor_sync(m, s, 4);
s |= __shfl_xor_sync(m, s, 8);

View File

@ -225,9 +225,9 @@ static __global__ void upfirdn2d_kernel_small(upfirdn2d_kernel_params p) {
scalar_t v = 0;
if (inX >= 0 & inY >= 0 & inX < p.inSize.x & inY < p.inSize.y &
c < p.inSize.z)
v = (scalar_t)(
(const T *)p.x)[inX * p.inStride.x + inY * p.inStride.y +
c * p.inStride.z + n * p.inStride.w];
v = (scalar_t)((const T *)
p.x)[inX * p.inStride.x + inY * p.inStride.y +
c * p.inStride.z + n * p.inStride.w];
sx[relInY][relInX][relC] = v;
}

View File

@ -26,7 +26,7 @@ void bbox_overlaps_npu(const Tensor bboxes1, const Tensor bboxes2, Tensor ious,
gtboxesFP32 = gtboxesFP32.to(at::kFloat);
}
c10::SmallVector<int64_t, 8> iousSize = {gtboxesFP32.size(0),
bboxesFP32.size(0)};
bboxesFP32.size(0)};
if (aligned) {
iousSize = {gtboxesFP32.size(0), 1};
}

View File

@ -4,11 +4,10 @@ using namespace NPU_NAME_SPACE;
using namespace std;
void box_iou_quadri_impl(const Tensor boxes1, const Tensor boxes2, Tensor ious,
const int mode_flag, const bool aligned);
const int mode_flag, const bool aligned);
void box_iou_quadri_npu(const Tensor boxes1, const Tensor boxes2, Tensor ious,
const int mode_flag, const bool aligned) {
const int mode_flag, const bool aligned) {
TORCH_CHECK(boxes1.size(1) == 8, "boxes1 must be 2D tensor (N, 8)");
TORCH_CHECK(boxes1.size(1) == 8, "boxes1 must be 2D tensor (N, 8)");

View File

@ -8,14 +8,13 @@ void box_iou_rotated_impl(const Tensor boxes1, const Tensor boxes2, Tensor ious,
void box_iou_rotated_npu(const Tensor boxes1, const Tensor boxes2, Tensor ious,
const int mode_flag, const bool aligned) {
TORCH_CHECK(boxes1.size(1) == 5, "boxes1 must be 2D tensor (N, 5)");
TORCH_CHECK(boxes1.size(1) == 5, "boxes1 must be 2D tensor (N, 5)");
auto trans = false;
auto is_clockwise = false;
EXEC_NPU_CMD(aclnnBoxesOverlapBev, boxes1, boxes2, trans, is_clockwise,
aligned, mode_flag, ious);
aligned, mode_flag, ious);
return;
}

View File

@ -10,16 +10,17 @@ void iou3d_boxes_overlap_bev_forward_impl(const int num_a, const Tensor boxes_a,
void iou3d_boxes_overlap_bev_forward_npu(const int num_a, const Tensor boxes_a,
const int num_b, const Tensor boxes_b,
Tensor ans_overlap) {
TORCH_CHECK(boxes_a.size(1) == 7, "boxes_a must be 2D tensor (N, 7)");
TORCH_CHECK(boxes_b.size(1) == 7, "boxes_b must be 2D tensor (N, 7)");
TORCH_CHECK(boxes_a.size(1) == 7, "boxes_a must be 2D tensor (N, 7)");
TORCH_CHECK(boxes_b.size(1) == 7, "boxes_b must be 2D tensor (N, 7)");
auto trans = false;
auto is_clockwise = false;
auto aligned = false;
auto mode_flag = 2;
EXEC_NPU_CMD(aclnnBoxesOverlapBev, boxes_a, boxes_b, trans, is_clockwise, aligned, mode_flag, ans_overlap);
return;
auto trans = false;
auto is_clockwise = false;
auto aligned = false;
auto mode_flag = 2;
EXEC_NPU_CMD(aclnnBoxesOverlapBev, boxes_a, boxes_b, trans, is_clockwise,
aligned, mode_flag, ans_overlap);
return;
}
REGISTER_NPU_IMPL(iou3d_boxes_overlap_bev_forward_impl, iou3d_boxes_overlap_bev_forward_npu);
REGISTER_NPU_IMPL(iou3d_boxes_overlap_bev_forward_impl,
iou3d_boxes_overlap_bev_forward_npu);

View File

@ -4,16 +4,17 @@ using namespace NPU_NAME_SPACE;
using namespace std;
void points_in_boxes_all_forward_impl_npu(int batch_size, int boxes_num,
int pts_num, const Tensor boxes,
const Tensor pts,
Tensor box_idx_of_points) {
c10::SmallVector<int64_t, 8> output_size = {pts.size(0), pts.size(1), boxes.size(1)};
int pts_num, const Tensor boxes,
const Tensor pts,
Tensor box_idx_of_points) {
c10::SmallVector<int64_t, 8> output_size = {pts.size(0), pts.size(1),
boxes.size(1)};
auto boxes_trans = boxes.transpose(1, 2).contiguous();
EXEC_NPU_CMD(aclnnPointsInBoxAll, boxes_trans, pts, box_idx_of_points);
}
void points_in_boxes_all_forward_impl(int batch_size, int boxes_num,
int pts_num, const Tensor boxes,
const Tensor pts,
Tensor box_idx_of_points);
int pts_num, const Tensor boxes,
const Tensor pts,
Tensor box_idx_of_points);
REGISTER_NPU_IMPL(points_in_boxes_all_forward_impl,
points_in_boxes_all_forward_impl_npu);

View File

@ -4,49 +4,41 @@ using namespace NPU_NAME_SPACE;
using namespace std;
void roi_align_rotated_v2_forward_npu(const Tensor input, Tensor rois_map,
Tensor output,
double spatial_scale,
int32_t sampling_ratio,
int32_t pooled_height,
int32_t pooled_width,
bool aligned,
bool clockwise) {
Tensor output, double spatial_scale,
int32_t sampling_ratio,
int32_t pooled_height,
int32_t pooled_width, bool aligned,
bool clockwise) {
at::Tensor feature_map = input.permute({0, 2, 3, 1}).contiguous();
at::Tensor rois = rois_map.permute({1, 0}).contiguous();
EXEC_NPU_CMD(aclnnRoiAlignRotatedV2, feature_map, rois, spatial_scale, sampling_ratio, pooled_height, pooled_width, aligned, clockwise, output);
EXEC_NPU_CMD(aclnnRoiAlignRotatedV2, feature_map, rois, spatial_scale,
sampling_ratio, pooled_height, pooled_width, aligned, clockwise,
output);
}
void roi_align_rotated_v2_forward_impl(const Tensor input, Tensor rois,
Tensor output,
double spatial_scale,
int32_t sampling_ratio,
int32_t pooled_height,
int32_t pooled_width,
bool aligned,
bool clockwise);
Tensor output, double spatial_scale,
int32_t sampling_ratio,
int32_t pooled_height,
int32_t pooled_width, bool aligned,
bool clockwise);
REGISTER_NPU_IMPL(roi_align_rotated_v2_forward_impl, roi_align_rotated_v2_forward_npu);
REGISTER_NPU_IMPL(roi_align_rotated_v2_forward_impl,
roi_align_rotated_v2_forward_npu);
void roi_align_rotated_v2_backward_npu(const Tensor input, Tensor rois,
Tensor grad_output, Tensor grad_input,
int32_t pooled_height,
int32_t pooled_width,
double spatial_scale,
int32_t sampling_ratio,
bool aligned,
bool clockwise) {
void roi_align_rotated_v2_backward_npu(
const Tensor input, Tensor rois, Tensor grad_output, Tensor grad_input,
int32_t pooled_height, int32_t pooled_width, double spatial_scale,
int32_t sampling_ratio, bool aligned, bool clockwise) {
EXEC_NPU_CMD(aclnnRoiAlignRotatedGradV2, input, rois, grad_output,
pooled_height, pooled_width, spatial_scale, sampling_ratio, aligned, clockwise,
grad_input);
pooled_height, pooled_width, spatial_scale, sampling_ratio,
aligned, clockwise, grad_input);
}
void roi_align_rotated_v2_backward_impl(const Tensor input, Tensor rois,
Tensor grad_output, Tensor grad_input,
int32_t pooled_height,
int32_t pooled_width,
double spatial_scale,
int32_t sampling_ratio,
bool aligned,
bool clockwise);
void roi_align_rotated_v2_backward_impl(
const Tensor input, Tensor rois, Tensor grad_output, Tensor grad_input,
int32_t pooled_height, int32_t pooled_width, double spatial_scale,
int32_t sampling_ratio, bool aligned, bool clockwise);
REGISTER_NPU_IMPL(roi_align_rotated_v2_backward_impl, roi_align_rotated_v2_backward_npu);
REGISTER_NPU_IMPL(roi_align_rotated_v2_backward_impl,
roi_align_rotated_v2_backward_npu);

View File

@ -19,7 +19,7 @@ void roipoint_pool3d_forward_impl_npu(int batch_size, int pts_num,
at::Tensor pooled_features_trans =
at::empty(features_trans_size, xyz.options());
c10::SmallVector<int64_t, 8> empty_flag_size = {boxes3d.size(0),
boxes3d.size(1)};
boxes3d.size(1)};
EXEC_NPU_CMD(aclnnRoipointPool3dForward, points_trans, point_features_trans,
boxes3d, sampled_pts_num, pooled_features_trans,
pooled_empty_flag);

View File

@ -209,13 +209,15 @@ void roi_align_backward(Tensor grad_output, Tensor rois, Tensor argmax_y,
int sampling_ratio, int pool_mode, bool aligned);
void roi_align_rotated_v2_forward(Tensor input, Tensor rois, Tensor output,
double spatial_scale, int sampling_ratio,
int aligned_height, int aligned_width,
bool aligned, bool clockwise);
double spatial_scale, int sampling_ratio,
int aligned_height, int aligned_width,
bool aligned, bool clockwise);
void roi_align_rotated_v2_backward(Tensor input, Tensor rois, Tensor grad_output, Tensor grad_input,
int pooled_height, int pooled_width, double spatial_scale,
int sampling_ratio, bool aligned, bool clockwise);
void roi_align_rotated_v2_backward(Tensor input, Tensor rois,
Tensor grad_output, Tensor grad_input,
int pooled_height, int pooled_width,
double spatial_scale, int sampling_ratio,
bool aligned, bool clockwise);
void roi_pool_forward(Tensor input, Tensor rois, Tensor output, Tensor argmax,
int pooled_height, int pooled_width, float spatial_scale);
@ -804,13 +806,13 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("roi_align_rotated_v2_forward", &roi_align_rotated_v2_forward,
"roi_align_rotated_v2_forward", py::arg("input"), py::arg("rois"),
py::arg("output"), py::arg("spatial_scale"), py::arg("sampling_ratio"),
py::arg("pooled_height"), py::arg("pooled_width"),
py::arg("aligned"), py::arg("clockwise"));
py::arg("pooled_height"), py::arg("pooled_width"), py::arg("aligned"),
py::arg("clockwise"));
m.def("roi_align_rotated_v2_backward", &roi_align_rotated_v2_backward,
"roi_align_rotated_v2_backward", py::arg("input"), py::arg("rois"),
py::arg("grad_output"), py::arg("grad_input"), py::arg("pooled_height"),
py::arg("pooled_width"), py::arg("spatial_scale"), py::arg("sampling_ratio"),
py::arg("aligned"), py::arg("clockwise"));
py::arg("pooled_width"), py::arg("spatial_scale"),
py::arg("sampling_ratio"), py::arg("aligned"), py::arg("clockwise"));
m.def("dynamic_point_to_voxel_forward", &dynamic_point_to_voxel_forward,
"dynamic_point_to_voxel_forward", py::arg("feats"), py::arg("coors"),
py::arg("reduce_type"));

View File

@ -3,35 +3,40 @@
#include "pytorch_device_registry.hpp"
void roi_align_rotated_v2_forward_impl(Tensor input, Tensor rois, Tensor output,
double spatial_scale, int sampling_ratio,
int pooled_height, int pooled_width,
bool aligned, bool clockwise) {
double spatial_scale, int sampling_ratio,
int pooled_height, int pooled_width,
bool aligned, bool clockwise) {
DISPATCH_DEVICE_IMPL(roi_align_rotated_v2_forward_impl, input, rois, output,
spatial_scale, sampling_ratio, pooled_height, pooled_width,
aligned, clockwise);
spatial_scale, sampling_ratio, pooled_height,
pooled_width, aligned, clockwise);
}
void roi_align_rotated_v2_forward(Tensor input, Tensor rois, Tensor output,
double spatial_scale, int sampling_ratio,
int pooled_height, int pooled_width,
bool aligned, bool clockwise) {
roi_align_rotated_v2_forward_impl(input, rois, output, spatial_scale, sampling_ratio,
pooled_height, pooled_width, aligned, clockwise);
double spatial_scale, int sampling_ratio,
int pooled_height, int pooled_width,
bool aligned, bool clockwise) {
roi_align_rotated_v2_forward_impl(input, rois, output, spatial_scale,
sampling_ratio, pooled_height, pooled_width,
aligned, clockwise);
}
void roi_align_rotated_v2_backward_impl(Tensor input, Tensor rois, Tensor grad_output, Tensor grad_input,
int pooled_height, int pooled_width, double spatial_scale,
int sampling_ratio, bool aligned, bool clockwise) {
DISPATCH_DEVICE_IMPL(roi_align_rotated_v2_backward_impl, input, rois, grad_output, grad_input,
pooled_height, pooled_width, spatial_scale, sampling_ratio, aligned, clockwise);
void roi_align_rotated_v2_backward_impl(Tensor input, Tensor rois,
Tensor grad_output, Tensor grad_input,
int pooled_height, int pooled_width,
double spatial_scale,
int sampling_ratio, bool aligned,
bool clockwise) {
DISPATCH_DEVICE_IMPL(roi_align_rotated_v2_backward_impl, input, rois,
grad_output, grad_input, pooled_height, pooled_width,
spatial_scale, sampling_ratio, aligned, clockwise);
}
void roi_align_rotated_v2_backward(Tensor input, Tensor rois, Tensor grad_output, Tensor grad_input,
int pooled_height, int pooled_width, double spatial_scale,
int sampling_ratio, bool aligned, bool clockwise) {
void roi_align_rotated_v2_backward(Tensor input, Tensor rois,
Tensor grad_output, Tensor grad_input,
int pooled_height, int pooled_width,
double spatial_scale, int sampling_ratio,
bool aligned, bool clockwise) {
roi_align_rotated_v2_backward_impl(input, rois, grad_output, grad_input,
pooled_height, pooled_width, spatial_scale, sampling_ratio, aligned, clockwise);
pooled_height, pooled_width, spatial_scale,
sampling_ratio, aligned, clockwise);
}

View File

@ -449,7 +449,9 @@ if IS_MLU_AVAILABLE:
@MODELS.register_module('DCN', force=True)
class DeformConv2dPack_MLU(DeformConv2d):
"""This class is the DCN implementation of the MLU device. The MLU
"""This class is the DCN implementation of the MLU device.
The MLU
backend support of the operator has been implemented in torchvision.
The mmcv registration mechanism is used for multiplexing here. The
torchvision implementation of DCN is called.

View File

@ -42,7 +42,7 @@ def boxes_iou3d(boxes_a: Tensor, boxes_b: Tensor) -> Tensor:
Returns:
torch.Tensor: 3D IoU result with shape (M, N).
"""
assert boxes_a.shape[1] == boxes_b.shape[1] == 7,\
assert boxes_a.shape[1] == boxes_b.shape[1] == 7, \
'Input boxes shape should be (N, 7)'
boxes_a_height_max = (boxes_a[:, 2] + boxes_a[:, 5] / 2).view(-1, 1)

View File

@ -17,9 +17,9 @@ class RotatedFeatureAlignFunction(Function):
correspond to the refined rotate anchors and reconstruct the feature maps
in pixel-wise manner to achieve feature alignment.
The details are described in the paper
`R3Det: Refined Single-Stage Detector with Feature Refinement for Rotating
Object <https://arxiv.org/abs/1908.05612>`_.
The details are described in the paper `R3Det: Refined Single-Stage
Detector with Feature Refinement for Rotating Object
<https://arxiv.org/abs/1908.05612>`_.
"""
@staticmethod

View File

@ -20,7 +20,7 @@ class _DynamicScatter(Function):
feats: torch.Tensor,
coors: torch.Tensor,
reduce_type: str = 'max') -> Tuple[torch.Tensor, torch.Tensor]:
"""convert kitti points(N, >=3) to voxels.
"""Convert kitti points(N, >=3) to voxels.
Args:
feats (torch.Tensor): [N, C]. Points features to be reduced

View File

@ -47,16 +47,15 @@ def _mean_update(vals: Union[int, List], m_vals: Union[int, List],
class SparseModule(nn.Module):
"""place holder, All module subclass from this will take sptensor in
"""Place holder, All module subclass from this will take sptensor in
SparseSequential."""
pass
class SparseSequential(SparseModule):
r"""A sequential container.
Modules will be added to it in the order they are passed in the
constructor.
Alternatively, an ordered dict of modules can also be passed in.
r"""A sequential container. Modules will be added to it in the order they
are passed in the constructor. Alternatively, an ordered dict of modules
can also be passed in.
To make it easier to understand, given is a small example::
@ -189,14 +188,14 @@ class SparseSequential(SparseModule):
class ToDense(SparseModule):
"""convert SparseConvTensor to NCHW dense tensor."""
"""Convert SparseConvTensor to NCHW dense tensor."""
def forward(self, x: SparseConvTensor):
return x.dense()
class RemoveGrid(SparseModule):
"""remove pre-allocated grid buffer."""
"""Remove pre-allocated grid buffer."""
def forward(self, x: SparseConvTensor):
x.grid = None

View File

@ -6,7 +6,7 @@ import torch
def scatter_nd(indices: torch.Tensor, updates: torch.Tensor,
shape: torch.Tensor) -> torch.Tensor:
"""pytorch edition of tensorflow scatter_nd.
"""Pytorch edition of tensorflow scatter_nd.
this function don't contain except handle code. so use this carefully when
indice repeats, don't support repeat add which is supported in tensorflow.

View File

@ -55,10 +55,12 @@ class TINShift(nn.Module):
Temporal Interlace shift is a differentiable temporal-wise frame shifting
which is proposed in "Temporal Interlacing Network"
Please refer to `Temporal Interlacing Network
<https://arxiv.org/abs/2001.06499>`_ for more details.
Please refer to
`Temporal Interlacing Network <https://arxiv.org/abs/2001.06499>`_
for more details.
Code is modified from https://github.com/mit-han-lab/temporal-shift-module
Code is modified from
https://github.com/mit-han-lab/temporal-shift-module
"""
def forward(self, input, shift):

View File

@ -19,7 +19,7 @@ ext_module = ext_loader.load_ext('_ext', ['upfirdn2d'])
def _parse_scaling(scaling):
"""parse scaling into list [x, y]"""
"""Parse scaling into list [x, y]"""
if isinstance(scaling, int):
scaling = [scaling, scaling]
assert isinstance(scaling, (list, tuple))
@ -30,7 +30,7 @@ def _parse_scaling(scaling):
def _parse_padding(padding):
"""parse padding into list [padx0, padx1, pady0, pady1]"""
"""Parse padding into list [padx0, padx1, pady0, pady1]"""
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
@ -43,7 +43,7 @@ def _parse_padding(padding):
def _get_filter_size(filter):
"""get width and height of filter kernel."""
"""Get width and height of filter kernel."""
if filter is None:
return 1, 1
assert isinstance(filter, torch.Tensor) and filter.ndim in [1, 2]

View File

@ -110,6 +110,7 @@ class ImageToTensor(BaseTransform):
def transform(self, results: dict) -> dict:
"""Transform function to convert image in results to
:obj:`torch.Tensor` and transpose the channel order.
Args:
results (dict): Result dict contains the image data to convert.
Returns:

View File

@ -1457,7 +1457,7 @@ class RandomResize(BaseTransform):
def __init__(
self,
scale: Union[Tuple[int, int], Sequence[Tuple[int, int]]],
ratio_range: Tuple[float, float] = None,
ratio_range: Optional[Tuple[float, float]] = None,
resize_type: str = 'Resize',
**resize_kwargs,
) -> None:

View File

@ -177,7 +177,7 @@ class KeyMapper(BaseTransform):
"""
def __init__(self,
transforms: Union[Transform, List[Transform]] = None,
transforms: Union[Transform, List[Transform], None] = None,
mapping: Optional[Dict] = None,
remapping: Optional[Dict] = None,
auto_remap: Optional[bool] = None,

View File

@ -149,8 +149,8 @@ def dequantize_flow(dx: np.ndarray,
dx, dy = (dequantize(d, -max_val, max_val, 255) for d in [dx, dy])
if denorm:
dx *= dx.shape[1]
dy *= dx.shape[0]
dx *= dx.shape[1] # type: ignore
dy *= dx.shape[0] # type: ignore
flow = np.dstack((dx, dy))
return flow

View File

@ -91,7 +91,7 @@ def imshow_bboxes(img: Union[str, np.ndarray],
def imshow_det_bboxes(img: Union[str, np.ndarray],
bboxes: np.ndarray,
labels: np.ndarray,
class_names: List[str] = None,
class_names: Optional[List[str]] = None,
score_thr: float = 0,
bbox_color: ColorType = 'green',
text_color: ColorType = 'green',