mirror of https://github.com/open-mmlab/mmcv.git
Upgrade the versions of pre-commit hooks (#2328)
parent 07e8775f09
commit 75ea2f8995
@@ -1,7 +1,7 @@
 exclude: ^tests/data/
 repos:
   - repo: https://gitee.com/openmmlab/mirrors-flake8
-    rev: 3.8.3
+    rev: 5.0.4
     hooks:
       - id: flake8
   - repo: https://gitee.com/openmmlab/mirrors-isort
@@ -9,11 +9,11 @@ repos:
     hooks:
       - id: isort
   - repo: https://gitee.com/openmmlab/mirrors-yapf
-    rev: v0.30.0
+    rev: v0.32.0
     hooks:
       - id: yapf
   - repo: https://gitee.com/openmmlab/mirrors-pre-commit-hooks
-    rev: v3.1.0
+    rev: v4.3.0
     hooks:
       - id: trailing-whitespace
       - id: check-yaml
@@ -26,7 +26,7 @@ repos:
       - id: mixed-line-ending
        args: ["--fix=lf"]
   - repo: https://gitee.com/openmmlab/mirrors-codespell
-    rev: v2.1.0
+    rev: v2.2.1
     hooks:
       - id: codespell
   - repo: https://gitee.com/openmmlab/mirrors-mdformat
@@ -44,7 +44,7 @@ repos:
       - id: docformatter
        args: ["--in-place", "--wrap-descriptions", "79"]
   - repo: https://github.com/asottile/pyupgrade
-    rev: v2.32.1
+    rev: v3.0.0
     hooks:
       - id: pyupgrade
        args: ["--py36-plus"]
@@ -1,7 +1,7 @@
 exclude: ^tests/data/
 repos:
   - repo: https://github.com/PyCQA/flake8
-    rev: 3.8.3
+    rev: 5.0.4
     hooks:
       - id: flake8
   - repo: https://github.com/PyCQA/isort
@@ -9,11 +9,11 @@ repos:
     hooks:
       - id: isort
   - repo: https://github.com/pre-commit/mirrors-yapf
-    rev: v0.30.0
+    rev: v0.32.0
     hooks:
       - id: yapf
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v3.1.0
+    rev: v4.3.0
     hooks:
       - id: trailing-whitespace
       - id: check-yaml
@@ -26,7 +26,7 @@ repos:
       - id: mixed-line-ending
        args: ["--fix=lf"]
   - repo: https://github.com/codespell-project/codespell
-    rev: v2.1.0
+    rev: v2.2.1
     hooks:
       - id: codespell
   - repo: https://github.com/executablebooks/mdformat
@@ -44,7 +44,7 @@ repos:
       - id: docformatter
        args: ["--in-place", "--wrap-descriptions", "79"]
   - repo: https://github.com/asottile/pyupgrade
-    rev: v2.32.1
+    rev: v3.0.0
     hooks:
       - id: pyupgrade
        args: ["--py36-plus"]
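
Both configs bump the same hooks; among them, pyupgrade (v2.32.1 to v3.0.0) still runs with `--py36-plus`, which rewrites pre-3.6 constructs into modern syntax. A minimal sketch of the kind of rewrite it applies (the snippet below is illustrative only, not taken from mmcv):

```python
# Illustrative only: code in the style pyupgrade --py36-plus rewrites,
# next to the equivalent output it produces.
pairs = [('a', 1), ('b', 2)]
name = 'mmcv'

msg_old = 'hello, {}'.format(name)      # str.format -> f-string
d_old = dict((k, v) for k, v in pairs)  # dict(generator) -> dict comprehension

msg_new = f'hello, {name}'
d_new = {k: v for k, v in pairs}

assert msg_old == msg_new and d_old == d_new
```
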
@@ -265,12 +265,12 @@ mmcv.flowshow(flow)
 
 ![]()
 
-3. Flow warpping
+3. Flow warping
 
 ```python
 img1 = mmcv.imread('img1.jpg')
 flow = mmcv.flowread('flow.flo')
-warpped_img2 = mmcv.flow_warp(img1, flow)
+warped_img2 = mmcv.flow_warp(img1, flow)
 ```
 
 img1 (left) and img2 (right)
@@ -281,6 +281,6 @@ optical flow (img2 -> img1)
 ![]()
 
-warpped image and difference with ground truth
+warped image and difference with ground truth
 
 ![]()
 ![]()
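
The hunk above only fixes the spelling in the flow-warping example; for context, a hedged sketch of how that snippet is typically extended to produce the "warped image and difference with ground truth" figure mentioned in it (file names are placeholders, and the absolute difference is just one way to visualize the error):

```python
import mmcv
import numpy as np

img1 = mmcv.imread('img1.jpg')    # placeholder paths, as in the docs
img2 = mmcv.imread('img2.jpg')
flow = mmcv.flowread('flow.flo')

# Warp img1 with the img2 -> img1 flow to approximate img2,
# then visualize the absolute difference against the ground truth.
warped_img2 = mmcv.flow_warp(img1, flow)
diff = np.abs(warped_img2.astype(np.float32) - img2.astype(np.float32))
mmcv.imwrite(diff.astype(np.uint8), 'diff.jpg')
```
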
@@ -259,7 +259,7 @@ mmcv.flowshow(flow)
 ```python
 img1 = mmcv.imread('img1.jpg')
 flow = mmcv.flowread('flow.flo')
-warpped_img2 = mmcv.flow_warp(img1, flow)
+warped_img2 = mmcv.flow_warp(img1, flow)
 ```
 
 img1 (左) and img2 (右)
@@ -272,4 +272,4 @@ img1 (左) and img2 (右)
 
 变换后的图像和真实图像的差异
 
-![]()
+![]()
@@ -47,12 +47,12 @@ class ActiveRotatedFilterFunction(Function):
     def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, None]:
         """
         Args:
-            grad_output (torch.Tensor): The gradiant of output features
+            grad_output (torch.Tensor): The gradient of output features
                 with shape [num_output_planes * num_rotations,
                 num_input_planes * num_orientations, H, W].
 
         Returns:
-            torch.Tensor: The gradiant of input features with shape
+            torch.Tensor: The gradient of input features with shape
                 [num_output_planes, num_input_planes, num_orientations, H, W].
         """
         input, indices = ctx.saved_tensors
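
Beyond the typo fix, this docstring encodes a shape contract between the expanded filter bank and its gradient. A torch-only sketch of that contract (all sizes are illustrative; it does not call the extension itself):

```python
import torch

num_output_planes, num_input_planes = 16, 8
num_orientations, num_rotations = 8, 8
k = 3  # kernel height and width

# Oriented weight as stored by the module.
weight = torch.randn(num_output_planes, num_input_planes, num_orientations, k, k)

# Forward expands it to a plain conv weight of shape
# [num_output_planes * num_rotations, num_input_planes * num_orientations, k, k];
# backward() receives a gradient of that shape and maps it back to weight.shape.
grad_out = torch.randn(num_output_planes * num_rotations,
                       num_input_planes * num_orientations, k, k)
assert grad_out.shape[0] == weight.shape[0] * num_rotations
assert grad_out.shape[1] == weight.shape[1] * num_orientations
```
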
@@ -161,7 +161,7 @@ void PSAMaskForwardMLUKernelLauncher(const int psa_type, const Tensor x,
   TORCH_CHECK(h_feature * w_feature == y_c,
               "channel of y should be the same as h_feature * w_feature");
   TORCH_CHECK(psa_type == 0 || psa_type == 1,
-              "psa_type only suppurts 'COLLECT' and 'DISTRIBUTE' currently");
+              "psa_type only supports 'COLLECT' and 'DISTRIBUTE' currently");
 
   if (x.numel() == 0) {
     CNLOG(INFO) << "skip zero-element tensor";
@@ -227,7 +227,7 @@ void PSAMaskBackwardMLUKernelLauncher(const int psa_type, const Tensor dy,
   TORCH_CHECK(h_mask * w_mask == dx_c,
               "channel of dx should be the same as h_mask * w_mask");
   TORCH_CHECK(psa_type == 0 || psa_type == 1,
-              "psa_type only suppurts 'COLLECT' and 'DISTRIBUTE' currently");
+              "psa_type only supports 'COLLECT' and 'DISTRIBUTE' currently");
 
   if (dx.numel() == 0) {
     CNLOG(INFO) << "skip zero-element tensor";
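
The checks above pin down the op's shapes: the input carries h_mask * w_mask channels, the output h_feature * w_feature, and psa_type selects COLLECT or DISTRIBUTE. A hedged usage sketch through the Python wrapper (assuming an mmcv-full build that ships the psa_mask extension; depending on the build the op may need a CUDA or MLU device, and all sizes are placeholders):

```python
import torch
from mmcv.ops import PSAMask

h_feature = w_feature = 8
h_mask = w_mask = 5

# Input channels must equal h_mask * w_mask, matching the TORCH_CHECK above.
x = torch.randn(1, h_mask * w_mask, h_feature, w_feature)

psa_collect = PSAMask('collect', mask_size=(h_mask, w_mask))
y = psa_collect(x)
print(y.shape)  # expected: (1, h_feature * w_feature, h_feature, w_feature)
```
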
@@ -45,7 +45,7 @@ void ROIAlignForwardMLUKernelLauncher(Tensor input, Tensor rois, Tensor output,
               input.dim(), "D");
   TORCH_CHECK(rois.dim() == 2, "rois should be a 2d tensor, got ", rois.dim(),
               "D");
-  TORCH_CHECK(pool_mode == 1, "pool_mode only suppurts 'avg' currently");
+  TORCH_CHECK(pool_mode == 1, "pool_mode only supports 'avg' currently");
 
   auto memory_format =
       torch_mlu::cnnl::ops::get_channels_last_memory_format(input.dim());
@@ -121,7 +121,7 @@ void ROIAlignBackwardMLUKernelLauncher(Tensor grad, Tensor rois,
               "D");
   TORCH_CHECK(rois.dim() == 2, "rois should be a 2d tensor, got ", rois.dim(),
               "D");
-  TORCH_CHECK(pool_mode == 1, "pool_mode only suppurts 'avg' currently");
+  TORCH_CHECK(pool_mode == 1, "pool_mode only supports 'avg' currently");
 
   int batch_size = grad_input.size(0);
   int channels = grad_input.size(1);
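
For reference, the pool_mode guarded above corresponds to average pooling in the Python wrapper. A hedged sketch of calling the op through mmcv.ops.RoIAlign (assuming an mmcv-full build with the roi_align extension; the tensors are random placeholders):

```python
import torch
from mmcv.ops import RoIAlign

feat = torch.rand(1, 16, 32, 32)               # (N, C, H, W) feature map
rois = torch.tensor([[0., 4., 4., 20., 20.]])  # [batch_idx, x1, y1, x2, y2]

# pool_mode='avg' matches the TORCH_CHECK: only average pooling is supported
# by the MLU kernel at this point.
roi_layer = RoIAlign(output_size=(7, 7), spatial_scale=1.0,
                     sampling_ratio=2, pool_mode='avg', aligned=True)
pooled = roi_layer(feat, rois)
print(pooled.shape)  # torch.Size([1, 16, 7, 7])
```
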
@@ -66,11 +66,11 @@ class RotatedFeatureAlignFunction(Function):
     def backward(ctx: Any, grad_output: torch.Tensor) -> tuple:
         """
         Args:
-            grad_output (torch.Tensor): The gradiant of output features
+            grad_output (torch.Tensor): The gradient of output features
                 with shape [N,C,H,W].
 
         Returns:
-            torch.Tensor: The gradiant of input features with shape [N,C,H,W].
+            torch.Tensor: The gradient of input features with shape [N,C,H,W].
         """
         best_rbboxes = ctx.saved_tensors[0]
         points = ctx.points
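
As with the filter op above, the docstring states that the gradient returned to the feature map keeps the input's [N, C, H, W] shape. A hedged sketch that exercises this through the public wrapper (assuming an mmcv-full build that provides rotated_feature_align; depending on the build it may require a CUDA device, and the rotated boxes here are random placeholders):

```python
import torch
from mmcv.ops import rotated_feature_align

N, C, H, W = 2, 8, 16, 16
feats = torch.randn(N, C, H, W, requires_grad=True)
rbboxes = torch.rand(N, H, W, 5)  # per-location (cx, cy, w, h, angle) boxes

out = rotated_feature_align(feats, rbboxes, spatial_scale=1 / 8, points=1)
out.sum().backward()
print(feats.grad.shape)  # torch.Size([2, 8, 16, 16]), as the docstring states
```
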
@@ -41,7 +41,7 @@ class Cache:
 class VideoReader:
     """Video class with similar usage to a list object.
 
-    This video warpper class provides convenient apis to access frames.
+    This video wrapper class provides convenient apis to access frames.
     There exists an issue of OpenCV's VideoCapture class that jumping to a
     certain frame may be inaccurate. It is fixed in this class by checking
     the position after jumping each time.
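
Since the docstring touched here describes the frame-seeking workaround, a short usage sketch of the wrapper (the video path is a placeholder):

```python
import mmcv

video = mmcv.VideoReader('test.mp4')  # placeholder path
print(len(video), video.fps, video.width, video.height)

frame = video[100]   # random access; the position is re-checked after each jump
for frame in video:  # sequential access, like iterating over a list
    pass
```
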