[Enhance] Upgrade isort pre-commit hooks. (#687)

* Use new version flake8 and isort hooks

* Fix missing copyright
pull/692/head
Ma Zerun 2022-02-17 02:17:20 +08:00 committed by GitHub
parent 44e9902979
commit fcd57913ae
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
23 changed files with 37 additions and 26 deletions

View File

@ -1,15 +1,11 @@
exclude: ^tests/data/
repos:
- repo: https://gitlab.com/pycqa/flake8.git
rev: 3.8.3
- repo: https://github.com/PyCQA/flake8
rev: 4.0.1
hooks:
- id: flake8
- repo: https://github.com/asottile/seed-isort-config
rev: v2.2.0
hooks:
- id: seed-isort-config
- repo: https://github.com/timothycrosley/isort
rev: 4.3.21
- repo: https://github.com/PyCQA/isort
rev: 5.10.1
hooks:
- id: isort
- repo: https://github.com/pre-commit/mirrors-yapf

View File

@ -115,7 +115,7 @@ def train_model(model,
else:
model = MMDataParallel(model, device_ids=cfg.gpu_ids)
if not model.device_ids:
from mmcv import digit_version, __version__
from mmcv import __version__, digit_version
assert digit_version(__version__) >= (1, 4, 4), \
'To train with CPU, please confirm your mmcv version ' \
'is not lower than v1.4.4'

View File

@ -1,3 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .lamb import Lamb
__all__ = [

View File

@ -1,3 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .image import (BaseFigureContextManager, ImshowInfosContextManager,
color_val_matplotlib, imshow_infos)

View File

@ -1,3 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
import matplotlib.pyplot as plt
import mmcv
import numpy as np

View File

@ -25,8 +25,8 @@ SAMPLERS = Registry('sampler')
def build_dataset(cfg, default_args=None):
from .dataset_wrappers import (ConcatDataset, RepeatDataset,
ClassBalancedDataset, KFoldDataset)
from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,
KFoldDataset, RepeatDataset)
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif cfg['type'] == 'RepeatDataset':

View File

@ -1,3 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence
import torch

View File

@ -1,3 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn.functional as F
import torch.utils.checkpoint as cp

View File

@ -1,3 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F

View File

@ -12,9 +12,8 @@ split_before_expression_after_opening_paren = true
[isort]
line_length = 79
multi_line_output = 0
known_standard_library = pkg_resources,setuptools
extra_standard_library = pkg_resources,setuptools
known_first_party = mmcls
known_third_party = PIL,cv2,matplotlib,mmcv,mmdet,modelindex,numpy,onnxruntime,packaging,pytest,pytorch_sphinx_theme,requests,rich,sphinx,tensorflow,torch,torchvision,ts
no_lines_before = STDLIB,LOCALFOLDER
default_section = THIRDPARTY

View File

@ -1,3 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
# small RetinaNet
num_classes = 3

View File

@ -1,3 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from copy import deepcopy
from unittest.mock import patch

View File

@ -760,7 +760,7 @@ def test_equalize(nb_rand_test=100):
def _imequalize(img):
    """Equalize *img* with PIL as the reference implementation.

    Args:
        img (np.ndarray): input image array (any mode PIL can wrap).

    Returns:
        np.ndarray: the histogram-equalized image.
    """
    # Single import statement (the diff artifact duplicated this line in
    # both its pre- and post-isort form); isort-sorted member order.
    from PIL import Image, ImageOps
    pil_img = Image.fromarray(img)
    # np is taken from the enclosing test module's namespace.
    return np.asarray(ImageOps.equalize(pil_img))
@ -932,8 +932,9 @@ def test_posterize():
def test_contrast(nb_rand_test=100):
def _adjust_contrast(img, factor):
from PIL.ImageEnhance import Contrast
from PIL import Image
from PIL.ImageEnhance import Contrast
# Image.fromarray defaultly supports RGB, not BGR.
# convert from BGR to RGB
img = Image.fromarray(img[..., ::-1], mode='RGB')
@ -1066,8 +1067,8 @@ def test_brightness(nb_rand_test=100):
def _adjust_brightness(img, factor):
    """Adjust image brightness via PIL.ImageEnhance.Brightness.

    Args:
        img (np.ndarray): input image array.
        factor (float): brightness enhancement factor (1.0 = unchanged).

    Returns:
        np.ndarray: the brightness-adjusted image.
    """
    # The raw diff carried this import twice (pre- and post-isort line);
    # keep exactly one, in isort order.
    from PIL import Image
    from PIL.ImageEnhance import Brightness
    pil_img = Image.fromarray(img)
    brightened_img = Brightness(pil_img).enhance(factor)
    return np.asarray(brightened_img)
@ -1128,8 +1129,8 @@ def test_sharpness(nb_rand_test=100):
def _adjust_sharpness(img, factor):
    """Adjust image sharpness via PIL.ImageEnhance.Sharpness.

    Args:
        img (np.ndarray): input image array.
        factor (float): sharpness enhancement factor (1.0 = unchanged).

    Returns:
        np.ndarray: the sharpness-adjusted image.
    """
    # The raw diff carried this import twice (pre- and post-isort line);
    # keep exactly one, in isort order.
    from PIL import Image
    from PIL.ImageEnhance import Sharpness
    pil_img = Image.fromarray(img)
    sharpened_img = Sharpness(pil_img).enhance(factor)
    return np.asarray(sharpened_img)

View File

@ -1,3 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules import GroupNorm

View File

@ -1,3 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
import os
import tempfile

View File

@ -1,3 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules import GroupNorm

View File

@ -1,3 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import shutil
import tempfile

View File

@ -1,3 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import tempfile
from unittest.mock import MagicMock

View File

@ -1,3 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
import functools
from collections import OrderedDict
from copy import deepcopy

View File

@ -1,3 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch

View File

@ -52,8 +52,8 @@ def onnx2tensorrt(onnx_file,
print(f'Successfully created TensorRT engine: {trt_file}')
if verify:
import torch
import onnxruntime as ort
import torch
input_img = torch.randn(*input_shape)
input_img_cpu = input_img.detach().cpu().numpy()

View File

@ -108,9 +108,9 @@ def pytorch2onnx(model,
model.forward = origin_forward
if do_simplify:
from mmcv import digit_version
import onnxsim
import onnx
import onnxsim
from mmcv import digit_version
min_required_version = '0.3.0'
assert digit_version(mmcv.__version__) >= digit_version(

View File

@ -17,10 +17,10 @@ from mmcls.apis import init_model
from mmcls.datasets.pipelines import Compose
try:
from pytorch_grad_cam import (EigenCAM, GradCAM, GradCAMPlusPlus, XGradCAM,
EigenGradCAM, LayerCAM)
from pytorch_grad_cam.activations_and_gradients import (
ActivationsAndGradients)
from pytorch_grad_cam import (EigenCAM, EigenGradCAM, GradCAM,
GradCAMPlusPlus, LayerCAM, XGradCAM)
from pytorch_grad_cam.activations_and_gradients import \
ActivationsAndGradients
from pytorch_grad_cam.utils.image import show_cam_on_image
except ImportError:
raise ImportError('Please run `pip install "grad-cam>=1.3.6"` to install '
@ -336,8 +336,8 @@ def main():
if args.target_category:
grad_cam_v = pkg_resources.get_distribution('grad_cam').version
if digit_version(grad_cam_v) >= digit_version('1.3.7'):
from pytorch_grad_cam.utils.model_targets import (
ClassifierOutputTarget)
from pytorch_grad_cam.utils.model_targets import \
ClassifierOutputTarget
targets = [ClassifierOutputTarget(c) for c in args.target_category]
else:
targets = args.target_category