Upgrade the version of isort (#1705)

* Upgrade the version of isort

* Sort the order of module imports
Zaida Zhou 2022-02-09 21:29:21 +08:00 committed by GitHub
parent b0b30d0c5c
commit dd82ab1497
17 changed files with 44 additions and 38 deletions
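
The bulk of the diff below is mechanical: isort 5 groups imports into standard-library, third-party and first-party sections, sorts each section, and (with this project's settings) puts each aliased "from ... import x as y" on its own line. As a rough illustration, not part of the commit, the same re-sorting can be previewed with isort's Python API; the sketch assumes isort >= 5 is installed and mirrors the known_first_party entry from setup.cfg.

# Hedged sketch, not from the commit: preview isort 5's re-sorting in memory.
import isort

messy = ('from yaml import CLoader as Loader, CDumper as Dumper\n'
         'import mmcv\n'
         'import os\n')

# isort.code() returns the re-sorted source as a string; the exact layout
# depends on the active configuration (only known_first_party is set here).
print(isort.code(messy, known_first_party=['mmcv']))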


@@ -4,12 +4,8 @@ repos:
    rev: 3.8.3
    hooks:
      - id: flake8
-  - repo: https://github.com/asottile/seed-isort-config
-    rev: v2.2.0
-    hooks:
-      - id: seed-isort-config
-  - repo: https://github.com/timothycrosley/isort
-    rev: 4.3.21
+  - repo: https://github.com/PyCQA/isort
+    rev: 5.10.1
    hooks:
      - id: isort
  - repo: https://github.com/pre-commit/mirrors-yapf
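
The seed-isort-config hook is dropped along with the upgrade: it existed only to auto-generate the known_third_party list for isort 4, and isort 5 can place third-party modules without it; the isort hook itself now comes from the PyCQA repository at version 5.10.1. The check the hook performs can also be run directly, as in the sketch below (not part of the commit, assumes isort >= 5; the file path is only an example).

# Hedged sketch, not from the commit: the same check the pre-commit hook runs.
import isort

# check_file() returns True when the file's imports are already sorted;
# run from the repo root, the settings in setup.cfg are picked up.
print(isort.check_file('mmcv/utils/__init__.py', show_diff=True))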


@@ -19,7 +19,8 @@ from .registry import (ATTENTION, FEEDFORWARD_NETWORK, POSITIONAL_ENCODING,
# Avoid BC-breaking of importing MultiScaleDeformableAttention from this file
try:
-from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention # noqa F401
+from mmcv.ops.multi_scale_deform_attn import \
+MultiScaleDeformableAttention # noqa F401
warnings.warn(
ImportWarning(
'``MultiScaleDeformableAttention`` has been moved to '


@@ -2,7 +2,8 @@
import yaml
try:
-from yaml import CLoader as Loader, CDumper as Dumper
+from yaml import CDumper as Dumper
+from yaml import CLoader as Loader
except ImportError:
from yaml import Loader, Dumper


@@ -409,8 +409,8 @@ def cummin(g, input, dim):
@parse_args('v', 'v', 'is')
def roll(g, input, shifts, dims):
-from torch.onnx.symbolic_opset9 import squeeze
from packaging import version
+from torch.onnx.symbolic_opset9 import squeeze
input_shape = g.op('Shape', input)
need_flatten = len(dims) == 0


@@ -48,6 +48,7 @@ class NMSop(torch.autograd.Function):
offset_i=int(offset))
else:
from torch.onnx.symbolic_opset9 import select, squeeze, unsqueeze
from ..onnx.onnx_utils.symbolic_helper import _size_helper
boxes = unsqueeze(g, bboxes, 0)


@@ -30,9 +30,10 @@ class RoIAlignFunction(Function):
mode_s=pool_mode,
aligned_i=aligned)
else:
-from torch.onnx.symbolic_opset9 import sub, squeeze
-from torch.onnx.symbolic_helper import _slice_helper
from torch.onnx import TensorProtoDataType
+from torch.onnx.symbolic_helper import _slice_helper
+from torch.onnx.symbolic_opset9 import squeeze, sub
# batch_indices = rois[:, 0].long()
batch_indices = _slice_helper(
g, rois, axes=[1], starts=[0], ends=[1])


@@ -37,16 +37,20 @@ except ImportError:
]
else:
from .env import collect_env
+from .hub import load_url
from .logging import get_logger, print_log
from .parrots_jit import jit, skip_no_elena
-from .parrots_wrapper import (
-TORCH_VERSION, BuildExtension, CppExtension, CUDAExtension, DataLoader,
-PoolDataLoader, SyncBatchNorm, _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd,
-_AvgPoolNd, _BatchNorm, _ConvNd, _ConvTransposeMixin, _InstanceNorm,
-_MaxPoolNd, get_build_config, is_rocm_pytorch, _get_cuda_home)
+# yapf: disable
+from .parrots_wrapper import (TORCH_VERSION, BuildExtension, CppExtension,
+CUDAExtension, DataLoader, PoolDataLoader,
+SyncBatchNorm, _AdaptiveAvgPoolNd,
+_AdaptiveMaxPoolNd, _AvgPoolNd, _BatchNorm,
+_ConvNd, _ConvTransposeMixin, _get_cuda_home,
+_InstanceNorm, _MaxPoolNd, get_build_config,
+is_rocm_pytorch)
+# yapf: enable
from .registry import Registry, build_from_cfg
from .trace import is_jit_tracing
-from .hub import load_url
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'collect_env', 'get_logger',
'print_log', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast',
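
The re-wrapped parrots_wrapper import is fenced with "# yapf: disable" and "# yapf: enable" so that yapf leaves isort's continuation layout of this one statement alone, presumably because yapf would otherwise re-flow the lines isort just produced. A minimal illustration of the fence, using standard-library names rather than mmcv's:

# Hedged sketch, not from the commit: yapf skips everything between the
# disable/enable comments, so isort's wrapping of a long import survives.
# yapf: disable
from concurrent.futures import (ProcessPoolExecutor, ThreadPoolExecutor,
                                as_completed, wait)
# yapf: enable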


@@ -10,12 +10,13 @@ if TORCH_VERSION != 'parrots' and digit_version(TORCH_VERSION) < digit_version(
'1.7.0'):
# Modified from https://github.com/pytorch/pytorch/blob/master/torch/hub.py
import os
-import torch
-import warnings
-from urllib.parse import urlparse
import sys
+import warnings
import zipfile
-from torch.hub import download_url_to_file, _get_torch_home, HASH_REGEX
+from urllib.parse import urlparse
+import torch
+from torch.hub import HASH_REGEX, _get_torch_home, download_url_to_file
# Hub used to support automatically extracts from zipfile manually
# compressed by users. The legacy zip format expects only one file from


@@ -83,8 +83,8 @@ def _get_norm():
from parrots.nn.modules.batchnorm import _BatchNorm, _InstanceNorm
SyncBatchNorm_ = torch.nn.SyncBatchNorm2d
else:
-from torch.nn.modules.instancenorm import _InstanceNorm
from torch.nn.modules.batchnorm import _BatchNorm
+from torch.nn.modules.instancenorm import _InstanceNorm
SyncBatchNorm_ = torch.nn.SyncBatchNorm
return _BatchNorm, _InstanceNorm, SyncBatchNorm_


@@ -111,8 +111,9 @@ def assert_is_norm_layer(module) -> bool:
Returns:
bool: Whether the module is a norm layer.
"""
-from .parrots_wrapper import _BatchNorm, _InstanceNorm
from torch.nn import GroupNorm, LayerNorm
+from .parrots_wrapper import _BatchNorm, _InstanceNorm
norm_layer_candidates = (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm)
return isinstance(module, norm_layer_candidates)


@@ -12,7 +12,7 @@ split_before_expression_after_opening_paren = true
[isort]
line_length = 79
multi_line_output = 0
-known_standard_library = pkg_resources,setuptools,logging,os,warnings,abc
+extra_standard_library = pkg_resources,setuptools,logging,os,warnings,abc
known_first_party = mmcv
known_third_party = addict,cv2,numpy,onnx,onnxruntime,packaging,pytest,pytorch_sphinx_theme,scipy,sphinx,tensorrt,torch,torchvision,yaml,yapf
no_lines_before = STDLIB,LOCALFOLDER
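
In isort 5, known_standard_library overrides the built-in standard-library list instead of extending it, and extra_standard_library is the option for adding a few project-specific names, which is why the key is renamed here rather than removed. The effect can be checked through the Python API (a sketch, not part of the commit, assuming isort >= 5):

# Hedged sketch, not from the commit: the renamed option via isort's API.
import isort

snippet = 'import numpy\nimport pkg_resources\nimport os\n'

# extra_standard_library adds names to isort's built-in stdlib table, so
# pkg_resources sorts next to os instead of with third-party numpy.
print(isort.code(snippet, extra_standard_library=['pkg_resources']))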


@@ -184,6 +184,7 @@ def get_extensions():
if EXT_TYPE == 'parrots':
ext_name = 'mmcv._ext'
from parrots.utils.build_extension import Extension
# new parrots op impl do not use MMCV_USE_PARROTS
# define_macros = [('MMCV_USE_PARROTS', None)]
define_macros = []
@@ -314,8 +315,8 @@ def get_extensions():
if EXT_TYPE == 'pytorch' and os.getenv('MMCV_WITH_ORT', '0') != '0':
ext_name = 'mmcv._ext_ort'
-from torch.utils.cpp_extension import library_paths, include_paths
import onnxruntime
+from torch.utils.cpp_extension import include_paths, library_paths
library_dirs = []
libraries = []
include_dirs = []


@@ -112,7 +112,7 @@ class TestPhotometric:
def _imequalize(img):
# equalize the image using PIL.ImageOps.equalize
-from PIL import ImageOps, Image
+from PIL import Image, ImageOps
img = Image.fromarray(img)
equalized_img = np.asarray(ImageOps.equalize(img))
return equalized_img
@@ -141,8 +141,8 @@ class TestPhotometric:
def _adjust_brightness(img, factor):
# adjust the brightness of image using
# PIL.ImageEnhance.Brightness
-from PIL.ImageEnhance import Brightness
from PIL import Image
+from PIL.ImageEnhance import Brightness
img = Image.fromarray(img)
brightened_img = Brightness(img).enhance(factor)
return np.asarray(brightened_img)
@@ -169,8 +169,9 @@ class TestPhotometric:
def test_adjust_contrast(self, nb_rand_test=100):
def _adjust_contrast(img, factor):
-from PIL.ImageEnhance import Contrast
from PIL import Image
+from PIL.ImageEnhance import Contrast
# Image.fromarray defaultly supports RGB, not BGR.
# convert from BGR to RGB
img = Image.fromarray(img[..., ::-1], mode='RGB')
@@ -204,8 +205,9 @@ class TestPhotometric:
def test_auto_contrast(self, nb_rand_test=100):
def _auto_contrast(img, cutoff=0):
-from PIL.ImageOps import autocontrast
from PIL import Image
+from PIL.ImageOps import autocontrast
# Image.fromarray defaultly supports RGB, not BGR.
# convert from BGR to RGB
img = Image.fromarray(img[..., ::-1], mode='RGB')
@@ -250,8 +252,8 @@ class TestPhotometric:
def _adjust_sharpness(img, factor):
# adjust the sharpness of image using
# PIL.ImageEnhance.Sharpness
-from PIL.ImageEnhance import Sharpness
from PIL import Image
+from PIL.ImageEnhance import Sharpness
img = Image.fromarray(img)
sharpened_img = Sharpness(img).enhance(factor)
return np.asarray(sharpened_img)


@@ -51,7 +51,7 @@ def _test_border_align_allclose(device, dtype, pool_size):
if not torch.cuda.is_available() and device == 'cuda':
pytest.skip('test requires GPU')
try:
-from mmcv.ops import border_align, BorderAlign
+from mmcv.ops import BorderAlign, border_align
except ModuleNotFoundError:
pytest.skip('BorderAlign op is not successfully compiled')


@@ -88,10 +88,12 @@ class FooModel(BaseModule):
def test_initilization_info_logger():
# 'override' has higher priority
-import torch.nn as nn
-from mmcv.utils.logging import get_logger
import os
+import torch.nn as nn
+from mmcv.utils.logging import get_logger
class OverloadInitConv(nn.Conv2d, BaseModule):
def init_weights(self):


@@ -197,7 +197,6 @@ def test_load_checkpoint_with_prefix():
def test_load_checkpoint():
import os
import re
import tempfile
@@ -230,7 +229,6 @@ def test_load_checkpoint():
def test_load_checkpoint_metadata():
import os
import tempfile
from mmcv.runner import load_checkpoint, save_checkpoint
@@ -301,7 +299,6 @@ def test_load_checkpoint_metadata():
def test_load_classes_name():
import os
import tempfile
from mmcv.runner import load_checkpoint, save_checkpoint
@@ -335,7 +332,6 @@ def test_load_classes_name():
def test_checkpoint_loader():
import os
import tempfile
from mmcv.runner import CheckpointLoader, _load_checkpoint, save_checkpoint


@@ -132,7 +132,6 @@ def test_requires_executable(capsys):
def test_import_modules_from_strings():
# multiple imports
import os.path as osp_
import sys as sys_
osp, sys = mmcv.import_modules_from_strings(['os.path', 'sys'])
assert osp == osp_