linting code

pull/345/head
KaiyangZhou 2020-05-05 15:58:00 +01:00
parent 2a9f44af9b
commit 5ce43185b1
33 changed files with 97 additions and 65 deletions

.flake8

@@ -1,4 +1,18 @@
 [flake8]
-ignore = E261, E501, W293
+ignore =
+    # At least two spaces before inline comment
+    E261,
+    # Line lengths are recommended to be no greater than 79 characters
+    E501,
+    # Missing whitespace around arithmetic operator
+    E226,
+    # Blank line contains whitespace
+    W293,
+    # Do not use bare 'except'
+    E722,
+    # Line break after binary operator
+    W504,
+    # isort found an import in the wrong position
+    I001
 max-line-length = 79
-exclude = __init__.py, build
+exclude = __init__.py, build, torchreid/metrics/rank_cylib/
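For reference, E226 (newly added to the ignore list) is the check that fires on unspaced arithmetic; with this config both forms below pass flake8. An illustrative snippet:

width, height = 128, 256
area = width*height    # would trigger E226 if it were not ignored
area = width * height  # passes under any configuration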


@ -1,3 +1,11 @@
echo "Running isort"
isort -y -sp .
echo "Done"
yapf -i -r -vv . -e build
echo "Running yapf"
yapf -i -r -vv -e build .
echo "Done"
echo "Running flake8"
flake8 .
echo "Done"


@@ -1,10 +1,7 @@
 from __future__ import division, print_function, absolute_import
-import time
-import datetime
 import torch
 from torch.nn import functional as F

 from torchreid import metrics
-from torchreid.utils import open_all_layers, open_specified_layers
 from torchreid.engine import Engine
 from torchreid.losses import TripletLoss, CrossEntropyLoss


@@ -1,4 +1,3 @@
-import os
 import sys
 import copy
 import time
@@ -15,8 +14,8 @@ from torchreid.utils import (
 from dml import ImageDMLEngine
 from default_config import (
-    imagedata_kwargs, optimizer_kwargs, videodata_kwargs, engine_run_kwargs,
-    get_default_config, lr_scheduler_kwargs
+    imagedata_kwargs, optimizer_kwargs, engine_run_kwargs, get_default_config,
+    lr_scheduler_kwargs
 )


@@ -10,6 +10,24 @@ NORM_AFFINE = False  # enable affine transformations for normalization layer
 ##########
 # Basic layers
 ##########
+class IBN(nn.Module):
+    """Instance + Batch Normalization."""
+
+    def __init__(self, num_channels):
+        super(IBN, self).__init__()
+        half1 = int(num_channels / 2)
+        self.half = half1
+        half2 = num_channels - half1
+        self.IN = nn.InstanceNorm2d(half1, affine=NORM_AFFINE)
+        self.BN = nn.BatchNorm2d(half2, affine=NORM_AFFINE)
+
+    def forward(self, x):
+        split = torch.split(x, self.half, 1)
+        out1 = self.IN(split[0].contiguous())
+        out2 = self.BN(split[1].contiguous())
+        return torch.cat((out1, out2), 1)
+
+
 class ConvLayer(nn.Module):
     """Convolution layer (conv + bn + relu)."""


@@ -1,11 +1,6 @@
 from __future__ import division, print_function, absolute_import
-import time
-import datetime

 from torchreid import metrics
-from torchreid.utils import (
-    AverageMeter, open_all_layers, open_specified_layers
-)
 from torchreid.engine import Engine
 from torchreid.losses import CrossEntropyLoss
@@ -58,7 +53,7 @@ class ImageSoftmaxNASEngine(Engine):
             lmda = self.init_lmda
         else:
             lmda = self.init_lmda * self.lmda_decay_rate**(
-                epoch // self.lmda_decay_step
+                self.epoch // self.lmda_decay_step
             )
         if lmda < self.min_lmda:
             lmda = self.min_lmda
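The one-character fix above swaps a bare `epoch` for `self.epoch`, the epoch counter the engine actually tracks. The schedule itself is a stepwise exponential decay with a floor; a sketch with illustrative hyperparameter values (not the project's defaults):

init_lmda, lmda_decay_rate, lmda_decay_step, min_lmda = 1.0, 0.5, 20, 0.05

for epoch in (0, 19, 20, 45, 80):
    lmda = max(init_lmda * lmda_decay_rate**(epoch // lmda_decay_step), min_lmda)
    print(epoch, lmda)  # 1.0 until epoch 20, halves every 20 epochs, floored at min_lmda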


@@ -1,4 +1,3 @@
-import os
 import sys
 import time
 import os.path as osp


@@ -45,8 +45,7 @@ def get_requirements(filename='requirements.txt'):
 setup(
     name='torchreid',
     version=find_version(),
-    description=
-    'A library for deep learning person re-identification in PyTorch',
+    description='A library for deep learning person re-ID in PyTorch',
     author='Kaiyang Zhou',
     license='MIT',
     long_description=readme(),


@@ -70,7 +70,7 @@ class DataManager(object):
         """Returns the number of training cameras."""
         return self._num_train_cams

-    def return_query_and_gallery_by_name(self, name):
+    def fetch_qg(self, name):
         """Returns query and gallery of a test dataset, each containing
         tuples of (img_path(s), pid, camid).
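Callers of the old name must be updated in the same commit (the engine.py hunk further down does exactly that); a minimal sketch of the call-site change, with a hypothetical dataset name:

# before the rename:
#     qg = datamanager.return_query_and_gallery_by_name('market1501')
# after:
qg = datamanager.fetch_qg('market1501')  # query and gallery lists of (img_path(s), pid, camid)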


@@ -151,10 +151,8 @@ class CUHK03(ImageDataset):
                 img_paths = _process_images(
                     camp[pid, :], campid, pid, imgs_dir
                 )
-                assert len(img_paths
-                           ) > 0, 'campid{}-pid{} has no images'.format(
-                               campid, pid
-                           )
+                assert len(img_paths) > 0, \
+                    'campid{}-pid{} has no images'.format(campid, pid)
                 meta_data.append((campid + 1, pid + 1, img_paths))
                 print(
                     '- done camera pair {} with {} identities'.format(


@@ -61,7 +61,8 @@ class DukeMTMCreID(ImageDataset):
             pid, camid = map(int, pattern.search(img_path).groups())
             assert 1 <= camid <= 8
             camid -= 1  # index starts from 0
-            if relabel: pid = pid2label[pid]
+            if relabel:
+                pid = pid2label[pid]
             data.append((img_path, pid, camid))

         return data
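For context, `pid2label` is the usual mapping from raw person IDs to contiguous training labels; a typical construction (a sketch, not part of this hunk; the glob path and regex are illustrative):

import re
import glob

pattern = re.compile(r'([-\d]+)_c(\d)')  # hypothetical re-ID filename pattern
img_paths = glob.glob('dukemtmc-reid/bounding_box_train/*.jpg')  # hypothetical path

pid_container = {int(pattern.search(p).groups()[0]) for p in img_paths}
pid2label = {pid: label for label, pid in enumerate(sorted(pid_container))}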


@@ -3,7 +3,7 @@ import os.path as osp

 from ..dataset import ImageDataset

-##### Log #####
+# Log
 # 22.01.2019
 # - add v2
 # - v1 and v2 differ in dir names


@@ -83,10 +83,8 @@ class VIPeR(ImageDataset):
         np.random.shuffle(order)
         train_idxs = order[:num_train_pids]
         test_idxs = order[num_train_pids:]
-        assert not bool(
-            set(train_idxs)
-            & set(test_idxs)
-        ), 'Error: train and test overlap'
+        assert not bool(set(train_idxs) & set(test_idxs)), \
+            'Error: train and test overlap'

         train = []
         for pid, idx in enumerate(train_idxs):


@@ -98,7 +98,8 @@ class Mars(VideoDataset):
             if pid == -1:
                 continue  # junk images are just ignored
             assert 1 <= camid <= 6
-            if relabel: pid = pid2label[pid]
+            if relabel:
+                pid = pid2label[pid]
             camid -= 1  # index starts from 0
             img_names = names[start_index - 1:end_index]


@@ -4,7 +4,9 @@ import random
 from collections import deque

 import torch
 from PIL import Image
-from torchvision.transforms import *
+from torchvision.transforms import (
+    Resize, Compose, ToTensor, Normalize, ColorJitter, RandomHorizontalFlip
+)

 class Random2DTranslation(object):
@@ -279,8 +281,13 @@
         transform_tr += [RandomHorizontalFlip()]

     if 'random_crop' in transforms:
-        print('+ random crop (enlarge to {}x{} and ' \
-              'crop {}x{})'.format(int(round(height*1.125)), int(round(width*1.125)), height, width))
+        print(
+            '+ random crop (enlarge to {}x{} and '
+            'crop {}x{})'.format(
+                int(round(height * 1.125)), int(round(width * 1.125)), height,
+                width
+            )
+        )
         transform_tr += [Random2DTranslation(height, width)]

     if 'random_patch' in transforms:
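With the wildcard import replaced by explicit names, the training pipeline these transforms feed looks roughly like this (a sketch with illustrative sizes and jitter values, not torchreid's exact defaults):

from torchvision.transforms import (
    Resize, Compose, ToTensor, Normalize, ColorJitter, RandomHorizontalFlip
)

height, width = 256, 128  # illustrative re-ID input size
transform_tr = Compose([
    Resize((height, width)),
    RandomHorizontalFlip(),
    ColorJitter(brightness=0.2, contrast=0.15, saturation=0, hue=0),
    ToTensor(),  # PIL image -> float tensor in [0, 1], CHW
    Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])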


@@ -428,8 +428,7 @@ class Engine(object):
             if visrank:
                 visualize_ranked_results(
                     distmat,
-                    self.datamanager.
-                    return_query_and_gallery_by_name(dataset_name),
+                    self.datamanager.fetch_qg(dataset_name),
                     self.datamanager.data_type,
                     width=self.datamanager.width,
                     height=self.datamanager.height,


@@ -1,6 +1,4 @@
 from __future__ import division, print_function, absolute_import
-import time
-import datetime

 from torchreid import metrics
 from torchreid.losses import CrossEntropyLoss


@@ -1,6 +1,4 @@
 from __future__ import division, print_function, absolute_import
-import time
-import datetime

 from torchreid import metrics
 from torchreid.losses import TripletLoss, CrossEntropyLoss


@@ -57,8 +57,9 @@ def euclidean_squared_distance(input1, input2):
         torch.Tensor: distance matrix.
     """
     m, n = input1.size(0), input2.size(0)
-    distmat = torch.pow(input1, 2).sum(dim=1, keepdim=True).expand(m, n) + \
-              torch.pow(input2, 2).sum(dim=1, keepdim=True).expand(n, m).t()
+    mat1 = torch.pow(input1, 2).sum(dim=1, keepdim=True).expand(m, n)
+    mat2 = torch.pow(input2, 2).sum(dim=1, keepdim=True).expand(n, m).t()
+    distmat = mat1 + mat2
     distmat.addmm_(1, -2, input1, input2.t())
     return distmat
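The refactor only splits the expansion into named temporaries; the math is still ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, completed by the addmm_ call. A quick check against torch.cdist (note the positional addmm_(beta, alpha, ...) form above is deprecated in newer PyTorch in favor of keyword arguments):

import torch

input1, input2 = torch.randn(4, 16), torch.randn(5, 16)
m, n = input1.size(0), input2.size(0)
mat1 = torch.pow(input1, 2).sum(dim=1, keepdim=True).expand(m, n)
mat2 = torch.pow(input2, 2).sum(dim=1, keepdim=True).expand(n, m).t()
distmat = mat1 + mat2
distmat.addmm_(input1, input2.t(), beta=1, alpha=-2)  # keyword form of the call above

assert torch.allclose(distmat, torch.cdist(input1, input2).pow(2), atol=1e-5)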


@@ -300,7 +300,8 @@ class HACNN(nn.Module):
         theta = torch.zeros(theta_i.size(0), 2, 3)
         theta[:, :, :2] = scale_factors
         theta[:, :, -1] = theta_i
-        if self.use_gpu: theta = theta.cuda()
+        if self.use_gpu:
+            theta = theta.cuda()
         return theta

     def forward(self, x):
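For orientation, the 2x3 theta assembled above parameterizes HACNN's spatial-transformer crop; downstream it is consumed along these lines by F.affine_grid / F.grid_sample (a sketch with illustrative sizes and scale values):

import torch
import torch.nn.functional as F

theta = torch.zeros(4, 2, 3)   # batch of 4 affine matrices
theta[:, 0, 0] = 0.5           # x scale
theta[:, 1, 1] = 0.5           # y scale; translations live in theta[:, :, -1]
x = torch.randn(4, 3, 160, 64)
grid = F.affine_grid(theta, (4, 3, 80, 32), align_corners=True)
patch = F.grid_sample(x, grid, align_corners=True)
print(patch.shape)             # torch.Size([4, 3, 80, 32])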


@@ -249,9 +249,9 @@
         return out


-##################### Model Definition #########################
+# ----------------
+# Model Definition
+# ----------------
 class InceptionResNetV2(nn.Module):
     """Inception-ResNet-V2.


@@ -45,7 +45,8 @@ class Bottleneck(nn.Module):
         assert stride in [1, 2], 'Warning: stride must be either 1 or 2'
         self.stride = stride
         mid_channels = out_channels // 4
-        if stride == 2: out_channels -= in_channels
+        if stride == 2:
+            out_channels -= in_channels
         # group conv is not applied to first conv1x1 at stage 2
         num_groups_conv1x1 = num_groups if group_conv1x1 else 1
         self.conv1 = nn.Conv2d(
@@ -71,7 +72,8 @@ class Bottleneck(nn.Module):
             mid_channels, out_channels, 1, groups=num_groups, bias=False
         )
         self.bn3 = nn.BatchNorm2d(out_channels)
-        if stride == 2: self.shortcut = nn.AvgPool2d(3, stride=2, padding=1)
+        if stride == 2:
+            self.shortcut = nn.AvgPool2d(3, stride=2, padding=1)

     def forward(self, x):
         out = F.relu(self.bn1(self.conv1(x)))
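Why the stride-2 branch produces out_channels - in_channels maps: ShuffleNet's stride-2 block concatenates the AvgPool2d shortcut with the branch output instead of adding them, so together they sum back to out_channels. A sketch of the shape arithmetic with illustrative sizes (the real block also applies group conv, channel shuffle, and ReLU):

import torch
import torch.nn as nn

in_channels, out_channels = 24, 240
branch = nn.Conv2d(in_channels, out_channels - in_channels, 3, stride=2, padding=1)
shortcut = nn.AvgPool2d(3, stride=2, padding=1)

x = torch.randn(1, in_channels, 56, 56)
out = torch.cat([shortcut(x), branch(x)], dim=1)
print(out.shape)  # torch.Size([1, 240, 28, 28]); channels sum back to out_channels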


@@ -5,7 +5,6 @@ from __future__ import division, absolute_import
 import torch
 import torch.nn as nn
 import torch.utils.model_zoo as model_zoo
-from torch.utils import model_zoo as model_zoo

 __all__ = ['squeezenet1_0', 'squeezenet1_1', 'squeezenet1_0_fc512']