linting and formatting code

pull/294/head
KaiyangZhou 2019-12-02 14:34:59 +00:00
parent b39e89a264
commit 08bb2cc5eb
52 changed files with 0 additions and 117 deletions

View File

@ -1,8 +1,6 @@
[style]
BASED_ON_STYLE = pep8
BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true
BLANK_LINE_BEFORE_MODULE_DOCSTRING = true
BLANK_LINE_BEFORE_CLASS_DOCSTRING = true
SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true
DEDENT_CLOSING_BRACKETS = true
SPACES_BEFORE_COMMENT = 1
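The options above are yapf knobs, presumably from the repository's .style.yapf. As a rough, purely illustrative sketch (not code from this commit), this is the kind of layout the split/dedent settings produce: SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN breaks right after the opening parenthesis and DEDENT_CLOSING_BRACKETS pulls the closing bracket back to the opening line's indentation.

import torch.nn as nn

def build_conv(
    in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1
):
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size,
        stride=stride,
        padding=padding,
        groups=groups,
        bias=False
    )  # one space before an inline comment (SPACES_BEFORE_COMMENT = 1)

print(build_conv(3, 64, 3))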

View File

@ -7,7 +7,6 @@ from torch.nn import functional as F
# Basic layers
##########
class ConvLayer(nn.Module):
"""Convolution layer (conv + bn + relu)."""
def __init__(
@ -43,7 +42,6 @@ class ConvLayer(nn.Module):
class Conv1x1(nn.Module):
"""1x1 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
@ -67,7 +65,6 @@ class Conv1x1(nn.Module):
class Conv1x1Linear(nn.Module):
"""1x1 convolution + bn (w/o non-linearity)."""
def __init__(self, in_channels, out_channels, stride=1, bn=True):
@ -87,7 +84,6 @@ class Conv1x1Linear(nn.Module):
class Conv3x3(nn.Module):
"""3x3 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
@ -111,7 +107,6 @@ class Conv3x3(nn.Module):
class LightConv3x3(nn.Module):
"""Lightweight 3x3 convolution.
1x1 (linear) + dw 3x3 (nonlinear).
@ -142,7 +137,6 @@ class LightConv3x3(nn.Module):
class LightConvStream(nn.Module):
"""Lightweight convolution stream."""
def __init__(self, in_channels, out_channels, depth):
@ -164,7 +158,6 @@ class LightConvStream(nn.Module):
# Building blocks for omni-scale feature learning
##########
class ChannelGate(nn.Module):
"""A mini-network that generates channel-wise gates conditioned on input tensor."""
def __init__(
@ -226,7 +219,6 @@ class ChannelGate(nn.Module):
class OSBlock(nn.Module):
"""Omni-scale feature learning block."""
def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs):
@ -260,7 +252,6 @@ class OSBlock(nn.Module):
class OSBlockINv1(nn.Module):
"""Omni-scale feature learning block with instance normalization."""
def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs):
@ -296,7 +287,6 @@ class OSBlockINv1(nn.Module):
class OSBlockINv2(nn.Module):
"""Omni-scale feature learning block with instance normalization."""
def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs):
@ -332,7 +322,6 @@ class OSBlockINv2(nn.Module):
class OSBlockINv3(nn.Module):
"""Omni-scale feature learning block with instance normalization."""
def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs):
@ -373,7 +362,6 @@ class OSBlockINv3(nn.Module):
# Network architecture
##########
class OSNet(nn.Module):
"""Omni-Scale Network.
Reference:
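The LightConv3x3 layer listed in this file factorizes a standard 3x3 convolution into a 1x1 pointwise (linear) convolution followed by a depthwise 3x3 convolution with batch norm and ReLU. A minimal, self-contained sketch of that idea, not the repository's exact code:

import torch
import torch.nn as nn

class LightConv3x3Sketch(nn.Module):
    """Sketch: 1x1 pointwise conv (no activation) + depthwise 3x3 + BN + ReLU."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 1, bias=False)
        self.conv2 = nn.Conv2d(
            out_channels, out_channels, 3, padding=1,
            groups=out_channels, bias=False
        )  # groups=out_channels makes the 3x3 conv depthwise
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv2(self.conv1(x))))

x = torch.randn(1, 64, 32, 16)
print(LightConv3x3Sketch(64, 256)(x).shape)  # torch.Size([1, 256, 32, 16])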

View File

@ -11,7 +11,6 @@ NORM_AFFINE = False # enable affine transformations for normalization layer
# Basic layers
##########
class ConvLayer(nn.Module):
"""Convolution layer (conv + bn + relu)."""
def __init__(
@ -47,7 +46,6 @@ class ConvLayer(nn.Module):
class Conv1x1(nn.Module):
"""1x1 convolution + bn + relu."""
def __init__(
@ -76,7 +74,6 @@ class Conv1x1(nn.Module):
class Conv1x1Linear(nn.Module):
"""1x1 convolution + bn (w/o non-linearity)."""
def __init__(self, in_channels, out_channels, stride=1, bn=True):
@ -96,7 +93,6 @@ class Conv1x1Linear(nn.Module):
class Conv3x3(nn.Module):
"""3x3 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
@ -120,7 +116,6 @@ class Conv3x3(nn.Module):
class LightConv3x3(nn.Module):
"""Lightweight 3x3 convolution.
1x1 (linear) + dw 3x3 (nonlinear).
@ -151,7 +146,6 @@ class LightConv3x3(nn.Module):
class LightConvStream(nn.Module):
"""Lightweight convolution stream."""
def __init__(self, in_channels, out_channels, depth):
@ -173,7 +167,6 @@ class LightConvStream(nn.Module):
# Building blocks for omni-scale feature learning
##########
class ChannelGate(nn.Module):
"""A mini-network that generates channel-wise gates conditioned on input tensor."""
def __init__(
@ -235,7 +228,6 @@ class ChannelGate(nn.Module):
class OSBlock(nn.Module):
"""Omni-scale feature learning block."""
def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs):
@ -269,7 +261,6 @@ class OSBlock(nn.Module):
class OSBlockINv1(nn.Module):
"""Omni-scale feature learning block with instance normalization."""
def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs):
@ -305,7 +296,6 @@ class OSBlockINv1(nn.Module):
class OSBlockINv2(nn.Module):
"""Omni-scale feature learning block with instance normalization."""
def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs):
@ -341,7 +331,6 @@ class OSBlockINv2(nn.Module):
class OSBlockINv3(nn.Module):
"""Omni-scale feature learning block with instance normalization."""
def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs):
@ -379,7 +368,6 @@ class OSBlockINv3(nn.Module):
class NASBlock(nn.Module):
"""Neural architecture search layer."""
def __init__(self, in_channels, out_channels, search_space=None):
@ -425,7 +413,6 @@ class NASBlock(nn.Module):
# Network architecture
##########
class OSNet(nn.Module):
"""Omni-Scale Network.
Reference:
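The ChannelGate module shown earlier in this file generates per-channel gates from the input. A generic sketch of that pattern (global average pool, bottleneck MLP, sigmoid gates); the repository's class has extra options such as the gate activation and a different default reduction, so treat this only as an illustration:

import torch
import torch.nn as nn

class ChannelGateSketch(nn.Module):
    """Squeeze (global average pool) -> bottleneck MLP -> sigmoid gates per channel."""

    def __init__(self, channels, reduction=16):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(channels, channels // reduction, 1)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(channels // reduction, channels, 1)
        self.gate = nn.Sigmoid()

    def forward(self, x):
        g = self.gate(self.fc2(self.relu(self.fc1(self.pool(x)))))
        return x * g  # reweight each channel of the input by its gate

x = torch.randn(2, 64, 16, 8)
print(ChannelGateSketch(64, reduction=4)(x).shape)  # torch.Size([2, 64, 16, 8])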

View File

@ -7,7 +7,6 @@ from torchreid.data.transforms import build_transforms
class DataManager(object):
r"""Base data manager.
Args:
@ -87,7 +86,6 @@ class DataManager(object):
class ImageDataManager(DataManager):
r"""Image data manager.
Args:
@ -330,7 +328,6 @@ class ImageDataManager(DataManager):
class VideoDataManager(DataManager):
r"""Video data manager.
Args:
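For orientation, ImageDataManager is normally constructed directly from the torchreid API. A hedged usage sketch; argument names follow the project documentation of roughly this vintage and have changed across versions (older releases used a single batch_size), and the paths/values are placeholders:

import torchreid

datamanager = torchreid.data.ImageDataManager(
    root='reid-data',                 # placeholder: where datasets are stored/downloaded
    sources='market1501',             # training source dataset(s)
    targets='market1501',             # test target dataset(s)
    height=256,
    width=128,
    batch_size_train=32,
    batch_size_test=100,
    transforms=['random_flip', 'random_crop']
)
train_loader = datamanager.train_loader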

View File

@ -10,7 +10,6 @@ from torchreid.utils import read_image, download_url, mkdir_if_missing
class Dataset(object):
"""An abstract class representing a Dataset.
This is the base class for ``ImageDataset`` and ``VideoDataset``.
@ -248,7 +247,6 @@ class Dataset(object):
class ImageDataset(Dataset):
"""A base class representing ImageDataset.
All other image datasets should subclass it.
@ -297,7 +295,6 @@ class ImageDataset(Dataset):
class VideoDataset(Dataset):
"""A base class representing VideoDataset.
All other video datasets should subclass it.

View File

@ -10,7 +10,6 @@ from ..dataset import ImageDataset
class CUHK01(ImageDataset):
"""CUHK01.
Reference:

View File

@ -6,7 +6,6 @@ from ..dataset import ImageDataset
class CUHK02(ImageDataset):
"""CUHK02.
Reference:

View File

@ -7,7 +7,6 @@ from ..dataset import ImageDataset
class CUHK03(ImageDataset):
"""CUHK03.
Reference:

View File

@ -7,7 +7,6 @@ from ..dataset import ImageDataset
class DukeMTMCreID(ImageDataset):
"""DukeMTMC-reID.
Reference:

View File

@ -9,7 +9,6 @@ from ..dataset import ImageDataset
class GRID(ImageDataset):
"""GRID.
Reference:

View File

@ -11,7 +11,6 @@ from ..dataset import ImageDataset
class iLIDS(ImageDataset):
"""QMUL-iLIDS.
Reference:

View File

@ -8,7 +8,6 @@ from ..dataset import ImageDataset
class Market1501(ImageDataset):
"""Market1501.
Reference:

View File

@ -23,7 +23,6 @@ VERSION_DICT = {
class MSMT17(ImageDataset):
"""MSMT17.
Reference:

View File

@ -8,7 +8,6 @@ from ..dataset import ImageDataset
class PRID(ImageDataset):
"""PRID (single-shot version of prid-2011)
Reference:

View File

@ -7,7 +7,6 @@ from ..dataset import ImageDataset
class SenseReID(ImageDataset):
"""SenseReID.
This dataset is used for test purpose only.

View File

@ -9,7 +9,6 @@ from ..dataset import ImageDataset
class VIPeR(ImageDataset):
"""VIPeR.
Reference:

View File

@ -9,7 +9,6 @@ from ..dataset import VideoDataset
class DukeMTMCVidReID(VideoDataset):
"""DukeMTMCVidReID.
Reference:

View File

@ -9,7 +9,6 @@ from ..dataset import VideoDataset
class iLIDSVID(VideoDataset):
"""iLIDS-VID.
Reference:

View File

@ -7,7 +7,6 @@ from ..dataset import VideoDataset
class Mars(VideoDataset):
"""MARS.
Reference:

View File

@ -8,7 +8,6 @@ from ..dataset import VideoDataset
class PRID2011(VideoDataset):
"""PRID2011.
Reference:

View File

@ -9,7 +9,6 @@ AVAI_SAMPLERS = ['RandomIdentitySampler', 'SequentialSampler', 'RandomSampler']
class RandomIdentitySampler(Sampler):
"""Randomly samples N identities each with K instances.
Args:
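The idea behind this sampler, as a standalone sketch rather than the repository's implementation: group sample indices by person ID, then build each batch from N randomly chosen identities with K instances each, so that every batch contains enough positives for metric losses.

import random
from collections import defaultdict

def identity_balanced_batch(labels, num_ids=8, num_instances=4):
    """Return indices for one batch of num_ids identities x num_instances images."""
    index_by_pid = defaultdict(list)
    for idx, pid in enumerate(labels):
        index_by_pid[pid].append(idx)
    pids = random.sample(sorted(index_by_pid), num_ids)
    batch = []
    for pid in pids:
        pool = index_by_pid[pid]
        # Sample with replacement if an identity has fewer than num_instances images.
        if len(pool) < num_instances:
            batch.extend(random.choices(pool, k=num_instances))
        else:
            batch.extend(random.sample(pool, num_instances))
    return batch

labels = [0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7]
print(identity_balanced_batch(labels, num_ids=4, num_instances=2))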

View File

@ -8,7 +8,6 @@ from torchvision.transforms import *
class Random2DTranslation(object):
"""Randomly translates the input image with a probability.
Specifically, given a predefined shape (height, width), the input is first
@ -48,7 +47,6 @@ class Random2DTranslation(object):
class RandomErasing(object):
"""Randomly erases an image patch.
Origin: `<https://github.com/zhunzhong07/Random-Erasing>`_
@ -107,7 +105,6 @@ class RandomErasing(object):
class ColorAugmentation(object):
"""Randomly alters the intensities of RGB channels.
Reference:
@ -143,7 +140,6 @@ class ColorAugmentation(object):
class RandomPatch(object):
"""Random patch data augmentation.
There is a patch pool that stores randomly extracted patches from person images.
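Of the transforms in this file, Random2DTranslation is the simplest to summarize: with some probability the image is resized slightly larger than the target shape and then randomly cropped back to it, which shifts the content by a few pixels. A hedged sketch of that behaviour, not the repository's exact code:

import random
from PIL import Image

def random_2d_translation(img, height, width, p=0.5, scale=1.125):
    """Resize to (scale*height, scale*width), then take a random (height, width) crop."""
    if random.random() > p:
        return img.resize((width, height), Image.BILINEAR)
    new_w, new_h = int(round(width * scale)), int(round(height * scale))
    img = img.resize((new_w, new_h), Image.BILINEAR)
    x = random.randint(0, new_w - width)
    y = random.randint(0, new_h - height)
    return img.crop((x, y, x + width, y + height))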

View File

@ -15,7 +15,6 @@ from torchreid.losses import DeepSupervision
class Engine(object):
r"""A generic base Engine class for both image- and video-reid.
Args:

View File

@ -12,7 +12,6 @@ from ..engine import Engine
class ImageSoftmaxEngine(Engine):
r"""Softmax-loss engine for image-reid.
Args:
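Typical use of this engine, sketched after the project README of roughly this version; function and argument names may differ between torchreid releases, and running it requires the datasets to be available under root:

import torchreid

datamanager = torchreid.data.ImageDataManager(
    root='reid-data', sources='market1501', height=256, width=128
)
model = torchreid.models.build_model(
    name='osnet_x1_0', num_classes=datamanager.num_train_pids, loss='softmax'
)
optimizer = torchreid.optim.build_optimizer(model, optim='adam', lr=0.0003)
scheduler = torchreid.optim.build_lr_scheduler(
    optimizer, lr_scheduler='single_step', stepsize=20
)
engine = torchreid.engine.ImageSoftmaxEngine(
    datamanager, model, optimizer, scheduler=scheduler, label_smooth=True
)
engine.run(save_dir='log/osnet', max_epoch=60, eval_freq=10, print_freq=10)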

View File

@ -12,7 +12,6 @@ from ..engine import Engine
class ImageTripletEngine(Engine):
r"""Triplet-loss engine for image-reid.
Args:

View File

@ -5,7 +5,6 @@ from torchreid.engine.image import ImageSoftmaxEngine
class VideoSoftmaxEngine(ImageSoftmaxEngine):
"""Softmax-loss engine for video-reid.
Args:

View File

@ -5,7 +5,6 @@ from torchreid.engine.video import VideoSoftmaxEngine
class VideoTripletEngine(ImageTripletEngine, VideoSoftmaxEngine):
"""Triplet-loss engine for video-reid.
Args:

View File

@ -4,7 +4,6 @@ import torch.nn as nn
class CrossEntropyLoss(nn.Module):
r"""Cross entropy loss with label smoothing regularizer.
Reference:
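Label smoothing replaces the one-hot target with a mixture of the one-hot vector and a uniform distribution: the true class gets 1 - eps + eps/K and every other class gets eps/K, where K is the number of classes. A minimal sketch of such a loss (the repository's class takes different constructor arguments):

import torch
import torch.nn as nn
import torch.nn.functional as F

class LabelSmoothCrossEntropy(nn.Module):
    """Cross entropy with the one-hot target smoothed towards uniform."""

    def __init__(self, num_classes, eps=0.1):
        super().__init__()
        self.num_classes = num_classes
        self.eps = eps

    def forward(self, logits, targets):
        log_probs = F.log_softmax(logits, dim=1)
        with torch.no_grad():
            smooth = torch.full_like(log_probs, self.eps / self.num_classes)
            smooth.scatter_(
                1, targets.unsqueeze(1), 1 - self.eps + self.eps / self.num_classes
            )
        return (-smooth * log_probs).sum(dim=1).mean()

logits = torch.randn(4, 10)
targets = torch.tensor([1, 3, 5, 7])
print(LabelSmoothCrossEntropy(num_classes=10)(logits, targets))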

View File

@ -4,7 +4,6 @@ import torch.nn as nn
class TripletLoss(nn.Module):
"""Triplet loss with hard positive/negative mining.
Reference:
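Hard positive/negative mining here means: within a batch, for every anchor, take the farthest sample sharing its identity and the closest sample with a different identity, then apply a margin-based loss to that pair of distances. A rough sketch, assuming an identity-balanced batch like the sampler above produces; the repository's class is organized differently:

import torch
import torch.nn.functional as F

def hard_mining_triplet_loss(features, labels, margin=0.3):
    """Batch-hard triplet loss on pairwise L2 distances between embeddings."""
    dist = torch.cdist(features, features, p=2)              # (N, N) distances
    same_id = labels.unsqueeze(0) == labels.unsqueeze(1)     # (N, N) identity mask
    # Hardest positive: largest distance among samples with the same label.
    d_ap = dist.masked_fill(~same_id, float('-inf')).max(dim=1).values
    # Hardest negative: smallest distance among samples with a different label.
    d_an = dist.masked_fill(same_id, float('inf')).min(dim=1).values
    return F.relu(d_ap - d_an + margin).mean()

features = F.normalize(torch.randn(8, 128), dim=1)
labels = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3])
print(hard_mining_triplet_loss(features, labels))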

View File

@ -100,7 +100,6 @@ class _Transition(nn.Sequential):
class DenseNet(nn.Module):
"""Densely connected network.
Reference:

View File

@ -7,7 +7,6 @@ __all__ = ['HACNN']
class ConvBlock(nn.Module):
"""Basic convolutional block.
convolution + batch normalization + relu.
@ -90,7 +89,6 @@ class InceptionB(nn.Module):
class SpatialAttn(nn.Module):
"""Spatial Attention (Sec. 3.1.I.1)"""
def __init__(self):
@ -115,7 +113,6 @@ class SpatialAttn(nn.Module):
class ChannelAttn(nn.Module):
"""Channel Attention (Sec. 3.1.I.2)"""
def __init__(self, in_channels, reduction_rate=16):
@ -134,7 +131,6 @@ class ChannelAttn(nn.Module):
class SoftAttn(nn.Module):
"""Soft Attention (Sec. 3.1.I)
Aim: Spatial Attention + Channel Attention
@ -157,7 +153,6 @@ class SoftAttn(nn.Module):
class HardAttn(nn.Module):
"""Hard Attention (Sec. 3.1.II)"""
def __init__(self, in_channels):
@ -183,7 +178,6 @@ class HardAttn(nn.Module):
class HarmAttn(nn.Module):
"""Harmonious Attention (Sec. 3.1)"""
def __init__(self, in_channels):
@ -198,7 +192,6 @@ class HarmAttn(nn.Module):
class HACNN(nn.Module):
"""Harmonious Attention Convolutional Neural Network.
Reference:

View File

@ -253,7 +253,6 @@ class Block8(nn.Module):
class InceptionResNetV2(nn.Module):
"""Inception-ResNet-V2.
Reference:

View File

@ -298,7 +298,6 @@ class Inception_C(nn.Module):
class InceptionV4(nn.Module):
"""Inception-v4.
Reference:

View File

@ -96,7 +96,6 @@ class MLFNBlock(nn.Module):
class MLFN(nn.Module):
"""Multi-Level Factorisation Net.
Reference:

View File

@ -16,7 +16,6 @@ model_urls = {
class ConvBlock(nn.Module):
"""Basic convolutional block.
convolution (bias discarded) + batch normalization + relu6.
@ -68,7 +67,6 @@ class Bottleneck(nn.Module):
class MobileNetV2(nn.Module):
"""MobileNetV2.
Reference:

View File

@ -7,7 +7,6 @@ __all__ = ['MuDeep']
class ConvBlock(nn.Module):
"""Basic convolutional block.
convolution + batch normalization + relu.
@ -30,7 +29,6 @@ class ConvBlock(nn.Module):
class ConvLayers(nn.Module):
"""Preprocessing layers."""
def __init__(self):
@ -47,7 +45,6 @@ class ConvLayers(nn.Module):
class MultiScaleA(nn.Module):
"""Multi-scale stream layer A (Sec.3.1)"""
def __init__(self):
@ -77,7 +74,6 @@ class MultiScaleA(nn.Module):
class Reduction(nn.Module):
"""Reduction layer (Sec.3.1)"""
def __init__(self):
@ -99,7 +95,6 @@ class Reduction(nn.Module):
class MultiScaleB(nn.Module):
"""Multi-scale stream layer B (Sec.3.1)"""
def __init__(self):
@ -131,7 +126,6 @@ class MultiScaleB(nn.Module):
class Fusion(nn.Module):
"""Saliency-based learning fusion layer (Sec.3.2)"""
def __init__(self):
@ -155,7 +149,6 @@ class Fusion(nn.Module):
class MuDeep(nn.Module):
"""Multiscale deep neural network.
Reference:

View File

@ -889,7 +889,6 @@ class ReductionCell1(nn.Module):
class NASNetAMobile(nn.Module):
"""Neural Architecture Search (NAS).
Reference:

View File

@ -25,7 +25,6 @@ pretrained_urls = {
# Basic layers
##########
class ConvLayer(nn.Module):
"""Convolution layer (conv + bn + relu)."""
def __init__(
@ -62,7 +61,6 @@ class ConvLayer(nn.Module):
class Conv1x1(nn.Module):
"""1x1 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
@ -87,7 +85,6 @@ class Conv1x1(nn.Module):
class Conv1x1Linear(nn.Module):
"""1x1 convolution + bn (w/o non-linearity)."""
def __init__(self, in_channels, out_channels, stride=1):
@ -104,7 +101,6 @@ class Conv1x1Linear(nn.Module):
class Conv3x3(nn.Module):
"""3x3 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
@ -129,7 +125,6 @@ class Conv3x3(nn.Module):
class LightConv3x3(nn.Module):
"""Lightweight 3x3 convolution.
1x1 (linear) + dw 3x3 (nonlinear).
@ -164,7 +159,6 @@ class LightConv3x3(nn.Module):
# Building blocks for omni-scale feature learning
##########
class ChannelGate(nn.Module):
"""A mini-network that generates channel-wise gates conditioned on input tensor."""
def __init__(
@ -226,7 +220,6 @@ class ChannelGate(nn.Module):
class OSBlock(nn.Module):
"""Omni-scale feature learning block."""
def __init__(
@ -286,7 +279,6 @@ class OSBlock(nn.Module):
# Network architecture
##########
class OSNet(nn.Module):
"""Omni-Scale Network.
Reference:

View File

@ -15,7 +15,6 @@ pretrained_urls = {
# Basic layers
##########
class ConvLayer(nn.Module):
"""Convolution layer (conv + bn + relu)."""
def __init__(
@ -51,7 +50,6 @@ class ConvLayer(nn.Module):
class Conv1x1(nn.Module):
"""1x1 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
@ -75,7 +73,6 @@ class Conv1x1(nn.Module):
class Conv1x1Linear(nn.Module):
"""1x1 convolution + bn (w/o non-linearity)."""
def __init__(self, in_channels, out_channels, stride=1, bn=True):
@ -95,7 +92,6 @@ class Conv1x1Linear(nn.Module):
class Conv3x3(nn.Module):
"""3x3 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
@ -119,7 +115,6 @@ class Conv3x3(nn.Module):
class LightConv3x3(nn.Module):
"""Lightweight 3x3 convolution.
1x1 (linear) + dw 3x3 (nonlinear).
@ -150,7 +145,6 @@ class LightConv3x3(nn.Module):
class LightConvStream(nn.Module):
"""Lightweight convolution stream."""
def __init__(self, in_channels, out_channels, depth):
@ -172,7 +166,6 @@ class LightConvStream(nn.Module):
# Building blocks for omni-scale feature learning
##########
class ChannelGate(nn.Module):
"""A mini-network that generates channel-wise gates conditioned on input tensor."""
def __init__(
@ -234,7 +227,6 @@ class ChannelGate(nn.Module):
class OSBlock(nn.Module):
"""Omni-scale feature learning block."""
def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs):
@ -268,7 +260,6 @@ class OSBlock(nn.Module):
class OSBlockINin(nn.Module):
"""Omni-scale feature learning block with instance normalization."""
def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs):
@ -307,7 +298,6 @@ class OSBlockINin(nn.Module):
# Network architecture
##########
class OSNet(nn.Module):
"""Omni-Scale Network.
Reference:

View File

@ -129,7 +129,6 @@ class DimReduceLayer(nn.Module):
class PCB(nn.Module):
"""Part-based Convolutional Baseline.
Reference:

View File

@ -155,7 +155,6 @@ class Bottleneck(nn.Module):
class ResNet(nn.Module):
"""Residual network.
Reference:

View File

@ -129,7 +129,6 @@ class Bottleneck(nn.Module):
class ResNet(nn.Module):
"""Residual network + IBN layer.
Reference:

View File

@ -112,7 +112,6 @@ class Bottleneck(nn.Module):
class ResNet(nn.Module):
"""Residual network + IBN layer.
Reference:

View File

@ -106,7 +106,6 @@ class Bottleneck(nn.Module):
class ResNetMid(nn.Module):
"""Residual network + mid-level features.
Reference:

View File

@ -113,7 +113,6 @@ class SEModule(nn.Module):
class Bottleneck(nn.Module):
"""
Base class for bottlenecks that implements `forward()` method.
"""
@ -142,7 +141,6 @@ class Bottleneck(nn.Module):
class SEBottleneck(Bottleneck):
"""
Bottleneck for SENet154.
"""
@ -175,7 +173,6 @@ class SEBottleneck(Bottleneck):
class SEResNetBottleneck(Bottleneck):
"""
ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe
implementation and uses `stride=stride` in `conv1` and not in `conv2`
@ -209,7 +206,6 @@ class SEResNetBottleneck(Bottleneck):
class SEResNeXtBottleneck(Bottleneck):
"""ResNeXt bottleneck type C with a Squeeze-and-Excitation module"""
expansion = 4
@ -248,7 +244,6 @@ class SEResNeXtBottleneck(Bottleneck):
class SENet(nn.Module):
"""Squeeze-and-excitation network.
Reference:

View File

@ -97,7 +97,6 @@ cfg = {
class ShuffleNet(nn.Module):
"""ShuffleNet.
Reference:

View File

@ -116,7 +116,6 @@ class InvertedResidual(nn.Module):
class ShuffleNetV2(nn.Module):
"""ShuffleNetV2.
Reference:

View File

@ -46,7 +46,6 @@ class Fire(nn.Module):
class SqueezeNet(nn.Module):
"""SqueezeNet.
Reference:

View File

@ -143,7 +143,6 @@ class Block(nn.Module):
class Xception(nn.Module):
"""Xception.
Reference:

View File

@ -4,7 +4,6 @@ __all__ = ['AverageMeter']
class AverageMeter(object):
"""Computes and stores the average and current value.
Examples::
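The class is small enough to sketch in full; it simply keeps the latest value plus a running sum and count so the mean can be read at any time. A generic version, not necessarily byte-for-byte the repository's:

class AverageMeterSketch:
    """Tracks the most recent value and the running average."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

losses = AverageMeterSketch()
for batch_loss in (0.9, 0.7, 0.4):
    losses.update(batch_loss)
print(round(losses.avg, 4))  # ~0.6667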

View File

@ -9,7 +9,6 @@ __all__ = ['Logger', 'RankLogger']
class Logger(object):
"""Writes console output to external text file.
Imported from `<https://github.com/Cysu/open-reid/blob/master/reid/utils/logging.py>`_
@ -61,7 +60,6 @@ class Logger(object):
class RankLogger(object):
"""Records the rank1 matching accuracy obtained for each
test dataset at specified evaluation steps and provides a function
to show the summarized results, which are convenient for analysis.

View File

@ -1,6 +1,5 @@
#!/usr/bin/env python2/python3
# -*- coding: utf-8 -*-
"""
Source: https://github.com/zhunzhong07/person-re-ranking