mmclassification/mmcls/models/utils/__init__.py

# Copyright (c) OpenMMLab. All rights reserved.
from .attention import (BEiTAttention, ChannelMultiheadAttention,
                        MultiheadAttention, ShiftWindowMSA, WindowMSA,
                        WindowMSAV2)
from .batch_augments import CutMix, Mixup, RandomBatchAugment, ResizeMix
from .channel_shuffle import channel_shuffle
from .data_preprocessor import ClsDataPreprocessor
from .embed import (HybridEmbed, PatchEmbed, PatchMerging, resize_pos_embed,
                    resize_relative_position_bias_table)
from .helpers import is_tracing, to_2tuple, to_3tuple, to_4tuple, to_ntuple
from .inverted_residual import InvertedResidual
from .layer_scale import LayerScale
from .make_divisible import make_divisible
from .position_encoding import (ConditionalPositionEncoding,
                                PositionEncodingFourier)
from .se_layer import SELayer

__all__ = [
'channel_shuffle',
'make_divisible',
'InvertedResidual',
'SELayer',
'to_ntuple',
'to_2tuple',
'to_3tuple',
'to_4tuple',
'PatchEmbed',
'PatchMerging',
'HybridEmbed',
'RandomBatchAugment',
'ShiftWindowMSA',
'is_tracing',
'MultiheadAttention',
'ConditionalPositionEncoding',
'resize_pos_embed',
'resize_relative_position_bias_table',
'ClsDataPreprocessor',
'Mixup',
'CutMix',
'ResizeMix',
'BEiTAttention',
'LayerScale',
'WindowMSA',
'WindowMSAV2',
'ChannelMultiheadAttention',
'PositionEncodingFourier',
]
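
# Usage sketch (illustrative only; the behaviour noted in the comments is
# assumed from the upstream mmcls utilities, not verified here). These names
# are re-exported so downstream code can import them directly from
# ``mmcls.models.utils``, e.g.:
#
#   from mmcls.models.utils import make_divisible, to_2tuple
#
#   make_divisible(37, 8)  # -> 40: rounds to a multiple of 8, staying >= 90% of 37
#   to_2tuple(7)           # -> (7, 7): broadcasts a scalar to a 2-tuple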