Yuan Liu e4252d6848
[Feature]: Support BEiT Transformer layer. (#919)
* [Feature]: Add BEiT-style transformer encoder layer

* [Feature]: Add average token

* [Fix]: Fix lint

* [Fix]: Refactor CAE config

* [Fix]: Change cv2 backend to pillow backend

* [Fix]: Fix MAE and CAE reshape bug

* [Feature]: Add freeze vit layers

* [Feature]: Add mc

* [Fix]: Fix lint

* [Fix]: Fix dataset bug

* [Fix]: Delete cae selfsup config

* [Fix]: docstring

* [Refactor]: Rename init_values to layer_scale_init_value

* [Fix]: Refine the docstring of avg_token

* [Fix]: Call super init weight in beit attention

* [Fix]: remove mc

* [Fix]: Fix docstring

* [Fix]: Fix docstring

* [Fix]: Fix lint

* [Fix]: Fix init_value bug and change the logic of outputting cls token

* [Fix]: Fix docstring
2022-08-17 00:07:06 +08:00
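
The two recurring pieces in the commit log above are layer scale (the init_values / layer_scale_init_value parameter) and the avg_token output mode. The following is a minimal sketch of those two ideas only, not the mmcls implementation; the names LayerScaleBlock and avg_token are illustrative:

import torch
import torch.nn as nn


class LayerScaleBlock(nn.Module):
    """Illustrative BEiT-style residual branch with layer scale."""

    def __init__(self, dim, layer_scale_init_value=0.1):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.mlp = nn.Sequential(
            nn.Linear(dim, 4 * dim), nn.GELU(), nn.Linear(4 * dim, dim))
        # Learnable per-channel scale on the residual branch, initialized
        # to a small constant (the layer_scale_init_value from the commits).
        self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(dim))

    def forward(self, x):
        # x: (B, L, C). Scale the branch output channel-wise before the
        # residual addition.
        return x + self.gamma * self.mlp(self.norm(x))


def avg_token(x):
    # x: (B, L, C) with x[:, 0] the cls token; return the mean of the
    # patch tokens instead of the cls token.
    return x[:, 1:].mean(dim=1)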

# Copyright (c) OpenMMLab. All rights reserved.
from .attention import BEiTAttention, MultiheadAttention, ShiftWindowMSA
from .batch_augments import CutMix, Mixup, RandomBatchAugment, ResizeMix
from .channel_shuffle import channel_shuffle
from .data_preprocessor import ClsDataPreprocessor
from .embed import (HybridEmbed, PatchEmbed, PatchMerging, resize_pos_embed,
                    resize_relative_position_bias_table)
from .helpers import is_tracing, to_2tuple, to_3tuple, to_4tuple, to_ntuple
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .position_encoding import ConditionalPositionEncoding
from .se_layer import SELayer

__all__ = [
    'channel_shuffle', 'make_divisible', 'InvertedResidual', 'SELayer',
    'to_ntuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'PatchEmbed',
    'PatchMerging', 'HybridEmbed', 'RandomBatchAugment', 'ShiftWindowMSA',
    'is_tracing', 'MultiheadAttention', 'ConditionalPositionEncoding',
    'resize_pos_embed', 'resize_relative_position_bias_table',
    'ClsDataPreprocessor', 'Mixup', 'CutMix', 'ResizeMix', 'BEiTAttention'
]
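
Since __all__ defines the public surface of this utils package, a quick sanity check can confirm every listed name is actually re-exported. The mmcls.models.utils import path is an assumption based on where this __init__.py appears to live:

# Hypothetical check; assumes the package path mmcls.models.utils.
import mmcls.models.utils as utils

for name in utils.__all__:
    assert hasattr(utils, name), f'{name} not exported by mmcls.models.utils'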