Yixiao Fang 08dc8c75d3
[Refactor] Add selfsup algorithms. (#1389)
* remove basehead

* add moco series

* add byol simclr simsiam

* add ut

* update configs

* add simsiam hook

* add and refactor beit

* update ut

* add cae

* update extract_feat

* refactor cae

* add mae

* refactor data preprocessor

* update heads

* add maskfeat

* add milan

* add simmim

* add mixmim

* fix lint

* fix ut

* fix lint

* add eva

* add densecl

* add barlowtwins

* add swav

* fix lint

* update readthedocs rst

* update docs

* update

* Decrease UT memory usage

* Fix docstring

* update DALLEEncoder

* Update model docs

* refactor dalle encoder

* update docstring

* fix ut

* fix config error

* add val_cfg and test_cfg

* refactor clip generator

* fix lint

* pass check

* fix ut

* add lars

* update type of BEiT in configs

* Use MMEngine style momentum in EMA.

* apply mmpretrain solarize

---------

Co-authored-by: mzr1996 <mzr1996@163.com>
2023-03-06 16:53:15 +08:00

# Copyright (c) OpenMMLab. All rights reserved.
import copy
import platform

import pytest
import torch

from mmpretrain.models import SimSiam
from mmpretrain.structures import DataSample


@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit')
def test_simsiam():
    data_preprocessor = {
        'mean': (123.675, 116.28, 103.53),
        'std': (58.395, 57.12, 57.375),
        'to_rgb': True,
    }
    backbone = dict(
        type='ResNet',
        depth=18,
        norm_cfg=dict(type='BN'),
        zero_init_residual=True)
    neck = dict(
        type='NonLinearNeck',
        in_channels=512,
        hid_channels=2,
        out_channels=2,
        num_layers=3,
        with_last_bn_affine=False,
        with_avg_pool=True,
        norm_cfg=dict(type='BN1d'))
    head = dict(
        type='LatentPredictHead',
        loss=dict(type='CosineSimilarityLoss'),
        predictor=dict(
            type='NonLinearNeck',
            in_channels=2,
            hid_channels=2,
            out_channels=2,
            with_avg_pool=False,
            with_last_bn=False,
            with_last_bias=True,
            norm_cfg=dict(type='BN1d')))

    alg = SimSiam(
        backbone=backbone,
        neck=neck,
        head=head,
        data_preprocessor=copy.deepcopy(data_preprocessor))

    # Two augmented views per sample, as expected by SimSiam.
    fake_data = {
        'inputs': [
            torch.randn((2, 3, 224, 224)),
            torch.randn((2, 3, 224, 224))
        ],
        'data_samples': [DataSample() for _ in range(2)]
    }

    # test loss forward
    fake_inputs = alg.data_preprocessor(fake_data)
    fake_loss = alg(**fake_inputs, mode='loss')
    assert fake_loss['loss'] > -1

    # test extract
    fake_feat = alg(fake_inputs['inputs'][0], mode='tensor')
    assert fake_feat[0].size() == torch.Size([2, 512, 7, 7])
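
A brief usage note, not part of the original file: the test can be run on its own with pytest. The sketch below shows one way to invoke it programmatically; the file path is an assumption about where this test sits in the mmpretrain test tree.

# Minimal sketch: run only this test via pytest's programmatic entry point.
# The path below is an assumed location and may differ in your checkout.
import pytest

if __name__ == '__main__':
    pytest.main(
        ['-q', 'tests/test_models/test_selfsup/test_simsiam.py::test_simsiam'])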