# Copyright (c) OpenMMLab. All rights reserved.
import torch

from mmselfsup.models.algorithms import Classification


def test_classification():
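    """Test the Classification algorithm with a Sobel + ResNet setup and a ViT backbone."""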
    # test ResNet
    with_sobel = True
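    # the Sobel layer converts the RGB input into a 2-channel gradient map,
    # so the backbone is configured with in_channels=2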
    backbone = dict(
        type='ResNet',
        depth=50,
        in_channels=2,
        out_indices=[4],  # 0: conv-1, x: stage-x
        norm_cfg=dict(type='BN'),
        frozen_stages=4)
    head = dict(
        type='ClsHead', with_avg_pool=True, in_channels=2048, num_classes=4)

    alg = Classification(backbone=backbone, with_sobel=with_sobel, head=head)
    assert hasattr(alg, 'sobel_layer')
    assert hasattr(alg, 'head')

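    # ResNet-50 downsamples 224x224 inputs by a factor of 32,
    # so the stage-4 feature map should have shape (16, 2048, 7, 7)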
    fake_input = torch.randn((16, 3, 224, 224))
    fake_labels = torch.ones(16, dtype=torch.long)
    fake_backbone_out = alg.extract_feat(fake_input)
    assert fake_backbone_out[0].size() == torch.Size([16, 2048, 7, 7])
    fake_out = alg.forward_train(fake_input, fake_labels)
    assert fake_out['loss'].item() > 0

    # test ViT
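    # 'mocov3-small' is the ViT-Small variant added for MoCo v3 (embed dim 384);
    # stop_grad_conv1 stops gradients to the patch projection, following MoCo v3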
    backbone = dict(
        type='VisionTransformer',
        arch='mocov3-small',  # embed_dim = 384
        img_size=224,
        patch_size=16,
        stop_grad_conv1=True)
    head = dict(
        type='ClsHead',
        in_channels=384,
        num_classes=1000,
        vit_backbone=True,
    )

    alg = Classification(backbone=backbone, head=head)
    assert hasattr(alg, 'head')

    fake_input = torch.randn((16, 3, 224, 224))
    fake_labels = torch.ones(16, dtype=torch.long)
    fake_out = alg.forward_train(fake_input, fake_labels)
    assert fake_out['loss'].item() > 0