# Copyright (c) OpenMMLab. All rights reserved.
import logging
import tempfile
from unittest.mock import MagicMock

import pytest
import torch
import torch.nn as nn
from mmcv.parallel import MMDataParallel
from mmcv.runner import build_runner, obj_from_dict
from torch.utils.data import DataLoader, Dataset

from mmselfsup.core.hooks import DistOptimizerHook, GradAccumFp16OptimizerHook
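

# A minimal one-sample dataset so the runner can finish its training loop
# quickly in these tests.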
class ExampleDataset(Dataset):

    def __getitem__(self, idx):
        results = dict(img=torch.tensor([1.]), img_metas=dict())
        return results

    def __len__(self):
        return 1
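

# A toy model whose `prototypes_test` layer name matches the `prototypes` key
# passed to `frozen_layers_cfg` in the tests below.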
class ExampleModel(nn.Module):

    def __init__(self):
        super(ExampleModel, self).__init__()
        self.test_cfg = None
        self.linear = nn.Linear(1, 1)
        self.prototypes_test = nn.Linear(1, 1)

    def forward(self, img, img_metas, test_mode=False, **kwargs):
        out = self.linear(img)
        out = self.prototypes_test(out)
        return out

    def train_step(self, data_batch, optimizer):
        loss = self.forward(**data_batch)
        return dict(loss=loss, num_samples=len(data_batch))
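

# DistOptimizerHook configured with `frozen_layers_cfg` is expected to keep the
# matching `prototypes*` parameters frozen for the configured number of
# iterations, so a short training run should leave them unchanged.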
def test_optimizer_hook():
    test_dataset = ExampleDataset()
    test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
    data_loader = DataLoader(
        test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)

    runner_cfg = dict(type='EpochBasedRunner', max_epochs=5)
    optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
    optim_hook_cfg = dict(
        grad_clip=dict(max_norm=10), frozen_layers_cfg=dict(prototypes=5005))

    optimizer_hook = DistOptimizerHook(**optim_hook_cfg)

    # test DistOptimizerHook
    with tempfile.TemporaryDirectory() as tmpdir:
        model = MMDataParallel(ExampleModel())
        optimizer = obj_from_dict(optim_cfg, torch.optim,
                                  dict(params=model.parameters()))

        runner = build_runner(
            runner_cfg,
            default_args=dict(
                model=model,
                optimizer=optimizer,
                work_dir=tmpdir,
                logger=logging.getLogger()))
        runner.register_training_hooks(optimizer_hook)

        # snapshot the prototype parameter values before training; clone them
        # so the comparison below checks values rather than the same tensor
        # objects returned by named_parameters()
        prototypes_start = []
        for name, p in runner.model.module.named_parameters():
            if 'prototypes_test' in name:
                prototypes_start.append(p.detach().clone())

        # run training
        runner.run([data_loader], [('train', 1)])

        prototypes_end = []
        for name, p in runner.model.module.named_parameters():
            if 'prototypes_test' in name:
                prototypes_end.append(p.detach().clone())

        # only 5 iterations are run, far fewer than the 5005 frozen iterations,
        # so the prototype parameters should be unchanged
        assert len(prototypes_start) == len(prototypes_end)
        for i in range(len(prototypes_start)):
            p_start = prototypes_start[i]
            p_end = prototypes_end[i]
            assert torch.equal(p_start, p_end)
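

# GradAccumFp16OptimizerHook combines gradient accumulation with fp16 loss
# scaling, so this test only runs when CUDA is available.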
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='CUDA is not available.')
def test_fp16optimizer_hook():
    test_dataset = ExampleDataset()
    test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
    data_loader = DataLoader(
        test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)

    runner_cfg = dict(type='EpochBasedRunner', max_epochs=5)
    optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
    optim_hook_cfg = dict(
        grad_clip=dict(max_norm=10),
        loss_scale=16.,
        frozen_layers_cfg=dict(prototypes=5005))

    optimizer_hook = GradAccumFp16OptimizerHook(**optim_hook_cfg)

    # test GradAccumFp16OptimizerHook
    with tempfile.TemporaryDirectory() as tmpdir:
        model = MMDataParallel(ExampleModel())
        optimizer = obj_from_dict(optim_cfg, torch.optim,
                                  dict(params=model.parameters()))

        runner = build_runner(
            runner_cfg,
            default_args=dict(
                model=model,
                optimizer=optimizer,
                work_dir=tmpdir,
                logger=logging.getLogger(),
                meta=dict()))
        runner.register_training_hooks(optimizer_hook)
        # run training
        runner.run([data_loader], [('train', 1)])

        # the static loss scale configured above should be recorded in the
        # runner meta after training
        assert runner.meta['fp16']['loss_scaler']['scale'] == 16.