# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import time

import mmcv
import torch
from mmcv import DictAction
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint

from mmselfsup.datasets import build_dataloader, build_dataset
from mmselfsup.models import build_algorithm
from mmselfsup.utils import (get_root_logger, multi_gpu_test,
                             setup_multi_processes, single_gpu_test)


def parse_args():
    parser = argparse.ArgumentParser(
        description='MMSelfSup test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--work_dir',
        help='(Deprecated, please use --work-dir) the dir to save logs and '
        'models')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument(
        '--gpu-id',
        type=int,
        default=0,
        help='id of gpu to use '
        '(only applicable to non-distributed testing)')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)

    return args
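
# A hypothetical invocation, to illustrate the arguments parsed above (the
# config and checkpoint paths are placeholders, not shipped files):
#
#   python tools/test.py configs/selfsup/example_config.py example_ckpt.pth \
#       --work-dir work_dirs/example \
#       --cfg-options data.samples_per_gpu=64
#
# '--cfg-options' key=value pairs are merged into the loaded config via
# cfg.merge_from_dict below, so nested keys use dotted paths
# (e.g. data.samples_per_gpu).
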
def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # set multi-process settings
    setup_multi_processes(cfg)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        work_type = args.config.split('/')[1]
        cfg.work_dir = osp.join('./work_dirs', work_type,
                                osp.splitext(osp.basename(args.config))[0])
    cfg.gpu_ids = [args.gpu_id]
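    # For example, with args.config = 'configs/selfsup/example.py' (a
    # hypothetical path) and no work dir given on the CLI or in the config,
    # work_type is 'selfsup' and cfg.work_dir becomes
    # './work_dirs/selfsup/example'.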
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))

    # logger
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'test_{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # build the dataloader
    dataset = build_dataset(cfg.data.val)
    if 'imgs_per_gpu' in cfg.data:
        logger.warning('"imgs_per_gpu" is deprecated. '
                       'Please use "samples_per_gpu" instead')
        if 'samples_per_gpu' in cfg.data:
            logger.warning(
                f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
                f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"'
                f'={cfg.data.imgs_per_gpu} is used in this experiment')
        else:
            logger.warning(
                'Automatically set "samples_per_gpu"="imgs_per_gpu"='
                f'{cfg.data.imgs_per_gpu} in this experiment')
        cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
    # The default loader config
    loader_cfg = dict(
        # cfg.gpus will be ignored if distributed
        num_gpus=len(cfg.gpu_ids),
        dist=distributed,
        prefetch=getattr(cfg, 'prefetch', False),
        img_norm_cfg=cfg.img_norm_cfg)

    # The overall dataloader settings
    loader_cfg.update({
        k: v
        for k, v in cfg.data.items() if k not in [
            'train', 'val', 'test', 'train_dataloader', 'val_dataloader',
            'test_dataloader'
        ]
    })
    # The specific test dataloader settings
    test_loader_cfg = {
        **loader_cfg,
        'shuffle': False,  # Do not shuffle by default
        **cfg.data.get('test_dataloader', {}),
    }
    data_loader = build_dataloader(dataset, **test_loader_cfg)
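
    # For illustration, a config could specify (values are hypothetical):
    #
    #   data = dict(
    #       samples_per_gpu=32,
    #       workers_per_gpu=4,
    #       test_dataloader=dict(samples_per_gpu=128),
    #       val=dict(...))
    #
    # With the merging above, the test dataloader would run with 128 samples
    # per GPU, while other settings fall back to the top-level values in
    # cfg.data.
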
    # build the model and load checkpoint
    model = build_algorithm(cfg.model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')

    if not distributed:
        model = MMDataParallel(model, device_ids=cfg.gpu_ids)
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader)  # dict{key: np.ndarray}

    rank, _ = get_dist_info()
    if rank == 0:
        dataset.evaluate(outputs, logger, topk=(1, 5))


if __name__ == '__main__':
    main()
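
# Usage notes (a sketch; ${...} values are placeholders):
#
#   # single-GPU testing
#   python tools/test.py ${CONFIG} ${CHECKPOINT} --gpu-id 0
#
#   # distributed testing with the PyTorch launcher, which passes
#   # --local_rank to the script (consumed in parse_args())
#   python -m torch.distributed.launch --nproc_per_node=${GPUS} \
#       tools/test.py ${CONFIG} ${CHECKPOINT} --launcher pytorch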