mmpretrain/tools/test.py

# Copyright (c) OpenMMLab. All rights reserved.
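"""Evaluate a trained classifier.

Builds the test dataset and model from a config file, loads a checkpoint,
runs single- or multi-GPU inference, and optionally computes evaluation
metrics and/or dumps raw predictions to a result file.

Example usage (paths and GPU count are illustrative):

    # single GPU
    python tools/test.py <config> <checkpoint> --metrics accuracy

    # multiple GPUs through the pytorch launcher
    python -m torch.distributed.launch --nproc_per_node=4 tools/test.py \
        <config> <checkpoint> --metrics accuracy --launcher pytorch
"""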
import argparse
import os
import warnings
from numbers import Number
import mmcv
import numpy as np
import torch
from mmcv import DictAction
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
                         wrap_fp16_model)

from mmcls.apis import multi_gpu_test, single_gpu_test
from mmcls.datasets import build_dataloader, build_dataset
from mmcls.models import build_classifier
from mmcls.utils import get_root_logger, setup_multi_processes


def parse_args():
    parser = argparse.ArgumentParser(description='mmcls test model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file')
    out_options = ['class_scores', 'pred_score', 'pred_label', 'pred_class']
    parser.add_argument(
        '--out-items',
        nargs='+',
        default=['all'],
        choices=out_options + ['none', 'all'],
        help='Besides metrics, what items will be included in the output '
        f'result file. You can choose some of ({", ".join(out_options)}), '
        'or use "all" to include all above, or use "none" to disable all of '
        'above. Defaults to output all.',
        metavar='')
    parser.add_argument(
        '--metrics',
        type=str,
        nargs='+',
        help='evaluation metrics, which depend on the dataset, e.g., '
        '"accuracy", "precision", "recall", "f1_score", "support" for '
        'single-label datasets, and "mAP", "CP", "CR", "CF1", "OP", "OR", '
        '"OF1" for multi-label datasets')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='whether to use gpu to collect results')
    parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or '
        'key=a,b. It also allows nested list/tuple values, e.g. '
        'key="[(a,b),(c,d)]". Note that the quotation marks are necessary '
        'and that no white space is allowed.')
    parser.add_argument(
        '--metric-options',
        nargs='+',
        action=DictAction,
        default={},
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be parsed as the metric_options dict for the '
        'dataset.evaluate() function.')
    parser.add_argument(
        '--show-options',
        nargs='+',
        action=DictAction,
        help='custom options for show_result. key-value pair in xxx=yyy. '
        'Check available options in `model.show_result`.')
    parser.add_argument(
        '--device', default=None, help='device used for testing. (Deprecated)')
    parser.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='(Deprecated, please use --gpu-id) ids of gpus to use '
        '(only applicable to non-distributed testing)')
    parser.add_argument(
        '--gpu-id',
        type=int,
        default=0,
        help='id of gpu to use '
        '(only applicable to non-distributed testing)')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local-rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    assert args.metrics or args.out, \
        'Please specify at least one of output path and evaluation metrics.'
    return args


def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # set multi-process settings
    setup_multi_processes(cfg)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None

    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids[0:1]
        warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
                      'Because we only support single GPU mode in '
                      'non-distributed testing. Use the first GPU '
                      'in `gpu_ids` now.')
    else:
        cfg.gpu_ids = [args.gpu_id]

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
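
    # build the test dataset; test_mode=True is forwarded to the dataset
    # class so it is constructed in evaluation mode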
    dataset = build_dataset(cfg.data.test, default_args=dict(test_mode=True))

    # build the dataloader
    # The default loader config
    loader_cfg = dict(
        # cfg.gpus will be ignored if distributed
        num_gpus=len(cfg.gpu_ids),
        dist=distributed,
        round_up=True,
    )
    # The overall dataloader settings
    loader_cfg.update({
        k: v
        for k, v in cfg.data.items() if k not in [
            'train', 'val', 'test', 'train_dataloader', 'val_dataloader',
            'test_dataloader'
        ]
    })
    test_loader_cfg = {
        **loader_cfg,
        'shuffle': False,  # Not shuffled by default
        'sampler_cfg': None,  # No sampler by default
        **cfg.data.get('test_dataloader', {}),
    }
    # the extra round_up data will be removed during gpu/cpu collect
    data_loader = build_dataloader(dataset, **test_loader_cfg)

    # build the model and load checkpoint
    model = build_classifier(cfg.model)

    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
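    # load weights onto CPU first to avoid a GPU memory spike during loading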
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')

    if 'CLASSES' in checkpoint.get('meta', {}):
        CLASSES = checkpoint['meta']['CLASSES']
    else:
        from mmcls.datasets import ImageNet
        warnings.simplefilter('once')
        warnings.warn('Class names are not saved in the checkpoint\'s '
                      'meta data, use ImageNet by default.')
        CLASSES = ImageNet.CLASSES

    if not distributed:
        if args.device == 'cpu':
            model = model.cpu()
        else:
            model = MMDataParallel(model, device_ids=cfg.gpu_ids)
            if not model.device_ids:
                assert mmcv.digit_version(mmcv.__version__) >= (1, 4, 4), \
                    'To test with CPU, please confirm your mmcv version ' \
                    'is not lower than v1.4.4'
        model.CLASSES = CLASSES
        show_kwargs = {} if args.show_options is None else args.show_options
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
                                  **show_kwargs)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)
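
    # only rank 0 aggregates metrics and writes outputs; multi_gpu_test has
    # already collected the predictions from all ranks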
    rank, _ = get_dist_info()
    if rank == 0:
        results = {}
        logger = get_root_logger()
        if args.metrics:
            eval_results = dataset.evaluate(
                results=outputs,
                metric=args.metrics,
                metric_options=args.metric_options,
                logger=logger)
            results.update(eval_results)
            for k, v in eval_results.items():
                if isinstance(v, np.ndarray):
                    v = [round(out, 2) for out in v.tolist()]
                elif isinstance(v, Number):
                    v = round(v, 2)
                else:
                    raise ValueError(f'Unsupported metric type: {type(v)}')
                print(f'\n{k} : {v}')
        if args.out:
            if 'none' not in args.out_items:
                scores = np.vstack(outputs)
                pred_score = np.max(scores, axis=1)
                pred_label = np.argmax(scores, axis=1)
                pred_class = [CLASSES[lb] for lb in pred_label]
                res_items = {
                    'class_scores': scores,
                    'pred_score': pred_score,
                    'pred_label': pred_label,
                    'pred_class': pred_class
                }
                if 'all' in args.out_items:
                    results.update(res_items)
                else:
                    for key in args.out_items:
                        results[key] = res_items[key]
            print(f'\ndumping results to {args.out}')
            mmcv.dump(results, args.out)


if __name__ == '__main__':
    main()