from core to engine

parent 24bcf069f8
commit 0f30c392a3
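This commit renames the mmcls.core subpackage to mmcls.engine; most hunks below simply repoint import paths. A minimal before/after sketch of the migration for downstream code, assuming the moved classes keep their public names (as the hunks show for ClsVisualizer and ClsDataSample):

    # Before this commit
    from mmcls.core import ClsVisualizer, ClsDataSample

    # After this commit, the same names come from mmcls.engine
    from mmcls.engine import ClsVisualizer, ClsDataSample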
@@ -15,7 +15,7 @@ from modelindex.load_model_index import load
 from rich.console import Console
 from rich.table import Table

-from mmcls.core import ClsVisualizer
+from mmcls.engine import ClsVisualizer
 from mmcls.datasets import CIFAR10, CIFAR100, ImageNet
 from mmcls.utils import register_all_modules

@@ -11,12 +11,12 @@ classification tasks but not supported by MMCV yet.

 Some components may be moved to MMCV in the future.

-.. contents:: mmcls.core
+.. contents:: mmcls.engine
    :depth: 2
    :local:
    :backlinks: top

-.. currentmodule:: mmcls.core
+.. currentmodule:: mmcls.engine

 Evaluation
 ------------------
@@ -61,7 +61,7 @@ You can switch between Chinese and English documentation in the lower-left corner.
    :caption: API Reference

    mmcls.apis <api/apis>
-   mmcls.core <api/core>
+   mmcls.engine <api/engine>
    mmcls.models <api/models>
    mmcls.models.utils <api/models.utils>
    mmcls.datasets <api/datasets>
@@ -60,7 +60,6 @@ You can switch between Chinese and English documentation in the lower-left corner.
    :caption: API 参考文档

    mmcls.apis <api/apis>
-   mmcls.core <api/core>
    mmcls.models <api/models>
    mmcls.models.utils <api/models.utils>
    mmcls.datasets <api/datasets>
@@ -1,10 +1,6 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from .inference import inference_model, init_model, show_result_pyplot
-from .test import multi_gpu_test, single_gpu_test
-from .train import init_random_seed, set_random_seed, train_model

 __all__ = [
-    'set_random_seed', 'train_model', 'init_model', 'inference_model',
-    'multi_gpu_test', 'single_gpu_test', 'show_result_pyplot',
-    'init_random_seed'
+    'init_model', 'inference_model', 'show_result_pyplot'
 ]
@@ -1,214 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import os.path as osp
-import pickle
-import shutil
-import tempfile
-import time
-
-import mmcv
-import numpy as np
-import torch
-import torch.distributed as dist
-from mmcv.image import tensor2imgs
-from mmcv.runner import get_dist_info
-
-
-def single_gpu_test(model,
-                    data_loader,
-                    show=False,
-                    out_dir=None,
-                    **show_kwargs):
-    """Test model with local single gpu.
-
-    This method tests model with a single gpu and supports showing results.
-
-    Args:
-        model (:obj:`torch.nn.Module`): Model to be tested.
-        data_loader (:obj:`torch.utils.data.DataLoader`): Pytorch data loader.
-        show (bool): Whether to show the test results. Defaults to False.
-        out_dir (str): The output directory of result plots of all samples.
-            Defaults to None, which means not to write output files.
-        **show_kwargs: Any other keyword arguments for showing results.
-
-    Returns:
-        list: The prediction results.
-    """
-    model.eval()
-    results = []
-    dataset = data_loader.dataset
-    prog_bar = mmcv.ProgressBar(len(dataset))
-    for i, data in enumerate(data_loader):
-        with torch.no_grad():
-            result = model(return_loss=False, **data)
-
-        batch_size = len(result)
-        results.extend(result)
-
-        if show or out_dir:
-            scores = np.vstack(result)
-            pred_score = np.max(scores, axis=1)
-            pred_label = np.argmax(scores, axis=1)
-            pred_class = [model.CLASSES[lb] for lb in pred_label]
-
-            img_metas = data['img_metas'].data[0]
-            imgs = tensor2imgs(data['img'], **img_metas[0]['img_norm_cfg'])
-            assert len(imgs) == len(img_metas)
-
-            for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
-                h, w, _ = img_meta['img_shape']
-                img_show = img[:h, :w, :]
-
-                ori_h, ori_w = img_meta['ori_shape'][:-1]
-                img_show = mmcv.imresize(img_show, (ori_w, ori_h))
-
-                if out_dir:
-                    out_file = osp.join(out_dir, img_meta['ori_filename'])
-                else:
-                    out_file = None
-
-                result_show = {
-                    'pred_score': pred_score[i],
-                    'pred_label': pred_label[i],
-                    'pred_class': pred_class[i]
-                }
-                model.module.show_result(
-                    img_show,
-                    result_show,
-                    show=show,
-                    out_file=out_file,
-                    **show_kwargs)
-
-        batch_size = data['img'].size(0)
-        for _ in range(batch_size):
-            prog_bar.update()
-    return results
-
-
-def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
-    """Test model with multiple gpus.
-
-    This method tests model with multiple gpus and collects the results
-    under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
-    it encodes results to gpu tensors and use gpu communication for results
-    collection. On cpu mode it saves the results on different gpus to 'tmpdir'
-    and collects them by the rank 0 worker.
-
-    Args:
-        model (nn.Module): Model to be tested.
-        data_loader (nn.Dataloader): Pytorch data loader.
-        tmpdir (str): Path of directory to save the temporary results from
-            different gpus under cpu mode.
-        gpu_collect (bool): Option to use either gpu or cpu to collect results.
-
-    Returns:
-        list: The prediction results.
-    """
-    model.eval()
-    results = []
-    dataset = data_loader.dataset
-    rank, world_size = get_dist_info()
-    if rank == 0:
-        # Check if tmpdir is valid for cpu_collect
-        if (not gpu_collect) and (tmpdir is not None and osp.exists(tmpdir)):
-            raise OSError((f'The tmpdir {tmpdir} already exists.',
-                           ' Since tmpdir will be deleted after testing,',
-                           ' please make sure you specify an empty one.'))
-        prog_bar = mmcv.ProgressBar(len(dataset))
-    time.sleep(2)
-    dist.barrier()
-    for i, data in enumerate(data_loader):
-        with torch.no_grad():
-            result = model(return_loss=False, **data)
-        if isinstance(result, list):
-            results.extend(result)
-        else:
-            results.append(result)
-
-        if rank == 0:
-            batch_size = data['img'].size(0)
-            for _ in range(batch_size * world_size):
-                prog_bar.update()
-
-    # collect results from all ranks
-    if gpu_collect:
-        results = collect_results_gpu(results, len(dataset))
-    else:
-        results = collect_results_cpu(results, len(dataset), tmpdir)
-    return results
-
-
-def collect_results_cpu(result_part, size, tmpdir=None):
-    rank, world_size = get_dist_info()
-    # create a tmp dir if it is not specified
-    if tmpdir is None:
-        MAX_LEN = 512
-        # 32 is whitespace
-        dir_tensor = torch.full((MAX_LEN, ),
-                                32,
-                                dtype=torch.uint8,
-                                device='cuda')
-        if rank == 0:
-            mmcv.mkdir_or_exist('.dist_test')
-            tmpdir = tempfile.mkdtemp(dir='.dist_test')
-            tmpdir = torch.tensor(
-                bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
-            dir_tensor[:len(tmpdir)] = tmpdir
-        dist.broadcast(dir_tensor, 0)
-        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
-    else:
-        mmcv.mkdir_or_exist(tmpdir)
-    # dump the part result to the dir
-    mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
-    dist.barrier()
-    # collect all parts
-    if rank != 0:
-        return None
-    else:
-        # load results of all parts from tmp dir
-        part_list = []
-        for i in range(world_size):
-            part_file = osp.join(tmpdir, f'part_{i}.pkl')
-            part_result = mmcv.load(part_file)
-            part_list.append(part_result)
-        # sort the results
-        ordered_results = []
-        for res in zip(*part_list):
-            ordered_results.extend(list(res))
-        # the dataloader may pad some samples
-        ordered_results = ordered_results[:size]
-        # remove tmp dir
-        shutil.rmtree(tmpdir)
-        return ordered_results
-
-
-def collect_results_gpu(result_part, size):
-    rank, world_size = get_dist_info()
-    # dump result part to tensor with pickle
-    part_tensor = torch.tensor(
-        bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
-    # gather all result part tensor shape
-    shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
-    shape_list = [shape_tensor.clone() for _ in range(world_size)]
-    dist.all_gather(shape_list, shape_tensor)
-    # padding result part tensor to max length
-    shape_max = torch.tensor(shape_list).max()
-    part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
-    part_send[:shape_tensor[0]] = part_tensor
-    part_recv_list = [
-        part_tensor.new_zeros(shape_max) for _ in range(world_size)
-    ]
-    # gather all result part
-    dist.all_gather(part_recv_list, part_send)
-
-    if rank == 0:
-        part_list = []
-        for recv, shape in zip(part_recv_list, shape_list):
-            part_result = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())
-            part_list.append(part_result)
-        # sort the results
-        ordered_results = []
-        for res in zip(*part_list):
-            ordered_results.extend(list(res))
-        # the dataloader may pad some samples
-        ordered_results = ordered_results[:size]
-        return ordered_results
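The deleted collect_results_cpu and collect_results_gpu helpers above both rebuild the global result list with zip(*part_list) and then truncate it to the dataset size. A toy sketch (not part of the repository) of that re-ordering step, assuming the distributed sampler deals samples round-robin so rank r holds indices r, r + world_size, r + 2 * world_size, ...:

    def reorder(part_list, size):
        # Interleave one result from each rank at a time; under the round-robin
        # assumption this restores the original dataset order.
        ordered = []
        for group in zip(*part_list):
            ordered.extend(group)
        # The sampler may pad the last batch, so cut back to the true size.
        return ordered[:size]

    # Two ranks, five samples; rank 1 was padded with a repeat of its last sample.
    rank0 = ['s0', 's2', 's4']
    rank1 = ['s1', 's3', 's3']
    assert reorder([rank0, rank1], size=5) == ['s0', 's1', 's2', 's3', 's4']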
@@ -1,242 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import random
-import warnings
-
-import numpy as np
-import torch
-import torch.distributed as dist
-from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
-from mmcv.runner import (DistSamplerSeedHook, Fp16OptimizerHook,
-                         build_optimizer, build_runner, get_dist_info)
-from mmcv.runner.hooks import DistEvalHook, EvalHook
-
-from mmcls.core import DistOptimizerHook
-from mmcls.datasets import build_dataloader
-from mmcls.registry import DATASETS
-from mmcls.utils import get_root_logger
-
-
-def init_random_seed(seed=None, device='cuda'):
-    """Initialize random seed.
-
-    If the seed is not set, the seed will be automatically randomized,
-    and then broadcast to all processes to prevent some potential bugs.
-
-    Args:
-        seed (int, Optional): The seed. Defaults to None.
-        device (str): The device where the seed will be put on.
-            Defaults to 'cuda'.
-
-    Returns:
-        int: Seed to be used.
-    """
-    if seed is not None:
-        return seed
-
-    # Make sure all ranks share the same random seed to prevent
-    # some potential bugs. Please refer to
-    # https://github.com/open-mmlab/mmdetection/issues/6339
-    rank, world_size = get_dist_info()
-    seed = np.random.randint(2**31)
-    if world_size == 1:
-        return seed
-
-    if rank == 0:
-        random_num = torch.tensor(seed, dtype=torch.int32, device=device)
-    else:
-        random_num = torch.tensor(0, dtype=torch.int32, device=device)
-    dist.broadcast(random_num, src=0)
-    return random_num.item()
-
-
-def set_random_seed(seed, deterministic=False):
-    """Set random seed.
-
-    Args:
-        seed (int): Seed to be used.
-        deterministic (bool): Whether to set the deterministic option for
-            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
-            to True and `torch.backends.cudnn.benchmark` to False.
-            Default: False.
-    """
-    random.seed(seed)
-    np.random.seed(seed)
-    torch.manual_seed(seed)
-    torch.cuda.manual_seed_all(seed)
-    if deterministic:
-        torch.backends.cudnn.deterministic = True
-        torch.backends.cudnn.benchmark = False
-
-
-def train_model(model,
-                dataset,
-                cfg,
-                distributed=False,
-                validate=False,
-                timestamp=None,
-                device=None,
-                meta=None):
-    """Train a model.
-
-    This method will build dataloaders, wrap the model and build a runner
-    according to the provided config.
-
-    Args:
-        model (:obj:`torch.nn.Module`): The model to be run.
-        dataset (:obj:`mmcls.datasets.BaseDataset` | List[BaseDataset]):
-            The dataset used to train the model. It can be a single dataset,
-            or a list of dataset with the same length as workflow.
-        cfg (:obj:`mmcv.utils.Config`): The configs of the experiment.
-        distributed (bool): Whether to train the model in a distributed
-            environment. Defaults to False.
-        validate (bool): Whether to do validation with
-            :obj:`mmcv.runner.EvalHook`. Defaults to False.
-        timestamp (str, optional): The timestamp string to auto generate the
-            name of log files. Defaults to None.
-        device (str, optional): TODO
-        meta (dict, optional): A dict records some import information such as
-            environment info and seed, which will be logged in logger hook.
-            Defaults to None.
-    """
-    logger = get_root_logger()
-
-    # prepare data loaders
-    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
-
-    # The default loader config
-    loader_cfg = dict(
-        # cfg.gpus will be ignored if distributed
-        num_gpus=cfg.ipu_replicas if device == 'ipu' else len(cfg.gpu_ids),
-        dist=distributed,
-        round_up=True,
-        seed=cfg.get('seed'),
-        sampler_cfg=cfg.get('sampler', None),
-    )
-    # The overall dataloader settings
-    loader_cfg.update({
-        k: v
-        for k, v in cfg.data.items() if k not in [
-            'train', 'val', 'test', 'train_dataloader', 'val_dataloader',
-            'test_dataloader'
-        ]
-    })
-    # The specific dataloader settings
-    train_loader_cfg = {**loader_cfg, **cfg.data.get('train_dataloader', {})}
-
-    data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]
-
-    # put model on gpus
-    if distributed:
-        find_unused_parameters = cfg.get('find_unused_parameters', False)
-        # Sets the `find_unused_parameters` parameter in
-        # torch.nn.parallel.DistributedDataParallel
-        model = MMDistributedDataParallel(
-            model.cuda(),
-            device_ids=[torch.cuda.current_device()],
-            broadcast_buffers=False,
-            find_unused_parameters=find_unused_parameters)
-    else:
-        if device == 'cpu':
-            warnings.warn(
-                'The argument `device` is deprecated. To use cpu to train, '
-                'please refers to https://mmclassification.readthedocs.io/en'
-                '/latest/getting_started.html#train-a-model')
-            model = model.cpu()
-        elif device == 'ipu':
-            model = model.cpu()
-        else:
-            model = MMDataParallel(model, device_ids=cfg.gpu_ids)
-            if not model.device_ids:
-                from mmcv import __version__, digit_version
-                assert digit_version(__version__) >= (1, 4, 4), \
-                    'To train with CPU, please confirm your mmcv version ' \
-                    'is not lower than v1.4.4'
-
-    # build runner
-    optimizer = build_optimizer(model, cfg.optimizer)
-
-    if cfg.get('runner') is None:
-        cfg.runner = {
-            'type': 'EpochBasedRunner',
-            'max_epochs': cfg.total_epochs
-        }
-        warnings.warn(
-            'config is now expected to have a `runner` section, '
-            'please set `runner` in your config.', UserWarning)
-
-    if device == 'ipu':
-        if not cfg.runner['type'].startswith('IPU'):
-            cfg.runner['type'] = 'IPU' + cfg.runner['type']
-        if 'options_cfg' not in cfg.runner:
-            cfg.runner['options_cfg'] = {}
-        cfg.runner['options_cfg']['replicationFactor'] = cfg.ipu_replicas
-        cfg.runner['fp16_cfg'] = cfg.get('fp16', None)
-
-    runner = build_runner(
-        cfg.runner,
-        default_args=dict(
-            model=model,
-            batch_processor=None,
-            optimizer=optimizer,
-            work_dir=cfg.work_dir,
-            logger=logger,
-            meta=meta))
-
-    # an ugly walkaround to make the .log and .log.json filenames the same
-    runner.timestamp = timestamp
-
-    # fp16 setting
-    fp16_cfg = cfg.get('fp16', None)
-    if fp16_cfg is not None:
-        if device == 'ipu':
-            from mmcv.device.ipu import IPUFp16OptimizerHook
-            optimizer_config = IPUFp16OptimizerHook(
-                **cfg.optimizer_config,
-                loss_scale=fp16_cfg['loss_scale'],
-                distributed=distributed)
-        else:
-            optimizer_config = Fp16OptimizerHook(
-                **cfg.optimizer_config,
-                loss_scale=fp16_cfg['loss_scale'],
-                distributed=distributed)
-    elif distributed and 'type' not in cfg.optimizer_config:
-        optimizer_config = DistOptimizerHook(**cfg.optimizer_config)
-    else:
-        optimizer_config = cfg.optimizer_config
-
-    # register hooks
-    runner.register_training_hooks(
-        cfg.lr_config,
-        optimizer_config,
-        cfg.checkpoint_config,
-        cfg.log_config,
-        cfg.get('momentum_config', None),
-        custom_hooks_config=cfg.get('custom_hooks', None))
-    if distributed and cfg.runner['type'] == 'EpochBasedRunner':
-        runner.register_hook(DistSamplerSeedHook())
-
-    # register eval hooks
-    if validate:
-        val_dataset = DATASETS.build(cfg.data.val, dict(test_mode=True))
-        # The specific dataloader settings
-        val_loader_cfg = {
-            **loader_cfg,
-            'shuffle': False,  # Not shuffle by default
-            'sampler_cfg': None,  # Not use sampler by default
-            **cfg.data.get('val_dataloader', {}),
-        }
-        val_dataloader = build_dataloader(val_dataset, **val_loader_cfg)
-        eval_cfg = cfg.get('evaluation', {})
-        eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
-        eval_hook = DistEvalHook if distributed else EvalHook
-        # `EvalHook` needs to be executed after `IterTimerHook`.
-        # Otherwise, it will cause a bug if use `IterBasedRunner`.
-        # Refers to https://github.com/open-mmlab/mmcv/issues/1261
-        runner.register_hook(
-            eval_hook(val_dataloader, **eval_cfg), priority='LOW')
-
-    if cfg.resume_from:
-        runner.resume(cfg.resume_from)
-    elif cfg.load_from:
-        runner.load_checkpoint(cfg.load_from)
-    runner.run(data_loaders, cfg.workflow)
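The deleted train_model above assembles dataloader settings by dict unpacking: defaults first, per-loader overrides last, so the right-most dict wins on key conflicts. A small illustrative sketch with made-up values (the keys mirror the loader_cfg built above; samples_per_gpu is a hypothetical override):

    loader_cfg = dict(num_gpus=1, dist=False, round_up=True, seed=None)
    overrides = dict(samples_per_gpu=32, round_up=False)

    train_loader_cfg = {**loader_cfg, **overrides}
    assert train_loader_cfg['round_up'] is False   # override wins
    assert train_loader_cfg['num_gpus'] == 1       # default kept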
@@ -9,7 +9,7 @@ from mmcv.parallel import DataContainer as DC
 from mmcv.transforms.base import BaseTransform
 from PIL import Image

-from mmcls.core import ClsDataSample
+from mmcls.engine import ClsDataSample
 from mmcls.registry import TRANSFORMS


@@ -70,7 +70,7 @@ class ClsDataSample(BaseDataElement):

     Examples:
         >>> import torch
-        >>> from mmcls.core import ClsDataSample
+        >>> from mmcls.engine import ClsDataSample
         >>>
         >>> img_meta = dict(img_shape=(960, 720), num_classes=5)
         >>> data_sample = ClsDataSample(metainfo=img_meta)
@@ -8,7 +8,7 @@ from mmengine.hooks import Hook
 from mmengine.runner import EpochBasedTrainLoop, Runner
 from mmengine.visualization import Visualizer

-from mmcls.core import ClsDataSample
+from mmcls.engine import ClsDataSample
 from mmcls.registry import HOOKS


@@ -30,7 +30,7 @@ class VisualizationHook(Hook):
             in the testing process. If None, handle with the backends of the
             visualizer. Defaults to None.
         **kwargs: other keyword arguments of
-            :meth:`mmcls.core.ClsVisualizer.add_datasample`.
+            :meth:`mmcls.engine.ClsVisualizer.add_datasample`.
     """

     def __init__(self,
@@ -6,7 +6,7 @@ import numpy as np
 from mmengine import Visualizer
 from mmengine.dist import master_only

-from mmcls.core import ClsDataSample
+from mmcls.engine import ClsDataSample
 from mmcls.registry import VISUALIZERS


@@ -93,7 +93,7 @@ class MultiLabelMetric(BaseMetric):
         (tensor(62.5000), tensor(31.2500), tensor(39.1667), tensor(8))
         >>>
         >>> # ------------------- Use with Evalutor -------------------
-        >>> from mmcls.core import ClsDataSample
+        >>> from mmcls.engine import ClsDataSample
         >>> from mmengine.evaluator import Evaluator
         >>> # The `data_batch` won't be used in this case, just use a fake.
         >>> data_batch = [
@@ -3,7 +3,7 @@ from typing import List, Optional

 import torch

-from mmcls.core import ClsDataSample
+from mmcls.engine import ClsDataSample
 from mmcls.registry import MODELS
 from .base import BaseClassifier

@@ -100,7 +100,7 @@ class ImageClassifier(BaseClassifier):

         - If ``mode="tensor"``, return a tensor or a tuple of tensor.
         - If ``mode="predict"``, return a list of
-          :obj:`mmcls.core.ClsDataSample`.
+          :obj:`mmcls.engine.ClsDataSample`.
         - If ``mode="loss"``, return a dict of tensor.
         """
         if mode == 'tensor':
@@ -4,7 +4,7 @@ from typing import List, Optional, Tuple, Union
 import torch
 import torch.nn.functional as F

-from mmcls.core.data_structures.cls_data_sample import ClsDataSample
+from mmcls.engine.data_structures.cls_data_sample import ClsDataSample
 from mmcls.metrics import Accuracy
 from mmcls.registry import MODELS
 from .base_head import BaseHead
@@ -4,7 +4,7 @@ from typing import List, Sequence, Tuple
 import torch
 import torch.nn as nn

-from mmcls.core import ClsDataSample
+from mmcls.engine import ClsDataSample
 from mmcls.metrics import Accuracy
 from mmcls.registry import MODELS
 from .cls_head import ClsHead
@@ -4,7 +4,7 @@ from typing import Dict, List, Optional, Tuple
 import torch
 from mmengine.data import LabelData

-from mmcls.core import ClsDataSample
+from mmcls.engine import ClsDataSample
 from mmcls.registry import MODELS
 from .base_head import BaseHead

@@ -5,7 +5,7 @@ import numpy as np
 import torch
 from mmengine.data import LabelData

-from mmcls.core import ClsDataSample
+from mmcls.engine import ClsDataSample
 from mmcls.registry import BATCH_AUGMENTS


@@ -1,15 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from .registry import (BATCH_AUGMENTS, DATA_SAMPLERS, DATASETS, HOOKS, LOOPS,
-                       METRICS, MODEL_WRAPPERS, MODELS,
-                       OPTIM_WRAPPER_CONSTRUCTORS, OPTIM_WRAPPERS, OPTIMIZERS,
-                       PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS,
-                       TASK_UTILS, TRANSFORMS, VISBACKENDS, VISUALIZERS,
-                       WEIGHT_INITIALIZERS)
-
-__all__ = [
-    'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS', 'DATASETS', 'DATA_SAMPLERS',
-    'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS', 'OPTIMIZERS',
-    'OPTIM_WRAPPERS', 'OPTIM_WRAPPER_CONSTRUCTORS', 'TASK_UTILS',
-    'PARAM_SCHEDULERS', 'METRICS', 'MODEL_WRAPPERS', 'LOOPS', 'VISBACKENDS',
-    'VISUALIZERS', 'BATCH_AUGMENTS'
-]
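With this re-export shim removed, the registries are reachable only through mmcls.registry, which is how the files touched below already import them. A hedged sketch of registering a custom component against that module, assuming the usual OpenMMLab registry decorator API (MyTinyHead is a made-up example class, not part of this commit):

    from mmcls.registry import MODELS

    @MODELS.register_module()
    class MyTinyHead:
        # Illustrative only; a real head would subclass an mmcls base class.
        def __init__(self, num_classes=10):
            self.num_classes = num_classes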
@@ -1,5 +1,5 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from .collect_env import collect_env
-from .setup_env import register_all_modules
+from .setup_env import register_all_modules, init_random_seed, set_random_seed

-__all__ = ['collect_env', 'register_all_modules']
+__all__ = ['collect_env', 'register_all_modules', 'init_random_seed', 'set_random_seed']
@@ -1,10 +1,66 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 import datetime
 import warnings
+import random

+from mmcv.runner import get_dist_info
 from mmengine import DefaultScope
+import numpy as np
+import torch
+import torch.distributed as dist


+def init_random_seed(seed=None, device='cuda'):
+    """Initialize random seed.
+
+    If the seed is not set, the seed will be automatically randomized,
+    and then broadcast to all processes to prevent some potential bugs.
+
+    Args:
+        seed (int, Optional): The seed. Defaults to None.
+        device (str): The device where the seed will be put on.
+            Defaults to 'cuda'.
+
+    Returns:
+        int: Seed to be used.
+    """
+    if seed is not None:
+        return seed
+
+    # Make sure all ranks share the same random seed to prevent
+    # some potential bugs. Please refer to
+    # https://github.com/open-mmlab/mmdetection/issues/6339
+    rank, world_size = get_dist_info()
+    seed = np.random.randint(2**31)
+    if world_size == 1:
+        return seed
+
+    if rank == 0:
+        random_num = torch.tensor(seed, dtype=torch.int32, device=device)
+    else:
+        random_num = torch.tensor(0, dtype=torch.int32, device=device)
+    dist.broadcast(random_num, src=0)
+    return random_num.item()
+
+
+def set_random_seed(seed, deterministic=False):
+    """Set random seed.
+
+    Args:
+        seed (int): Seed to be used.
+        deterministic (bool): Whether to set the deterministic option for
+            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
+            to True and `torch.backends.cudnn.benchmark` to False.
+            Default: False.
+    """
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    torch.cuda.manual_seed_all(seed)
+    if deterministic:
+        torch.backends.cudnn.deterministic = True
+        torch.backends.cudnn.benchmark = False
+
 def register_all_modules(init_default_scope: bool = True) -> None:
     """Register all modules in mmcls into the registries.

@@ -16,7 +72,7 @@ def register_all_modules(init_default_scope: bool = True) -> None:
             https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md
             Defaults to True.
     """ # noqa
-    import mmcls.core  # noqa: F401,F403
+    import mmcls.engine  # noqa: F401,F403
    import mmcls.datasets  # noqa: F401,F403
    import mmcls.metrics  # noqa: F401,F403
    import mmcls.models  # noqa: F401,F403
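Together with the mmcls/utils/__init__.py hunk above, seeding is now driven from mmcls.utils instead of mmcls.apis. A short usage sketch, assuming a single-process run (in that case init_random_seed simply draws a 31-bit seed locally instead of broadcasting one from rank 0):

    from mmcls.utils import init_random_seed, set_random_seed

    seed = init_random_seed(None)              # no seed given, so one is drawn at random
    set_random_seed(seed, deterministic=True)  # also switches cuDNN to deterministic mode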
@@ -5,7 +5,7 @@ import numpy as np
 import torch
 from mmengine.data import LabelData

-from mmcls.core import ClsDataSample
+from mmcls.engine import ClsDataSample


 class TestClsDataSample(TestCase):
@@ -2,7 +2,7 @@
 from unittest import TestCase
 from unittest.mock import MagicMock, patch

-from mmcls.core import ClassNumCheckHook
+from mmcls.engine import ClassNumCheckHook


 class TestClassNumCheckHook(TestCase):
@@ -7,7 +7,7 @@ from unittest.mock import ANY, MagicMock, patch
 import torch
 from mmengine.runner import EpochBasedTrainLoop, IterBasedTrainLoop

-from mmcls.core import ClsDataSample, ClsVisualizer, VisualizationHook
+from mmcls.engine import ClsDataSample, ClsVisualizer, VisualizationHook
 from mmcls.registry import HOOKS
 from mmcls.utils import register_all_modules

@@ -7,7 +7,7 @@ from unittest.mock import patch
 import numpy as np
 import torch

-from mmcls.core import ClsDataSample, ClsVisualizer
+from mmcls.engine import ClsDataSample, ClsVisualizer


 class TestClsVisualizer(TestCase):
@@ -7,7 +7,7 @@ import numpy as np
 import torch
 from mmengine.data import LabelData

-from mmcls.core import ClsDataSample
+from mmcls.engine import ClsDataSample
 from mmcls.datasets.pipelines import PackClsInputs


@@ -6,7 +6,7 @@ import sklearn.metrics
 import torch
 from mmengine.evaluator import Evaluator

-from mmcls.core import ClsDataSample
+from mmcls.engine import ClsDataSample
 from mmcls.metrics import AveragePrecision, MultiLabelMetric
 from mmcls.utils import register_all_modules

@@ -5,7 +5,7 @@ from unittest import TestCase
 import numpy as np
 import torch

-from mmcls.core import ClsDataSample
+from mmcls.engine import ClsDataSample
 from mmcls.metrics import Accuracy, SingleLabelMetric
 from mmcls.registry import METRICS

@@ -5,7 +5,7 @@ from unittest.mock import MagicMock
 import torch
 from mmengine import ConfigDict

-from mmcls.core import ClsDataSample
+from mmcls.engine import ClsDataSample
 from mmcls.models import ImageClassifier
 from mmcls.registry import MODELS
 from mmcls.utils import register_all_modules
@@ -7,7 +7,7 @@ import numpy as np
 import torch
 from mmengine import is_seq_of

-from mmcls.core import ClsDataSample
+from mmcls.engine import ClsDataSample
 from mmcls.registry import MODELS
 from mmcls.utils import register_all_modules

@@ -5,7 +5,7 @@ from unittest.mock import MagicMock, patch
 import numpy as np
 import torch

-from mmcls.core import ClsDataSample
+from mmcls.engine import ClsDataSample
 from mmcls.models import Mixup, RandomBatchAugment
 from mmcls.registry import BATCH_AUGMENTS

@@ -3,7 +3,7 @@ from unittest import TestCase

 import torch

-from mmcls.core import ClsDataSample
+from mmcls.engine import ClsDataSample
 from mmcls.models import ClsDataPreprocessor, RandomBatchAugment
 from mmcls.registry import MODELS
 from mmcls.utils import register_all_modules
@@ -8,7 +8,7 @@ from mmcv import DictAction
 from mmcv.parallel import MMDataParallel

 from mmcls.apis import single_gpu_test
-from mmcls.core.export import ONNXRuntimeClassifier, TensorRTClassifier
+from mmcls.engine.export import ONNXRuntimeClassifier, TensorRTClassifier
 from mmcls.datasets import build_dataloader, build_dataset


@@ -14,7 +14,7 @@ from mmcv import Config, DictAction
 from mmcv.runner import get_dist_info, init_dist

 from mmcls import __version__
-from mmcls.apis import init_random_seed, set_random_seed, train_model
+from mmcls.utils import init_random_seed, set_random_seed, train_model
 from mmcls.datasets import build_dataset
 from mmcls.models import build_classifier
 from mmcls.utils import collect_env, get_root_logger, load_json_log
@@ -10,8 +10,8 @@ from mmengine.config import Config, DictAction
 from mmengine.dataset import Compose
 from mmengine.visualization import Visualizer

-from mmcls.core import ClsVisualizer
-from mmcls.core.visualization.cls_visualizer import _get_adaptive_scale
+from mmcls.engine import ClsVisualizer
+from mmcls.engine.visualization.cls_visualizer import _get_adaptive_scale
 from mmcls.datasets.builder import build_dataset
 from mmcls.registry import VISUALIZERS
 from mmcls.utils import register_all_modules