[Refactor] Remove deprecation. (#633)
* Remove the deprecated `--options` argument in some tools.
* Remove the deprecated eval hooks and fp16 hooks.
* Remove the deprecated mixup & cutmix, pretrained and return_tuple arguments.
* Remove the deprecated `**deprecated_kwargs` in the multi-label dataset.
* Remove the deprecated `formating.py`.
* Remove the deprecated default mode of `LabelSmoothLoss`.
* Revert the pretrained argument and remove its deprecation warning.
* Update unit tests.
parent a7f8e96b31
commit bca695b684
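For downstream users, the removals translate into a few config migrations. A rough sketch under the conventions visible in the diffs below (all values are hypothetical):

```python
# Deprecated `mixup`/`cutmix` keys in train_cfg are gone; use `augments`.
# The old mixup path always used prob=1.0, so an equivalent config is:
old_train_cfg = dict(mixup=dict(alpha=1.0, num_classes=10))  # removed
new_train_cfg = dict(augments=dict(
    type='BatchMixup', alpha=1.0, num_classes=10, prob=1.0))  # instead

# The old cutmix key mapped `cutmix_prob` onto `prob`:
old_cutmix_cfg = dict(cutmix=dict(alpha=1.0, num_classes=10, cutmix_prob=1.0))
new_cutmix_cfg = dict(augments=dict(
    type='BatchCutMix', alpha=1.0, num_classes=10, prob=1.0))

# `pretrained` itself is kept (its deprecation warning is reverted) and is
# still translated internally to:
init_cfg = dict(type='Pretrained', checkpoint='checkpoint')
```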
@@ -6,30 +6,14 @@ import numpy as np
 import torch
 import torch.distributed as dist
 from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
-from mmcv.runner import (DistSamplerSeedHook, build_optimizer, build_runner,
-                         get_dist_info)
+from mmcv.runner import (DistSamplerSeedHook, Fp16OptimizerHook,
+                         build_optimizer, build_runner, get_dist_info)
+from mmcv.runner.hooks import DistEvalHook, EvalHook

 from mmcls.core import DistOptimizerHook
 from mmcls.datasets import build_dataloader, build_dataset
 from mmcls.utils import get_root_logger

-# TODO import eval hooks from mmcv and delete them from mmcls
-try:
-    from mmcv.runner.hooks import EvalHook, DistEvalHook
-except ImportError:
-    warnings.warn('DeprecationWarning: EvalHook and DistEvalHook from mmcls '
-                  'will be deprecated.'
-                  'Please install mmcv through master branch.')
-    from mmcls.core import EvalHook, DistEvalHook
-
-# TODO import optimizer hook from mmcv and delete them from mmcls
-try:
-    from mmcv.runner import Fp16OptimizerHook
-except ImportError:
-    warnings.warn('DeprecationWarning: FP16OptimizerHook from mmcls will be '
-                  'deprecated. Please install mmcv>=1.1.4.')
-    from mmcls.core import Fp16OptimizerHook
-

 def init_random_seed(seed=None, device='cuda'):
     """Initialize random seed.
@@ -1,6 +1,5 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from .evaluation import *  # noqa: F401, F403
-from .fp16 import *  # noqa: F401, F403
 from .hook import *  # noqa: F401, F403
 from .optimizers import *  # noqa: F401, F403
 from .utils import *  # noqa: F401, F403
@@ -1,12 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-from .eval_hooks import DistEvalHook, EvalHook
 from .eval_metrics import (calculate_confusion_matrix, f1_score, precision,
                            precision_recall_f1, recall, support)
 from .mean_ap import average_precision, mAP
 from .multilabel_eval_metrics import average_performance

 __all__ = [
-    'DistEvalHook', 'EvalHook', 'precision', 'recall', 'f1_score', 'support',
-    'average_precision', 'mAP', 'average_performance',
-    'calculate_confusion_matrix', 'precision_recall_f1'
+    'precision', 'recall', 'f1_score', 'support', 'average_precision', 'mAP',
+    'average_performance', 'calculate_confusion_matrix', 'precision_recall_f1'
 ]
@@ -1,107 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import os.path as osp
-import warnings
-
-from mmcv.runner import Hook
-from torch.utils.data import DataLoader
-
-
-class EvalHook(Hook):
-    """Evaluation hook.
-
-    Args:
-        dataloader (DataLoader): A PyTorch dataloader.
-        interval (int): Evaluation interval (by epochs). Default: 1.
-    """
-
-    def __init__(self, dataloader, interval=1, by_epoch=True, **eval_kwargs):
-        warnings.warn(
-            'DeprecationWarning: EvalHook and DistEvalHook in mmcls will be '
-            'deprecated, please install mmcv through master branch.')
-        if not isinstance(dataloader, DataLoader):
-            raise TypeError('dataloader must be a pytorch DataLoader, but got'
-                            f' {type(dataloader)}')
-        self.dataloader = dataloader
-        self.interval = interval
-        self.eval_kwargs = eval_kwargs
-        self.by_epoch = by_epoch
-
-    def after_train_epoch(self, runner):
-        if not self.by_epoch or not self.every_n_epochs(runner, self.interval):
-            return
-        from mmcls.apis import single_gpu_test
-        results = single_gpu_test(runner.model, self.dataloader, show=False)
-        self.evaluate(runner, results)
-
-    def after_train_iter(self, runner):
-        if self.by_epoch or not self.every_n_iters(runner, self.interval):
-            return
-        from mmcls.apis import single_gpu_test
-        runner.log_buffer.clear()
-        results = single_gpu_test(runner.model, self.dataloader, show=False)
-        self.evaluate(runner, results)
-
-    def evaluate(self, runner, results):
-        eval_res = self.dataloader.dataset.evaluate(
-            results, logger=runner.logger, **self.eval_kwargs)
-        for name, val in eval_res.items():
-            runner.log_buffer.output[name] = val
-        runner.log_buffer.ready = True
-
-
-class DistEvalHook(EvalHook):
-    """Distributed evaluation hook.
-
-    Args:
-        dataloader (DataLoader): A PyTorch dataloader.
-        interval (int): Evaluation interval (by epochs). Default: 1.
-        tmpdir (str, optional): Temporary directory to save the results of all
-            processes. Default: None.
-        gpu_collect (bool): Whether to use gpu or cpu to collect results.
-            Default: False.
-    """
-
-    def __init__(self,
-                 dataloader,
-                 interval=1,
-                 gpu_collect=False,
-                 by_epoch=True,
-                 **eval_kwargs):
-        warnings.warn(
-            'DeprecationWarning: EvalHook and DistEvalHook in mmcls will be '
-            'deprecated, please install mmcv through master branch.')
-        if not isinstance(dataloader, DataLoader):
-            raise TypeError('dataloader must be a pytorch DataLoader, but got '
-                            f'{type(dataloader)}')
-        self.dataloader = dataloader
-        self.interval = interval
-        self.gpu_collect = gpu_collect
-        self.by_epoch = by_epoch
-        self.eval_kwargs = eval_kwargs
-
-    def after_train_epoch(self, runner):
-        if not self.by_epoch or not self.every_n_epochs(runner, self.interval):
-            return
-        from mmcls.apis import multi_gpu_test
-        results = multi_gpu_test(
-            runner.model,
-            self.dataloader,
-            tmpdir=osp.join(runner.work_dir, '.eval_hook'),
-            gpu_collect=self.gpu_collect)
-        if runner.rank == 0:
-            print('\n')
-            self.evaluate(runner, results)
-
-    def after_train_iter(self, runner):
-        if self.by_epoch or not self.every_n_iters(runner, self.interval):
-            return
-        from mmcls.apis import multi_gpu_test
-        runner.log_buffer.clear()
-        results = multi_gpu_test(
-            runner.model,
-            self.dataloader,
-            tmpdir=osp.join(runner.work_dir, '.eval_hook'),
-            gpu_collect=self.gpu_collect)
-        if runner.rank == 0:
-            print('\n')
-            self.evaluate(runner, results)
@@ -1,5 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from .decorators import auto_fp16, force_fp32
-from .hooks import Fp16OptimizerHook, wrap_fp16_model
-
-__all__ = ['auto_fp16', 'force_fp32', 'Fp16OptimizerHook', 'wrap_fp16_model']
@@ -1,161 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import functools
-from inspect import getfullargspec
-
-import torch
-
-from .utils import cast_tensor_type
-
-
-def auto_fp16(apply_to=None, out_fp32=False):
-    """Decorator to enable fp16 training automatically.
-
-    This decorator is useful when you write custom modules and want to support
-    mixed precision training. If inputs arguments are fp32 tensors, they will
-    be converted to fp16 automatically. Arguments other than fp32 tensors are
-    ignored.
-
-    Args:
-        apply_to (Iterable, optional): The argument names to be converted.
-            `None` indicates all arguments.
-        out_fp32 (bool): Whether to convert the output back to fp32.
-
-    :Example:
-
-        class MyModule1(nn.Module)
-
-            # Convert x and y to fp16
-            @auto_fp16()
-            def forward(self, x, y):
-                pass
-
-        class MyModule2(nn.Module):
-
-            # convert pred to fp16
-            @auto_fp16(apply_to=('pred', ))
-            def do_something(self, pred, others):
-                pass
-    """
-
-    def auto_fp16_wrapper(old_func):
-
-        @functools.wraps(old_func)
-        def new_func(*args, **kwargs):
-            # check if the module has set the attribute `fp16_enabled`, if not,
-            # just fallback to the original method.
-            if not isinstance(args[0], torch.nn.Module):
-                raise TypeError('@auto_fp16 can only be used to decorate the '
-                                'method of nn.Module')
-            if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
-                return old_func(*args, **kwargs)
-            # get the arg spec of the decorated method
-            args_info = getfullargspec(old_func)
-            # get the argument names to be casted
-            args_to_cast = args_info.args if apply_to is None else apply_to
-            # convert the args that need to be processed
-            new_args = []
-            # NOTE: default args are not taken into consideration
-            if args:
-                arg_names = args_info.args[:len(args)]
-                for i, arg_name in enumerate(arg_names):
-                    if arg_name in args_to_cast:
-                        new_args.append(
-                            cast_tensor_type(args[i], torch.float, torch.half))
-                    else:
-                        new_args.append(args[i])
-            # convert the kwargs that need to be processed
-            new_kwargs = {}
-            if kwargs:
-                for arg_name, arg_value in kwargs.items():
-                    if arg_name in args_to_cast:
-                        new_kwargs[arg_name] = cast_tensor_type(
-                            arg_value, torch.float, torch.half)
-                    else:
-                        new_kwargs[arg_name] = arg_value
-            # apply converted arguments to the decorated method
-            output = old_func(*new_args, **new_kwargs)
-            # cast the results back to fp32 if necessary
-            if out_fp32:
-                output = cast_tensor_type(output, torch.half, torch.float)
-            return output
-
-        return new_func
-
-    return auto_fp16_wrapper
-
-
-def force_fp32(apply_to=None, out_fp16=False):
-    """Decorator to convert input arguments to fp32 in force.
-
-    This decorator is useful when you write custom modules and want to support
-    mixed precision training. If there are some inputs that must be processed
-    in fp32 mode, then this decorator can handle it. If inputs arguments are
-    fp16 tensors, they will be converted to fp32 automatically. Arguments other
-    than fp16 tensors are ignored.
-
-    Args:
-        apply_to (Iterable, optional): The argument names to be converted.
-            `None` indicates all arguments.
-        out_fp16 (bool): Whether to convert the output back to fp16.
-
-    :Example:
-
-        class MyModule1(nn.Module)
-
-            # Convert x and y to fp32
-            @force_fp32()
-            def loss(self, x, y):
-                pass
-
-        class MyModule2(nn.Module):
-
-            # convert pred to fp32
-            @force_fp32(apply_to=('pred', ))
-            def post_process(self, pred, others):
-                pass
-    """
-
-    def force_fp32_wrapper(old_func):
-
-        @functools.wraps(old_func)
-        def new_func(*args, **kwargs):
-            # check if the module has set the attribute `fp16_enabled`, if not,
-            # just fallback to the original method.
-            if not isinstance(args[0], torch.nn.Module):
-                raise TypeError('@force_fp32 can only be used to decorate the '
-                                'method of nn.Module')
-            if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
-                return old_func(*args, **kwargs)
-            # get the arg spec of the decorated method
-            args_info = getfullargspec(old_func)
-            # get the argument names to be casted
-            args_to_cast = args_info.args if apply_to is None else apply_to
-            # convert the args that need to be processed
-            new_args = []
-            if args:
-                arg_names = args_info.args[:len(args)]
-                for i, arg_name in enumerate(arg_names):
-                    if arg_name in args_to_cast:
-                        new_args.append(
-                            cast_tensor_type(args[i], torch.half, torch.float))
-                    else:
-                        new_args.append(args[i])
-            # convert the kwargs that need to be processed
-            new_kwargs = dict()
-            if kwargs:
-                for arg_name, arg_value in kwargs.items():
-                    if arg_name in args_to_cast:
-                        new_kwargs[arg_name] = cast_tensor_type(
-                            arg_value, torch.half, torch.float)
-                    else:
-                        new_kwargs[arg_name] = arg_value
-            # apply converted arguments to the decorated method
-            output = old_func(*new_args, **new_kwargs)
-            # cast the results back to fp32 if necessary
-            if out_fp16:
-                output = cast_tensor_type(output, torch.float, torch.half)
-            return output
-
-        return new_func
-
-    return force_fp32_wrapper
@@ -1,129 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import copy
-
-import torch
-import torch.nn as nn
-from mmcv.runner import OptimizerHook
-from mmcv.utils.parrots_wrapper import _BatchNorm
-
-from ..utils import allreduce_grads
-from .utils import cast_tensor_type
-
-
-class Fp16OptimizerHook(OptimizerHook):
-    """FP16 optimizer hook.
-
-    The steps of fp16 optimizer is as follows.
-    1. Scale the loss value.
-    2. BP in the fp16 model.
-    2. Copy gradients from fp16 model to fp32 weights.
-    3. Update fp32 weights.
-    4. Copy updated parameters from fp32 weights to fp16 model.
-
-    Refer to https://arxiv.org/abs/1710.03740 for more details.
-
-    Args:
-        loss_scale (float): Scale factor multiplied with loss.
-    """
-
-    def __init__(self,
-                 grad_clip=None,
-                 coalesce=True,
-                 bucket_size_mb=-1,
-                 loss_scale=512.,
-                 distributed=True):
-        self.grad_clip = grad_clip
-        self.coalesce = coalesce
-        self.bucket_size_mb = bucket_size_mb
-        self.loss_scale = loss_scale
-        self.distributed = distributed
-
-    def before_run(self, runner):
-        # keep a copy of fp32 weights
-        runner.optimizer.param_groups = copy.deepcopy(
-            runner.optimizer.param_groups)
-        # convert model to fp16
-        wrap_fp16_model(runner.model)
-
-    def copy_grads_to_fp32(self, fp16_net, fp32_weights):
-        """Copy gradients from fp16 model to fp32 weight copy."""
-        for fp32_param, fp16_param in zip(fp32_weights, fp16_net.parameters()):
-            if fp16_param.grad is not None:
-                if fp32_param.grad is None:
-                    fp32_param.grad = fp32_param.data.new(fp32_param.size())
-                fp32_param.grad.copy_(fp16_param.grad)
-
-    def copy_params_to_fp16(self, fp16_net, fp32_weights):
-        """Copy updated params from fp32 weight copy to fp16 model."""
-        for fp16_param, fp32_param in zip(fp16_net.parameters(), fp32_weights):
-            fp16_param.data.copy_(fp32_param.data)
-
-    def after_train_iter(self, runner):
-        # clear grads of last iteration
-        runner.model.zero_grad()
-        runner.optimizer.zero_grad()
-        # scale the loss value
-        scaled_loss = runner.outputs['loss'] * self.loss_scale
-        scaled_loss.backward()
-        # copy fp16 grads in the model to fp32 params in the optimizer
-        fp32_weights = []
-        for param_group in runner.optimizer.param_groups:
-            fp32_weights += param_group['params']
-        self.copy_grads_to_fp32(runner.model, fp32_weights)
-        # allreduce grads
-        if self.distributed:
-            allreduce_grads(fp32_weights, self.coalesce, self.bucket_size_mb)
-        # scale the gradients back
-        for param in fp32_weights:
-            if param.grad is not None:
-                param.grad.div_(self.loss_scale)
-        if self.grad_clip is not None:
-            self.clip_grads(fp32_weights)
-        # update fp32 params
-        runner.optimizer.step()
-        # copy fp32 params to the fp16 model
-        self.copy_params_to_fp16(runner.model, fp32_weights)
-
-
-def wrap_fp16_model(model):
-    # convert model to fp16
-    model.half()
-    # patch the normalization layers to make it work in fp32 mode
-    patch_norm_fp32(model)
-    # set `fp16_enabled` flag
-    for m in model.modules():
-        if hasattr(m, 'fp16_enabled'):
-            m.fp16_enabled = True
-
-
-def patch_norm_fp32(module):
-    if isinstance(module, (_BatchNorm, nn.GroupNorm)):
-        module.float()
-        module.forward = patch_forward_method(module.forward, torch.half,
-                                              torch.float)
-    for child in module.children():
-        patch_norm_fp32(child)
-    return module
-
-
-def patch_forward_method(func, src_type, dst_type, convert_output=True):
-    """Patch the forward method of a module.
-
-    Args:
-        func (callable): The original forward method.
-        src_type (torch.dtype): Type of input arguments to be converted from.
-        dst_type (torch.dtype): Type of input arguments to be converted to.
-        convert_output (bool): Whether to convert the output back to src_type.
-
-    Returns:
-        callable: The patched forward method.
-    """
-
-    def new_forward(*args, **kwargs):
-        output = func(*cast_tensor_type(args, src_type, dst_type),
-                      **cast_tensor_type(kwargs, src_type, dst_type))
-        if convert_output:
-            output = cast_tensor_type(output, dst_type, src_type)
-        return output
-
-    return new_forward
@@ -1,24 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from collections import abc
-
-import numpy as np
-import torch
-
-
-def cast_tensor_type(inputs, src_type, dst_type):
-    if isinstance(inputs, torch.Tensor):
-        return inputs.to(dst_type)
-    elif isinstance(inputs, str):
-        return inputs
-    elif isinstance(inputs, np.ndarray):
-        return inputs
-    elif isinstance(inputs, abc.Mapping):
-        return type(inputs)({
-            k: cast_tensor_type(v, src_type, dst_type)
-            for k, v in inputs.items()
-        })
-    elif isinstance(inputs, abc.Iterable):
-        return type(inputs)(
-            cast_tensor_type(item, src_type, dst_type) for item in inputs)
-    else:
-        return inputs
@@ -1,5 +1,4 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import warnings
 from typing import List

 import numpy as np
@@ -29,8 +28,7 @@ class MultiLabelDataset(BaseDataset):
                  metric='mAP',
                  metric_options=None,
                  indices=None,
-                 logger=None,
-                 **deprecated_kwargs):
+                 logger=None):
        """Evaluate the dataset.

        Args:
@@ -42,7 +40,6 @@ class MultiLabelDataset(BaseDataset):
                Allowed keys are 'k' and 'thr'. Defaults to None
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Defaults to None.
-           deprecated_kwargs (dict): Used for containing deprecated arguments.

        Returns:
            dict: evaluation results
@@ -50,11 +47,6 @@ class MultiLabelDataset(BaseDataset):
        if metric_options is None or metric_options == {}:
            metric_options = {'thr': 0.5}

-       if deprecated_kwargs != {}:
-           warnings.warn('Option arguments for metrics has been changed to '
-                         '`metric_options`.')
-           metric_options = {**deprecated_kwargs}
-
        if isinstance(metric, str):
            metrics = [metric]
        else:
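With `**deprecated_kwargs` gone, metric options must be passed through `metric_options` explicitly. A minimal call-site sketch (the `dataset` and `predictions` names are hypothetical):

```python
# Hypothetical call site for MultiLabelDataset.evaluate after this change:
# bare option kwargs are no longer collected via **deprecated_kwargs.
results = dataset.evaluate(
    predictions,
    metric='mAP',
    metric_options=dict(thr=0.5),  # previously could be passed as thr=0.5
)
```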
@@ -1,9 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# flake8: noqa
-import warnings
-
-from .formatting import *
-
-warnings.warn('DeprecationWarning: mmcls.datasets.pipelines.formating will be '
-              'deprecated in 2021, please replace it with '
-              'mmcls.datasets.pipelines.formatting.')
@@ -1,5 +1,4 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import warnings
 from abc import ABCMeta, abstractmethod
 from collections import OrderedDict
 from typing import Sequence
@@ -7,18 +6,10 @@ from typing import Sequence
 import mmcv
 import torch
 import torch.distributed as dist
-from mmcv.runner import BaseModule
+from mmcv.runner import BaseModule, auto_fp16

 from mmcls.core.visualization import imshow_infos

-# TODO import `auto_fp16` from mmcv and delete them from mmcls
-try:
-    from mmcv.runner import auto_fp16
-except ImportError:
-    warnings.warn('auto_fp16 from mmcls will be deprecated.'
-                  'Please install mmcv>=1.1.4.')
-    from mmcls.core import auto_fp16
-

 class BaseClassifier(BaseModule, metaclass=ABCMeta):
     """Base class for classifiers."""
@@ -1,14 +1,9 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import copy
-import warnings
-
 from ..builder import CLASSIFIERS, build_backbone, build_head, build_neck
 from ..heads import MultiLabelClsHead
 from ..utils.augment import Augments
 from .base import BaseClassifier

-warnings.simplefilter('once')
-

 @CLASSIFIERS.register_module()
 class ImageClassifier(BaseClassifier):
@@ -23,18 +18,8 @@ class ImageClassifier(BaseClassifier):
         super(ImageClassifier, self).__init__(init_cfg)

         if pretrained is not None:
-            warnings.warn('DeprecationWarning: pretrained is a deprecated \
-                key, please consider using init_cfg')
             self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)

-        return_tuple = backbone.pop('return_tuple', True)
         self.backbone = build_backbone(backbone)
-        if return_tuple is False:
-            warnings.warn(
-                'The `return_tuple` is a temporary arg, we will force to '
-                'return tuple in the future. Please handle tuple in your '
-                'custom neck or head.', DeprecationWarning)
-        self.return_tuple = return_tuple

         if neck is not None:
             self.neck = build_neck(neck)
@@ -47,29 +32,6 @@ class ImageClassifier(BaseClassifier):
             augments_cfg = train_cfg.get('augments', None)
             if augments_cfg is not None:
                 self.augments = Augments(augments_cfg)
-            else:
-                # Considering BC-breaking
-                mixup_cfg = train_cfg.get('mixup', None)
-                cutmix_cfg = train_cfg.get('cutmix', None)
-                assert mixup_cfg is None or cutmix_cfg is None, \
-                    'If mixup and cutmix are set simultaneously,' \
-                    'use augments instead.'
-                if mixup_cfg is not None:
-                    warnings.warn('The mixup attribute will be deprecated. '
-                                  'Please use augments instead.')
-                    cfg = copy.deepcopy(mixup_cfg)
-                    cfg['type'] = 'BatchMixup'
-                    # In the previous version, mixup_prob is always 1.0.
-                    cfg['prob'] = 1.0
-                    self.augments = Augments(cfg)
-                if cutmix_cfg is not None:
-                    warnings.warn('The cutmix attribute will be deprecated. '
-                                  'Please use augments instead.')
-                    cfg = copy.deepcopy(cutmix_cfg)
-                    cutmix_prob = cfg.pop('cutmix_prob')
-                    cfg['type'] = 'BatchCutMix'
-                    cfg['prob'] = cutmix_prob
-                    self.augments = Augments(cfg)

     def extract_feat(self, img, stage='neck'):
         """Directly extract features from the specified stage.
@@ -140,16 +102,7 @@ class ImageClassifier(BaseClassifier):
                 '"neck" and "pre_logits"')

         x = self.backbone(img)
-        if self.return_tuple:
-            if not isinstance(x, tuple):
-                x = (x, )
-                warnings.warn(
-                    'We will force all backbones to return a tuple in the '
-                    'future. Please check your backbone and wrap the output '
-                    'as a tuple.', DeprecationWarning)
-        else:
-            if isinstance(x, tuple):
-                x = x[-1]

         if stage == 'backbone':
             return x
@@ -181,17 +134,7 @@ class ImageClassifier(BaseClassifier):
         x = self.extract_feat(img)

         losses = dict()
-        try:
-            loss = self.head.forward_train(x, gt_label)
-        except TypeError as e:
-            if 'not tuple' in str(e) and self.return_tuple:
-                return TypeError(
-                    'Seems the head cannot handle tuple input. We have '
-                    'changed all backbones\' output to a tuple. Please '
-                    'update your custom head\'s forward function. '
-                    'Temporarily, you can set "return_tuple=False" in '
-                    'your backbone config to disable this feature.')
-            raise e
+        loss = self.head.forward_train(x, gt_label)

         losses.update(loss)
@@ -201,20 +144,10 @@ class ImageClassifier(BaseClassifier):
         """Test without augmentation."""
         x = self.extract_feat(img)

-        try:
-            if isinstance(self.head, MultiLabelClsHead):
-                assert 'softmax' not in kwargs, (
-                    'Please use `sigmoid` instead of `softmax` '
-                    'in multi-label tasks.')
-            res = self.head.simple_test(x, **kwargs)
-        except TypeError as e:
-            if 'not tuple' in str(e) and self.return_tuple:
-                return TypeError(
-                    'Seems the head cannot handle tuple input. We have '
-                    'changed all backbones\' output to a tuple. Please '
-                    'update your custom head\'s forward function. '
-                    'Temporarily, you can set "return_tuple=False" in '
-                    'your backbone config to disable this feature.')
-            raise e
+        if isinstance(self.head, MultiLabelClsHead):
+            assert 'softmax' not in kwargs, (
+                'Please use `sigmoid` instead of `softmax` '
+                'in multi-label tasks.')
+        res = self.head.simple_test(x, **kwargs)

         return res
@@ -1,6 +1,4 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import warnings
-
 import torch
 import torch.nn as nn
@@ -26,7 +24,7 @@ class LabelSmoothLoss(nn.Module):
         label_smooth_val (float): The degree of label smoothing.
         num_classes (int, optional): Number of classes. Defaults to None.
         mode (str): Refers to notes, Options are 'original', 'classy_vision',
-            'multi_label'. Defaults to 'classy_vision'
+            'multi_label'. Defaults to 'original'
         reduction (str): The method used to reduce the loss.
             Options are "none", "mean" and "sum". Defaults to 'mean'.
         loss_weight (float):  Weight of the loss. Defaults to 1.0.
@@ -57,7 +55,7 @@ class LabelSmoothLoss(nn.Module):
     def __init__(self,
                  label_smooth_val,
                  num_classes=None,
-                 mode=None,
+                 mode='original',
                  reduction='mean',
                  loss_weight=1.0):
         super().__init__()
@@ -76,14 +74,6 @@ class LabelSmoothLoss(nn.Module):
             f'but gets {mode}.'
         self.reduction = reduction

-        if mode is None:
-            warnings.warn(
-                'LabelSmoothLoss mode is not set, use "classy_vision" '
-                'by default. The default value will be changed to '
-                '"original" recently. Please set mode manually if want '
-                'to keep "classy_vision".', UserWarning)
-            mode = 'classy_vision'
-
         accept_mode = {'original', 'classy_vision', 'multi_label'}
         assert mode in accept_mode, \
             f'LabelSmoothLoss supports mode {accept_mode}, but gets {mode}.'
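Since the `mode=None` fallback (and its warning) is gone and the default is now `'original'`, configs that relied on the old implicit `'classy_vision'` behaviour must say so explicitly. A minimal sketch (the smoothing value is hypothetical):

```python
# Hypothetical loss config: `mode` now defaults to 'original'; set it
# explicitly to keep the previous 'classy_vision' smoothing behaviour.
loss = dict(
    type='LabelSmoothLoss',
    label_smooth_val=0.1,
    mode='classy_vision')
```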
@@ -4,10 +4,8 @@ import tempfile
 from copy import deepcopy

 import numpy as np
 import pytest
 import torch
 from mmcv import ConfigDict
-from mmcv.runner.base_module import BaseModule

 from mmcls.models import CLASSIFIERS
 from mmcls.models.classifiers import ImageClassifier
@@ -87,13 +85,10 @@ def test_image_classifier():
     torch.testing.assert_allclose(soft_pred, torch.softmax(pred, dim=1))

     # test pretrained
-    # TODO remove deprecated pretrained
-    with pytest.warns(UserWarning):
-        model_cfg_ = deepcopy(model_cfg)
-        model_cfg_['pretrained'] = 'checkpoint'
-        model = CLASSIFIERS.build(model_cfg_)
-        assert model.init_cfg == dict(
-            type='Pretrained', checkpoint='checkpoint')
+    model_cfg_ = deepcopy(model_cfg)
+    model_cfg_['pretrained'] = 'checkpoint'
+    model = CLASSIFIERS.build(model_cfg_)
+    assert model.init_cfg == dict(type='Pretrained', checkpoint='checkpoint')

     # test show_result
     img = np.random.randint(0, 256, (224, 224, 3)).astype(np.uint8)
@@ -137,17 +132,6 @@ def test_image_classifier_with_mixup():
     losses = img_classifier.forward_train(imgs, label)
     assert losses['loss'].item() > 0

-    # Considering BC-breaking
-    # TODO remove deprecated mixup usage.
-    model_cfg['train_cfg'] = dict(mixup=dict(alpha=1.0, num_classes=10))
-    img_classifier = ImageClassifier(**model_cfg)
-    img_classifier.init_weights()
-    imgs = torch.randn(16, 3, 32, 32)
-    label = torch.randint(0, 10, (16, ))
-
-    losses = img_classifier.forward_train(imgs, label)
-    assert losses['loss'].item() > 0
-

 def test_image_classifier_with_cutmix():
@@ -177,18 +161,6 @@ def test_image_classifier_with_cutmix():
     losses = img_classifier.forward_train(imgs, label)
     assert losses['loss'].item() > 0

-    # Considering BC-breaking
-    # TODO remove deprecated mixup usage.
-    model_cfg['train_cfg'] = dict(
-        cutmix=dict(alpha=1.0, num_classes=10, cutmix_prob=1.0))
-    img_classifier = ImageClassifier(**model_cfg)
-    img_classifier.init_weights()
-    imgs = torch.randn(16, 3, 32, 32)
-    label = torch.randint(0, 10, (16, ))
-
-    losses = img_classifier.forward_train(imgs, label)
-    assert losses['loss'].item() > 0
-

 def test_image_classifier_with_augments():
@@ -266,59 +238,6 @@ def test_image_classifier_with_augments():
     assert losses['loss'].item() > 0


-def test_image_classifier_return_tuple():
-    model_cfg = ConfigDict(
-        type='ImageClassifier',
-        backbone=dict(
-            type='ResNet_CIFAR',
-            depth=50,
-            num_stages=4,
-            out_indices=(3, ),
-            style='pytorch',
-            return_tuple=False),
-        head=dict(
-            type='LinearClsHead',
-            num_classes=10,
-            in_channels=2048,
-            loss=dict(type='CrossEntropyLoss')))
-
-    imgs = torch.randn(16, 3, 32, 32)
-
-    model_cfg_ = deepcopy(model_cfg)
-    with pytest.warns(DeprecationWarning):
-        model = CLASSIFIERS.build(model_cfg_)
-
-    # test backbone return tensor
-    feat = model.extract_feat(imgs)
-    assert isinstance(feat, torch.Tensor)
-
-    # test backbone return tuple
-    model_cfg_ = deepcopy(model_cfg)
-    model_cfg_.backbone.return_tuple = True
-    model = CLASSIFIERS.build(model_cfg_)
-
-    feat = model.extract_feat(imgs)
-    assert isinstance(feat, tuple)
-
-    # test warning if backbone return tensor
-    class ToyBackbone(BaseModule):
-
-        def __init__(self):
-            super().__init__()
-            self.conv = torch.nn.Conv2d(3, 16, 3)
-
-        def forward(self, x):
-            return self.conv(x)
-
-    model_cfg_ = deepcopy(model_cfg)
-    model_cfg_.backbone.return_tuple = True
-    model = CLASSIFIERS.build(model_cfg_)
-    model.backbone = ToyBackbone()
-
-    with pytest.warns(DeprecationWarning):
-        model.extract_feat(imgs)
-
-
 def test_classifier_extract_feat():
     model_cfg = ConfigDict(
         type='ImageClassifier',
@@ -1,7 +1,6 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 import logging
 import tempfile
-import warnings
 from unittest.mock import MagicMock, patch

 import mmcv.runner
@@ -9,21 +8,11 @@ import pytest
 import torch
 import torch.nn as nn
 from mmcv.runner import obj_from_dict
+from mmcv.runner.hooks import DistEvalHook, EvalHook
 from torch.utils.data import DataLoader, Dataset

 from mmcls.apis import single_gpu_test

-# TODO import eval hooks from mmcv and delete them from mmcls
-try:
-    from mmcv.runner.hooks import EvalHook, DistEvalHook
-    use_mmcv_hook = True
-except ImportError:
-    warnings.warn('DeprecationWarning: EvalHook and DistEvalHook from mmcls '
-                  'will be deprecated.'
-                  'Please install mmcv through master branch.')
-    from mmcls.core import EvalHook, DistEvalHook
-    use_mmcv_hook = False
-

 class ExampleDataset(Dataset):
@@ -157,9 +146,8 @@ def test_dist_eval_hook():

     # test DistEvalHook
     with tempfile.TemporaryDirectory() as tmpdir:
-        if use_mmcv_hook:
-            p = patch('mmcv.engine.multi_gpu_test', multi_gpu_test)
-            p.start()
+        p = patch('mmcv.engine.multi_gpu_test', multi_gpu_test)
+        p.start()
         eval_hook = DistEvalHook(data_loader, by_epoch=False)
         runner = mmcv.runner.IterBasedRunner(
             model=model,
@@ -171,8 +159,7 @@ def test_dist_eval_hook():
         runner.run([loader], [('train', 1)])
         test_dataset.evaluate.assert_called_with([torch.tensor([1])],
                                                  logger=runner.logger)
-        if use_mmcv_hook:
-            p.stop()
+        p.stop()


 @patch('mmcls.apis.multi_gpu_test', multi_gpu_test)
@@ -201,9 +188,8 @@ def test_dist_eval_hook_epoch():

     # test DistEvalHook
     with tempfile.TemporaryDirectory() as tmpdir:
-        if use_mmcv_hook:
-            p = patch('mmcv.engine.multi_gpu_test', multi_gpu_test)
-            p.start()
+        p = patch('mmcv.engine.multi_gpu_test', multi_gpu_test)
+        p.start()
         eval_hook = DistEvalHook(data_loader, by_epoch=True, interval=2)
         runner = mmcv.runner.EpochBasedRunner(
             model=model,
@@ -215,5 +201,4 @@ def test_dist_eval_hook_epoch():
         runner.run([loader], [('train', 1)])
         test_dataset.evaluate.assert_called_with([torch.tensor([1])],
                                                  logger=runner.logger)
-        if use_mmcv_hook:
-            p.stop()
+        p.stop()
@@ -1,7 +1,6 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 import argparse
 import os.path as osp
-import warnings

 import mmcv
 from mmcv import DictAction
@@ -31,23 +30,8 @@ def parse_args():
         'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
         'Note that the quotation marks are necessary and that no white space '
         'is allowed.')
-    parser.add_argument(
-        '--options',
-        nargs='+',
-        action=DictAction,
-        help='override some settings in the used config, the key-value pair '
-        'in xxx=yyy format will be merged into config file (deprecate), '
-        'change to --cfg-options instead.')
     args = parser.parse_args()

-    if args.options and args.cfg_options:
-        raise ValueError(
-            '--options and --cfg-options cannot be both '
-            'specified, --options is deprecated in favor of --cfg-options')
-    if args.options:
-        warnings.warn('--options is deprecated in favor of --cfg-options')
-        args.cfg_options = args.options
-
     return args
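After this removal, `--cfg-options` is the only config-override flag in these tools. A self-contained sketch of the surviving behaviour (a standalone demo, not code from this repository):

```python
import argparse

from mmcv import DictAction

parser = argparse.ArgumentParser(description='--cfg-options demo')
parser.add_argument(
    '--cfg-options',
    nargs='+',
    action=DictAction,
    help='override some settings in the used config, the key-value pair '
    'in xxx=yyy format will be merged into config file.')

# What used to be `--options model.head.num_classes=10` becomes:
args = parser.parse_args(['--cfg-options', 'model.head.num_classes=10'])
print(args.cfg_options)  # {'model.head.num_classes': 10}
```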
@@ -1,6 +1,5 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 import argparse
-import warnings

 from mmcv import Config, DictAction
@@ -8,13 +7,6 @@ from mmcv import Config, DictAction
 def parse_args():
     parser = argparse.ArgumentParser(description='Print the whole config')
     parser.add_argument('config', help='config file path')
-    parser.add_argument(
-        '--options',
-        nargs='+',
-        action=DictAction,
-        help='override some settings in the used config, the key-value pair '
-        'in xxx=yyy format will be merged into config file (deprecate), '
-        'change to --cfg-options instead.')
     parser.add_argument(
         '--cfg-options',
         nargs='+',
@@ -27,14 +19,6 @@ def parse_args():
         'is allowed.')
     args = parser.parse_args()

-    if args.options and args.cfg_options:
-        raise ValueError(
-            '--options and --cfg-options cannot be both '
-            'specified, --options is deprecated in favor of --cfg-options')
-    if args.options:
-        warnings.warn('--options is deprecated in favor of --cfg-options')
-        args.cfg_options = args.options
-
     return args
@@ -9,21 +9,14 @@ import numpy as np
 import torch
 from mmcv import DictAction
 from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
-from mmcv.runner import get_dist_info, init_dist, load_checkpoint
+from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
+                         wrap_fp16_model)

 from mmcls.apis import multi_gpu_test, single_gpu_test
 from mmcls.datasets import build_dataloader, build_dataset
 from mmcls.models import build_classifier
 from mmcls.utils import setup_multi_processes

-# TODO import `wrap_fp16_model` from mmcv and delete them from mmcls
-try:
-    from mmcv.runner import wrap_fp16_model
-except ImportError:
-    warnings.warn('wrap_fp16_model from mmcls will be deprecated.'
-                  'Please install mmcv>=1.1.4.')
-    from mmcls.core import wrap_fp16_model
-

 def parse_args():
     parser = argparse.ArgumentParser(description='mmcls test model')
@@ -67,13 +60,6 @@ def parse_args():
         'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
         'Note that the quotation marks are necessary and that no white space '
         'is allowed.')
-    parser.add_argument(
-        '--options',
-        nargs='+',
-        action=DictAction,
-        help='override some settings in the used config, the key-value pair '
-        'in xxx=yyy format will be merged into config file (deprecate), '
-        'change to --cfg-options instead.')
     parser.add_argument(
         '--metric-options',
         nargs='+',
@@ -106,20 +92,6 @@ def parse_args():
     if 'LOCAL_RANK' not in os.environ:
         os.environ['LOCAL_RANK'] = str(args.local_rank)

-    if args.options and args.cfg_options:
-        raise ValueError(
-            '--options and --cfg-options cannot be both '
-            'specified, --options is deprecated in favor of --cfg-options')
-    if args.options:
-        warnings.warn('--options is deprecated in favor of --cfg-options')
-        args.cfg_options = args.options
-
-    if args.device:
-        warnings.warn(
-            '--device is deprecated. To use cpu to test, please '
-            'refers to https://mmclassification.readthedocs.io/en/latest/'
-            'getting_started.html#inference-with-pretrained-models')
-
     assert args.metrics or args.out, \
         'Please specify at least one of output path and evaluation metrics.'
@@ -4,7 +4,6 @@ import copy
 import os
 import os.path as osp
 import time
-import warnings

 import mmcv
 import torch
@@ -47,13 +46,6 @@ def parse_args():
         '--deterministic',
         action='store_true',
         help='whether to set deterministic options for CUDNN backend.')
-    parser.add_argument(
-        '--options',
-        nargs='+',
-        action=DictAction,
-        help='override some settings in the used config, the key-value pair '
-        'in xxx=yyy format will be merged into config file (deprecate), '
-        'change to --cfg-options instead.')
     parser.add_argument(
         '--cfg-options',
         nargs='+',
@@ -74,20 +66,6 @@ def parse_args():
     if 'LOCAL_RANK' not in os.environ:
         os.environ['LOCAL_RANK'] = str(args.local_rank)

-    if args.options and args.cfg_options:
-        raise ValueError(
-            '--options and --cfg-options cannot be both '
-            'specified, --options is deprecated in favor of --cfg-options')
-    if args.options:
-        warnings.warn('--options is deprecated in favor of --cfg-options')
-        args.cfg_options = args.options
-
-    if args.device:
-        warnings.warn(
-            '--device is deprecated. To use cpu to train, please '
-            'refers to https://mmclassification.readthedocs.io/en/latest/'
-            'getting_started.html#train-a-model')
-
     return args