Merge branch 'add_missing_core' into 'master'

Add missing mmcls.core

See merge request open-mmlab/mmclassification!1
chenkai 2020-05-27 11:37:16 +08:00
commit 6515bb999c
9 changed files with 94 additions and 7 deletions


@@ -1,3 +1,4 @@
 from .evaluation import *  # noqa: F401, F403
 from .fp16 import *  # noqa: F401, F403
+from .optimizer import *  # noqa: F401, F403
 from .utils import *  # noqa: F401, F403


@@ -1,3 +1,3 @@
-from .eval_hooks import EvalHook
+from .eval_hooks import DistEvalHook, EvalHook
 
-__all__ = ['EvalHook']
+__all__ = ['DistEvalHook', 'EvalHook']
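
Note, not part of the diff: the newly exported DistEvalHook/EvalHook pair is normally attached to an mmcv runner by the training code. A hedged sketch follows, in which runner, val_dataloader and distributed are assumed placeholders and the exact hook arguments may differ in this version.

# Hedged illustration only: pick the distributed or single-GPU evaluation
# hook and register it so validation runs every `interval` epochs.
# `runner`, `val_dataloader` and `distributed` are assumed placeholders.
from mmcls.core.evaluation import DistEvalHook, EvalHook

eval_hook_cls = DistEvalHook if distributed else EvalHook
runner.register_hook(eval_hook_cls(val_dataloader, interval=1))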


@@ -4,7 +4,7 @@ import torch
 import torch.nn as nn
 from mmcv.runner import OptimizerHook
-from ..dist_utils import allreduce_grads
+from ..utils import allreduce_grads
 from .utils import cast_tensor_type


@@ -0,0 +1,3 @@
+from .builder import build_optimizer
+
+__all__ = ['build_optimizer']


@@ -0,0 +1,42 @@
+import copy
+import inspect
+
+import torch
+from mmcv.utils import Registry, build_from_cfg
+
+OPTIMIZERS = Registry('optimizer')
+OPTIMIZER_BUILDERS = Registry('optimizer builder')
+
+
+def register_torch_optimizers():
+    torch_optimizers = []
+    for module_name in dir(torch.optim):
+        if module_name.startswith('__'):
+            continue
+        _optim = getattr(torch.optim, module_name)
+        if inspect.isclass(_optim) and issubclass(_optim,
+                                                  torch.optim.Optimizer):
+            OPTIMIZERS.register_module()(_optim)
+            torch_optimizers.append(module_name)
+    return torch_optimizers
+
+
+TORCH_OPTIMIZERS = register_torch_optimizers()
+
+
+def build_optimizer_constructor(cfg):
+    return build_from_cfg(cfg, OPTIMIZER_BUILDERS)
+
+
+def build_optimizer(model, cfg):
+    optimizer_cfg = copy.deepcopy(cfg)
+    constructor_type = optimizer_cfg.pop('constructor',
+                                         'DefaultOptimizerConstructor')
+    paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None)
+    optim_constructor = build_optimizer_constructor(
+        dict(
+            type=constructor_type,
+            optimizer_cfg=optimizer_cfg,
+            paramwise_cfg=paramwise_cfg))
+    optimizer = optim_constructor(model)
+    return optimizer
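
Not part of the diff: a minimal usage sketch for the new build_optimizer entry point. It assumes an optimizer constructor named 'DefaultOptimizerConstructor' is registered in OPTIMIZER_BUILDERS (this file only defines the registry), and the model and hyper-parameter values below are illustrative.

# Hedged usage sketch: build a torch.optim.SGD from a plain config dict.
# Assumes 'DefaultOptimizerConstructor' is available in OPTIMIZER_BUILDERS;
# the toy model and hyper-parameter values are arbitrary examples.
import torch.nn as nn

from mmcls.core import build_optimizer

model = nn.Linear(10, 2)  # stand-in for a classifier
optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer = build_optimizer(model, optimizer_cfg)  # -> torch.optim.SGD instance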


@@ -1,3 +1,3 @@
-from .dist_utils import DistOptimizerHook
+from .dist_utils import DistOptimizerHook, allreduce_grads
 
-__all__ = ['DistOptimizerHook']
+__all__ = ['allreduce_grads', 'DistOptimizerHook']


@@ -1,4 +1,44 @@
+from collections import OrderedDict
+
+import torch.distributed as dist
 from mmcv.runner import OptimizerHook
+from torch._utils import (_flatten_dense_tensors, _take_tensors,
+                          _unflatten_dense_tensors)
 
 
+def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
+    if bucket_size_mb > 0:
+        bucket_size_bytes = bucket_size_mb * 1024 * 1024
+        buckets = _take_tensors(tensors, bucket_size_bytes)
+    else:
+        buckets = OrderedDict()
+        for tensor in tensors:
+            tp = tensor.type()
+            if tp not in buckets:
+                buckets[tp] = []
+            buckets[tp].append(tensor)
+        buckets = buckets.values()
+
+    for bucket in buckets:
+        flat_tensors = _flatten_dense_tensors(bucket)
+        dist.all_reduce(flat_tensors)
+        flat_tensors.div_(world_size)
+        for tensor, synced in zip(
+                bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
+            tensor.copy_(synced)
+
+
+def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
+    grads = [
+        param.grad.data for param in params
+        if param.requires_grad and param.grad is not None
+    ]
+    world_size = dist.get_world_size()
+    if coalesce:
+        _allreduce_coalesced(grads, world_size, bucket_size_mb)
+    else:
+        for tensor in grads:
+            dist.all_reduce(tensor.div_(world_size))
+
+
 class DistOptimizerHook(OptimizerHook):
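
Not part of the diff: a hedged fragment showing the newly exported allreduce_grads averaging gradients across ranks in a hand-written distributed step, roughly what DistOptimizerHook is expected to automate. model, optimizer and batch are assumed placeholders, and torch.distributed must already be initialised.

# Hedged fragment only: manual gradient all-reduce after backward().
# Assumes an initialised process group plus existing model/optimizer/batch.
from mmcls.core import allreduce_grads

loss = model(batch).mean()  # hypothetical forward pass producing a scalar
optimizer.zero_grad()
loss.backward()
# Sum gradients over all ranks (coalesced into buckets), then divide by world size.
allreduce_grads(model.parameters(), coalesce=True, bucket_size_mb=-1)
optimizer.step()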


@@ -2,7 +2,7 @@ from collections.abc import Sequence
 from mmcv.utils import build_from_cfg
-from ..registry import PIPELINES
+from ..builder import PIPELINES
 @PIPELINES.register_module


@@ -6,7 +6,8 @@ import torch
 from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
 from mmcv.runner import get_dist_info, init_dist, load_checkpoint
-from mmcls.core import multi_gpu_test, single_gpu_test, wrap_fp16_model
+from mmcls.apis import multi_gpu_test, single_gpu_test
+from mmcls.core import wrap_fp16_model
 from mmcls.datasets import build_dataloader, build_dataset
 from mmcls.models import build_model
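
Not part of the diff: a hedged sketch of calling the relocated test helper after this change. The raw model and data_loader are assumed to have been built elsewhere (e.g. via build_model and build_dataloader), and keyword arguments beyond the two positional ones are omitted because they may differ in this version.

# Hedged sketch: single-GPU inference with the helper now imported from
# mmcls.apis rather than mmcls.core. `model` and `data_loader` are assumed.
from mmcv.parallel import MMDataParallel

from mmcls.apis import single_gpu_test

model = MMDataParallel(model, device_ids=[0])  # wrap the assumed raw model
outputs = single_gpu_test(model, data_loader)  # list of per-sample results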