mirror of
https://github.com/open-mmlab/mmrazor.git
synced 2025-06-03 15:02:54 +08:00
* tmp * add new mmdet models * add docstring * pass test and pre-commit * rm razor tracer * update fx tracer, now it can automatically wrap methods and functions. * update tracer passed models * add warning for torch <1.12.0 fix bug for python3.6 update placeholder to support placeholder.XXX * fix bug * update docs * fix lint * fix parse_cfg in configs * restore mutablechannel * test ite prune algorithm when using dist * add get_model_from_path to MMModelLibrrary * add mm models to DefaultModelLibrary * add uts * fix bug * fix bug * add uts * add uts * add uts * add uts * fix bug * restore ite_prune_algorithm * update doc * PruneTracer -> ChannelAnalyzer * prune_tracer -> channel_analyzer * add test for fxtracer * fix bug * fix bug * PruneTracer -> ChannelAnalyzer refine * CustomFxTracer -> MMFxTracer * fix bug when test with torch<1.12 * update print log * fix lint * rm unuseful code Co-authored-by: liukai <liukai@pjlab.org.cn> Co-authored-by: jacky <jacky@xx.com> Co-authored-by: Your Name <you@example.com> Co-authored-by: liukai <your_email@abc.example>
32 lines
824 B
Python
32 lines
824 B
Python
# Copyright (c) OpenMMLab. All rights reserved.
import os
import random

import torch
import torch.distributed as dist
class SetDistEnv:
    """Context manager that sets up a single-process ``torch.distributed`` group.

    On entry, ``MASTER_ADDR``/``MASTER_PORT`` are pointed at localhost and a
    process group with ``rank=0, world_size=1`` is initialized (``nccl`` when
    CUDA is requested, ``gloo`` otherwise).  On exit, the process group is
    destroyed and the two environment variables are restored to their prior
    values, so the context leaves no global state behind.

    Args:
        using_cuda (bool): If True, use the ``nccl`` backend. Requires an
            available CUDA device (asserted in ``__init__``).
        port (int, optional): Master port to use. When None, a random port
            in [10000, 20000] is chosen.
    """

    def __init__(self, using_cuda=False, port=None) -> None:
        self.using_cuda = using_cuda
        if self.using_cuda:
            # nccl requires at least one visible CUDA device.
            assert torch.cuda.is_available()
        if port is None:
            # Randomize to reduce the chance of colliding with a port
            # already taken by another concurrently running test.
            port = random.randint(10000, 20000)
        self.port = port

    def __enter__(self):
        # Remember pre-existing values (None = was unset) so that
        # __exit__ can restore the environment exactly.
        self._old_env = {
            key: os.environ.get(key)
            for key in ('MASTER_ADDR', 'MASTER_PORT')
        }
        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = str(self.port)

        # Initialize the single-process group.
        backend = 'nccl' if self.using_cuda else 'gloo'
        dist.init_process_group(backend, rank=0, world_size=1)
        # Return self so ``with SetDistEnv() as env:`` works.
        return self

    def __exit__(self, exc_type, exc_value, tb):
        dist.destroy_process_group()
        # Restore the environment variables we overwrote in __enter__.
        for key, value in getattr(self, '_old_env', {}).items():
            if value is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = value