mirror of https://github.com/open-mmlab/mim.git
[Improvement] Switch More UnitTest On (#30)
* cpu training
* update README
* test all
* fix unittest
parent 0c72b1b274
commit a20a02a97f
.github/workflows/build.yml (2 changes)

@@ -73,7 +73,7 @@ jobs:
       run: pip install -r requirements/tests.txt
     - name: Run unittests and generate coverage report
       run: |
-        coverage run --branch --source=mim -m pytest tests/ --ignore tests/test_gridsearch.py --ignore tests/test_run.py --ignore tests/test_test.py --ignore tests/test_train.py --ignore tests/test_train.py
+        coverage run --branch --source=mim -m pytest tests/
         coverage xml
         coverage report -m
     - name: Upload coverage to Codecov
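With the `--ignore` flags dropped, CI now exercises the full suite, including the previously skipped train/run/test/gridsearch tests. For reference, a minimal sketch of running the same coverage step locally from Python, assuming the `coverage` and `pytest` packages are installed (this helper flow is illustrative, not part of the repository):

```python
# Sketch only: mirrors the updated CI step (branch coverage of mim over the
# whole tests/ directory), assuming coverage and pytest are installed.
import coverage
import pytest

cov = coverage.Coverage(branch=True, source=['mim'])  # --branch --source=mim
cov.start()
exit_code = pytest.main(['tests/'])                   # run the full suite
cov.stop()
cov.save()
cov.xml_report()                                      # coverage xml
cov.report(show_missing=True)                         # coverage report -m
```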
README.md (14 changes)

@@ -239,6 +239,10 @@ MIM provides a unified API for launching and installing OpenMMLab projects and t
 + command
 
 ```bash
+# Train models on a single server with CPU by setting `gpus` to 0 and
+# 'launcher' to 'none' (if applicable). The training script of the
+# corresponding codebase will fail if it doesn't support CPU training.
+> mim train mmcls resnet101_b16x8_cifar10.py --work-dir tmp --gpus 0
 # Train models on a single server with one GPU
 > mim train mmcls resnet101_b16x8_cifar10.py --work-dir tmp --gpus 1
 # Train models on a single server with 4 GPUs and pytorch distributed
@@ -258,6 +262,8 @@ MIM provides a unified API for launching and installing OpenMMLab projects and t
 ```python
 from mim import train
 
+train(repo='mmcls', config='resnet18_b16x8_cifar10.py', gpus=0,
+      other_args='--work-dir tmp')
 train(repo='mmcls', config='resnet18_b16x8_cifar10.py', gpus=1,
       other_args='--work-dir tmp')
 train(repo='mmcls', config='resnet18_b16x8_cifar10.py', gpus=4,
@@ -368,6 +374,11 @@ MIM provides a unified API for launching and installing OpenMMLab projects and t
 + command
 
 ```bash
+# Parameter search on a single server with CPU by setting `gpus` to 0 and
+# 'launcher' to 'none' (if applicable). The training script of the
+# corresponding codebase will fail if it doesn't support CPU training.
+> mim gridsearch mmcls resnet101_b16x8_cifar10.py --work-dir tmp --gpus 0 \
+    --search-args '--optimizer.lr 1e-2 1e-3'
 # Parameter search with on a single server with one GPU, search learning
 # rate
 > mim gridsearch mmcls resnet101_b16x8_cifar10.py --work-dir tmp --gpus 1 \
@@ -405,6 +416,9 @@ MIM provides a unified API for launching and installing OpenMMLab projects and t
 ```python
 from mim import gridsearch
 
+gridsearch(repo='mmcls', config='resnet101_b16x8_cifar10.py', gpus=0,
+           search_args='--optimizer.lr 1e-2 1e-3',
+           other_args='--work-dir tmp')
 gridsearch(repo='mmcls', config='resnet101_b16x8_cifar10.py', gpus=1,
            search_args='--optimizer.lr 1e-2 1e-3',
            other_args='--work-dir tmp')
@@ -80,6 +80,11 @@ def cli(package: str,
     Example:
 
     \b
+    # Parameter search on a single server with CPU by setting `gpus` to 0 and
+    # 'launcher' to 'none' (if applicable). The training script of the
+    # corresponding codebase will fail if it doesn't support CPU training.
+    > mim gridsearch mmcls resnet101_b16x8_cifar10.py --work-dir tmp --gpus \
+        0 --search-args '--optimizer.lr 1e-2 1e-3'
     # Parameter search with on a single server with one GPU, search learning
     # rate
     > mim gridsearch mmcls resnet101_b16x8_cifar10.py --work-dir tmp --gpus \
@@ -304,8 +309,14 @@ def gridsearch(
     common_args = ['--launcher', launcher] + other_args_str.split()
 
     if launcher == 'none':
-        cmd = ['python', train_script, config_path, '--gpus',
-               str(gpus)] + common_args
+        if gpus:
+            cmd = [
+                'python', train_script, config_path, '--gpus',
+                str(gpus)
+            ] + common_args
+        else:
+            cmd = ['python', train_script, config_path, '--device', 'cpu'
+                   ] + common_args
     elif launcher == 'pytorch':
         cmd = [
             'python', '-m', 'torch.distributed.launch',
@@ -68,6 +68,10 @@ def cli(package: str,
     Example:
 
     \b
+    # Train models on a single server with CPU by setting `gpus` to 0 and
+    # 'launcher' to 'none' (if applicable). The training script of the
+    # corresponding codebase will fail if it doesn't support CPU training.
+    > mim train mmcls resnet101_b16x8_cifar10.py --work-dir tmp --gpus 0
     # Train models on a single server with one GPU
     > mim train mmcls resnet101_b16x8_cifar10.py --work-dir tmp --gpus 1
     # Train models on a single server with 4 GPUs and pytorch distributed
@@ -189,8 +193,12 @@ def train(
     common_args = ['--launcher', launcher] + list(other_args)
 
     if launcher == 'none':
-        cmd = ['python', train_script, config, '--gpus',
-               str(gpus)] + common_args
+        if gpus:
+            cmd = ['python', train_script, config, '--gpus',
+                   str(gpus)] + common_args
+        else:
+            cmd = ['python', train_script, config, '--device', 'cpu'
+                   ] + common_args
     elif launcher == 'pytorch':
         cmd = [
             'python', '-m', 'torch.distributed.launch',
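The two hunks above (gridsearch and train) add the same fallback: with `launcher == 'none'`, a non-zero `gpus` keeps the old `--gpus N` invocation, while `gpus == 0` now launches the codebase's training script with `--device cpu`. A standalone sketch of that dispatch, with an illustrative script path rather than the repository's exact code:

```python
# Sketch of the gpus -> CPU fallback used by both `train` and `gridsearch`
# when launcher == 'none'. Standalone illustration, not the repo's exact code.
def build_local_cmd(train_script, config, gpus, common_args):
    if gpus:
        cmd = ['python', train_script, config, '--gpus', str(gpus)] + common_args
    else:
        cmd = ['python', train_script, config, '--device', 'cpu'] + common_args
    return cmd


print(build_local_cmd('tools/train.py', 'cfg.py', 0, ['--launcher', 'none']))
# ['python', 'tools/train.py', 'cfg.py', '--device', 'cpu', '--launcher', 'none']
```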
@@ -24,7 +24,7 @@ def test_gridsearch():
     runner = CliRunner()
     if not osp.exists('/tmp/dataset'):
         download_from_file(dataset_url, '/tmp/dataset.tar')
-        extract_tar('tmp/dataset.tar', '/tmp/')
+        extract_tar('/tmp/dataset.tar', '/tmp/')
 
     if not osp.exists('/tmp/config.py'):
         download_from_file(cfg_url, '/tmp/config.py')
@@ -33,19 +33,19 @@ def test_gridsearch():
     time.sleep(5)
 
     args1 = [
-        'mmcls', '/tmp/config.py', '--gpus=1', '--work-dir=tmp',
+        'mmcls', '/tmp/config.py', '--gpus=0', '--work-dir=tmp',
         '--search-args', '--optimizer.lr 1e-3 1e-4'
     ]
     args2 = [
-        'mmcls', '/tmp/config.py', '--gpus=1', '--work-dir=tmp',
+        'mmcls', '/tmp/config.py', '--gpus=0', '--work-dir=tmp',
         '--search-args', '--optimizer.weight_decay 1e-3 1e-4'
     ]
     args3 = [
-        'mmcls', '/tmp/xxx.py', '--gpus=1', '--work-dir=tmp', '--search-args',
+        'mmcls', '/tmp/xxx.py', '--gpus=0', '--work-dir=tmp', '--search-args',
         '--optimizer.lr 1e-3 1e-4'
     ]
     args4 = [
-        'mmcls', '/tmp/config.py', '--gpus=1', '--work-dir=tmp',
+        'mmcls', '/tmp/config.py', '--gpus=0', '--work-dir=tmp',
         '--search-args'
     ]
 
@@ -39,7 +39,7 @@ def test_run():
 
     result = runner.invoke(
         run,
-        ['mmcls', 'train', '/tmp/config.py', '--gpus=1', '--work-dir=tmp'])
+        ['mmcls', 'train', '/tmp/config.py', '--device=cpu', '--work-dir=tmp'])
     assert result.exit_code == 0
     result = runner.invoke(run, [
         'mmcls', 'test', '/tmp/config.py', '/tmp/ckpt.pth',
@@ -34,11 +34,11 @@ def test_train():
     time.sleep(5)
 
     result = runner.invoke(
-        train, ['mmcls', '/tmp/config.py', '--gpus=1', '--work-dir=tmp'])
+        train, ['mmcls', '/tmp/config.py', '--gpus=0', '--work-dir=tmp'])
     assert result.exit_code == 0
 
     result = runner.invoke(
-        train, ['mmcls', '/tmp/xxx.py', '--gpus=1', '--work-dir=tmp'])
+        train, ['mmcls', '/tmp/xxx.py', '--gpus=0', '--work-dir=tmp'])
     assert result.exit_code != 0
 
     shutil.rmtree('tmp')
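The test changes follow the same idea: the click `CliRunner` invocations switch to `--gpus=0` (or `--device=cpu` for `mim run`) so the suite can pass on CPU-only CI runners. A minimal sketch of such a test; the import path and the `/tmp/config.py` fixture are assumptions for illustration and may differ from the repository's fixtures:

```python
# Sketch of a CPU-only training test; import path and /tmp paths are assumed.
from click.testing import CliRunner

from mim.commands.train import cli as train


def test_train_cpu():
    runner = CliRunner()
    # gpus=0 makes mim launch the codebase's train script with `--device cpu`.
    result = runner.invoke(
        train, ['mmcls', '/tmp/config.py', '--gpus=0', '--work-dir=tmp'])
    assert result.exit_code == 0
```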