mirror of https://github.com/open-mmlab/mmsegmentation.git
synced 2025-06-03 22:03:48 +08:00
* Modify default work dir when training.
* Refactor gather_models.py.
* Add train and test matching list.
* Regression benchmark list.
* lower readme name to upper readme name.
* Add url check tool and model inference test tool.
* Modify tool name.
* Support duplicate mode of log json url check.
* Add regression benchmark evaluation automatic tool.
* Add train script generator.
* Only Support script running.
* Add evaluation results gather.
* Add exec Authority.
* Automatically make checkpoint root folder.
* Modify gather results save path.
* Coarse-grained train results gather tool.
* Complete benchmark train script.
* Make some little modifications.
* Fix checkpoint urls.
* Fix unet checkpoint urls.
* Fix fast scnn & fcn checkpoint url.
* Fix fast scnn checkpoint urls.
* Fix fast scnn url.
* Add differential results calculation.
* Add differential results of regression benchmark train results.
* Add an extra argument to select model.
* Update nonlocal_net & hrnet checkpoint url.
* Fix checkpoint url of hrnet and Fix some tta evaluation results and modify gather models tool.
* Modify fast scnn checkpoint url.
* Resolve new comments.
* Fix url check status code bug.
* Resolve some comments.
* Modify train scripts generator.
* Modify work_dir of regression benchmark results.
* model gather tool modification.
115 lines
3.3 KiB
Python
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp

from mmcv import Config

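# The input config is an MMCV Config file mapping model names to one or
# more entries, each carrying a 'config', 'checkpoint' and 'eval' field
# (these keys are the ones read by process_model_info and main below).
# A minimal sketch of such a file, with hypothetical paths and metrics:
#
#   fcn = dict(
#       config='configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py',
#       checkpoint='fcn_r50-d8_512x1024_40k_cityscapes.pth',
#       eval='mIoU')
#   pspnet = [
#       dict(
#           config='configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py',
#           checkpoint='pspnet_r50-d8_512x1024_40k_cityscapes.pth',
#           eval=['mIoU', 'cityscapes']),
#   ]
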
def parse_args():
    parser = argparse.ArgumentParser(
        description='Convert benchmark test model list to script')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--port', type=int, default=28171, help='dist port')
    parser.add_argument(
        '--work-dir',
        default='work_dirs/benchmark_evaluation',
        help='the dir to save metrics')
    parser.add_argument(
        '--out',
        type=str,
        default='.dev/benchmark_evaluation.sh',
        help='path to save model benchmark script')

    args = parser.parse_args()
    return args

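# An illustrative invocation (the script and config file names here are
# assumptions, not fixed by this file):
#
#   python .dev/benchmark_evaluation.py test_models.py \
#       --work-dir work_dirs/benchmark_evaluation \
#       --out .dev/benchmark_evaluation.sh
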
def process_model_info(model_info, work_dir):
    """Normalize one model entry into the fields the test command needs."""
    config = model_info['config'].strip()
    fname, _ = osp.splitext(osp.basename(config))
    job_name = fname
    checkpoint = model_info['checkpoint'].strip()
    # Give every model its own sub-directory under the shared work dir.
    work_dir = osp.join(work_dir, fname)
    # 'eval' may be a single metric or a list of metrics; join them into
    # one space-separated string for the --eval flag.
    if not isinstance(model_info['eval'], list):
        evals = [model_info['eval']]
    else:
        evals = model_info['eval']
    eval = ' '.join(evals)
    return dict(
        config=config,
        job_name=job_name,
        checkpoint=checkpoint,
        work_dir=work_dir,
        eval=eval)

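# For a hypothetical entry
#   dict(config='configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py',
#        checkpoint='fcn_r50-d8_512x1024_40k_cityscapes.pth',
#        eval='mIoU')
# process_model_info would return roughly:
#   dict(config='configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py',
#        job_name='fcn_r50-d8_512x1024_40k_cityscapes',
#        checkpoint='fcn_r50-d8_512x1024_40k_cityscapes.pth',
#        work_dir='work_dirs/benchmark_evaluation/'
#                 'fcn_r50-d8_512x1024_40k_cityscapes',
#        eval='mIoU')
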
def create_test_bash_info(commands, model_test_dict, port, script_name,
                          partition):
    """Append the echo and slurm test lines for one model to commands."""
    config = model_test_dict['config']
    job_name = model_test_dict['job_name']
    checkpoint = model_test_dict['checkpoint']
    work_dir = model_test_dict['work_dir']
    eval = model_test_dict['eval']

    # Echo the config first so the script's output shows which model runs.
    echo_info = f'\necho \'{config}\' &'
    commands.append(echo_info)
    commands.append('\n')

    # Each test runs on 4 GPUs on a single node through tools/slurm_test.sh.
    command_info = f'GPUS=4 GPUS_PER_NODE=4 ' \
                   f'CPUS_PER_TASK=2 {script_name} '

    command_info += f'{partition} '
    command_info += f'{job_name} '
    command_info += f'{config} '
    command_info += f'$CHECKPOINT_DIR/{checkpoint} '

    command_info += f'--eval {eval} '
    command_info += f'--work-dir {work_dir} '
    command_info += f'--options dist_params.port={port} '
    # The trailing '&' submits all jobs in parallel.
    command_info += '&'

    commands.append(command_info)

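# A line generated by create_test_bash_info looks roughly like the
# following (job name, config and checkpoint are hypothetical; the real
# output is a single unwrapped line):
#
#   GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION \
#       fcn_r50-d8_512x1024_40k_cityscapes \
#       configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py \
#       $CHECKPOINT_DIR/fcn_r50-d8_512x1024_40k_cityscapes.pth \
#       --eval mIoU \
#       --work-dir work_dirs/benchmark_evaluation/fcn_r50-d8_512x1024_40k_cityscapes \
#       --options dist_params.port=28171 &
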
def main():
    args = parse_args()
    if args.out:
        out_suffix = args.out.split('.')[-1]
        assert args.out.endswith('.sh'), \
            f'Expected out file path suffix is .sh, but got .{out_suffix}'

    # The generated script takes the slurm partition and the checkpoint
    # root directory as its two positional arguments.
    commands = []
    partition_name = 'PARTITION=$1'
    commands.append(partition_name)
    commands.append('\n')

    checkpoint_root = 'CHECKPOINT_DIR=$2'
    commands.append(checkpoint_root)
    commands.append('\n')

    script_name = osp.join('tools', 'slurm_test.sh')
    port = args.port
    work_dir = args.work_dir

    cfg = Config.fromfile(args.config)

    for model_key in cfg:
        model_infos = cfg[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            print('processing: ', model_info['config'])
            model_test_dict = process_model_info(model_info, work_dir)
            create_test_bash_info(commands, model_test_dict, port,
                                  script_name, '$PARTITION')
            # Use a distinct dist port per job so parallel tests don't clash.
            port += 1

    command_str = ''.join(commands)
    if args.out:
        with open(args.out, 'w') as f:
            f.write(command_str + '\n')


if __name__ == '__main__':
    main()
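# The resulting .sh file is then run with the partition name and checkpoint
# root directory as its two arguments (both values below are placeholders):
#
#   bash .dev/benchmark_evaluation.sh my_partition /path/to/checkpoints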