mmsegmentation/.dev/gather_benchmark_train_results.py
sennnnn d35fbbdb47
[Enhancement] Add Dev tools to boost develop (#798)
* Modify default work dir when training.

* Refactor gather_models.py.

* Add train and test matching list.

* Regression benchmark list.

* Change lowercase readme names to uppercase README names.

* Add url check tool and model inference test tool.

* Modify tool name.

* Support duplicate mode of log json url check.

* Add regression benchmark evaluation automatic tool.

* Add train script generator.

* Only support script running.

* Add evaluation results gather.

* Add executable permission.

* Automatically make checkpoint root folder.

* Modify gather results save path.

* Coarse-grained train results gather tool.

* Complete benchmark train script.

* Make some minor modifications.

* Fix checkpoint urls.

* Fix unet checkpoint urls.

* Fix fast scnn & fcn checkpoint url.

* Fix fast scnn checkpoint urls.

* Fix fast scnn url.

* Add differential results calculation.

* Add differential results of regression benchmark train results.

* Add an extra argument to select model.

* Update nonlocal_net & hrnet checkpoint url.

* Fix checkpoint url of hrnet, fix some tta evaluation results, and modify gather models tool.

* Modify fast scnn checkpoint url.

* Resolve new comments.

* Fix url check status code bug.

* Resolve some comments.

* Modify train scripts generator.

* Modify work_dir of regression benchmark results.

* Modify model gather tool.
2021-09-02 09:44:51 -07:00


import argparse
import glob
import os.path as osp

import mmcv
from gather_models import get_final_results
from mmcv import Config


def parse_args():
    parser = argparse.ArgumentParser(
        description='Gather benchmarked models train results')
    parser.add_argument('config', help='test config file path')
    parser.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    parser.add_argument(
        '--out',
        type=str,
        default='benchmark_train_info.json',
        help='output path of gathered metrics to be stored')
    args = parser.parse_args()
    return args
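

# The benchmark config consumed below is expected to map each model name to a
# dict (or a list of dicts) recording the trained config path and the
# previously reported metrics. The snippet here is a hypothetical sketch
# inferred from how `model_info` is accessed in the main loop, not an excerpt
# of the real benchmark config; the config path and mIoU value are
# illustrative only:
#
#   fcn = dict(
#       config='configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py',
#       metric=dict(mIoU=72.25),
#   )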


if __name__ == '__main__':
    args = parse_args()

    root_path = args.root
    metrics_out = args.out

    evaluation_cfg = Config.fromfile(args.config)

    result_dict = {}
    for model_key in evaluation_cfg:
        model_infos = evaluation_cfg[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            config = model_info['config']

            # benchmark train dir
            model_name = osp.split(osp.dirname(config))[1]
            config_name = osp.splitext(osp.basename(config))[0]
            exp_dir = osp.join(root_path, model_name, config_name)
            if not osp.exists(exp_dir):
                print(f'{config} has no training directory {exp_dir}')
                continue

            # parse config
            cfg = mmcv.Config.fromfile(config)
            total_iters = cfg.runner.max_iters
            exp_metric = cfg.evaluation.metric
            if not isinstance(exp_metric, list):
                exp_metric = [exp_metric]

            # determine whether total_iters ckpt exists
            ckpt_path = f'iter_{total_iters}.pth'
            if not osp.exists(osp.join(exp_dir, ckpt_path)):
                print(f'{config} has no final checkpoint {ckpt_path}')
                continue

            # only the last log json counts
            log_json_path = list(
                sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1]

            # extract metric value
            model_performance = get_final_results(log_json_path, total_iters)
            if model_performance is None:
                print(f'log file error: {log_json_path}')
                continue

            differential_results = dict()
            old_results = dict()
            new_results = dict()
            for metric_key in model_performance:
                if metric_key in ['mIoU']:
                    metric = round(model_performance[metric_key] * 100, 2)
                    old_metric = model_info['metric'][metric_key]
                    old_results[metric_key] = old_metric
                    new_results[metric_key] = metric
                    differential = metric - old_metric
                    flag = '+' if differential > 0 else '-'
                    differential_results[
                        metric_key] = f'{flag}{abs(differential):.2f}'
            result_dict[config] = dict(
                differential_results=differential_results,
                old_results=old_results,
                new_results=new_results,
            )
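
    # Each gathered record pairs the previously reported mIoU with the newly
    # trained one and a signed difference string. An illustrative entry
    # (values made up, not real benchmark numbers) would look like:
    #   'configs/fcn/fcn_xxx.py': {
    #       'old_results': {'mIoU': 72.25},
    #       'new_results': {'mIoU': 72.61},
    #       'differential_results': {'mIoU': '+0.36'},
    #   }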
    # save or print results
    if metrics_out:
        mmcv.dump(result_dict, metrics_out, indent=4)
    print('===================================')
    for config_name, metrics in result_dict.items():
        print(config_name, metrics)
    print('===================================')
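
# Example invocation (the benchmark config filename and work dir below are
# placeholders, not paths confirmed by this file):
#   python .dev/gather_benchmark_train_results.py \
#       .dev/benchmark_evaluation.py work_dirs/benchmark_train \
#       --out benchmark_train_info.json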