log cmd info for regression test (#1146)

* add log to regression test

* update for win

* update mmseg reg yml
RunningLeon 2022-10-10 16:23:24 +08:00 committed by GitHub
parent 76b837194f
commit 41fcc2242f
2 changed files with 75 additions and 61 deletions

File 1 of 2: mmseg regression test config (YAML)

@@ -211,8 +211,8 @@ models:
     model_configs:
       - configs/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes.py
     pipelines:
-      - *pipeline_ort_dynamic_fp32
-      - *pipeline_trt_dynamic_fp16
+      - *pipeline_ort_static_fp32
+      - *pipeline_trt_static_fp16
       - *pipeline_ncnn_static_fp32
       - *pipeline_ts_fp32
@@ -275,7 +275,7 @@ models:
       - configs/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes.py
     pipelines:
       - *pipeline_ort_dynamic_fp32
-      - *pipeline_trt_dynamic_fp16
+      - *pipeline_trt_dynamic_fp32
       - *pipeline_ncnn_static_fp32
       - *pipeline_openvino_dynamic_fp32
       - *pipeline_ts_fp32
@@ -325,7 +325,7 @@ models:
       - configs/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes.py
     pipelines:
       - *pipeline_ort_dynamic_fp32
-      - *pipeline_trt_dynamic_fp16
+      - *pipeline_trt_dynamic_fp32
       - *pipeline_ncnn_static_fp32
       - *pipeline_openvino_dynamic_fp32
       - *pipeline_ts_fp32
@@ -345,7 +345,7 @@ models:
       - configs/sem_fpn/fpn_r50_512x1024_80k_cityscapes.py
     pipelines:
       - *pipeline_ort_dynamic_fp32
-      - *pipeline_trt_dynamic_fp16
+      - *pipeline_trt_dynamic_fp32
       - *pipeline_ncnn_static_fp32
       - *pipeline_openvino_dynamic_fp32
       - *pipeline_ts_fp32
@@ -374,7 +374,6 @@ models:
   - name: Segmenter
     metafile: configs/segmenter/segmenter.yml
     model_configs:
-      - configs/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k.py
       - configs/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k.py
     pipelines:
       - *pipeline_ort_static_fp32_512x512
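
The *pipeline_... entries above are YAML aliases: each one re-uses a pipeline mapping that is defined once earlier in the same file with a matching &pipeline_... anchor. A minimal, self-contained Python/PyYAML sketch of that mechanism (the anchor name and fields below are invented for illustration, not copied from the mmseg yml):

import yaml  # assumes PyYAML is installed

example = """
pipeline_ort_static_fp32: &pipeline_ort_static_fp32   # anchor: defined once
  backend: onnxruntime
  precision: fp32

models:
  - name: SomeModel                                    # invented model name
    pipelines:
      - *pipeline_ort_static_fp32                      # alias: re-used here
"""

cfg = yaml.safe_load(example)
# The alias expands to the full mapping defined at the anchor.
print(cfg['models'][0]['pipelines'][0])
# -> {'backend': 'onnxruntime', 'precision': 'fp32'}

Switching an entry from *pipeline_ort_dynamic_fp32 to *pipeline_ort_static_fp32, as this commit does for APCNet, therefore just points the model at a different predefined deployment pipeline without redefining anything.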

File 2 of 2: regression test runner script (Python)

@@ -4,6 +4,7 @@ import logging
 import subprocess
 from collections import OrderedDict
 from pathlib import Path
+from typing import List

 import mmcv
 import openpyxl
@@ -519,6 +520,53 @@ def get_info_from_log_file(info_type: str, log_path: Path,
     return info_value


+def run_cmd(cmd_lines: List[str], log_path: Path):
+    """Run a command in a subprocess and save its output to a log file.
+
+    Args:
+        cmd_lines (list[str]): A command in multiple-line style.
+        log_path (Path): Path to the log file.
+
+    Returns:
+        int: error code.
+    """
+    import platform
+    system = platform.system().lower()
+    if system == 'windows':
+        # PowerShell uses the backtick as its line-continuation character.
+        sep = r'`'
+    else:  # 'Linux', 'Darwin'
+        sep = '\\'
+    cmd_for_run = ' '.join(cmd_lines)
+    cmd_for_log = f' {sep}\n'.join(cmd_lines) + '\n'
+    parent_path = log_path.parent
+    if not parent_path.exists():
+        parent_path.mkdir(parents=True, exist_ok=True)
+
+    logger = get_root_logger()
+    logger.info(100 * '-')
+    logger.info(f'Start running cmd\n{cmd_for_log}')
+    logger.info(f'Logging log to \n{log_path}')
+
+    with open(log_path, 'w', encoding='utf-8') as file_handler:
+        # write cmd
+        file_handler.write(f'Command:\n{cmd_for_log}\n')
+        file_handler.flush()
+        process_res = subprocess.Popen(
+            cmd_for_run,
+            cwd=str(Path(__file__).absolute().parent.parent),
+            shell=True,
+            stdout=file_handler,
+            stderr=file_handler)
+        process_res.wait()
+        return_code = process_res.returncode
+
+    if return_code != 0:
+        logger.error(f'Got shell return code={return_code}')
+        with open(log_path, 'r') as f:
+            content = f.read()
+        logger.error(f'Log error message\n{content}')
+    return return_code
+
+
 def compare_metric(metric_value: float, metric_name: str, pytorch_metric: dict,
                    metric_info: dict):
     """Compare metric value with the pytorch metric value and the tolerance.
@@ -654,26 +702,18 @@ def get_backend_fps_metric(deploy_cfg_path: str, model_cfg_path: Path,
         report_txt_path (Path): report txt save path.
         model_name (str): Name of model in test yaml.
     """
-    cmd_str = 'python3 tools/test.py ' \
-              f'{deploy_cfg_path} ' \
-              f'{str(model_cfg_path.absolute())} ' \
-              f'--model {convert_checkpoint_path} ' \
-              f'--log2file "{log_path}" ' \
-              f'--speed-test ' \
-              f'--device {device_type} '
+    cmd_lines = [
+        'python3 tools/test.py', f'{deploy_cfg_path}',
+        f'{str(model_cfg_path.absolute())}',
+        f'--model {convert_checkpoint_path}', f'--device {device_type}'
+    ]
     codebase_name = get_codebase(str(deploy_cfg_path)).value
     if codebase_name != 'mmedit':
         # mmedit dont --metric
-        cmd_str += f'--metrics {eval_name} '
+        cmd_lines += [f'--metrics {eval_name}']
-    logger.info(f'Process cmd = {cmd_str}')

     # Test backend
-    shell_res = subprocess.run(
-        cmd_str, cwd=str(Path(__file__).absolute().parent.parent),
-        shell=True).returncode
-    logger.info(f'Got shell_res = {shell_res}')
+    return_code = run_cmd(cmd_lines, log_path)

     metric_key = ''
     metric_name = ''
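
With the helper in place, call sites like the one above only assemble the fragment list and hand it to run_cmd together with a log path. A hedged sketch of that pattern, assuming run_cmd from the diff above is in scope and using invented placeholder paths:

from pathlib import Path

cmd_lines = [
    'python3 tools/test.py',
    'path/to/deploy_cfg.py',           # placeholder deploy config
    'path/to/model_cfg.py',            # placeholder model config
    '--model path/to/end2end.onnx',    # placeholder converted model
    '--device cpu',
    '--metrics mIoU',
]
return_code = run_cmd(cmd_lines, Path('work_dir/test_backend.log'))
if return_code != 0:
    # stdout/stderr of the failed command were already captured in the log file.
    print('backend test failed, see work_dir/test_backend.log')
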
@@ -689,7 +729,7 @@ def get_backend_fps_metric(deploy_cfg_path: str, model_cfg_path: Path,
     logger.info(f'Got metric_key = {metric_key}')

     fps, metric_list, test_pass = \
-        get_fps_metric(shell_res, pytorch_metric, metric_key, metric_name,
+        get_fps_metric(return_code, pytorch_metric, metric_key, metric_name,
                        log_path, metrics_eval_list, metric_info, logger)

     # update useless metric
@@ -875,54 +915,29 @@ def get_backend_result(pipeline_info: dict, model_cfg_path: Path,
     backend_output_path.mkdir(parents=True, exist_ok=True)

     # convert cmd string
-    cmd_str = 'python3 ./tools/deploy.py ' \
-              f'{str(deploy_cfg_path.absolute().resolve())} ' \
-              f'{str(model_cfg_path.absolute().resolve())} ' \
-              f'"{str(checkpoint_path.absolute().resolve())}" ' \
-              f'"{input_img_path}" ' \
-              f'--work-dir "{backend_output_path}" ' \
-              f'--device {device_type} ' \
-              '--log-level INFO'
+    cmd_lines = [
+        'python3 ./tools/deploy.py',
+        f'{str(deploy_cfg_path.absolute().resolve())}',
+        f'{str(model_cfg_path.absolute().resolve())}',
+        f'"{str(checkpoint_path.absolute().resolve())}"',
+        f'"{input_img_path}"', f'--work-dir "{backend_output_path}"',
+        f'--device {device_type} ', '--log-level INFO'
+    ]
     if sdk_config is not None:
-        cmd_str += ' --dump-info'
+        cmd_lines += ['--dump-info']

     if test_img_path is not None:
-        cmd_str += f' --test-img {test_img_path}'
+        cmd_lines += [f'--test-img {test_img_path}']

     if precision_type == 'int8':
         calib_dataset_cfg = pipeline_info.get('calib_dataset_cfg', None)
         if calib_dataset_cfg is not None:
-            cmd_str += f' --calib-dataset-cfg {calib_dataset_cfg}'
+            cmd_lines += [f'--calib-dataset-cfg {calib_dataset_cfg}']

-    logger.info(f'Process cmd = {cmd_str}')
-
-    convert_result = False
-    convert_log_path = backend_output_path.joinpath('convert_log.log')
-    logger.info(f'Logging conversion log to {convert_log_path} ...')
-    file_handler = open(convert_log_path, 'w', encoding='utf-8')
-    try:
-        # Convert the model to specific backend
-        process_res = subprocess.Popen(
-            cmd_str,
-            cwd=str(Path(__file__).absolute().parent.parent),
-            shell=True,
-            stdout=file_handler,
-            stderr=file_handler)
-        process_res.wait()
-        logger.info(f'Got shell_res = {process_res.returncode}')
-        # check if converted successes or not.
-        if process_res.returncode == 0:
-            convert_result = True
-        else:
-            convert_result = False
-    except Exception as e:
-        print(f'process convert error: {e}')
-    finally:
-        file_handler.close()
+    convert_log_path = backend_output_path.joinpath('convert.log')
+    return_code = run_cmd(cmd_lines, convert_log_path)
+    convert_result = return_code == 0
     logger.info(f'Got convert_result = {convert_result}')

     if isinstance(backend_file_name, list):