benchmark
parent
ef1d80f067
commit
253c7d82b1
|
@ -0,0 +1,346 @@
|
|||
# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import traceback
|
||||
|
||||
|
||||
def parse_args():
    """Parse command-line options for the benchmark log analyzer.

    Returns:
        argparse.Namespace: the parsed arguments. ``separator`` is
        normalized so that the literal string "None" means "split on
        whitespace" (the value ``None``).
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--filename", type=str, help="The name of log which need to analysis.")
    parser.add_argument(
        "--log_with_profiler",
        type=str,
        help="The path of train log with profiler")
    parser.add_argument(
        "--profiler_path", type=str, help="The path of profiler timeline log.")
    parser.add_argument(
        "--keyword", type=str, help="Keyword to specify analysis data")
    parser.add_argument(
        "--separator",
        type=str,
        default=None,
        help="Separator of different field in log")
    parser.add_argument(
        '--position', type=int, default=None, help='The position of data field')
    parser.add_argument(
        '--range',
        type=str,
        default="",
        help='The range of data field to intercept')
    # BUGFIX: default=0 so that an omitted --base_batch_size reaches the
    # graceful "should larger than 0" path in analysis() instead of raising
    # TypeError on `None <= 0` (Python 3).
    parser.add_argument(
        '--base_batch_size', type=int, default=0, help='base_batch size on gpu')
    parser.add_argument(
        '--skip_steps',
        type=int,
        default=0,
        help='The number of steps to be skipped')
    parser.add_argument(
        '--model_mode',
        type=int,
        default=-1,
        help='Analysis mode, default value is -1')
    parser.add_argument('--ips_unit', type=str, default=None, help='IPS unit')
    # NOTE(review): default is the int 0 although type=str; argparse applies
    # `type` only to values supplied on the command line, so the default
    # stays an int. Kept as-is so the downstream JSON payload is unchanged.
    parser.add_argument(
        '--model_name',
        type=str,
        default=0,
        help='training model_name, transformer_base')
    parser.add_argument(
        '--mission_name', type=str, default=0, help='training mission name')
    parser.add_argument(
        '--direction_id', type=int, default=0, help='training direction_id')
    parser.add_argument(
        '--run_mode',
        type=str,
        default="sp",
        help='multi process or single process')
    parser.add_argument(
        '--index',
        type=int,
        default=1,
        help='{1: speed, 2:mem, 3:profiler, 6:max_batch_size}')
    parser.add_argument(
        '--gpu_num', type=int, default=1, help='nums of training gpus')
    args = parser.parse_args()
    # Allow passing the literal string "None" to mean whitespace splitting.
    args.separator = None if args.separator == "None" else args.separator
    return args
|
||||
|
||||
|
||||
def _is_number(num):
|
||||
pattern = re.compile(r'^[-+]?[-0-9]\d*\.\d*|[-+]?\.?[0-9]\d*$')
|
||||
result = pattern.match(num)
|
||||
if result:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
class TimeAnalyzer(object):
    """Extract numeric records following a keyword in a training log and
    compute throughput (IPS/FPS) statistics from them.
    """

    def __init__(self,
                 filename,
                 keyword=None,
                 separator=None,
                 position=None,
                 range="-1"):
        """
        Args:
            filename (str): path of the log file to parse.
            keyword (str): keyword marking the lines that carry data.
            separator (str|None): field separator; None splits on whitespace.
            position (int|None): index of the data field in the line; when
                None, the field right after the keyword is used.
            range (str): slice applied to the picked field — a single number
                "n" means [:n] (the default "-1" strips the last character,
                e.g. a trailing "%"), "a:b" means [a:b], "" keeps everything.
                NOTE: the name shadows the builtin ``range``; kept for
                backward compatibility with keyword callers.
        """
        if filename is None:
            raise Exception("Please specify the filename!")

        if keyword is None:
            raise Exception("Please specify the keyword!")

        self.filename = filename
        self.keyword = keyword
        self.separator = separator
        self.position = position
        self.range = range
        self.records = None
        self._distil()

    def _distil(self):
        """Scan the log file and fill ``self.records`` with float values."""
        self.records = []
        with open(self.filename, "r") as f_object:
            lines = f_object.readlines()
            for line in lines:
                if self.keyword not in line:
                    continue
                try:
                    result = None

                    # Distil the string from a line.
                    line = line.strip()
                    line_words = line.split(
                        self.separator) if self.separator else line.split()
                    # BUGFIX: was `if args.position:` — it read the module
                    # global instead of this instance's position, so
                    # per-instance positions (e.g. the profiler analyzers)
                    # were silently ignored.
                    if self.position:
                        result = line_words[self.position]
                    else:
                        # Distil the string following the keyword.
                        for i in range(len(line_words) - 1):
                            if line_words[i] == self.keyword:
                                result = line_words[i + 1]
                                break

                    # Distil the result from the picked string.
                    if not self.range:
                        result = result[0:]
                    elif _is_number(self.range):
                        result = result[0:int(self.range)]
                    else:
                        result = result[int(self.range.split(":")[0]):int(
                            self.range.split(":")[1])]
                    self.records.append(float(result))
                except Exception as exc:
                    # Best effort: report the malformed line and keep going.
                    print("line is: {}; separator={}; position={}".format(
                        line, self.separator, self.position))

        print("Extract {} records: separator={}; position={}".format(
            len(self.records), self.separator, self.position))

    def _get_fps(self,
                 mode,
                 batch_size,
                 gpu_num,
                 avg_of_records,
                 run_mode,
                 unit=None):
        """Convert an averaged record into a throughput value.

        Args:
            mode (int): analysis mode, see ``--model_mode``.
            batch_size (int): per-device batch size.
            gpu_num (int): number of training GPUs.
            avg_of_records (float): mean of the extracted records.
            run_mode (str): 'sp' (single process) or 'mp' (multi process).
            unit (str|None): throughput unit; required when mode == -1.

        Returns:
            (float, str): throughput and its unit.

        Raises:
            ValueError: if ``mode`` is not one of -1, 0, 1, 2, 3, 4.
        """
        if mode == -1 and run_mode == 'sp':
            assert unit, "Please set the unit when mode is -1."
            fps = gpu_num * avg_of_records
        elif mode == -1 and run_mode == 'mp':
            assert unit, "Please set the unit when mode is -1."
            fps = gpu_num * avg_of_records  #temporarily, not used now
            print("------------this is mp")
        elif mode == 0:
            # s/step -> samples/s
            fps = (batch_size * gpu_num) / avg_of_records
            unit = "samples/s"
        elif mode == 1:
            # steps/s -> steps/s
            fps = avg_of_records
            unit = "steps/s"
        elif mode == 2:
            # s/step -> steps/s
            fps = 1 / avg_of_records
            unit = "steps/s"
        elif mode == 3:
            # steps/s -> samples/s
            fps = batch_size * gpu_num * avg_of_records
            unit = "samples/s"
        elif mode == 4:
            # s/epoch -> s/epoch
            fps = avg_of_records
            unit = "s/epoch"
        else:
            # BUGFIX: the exception object was created but never raised,
            # which then surfaced as a confusing NameError on ``fps``.
            raise ValueError("Unsupported analysis mode.")

        return fps, unit

    def analysis(self,
                 batch_size,
                 gpu_num=1,
                 skip_steps=0,
                 mode=-1,
                 run_mode='sp',
                 unit=None):
        """Compute and print throughput statistics over ``self.records``.

        Returns:
            (float, str): throughput rounded to 3 decimals — computed after
            skipping the first ``skip_steps`` records — and its unit;
            ``(0, '')`` when the input is unusable.
        """
        if batch_size <= 0:
            print("base_batch_size should larger than 0.")
            return 0, ''

        if len(
                self.records
        ) <= skip_steps:  # to address the condition which item of log equals to skip_steps
            print("no records")
            return 0, ''

        sum_of_records = 0
        sum_of_records_skipped = 0
        skip_min = self.records[skip_steps]
        skip_max = self.records[skip_steps]

        count = len(self.records)
        for i in range(count):
            sum_of_records += self.records[i]
            if i >= skip_steps:
                sum_of_records_skipped += self.records[i]
                if self.records[i] < skip_min:
                    skip_min = self.records[i]
                if self.records[i] > skip_max:
                    skip_max = self.records[i]

        avg_of_records = sum_of_records / float(count)
        # Safe: the early return above guarantees count > skip_steps.
        avg_of_records_skipped = sum_of_records_skipped / float(count -
                                                                skip_steps)

        fps, fps_unit = self._get_fps(mode, batch_size, gpu_num, avg_of_records,
                                      run_mode, unit)
        fps_skipped, _ = self._get_fps(mode, batch_size, gpu_num,
                                       avg_of_records_skipped, run_mode, unit)
        if mode == -1:
            print("average ips of %d steps, skip 0 step:" % count)
            print("\tAvg: %.3f %s" % (avg_of_records, fps_unit))
            print("\tFPS: %.3f %s" % (fps, fps_unit))
            if skip_steps > 0:
                print("average ips of %d steps, skip %d steps:" %
                      (count, skip_steps))
                print("\tAvg: %.3f %s" % (avg_of_records_skipped, fps_unit))
                print("\tMin: %.3f %s" % (skip_min, fps_unit))
                print("\tMax: %.3f %s" % (skip_max, fps_unit))
                print("\tFPS: %.3f %s" % (fps_skipped, fps_unit))
        elif mode == 1 or mode == 3:
            print("average latency of %d steps, skip 0 step:" % count)
            print("\tAvg: %.3f steps/s" % avg_of_records)
            print("\tFPS: %.3f %s" % (fps, fps_unit))
            if skip_steps > 0:
                print("average latency of %d steps, skip %d steps:" %
                      (count, skip_steps))
                print("\tAvg: %.3f steps/s" % avg_of_records_skipped)
                print("\tMin: %.3f steps/s" % skip_min)
                print("\tMax: %.3f steps/s" % skip_max)
                print("\tFPS: %.3f %s" % (fps_skipped, fps_unit))
        elif mode == 0 or mode == 2:
            print("average latency of %d steps, skip 0 step:" % count)
            print("\tAvg: %.3f s/step" % avg_of_records)
            print("\tFPS: %.3f %s" % (fps, fps_unit))
            if skip_steps > 0:
                print("average latency of %d steps, skip %d steps:" %
                      (count, skip_steps))
                print("\tAvg: %.3f s/step" % avg_of_records_skipped)
                print("\tMin: %.3f s/step" % skip_min)
                print("\tMax: %.3f s/step" % skip_max)
                print("\tFPS: %.3f %s" % (fps_skipped, fps_unit))

        return round(fps_skipped, 3), fps_unit
|
||||
|
||||
|
||||
if __name__ == "__main__":
    args = parse_args()
    # run_info is the payload that downstream tooling inserts into the
    # benchmark database; it is always dumped as one JSON line at the end.
    run_info = dict()
    run_info["log_file"] = args.filename
    run_info["model_name"] = args.model_name
    run_info["mission_name"] = args.mission_name
    run_info["direction_id"] = args.direction_id
    run_info["run_mode"] = args.run_mode
    run_info["index"] = args.index
    run_info["gpu_num"] = args.gpu_num
    run_info["FINAL_RESULT"] = 0
    run_info["JOB_FAIL_FLAG"] = 0

    try:
        if args.index == 1:  # speed (IPS) analysis
            if args.gpu_num == 1:
                run_info["log_with_profiler"] = args.log_with_profiler
                run_info["profiler_path"] = args.profiler_path
            analyzer = TimeAnalyzer(args.filename, args.keyword, args.separator,
                                    args.position, args.range)
            run_info["FINAL_RESULT"], run_info["UNIT"] = analyzer.analysis(
                batch_size=args.base_batch_size,
                gpu_num=args.gpu_num,
                skip_steps=args.skip_steps,
                mode=args.model_mode,
                run_mode=args.run_mode,
                unit=args.ips_unit)
            # job_fail_flag is exported by the training shell script; a
            # missing/non-numeric value or a zero result marks the job failed.
            try:
                if int(os.getenv('job_fail_flag')) == 1 or int(run_info[
                        "FINAL_RESULT"]) == 0:
                    run_info["JOB_FAIL_FLAG"] = 1
            except (TypeError, ValueError):
                # BUGFIX: narrowed from a bare `except:` — only the expected
                # int(None)/int("garbage") failures are swallowed here.
                pass
        elif args.index == 3:  # profiler analysis
            run_info["FINAL_RESULT"] = {}
            # (output key, log keyword, field position, range). range ""
            # keeps the whole field; the default "-1" drops the last
            # character (the trailing "%" of a ratio field).
            profiler_fields = (
                ("Framework_Total", 'Framework overhead', 3, ''),
                ("Framework_Ratio", 'Framework overhead', 5, "-1"),
                ("ComputationTime_Total", 'Computation time', 3, ''),
                ("GpuMemcpy_Total", 'GpuMemcpy Calls', 4, ''),
                ("GpuMemcpy_Ratio", 'GpuMemcpy Calls', 6, "-1"),
                ("GpuMemcpyAsync_Total", 'GpuMemcpyAsync Calls', 4, ''),
                ("GpuMemcpySync_Total", 'GpuMemcpySync Calls', 4, ''),
            )
            for key, log_keyword, position, field_range in profiler_fields:
                records = TimeAnalyzer(args.filename, log_keyword, None,
                                       position, field_range).records
                run_info["FINAL_RESULT"][key] = records[0] if records else 0
        else:
            print("Not support!")
    except Exception:
        traceback.print_exc()
    # This line is required: the database import reads the JSON from the log.
    print(json.dumps(run_info))
|
|
@ -0,0 +1,30 @@
|
|||
|
||||
# PaddleOCR DB/EAST/PSE 算法训练benchmark测试
|
||||
|
||||
PaddleOCR/benchmark目录下的文件用于获取并分析训练日志。
|
||||
训练采用icdar2015数据集,包括1000张训练图像和500张测试图像。模型配置采用resnet18_vd作为backbone,分别训练batch_size=8和batch_size=16的情况。
|
||||
|
||||
## 运行训练benchmark
|
||||
|
||||
benchmark/run_det.sh 中包含了四个过程:
|
||||
- 安装依赖
|
||||
- 下载数据
|
||||
- 执行训练
|
||||
- 日志分析获取IPS
|
||||
|
||||
在执行训练部分,会执行单机单卡(默认0号卡)和单机多卡训练,并分别执行batch_size=8和batch_size=16的情况。所以执行完后,每种模型会得到4个日志文件。
|
||||
|
||||
run_det.sh 执行方式如下:
|
||||
|
||||
```
|
||||
# cd PaddleOCR/
|
||||
bash benchmark/run_det.sh
|
||||
```
|
||||
|
||||
以DB为例,将得到四个日志文件,如下:
|
||||
```
|
||||
det_res18_db_v2.0_sp_bs16_fp32_1
|
||||
det_res18_db_v2.0_sp_bs8_fp32_1
|
||||
det_res18_db_v2.0_mp_bs16_fp32_1
|
||||
det_res18_db_v2.0_mp_bs8_fp32_1
|
||||
```
|
|
@ -0,0 +1,61 @@
|
|||
#!/usr/bin/env bash
# Trace each command (-x) and abort on the first failing command (-e).
set -xe
# Usage example: CUDA_VISIBLE_DEVICES=0 bash run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 500 ${model_mode}
# Parameter descriptions are given inside _set_params below.
|
||||
# Read the positional parameters and derive the global variables consumed by
# _train and by the shared run_model.sh / analysis.py tooling.
function _set_params(){
    run_mode=${1:-"sp"}         # sp: single-GPU | mp: multi-GPU
    batch_size=${2:-"64"}
    fp_item=${3:-"fp32"}        # fp32|fp16
    max_epoch=${4:-"10"}        # optional; lower it to stop the run early
    model_item=${5:-"model_item"}
    run_log_path=${TRAIN_LOG_DIR:-$(pwd)}  # TRAIN_LOG_DIR is set by QA later
    # Parameters required by the log-analysis step (analysis.py)
    base_batch_size=${batch_size}
    mission_name="OCR"
    direction_id="0"
    ips_unit="images/sec"
    skip_steps=2                # skip the first (slow warm-up) steps (required)
    keyword="ips:"              # keyword marking the data lines in the log (required)
    index="1"
    model_name=${model_item}_bs${batch_size}_${fp_item}  # model_item matches the yml file name; model_name is what the dashboard displays
    # No need to modify below: derive GPU count and the log file path.
    device=${CUDA_VISIBLE_DEVICES//,/ }
    arr=(${device})
    num_gpu_devices=${#arr[*]}
    log_file=${run_log_path}/${model_item}_${run_mode}_bs${batch_size}_${fp_item}_${num_gpu_devices}
}
|
||||
# Launch training, capture its log to ${log_file}, and export job_fail_flag
# for the downstream log analysis (analysis.py reads it via the environment).
function _train(){
    echo "Train on ${num_gpu_devices} GPUs"
    echo "current CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES, gpus=$num_gpu_devices, batch_size=$batch_size"

    train_cmd="-c configs/det/${model_item}.yml -o Train.loader.batch_size_per_card=${batch_size} Global.epoch_num=${max_epoch} Global.eval_batch_step=[0,20000] Global.print_batch_step=2"
    case ${run_mode} in
    sp)
        train_cmd="python tools/train.py ${train_cmd}"
        ;;
    mp)
        train_cmd="python -m paddle.distributed.launch --log_dir=./mylog --gpus=$CUDA_VISIBLE_DEVICES tools/train.py ${train_cmd}"
        ;;
    *) echo "choose run_mode(sp or mp)"; exit 1;
    esac
    # No need to modify below.
    # BUGFIX: run the command inside the `if` condition. The script uses
    # `set -e`, so the previous `timeout ...; if [ $? -ne 0 ]` pattern
    # aborted the whole script on failure before job_fail_flag could be set,
    # making the FAIL branch unreachable.
    if timeout 15m ${train_cmd} > ${log_file} 2>&1; then
        echo -e "${model_name}, SUCCESS"
        export job_fail_flag=0
    else
        echo -e "${model_name}, FAIL"
        export job_fail_flag=1
    fi

    # In multi-process mode each worker writes its own log; keep rank 0's.
    if [ $run_mode = "mp" -a -d mylog ]; then
        rm ${log_file}
        cp mylog/workerlog.0 ${log_file}
    fi
}
|
||||
|
||||
# run_model.sh (benchmark repo) parses benchmark-formatted logs with
# analysis.py; download from
# https://github.com/PaddlePaddle/benchmark/blob/master/scripts/run_model.sh
# To only produce training logs locally, comment out `source` and `_run`;
# both must be enabled when submitting.
source ${BENCHMARK_ROOT}/scripts/run_model.sh
_set_params "$@"   # BUGFIX: quote "$@" so arguments containing spaces survive word splitting
#_train            # uncomment to only produce the training log without analysis
_run               # defined in run_model.sh; it invokes _train
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
#!/bin/bash
# Reproducible performance benchmark; expected environment is the standard
# docker image paddlepaddle/paddle:latest-gpu-cuda10.1-cudnn7
# (paddle=2.1.2, py=37), executed from ./PaddleOCR
# 1. Install the model's dependencies (note any enabled optimizations).
log_path=${LOG_PATH_INDEX_DIR:-$(pwd)}
python -m pip install -r requirements.txt
# 2. Fetch the dataset and the pretrained models.
wget -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar && cd train_data && tar xf icdar2015.tar && cd ../
wget -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vd_pretrained.pdparams
wget -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet18_vd_pretrained.pdparams
wget -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vd_ssld_pretrained.pdparams
# 3. Run every model/precision/batch-size combination (steps 1 and 2 would
#    move into each model's script if batching is inconvenient).

model_mode_list=(det_res18_db_v2.0 det_r50_vd_east det_r50_vd_pse)
fp_item_list=(fp32)
for model_mode in ${model_mode_list[@]}; do
    for fp_item in ${fp_item_list[@]}; do
        # EAST is only benchmarked at bs=16; the others at bs=8 and bs=16.
        if [ ${model_mode} == "det_r50_vd_east" ]; then
            bs_list=(16)
        else
            bs_list=(8 16)
        fi
        for bs_item in ${bs_list[@]}; do
            # BUGFIX: the echos referenced the undefined ${model_name}
            # (always expanded empty); the loop variable is ${model_mode}.
            echo "index is speed, 1gpus, begin, ${model_mode}"
            run_mode=sp
            log_name=ocr_${model_mode}_bs${bs_item}_${fp_item}_${run_mode}
            # BUGFIX: `2>&1` must precede the pipe; placed after `tee` it
            # redirected tee's stderr and the command's stderr bypassed the log.
            CUDA_VISIBLE_DEVICES=0 bash benchmark/run_benchmark_det.sh ${run_mode} ${bs_item} ${fp_item} 1 ${model_mode} 2>&1 | tee ${log_path}/${log_name}_speed_1gpus    # (5min)
            sleep 60
            echo "index is speed, 8gpus, run_mode is multi_process, begin, ${model_mode}"
            run_mode=mp
            log_name=ocr_${model_mode}_bs${bs_item}_${fp_item}_${run_mode}
            CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash benchmark/run_benchmark_det.sh ${run_mode} ${bs_item} ${fp_item} 2 ${model_mode} 2>&1 | tee ${log_path}/${log_name}_speed_8gpus8p
            sleep 60
        done
    done
done
|
||||
|
||||
|
||||
|
Loading…
Reference in New Issue