Merge remote-tracking branch 'origin/dygraph' into dygraph
commit 22838d5014
@@ -1,4 +1,5 @@
project(ppocr CXX C)
cmake_minimum_required(VERSION 3.14)

option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF)

@@ -206,13 +207,12 @@ endif()

set(DEPS ${DEPS} ${OpenCV_LIBS})

include(ExternalProject)
include(FetchContent)
include(external-cmake/auto-log.cmake)
include_directories(${CMAKE_CURRENT_BINARY_DIR}/autolog/src/extern_Autolog/auto_log)
include_directories(${FETCHCONTENT_BASE_DIR}/extern_autolog-src)

AUX_SOURCE_DIRECTORY(./src SRCS)
add_executable(${DEMO_NAME} ${SRCS})

target_link_libraries(${DEMO_NAME} ${DEPS})

if (WIN32 AND WITH_MKL)

@@ -1,14 +1,13 @@
find_package(Git REQUIRED)
message("${CMAKE_BUILD_TYPE}")
include(FetchContent)

set(AUTOLOG_REPOSITORY https://github.com/LDOUBLEV/AutoLog.git)
SET(AUTOLOG_INSTALL_DIR ${CMAKE_CURRENT_BINARY_DIR}/install/Autolog)
set(FETCHCONTENT_BASE_DIR "${CMAKE_CURRENT_BINARY_DIR}/third-party")

ExternalProject_Add(
    extern_Autolog
    PREFIX autolog
    GIT_REPOSITORY ${AUTOLOG_REPOSITORY}
    GIT_TAG main
    DOWNLOAD_NO_EXTRACT True
    INSTALL_COMMAND cmake -E echo "Skipping install step."
FetchContent_Declare(
    extern_Autolog
    PREFIX autolog
    GIT_REPOSITORY https://github.com/LDOUBLEV/AutoLog.git
    GIT_TAG main
)
FetchContent_MakeAvailable(extern_Autolog)

@@ -35,6 +35,7 @@
#include <sys/stat.h>

#include <gflags/gflags.h>
#include "auto_log/autolog.h"

DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU.");
DEFINE_int32(gpu_id, 0, "Device id of GPU to execute.");

@ -0,0 +1,133 @@
|
|||
import numpy as np
|
||||
import os
|
||||
import subprocess
|
||||
import json
|
||||
import argparse
|
||||
import glob
|
||||
|
||||
|
||||
def init_args():
|
||||
parser = argparse.ArgumentParser()
|
||||
# params for testing assert allclose
|
||||
parser.add_argument("--atol", type=float, default=1e-3)
|
||||
parser.add_argument("--rtol", type=float, default=1e-3)
|
||||
parser.add_argument("--gt_file", type=str, default="")
|
||||
parser.add_argument("--log_file", type=str, default="")
|
||||
parser.add_argument("--precision", type=str, default="fp32")
|
||||
return parser
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = init_args()
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def run_shell_command(cmd):
|
||||
p = subprocess.Popen(
|
||||
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
|
||||
out, err = p.communicate()
|
||||
|
||||
if p.returncode == 0:
|
||||
return out.decode('utf-8')
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def parser_results_from_log_by_name(log_path, names_list):
|
||||
if not os.path.exists(log_path):
|
||||
raise ValueError("The log file {} does not exists!".format(log_path))
|
||||
|
||||
if names_list is None or len(names_list) < 1:
|
||||
return []
|
||||
|
||||
parser_results = {}
|
||||
for name in names_list:
|
||||
cmd = "grep {} {}".format(name, log_path)
|
||||
outs = run_shell_command(cmd)
|
||||
outs = outs.split("\n")[0]
|
||||
result = outs.split("{}".format(name))[-1]
|
||||
result = json.loads(result)
|
||||
parser_results[name] = result
|
||||
return parser_results
|
||||
|
||||
|
||||
def load_gt_from_file(gt_file):
|
||||
if not os.path.exists(gt_file):
|
||||
raise ValueError("The log file {} does not exists!".format(gt_file))
|
||||
with open(gt_file, 'r') as f:
|
||||
data = f.readlines()
|
||||
f.close()
|
||||
parser_gt = {}
|
||||
for line in data:
|
||||
image_name, result = line.strip("\n").split("\t")
|
||||
result = json.loads(result)
|
||||
parser_gt[image_name] = result
|
||||
return parser_gt
|
||||
|
||||
|
||||
def load_gt_from_txts(gt_file):
|
||||
gt_list = glob.glob(gt_file)
|
||||
gt_collection = {}
|
||||
for gt_f in gt_list:
|
||||
gt_dict = load_gt_from_file(gt_f)
|
||||
basename = os.path.basename(gt_f)
|
||||
if "fp32" in basename:
|
||||
gt_collection["fp32"] = [gt_dict, gt_f]
|
||||
elif "fp16" in basename:
|
||||
gt_collection["fp16"] = [gt_dict, gt_f]
|
||||
elif "int8" in basename:
|
||||
gt_collection["int8"] = [gt_dict, gt_f]
|
||||
else:
|
||||
continue
|
||||
return gt_collection
|
||||
|
||||
|
||||
def collect_predict_from_logs(log_path, key_list):
|
||||
log_list = glob.glob(log_path)
|
||||
pred_collection = {}
|
||||
for log_f in log_list:
|
||||
pred_dict = parser_results_from_log_by_name(log_f, key_list)
|
||||
key = os.path.basename(log_f)
|
||||
pred_collection[key] = pred_dict
|
||||
|
||||
return pred_collection
|
||||
|
||||
|
||||
def testing_assert_allclose(dict_x, dict_y, atol=1e-7, rtol=1e-7):
|
||||
for k in dict_x:
|
||||
np.testing.assert_allclose(
|
||||
np.array(dict_x[k]), np.array(dict_y[k]), atol=atol, rtol=rtol)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Usage:
|
||||
# python3.7 tests/compare_results.py --gt_file=./tests/results/*.txt --log_file=./tests/output/infer_*.log
|
||||
|
||||
args = parse_args()
|
||||
|
||||
gt_collection = load_gt_from_txts(args.gt_file)
|
||||
key_list = gt_collection["fp32"][0].keys()
|
||||
|
||||
pred_collection = collect_predict_from_logs(args.log_file, key_list)
|
||||
for filename in pred_collection.keys():
|
||||
if "fp32" in filename:
|
||||
gt_dict, gt_filename = gt_collection["fp32"]
|
||||
elif "fp16" in filename:
|
||||
gt_dict, gt_filename = gt_collection["fp16"]
|
||||
elif "int8" in filename:
|
||||
gt_dict, gt_filename = gt_collection["int8"]
|
||||
else:
|
||||
continue
|
||||
pred_dict = pred_collection[filename]
|
||||
|
||||
try:
|
||||
testing_assert_allclose(
|
||||
gt_dict, pred_dict, atol=args.atol, rtol=args.rtol)
|
||||
print(
|
||||
"Assert allclose passed! The results of {} and {} are consistent!".
|
||||
format(filename, gt_filename))
|
||||
except Exception as E:
|
||||
print(E)
|
||||
raise ValueError(
|
||||
"The results of {} and the results of {} are inconsistent!".
|
||||
format(filename, gt_filename))
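
Note: this script assumes two kinds of inputs. Ground-truth .txt files hold one record per image, "<image_name>", a tab, and a JSON result, grouped by whether "fp32", "fp16", or "int8" appears in the file name; inference logs must contain, for each image name, a line whose tail after that name parses as JSON, which is what the grep-based parser above recovers. A minimal, self-contained sketch of that contract (file names, box values, and the read_tsv helper are made up for illustration):

import json
import os
import tempfile
import numpy as np

tmp = tempfile.mkdtemp()
boxes = [[[10, 20], [110, 20], [110, 60], [10, 60]]]  # one fake detection box

# Ground-truth file: "<image_name>\t<JSON result>" per line; "fp32" in the
# name assigns it to the fp32 comparison group.
gt_path = os.path.join(tmp, "det_results_gpu_fp32.txt")  # hypothetical name
with open(gt_path, "w") as f:
    f.write("img_10.jpg\t{}\n".format(json.dumps(boxes)))

# Inference log: a line containing the image name followed by its JSON result,
# so "grep img_10.jpg <log>" finds it and the remainder parses as JSON.
log_path = os.path.join(tmp, "infer_gpu_fp32.log")  # hypothetical name
with open(log_path, "w") as f:
    f.write("img_10.jpg\t{}\n".format(json.dumps(boxes)))


def read_tsv(path):
    # Read "<name>\t<JSON>" lines back into a dict, like load_gt_from_file.
    out = {}
    with open(path) as f:
        for line in f:
            name, result = line.rstrip("\n").split("\t")
            out[name] = json.loads(result)
    return out


gt, pred = read_tsv(gt_path), read_tsv(log_path)
for name in gt:
    # The comparison boils down to a per-key allclose check, as in
    # testing_assert_allclose above.
    np.testing.assert_allclose(
        np.array(gt[name]), np.array(pred[name]), atol=1e-3, rtol=1e-3)
print("Assert allclose passed for", list(gt))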
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -93,6 +93,9 @@ def main():
        for key in config["Architecture"]["Models"]:
            config["Architecture"]["Models"][key]["Head"][
                "out_channels"] = char_num
            # just one final tensor needs to be exported for inference
            config["Architecture"]["Models"][key][
                "return_all_feats"] = False
    else:  # base rec model
        config["Architecture"]["Head"]["out_channels"] = char_num
    model = build_model(config["Architecture"])
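
This hunk patches the distillation rec config in place before build_model: every sub-model's Head gets the character count as out_channels, and return_all_feats is switched off so only one final tensor is exported for inference. A self-contained sketch of that dictionary patching (the config below is made up for illustration; real values come from the YAML config and the character dictionary):

# Minimal sketch of the in-place config patching done before export.
char_num = 6625  # illustrative: dictionary size plus blank/sos tokens

config = {
    "Architecture": {
        "algorithm": "Distillation",
        "Models": {
            "Teacher": {"Head": {"name": "CTCHead"}, "return_all_feats": True},
            "Student": {"Head": {"name": "CTCHead"}, "return_all_feats": True},
        },
    }
}

for key in config["Architecture"]["Models"]:
    # every sub-model predicts over the same character set
    config["Architecture"]["Models"][key]["Head"]["out_channels"] = char_num
    # only one final tensor should be exported for inference
    config["Architecture"]["Models"][key]["return_all_feats"] = False

print(config["Architecture"]["Models"]["Student"])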
@@ -30,7 +30,7 @@ from ppocr.utils.logging import get_logger
from ppocr.utils.utility import get_image_file_list, check_and_read_gif
from ppocr.data import create_operators, transform
from ppocr.postprocess import build_post_process

import json
logger = get_logger()

@@ -243,6 +243,7 @@ if __name__ == "__main__":

    if not os.path.exists(draw_img_save):
        os.makedirs(draw_img_save)
    save_results = []
    for image_file in image_file_list:
        img, flag = check_and_read_gif(image_file)
        if not flag:

@@ -256,8 +257,11 @@ if __name__ == "__main__":
        if count > 0:
            total_time += elapse
        count += 1

        logger.info("Predict time of {}: {}".format(image_file, elapse))
        save_pred = os.path.basename(image_file) + "\t" + str(
            json.dumps(np.array(dt_boxes).astype(np.int32).tolist())) + "\n"
        save_results.append(save_pred)
        logger.info(save_pred)
        logger.info("The predict time of {}: {}".format(image_file, elapse))
        src_im = utility.draw_text_det_res(dt_boxes, image_file)
        img_name_pure = os.path.split(image_file)[-1]
        img_path = os.path.join(draw_img_save,

@@ -265,5 +269,8 @@ if __name__ == "__main__":
        cv2.imwrite(img_path, src_im)
        logger.info("The visualized image saved in {}".format(img_path))

    with open(os.path.join(draw_img_save, "det_results.txt"), 'w') as f:
        f.writelines(save_results)
        f.close()
    if args.benchmark:
        text_detector.autolog.report()
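
Each save_pred record written above is one line per image: the image basename, a tab, and the boxes as JSON. The astype(np.int32).tolist() conversion is what makes the numpy boxes JSON-serializable, and the resulting det_results.txt lines have exactly the shape that compare_results.py's load_gt_from_file reads back. A small reproduction with a made-up box (the path and values are illustrative):

import json
import os
import numpy as np

image_file = "doc/imgs_en/img_10.jpg"  # illustrative path
dt_boxes = np.array([[[10.4, 20.2], [110.7, 20.9],
                      [110.1, 60.5], [10.8, 60.3]]])  # fake quad box

# Cast to int32 and convert to plain lists so json.dumps accepts the boxes.
save_pred = os.path.basename(image_file) + "\t" + str(
    json.dumps(np.array(dt_boxes).astype(np.int32).tolist())) + "\n"
print(save_pred, end="")
# -> img_10.jpg	[[[10, 20], [110, 20], [110, 60], [10, 60]]]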
@@ -35,7 +35,7 @@ def init_args():
    parser.add_argument("--use_gpu", type=str2bool, default=True)
    parser.add_argument("--ir_optim", type=str2bool, default=True)
    parser.add_argument("--use_tensorrt", type=str2bool, default=False)
    parser.add_argument("--min_subgraph_size", type=int, default=10)
    parser.add_argument("--min_subgraph_size", type=int, default=15)
    parser.add_argument("--precision", type=str, default="fp32")
    parser.add_argument("--gpu_mem", type=int, default=500)
@@ -121,7 +121,7 @@ def main():
            if len(post_result[key][0]) >= 2:
                rec_info[key] = {
                    "label": post_result[key][0][0],
                    "score": post_result[key][0][1],
                    "score": float(post_result[key][0][1]),
                }
            info = json.dumps(rec_info)
        else:
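
The only change here wraps the recognition score in float(). Post-processing typically returns numpy scalars (e.g. numpy.float32), which json.dumps cannot serialize, so the cast keeps the json.dumps(rec_info) call in this hunk from raising. A minimal reproduction (the score value is made up):

# Why the float() cast matters: numpy scalars are not JSON-serializable.
import json
import numpy as np

score = np.float32(0.9943)  # the kind of value a postprocess step returns

try:
    json.dumps({"score": score})  # raises TypeError without the cast
except TypeError as e:
    print("without float():", e)

print("with float():", json.dumps({"score": float(score)}))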