# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
import cv2
import random
import numpy as np
import paddle


def print_dict(d, logger, delimiter=0):
    """
    Recursively log the contents of a dict, indenting nested keys
    according to their depth in the hierarchy.
    """
    for k, v in sorted(d.items()):
        if isinstance(v, dict):
            logger.info("{}{} : ".format(delimiter * " ", str(k)))
            print_dict(v, logger, delimiter + 4)
        elif isinstance(v, list) and len(v) >= 1 and isinstance(v[0], dict):
            logger.info("{}{} : ".format(delimiter * " ", str(k)))
            for value in v:
                print_dict(value, logger, delimiter + 4)
        else:
            logger.info("{}{} : {}".format(delimiter * " ", k, v))

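# A minimal usage sketch (not part of the original module), assuming a standard
# logging.Logger is available; each nested level is indented by four more spaces:
#
#   logger = logging.getLogger("ppocr")
#   print_dict({"Global": {"epoch_num": 10, "use_gpu": True}}, logger)
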
def get_check_global_params(mode):
    """Return the global config keys that must be present for the given mode."""
    check_params = [
        'use_gpu', 'max_text_length', 'image_shape', 'character_type',
        'loss_type'
    ]
    if mode == "train_eval":
        check_params = check_params + [
            'train_batch_size_per_card', 'test_batch_size_per_card'
        ]
    elif mode == "test":
        check_params = check_params + ['test_batch_size_per_card']
    return check_params


def _check_image_file(path):
    """Return True if the path ends with a supported image (or pdf) extension."""
    img_end = {'jpg', 'bmp', 'png', 'jpeg', 'rgb', 'tif', 'tiff', 'gif', 'pdf'}
    return any([path.lower().endswith(e) for e in img_end])


def get_image_file_list(img_file):
    """Collect a sorted list of image file paths from a single file or a directory."""
    imgs_lists = []
    if img_file is None or not os.path.exists(img_file):
        raise Exception("not found any img file in {}".format(img_file))

    if os.path.isfile(img_file) and _check_image_file(img_file):
        imgs_lists.append(img_file)
    elif os.path.isdir(img_file):
        for single_file in os.listdir(img_file):
            file_path = os.path.join(img_file, single_file)
            if os.path.isfile(file_path) and _check_image_file(file_path):
                imgs_lists.append(file_path)
    if len(imgs_lists) == 0:
        raise Exception("not found any img file in {}".format(img_file))
    imgs_lists = sorted(imgs_lists)
    return imgs_lists

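# A minimal usage sketch (not part of the original module); "./doc/imgs" below is
# an assumed example directory:
#
#   image_paths = get_image_file_list("./doc/imgs")
#   for path in image_paths:
#       img = cv2.imread(path)
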
def binarize_img(img):
    """Binarize a 3-channel BGR image with Otsu thresholding; other shapes pass through."""
    if len(img.shape) == 3 and img.shape[2] == 3:
        # convert to a grayscale image
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # use cv2 Otsu threshold binarization
        _, gray = cv2.threshold(gray, 0, 255,
                                cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        # convert back to 3 channels so downstream code still sees a BGR image
        img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    return img

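# A minimal usage sketch (not part of the original module); "page.jpg" is an
# assumed example file:
#
#   img = cv2.imread("page.jpg")     # BGR, 3 channels
#   bin_img = binarize_img(img)      # still 3 channels, but only 0/255 values
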
def alpha_to_color(img, alpha_color=(255, 255, 255)):
    """Composite a 4-channel BGRA image onto a solid background color (white by default)."""
    if len(img.shape) == 3 and img.shape[2] == 4:
        B, G, R, A = cv2.split(img)
        alpha = A / 255

        # blend each channel with the background color according to the alpha mask
        R = (alpha_color[0] * (1 - alpha) + R * alpha).astype(np.uint8)
        G = (alpha_color[1] * (1 - alpha) + G * alpha).astype(np.uint8)
        B = (alpha_color[2] * (1 - alpha) + B * alpha).astype(np.uint8)

        img = cv2.merge((B, G, R))
    return img

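# A minimal usage sketch (not part of the original module); "logo.png" is an
# assumed example file with an alpha channel:
#
#   rgba = cv2.imread("logo.png", cv2.IMREAD_UNCHANGED)   # 4-channel BGRA
#   bgr = alpha_to_color(rgba)                            # flattened onto white
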
def check_and_read(img_path):
    """
    Read a gif or pdf file.
    Returns a tuple of (image data, gif_flag, pdf_flag); for any other
    extension it returns (None, False, False) and the caller is expected
    to read the file itself.
    """
    if os.path.basename(img_path)[-3:].lower() == 'gif':
        gif = cv2.VideoCapture(img_path)
        ret, frame = gif.read()
        if not ret:
            logger = logging.getLogger('ppocr')
            logger.info(
                "Cannot read {}. This gif image may be corrupted.".format(img_path))
            return None, False, False
        if len(frame.shape) == 2 or frame.shape[-1] == 1:
            frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        imgvalue = frame[:, :, ::-1]
        return imgvalue, True, False
    elif os.path.basename(img_path)[-3:].lower() == 'pdf':
        import fitz
        from PIL import Image
        imgs = []
        with fitz.open(img_path) as pdf:
            for pg in range(0, pdf.page_count):
                page = pdf[pg]
                mat = fitz.Matrix(2, 2)
                pm = page.get_pixmap(matrix=mat, alpha=False)

                # if width or height > 2000 pixels, don't enlarge the image
                if pm.width > 2000 or pm.height > 2000:
                    pm = page.get_pixmap(matrix=fitz.Matrix(1, 1), alpha=False)

                img = Image.frombytes("RGB", [pm.width, pm.height], pm.samples)
                img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
                imgs.append(img)
            return imgs, False, True
    return None, False, False

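# A minimal usage sketch (not part of the original module); `path` stands for any
# image/gif/pdf path:
#
#   data, is_gif, is_pdf = check_and_read(path)
#   if is_pdf:
#       pages = data                 # list of BGR ndarrays, one per page
#   elif is_gif:
#       img = data                   # first gif frame as an ndarray
#   else:
#       img = cv2.imread(path)       # any other format: read it directly
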
def load_vqa_bio_label_maps(label_map_path):
    """
    Build BIO label maps from a plain-text class list (one class per line).
    "O" is always index 0; every other class contributes a "B-" and an "I-" tag.
    Classes named OTHER/OTHERS/IGNORE are folded into "O".
    """
    with open(label_map_path, "r", encoding='utf-8') as fin:
        lines = fin.readlines()
    old_lines = [line.strip() for line in lines]
    lines = ["O"]
    for line in old_lines:
        # "O" is already in lines
        if line.upper() in ["OTHER", "OTHERS", "IGNORE"]:
            continue
        lines.append(line)
    labels = ["O"]
    for line in lines[1:]:
        labels.append("B-" + line)
        labels.append("I-" + line)
    label2id_map = {label.upper(): idx for idx, label in enumerate(labels)}
    id2label_map = {idx: label.upper() for idx, label in enumerate(labels)}
    return label2id_map, id2label_map

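# A minimal usage sketch (not part of the original module); "class_list.txt" is an
# assumed file containing e.g. the lines "HEADER", "QUESTION", "ANSWER":
#
#   label2id, id2label = load_vqa_bio_label_maps("class_list.txt")
#   # label2id -> {"O": 0, "B-HEADER": 1, "I-HEADER": 2, "B-QUESTION": 3, ...}
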
def set_seed(seed=1024):
    """Seed the Python, NumPy and Paddle RNGs for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    paddle.seed(seed)

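# A minimal usage note (not part of the original module): call set_seed() once,
# before building datasets and the model, if run-to-run reproducibility matters:
#
#   set_seed(1024)
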
class AverageMeter:
    """Track the latest value and the running sum / count / average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Reset all statistics to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record a new value `val` observed `n` times and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
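
# A minimal usage sketch (not part of the original module), e.g. tracking an
# average loss across batches in a training loop:
#
#   loss_meter = AverageMeter()
#   for batch in dataloader:          # `dataloader` is an assumed iterable of batches
#       loss = train_step(batch)      # `train_step` is a hypothetical helper
#       loss_meter.update(float(loss), n=len(batch))
#   print(loss_meter.avg)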