commit fix by running pre-commit run -a (#12165)

Branch: pull/12171/head
Author: jzhang533, 2024-05-24 12:12:42 +08:00 (committed by GitHub)
Parent: 3a66efc7bf
Commit: a2ad2124c7
Signature: no known key found for this signature in database (GPG Key ID: B5690EEEBB952194)
57 changed files with 102 additions and 102 deletions
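
Every hunk below makes the same one-line change: the formatter invoked by pre-commit adds a trailing comma after the final **kwargs (or **args, **kargs, *deque()) entry of a signature or call that is already split one argument per line, which is why the stats show exactly 102 additions and 102 deletions, one swapped line per hunk. The repository's pre-commit configuration is not shown on this page; the pattern matches what a Black-style formatter produces, and the names in the sketch below are illustrative rather than taken from the changed files.

# Minimal sketch of the edit applied throughout this commit (illustrative names,
# not from the diff). A trailing comma after **kwargs in a def or a call is
# valid syntax on Python 3.6+.
class ExampleDataset:
    def __init__(
        self,
        data_path,
        transform=None,
        **kwargs,        # before the run this line read "**kwargs" with no comma
    ):
        self.data_path = data_path
        self.transform = transform
        self.extra_args = kwargs   # e.g. load_char_annotation, use_space_char, ...

# The same rule applies to multi-line calls, such as the **sar_args and
# **head_args hunks in MultiHead further down.
dataset = ExampleDataset(
    "some/data/path",            # placeholder path
    transform=None,
    use_space_char=False,        # absorbed into **kwargs
)
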

View File

@ -21,7 +21,7 @@ class ICDAR2015Dataset(BaseDataSet):
filter_keys,
ignore_tags,
transform=None,
**kwargs
**kwargs,
):
super().__init__(
data_path, img_mode, pre_processes, filter_keys, ignore_tags, transform
@ -75,7 +75,7 @@ class DetDataset(BaseDataSet):
filter_keys,
ignore_tags,
transform=None,
**kwargs
**kwargs,
):
self.load_char_annotation = kwargs["load_char_annotation"]
self.expand_one_char = kwargs["expand_one_char"]
@ -138,7 +138,7 @@ class SynthTextDataset(BaseDataSet):
pre_processes,
filter_keys,
transform=None,
**kwargs
**kwargs,
):
self.transform = transform
self.dataRoot = pathlib.Path(data_path)

View File

@ -254,7 +254,7 @@ class CTCLabelDecode(BaseRecLabelDecode):
# character_dict_path=None,
# character_type='ch',
# use_space_char=False,
**kwargs
**kwargs,
):
super(CTCLabelDecode, self).__init__(config)

View File

@ -254,7 +254,7 @@ class CTCLabelDecode(BaseRecLabelDecode):
# character_dict_path=None,
# character_type='ch',
# use_space_char=False,
**kwargs
**kwargs,
):
super(CTCLabelDecode, self).__init__(config)

View File

@ -38,7 +38,7 @@ class DRRGTargets(object):
min_rand_half_height=8.0,
max_rand_half_height=24.0,
jitter_level=0.2,
**kwargs
**kwargs,
):
super().__init__()
self.orientation_thr = orientation_thr

View File

@ -32,7 +32,7 @@ class EASTProcessTrain(object):
background_ratio=0.125,
min_crop_side_ratio=0.1,
min_text_size=10,
**kwargs
**kwargs,
):
self.input_size = image_shape[1]
self.random_scale = np.array([0.5, 1, 2.0, 3.0])

View File

@ -359,7 +359,7 @@ class RandomRotatePolyInstances:
max_angle=10,
pad_with_fixed_color=False,
pad_value=(0, 0, 0),
**kwargs
**kwargs,
):
"""Randomly rotate images and polygon masks.
@ -487,7 +487,7 @@ class SquareResizePad:
pad_ratio=0.6,
pad_with_fixed_color=False,
pad_value=(0, 0, 0),
**kwargs
**kwargs,
):
"""Resize or pad images to be square shape.

View File

@ -53,7 +53,7 @@ class FCENetTargets:
level_size_divisors=(8, 16, 32),
level_proportion_range=((0, 0.25), (0.2, 0.65), (0.55, 1.0)),
orientation_thr=2.0,
**kwargs
**kwargs,
):
super().__init__()
assert isinstance(level_size_divisors, tuple)

View File

@ -589,7 +589,7 @@ class SRNLabelEncode(BaseRecLabelEncode):
max_text_length=25,
character_dict_path=None,
use_space_char=False,
**kwargs
**kwargs,
):
super(SRNLabelEncode, self).__init__(
max_text_length, character_dict_path, use_space_char
@ -638,7 +638,7 @@ class TableLabelEncode(AttnLabelEncode):
merge_no_span_structure=False,
learn_empty_box=False,
loc_reg_num=4,
**kwargs
**kwargs,
):
self.max_text_len = max_text_length
self.lower = False
@ -786,7 +786,7 @@ class TableMasterLabelEncode(TableLabelEncode):
merge_no_span_structure=False,
learn_empty_box=False,
loc_reg_num=4,
**kwargs
**kwargs,
):
super(TableMasterLabelEncode, self).__init__(
max_text_length,
@ -795,7 +795,7 @@ class TableMasterLabelEncode(TableLabelEncode):
merge_no_span_structure,
learn_empty_box,
loc_reg_num,
**kwargs
**kwargs,
)
self.pad_idx = self.dict[self.pad_str]
self.unknown_idx = self.dict[self.unknown_str]
@ -909,7 +909,7 @@ class SATRNLabelEncode(BaseRecLabelEncode):
character_dict_path=None,
use_space_char=False,
lower=False,
**kwargs
**kwargs,
):
super(SATRNLabelEncode, self).__init__(
max_text_length, character_dict_path, use_space_char
@ -1019,7 +1019,7 @@ class VQATokenLabelEncode(object):
order_method=None,
infer_mode=False,
ocr_engine=None,
**kwargs
**kwargs,
):
super(VQATokenLabelEncode, self).__init__()
from paddlenlp.transformers import (
@ -1273,7 +1273,7 @@ class MultiLabelEncode(BaseRecLabelEncode):
character_dict_path=None,
use_space_char=False,
gtc_encode=None,
**kwargs
**kwargs,
):
super(MultiLabelEncode, self).__init__(
max_text_length, character_dict_path, use_space_char
@ -1381,7 +1381,7 @@ class ViTSTRLabelEncode(BaseRecLabelEncode):
character_dict_path=None,
use_space_char=False,
ignore_index=0,
**kwargs
**kwargs,
):
super(ViTSTRLabelEncode, self).__init__(
max_text_length, character_dict_path, use_space_char
@ -1416,7 +1416,7 @@ class ABINetLabelEncode(BaseRecLabelEncode):
character_dict_path=None,
use_space_char=False,
ignore_index=100,
**kwargs
**kwargs,
):
super(ABINetLabelEncode, self).__init__(
max_text_length, character_dict_path, use_space_char
@ -1497,7 +1497,7 @@ class SPINLabelEncode(AttnLabelEncode):
character_dict_path=None,
use_space_char=False,
lower=True,
**kwargs
**kwargs,
):
super(SPINLabelEncode, self).__init__(
max_text_length, character_dict_path, use_space_char
@ -1619,7 +1619,7 @@ class CANLabelEncode(BaseRecLabelEncode):
max_text_length=100,
use_space_char=False,
lower=True,
**kwargs
**kwargs,
):
super(CANLabelEncode, self).__init__(
max_text_length, character_dict_path, use_space_char, lower
@ -1654,7 +1654,7 @@ class CPPDLabelEncode(BaseRecLabelEncode):
use_space_char=False,
ch=False,
ignore_index=100,
**kwargs
**kwargs,
):
super(CPPDLabelEncode, self).__init__(
max_text_length, character_dict_path, use_space_char

View File

@ -465,7 +465,7 @@ class SRResize(object):
min_ratio=1,
mask=False,
infer_mode=False,
**kwargs
**kwargs,
):
self.imgH = imgH
self.imgW = imgW

View File

@ -37,7 +37,7 @@ class PGProcessTrain(object):
min_text_size=4,
max_text_size=512,
point_gather_mode=None,
**kwargs
**kwargs,
):
self.tcl_len = tcl_len
self.max_text_length = max_text_length

View File

@ -133,7 +133,7 @@ class EastRandomCropData(object):
max_tries=10,
min_crop_side_ratio=0.1,
keep_ratio=True,
**kwargs
**kwargs,
):
self.size = size
self.max_tries = max_tries

View File

@ -41,7 +41,7 @@ class RecAug(object):
jitter_prob=0.4,
blur_prob=0.4,
hsv_aug_prob=0.4,
**kwargs
**kwargs,
):
self.tia_prob = tia_prob
self.bda = BaseDataAugmentation(
@ -74,7 +74,7 @@ class BaseDataAugmentation(object):
jitter_prob=0.4,
blur_prob=0.4,
hsv_aug_prob=0.4,
**kwargs
**kwargs,
):
self.crop_prob = crop_prob
self.reverse_prob = reverse_prob
@ -151,7 +151,7 @@ class RecConAug(object):
image_shape=(32, 320, 3),
max_text_length=25,
ext_data_num=1,
**kwargs
**kwargs,
):
self.ext_data_num = ext_data_num
self.prob = prob
@ -199,7 +199,7 @@ class SVTRRecAug(object):
geometry_p=0.5,
deterioration_p=0.25,
colorjitter_p=0.25,
**kwargs
**kwargs,
):
self.transforms = Compose(
[
@ -237,7 +237,7 @@ class ParseQRecAug(object):
geometry_p=0.5,
deterioration_p=0.25,
colorjitter_p=0.25,
**kwargs
**kwargs,
):
self.transforms = Compose(
[
@ -289,7 +289,7 @@ class RecResizeImg(object):
eval_mode=False,
character_dict_path="./ppocr/utils/ppocr_keys_v1.txt",
padding=True,
**kwargs
**kwargs,
):
self.image_shape = image_shape
self.infer_mode = infer_mode
@ -315,7 +315,7 @@ class VLRecResizeImg(object):
infer_mode=False,
character_dict_path="./ppocr/utils/ppocr_keys_v1.txt",
padding=True,
**kwargs
**kwargs,
):
self.image_shape = image_shape
self.infer_mode = infer_mode
@ -437,7 +437,7 @@ class SPINRecResizeImg(object):
interpolation=2,
mean=(127.5, 127.5, 127.5),
std=(127.5, 127.5, 127.5),
**kwargs
**kwargs,
):
self.image_shape = image_shape
@ -485,7 +485,7 @@ class GrayRecResizeImg(object):
inter_type="Image.Resampling.LANCZOS",
scale=True,
padding=False,
**kwargs
**kwargs,
):
self.image_shape = image_shape
self.resize_type = resize_type

View File

@ -33,7 +33,7 @@ class SASTProcessTrain(object):
min_crop_side_ratio=0.3,
min_text_size=10,
max_text_size=512,
**kwargs
**kwargs,
):
self.input_size = image_shape[1]
self.min_crop_size = min_crop_size

View File

@ -26,7 +26,7 @@ class VQATokenPad(object):
return_overflowing_tokens=False,
return_special_tokens_mask=False,
infer_mode=False,
**kwargs
**kwargs,
):
self.max_seq_len = max_seq_len
self.pad_to_max_seq_len = max_seq_len

View File

@ -34,7 +34,7 @@ class BalanceLoss(nn.Layer):
negative_ratio=3,
return_origin=False,
eps=1e-6,
**kwargs
**kwargs,
):
"""
The BalanceLoss for Differentiable Binarization text detection

View File

@ -41,7 +41,7 @@ class DBLoss(nn.Layer):
beta=10,
ohem_ratio=3,
eps=1e-6,
**kwargs
**kwargs,
):
super(DBLoss, self).__init__()
self.alpha = alpha

View File

@ -31,7 +31,7 @@ class PSELoss(nn.Layer):
kernel_sample_mask="pred",
reduction="sum",
eps=1e-6,
**kwargs
**kwargs,
):
"""Implement PSE Loss."""
super(PSELoss, self).__init__()

View File

@ -675,7 +675,7 @@ class DistillationNRTRLoss(CELoss):
multi_head=False,
smoothing=True,
name="loss_nrtr",
**kwargs
**kwargs,
):
super().__init__(smoothing=smoothing)
self.model_name_list = model_name_list
@ -713,7 +713,7 @@ class DistillationDBLoss(DBLoss):
ohem_ratio=3,
eps=1e-6,
name="db",
**kwargs
**kwargs,
):
super().__init__()
self.model_name_list = model_name_list
@ -935,7 +935,7 @@ class DistillationVQADistanceLoss(DistanceLoss):
key=None,
index=None,
name="loss_distance",
**kargs
**kargs,
):
super().__init__(mode=mode, **kargs)
assert isinstance(model_name_pairs, list)

View File

@ -50,7 +50,7 @@ class AsterLoss(nn.Layer):
ignore_index=-100,
sequence_normalize=False,
sample_normalize=True,
**kwargs
**kwargs,
):
super(AsterLoss, self).__init__()
self.weight = weight

View File

@ -35,7 +35,7 @@ class EnhancedCTCLoss(nn.Layer):
feat_dim=96,
init_center=False,
center_file_path=None,
**kwargs
**kwargs,
):
super(EnhancedCTCLoss, self).__init__()
self.ctc_loss_func = CTCLoss(use_focal_loss=use_focal_loss)

View File

@ -29,7 +29,7 @@ class E2EMetric(object):
gt_mat_dir,
character_dict_path,
main_indicator="f_score_e2e",
**kwargs
**kwargs,
):
self.mode = mode
self.gt_mat_dir = gt_mat_dir

View File

@ -77,7 +77,7 @@ class TableMetric(object):
compute_bbox_metric=False,
box_format="xyxy",
del_thead_tbody=False,
**kwargs
**kwargs,
):
"""

View File

@ -335,7 +335,7 @@ def PPHGNet_small(pretrained=False, use_ssld=False, det=False, **kwargs):
stage_config=stage_config_det if det else stage_config_rec,
layer_num=6,
det=det,
**kwargs
**kwargs,
)
return model
@ -363,6 +363,6 @@ def PPHGNet_base(pretrained=False, use_ssld=True, **kwargs):
stage_config=stage_config,
layer_num=7,
dropout_prob=0.2,
**kwargs
**kwargs,
)
return model

View File

@ -395,7 +395,7 @@ class PPLCNetV3(nn.Layer):
lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
lab_lr=0.1,
det=False,
**kwargs
**kwargs,
):
super().__init__()
self.scale = scale

View File

@ -32,7 +32,7 @@ class MobileNetV3(nn.Layer):
large_stride=None,
small_stride=None,
disable_se=False,
**kwargs
**kwargs,
):
super(MobileNetV3, self).__init__()
self.disable_se = disable_se

View File

@ -118,7 +118,7 @@ class MobileNetV1Enhance(nn.Layer):
last_conv_stride=1,
last_pool_type="max",
last_pool_kernel_size=[3, 2],
**kwargs
**kwargs,
):
super().__init__()
self.scale = scale

View File

@ -323,7 +323,7 @@ class SVTRStage(nn.Layer):
act=nn.GELU,
eps=1e-6,
downsample=None,
**kwargs
**kwargs,
):
super().__init__()
self.dim = dim
@ -506,7 +506,7 @@ class SVTRv2(nn.Layer):
eps=1e-6,
use_pool=False,
feat2d=False,
**kwargs
**kwargs,
):
super().__init__()
num_stages = len(depths)

View File

@ -196,7 +196,7 @@ class ViT(nn.Layer):
epsilon=1e-6,
act="nn.GELU",
prenorm=False,
**kwargs
**kwargs,
):
super().__init__()
self.embed_dim = embed_dim

View File

@ -51,7 +51,7 @@ class ViTSTR(nn.Layer):
act_layer="nn.GELU",
epsilon=1e-6,
out_channels=None,
**kwargs
**kwargs,
):
super().__init__()
self.seqlen = seqlen

View File

@ -58,7 +58,7 @@ class NLPBaseModel(nn.Layer):
type="ser",
pretrained=True,
checkpoints=None,
**kwargs
**kwargs,
):
super(NLPBaseModel, self).__init__()
if checkpoints is not None: # load the trained model

View File

@ -50,7 +50,7 @@ class DRRGHead(nn.Layer):
center_region_thr=0.2,
center_region_area_thr=50,
local_graph_thr=0.7,
**kwargs
**kwargs,
):
super().__init__()

View File

@ -117,7 +117,7 @@ class PositionAttention(nn.Layer):
h=8,
w=32,
mode="nearest",
**kwargs
**kwargs,
):
super().__init__()
self.max_length = max_length

View File

@ -36,7 +36,7 @@ class AsterHead(nn.Layer):
max_len_labels,
time_step=25,
beam_width=5,
**kwargs
**kwargs,
):
super(AsterHead, self).__init__()
self.num_classes = out_channels

View File

@ -217,7 +217,7 @@ class CPPDHead(nn.Layer):
max_len=25,
vis_seq=50,
ch=False,
**kwargs
**kwargs,
):
super(CPPDHead, self).__init__()

View File

@ -40,7 +40,7 @@ class CTCHead(nn.Layer):
fc_decay=0.0004,
mid_channels=None,
return_feats=False,
**kwargs
**kwargs,
):
super(CTCHead, self).__init__()
if mid_channels is None:

View File

@ -83,7 +83,7 @@ class MultiHead(nn.Layer):
self.sar_head = eval(name)(
in_channels=in_channels,
out_channels=out_channels_list["SARLabelDecode"],
**sar_args
**sar_args,
)
elif name == "NRTRHead":
gtc_args = self.head_list[idx][name]
@ -124,7 +124,7 @@ class MultiHead(nn.Layer):
self.ctc_head = eval(name)(
in_channels=self.ctc_encoder.out_channels,
out_channels=out_channels_list["CTCLabelDecode"],
**head_args
**head_args,
)
else:
raise NotImplementedError(

View File

@ -222,7 +222,7 @@ class ParseQHead(nn.Layer):
decode_ar,
refine_iters,
dropout,
**kwargs
**kwargs,
):
super().__init__()

View File

@ -58,7 +58,7 @@ class RFLHead(nn.Layer):
out_channels=38,
use_cnt=True,
use_seq=True,
**kwargs
**kwargs,
):
super(RFLHead, self).__init__()
assert use_cnt or use_seq
@ -69,14 +69,14 @@ class RFLHead(nn.Layer):
embed_size=in_channels,
encode_length=batch_max_legnth + 1,
out_channels=out_channels,
**kwargs
**kwargs,
)
if self.use_seq:
self.seq_head = AttentionLSTM(
in_channels=in_channels,
out_channels=out_channels,
hidden_size=hidden_size,
**kwargs
**kwargs,
)
self.batch_max_legnth = batch_max_legnth
self.num_class = out_channels

View File

@ -695,7 +695,7 @@ class RobustScannerHead(nn.Layer):
mask=True,
padding_idx=None,
encode_value=False,
**kwargs
**kwargs,
):
super(RobustScannerHead, self).__init__()

View File

@ -47,7 +47,7 @@ class SAREncoder(nn.Layer):
d_model=512,
d_enc=512,
mask=True,
**kwargs
**kwargs,
):
super().__init__()
assert isinstance(enc_bi_rnn, bool)
@ -167,7 +167,7 @@ class ParallelSARDecoder(BaseDecoder):
max_text_length=30,
mask=True,
pred_concat=True,
**kwargs
**kwargs,
):
super().__init__()
@ -361,7 +361,7 @@ class SARHead(nn.Layer):
d_k=512,
pred_dropout=0.1,
pred_concat=True,
**kwargs
**kwargs,
):
super(SARHead, self).__init__()

View File

@ -249,7 +249,7 @@ class SRNHead(nn.Layer):
num_encoder_TUs,
num_decoder_TUs,
hidden_dims,
**kwargs
**kwargs,
):
super(SRNHead, self).__init__()
self.char_num = out_channels

View File

@ -49,7 +49,7 @@ class TableAttentionHead(nn.Layer):
max_text_length=800,
out_channels=30,
loc_reg_num=4,
**kwargs
**kwargs,
):
super(TableAttentionHead, self).__init__()
self.input_size = in_channels[-1]
@ -259,7 +259,7 @@ class SLAHead(nn.Layer):
loc_reg_num=4,
fc_decay=0.0,
use_attn=False,
**kwargs
**kwargs,
):
"""
@param in_channels: input shape

View File

@ -39,7 +39,7 @@ class TableMasterHead(nn.Layer):
dropout=0,
max_text_length=500,
loc_reg_num=4,
**kwargs
**kwargs,
):
super(TableMasterHead, self).__init__()
hidden_size = in_channels[-1]

View File

@ -42,7 +42,7 @@ class DSConv(nn.Layer):
groups=None,
if_act=True,
act="relu",
**kwargs
**kwargs,
):
super(DSConv, self).__init__()
if groups == None:

View File

@ -46,7 +46,7 @@ class TSRN(nn.Layer):
mask=False,
hidden_units=32,
infer_mode=False,
**kwargs
**kwargs,
):
super(TSRN, self).__init__()
in_planes = 3

View File

@ -41,7 +41,7 @@ class Linear(object):
power=1.0,
warmup_epoch=0,
last_epoch=-1,
**kwargs
**kwargs,
):
super(Linear, self).__init__()
self.learning_rate = learning_rate
@ -88,7 +88,7 @@ class Cosine(object):
epochs,
warmup_epoch=0,
last_epoch=-1,
**kwargs
**kwargs,
):
super(Cosine, self).__init__()
self.learning_rate = learning_rate
@ -133,7 +133,7 @@ class Step(object):
gamma,
warmup_epoch=0,
last_epoch=-1,
**kwargs
**kwargs,
):
super(Step, self).__init__()
self.step_size = step_each_epoch * step_size
@ -177,7 +177,7 @@ class Piecewise(object):
values,
warmup_epoch=0,
last_epoch=-1,
**kwargs
**kwargs,
):
super(Piecewise, self).__init__()
self.boundaries = [step_each_epoch * e for e in decay_epochs]
@ -219,7 +219,7 @@ class CyclicalCosine(object):
cycle,
warmup_epoch=0,
last_epoch=-1,
**kwargs
**kwargs,
):
super(CyclicalCosine, self).__init__()
self.learning_rate = learning_rate
@ -269,7 +269,7 @@ class OneCycle(object):
three_phase=False,
warmup_epoch=0,
last_epoch=-1,
**kwargs
**kwargs,
):
super(OneCycle, self).__init__()
self.max_lr = max_lr
@ -382,7 +382,7 @@ class MultiStepDecay(object):
gamma,
warmup_epoch=0,
last_epoch=-1,
**kwargs
**kwargs,
):
super(MultiStepDecay, self).__init__()
self.milestones = [step_each_epoch * e for e in milestones]
@ -427,7 +427,7 @@ class TwoStepCosine(object):
epochs,
warmup_epoch=0,
last_epoch=-1,
**kwargs
**kwargs,
):
super(TwoStepCosine, self).__init__()
self.learning_rate = learning_rate

View File

@ -65,7 +65,7 @@ class Adam(object):
grad_clip=None,
name=None,
lazy_mode=False,
**kwargs
**kwargs,
):
self.learning_rate = learning_rate
self.beta1 = beta1
@ -164,7 +164,7 @@ class RMSProp(object):
epsilon=1e-6,
weight_decay=None,
grad_clip=None,
**args
**args,
):
super(RMSProp, self).__init__()
self.learning_rate = learning_rate
@ -200,7 +200,7 @@ class Adadelta(object):
weight_decay=None,
grad_clip=None,
name=None,
**kwargs
**kwargs,
):
self.learning_rate = learning_rate
self.epsilon = epsilon
@ -241,7 +241,7 @@ class AdamW(object):
one_dim_param_no_weight_decay=False,
name=None,
lazy_mode=False,
**args
**args,
):
super().__init__()
self.learning_rate = learning_rate

View File

@ -40,7 +40,7 @@ class DBPostProcess(object):
use_dilation=False,
score_mode="fast",
box_type="quad",
**kwargs
**kwargs,
):
self.thresh = thresh
self.box_thresh = box_thresh
@ -268,7 +268,7 @@ class DistillationDBPostProcess(object):
use_dilation=False,
score_mode="fast",
box_type="quad",
**kwargs
**kwargs,
):
self.model_name = model_name
self.key = key

View File

@ -76,7 +76,7 @@ class FCEPostProcess(object):
alpha=1.0,
beta=1.0,
box_type="poly",
**kwargs
**kwargs,
):
self.scales = scales
self.fourier_degree = fourier_degree

View File

@ -37,7 +37,7 @@ class PGPostProcess(object):
score_thresh,
mode,
point_gather_mode=None,
**kwargs
**kwargs,
):
self.character_dict_path = character_dict_path
self.valid_set = valid_set

View File

@ -40,7 +40,7 @@ class PSEPostProcess(object):
min_area=16,
box_type="quad",
scale=4,
**kwargs
**kwargs,
):
assert box_type in ["quad", "poly"], "Only quad and poly is supported"
self.thresh = thresh

View File

@ -237,7 +237,7 @@ class DistillationCTCLabelDecode(CTCLabelDecode):
model_name=["student"],
key=None,
multi_head=False,
**kwargs
**kwargs,
):
super(DistillationCTCLabelDecode, self).__init__(
character_dict_path, use_space_char
@ -836,7 +836,7 @@ class DistillationSARLabelDecode(SARLabelDecode):
model_name=["student"],
key=None,
multi_head=False,
**kwargs
**kwargs,
):
super(DistillationSARLabelDecode, self).__init__(
character_dict_path, use_space_char

View File

@ -43,7 +43,7 @@ class SASTPostProcess(object):
shrink_ratio_of_width=0.3,
expand_scale=1.0,
tcl_map_thresh=0.5,
**kwargs
**kwargs,
):
self.score_thresh = score_thresh
self.nms_thresh = nms_thresh

View File

@ -142,7 +142,7 @@ class TableMasterLabelDecode(TableLabelDecode):
character_dict_path,
box_shape="ori",
merge_no_span_structure=True,
**kwargs
**kwargs,
):
super(TableMasterLabelDecode, self).__init__(
character_dict_path, merge_no_span_structure

View File

@ -204,7 +204,7 @@ def save_model(
config,
is_best=False,
prefix="ppocr",
**kwargs
**kwargs,
):
"""
save model to the target path

View File

@ -145,7 +145,7 @@ class TEDS(object):
int(node.attrib.get("colspan", "1")),
int(node.attrib.get("rowspan", "1")),
cell,
*deque()
*deque(),
)
else:
new_node = TableTree(node.tag, None, None, None, *deque())

View File

@ -99,7 +99,7 @@ class ExponentialWarmup(LinearWarmup):
decay_epochs=2.4,
decay_rate=0.97,
warmup_epoch=5,
**kwargs
**kwargs,
):
warmup_step = warmup_epoch * step_each_epoch
start_lr = 0.0
@ -222,7 +222,7 @@ class RMSProp(object):
epsilon=1e-6,
parameter_list=None,
regularization=None,
**args
**args,
):
super(RMSProp, self).__init__()
self.learning_rate = learning_rate