Mirror of https://github.com/open-mmlab/mmocr.git (synced 2025-06-03 21:54:47 +08:00)
[Config] SAR config
commit ca35c78e69 (parent 41d9c741cd)
@@ -1,8 +1,15 @@
label_convertor = dict(
    type='AttnConvertor', dict_type='DICT90', with_unknown=True)
dictionary = dict(
    type='Dictionary',
    dict_file='dicts/english_digits_symbols.txt',
    with_start=True,
    with_end=True,
    same_start_end=True,
    with_padding=True,
    with_unknown=True)

model = dict(
    type='SARNet',
    preprocess_cfg=dict(mean=[127, 127, 127], std=[127, 127, 127]),
    backbone=dict(type='ResNet31OCR'),
    encoder=dict(
        type='SAREncoder',
@ -18,7 +25,8 @@ model = dict(
|
||||
dec_gru=False,
|
||||
pred_dropout=0.1,
|
||||
d_k=512,
|
||||
pred_concat=True),
|
||||
loss=dict(type='SARLoss'),
|
||||
label_convertor=label_convertor,
|
||||
pred_concat=True,
|
||||
postprocessor=dict(type='AttentionPostprocessor'),
|
||||
loss=dict(type='CELoss', ignore_first_char=True, reduction='mean')),
|
||||
dictionary=dictionary,
|
||||
max_seq_len=30)
|
||||
|
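These two hunks swap the 0.x `label_convertor` for a 1.x `dictionary` and move the loss and postprocessor down into the decoder. To make the dictionary flags concrete, here is a minimal toy sketch of what `with_start`/`with_end`/`same_start_end`/`with_padding`/`with_unknown` imply for the character table; the class and token names are illustrative placeholders, not MMOCR's actual `Dictionary` implementation:

# Illustrative sketch only: approximates what a character dictionary built from
# dicts/english_digits_symbols.txt might hold. Token names are assumptions.
class ToyDictionary:
    def __init__(self, dict_file, with_start=True, with_end=True,
                 same_start_end=True, with_padding=True, with_unknown=True):
        with open(dict_file, encoding='utf-8') as f:
            # one character per line in the dict file
            self.chars = [line.rstrip('\n') for line in f if line.strip('\n')]
        if with_start and with_end and same_start_end:
            self.chars.append('<BOS/EOS>')  # one shared start/end token
        else:
            if with_start:
                self.chars.append('<BOS>')
            if with_end:
                self.chars.append('<EOS>')
        if with_padding:
            self.chars.append('<PAD>')
        if with_unknown:
            self.chars.append('<UKN>')
        self.char2idx = {c: i for i, c in enumerate(self.chars)}

    def str2idx(self, text):
        unk = self.char2idx.get('<UKN>')
        return [self.char2idx.get(c, unk) for c in text]

With `same_start_end=True`, the start and end markers share a single index, so only one extra token is appended to cover both.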
@@ -1,33 +1,75 @@
_base_ = [
    '../../_base_/default_runtime.py', '../../_base_/recog_models/sar.py',
    '../../_base_/default_runtime.py',
    '../../_base_/recog_models/sar.py',
    '../../_base_/schedules/schedule_adam_step_5e.py',
    '../../_base_/recog_pipelines/sar_pipeline.py',
    '../../_base_/recog_datasets/ST_SA_MJ_real_train.py',
    '../../_base_/recog_datasets/academic_test.py'
]

train_list = {{_base_.train_list}}
test_list = {{_base_.test_list}}
dataset_type = 'OCRDataset'
data_root = 'data/recog/'
file_client_args = dict(backend='disk')

train_pipeline = {{_base_.train_pipeline}}
test_pipeline = {{_base_.test_pipeline}}
train_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=file_client_args),
    dict(type='LoadOCRAnnotations', with_text=True),
    dict(
        type='RescaleToHeight',
        height=48,
        min_width=48,
        max_width=160,
        width_divisor=4),
    dict(type='PadToWidth', width=160),
    dict(
        type='PackTextRecogInputs',
        meta_keys=('img_path', 'ori_shape', 'img_shape', 'valid_ratio'))
]

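The new `train_pipeline` above replaces the shared `{{_base_.train_pipeline}}` with an explicit list: load, annotate, rescale to a fixed height of 48 (width clamped to [48, 160] and kept a multiple of 4), pad to width 160, then pack. A rough worked example of that width arithmetic follows; the rounding rule is my assumption, not the transform's documented behaviour:

# Sketch of the width arithmetic implied by RescaleToHeight + PadToWidth.
def rescale_then_pad_width(ori_h, ori_w, height=48, min_width=48,
                           max_width=160, width_divisor=4, pad_width=160):
    scaled_w = ori_w * height / ori_h                 # keep aspect ratio at new height
    scaled_w = max(min_width, min(max_width, scaled_w))
    scaled_w = round(scaled_w / width_divisor) * width_divisor  # multiple of 4 (assumed rounding)
    valid_ratio = min(1.0, scaled_w / pad_width)      # fraction of the padded width that is real content
    return int(scaled_w), valid_ratio

print(rescale_then_pad_width(32, 100))  # -> (152, 0.95) for a 32x100 (h x w) crop

The `valid_ratio` carried through `PackTextRecogInputs` is what lets SAR mask out the padded region during decoding.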
data = dict(
    samples_per_gpu=64,
    workers_per_gpu=2,
    val_dataloader=dict(samples_per_gpu=1),
    test_dataloader=dict(samples_per_gpu=1),
    train=dict(
        type='UniformConcatDataset',
        datasets=train_list,
        pipeline=train_pipeline),
    val=dict(
        type='UniformConcatDataset',
        datasets=test_list,
        pipeline=test_pipeline),
    test=dict(
        type='UniformConcatDataset',
        datasets=test_list,
test_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=file_client_args),
    dict(
        type='RescaleToHeight',
        height=48,
        min_width=48,
        max_width=160,
        width_divisor=4),
    dict(type='PadToWidth', width=160),
    dict(
        type='PackTextRecogInputs',
        meta_keys=('img_path', 'ori_shape', 'img_shape', 'valid_ratio',
                   'instances'))
]

train_dataloader = dict(
    batch_size=64,
    num_workers=8,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_prefix=dict(img_path=None),
        ann_file='train_label.json',
        pipeline=train_pipeline))

val_dataloader = dict(
    batch_size=1,
    num_workers=4,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_prefix=dict(img_path=None),
        ann_file='test_label.json',
        test_mode=True,
        pipeline=test_pipeline))
test_dataloader = val_dataloader

evaluation = dict(interval=1, metric='acc')
val_evaluator = [
    dict(
        type='WordMetric', mode=['exact', 'ignore_case',
                                 'ignore_case_symbol']),
    dict(type='CharMetric')
]
test_evaluator = val_evaluator
visualizer = dict(type='TextRecogLocalVisualizer', name='visualizer')
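With the hook-style `evaluation = dict(interval=1, metric='acc')` removed, accuracy now comes from the explicit `val_evaluator`/`test_evaluator` lists. The sketch below illustrates what the three `WordMetric` modes plausibly measure on a toy prediction set; the normalisation rules are my reading of the mode names, not MMOCR's exact code:

import re

# Sketch of the three WordMetric modes; normalisation rules are assumptions.
def word_accuracies(preds, gts):
    def norm_case(s):
        return s.lower()

    def norm_case_symbol(s):
        return re.sub(r'[^a-z0-9]', '', s.lower())  # drop non-alphanumerics

    n = len(gts)
    exact = sum(p == g for p, g in zip(preds, gts)) / n
    ignore_case = sum(norm_case(p) == norm_case(g)
                      for p, g in zip(preds, gts)) / n
    ignore_case_symbol = sum(norm_case_symbol(p) == norm_case_symbol(g)
                             for p, g in zip(preds, gts)) / n
    return exact, ignore_case, ignore_case_symbol

print(word_accuracies(['Hello!', 'world'], ['hello', 'world']))
# -> (0.5, 0.5, 1.0): 'Hello!' only matches once case and symbols are ignored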
@@ -1,20 +1,80 @@
_base_ = [
    '../../_base_/default_runtime.py',
    '../../_base_/recog_models/sar.py',
    '../../_base_/schedules/schedule_adam_step_5e.py',
    '../../_base_/recog_pipelines/sar_pipeline.py',
    '../../_base_/recog_datasets/ST_SA_MJ_real_train.py',
    '../../_base_/recog_datasets/academic_test.py'
]

train_list = {{_base_.train_list}}
test_list = {{_base_.test_list}}
dataset_type = 'OCRDataset'
data_root = 'data/recog/'
file_client_args = dict(backend='disk')

train_pipeline = {{_base_.train_pipeline}}
test_pipeline = {{_base_.test_pipeline}}
train_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=file_client_args),
    dict(type='LoadOCRAnnotations', with_text=True),
    dict(type='Resize', scale=(160, 48), keep_ratio=False),
    dict(
        type='PackTextRecogInputs',
        meta_keys=('img_path', 'ori_shape', 'img_shape', 'valid_ratio'))
]

label_convertor = dict(
    type='AttnConvertor', dict_type='DICT90', with_unknown=True)
test_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=file_client_args),
    dict(
        type='RescaleToHeight',
        height=48,
        min_width=48,
        max_width=160,
        width_divisor=4),
    dict(type='PadToWidth', width=160),
    dict(
        type='PackTextRecogInputs',
        meta_keys=('img_path', 'ori_shape', 'img_shape', 'valid_ratio',
                   'instances'))
]

train_dataloader = dict(
    batch_size=64,
    num_workers=8,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_prefix=dict(img_path=None),
        ann_file='train_label.json',
        pipeline=train_pipeline))

val_dataloader = dict(
    batch_size=1,
    num_workers=4,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_prefix=dict(img_path=None),
        ann_file='test_label.json',
        test_mode=True,
        pipeline=test_pipeline))
test_dataloader = val_dataloader

val_evaluator = [
    dict(
        type='WordMetric', mode=['exact', 'ignore_case',
                                 'ignore_case_symbol']),
    dict(type='CharMetric')
]
test_evaluator = val_evaluator
visualizer = dict(type='TextRecogLocalVisualizer', name='visualizer')
dictionary = dict(
    type='Dictionary',
    dict_file='dicts/english_digits_symbols.txt',
    with_start=True,
    with_end=True,
    same_start_end=True,
    with_padding=True,
    with_unknown=True)
model = dict(
    type='SARNet',
    backbone=dict(type='ResNet31OCR'),
@@ -32,27 +92,8 @@ model = dict(
        dec_gru=False,
        pred_dropout=0.1,
        d_k=512,
        pred_concat=True),
    loss=dict(type='SARLoss'),
    label_convertor=label_convertor,
        pred_concat=True,
        postprocessor=dict(type='AttentionPostprocessor'),
        loss=dict(type='CELoss', ignore_first_char=True, reduction='mean')),
    dictionary=dictionary,
    max_seq_len=30)

data = dict(
    samples_per_gpu=64,
    workers_per_gpu=2,
    val_dataloader=dict(samples_per_gpu=1),
    test_dataloader=dict(samples_per_gpu=1),
    train=dict(
        type='UniformConcatDataset',
        datasets=train_list,
        pipeline=train_pipeline),
    val=dict(
        type='UniformConcatDataset',
        datasets=test_list,
        pipeline=test_pipeline),
    test=dict(
        type='UniformConcatDataset',
        datasets=test_list,
        pipeline=test_pipeline))

evaluation = dict(interval=1, metric='acc')
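Assuming an MMEngine-based dev-1.x checkout, a quick way to confirm that a migrated config still resolves its `_base_` files and exposes the new 1.x fields is to load it with `mmengine.Config`; the config path below is a placeholder, since file names are not visible in this diff:

from mmengine.config import Config

# Placeholder path: the actual config file names are not shown in this diff.
cfg = Config.fromfile('configs/textrecog/sar/my_sar_config.py')

# The migrated config should expose 1.x-style keys instead of the old
# data / evaluation / label_convertor trio.
print(cfg.model.type)                          # 'SARNet'
print(cfg.model.dictionary.dict_file)          # 'dicts/english_digits_symbols.txt'
print(cfg.train_dataloader.batch_size)         # 64
print([m['type'] for m in cfg.val_evaluator])  # ['WordMetric', 'CharMetric']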