diff --git a/configs/_base_/recog_datasets/MJ_train.py b/configs/_base_/recog_datasets/MJ_train.py
index 953f5395..9e54bc36 100644
--- a/configs/_base_/recog_datasets/MJ_train.py
+++ b/configs/_base_/recog_datasets/MJ_train.py
@@ -1,15 +1,14 @@
 # Text Recognition Training set, including:
 # Synthetic Datasets: Syn90k
-data_root = 'data/recog'
+data_root = 'data/rec'
 train_img_prefix1 = 'Syn90k/mnt/ramdisk/max/90kDICT32px'
 train_ann_file1 = 'Syn90k/label.json'
-file_client_args = dict(backend='disk')
-train = dict(
+MJ = dict(
     type='OCRDataset',
     data_root=data_root,
     data_prefix=dict(img_path=train_img_prefix1),
     ann_file=train_ann_file1,
     test_mode=False,
     pipeline=None)
-train_list = [train]
+train_list = [MJ]
diff --git a/configs/_base_/recog_datasets/ST_MJ_alphanumeric_train.py b/configs/_base_/recog_datasets/ST_MJ_alphanumeric_train.py
index 003f5645..8a6fa7e8 100644
--- a/configs/_base_/recog_datasets/ST_MJ_alphanumeric_train.py
+++ b/configs/_base_/recog_datasets/ST_MJ_alphanumeric_train.py
@@ -2,12 +2,11 @@
 # Synthetic Datasets: SynthText, Syn90k
 # Both annotations are filtered so that
 # only alphanumeric terms are left
-data_root = 'data/recog'
+data_root = 'data/rec'
 train_img_prefix1 = 'Syn90k/mnt/ramdisk/max/90kDICT32px'
 train_ann_file1 = 'Syn90k/label.json'
-file_client_args = dict(backend='disk')
-train1 = dict(
+MJ = dict(
     type='OCRDataset',
     data_root=data_root,
     data_prefix=dict(img_path=train_img_prefix1),
@@ -17,9 +16,13 @@ train1 = dict(
 train_img_prefix2 = 'SynthText/synthtext/SynthText_patch_horizontal'
 train_ann_file2 = 'SynthText/alphanumeric_label.json'
-train2 = {key: value for key, value in train1.items()}
-train2['data_root'] = data_root
-train2['data_prefix'] = dict(img_path=train_img_prefix2),
-train2['ann_file'] = dict(img_path=train_ann_file2),
-train_list = [train1, train2]
+ST = dict(
+    type='OCRDataset',
+    data_root=data_root,
+    data_prefix=dict(img_path=train_img_prefix2),
+    ann_file=train_ann_file2,
+    test_mode=False,
+    pipeline=None)
+
+train_list = [MJ, ST]
diff --git a/configs/_base_/recog_datasets/ST_MJ_train.py b/configs/_base_/recog_datasets/ST_MJ_train.py
index b25fd434..9ddf7a2a 100644
--- a/configs/_base_/recog_datasets/ST_MJ_train.py
+++ b/configs/_base_/recog_datasets/ST_MJ_train.py
@@ -1,13 +1,13 @@
 # Text Recognition Training set, including:
 # Synthetic Datasets: SynthText, Syn90k
-data_root = 'data/recog'
+data_root = 'data/rec'
 train_img_prefix1 = 'Syn90k/mnt/ramdisk/max/90kDICT32px'
 train_ann_file1 = 'Syn90k/label.json'
 file_client_args = dict(backend='disk')
-train1 = dict(
+MJ = dict(
     type='OCRDataset',
     data_root=data_root,
     data_prefix=dict(img_path=train_img_prefix1),
@@ -17,9 +17,13 @@ train1 = dict(
 train_img_prefix2 = 'SynthText/synthtext/SynthText_patch_horizontal'
 train_ann_file2 = 'SynthText/label.json'
-train2 = {key: value for key, value in train1.items()}
-train2['data_root'] = data_root
-train2['data_prefix'] = dict(img_path=train_img_prefix2),
-train2['ann_file'] = dict(img_path=train_ann_file2),
-train_list = [train1, train2]
+ST = dict(
+    type='OCRDataset',
+    data_root=data_root,
+    data_prefix=dict(img_path=train_img_prefix2),
+    ann_file=train_ann_file2,
+    test_mode=False,
+    pipeline=None)
+
+train_list = [MJ, ST]
diff --git a/configs/_base_/recog_datasets/ST_SA_MJ_real_train.py b/configs/_base_/recog_datasets/ST_SA_MJ_real_train.py
index 51ba1800..37d68e7c 100644
--- a/configs/_base_/recog_datasets/ST_SA_MJ_real_train.py
+++ b/configs/_base_/recog_datasets/ST_SA_MJ_real_train.py
@@ -1,7 +1,7 @@
 # Text Recognition Training set, including:
 # Synthetic Datasets: SynthText, SynthAdd, Syn90k
 # Real Dataset: IC11, IC13, IC15, COCO-Test, IIIT5k
-data_root = 'data/recog'
+data_root = 'data/rec'
 train_img_prefix1 = 'icdar_2011'
 train_img_prefix2 = 'icdar_2013'
@@ -9,19 +9,19 @@ train_img_prefix3 = 'icdar_2015'
 train_img_prefix4 = 'coco_text'
 train_img_prefix5 = 'IIIT5K'
 train_img_prefix6 = 'SynthText_Add'
-train_img_prefix7 = 'SynthText'
-train_img_prefix8 = 'Syn90k'
+train_img_prefix7 = 'SynthText/synthtext/SynthText_patch_horizontal'
+train_img_prefix8 = 'Syn90k/mnt/ramdisk/max/90kDICT32px'
 train_ann_file1 = 'icdar_2011/train_label.json',
 train_ann_file2 = 'icdar_2013/train_label.json',
 train_ann_file3 = 'icdar_2015/train_label.json',
 train_ann_file4 = 'coco_text/train_label.json',
 train_ann_file5 = 'IIIT5K/train_label.json',
-train_ann_file6 = 'SynthText_Add/label.json',
-train_ann_file7 = 'SynthText/shuffle_labels.json',
-train_ann_file8 = 'Syn90k/shuffle_labels.json'
+train_ann_file6 = 'SynthText_Add/train_label.json',
+train_ann_file7 = 'SynthText/shuffle_label.json',
+train_ann_file8 = 'Syn90k/mnt/ramdisk/max/90kDICT32px/shuffle_label.json'
-train1 = dict(
+IC11 = dict(
     type='OCRDataset',
     data_root=data_root,
     data_prefix=dict(img_path=train_img_prefix1),
@@ -29,32 +29,60 @@ train1 = dict(
     test_mode=False,
     pipeline=None)
-train2 = {key: value for key, value in train1.items()}
-train2['data_prefix'] = dict(img_path=train_img_prefix2)
-train2['ann_file'] = train_ann_file2
+IC13 = dict(
+    type='OCRDataset',
+    data_root=data_root,
+    data_prefix=dict(img_path=train_img_prefix2),
+    ann_file=train_ann_file2,
+    test_mode=False,
+    pipeline=None)
-train3 = {key: value for key, value in train1.items()}
-train3['img_prefix'] = dict(img_path=train_img_prefix3)
-train3['ann_file'] = train_ann_file3
+IC15 = dict(
+    type='OCRDataset',
+    data_root=data_root,
+    data_prefix=dict(img_path=train_img_prefix3),
+    ann_file=train_ann_file3,
+    test_mode=False,
+    pipeline=None)
-train4 = {key: value for key, value in train1.items()}
-train4['img_prefix'] = dict(img_path=train_img_prefix4)
-train4['ann_file'] = train_ann_file4
+COCO = dict(
+    type='OCRDataset',
+    data_root=data_root,
+    data_prefix=dict(img_path=train_img_prefix4),
+    ann_file=train_ann_file4,
+    test_mode=False,
+    pipeline=None)
-train5 = {key: value for key, value in train1.items()}
-train5['img_prefix'] = dict(img_path=train_img_prefix5)
-train5['ann_file'] = train_ann_file5
+IIIT5K = dict(
+    type='OCRDataset',
+    data_root=data_root,
+    data_prefix=dict(img_path=train_img_prefix5),
+    ann_file=train_ann_file5,
+    test_mode=False,
+    pipeline=None)
-train6 = {key: value for key, value in train1.items()}
-train6['img_prefix'] = dict(img_path=train_img_prefix6)
-train6['ann_file'] = train_ann_file6
+STADD = dict(
+    type='OCRDataset',
+    data_root=data_root,
+    data_prefix=dict(img_path=train_img_prefix6),
+    ann_file=train_ann_file6,
+    test_mode=False,
+    pipeline=None)
-train7 = {key: value for key, value in train1.items()}
-train7['img_prefix'] = dict(img_path=train_img_prefix7)
-train7['ann_file'] = train_ann_file7
+ST = dict(
+    type='OCRDataset',
+    data_root=data_root,
+    data_prefix=dict(img_path=train_img_prefix7),
+    ann_file=train_ann_file7,
+    test_mode=False,
+    pipeline=None)
-train8 = {key: value for key, value in train1.items()}
-train8['img_prefix'] = dict(img_path=train_img_prefix8)
-train8['ann_file'] = train_ann_file8
+MJ = dict(
+    type='OCRDataset',
+    data_root=data_root,
+    data_prefix=dict(img_path=train_img_prefix8),
+    ann_file=train_ann_file8,
+    test_mode=False,
+    pipeline=None)
-train_list = [train1, train2, train3, train4, train5, train6, train7, train8]
+train_list = [IC13, IC11, IC15, COCO, IIIT5K, STADD, ST, MJ]
diff --git a/configs/_base_/recog_datasets/ST_SA_MJ_train.py b/configs/_base_/recog_datasets/ST_SA_MJ_train.py
index d8f0983b..876c3a11 100644
--- a/configs/_base_/recog_datasets/ST_SA_MJ_train.py
+++ b/configs/_base_/recog_datasets/ST_SA_MJ_train.py
@@ -1,17 +1,10 @@
 # Text Recognition Training set, including:
 # Synthetic Datasets: SynthText, Syn90k
-data_root = 'data/recog'
+data_root = 'data/rec'
+train_img_prefix1 = 'Syn90k/mnt/ramdisk/max/90kDICT32px'
+train_ann_file1 = 'Syn90k/label.json'
-train_img_prefix1 = 'SynthText_Add'
-train_img_prefix2 = 'SynthText/synthtext/' + \
-    'SynthText_patch_horizontal'
-train_img_prefix3 = 'Syn90k/mnt/ramdisk/max/90kDICT32px'
-
-train_ann_file1 = 'SynthText_Add/label.json',
-train_ann_file2 = 'SynthText/label.json',
-train_ann_file3 = 'Syn90k/label.json'
-
-train1 = dict(
+MJ = dict(
     type='OCRDataset',
     data_root=data_root,
     data_prefix=dict(img_path=train_img_prefix1),
@@ -19,12 +12,27 @@ train1 = dict(
     test_mode=False,
     pipeline=None)
-train2 = {key: value for key, value in train1.items()}
-train2['data_prefix'] = dict(img_path=train_img_prefix2)
-train2['ann_file'] = train_ann_file2
+train_img_prefix2 = 'SynthText/synthtext/' + \
+    'SynthText_patch_horizontal'
+train_ann_file2 = 'SynthText/label.json',
-train3 = {key: value for key, value in train1.items()}
-train3['img_prefix'] = dict(img_path=train_img_prefix3)
-train3['ann_file'] = train_ann_file3
+ST = dict(
+    type='OCRDataset',
+    data_root=data_root,
+    data_prefix=dict(img_path=train_img_prefix2),
+    ann_file=train_ann_file2,
+    test_mode=False,
+    pipeline=None)
-train_list = [train1, train2, train3]
+train_img_prefix3 = 'SynthText_Add'
+train_ann_file3 = 'SynthText_Add/label.json'
+
+STADD = dict(
+    type='OCRDataset',
+    data_root=data_root,
+    data_prefix=dict(img_path=train_img_prefix3),
+    ann_file=train_ann_file3,
+    test_mode=False,
+    pipeline=None)
+
+train_list = [MJ, ST, STADD]
diff --git a/configs/_base_/recog_datasets/academic_test.py b/configs/_base_/recog_datasets/academic_test.py
index 6929b228..e7f4d5eb 100644
--- a/configs/_base_/recog_datasets/academic_test.py
+++ b/configs/_base_/recog_datasets/academic_test.py
@@ -2,23 +2,23 @@
 # Regular Datasets: IIIT5K, SVT, IC13
 # Irregular Datasets: IC15, SVTP, CT80
-test_root = 'data/recog'
+test_root = 'data/rec'
 test_img_prefix1 = 'IIIT5K/'
 test_img_prefix2 = 'svt/'
-test_img_prefix3 = 'icdar_2013/'
-test_img_prefix4 = 'icdar_2015/'
+test_img_prefix3 = 'icdar_2013/Challenge2_Test_Task3_Images/'
+test_img_prefix4 = 'icdar_2015/ch4_test_word_images_gt'
 test_img_prefix5 = 'svtp/'
 test_img_prefix6 = 'ct80/'
-test_ann_file1 = 'IIIT5K/test_label.josn'
-test_ann_file2 = 'svt/test_label.josn'
-test_ann_file3 = 'icdar_2013/test_label_1015.josn'
-test_ann_file4 = 'icdar_2015/test_label.josn'
-test_ann_file5 = 'svtp/test_label.josn'
-test_ann_file6 = 'ct80/test_label.josn'
+test_ann_file1 = 'IIIT5K/test_label.json'
+test_ann_file2 = 'svt/test_label.json'
+test_ann_file3 = 'icdar_2013/test_label.json'
+test_ann_file4 = 'icdar_2015/test_label.json'
+test_ann_file5 = 'svtp/test_label.json'
+test_ann_file6 = 'ct80/test_label.json'
-test1 = dict(
+IIIT5K = dict(
     type='OCRDataset',
     data_root=test_root,
     data_prefix=dict(img_path=test_img_prefix1),
@@ -26,24 +26,44 @@ test1 = dict(
     test_mode=True,
     pipeline=None)
-test2 = {key: value for key, value in test1.items()}
-test2['data_prefix'] = dict(img_path=test_img_prefix2)
-test2['ann_file'] = test_ann_file2
+SVT = dict(
+    type='OCRDataset',
+    data_root=test_root,
+    data_prefix=dict(img_path=test_img_prefix2),
+    ann_file=test_ann_file2,
+    test_mode=True,
+    pipeline=None)
-test3 = {key: value for key, value in test1.items()}
-test3['data_prefix'] = dict(img_path=test_img_prefix3)
-test3['ann_file'] = test_ann_file3
+IC13 = dict(
+    type='OCRDataset',
+    data_root=test_root,
+    data_prefix=dict(img_path=test_img_prefix3),
+    ann_file=test_ann_file3,
+    test_mode=True,
+    pipeline=None)
-test4 = {key: value for key, value in test1.items()}
-test4['data_prefix'] = dict(img_path=test_img_prefix4)
-test4['ann_file'] = test_ann_file4
+IC15 = dict(
+    type='OCRDataset',
+    data_root=test_root,
+    data_prefix=dict(img_path=test_img_prefix4),
+    ann_file=test_ann_file4,
+    test_mode=True,
+    pipeline=None)
-test5 = {key: value for key, value in test1.items()}
-test5['data_prefix'] = dict(img_path=test_img_prefix5)
-test5['ann_file'] = test_ann_file5
+SVTP = dict(
+    type='OCRDataset',
+    data_root=test_root,
+    data_prefix=dict(img_path=test_img_prefix5),
+    ann_file=test_ann_file5,
+    test_mode=True,
+    pipeline=None)
-test6 = {key: value for key, value in test1.items()}
-test6['data_prefix'] = dict(img_path=test_img_prefix6)
-test6['ann_file'] = test_ann_file6
+CUTE80 = dict(
+    type='OCRDataset',
+    data_root=test_root,
+    data_prefix=dict(img_path=test_img_prefix6),
+    ann_file=test_ann_file6,
+    test_mode=True,
+    pipeline=None)
-test_list = [test1, test2, test3, test4, test5, test6]
+test_list = [IIIT5K, SVT, IC13, IC15, SVTP, CUTE80]
diff --git a/configs/_base_/recog_datasets/toy_data.py b/configs/_base_/recog_datasets/toy_data.py
index 2b62eabb..77d206c2 100755
--- a/configs/_base_/recog_datasets/toy_data.py
+++ b/configs/_base_/recog_datasets/toy_data.py
@@ -1,4 +1,4 @@
-data_root = 'tests/data/recog_toy_dataset'
+data_root = 'tests/data/rec_toy_dataset'
 train_img_prefix = 'imgs/'
 train_anno_file = 'label.json'
diff --git a/configs/textdet/fcenet/fcenet_r50dcnv2_fpn_1500e_ctw1500.py b/configs/textdet/fcenet/fcenet_r50dcnv2_fpn_1500e_ctw1500.py
index c72c0a51..d0ed170a 100644
--- a/configs/textdet/fcenet/fcenet_r50dcnv2_fpn_1500e_ctw1500.py
+++ b/configs/textdet/fcenet/fcenet_r50dcnv2_fpn_1500e_ctw1500.py
@@ -1,5 +1,5 @@
 _base_ = [
-    'fcenet_r50_fpn.py',
+    'fcenet_r50dcnv2_fpn.py',
     '../../_base_/det_datasets/ctw1500.py',
     '../../_base_/default_runtime.py',
     '../../_base_/schedules/schedule_sgd_1500e.py',
diff --git a/configs/textrecog/master/master_r31_12e_ST_MJ_SA.py b/configs/textrecog/master/master_r31_12e_ST_MJ_SA.py
index 70a4a24b..f8c72187 100644
--- a/configs/textrecog/master/master_r31_12e_ST_MJ_SA.py
+++ b/configs/textrecog/master/master_r31_12e_ST_MJ_SA.py
@@ -44,7 +44,7 @@ test_pipeline = [
 train_dataloader = dict(
     batch_size=512,
-    num_workers=8,
+    num_workers=4,
     persistent_workers=True,
     sampler=dict(type='DefaultSampler', shuffle=True),
     dataset=dict(
diff --git a/configs/textrecog/nrtr/nrtr_modality_transform_academic.py b/configs/textrecog/nrtr/nrtr_modality_transform_academic.py
index 0b43228f..3f276b29 100644
--- a/configs/textrecog/nrtr/nrtr_modality_transform_academic.py
+++ b/configs/textrecog/nrtr/nrtr_modality_transform_academic.py
@@ -45,8 +45,8 @@ test_pipeline = [
 ]
 train_dataloader = dict(
-    batch_size=256,
-    num_workers=2,
+    batch_size=384,
+    num_workers=32,
     persistent_workers=True,
     sampler=dict(type='DefaultSampler', shuffle=True),
     dataset=dict(
@@ -54,7 +54,7 @@ train_dataloader = dict(
 val_dataloader = dict(
     batch_size=128,
-    num_workers=2,
+    num_workers=4,
     persistent_workers=True,
     drop_last=False,
     sampler=dict(type='DefaultSampler', shuffle=False),
diff --git a/configs/textrecog/nrtr/nrtr_modality_transform_toy_dataset.py b/configs/textrecog/nrtr/nrtr_modality_transform_toy_dataset.py
index c0e4c06f..d8a351ce 100644
--- a/configs/textrecog/nrtr/nrtr_modality_transform_toy_dataset.py
+++ b/configs/textrecog/nrtr/nrtr_modality_transform_toy_dataset.py
@@ -40,16 +40,16 @@ test_pipeline = [
 ]
 train_dataloader = dict(
-    batch_size=256,
-    num_workers=2,
+    batch_size=8,
+    num_workers=4,
     persistent_workers=True,
     sampler=dict(type='DefaultSampler', shuffle=True),
     dataset=dict(
         type='ConcatDataset', datasets=train_list, pipeline=test_pipeline))
 val_dataloader = dict(
-    batch_size=128,
-    num_workers=2,
+    batch_size=1,
+    num_workers=4,
     persistent_workers=True,
     drop_last=False,
     sampler=dict(type='DefaultSampler', shuffle=False),
diff --git a/configs/textrecog/nrtr/nrtr_r31.py b/configs/textrecog/nrtr/nrtr_r31.py
new file mode 100644
index 00000000..c0b90d90
--- /dev/null
+++ b/configs/textrecog/nrtr/nrtr_r31.py
@@ -0,0 +1,30 @@
+dictionary = dict(
+    type='Dictionary',
+    dict_file='dicts/english_digits_symbols.txt',
+    with_padding=True,
+    with_unknown=True,
+    same_start_end=True,
+    with_start=True,
+    with_end=True)
+
+model = dict(
+    type='NRTR',
+    backbone=dict(
+        type='ResNet31OCR',
+        layers=[1, 2, 5, 3],
+        channels=[32, 64, 128, 256, 512, 512],
+        stage4_pool_cfg=dict(kernel_size=(2, 1), stride=(2, 1)),
+        last_stage_pool=True),
+    encoder=dict(type='NRTREncoder'),
+    decoder=dict(
+        type='NRTRDecoder',
+        module_loss=dict(
+            type='CEModuleLoss', ignore_first_char=True, flatten=True),
+        postprocessor=dict(type='AttentionPostprocessor'),
+        dictionary=dictionary,
+        max_seq_len=30,
+    ),
+    data_preprocessor=dict(
+        type='TextRecogDataPreprocessor',
+        mean=[123.675, 116.28, 103.53],
+        std=[58.395, 57.12, 57.375]))
diff --git a/configs/textrecog/nrtr/nrtr_r31_1by16_1by8_academic.py b/configs/textrecog/nrtr/nrtr_r31_1by16_1by8_academic.py
index b4b92b0e..11f5c41c 100644
--- a/configs/textrecog/nrtr/nrtr_r31_1by16_1by8_academic.py
+++ b/configs/textrecog/nrtr/nrtr_r31_1by16_1by8_academic.py
@@ -1,5 +1,5 @@
 _base_ = [
-    '../../_base_/recog_datasets/ST_MJ_train.py',
+    'nrtr_r31.py', '../../_base_/recog_datasets/ST_MJ_train.py',
     '../../_base_/recog_datasets/academic_test.py',
     '../../_base_/default_runtime.py',
     '../../_base_/schedules/schedule_adam_step_6e.py'
@@ -13,34 +13,6 @@ test_list = {{_base_.test_list}}
 file_client_args = dict(backend='disk')
 default_hooks = dict(logger=dict(type='LoggerHook', interval=100))
-dictionary = dict(
-    type='Dictionary',
-    dict_file='dicts/english_digits_symbols.txt',
-    with_padding=True,
-    with_unknown=True,
-    same_start_end=True,
-    with_start=True,
-    with_end=True)
-
-model = dict(
-    type='NRTR',
-    backbone=dict(
-        type='ResNet31OCR',
-        layers=[1, 2, 5, 3],
-        channels=[32, 64, 128, 256, 512, 512],
-        stage4_pool_cfg=dict(kernel_size=(2, 1), stride=(2, 1)),
-        last_stage_pool=True),
-    encoder=dict(type='NRTREncoder'),
-    decoder=dict(
-        type='NRTRDecoder',
-        module_loss=dict(
-            type='CEModuleLoss', ignore_first_char=True, flatten=True),
-        postprocessor=dict(type='AttentionPostprocessor')),
-    dictionary=dictionary,
-    max_seq_len=30,
-    preprocess_cfg=dict(
-        mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]))
-
 train_pipeline = [
     dict(type='LoadImageFromFile', file_client_args=file_client_args),
     dict(type='LoadOCRAnnotations', with_text=True),
@@ -72,8 +44,8 @@ test_pipeline = [
 ]
 train_dataloader = dict(
-    batch_size=256,
-    num_workers=2,
+    batch_size=384,
+    num_workers=32,
     persistent_workers=True,
     sampler=dict(type='DefaultSampler', shuffle=True),
     dataset=dict(
@@ -81,7 +53,7 @@ train_dataloader = dict(
 val_dataloader = dict(
     batch_size=128,
-    num_workers=2,
+    num_workers=4,
     persistent_workers=True,
     drop_last=False,
     sampler=dict(type='DefaultSampler', shuffle=False),
diff --git a/configs/textrecog/nrtr/nrtr_r31_1by8_1by4_academic.py b/configs/textrecog/nrtr/nrtr_r31_1by8_1by4_academic.py
index 9621575d..cd749e93 100644
--- a/configs/textrecog/nrtr/nrtr_r31_1by8_1by4_academic.py
+++ b/configs/textrecog/nrtr/nrtr_r31_1by8_1by4_academic.py
@@ -1,5 +1,5 @@
 _base_ = [
-    '../../_base_/recog_datasets/ST_MJ_train.py',
+    'nrtr_r31.py', '../../_base_/recog_datasets/ST_MJ_train.py',
     '../../_base_/recog_datasets/academic_test.py',
     '../../_base_/default_runtime.py',
     '../../_base_/schedules/schedule_adam_step_6e.py'
@@ -14,33 +14,7 @@ test_list = {{_base_.test_list}}
 file_client_args = dict(backend='disk')
 default_hooks = dict(logger=dict(type='LoggerHook', interval=100))
-dictionary = dict(
-    type='Dictionary',
-    dict_file='dicts/english_digits_symbols.txt',
-    with_padding=True,
-    with_unknown=True,
-    same_start_end=True,
-    with_start=True,
-    with_end=True)
-
-model = dict(
-    type='NRTR',
-    backbone=dict(
-        type='ResNet31OCR',
-        layers=[1, 2, 5, 3],
-        channels=[32, 64, 128, 256, 512, 512],
-        stage4_pool_cfg=dict(kernel_size=(2, 1), stride=(2, 1)),
-        last_stage_pool=False),
-    encoder=dict(type='NRTREncoder'),
-    decoder=dict(
-        type='NRTRDecoder',
-        module_loss=dict(
-            type='CEModuleLoss', ignore_first_char=True, flatten=True),
-        postprocessor=dict(type='AttentionPostprocessor')),
-    dictionary=dictionary,
-    max_seq_len=30,
-    preprocess_cfg=dict(
-        mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]))
+model = dict(backbone=dict(last_stage_pool=False))
 train_pipeline = [
     dict(type='LoadImageFromFile', file_client_args=file_client_args),
@@ -73,8 +47,8 @@ test_pipeline = [
 ]
 train_dataloader = dict(
-    batch_size=256,
-    num_workers=2,
+    batch_size=384,
+    num_workers=32,
     persistent_workers=True,
     sampler=dict(type='DefaultSampler', shuffle=True),
     dataset=dict(
@@ -82,7 +56,7 @@ train_dataloader = dict(
 val_dataloader = dict(
     batch_size=128,
-    num_workers=2,
+    num_workers=4,
     persistent_workers=True,
     drop_last=False,
     sampler=dict(type='DefaultSampler', shuffle=False),
diff --git a/tests/data/recog_toy_dataset/imgs.lmdb/data.mdb b/tests/data/rec_toy_dataset/imgs.lmdb/data.mdb
similarity index 100%
rename from tests/data/recog_toy_dataset/imgs.lmdb/data.mdb
rename to tests/data/rec_toy_dataset/imgs.lmdb/data.mdb
diff --git a/tests/data/recog_toy_dataset/imgs.lmdb/lock.mdb b/tests/data/rec_toy_dataset/imgs.lmdb/lock.mdb
similarity index 100%
rename from tests/data/recog_toy_dataset/imgs.lmdb/lock.mdb
rename to tests/data/rec_toy_dataset/imgs.lmdb/lock.mdb
diff --git a/tests/data/recog_toy_dataset/imgs/1036169.jpg b/tests/data/rec_toy_dataset/imgs/1036169.jpg
similarity index 100%
rename from tests/data/recog_toy_dataset/imgs/1036169.jpg
rename to tests/data/rec_toy_dataset/imgs/1036169.jpg
diff --git a/tests/data/recog_toy_dataset/imgs/1058891.jpg b/tests/data/rec_toy_dataset/imgs/1058891.jpg
similarity index 100%
rename from tests/data/recog_toy_dataset/imgs/1058891.jpg
rename to tests/data/rec_toy_dataset/imgs/1058891.jpg
diff --git a/tests/data/recog_toy_dataset/imgs/1058892.jpg b/tests/data/rec_toy_dataset/imgs/1058892.jpg
similarity index 100%
rename from tests/data/recog_toy_dataset/imgs/1058892.jpg
rename to tests/data/rec_toy_dataset/imgs/1058892.jpg
diff --git a/tests/data/recog_toy_dataset/imgs/1190237.jpg b/tests/data/rec_toy_dataset/imgs/1190237.jpg
similarity index 100%
rename from tests/data/recog_toy_dataset/imgs/1190237.jpg
rename to tests/data/rec_toy_dataset/imgs/1190237.jpg
diff --git a/tests/data/recog_toy_dataset/imgs/1210236.jpg b/tests/data/rec_toy_dataset/imgs/1210236.jpg
similarity index 100%
rename from tests/data/recog_toy_dataset/imgs/1210236.jpg
rename to tests/data/rec_toy_dataset/imgs/1210236.jpg
diff --git a/tests/data/recog_toy_dataset/imgs/1223729.jpg b/tests/data/rec_toy_dataset/imgs/1223729.jpg
similarity index 100%
rename from tests/data/recog_toy_dataset/imgs/1223729.jpg
rename to tests/data/rec_toy_dataset/imgs/1223729.jpg
diff --git a/tests/data/recog_toy_dataset/imgs/1223731.jpg b/tests/data/rec_toy_dataset/imgs/1223731.jpg
similarity index 100%
rename from tests/data/recog_toy_dataset/imgs/1223731.jpg
rename to tests/data/rec_toy_dataset/imgs/1223731.jpg
diff --git a/tests/data/recog_toy_dataset/imgs/1223732.jpg b/tests/data/rec_toy_dataset/imgs/1223732.jpg
similarity index 100%
rename from tests/data/recog_toy_dataset/imgs/1223732.jpg
rename to tests/data/rec_toy_dataset/imgs/1223732.jpg
diff --git a/tests/data/recog_toy_dataset/imgs/1223733.jpg b/tests/data/rec_toy_dataset/imgs/1223733.jpg
similarity index 100%
rename from tests/data/recog_toy_dataset/imgs/1223733.jpg
rename to tests/data/rec_toy_dataset/imgs/1223733.jpg
diff --git a/tests/data/recog_toy_dataset/imgs/1240078.jpg b/tests/data/rec_toy_dataset/imgs/1240078.jpg
similarity index 100%
rename from tests/data/recog_toy_dataset/imgs/1240078.jpg
rename to tests/data/rec_toy_dataset/imgs/1240078.jpg
diff --git a/tests/data/recog_toy_dataset/label.json b/tests/data/rec_toy_dataset/label.json
similarity index 100%
rename from tests/data/recog_toy_dataset/label.json
rename to tests/data/rec_toy_dataset/label.json
diff --git a/tests/data/recog_toy_dataset/label.lmdb/data.mdb b/tests/data/rec_toy_dataset/label.lmdb/data.mdb
similarity index 100%
rename from tests/data/recog_toy_dataset/label.lmdb/data.mdb
rename to tests/data/rec_toy_dataset/label.lmdb/data.mdb
diff --git a/tests/data/recog_toy_dataset/label.lmdb/lock.mdb b/tests/data/rec_toy_dataset/label.lmdb/lock.mdb
similarity index 100%
rename from tests/data/recog_toy_dataset/label.lmdb/lock.mdb
rename to tests/data/rec_toy_dataset/label.lmdb/lock.mdb
diff --git a/tests/data/recog_toy_dataset/old_label.jsonl b/tests/data/rec_toy_dataset/old_label.jsonl
similarity index 100%
rename from tests/data/recog_toy_dataset/old_label.jsonl
rename to tests/data/rec_toy_dataset/old_label.jsonl
diff --git a/tests/data/recog_toy_dataset/old_label.txt b/tests/data/rec_toy_dataset/old_label.txt
similarity index 100%
rename from tests/data/recog_toy_dataset/old_label.txt
rename to tests/data/rec_toy_dataset/old_label.txt
diff --git a/tests/datasets/test_recog_lmdb_dataset.py b/tests/datasets/test_recog_lmdb_dataset.py
index 87180fca..c52d048b 100644
--- a/tests/datasets/test_recog_lmdb_dataset.py
+++ b/tests/datasets/test_recog_lmdb_dataset.py
@@ -28,7 +28,7 @@ class TestRecogLMDBDataset(TestCase):
         # test initialization
         dataset = RecogLMDBDataset(
-            ann_file='tests/data/recog_toy_dataset/label.lmdb',
+            ann_file='tests/data/rec_toy_dataset/label.lmdb',
             data_prefix=dict(img_path='imgs'),
             pipeline=[])
         dataset.full_init()
@@ -44,7 +44,7 @@ class TestRecogLMDBDataset(TestCase):
         # test initialization
         dataset = RecogLMDBDataset(
-            ann_file='tests/data/recog_toy_dataset/imgs.lmdb',
+            ann_file='tests/data/rec_toy_dataset/imgs.lmdb',
             data_prefix=dict(img_path='imgs'),
             pipeline=[])
         dataset.full_init()
diff --git a/tests/datasets/test_recog_text_dataset.py b/tests/datasets/test_recog_text_dataset.py
index b30ed49e..02dc7c90 100644
--- a/tests/datasets/test_recog_text_dataset.py
+++ b/tests/datasets/test_recog_text_dataset.py
@@ -10,7 +10,7 @@ class TestRecogTextDataset(TestCase):
         # test initialization
         dataset = RecogTextDataset(
-            ann_file='tests/data/recog_toy_dataset/old_label.txt',
+            ann_file='tests/data/rec_toy_dataset/old_label.txt',
             data_prefix=dict(img_path='imgs'),
             parser_cfg=dict(
                 type='LineStrParser',
@@ -30,7 +30,7 @@ class TestRecogTextDataset(TestCase):
     def test_jsonl_dataset(self):
         dataset = RecogTextDataset(
-            ann_file='tests/data/recog_toy_dataset/old_label.jsonl',
+            ann_file='tests/data/rec_toy_dataset/old_label.jsonl',
             data_prefix=dict(img_path='imgs'),
             parser_cfg=dict(type='LineJsonParser', keys=['filename', 'text']),
             pipeline=[])
diff --git a/tests/datasets/transforms/test_loading.py b/tests/datasets/transforms/test_loading.py
index b8526c9b..dc0de156 100644
--- a/tests/datasets/transforms/test_loading.py
+++ b/tests/datasets/transforms/test_loading.py
@@ -142,12 +142,12 @@ class TestLoadImageFromLMDB(TestCase):
     def setUp(self):
         img_key = 'image-%09d' % 1
         self.results1 = {
-            'img_path': f'tests/data/recog_toy_dataset/imgs.lmdb/{img_key}'
+            'img_path': f'tests/data/rec_toy_dataset/imgs.lmdb/{img_key}'
         }
         img_key = 'image-%09d' % 100
         self.results2 = {
-            'img_path': f'tests/data/recog_toy_dataset/imgs.lmdb/{img_key}'
+            'img_path': f'tests/data/rec_toy_dataset/imgs.lmdb/{img_key}'
         }
     def test_transform(self):