From d594ab122e3eeaec011e44934785641681a0dd6b Mon Sep 17 00:00:00 2001 From: YueXy Date: Sat, 3 Apr 2021 00:52:49 +0800 Subject: [PATCH] [feature]: add CRNN and RobustScanner --- README.md | 2 +- configs/textrecog/crnn/README.md | 37 +++ .../textrecog/crnn/crnn_academic_dataset.py | 155 +++++++++++++ configs/textrecog/crnn/crnn_toy_dataset.py | 6 + .../robust_scanner_toy_dataset.py | 12 + .../robustscanner_r31_academic.py | 198 ++++++++++++++++ mmocr/core/evaluation/hmean_ic13.py | 216 ++++++++++++++++++ mmocr/core/evaluation/hmean_iou.py | 116 ++++++++++ .../textrecog/backbones/very_deep_vgg.py | 64 ++++++ .../models/textrecog/decoders/crnn_decoder.py | 49 ++++ .../decoders/position_attention_decoder.py | 138 +++++++++++ .../decoders/robust_scanner_decoder.py | 107 +++++++++ .../decoders/sequence_attention_decoder.py | 165 +++++++++++++ .../encoders/channel_reduction_encoder.py | 23 ++ .../layers/dot_product_attention_layer.py | 27 +++ .../textrecog/layers/position_aware_layer.py | 35 +++ .../layers/robust_scanner_fusion_layer.py | 22 ++ .../textrecog/recognizer/robust_scanner.py | 10 + resources/mmocr-logo.jpg | Bin 11242 -> 0 bytes resources/mmocr-logo.png | Bin 0 -> 31646 bytes 20 files changed, 1381 insertions(+), 1 deletion(-) create mode 100644 configs/textrecog/crnn/README.md create mode 100644 configs/textrecog/crnn/crnn_academic_dataset.py create mode 100644 configs/textrecog/crnn/crnn_toy_dataset.py create mode 100644 configs/textrecog/robust_scanner/robust_scanner_toy_dataset.py create mode 100644 configs/textrecog/robust_scanner/robustscanner_r31_academic.py create mode 100644 mmocr/core/evaluation/hmean_ic13.py create mode 100644 mmocr/core/evaluation/hmean_iou.py create mode 100644 mmocr/models/textrecog/backbones/very_deep_vgg.py create mode 100644 mmocr/models/textrecog/decoders/crnn_decoder.py create mode 100644 mmocr/models/textrecog/decoders/position_attention_decoder.py create mode 100644 mmocr/models/textrecog/decoders/robust_scanner_decoder.py create mode 100644 mmocr/models/textrecog/decoders/sequence_attention_decoder.py create mode 100644 mmocr/models/textrecog/encoders/channel_reduction_encoder.py create mode 100644 mmocr/models/textrecog/layers/dot_product_attention_layer.py create mode 100644 mmocr/models/textrecog/layers/position_aware_layer.py create mode 100644 mmocr/models/textrecog/layers/robust_scanner_fusion_layer.py create mode 100644 mmocr/models/textrecog/recognizer/robust_scanner.py delete mode 100644 resources/mmocr-logo.jpg create mode 100644 resources/mmocr-logo.png diff --git a/README.md b/README.md index c553ca6a..2c80e608 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@
-<img src="resources/mmocr-logo.jpg"/>
+<img src="resources/mmocr-logo.png"/>
 </div>
 
 ## Introduction
diff --git a/configs/textrecog/crnn/README.md b/configs/textrecog/crnn/README.md
new file mode 100644
index 00000000..6846217e
--- /dev/null
+++ b/configs/textrecog/crnn/README.md
@@ -0,0 +1,37 @@
+# An end-to-end trainable neural network for image-based sequence recognition and its application to scene text recognition
+
+## Introduction
+
+[ALGORITHM]
+
+```latex
+@article{shi2016end,
+  title={An end-to-end trainable neural network for image-based sequence recognition and its application to scene text recognition},
+  author={Shi, Baoguang and Bai, Xiang and Yao, Cong},
+  journal={IEEE transactions on pattern analysis and machine intelligence},
+  year={2016}
+}
+```
+
+## Dataset
+
+### Train Dataset
+
+| trainset | instance_num | repeat_num | note  |
+| :------: | :----------: | :--------: | :---: |
+|  Syn90k  |   8919273    |     1      | synth |
+
+### Test Dataset
+
+| testset | instance_num |  note   |
+| :-----: | :----------: | :-----: |
+| IIIT5K  |     3000     | regular |
+|   SVT   |     647      | regular |
+|  IC13   |     1015     | regular |
+
+## Results and models
+
+IIIT5K, SVT and IC13 are regular-text benchmarks; IC15, SVTP and CT80 are irregular-text benchmarks.
+
+| methods | IIIT5K | SVT  | IC13 | IC15 | SVTP | CT80 | download |
+| :-----: | :----: | :--: | :--: | :--: | :--: | :--: | :------: |
+|  CRNN   |  80.5  | 81.5 | 86.5 |  -   |  -   |  -   | [config](https://download.openmmlab.com/mmocr/textrecog/crnn/crnn_academic_dataset.py) [log]() [model](https) |
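For reference, the academic config that follows can be sanity-checked with mmcv's config loader before launching a job (a minimal sketch, assuming the MMOCR repo root as the working directory; `Config` is mmcv's standard loader, not part of this patch):

```python
# Sketch: load the new CRNN config and inspect what it defines.
from mmcv import Config

cfg = Config.fromfile('configs/textrecog/crnn/crnn_academic_dataset.py')
print(cfg.model.backbone.type)   # 'VeryDeepVgg'
print(cfg.model.decoder.type)    # 'CRNNDecoder'
print(cfg.data.samples_per_gpu)  # 64
```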
diff --git a/configs/textrecog/crnn/crnn_academic_dataset.py b/configs/textrecog/crnn/crnn_academic_dataset.py
new file mode 100644
index 00000000..2762fc01
--- /dev/null
+++ b/configs/textrecog/crnn/crnn_academic_dataset.py
@@ -0,0 +1,155 @@
+_base_ = []
+checkpoint_config = dict(interval=1)
+# yapf:disable
+log_config = dict(
+    interval=1,
+    hooks=[
+        dict(type='TextLoggerHook')
+    ])
+# yapf:enable
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
+
+# model
+label_convertor = dict(
+    type='CTCConvertor', dict_type='DICT36', with_unknown=False, lower=True)
+
+model = dict(
+    type='CRNNNet',
+    preprocessor=None,
+    backbone=dict(type='VeryDeepVgg', leakyRelu=False, input_channels=1),
+    encoder=None,
+    decoder=dict(type='CRNNDecoder', in_channels=512, rnn_flag=True),
+    loss=dict(type='CTCLoss'),
+    label_convertor=label_convertor,
+    pretrained=None)
+
+train_cfg = None
+test_cfg = None
+
+# optimizer
+optimizer = dict(type='Adadelta', lr=1.0)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(policy='step', step=[])
+total_epochs = 5
+
+# data
+img_norm_cfg = dict(mean=[0.5], std=[0.5])
+
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='ResizeOCR',
+        height=32,
+        min_width=100,
+        max_width=100,
+        keep_aspect_ratio=False),
+    dict(type='ToTensorOCR'),
+    dict(type='NormalizeOCR', **img_norm_cfg),
+    dict(
+        type='Collect',
+        keys=['img'],
+        meta_keys=[
+            'filename', 'ori_shape', 'img_shape', 'text', 'valid_ratio'
+        ]),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='ResizeOCR',
+        height=32,
+        min_width=4,
+        max_width=None,
+        keep_aspect_ratio=True),
+    dict(type='ToTensorOCR'),
+    dict(type='NormalizeOCR', **img_norm_cfg),
+    dict(
+        type='Collect',
+        keys=['img'],
+        meta_keys=['filename', 'ori_shape', 'img_shape', 'valid_ratio']),
+]
+
+dataset_type = 'OCRDataset'
+
+train_img_prefix = 'data/mixture/mnt/ramdisk/max/90kDICT32px'
+train_ann_file = 'data/mixture/mnt/ramdisk/max/90kDICT32px/label.txt'
+
+train1 = dict(
+    type=dataset_type,
+    img_prefix=train_img_prefix,
+    ann_file=train_ann_file,
+    loader=dict(
+        type='HardDiskLoader',
+        repeat=1,
+        parser=dict(
+            type='LineStrParser',
+            keys=['filename', 'text'],
+            keys_idx=[0, 1],
+            separator=' ')),
+    pipeline=train_pipeline,
+    test_mode=False)
+
+test_img_prefix = 'data/mixture/'
+ic13_path = 'testset/icdar_2013/Challenge2_Test_Task3_Images/'
+test_img_prefix1 = test_img_prefix + ic13_path
+test_img_prefix2 = test_img_prefix + 'testset/IIIT5K/'
+test_img_prefix3 = test_img_prefix + 'testset/svt/'
+
+test_ann_prefix = 'data/mixture/'
+test_ann_file1 = test_ann_prefix + 'testset/icdar_2013/test_label_1015.txt'
+test_ann_file2 = test_ann_prefix + 'testset/IIIT5K/label.txt'
+test_ann_file3 = test_ann_prefix + 'testset/svt/test_list.txt'
+
+test1 = dict(
+    type=dataset_type,
+    img_prefix=test_img_prefix1,
+    ann_file=test_ann_file1,
+    loader=dict(
+        type='HardDiskLoader',
+        repeat=1,
+        parser=dict(
+            type='LineStrParser',
+            keys=['filename', 'text'],
+            keys_idx=[0, 1],
+            separator=' ')),
+    pipeline=test_pipeline,
+    test_mode=True)
+
+test2 = {key: value for key, value in test1.items()}
+test2['img_prefix'] = test_img_prefix2
+test2['ann_file'] = test_ann_file2
+
+test3 = {key: value for key, value in test1.items()}
+test3['img_prefix'] = test_img_prefix3
+test3['ann_file'] = test_ann_file3
+
+data = dict(
+    samples_per_gpu=64,
+    workers_per_gpu=4,
+    train=dict(type='ConcatDataset', datasets=[train1]),
+    val=dict(type='ConcatDataset', datasets=[test1, test2, test3]),
+    test=dict(type='ConcatDataset', datasets=[test1, test2, test3]))
+
+evaluation = dict(interval=1, metric='acc')
+
+cudnn_benchmark = True
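The `LineStrParser` settings in the config above assume one sample per annotation line, with `filename` and `text` split on a single space. A toy restatement of what the `keys`/`keys_idx`/`separator` arguments imply (the annotation line is hypothetical, and this is not MMOCR's actual parser code):

```python
# Sketch: how a LineStrParser-style split maps one annotation line to fields.
line = '2697/6/466_MONIKER_49537.jpg MONIKER'  # hypothetical Syn90k-style entry
keys, keys_idx, separator = ['filename', 'text'], [0, 1], ' '

parts = line.split(separator)
info = {key: parts[idx] for key, idx in zip(keys, keys_idx)}
print(info)  # {'filename': '2697/6/466_MONIKER_49537.jpg', 'text': 'MONIKER'}
```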
diff --git a/configs/textrecog/crnn/crnn_toy_dataset.py b/configs/textrecog/crnn/crnn_toy_dataset.py
new file mode 100644
index 00000000..76854024
--- /dev/null
+++ b/configs/textrecog/crnn/crnn_toy_dataset.py
@@ -0,0 +1,6 @@
+_base_ = [
+    '../../_base_/schedules/schedule_adadelta_8e.py',
+    '../../_base_/default_runtime.py',
+    '../../_base_/recog_datasets/toy_dataset.py',
+    '../../_base_/recog_models/crnn.py'
+]
diff --git a/configs/textrecog/robust_scanner/robust_scanner_toy_dataset.py b/configs/textrecog/robust_scanner/robust_scanner_toy_dataset.py
new file mode 100644
index 00000000..6fc6c125
--- /dev/null
+++ b/configs/textrecog/robust_scanner/robust_scanner_toy_dataset.py
@@ -0,0 +1,12 @@
+_base_ = [
+    '../../_base_/default_runtime.py',
+    '../../_base_/recog_models/robust_scanner.py',
+    '../../_base_/recog_datasets/toy_dataset.py'
+]
+
+# optimizer
+optimizer = dict(type='Adam', lr=1e-3)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(policy='step', step=[3, 4])
+total_epochs = 6
diff --git a/configs/textrecog/robust_scanner/robustscanner_r31_academic.py b/configs/textrecog/robust_scanner/robustscanner_r31_academic.py
new file mode 100644
index 00000000..7fd9c1b9
--- /dev/null
+++ b/configs/textrecog/robust_scanner/robustscanner_r31_academic.py
@@ -0,0 +1,198 @@
+_base_ = [
+    '../../_base_/default_runtime.py',
+    '../../_base_/recog_models/robust_scanner.py'
+]
+
+# optimizer
+optimizer = dict(type='Adam', lr=1e-3)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(policy='step', step=[3, 4])
+total_epochs = 5
+
+img_norm_cfg = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='ResizeOCR',
+        height=48,
+        min_width=48,
+        max_width=160,
+        keep_aspect_ratio=True,
+        width_downsample_ratio=0.25),
+    dict(type='ToTensorOCR'),
+    dict(type='NormalizeOCR', **img_norm_cfg),
+    dict(
+        type='Collect',
+        keys=['img'],
+        meta_keys=[
+            'filename', 'ori_shape', 'img_shape', 'text', 'valid_ratio'
+        ]),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiRotateAugOCR',
+        rotate_degrees=[0, 90, 270],
+        transforms=[
+            dict(
+                type='ResizeOCR',
+                height=48,
+                min_width=48,
+                max_width=160,
+                keep_aspect_ratio=True,
+                width_downsample_ratio=0.25),
+            dict(type='ToTensorOCR'),
+            dict(type='NormalizeOCR', **img_norm_cfg),
+            dict(
+                type='Collect',
+                keys=['img'],
+                meta_keys=[
+                    'filename', 'ori_shape', 'img_shape', 'valid_ratio'
+                ]),
+        ])
+]
+
+dataset_type = 'OCRDataset'
+
+prefix = 'data/mixture/'
+
+train_img_prefix1 = prefix + 'icdar_2011/Challenge1_Training_Task3_Images_GT'
+train_img_prefix2 = prefix + 'icdar_2013/recog_train_data/' + \
+    'Challenge2_Training_Task3_Images_GT'
+train_img_prefix3 = prefix + 'icdar_2015/ch4_training_word_images_gt'
+train_img_prefix4 = prefix + 'coco_text/train_words'
+train_img_prefix5 = prefix + 'III5K'
+train_img_prefix6 = prefix + 'SynthText_Add/SynthText_Add'
+train_img_prefix7 = prefix + 'SynthText/synthtext/SynthText_patch_horizontal'
+train_img_prefix8 = prefix + 'mnt/ramdisk/max/90kDICT32px'
+
+train_ann_file1 = prefix + 'icdar_2011/training_label_fix.txt'
+train_ann_file2 = prefix + 'icdar_2013/recog_train_data/train_label.txt'
+train_ann_file3 = prefix + 'icdar_2015/training_label_fix.txt'
+train_ann_file4 = prefix + 'coco_text/train_label.txt'
+train_ann_file5 = prefix + 'III5K/train_label.txt'
+train_ann_file6 = prefix + 'SynthText_Add/SynthText_Add/' + \
+    'annotationlist/label.lmdb'
+train_ann_file7 = prefix + 'SynthText/synthtext/shuffle_labels.lmdb'
+train_ann_file8 = prefix + 'mnt/ramdisk/max/90kDICT32px/shuffle_labels.lmdb'
+
+train1 = dict(
+    type=dataset_type,
+    img_prefix=train_img_prefix1,
+    ann_file=train_ann_file1,
+    loader=dict(
+        type='HardDiskLoader',
+        repeat=20,
+        parser=dict(
+            type='LineStrParser',
+            keys=['filename', 'text'],
+            keys_idx=[0, 1],
+            separator=' ')),
+    pipeline=train_pipeline,
+    test_mode=False)
+
+train2 = {key: value for key, value in train1.items()}
+train2['img_prefix'] = train_img_prefix2
+train2['ann_file'] = train_ann_file2
+
+train3 = {key: value for key, value in train1.items()}
+train3['img_prefix'] = train_img_prefix3
+train3['ann_file'] = train_ann_file3
+
+train4 = {key: value for key, value in train1.items()}
+train4['img_prefix'] = train_img_prefix4
+train4['ann_file'] = train_ann_file4
+
+train5 = {key: value for key, value in train1.items()}
+train5['img_prefix'] = train_img_prefix5
+train5['ann_file'] = train_ann_file5
+
+train6 = dict(
+    type=dataset_type,
+    img_prefix=train_img_prefix6,
+    ann_file=train_ann_file6,
+    loader=dict(
+        type='LmdbLoader',
+        repeat=1,
+        parser=dict(
+            type='LineStrParser',
+            keys=['filename', 'text'],
+            keys_idx=[0, 1],
+            separator=' ')),
+    pipeline=train_pipeline,
+    test_mode=False)
+
+train7 = {key: value for key, value in train6.items()}
+train7['img_prefix'] = train_img_prefix7
+train7['ann_file'] = train_ann_file7
+
+train8 = {key: value for key, value in train6.items()}
+train8['img_prefix'] = 
train_img_prefix8 +train8['ann_file'] = train_ann_file8 + +test_img_prefix1 = prefix + 'testset/IIIT5K/' +test_img_prefix2 = prefix + 'testset/svt/' +test_img_prefix3 = prefix + 'testset/icdar_2013/Challenge2_Test_Task3_Images/' +test_img_prefix4 = prefix + 'testset/icdar_2015/ch4_test_word_images_gt' +test_img_prefix5 = prefix + 'testset/svtp/' +test_img_prefix6 = prefix + 'testset/ct80/' + +test_ann_file1 = prefix + 'testset/IIIT5K/label.txt' +test_ann_file2 = prefix + 'testset/svt/test_list.txt' +test_ann_file3 = prefix + 'testset/icdar_2013/test_label_1015.txt' +test_ann_file4 = prefix + 'testset/icdar_2015/test_label.txt' +test_ann_file5 = prefix + 'testset/svtp/imagelist.txt' +test_ann_file6 = prefix + 'testset/ct80/imagelist.txt' + +test1 = dict( + type=dataset_type, + img_prefix=test_img_prefix1, + ann_file=test_ann_file1, + loader=dict( + type='HardDiskLoader', + repeat=1, + parser=dict( + type='LineStrParser', + keys=['filename', 'text'], + keys_idx=[0, 1], + separator=' ')), + pipeline=test_pipeline, + test_mode=True) + +test2 = {key: value for key, value in test1.items()} +test2['img_prefix'] = test_img_prefix2 +test2['ann_file'] = test_ann_file2 + +test3 = {key: value for key, value in test1.items()} +test3['img_prefix'] = test_img_prefix3 +test3['ann_file'] = test_ann_file3 + +test4 = {key: value for key, value in test1.items()} +test4['img_prefix'] = test_img_prefix4 +test4['ann_file'] = test_ann_file4 + +test5 = {key: value for key, value in test1.items()} +test5['img_prefix'] = test_img_prefix5 +test5['ann_file'] = test_ann_file5 + +test6 = {key: value for key, value in test1.items()} +test6['img_prefix'] = test_img_prefix6 +test6['ann_file'] = test_ann_file6 + +data = dict( + samples_per_gpu=64, + workers_per_gpu=2, + train=dict( + type='ConcatDataset', + datasets=[ + train1, train2, train3, train4, train5, train6, train7, train8 + ]), + val=dict( + type='ConcatDataset', + datasets=[test1, test2, test3, test4, test5, test6]), + test=dict( + type='ConcatDataset', + datasets=[test1, test2, test3, test4, test5, test6])) + +evaluation = dict(interval=1, metric='acc') diff --git a/mmocr/core/evaluation/hmean_ic13.py b/mmocr/core/evaluation/hmean_ic13.py new file mode 100644 index 00000000..49ba3ff1 --- /dev/null +++ b/mmocr/core/evaluation/hmean_ic13.py @@ -0,0 +1,216 @@ +import numpy as np + +import mmocr.utils as utils +from . import utils as eval_utils + + +def compute_recall_precision(gt_polys, pred_polys): + """Compute the recall and the precision matrices between gt and predicted + polygons. + + Args: + gt_polys (list[Polygon]): List of gt polygons. + pred_polys (list[Polygon]): List of predicted polygons. + + Returns: + recall (ndarray): Recall matrix of size gt_num x det_num. + precision (ndarray): Precision matrix of size gt_num x det_num. 
+    """
+    assert isinstance(gt_polys, list)
+    assert isinstance(pred_polys, list)
+
+    gt_num = len(gt_polys)
+    det_num = len(pred_polys)
+    sz = [gt_num, det_num]
+
+    recall = np.zeros(sz)
+    precision = np.zeros(sz)
+    # compute area recall and precision for each (gt, det) pair
+    # in one img
+    for gt_id in range(gt_num):
+        for pred_id in range(det_num):
+            gt = gt_polys[gt_id]
+            det = pred_polys[pred_id]
+
+            inter_area, _ = eval_utils.poly_intersection(det, gt)
+            gt_area = gt.area()
+            det_area = det.area()
+            if gt_area != 0:
+                recall[gt_id, pred_id] = inter_area / gt_area
+            if det_area != 0:
+                precision[gt_id, pred_id] = inter_area / det_area
+
+    return recall, precision
+
+
+def eval_hmean_ic13(det_boxes,
+                    gt_boxes,
+                    gt_ignored_boxes,
+                    precision_thr=0.4,
+                    recall_thr=0.8,
+                    center_dist_thr=1.0,
+                    one2one_score=1.,
+                    one2many_score=0.8,
+                    many2one_score=1.):
+    """Evaluate hmean of text detection using the ICDAR 2013 standard.
+
+    Args:
+        det_boxes (list[list[list[float]]]): List of arrays of shape (n, 2k).
+            Each element is the det_boxes for one img. k>=4.
+        gt_boxes (list[list[list[float]]]): List of arrays of shape (m, 2k).
+            Each element is the gt_boxes for one img. k>=4.
+        gt_ignored_boxes (list[list[list[float]]]): List of arrays of
+            (l, 2k). Each element is the ignored gt_boxes for one img. k>=4.
+        precision_thr (float): Precision threshold of the iou of one
+            (gt_box, det_box) pair.
+        recall_thr (float): Recall threshold of the iou of one
+            (gt_box, det_box) pair.
+        center_dist_thr (float): Distance threshold of one (gt_box, det_box)
+            center point pair.
+        one2one_score (float): Reward when one gt matches one det_box.
+        one2many_score (float): Reward when one gt matches many det_boxes.
+        many2one_score (float): Reward when many gts match one det_box.
+
+    Returns:
+        hmean (tuple[dict]): Tuple of dicts which encodes the hmean for
+            the dataset and all images.
+    """
+    assert utils.is_3dlist(det_boxes)
+    assert utils.is_3dlist(gt_boxes)
+    assert utils.is_3dlist(gt_ignored_boxes)
+
+    assert 0 <= precision_thr <= 1
+    assert 0 <= recall_thr <= 1
+    assert center_dist_thr > 0
+    assert 0 <= one2one_score <= 1
+    assert 0 <= one2many_score <= 1
+    assert 0 <= many2one_score <= 1
+
+    img_num = len(det_boxes)
+    assert img_num == len(gt_boxes)
+    assert img_num == len(gt_ignored_boxes)
+
+    dataset_gt_num = 0
+    dataset_pred_num = 0
+    dataset_hit_recall = 0.0
+    dataset_hit_prec = 0.0
+
+    img_results = []
+
+    for i in range(img_num):
+        gt = gt_boxes[i]
+        gt_ignored = gt_ignored_boxes[i]
+        pred = det_boxes[i]
+
+        gt_num = len(gt)
+        ignored_num = len(gt_ignored)
+        pred_num = len(pred)
+
+        accum_recall = 0.
+        accum_precision = 0.
+
+        gt_points = gt + gt_ignored
+        gt_polys = [eval_utils.points2polygon(p) for p in gt_points]
+        gt_ignored_index = [gt_num + i for i in range(len(gt_ignored))]
+        gt_num = len(gt_polys)
+
+        pred_polys, pred_points, pred_ignored_index = eval_utils.ignore_pred(
+            pred, gt_ignored_index, gt_polys, precision_thr)
+
+        if pred_num > 0 and gt_num > 0:
+
+            gt_hit = np.zeros(gt_num, np.int8).tolist()
+            pred_hit = np.zeros(pred_num, np.int8).tolist()
+
+            # compute area recall and precision for each (gt, pred) pair
+            # in one img.
+            recall_mat, precision_mat = compute_recall_precision(
+                gt_polys, pred_polys)
+
+            # match one gt to one pred box.
+            for gt_id in range(gt_num):
+                for pred_id in range(pred_num):
+                    if gt_hit[gt_id] != 0 or pred_hit[
+                            pred_id] != 0 or gt_id in gt_ignored_index \
+                            or pred_id in pred_ignored_index:
+                        continue
+                    match = eval_utils.one2one_match_ic13(
+                        gt_id, pred_id, recall_mat, precision_mat, recall_thr,
+                        precision_thr)
+
+                    if match:
+                        gt_point = np.array(gt_points[gt_id])
+                        det_point = np.array(pred_points[pred_id])
+
+                        norm_dist = eval_utils.box_center_distance(
+                            det_point, gt_point)
+                        norm_dist /= eval_utils.box_diag(
+                            det_point) + eval_utils.box_diag(gt_point)
+                        norm_dist *= 2.0
+
+                        if norm_dist < center_dist_thr:
+                            gt_hit[gt_id] = 1
+                            pred_hit[pred_id] = 1
+                            accum_recall += one2one_score
+                            accum_precision += one2one_score
+
+            # match one gt to many det boxes.
+            for gt_id in range(gt_num):
+                if gt_id in gt_ignored_index:
+                    continue
+                match, match_det_set = eval_utils.one2many_match_ic13(
+                    gt_id, recall_mat, precision_mat, recall_thr,
+                    precision_thr, gt_hit, pred_hit, pred_ignored_index)
+
+                if match:
+                    gt_hit[gt_id] = 1
+                    accum_recall += one2many_score
+                    accum_precision += one2many_score * len(match_det_set)
+                    for pred_id in match_det_set:
+                        pred_hit[pred_id] = 1
+
+            # match many gt to one det box. One pair of (det,gt) are matched
+            # successfully if their recall, precision, normalized distance
+            # meet some thresholds.
+            for pred_id in range(pred_num):
+                if pred_id in pred_ignored_index:
+                    continue
+
+                match, match_gt_set = eval_utils.many2one_match_ic13(
+                    pred_id, recall_mat, precision_mat, recall_thr,
+                    precision_thr, gt_hit, pred_hit, gt_ignored_index)
+
+                if match:
+                    pred_hit[pred_id] = 1
+                    accum_recall += many2one_score * len(match_gt_set)
+                    accum_precision += many2one_score
+                    for gt_id in match_gt_set:
+                        gt_hit[gt_id] = 1
+
+        gt_care_number = gt_num - ignored_num
+        pred_care_number = pred_num - len(pred_ignored_index)
+
+        r, p, h = eval_utils.compute_hmean(accum_recall, accum_precision,
+                                           gt_care_number, pred_care_number)
+
+        img_results.append({'recall': r, 'precision': p, 'hmean': h})
+
+        dataset_gt_num += gt_care_number
+        dataset_pred_num += pred_care_number
+        dataset_hit_recall += accum_recall
+        dataset_hit_prec += accum_precision
+
+    total_r, total_p, total_h = eval_utils.compute_hmean(
+        dataset_hit_recall, dataset_hit_prec, dataset_gt_num, dataset_pred_num)
+
+    dataset_results = {
+        'num_gts': dataset_gt_num,
+        'num_dets': dataset_pred_num,
+        'num_recall': dataset_hit_recall,
+        'num_precision': dataset_hit_prec,
+        'recall': total_r,
+        'precision': total_p,
+        'hmean': total_h
+    }
+
+    return dataset_results, img_results
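`eval_utils.compute_hmean` lives in `mmocr/core/evaluation/utils.py` and is not part of this patch; for intuition, the sketch below only restates the harmonic-mean (F-measure) arithmetic it is fed above, with illustrative numbers:

```python
# Sketch: F-measure over accumulated hit/care counts (illustrative only).
def hmean(hit_recall, hit_prec, gt_care, pred_care):
    recall = hit_recall / gt_care if gt_care else 0.0
    precision = hit_prec / pred_care if pred_care else 0.0
    denom = recall + precision
    return 2 * recall * precision / denom if denom else 0.0

# e.g. 80 matched out of 100 gt boxes and out of 90 predictions:
print(hmean(80, 80, 100, 90))  # recall 0.8, precision ~0.889, hmean ~0.842
```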
diff --git a/mmocr/core/evaluation/hmean_iou.py b/mmocr/core/evaluation/hmean_iou.py
new file mode 100644
index 00000000..4c7bad6c
--- /dev/null
+++ b/mmocr/core/evaluation/hmean_iou.py
@@ -0,0 +1,116 @@
+import numpy as np
+
+import mmocr.utils as utils
+from . import utils as eval_utils
+
+
+def eval_hmean_iou(pred_boxes,
+                   gt_boxes,
+                   gt_ignored_boxes,
+                   iou_thr=0.5,
+                   precision_thr=0.5):
+    """Evaluate hmean of text detection using the IoU standard.
+
+    Args:
+        pred_boxes (list[list[list[float]]]): Text boxes for an img list. Each
+            box has 2k (>=8) values.
+        gt_boxes (list[list[list[float]]]): Ground truth text boxes for an img
+            list. Each box has 2k (>=8) values.
+        gt_ignored_boxes (list[list[list[float]]]): Ignored ground truth text
+            boxes for an img list. Each box has 2k (>=8) values.
+        iou_thr (float): IoU threshold when one (gt_box, det_box) pair is
+            matched.
+        precision_thr (float): Precision threshold when one (gt_box, det_box)
+            pair is matched.
+ + Returns: + hmean (tuple[dict]): Tuple of dicts indicates the hmean for the dataset + and all images. + """ + assert utils.is_3dlist(pred_boxes) + assert utils.is_3dlist(gt_boxes) + assert utils.is_3dlist(gt_ignored_boxes) + assert 0 <= iou_thr <= 1 + assert 0 <= precision_thr <= 1 + + img_num = len(pred_boxes) + assert img_num == len(gt_boxes) + assert img_num == len(gt_ignored_boxes) + + dataset_gt_num = 0 + dataset_pred_num = 0 + dataset_hit_num = 0 + + img_results = [] + + for i in range(img_num): + gt = gt_boxes[i] + gt_ignored = gt_ignored_boxes[i] + pred = pred_boxes[i] + + gt_num = len(gt) + gt_ignored_num = len(gt_ignored) + pred_num = len(pred) + + hit_num = 0 + + # get gt polygons. + gt_all = gt + gt_ignored + gt_polys = [eval_utils.points2polygon(p) for p in gt_all] + gt_ignored_index = [gt_num + i for i in range(len(gt_ignored))] + gt_num = len(gt_polys) + pred_polys, _, pred_ignored_index = eval_utils.ignore_pred( + pred, gt_ignored_index, gt_polys, precision_thr) + + # match. + if gt_num > 0 and pred_num > 0: + sz = [gt_num, pred_num] + iou_mat = np.zeros(sz) + + gt_hit = np.zeros(gt_num, np.int8) + pred_hit = np.zeros(pred_num, np.int8) + + for gt_id in range(gt_num): + for pred_id in range(pred_num): + gt_pol = gt_polys[gt_id] + det_pol = pred_polys[pred_id] + + iou_mat[gt_id, + pred_id] = eval_utils.poly_iou(det_pol, gt_pol) + + for gt_id in range(gt_num): + for pred_id in range(pred_num): + if gt_hit[gt_id] != 0 or pred_hit[ + pred_id] != 0 or gt_id in gt_ignored_index \ + or pred_id in pred_ignored_index: + continue + if iou_mat[gt_id, pred_id] > iou_thr: + gt_hit[gt_id] = 1 + pred_hit[pred_id] = 1 + hit_num += 1 + + gt_care_number = gt_num - gt_ignored_num + pred_care_number = pred_num - len(pred_ignored_index) + + r, p, h = eval_utils.compute_hmean(hit_num, hit_num, gt_care_number, + pred_care_number) + + img_results.append({'recall': r, 'precision': p, 'hmean': h}) + + dataset_hit_num += hit_num + dataset_gt_num += gt_care_number + dataset_pred_num += pred_care_number + + dataset_r, dataset_p, dataset_h = eval_utils.compute_hmean( + dataset_hit_num, dataset_hit_num, dataset_gt_num, dataset_pred_num) + + dataset_results = { + 'num_gts': dataset_gt_num, + 'num_dets': dataset_pred_num, + 'num_match': dataset_hit_num, + 'recall': dataset_r, + 'precision': dataset_p, + 'hmean': dataset_h + } + + return dataset_results, img_results diff --git a/mmocr/models/textrecog/backbones/very_deep_vgg.py b/mmocr/models/textrecog/backbones/very_deep_vgg.py new file mode 100644 index 00000000..56ea84bf --- /dev/null +++ b/mmocr/models/textrecog/backbones/very_deep_vgg.py @@ -0,0 +1,64 @@ +import torch.nn as nn +from mmcv.cnn import uniform_init, xavier_init + +from mmdet.models.builder import BACKBONES + + +@BACKBONES.register_module() +class VeryDeepVgg(nn.Module): + + def __init__(self, leakyRelu=True, input_channels=3): + super().__init__() + + ks = [3, 3, 3, 3, 3, 3, 2] + ps = [1, 1, 1, 1, 1, 1, 0] + ss = [1, 1, 1, 1, 1, 1, 1] + nm = [64, 128, 256, 256, 512, 512, 512] + + self.channels = nm + + cnn = nn.Sequential() + + def convRelu(i, batchNormalization=False): + nIn = input_channels if i == 0 else nm[i - 1] + nOut = nm[i] + cnn.add_module('conv{0}'.format(i), + nn.Conv2d(nIn, nOut, ks[i], ss[i], ps[i])) + if batchNormalization: + cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nOut)) + if leakyRelu: + cnn.add_module('relu{0}'.format(i), + nn.LeakyReLU(0.2, inplace=True)) + else: + cnn.add_module('relu{0}'.format(i), nn.ReLU(True)) + + convRelu(0) + 
cnn.add_module('pooling{0}'.format(0), nn.MaxPool2d(2, 2)) # 64x16x64 + convRelu(1) + cnn.add_module('pooling{0}'.format(1), nn.MaxPool2d(2, 2)) # 128x8x32 + convRelu(2, True) + convRelu(3) + cnn.add_module('pooling{0}'.format(2), + nn.MaxPool2d((2, 2), (2, 1), (0, 1))) # 256x4x16 + convRelu(4, True) + convRelu(5) + cnn.add_module('pooling{0}'.format(3), + nn.MaxPool2d((2, 2), (2, 1), (0, 1))) # 512x2x16 + convRelu(6, True) # 512x1x16 + + self.cnn = cnn + + def init_weights(self, pretrained=None): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + xavier_init(m) + elif isinstance(m, nn.BatchNorm2d): + uniform_init(m) + + def out_channels(self): + return self.channels[-1] + + def forward(self, x): + output = self.cnn(x) + + return output diff --git a/mmocr/models/textrecog/decoders/crnn_decoder.py b/mmocr/models/textrecog/decoders/crnn_decoder.py new file mode 100644 index 00000000..1ce5226a --- /dev/null +++ b/mmocr/models/textrecog/decoders/crnn_decoder.py @@ -0,0 +1,49 @@ +import torch.nn as nn +from mmcv.cnn import xavier_init + +from mmocr.models.builder import DECODERS +from mmocr.models.textrecog.layers import BidirectionalLSTM +from .base_decoder import BaseDecoder + + +@DECODERS.register_module() +class CRNNDecoder(BaseDecoder): + + def __init__(self, + in_channels=None, + num_classes=None, + rnn_flag=False, + **kwargs): + super().__init__() + self.num_classes = num_classes + self.rnn_flag = rnn_flag + + if rnn_flag: + self.decoder = nn.Sequential( + BidirectionalLSTM(in_channels, 256, 256), + BidirectionalLSTM(256, 256, num_classes)) + else: + self.decoder = nn.Conv2d( + in_channels, num_classes, kernel_size=1, stride=1) + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + xavier_init(m) + + def forward_train(self, feat, out_enc, targets_dict, img_metas): + assert feat.size(2) == 1, 'feature height must be 1' + if self.rnn_flag: + x = feat.squeeze(2) # [N, C, W] + x = x.permute(2, 0, 1) # [W, N, C] + x = self.decoder(x) # [W, N, C] + outputs = x.permute(1, 0, 2).contiguous() + else: + x = self.decoder(feat) + x = x.permute(0, 3, 1, 2).contiguous() + n, w, c, h = x.size() + outputs = x.view(n, w, c * h) + return outputs + + def forward_test(self, feat, out_enc, img_metas): + return self.forward_train(feat, out_enc, None, img_metas) diff --git a/mmocr/models/textrecog/decoders/position_attention_decoder.py b/mmocr/models/textrecog/decoders/position_attention_decoder.py new file mode 100644 index 00000000..2ef85255 --- /dev/null +++ b/mmocr/models/textrecog/decoders/position_attention_decoder.py @@ -0,0 +1,138 @@ +import math + +import torch +import torch.nn as nn + +from mmocr.models.builder import DECODERS +from mmocr.models.textrecog.layers import (DotProductAttentionLayer, + PositionAwareLayer) +from .base_decoder import BaseDecoder + + +@DECODERS.register_module() +class PositionAttentionDecoder(BaseDecoder): + + def __init__(self, + num_classes=None, + rnn_layers=2, + dim_input=512, + dim_model=128, + max_seq_len=40, + mask=True, + return_feature=False, + encode_value=False): + super().__init__() + + self.num_classes = num_classes + self.dim_input = dim_input + self.dim_model = dim_model + self.max_seq_len = max_seq_len + self.return_feature = return_feature + self.encode_value = encode_value + self.mask = mask + + self.embedding = nn.Embedding(self.max_seq_len + 1, self.dim_model) + + self.position_aware_module = PositionAwareLayer( + self.dim_model, rnn_layers) + + self.attention_layer = DotProductAttentionLayer() + + 
self.prediction = None + if not self.return_feature: + pred_num_classes = num_classes - 1 + self.prediction = nn.Linear( + dim_model if encode_value else dim_input, pred_num_classes) + + def init_weights(self): + pass + + def _get_position_index(self, length, batch_size, device=None): + position_index = torch.arange(0, length, device=device) + position_index = position_index.repeat([batch_size, 1]) + position_index = position_index.long() + return position_index + + def forward_train(self, feat, out_enc, targets_dict, img_metas): + valid_ratios = [ + img_meta.get('valid_ratio', 1.0) for img_meta in img_metas + ] if self.mask else None + + targets = targets_dict['padded_targets'].to(feat.device) + + # + n, c_enc, h, w = out_enc.size() + assert c_enc == self.dim_model + _, c_feat, _, _ = feat.size() + assert c_feat == self.dim_input + _, len_q = targets.size() + assert len_q <= self.max_seq_len + + position_index = self._get_position_index(len_q, n, feat.device) + + position_out_enc = self.position_aware_module(out_enc) + + query = self.embedding(position_index) + query = query.permute(0, 2, 1).contiguous() + key = position_out_enc.view(n, c_enc, h * w) + if self.encode_value: + value = out_enc.view(n, c_enc, h * w) + else: + value = feat.view(n, c_feat, h * w) + + mask = None + if valid_ratios is not None: + mask = query.new_zeros((n, h, w)) + for i, valid_ratio in enumerate(valid_ratios): + valid_width = min(w, math.ceil(w * valid_ratio)) + mask[i, :, valid_width:] = 1 + mask = mask.bool() + mask = mask.view(n, h * w) + + attn_out = self.attention_layer(query, key, value, mask) + attn_out = attn_out.permute(0, 2, 1).contiguous() # [n, len_q, dim_v] + + if self.return_feature: + return attn_out + + return self.prediction(attn_out) + + def forward_test(self, feat, out_enc, img_metas): + valid_ratios = [ + img_meta.get('valid_ratio', 1.0) for img_meta in img_metas + ] if self.mask else None + + seq_len = self.max_seq_len + n, c_enc, h, w = out_enc.size() + assert c_enc == self.dim_model + _, c_feat, _, _ = feat.size() + assert c_feat == self.dim_input + + position_index = self._get_position_index(seq_len, n, feat.device) + + position_out_enc = self.position_aware_module(out_enc) + + query = self.embedding(position_index) + query = query.permute(0, 2, 1).contiguous() + key = position_out_enc.view(n, c_enc, h * w) + if self.encode_value: + value = out_enc.view(n, c_enc, h * w) + else: + value = feat.view(n, c_feat, h * w) + + mask = None + if valid_ratios is not None: + mask = query.new_zeros((n, h, w)) + for i, valid_ratio in enumerate(valid_ratios): + valid_width = min(w, math.ceil(w * valid_ratio)) + mask[i, :, valid_width:] = 1 + mask = mask.bool() + mask = mask.view(n, h * w) + + attn_out = self.attention_layer(query, key, value, mask) + attn_out = attn_out.permute(0, 2, 1).contiguous() + + if self.return_feature: + return attn_out + + return self.prediction(attn_out) diff --git a/mmocr/models/textrecog/decoders/robust_scanner_decoder.py b/mmocr/models/textrecog/decoders/robust_scanner_decoder.py new file mode 100644 index 00000000..0301e153 --- /dev/null +++ b/mmocr/models/textrecog/decoders/robust_scanner_decoder.py @@ -0,0 +1,107 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmocr.models.builder import DECODERS, build_decoder +from mmocr.models.textrecog.layers import RobustScannerFusionLayer +from .base_decoder import BaseDecoder + + +@DECODERS.register_module() +class RobustScannerDecoder(BaseDecoder): + + def __init__(self, + num_classes=None, + 
dim_input=512, + dim_model=128, + max_seq_len=40, + start_idx=0, + mask=True, + padding_idx=None, + encode_value=False, + hybrid_decoder=None, + position_decoder=None): + super().__init__() + self.num_classes = num_classes + self.dim_input = dim_input + self.dim_model = dim_model + self.max_seq_len = max_seq_len + self.encode_value = encode_value + self.start_idx = start_idx + self.padding_idx = padding_idx + self.mask = mask + + # init hybrid decoder + hybrid_decoder.update(num_classes=self.num_classes) + hybrid_decoder.update(dim_input=self.dim_input) + hybrid_decoder.update(dim_model=self.dim_model) + hybrid_decoder.update(start_idx=self.start_idx) + hybrid_decoder.update(padding_idx=self.padding_idx) + hybrid_decoder.update(max_seq_len=self.max_seq_len) + hybrid_decoder.update(mask=self.mask) + hybrid_decoder.update(encode_value=self.encode_value) + hybrid_decoder.update(return_feature=True) + + self.hybrid_decoder = build_decoder(hybrid_decoder) + + # init position decoder + position_decoder.update(num_classes=self.num_classes) + position_decoder.update(dim_input=self.dim_input) + position_decoder.update(dim_model=self.dim_model) + position_decoder.update(max_seq_len=self.max_seq_len) + position_decoder.update(mask=self.mask) + position_decoder.update(encode_value=self.encode_value) + position_decoder.update(return_feature=True) + + self.position_decoder = build_decoder(position_decoder) + + self.fusion_module = RobustScannerFusionLayer( + self.dim_model if encode_value else dim_input) + + pred_num_classes = num_classes - 1 + self.prediction = nn.Linear(dim_model if encode_value else dim_input, + pred_num_classes) + + def init_weights(self): + pass + + def forward_train(self, feat, out_enc, targets_dict, img_metas): + hybrid_glimpse = self.hybrid_decoder.forward_train( + feat, out_enc, targets_dict, img_metas) + position_glimpse = self.position_decoder.forward_train( + feat, out_enc, targets_dict, img_metas) + + fusion_out = self.fusion_module(hybrid_glimpse, position_glimpse) + + out = self.prediction(fusion_out) + + return out + + def forward_test(self, feat, out_enc, img_metas): + seq_len = self.max_seq_len + batch_size = feat.size(0) + + decode_sequence = (feat.new_ones( + (batch_size, seq_len)) * self.start_idx).long() + + position_glimpse = self.position_decoder.forward_test( + feat, out_enc, img_metas) + + outputs = [] + for i in range(seq_len): + hybrid_glimpse_step = self.hybrid_decoder.forward_test_step( + feat, out_enc, decode_sequence, i, img_metas) + + fusion_out = self.fusion_module(hybrid_glimpse_step, + position_glimpse[:, i, :]) + + char_out = self.prediction(fusion_out) + char_out = F.softmax(char_out, -1) + outputs.append(char_out) + _, max_idx = torch.max(char_out, dim=1, keepdim=False) + if i < seq_len - 1: + decode_sequence[:, i + 1] = max_idx + + outputs = torch.stack(outputs, 1) + + return outputs diff --git a/mmocr/models/textrecog/decoders/sequence_attention_decoder.py b/mmocr/models/textrecog/decoders/sequence_attention_decoder.py new file mode 100644 index 00000000..6e7aa9e1 --- /dev/null +++ b/mmocr/models/textrecog/decoders/sequence_attention_decoder.py @@ -0,0 +1,165 @@ +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmocr.models.builder import DECODERS +from mmocr.models.textrecog.layers import DotProductAttentionLayer +from .base_decoder import BaseDecoder + + +@DECODERS.register_module() +class SequenceAttentionDecoder(BaseDecoder): + + def __init__(self, + num_classes=None, + rnn_layers=2, + dim_input=512, 
+ dim_model=128, + max_seq_len=40, + start_idx=0, + mask=True, + padding_idx=None, + dropout_ratio=0, + return_feature=False, + encode_value=False): + super().__init__() + + self.num_classes = num_classes + self.dim_input = dim_input + self.dim_model = dim_model + self.return_feature = return_feature + self.encode_value = encode_value + self.max_seq_len = max_seq_len + self.start_idx = start_idx + self.mask = mask + + self.embedding = nn.Embedding( + self.num_classes, self.dim_model, padding_idx=padding_idx) + + self.sequence_layer = nn.LSTM( + input_size=dim_model, + hidden_size=dim_model, + num_layers=rnn_layers, + batch_first=True, + dropout=dropout_ratio) + + self.attention_layer = DotProductAttentionLayer() + + self.prediction = None + if not self.return_feature: + pred_num_classes = num_classes - 1 + self.prediction = nn.Linear( + dim_model if encode_value else dim_input, pred_num_classes) + + def init_weights(self): + pass + + def forward_train(self, feat, out_enc, targets_dict, img_metas): + valid_ratios = [ + img_meta.get('valid_ratio', 1.0) for img_meta in img_metas + ] if self.mask else None + + targets = targets_dict['padded_targets'].to(feat.device) + tgt_embedding = self.embedding(targets) + + n, c_enc, h, w = out_enc.size() + assert c_enc == self.dim_model + _, c_feat, _, _ = feat.size() + assert c_feat == self.dim_input + _, len_q, c_q = tgt_embedding.size() + assert c_q == self.dim_model + assert len_q <= self.max_seq_len + + query, _ = self.sequence_layer(tgt_embedding) + query = query.permute(0, 2, 1).contiguous() + key = out_enc.view(n, c_enc, h * w) + if self.encode_value: + value = key + else: + value = feat.view(n, c_feat, h * w) + + mask = None + if valid_ratios is not None: + mask = query.new_zeros((n, h, w)) + for i, valid_ratio in enumerate(valid_ratios): + valid_width = min(w, math.ceil(w * valid_ratio)) + mask[i, :, valid_width:] = 1 + mask = mask.bool() + mask = mask.view(n, h * w) + + attn_out = self.attention_layer(query, key, value, mask) + attn_out = attn_out.permute(0, 2, 1).contiguous() + + if self.return_feature: + return attn_out + + out = self.prediction(attn_out) + + return out + + def forward_test(self, feat, out_enc, img_metas): + seq_len = self.max_seq_len + batch_size = feat.size(0) + + decode_sequence = (feat.new_ones( + (batch_size, seq_len)) * self.start_idx).long() + + outputs = [] + for i in range(seq_len): + step_out = self.forward_test_step(feat, out_enc, decode_sequence, + i, img_metas) + outputs.append(step_out) + _, max_idx = torch.max(step_out, dim=1, keepdim=False) + if i < seq_len - 1: + decode_sequence[:, i + 1] = max_idx + + outputs = torch.stack(outputs, 1) + + return outputs + + def forward_test_step(self, feat, out_enc, decode_sequence, current_step, + img_metas): + valid_ratios = [ + img_meta.get('valid_ratio', 1.0) for img_meta in img_metas + ] if self.mask else None + + embed = self.embedding(decode_sequence) + + n, c_enc, h, w = out_enc.size() + assert c_enc == self.dim_model + _, c_feat, _, _ = feat.size() + assert c_feat == self.dim_input + _, _, c_q = embed.size() + assert c_q == self.dim_model + + query, _ = self.sequence_layer(embed) + query = query.permute(0, 2, 1).contiguous() + key = out_enc.view(n, c_enc, h * w) + if self.encode_value: + value = key + else: + value = feat.view(n, c_feat, h * w) + + mask = None + if valid_ratios is not None: + mask = query.new_zeros((n, h, w)) + for i, valid_ratio in enumerate(valid_ratios): + valid_width = min(w, math.ceil(w * valid_ratio)) + mask[i, :, valid_width:] = 1 + mask = 
mask.bool()
+                mask = mask.view(n, h * w)
+
+        # [n, c, l]
+        attn_out = self.attention_layer(query, key, value, mask)
+
+        out = attn_out[:, :, current_step]
+
+        if self.return_feature:
+            return out
+
+        out = self.prediction(out)
+        out = F.softmax(out, dim=-1)
+
+        return out
diff --git a/mmocr/models/textrecog/encoders/channel_reduction_encoder.py b/mmocr/models/textrecog/encoders/channel_reduction_encoder.py
new file mode 100644
index 00000000..0eae4c14
--- /dev/null
+++ b/mmocr/models/textrecog/encoders/channel_reduction_encoder.py
@@ -0,0 +1,23 @@
+import torch.nn as nn
+from mmcv.cnn import xavier_init
+
+from mmocr.models.builder import ENCODERS
+from .base_encoder import BaseEncoder
+
+
+@ENCODERS.register_module()
+class ChannelReductionEncoder(BaseEncoder):
+
+    def __init__(self, in_channels, out_channels):
+        super().__init__()
+
+        self.layer = nn.Conv2d(
+            in_channels, out_channels, kernel_size=1, stride=1, padding=0)
+
+    def init_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                xavier_init(m)
+
+    def forward(self, feat, img_metas=None):
+        return self.layer(feat)
diff --git a/mmocr/models/textrecog/layers/dot_product_attention_layer.py b/mmocr/models/textrecog/layers/dot_product_attention_layer.py
new file mode 100644
index 00000000..efa55a8c
--- /dev/null
+++ b/mmocr/models/textrecog/layers/dot_product_attention_layer.py
@@ -0,0 +1,27 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class DotProductAttentionLayer(nn.Module):
+
+    def __init__(self, dim_model=None):
+        super().__init__()
+
+        self.scale = dim_model**-0.5 if dim_model is not None else 1.
+
+    def forward(self, query, key, value, mask=None):
+        logits = torch.matmul(query.permute(0, 2, 1), key) * self.scale
+
+        if mask is not None:
+            # read the mask shape only when a mask is given; an unconditional
+            # mask.size() call would crash for mask=None
+            n, seq_len = mask.size()
+            mask = mask.view(n, 1, seq_len)
+            logits = logits.masked_fill(mask, float('-inf'))
+
+        weights = F.softmax(logits, dim=2)
+
+        glimpse = torch.matmul(weights, value.transpose(1, 2))
+
+        glimpse = glimpse.permute(0, 2, 1).contiguous()
+
+        return glimpse
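A standalone shape walk-through of the layer above may help; all sizes are illustrative (`l_q` is the query length, `l_kv` is H*W of the encoded feature map):

```python
# Sketch: masked dot-product attention with the same tensor layout as above.
import torch
import torch.nn.functional as F

n, c, l_q, l_kv = 2, 128, 40, 240            # batch, channels, seq len, H*W
query = torch.randn(n, c, l_q)
key = torch.randn(n, c, l_kv)
value = torch.randn(n, c, l_kv)
mask = torch.zeros(n, l_kv, dtype=torch.bool)
mask[:, 200:] = True                         # padded (invalid-width) positions

logits = torch.matmul(query.permute(0, 2, 1), key)      # (n, l_q, l_kv)
logits = logits.masked_fill(mask.view(n, 1, l_kv), float('-inf'))
weights = F.softmax(logits, dim=2)                      # attention over H*W
glimpse = torch.matmul(weights, value.transpose(1, 2))  # (n, l_q, c)
print(glimpse.permute(0, 2, 1).shape)                   # torch.Size([2, 128, 40])
```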
diff --git a/mmocr/models/textrecog/layers/position_aware_layer.py b/mmocr/models/textrecog/layers/position_aware_layer.py
new file mode 100644
index 00000000..cf8cf27d
--- /dev/null
+++ b/mmocr/models/textrecog/layers/position_aware_layer.py
@@ -0,0 +1,35 @@
+import torch.nn as nn
+
+
+class PositionAwareLayer(nn.Module):
+
+    def __init__(self, dim_model, rnn_layers=2):
+        super().__init__()
+
+        self.dim_model = dim_model
+
+        self.rnn = nn.LSTM(
+            input_size=dim_model,
+            hidden_size=dim_model,
+            num_layers=rnn_layers,
+            batch_first=True)
+
+        self.mixer = nn.Sequential(
+            nn.Conv2d(
+                dim_model, dim_model, kernel_size=3, stride=1, padding=1),
+            nn.ReLU(True),
+            nn.Conv2d(
+                dim_model, dim_model, kernel_size=3, stride=1, padding=1))
+
+    def forward(self, img_feature):
+        n, c, h, w = img_feature.size()
+
+        rnn_input = img_feature.permute(0, 2, 3, 1).contiguous()
+        rnn_input = rnn_input.view(n * h, w, c)
+        rnn_output, _ = self.rnn(rnn_input)
+        rnn_output = rnn_output.view(n, h, w, c)
+        rnn_output = rnn_output.permute(0, 3, 1, 2).contiguous()
+
+        out = self.mixer(rnn_output)
+
+        return out
diff --git a/mmocr/models/textrecog/layers/robust_scanner_fusion_layer.py b/mmocr/models/textrecog/layers/robust_scanner_fusion_layer.py
new file mode 100644
index 00000000..30a8421d
--- /dev/null
+++ b/mmocr/models/textrecog/layers/robust_scanner_fusion_layer.py
@@ -0,0 +1,22 @@
+import torch
+import torch.nn as nn
+
+
+class RobustScannerFusionLayer(nn.Module):
+
+    def __init__(self, dim_model, dim=-1):
+        super().__init__()
+
+        self.dim_model = dim_model
+        self.dim = dim
+
+        self.linear_layer = nn.Linear(dim_model * 2, dim_model * 2)
+        self.glu_layer = nn.GLU(dim=dim)
+
+    def forward(self, x0, x1):
+        assert x0.size() == x1.size()
+        fusion_input = torch.cat([x0, x1], self.dim)
+        output = self.linear_layer(fusion_input)
+        output = self.glu_layer(output)
+
+        return output
diff --git a/mmocr/models/textrecog/recognizer/robust_scanner.py b/mmocr/models/textrecog/recognizer/robust_scanner.py
new file mode 100644
index 00000000..7189396e
--- /dev/null
+++ b/mmocr/models/textrecog/recognizer/robust_scanner.py
@@ -0,0 +1,10 @@
+from mmdet.models.builder import DETECTORS
+from .encode_decode_recognizer import EncodeDecodeRecognizer
+
+
+@DETECTORS.register_module()
+class RobustScanner(EncodeDecodeRecognizer):
+    """Implementation of `RobustScanner.
+
+    <https://arxiv.org/abs/2007.07542>`_
+    """
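Once registered, the recognizer can be built straight from the new config (a sketch, not part of the patch; it assumes the `_base_` model file referenced by the config exists in the checkout):

```python
# Sketch: build the RobustScanner recognizer from the academic config.
from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile(
    'configs/textrecog/robust_scanner/robustscanner_r31_academic.py')
model = build_detector(cfg.model)  # resolved via @DETECTORS.register_module()
print(type(model).__name__)        # RobustScanner
```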
zlX?!&{=={(PB;ZHPav3te|3T+?tfLl>n@pr>(Q*3_RoU+KHxiz!7DbeY_}m8!uy}M zWP8W8kTgOwhBhl6Q-HC@)0yNmV9mc+}?IqQ0-45~BID?gi?s>rPIo`t+Gv1_$d z7%Jx-$UM#2MDz&t&nT``Jh<8CzKk5HjZ4>&AAY5#Cp`F*EBXlstFLeeV8Vh9Pr4~^;!Z%I;zk1bW%?AR427% z$E(DrU}AgO{=L&y^*(ijv%jW)-k~nz!`!@QNqgvbWBxna&i$maN11pEaA1<-St8&4 zs<`K>u=mo)PS{FH{V!|@i8ZTlYHLhW{hp+-DvUAD6UARe;&J6#SQFBQk zr{8ELe@2)?UjmdZN2&(L%xfnv4-sj>cJjvuvumD0sy>YnejxJ3~pa`MtAam=P~_y~kIdsjxqy`fm&xIGZ_MCo%m&03oCo70)hfP3GwE z>F5~l@VJ|tRrw>#Cg!N$721~-uWlo*vANxG{o zZdPq&9PO^?oRqtXY)6Gv3HN;DYN~S5li4dTyzrG#UlX*XW4RPpMQWoZm{U!cl;vmh%$ZBxrHb>BW&JFDv6Iz;uLdIdlon zwo-T#0)LymnWuX7W4WG+jVDi7RR6Z;9PvbV#lm$J`Vl{W+Wv(CSlN>{&{ye(s`5!f z0eu=)9r?SLkU~Yw-$3C5{u_e^>&sJKqTVH{9cuovP%UH-pV~% zHMP#4RC@`GRpzO!nseWMjBO0wCBJ>@7wmfqeHS6SE|e0*n_HHB^OahpI#eC{P)GBk zeO-b4&j1)zCCRoGPi?F71#42a%zAsqMdr`X0-%u?>z+5=v=7(u&AC2#{%S`(klsj>63RgbJUiBxyrkmFVa4J zB>QplkPFmqH)sq;BJMsS9-bqVPCdveaDfr{hZp~wF-BULM0M| z@|=r0Uk+U^npqXhJM&Hk6(EQQD|!n49qrl@TR|L}73K})X)&w0#Xi)P(A7Hz*o=A< z8<)-~S0NKda~^7dSbz#qFL6=9l*Vc<+pJk8;-dSv6#t4}jS^YtBqpt*4WeTcSB5hC zO~v*Uz5!?Hlr&WgN48aR65ZTVYq+HL7%aw#iR!8%jiG5uTs%n2(bSC2q1CbPVm*?+ zQt1!kz9=zxs61RG1-LOM{*zm6uV~Ur?sZUrb;*7r2~55Bt5Ap3=CEHY*(6_5=s%9A zAygFT-CFAsR!r)dK=`yQh01ypSvHI=*W6Qcx0Ctlp+3RQZsUiEfI}`Gd|0vhKJ|iX zt=!pNp^F?`qZz46#Fu&ax{1N|hVekVW^B=VtO=v^R04iQ%RALv zjV0#)H8z&KaO$t+`gFwBi@enF%L+p@>K96~+$?z@kte;Q+R-#<4hd7_f3mIL_j2Q&YZ9_8~D)3{HwvWyhq*1E5JaaI#e zoSg%)1+N&JQLm|S99gpVtI+M%Y_nWZ%XyS757`&3fjf%WZ@NImqw}=3V1PRm#| z{5M>0w3wNs_)N9#`>P;3brp7#>(kdW>Q@#x!w(|aj4MNQ>Ua5kyzlG|es>R4DuIzN zu^=TV!1enSAc&6wEWjJ1ER2iT7e*7YW4%IyU*5*lDoi4A>3F6#8uD{r3Q);P0q{}| zEa8p3?<053v|v*oNMgpj1skPFeRp;;6-r`~etSZ~$GnGcROYgbFIm44a%|Ua@;Ze# zo?-pye%=jSVx3`Q1xBg0F_WBaIZ%!wQ3=yzd}9UlC%>zj1WUBmmXt+*-<$zyBfGj> zoTOM*uSYJx528wGI1Xa+dt4+Ip6Mo$)#Rg$;$na2ccL>TnMOw>nV@MNSmheUV{D^K zufIw22?eO|c{){!om5WqzXlJH3|-5Ar68OUP+{($S&bGwE)7g{bW}P?>qf-+Mf#|< zGJ(MzS8kk-{lJQzAFFm>^)PO-9_)2+pWm9s?zL$_DHe9OltU;PM17@ zxA^u{<9?#<{7?h2Y$+tc8k;wC7{~6P60`8dsPx!dp&)`jU8Q3I+U8-ovN$Wxm4~Qp zB3?i<^g+X*PQy+|3g;GX8X+yhu*pduBx}$Q@tazae^e-ZsA-AS4iq5+P=vZzb~rbz zRM%j;^ktxz-6Vzj5Gn@=DTk`XHxod@!s}*SucZJeb;LBRyWT{p;qEoQs;@j$qGLoh z%V94HaP2zY5{ZhfKvViq4)p&yRlBHXe;T3AB#)hP`a$H78b5|eK)>Mn_}8@hLYRNn3U z`vL7|BGu!oBI7^sF5wxt+0QoImfO1Vm-{`|14Dhh4i5UVUmclvNR~Zxf6~;?foPqa zPCBfTnyT9VId$=n5h{AwrY7-vC{x$<$$|!Tsr_+b;g<&3_)UUvkmzIN*U||`@rt4V zvEubxS1?-Do(fzE4yoymj0Y?X$Be@dLq#j-hZabCd+w$FR2w@Mxg0%KQp<7q(B#jK z$=&j!7WJsJUw+Kqcq82ycb(=WstECy_A^}$+Ym@_)vp17PJ zOr#A`G+FK1W^w3Cae|U}VQ$6d@$yY|@r`zUqpOxQ__9gOYccuVrZZ9FFRXvro#ajt zOr#c2=gg1ck9M;JG=0UNeS!G+LEIi2t?RJk4eJw^4DwAFOv==TJx%yPWND={9jH&7 z^BT?JbNBm3zC-~O<1i*YM!B~%;VsY7L4tM{j`z4CXUE_JxIJ?? z_VCVBo?+!-6=w{6aaz1D3$8sBwPwNjRvrQi7uEV6k8G9*`^XH|7+#3>hA-Mfj-DT$ zxmY`XY?s^Osj5-X_FO2V*9#~e{W>mR>4lL`{Ht7Z`QLgud%lWHH- z529r)idVI=DlBU-!iEBr-=hF>yH7~B3zbSrqMQ;-K9j1KpCB4iHTGAn^G%{^CAwRm zLZ2-a7brk}1W5*3U$IT|@cR&(0NF6~J3)=r)vnY-=QUlP@6DM+&ep2ZE~950k;Cu$hH` zc5Cw9^_M$sfwc244Cb|=%Mknq#N|*$v~<2#!n8b^`^)2_Z_V8WH0cLtt3fG^P8H60 zRg|K`Gx%w5#;?ES_JuBACjpelbzhMY$ukFHN02vsIK|QB1mi7xUxl)-_2QC_YI!o` zxTfFS@2c=QspVfET3q%E&Lu9?iIp!Jhae6koiTpYz&L6G9w@w?#ENA{re^2e#(`UVU z9kq6m*apsd(KKF=wLaq|pT+t|HTBi+t_xJwCDEOcbT%ZPt*qzFyLzuJp3RPN$kBS! 
zr?`~$FxFJ8xoZf`i4A1(x>Hq=WP4JYZoN<4=+IOn?HAhibAvyW0!UbHEf!LMZ$7Gr ze~)`xeAw0wI7wRQo`2bG7vyrU`wst%Wic*6r>$W-qh;q?xW`ui0b1Zp%p0rw$I?1i z>_Q!%@-Yt*A`hGq#qdPKV zI669E9VKWH|VIM0C2okC-+Tydg#RVjSFU7H&>BnyXdbwJLj>vZmx*stcbZ{ z`#9x2k4?7~gVX6}(?>fjh|VT!&|2fmH*U8y?cRQjz;G06>^TIuj2z@q0A|H7HazF4 zD!B4irQn|3Qd2TlYqch$vJsd~&??-HRO}6o9}yA_$dY6w^W0Vir6PH#TzX6218WRj zv6t5hUQAyHJ5%dyWMY`r&GF4AQj_W7_UK+yn2nfEf|gqcQOkPoDXch};MRtAz*bi%EEd#8BoIt6$l`G#{!uvq1~ z2sCD-RBAXt{*5g@Innun!wv!OfTVJk`S3H%!Xb`)!BzS0$70GI76{;?ZFD zC&p|$Y~2tE_~KMowzbPjq?)QY8DZS9@mm5jt*WY`URzR^bh4Y==oAjbs?(9s>< z1c;r&d-jZ7yv!n%$NCshU2sSOWGLUZkVSLier$w_E?m*tx}{P(|2VjP(0A_!7m>zF zgZHh9(PhRvz?S80Q1?!B8Wi?K^=IX&5%uuN!meKh9!ATX{<0RLraBgV)(b48a5>aT z=0BRoT{zbWrb;IY+-v{9aH>+tud-v^k@?<)9g}ncdk?+s>>0~XOQ6dADZb4ff;6X) zaO$eN1jX9G>aB_c#j+5cS9-|Rg5+RmXYZ@}1HyQi6xO8*7La-lghzi^d< zsD%!WGVK@VigK&leDDFq64TMOFLzynldP4T@@D0Bt-9&f#u^+-NwR4j^Ct`<&z*i^ z?L7{pN=-5OTeoDn2E+K-b^O~ga}#9t#@M?Uo5>~vO90hL#1sZtjmQ^^_H{xIhB#qL zmJf)s5QoW~0-%&CL>Wt9SaE?M&A$kv@#!`kT{ml0@v=h{Ap4?7B6y`;~w z03C!T;%C23CWJa0QHiUZjMC({$@7mu+qJ$r^5Axn@`4rq-Yx#2sXq@_S$f%OzwNKU z#_Bcdn%?>CK6cPwi)kD$X?)qW%JYhZ$o8Lzlh%KiET!lzT#&!ODQ&^>?2owV7X1-7 zwu*qAg?pj3_-pBMJcT-~5Jybeafc;Jc_1fNQS`=H8Iu~prg=+7bz5Oyer!WgPRv-Q zB0{z(RYYpoz@WvAt6n>Ymjd*6OE3MP02;)LDsxQN$v1fGt><%+Fex|9f}85fm6t#m zbEn&b#rc~!bVwz6MO&~A#tb`zxD+bdC2HvMO4B|Lv4Ff5HJ)lWR_VK>JxF$NBO(hI zPG!sc+2@p1IkLnBbGSdD`bxKQqsOq&_fh=eE%`@uQkzY4kA@56`DYl90=f1ZIKHH> z55WDjI0sn6IkD!uk<(X(&@r**7Kw+h(Xoc{zf6TPJ}K+XA`38Mg#neS3u2y~<|=jA zTlW?|m}_U(fHx`9>^_n}68N~4dg@OBHruNicgRq1@4V_KH&B2>YjfH_9h%cg!452J zM_hDnE#Zp0Y5i}fnozs&*d-3wh~E2mV#fw+U=h z1i7C5+-9Nu3tKycx?+;sCB{_sxmCfU+Yc*!W_0wMn!nrBxeN1x)e9XZn69@nk4uQ zujDNSYSa76ISAgyvymS-shy5efPv6}y(A*%g7h`ok)|1|Q+kuWDX{i{y*<_Cmb7ZH zd|Lz3SZxF*%lDPwKYhcmgPnV$1P&0+W^vb3}xA(H*s7 zMP%zVPWSS0kT#9kCu$0isiE0CQ#u{Q(O<~%wEN*xcY1jwk*U>>w&}fRRoRziL<4*E zGWS^hZIeN?i)QVx< z-V^<9uLgB)qrNZ3xoifO7_|KTJgJ#sYaX@BCwU6cJJgS9yZC=I6S8Oo0h^T3wb?T` ziB|mjWcho40E~SA)qQ zf@LKMbh}2&Y&Cry{q?}WTW;C3^qVNyapF9-;hVPUjI_4n-ELs1W8)Ztg?A}d?6(xu z#y#bF{4`_$gYudBd2x+RJ5W=u7tFTJ|4u1>bLHYLF0 zWP%AHH~-|pPEol6XL9@GeY}EQYbxue=W)`UhF4uJ-?gV$$)kQmbA53Id}S0?o`k*c z+k{9?QKA4W4%x=SMcZCXv6bba*rfZRMqE;>gU$LaZ7D_pFC4W-$?Dgpep7%;Eeo1g zzdJ>d|B@dMLmY-x?UKrdG1q_3(YDQxhmLk#TxYJfy~=m(16c~Qh$Q?{D?3C@I0ou) zPM+M@@|OSbU&?3%-imy`5^(^6ZP}II&{Z5{dr;Bp4h8W0v@aOyi%W@xt&PD>&zPW& zr2#iMPG! 
zI~MRtPpVwyV@cUFdX(FWozC#L2-p1!h{g5zV$ z{t^?bAcB^yqhh@jI1Pt#Uo@7cP^-h_c!IpPQt93+o<|S~pj6kV&^tdD)4ljG@}~(9 z@@U*X-qc)#BeMJzzRdaNbNF*n+#fAL@eDOp#cAq^Xr9EJ&1Rwx@_YACg&=(2~w*m zFTxl9VN5lURy!|skQ;{ok@MDnrouUVi`$kR~o3`}Cr-E{@B)s_MsavUDGd=7-K>88dy2R@m1A#jw>5q~#zS3ZWIK~7JtjI3f2U7=Aj$4u*%K#XUM+K8HT zcgw0DWT*z7N_YHj^nHSQjUYKOk^&H!;j4_$f!xF~WZB5xtrzS9JKUKop19c>J|_#_ z6EZCg3l<93J$74QYCrqZx;)~*-kgnaE_K;*=8cMBnQ8CKXaO!M_8ozVu4Dfnrj~De z_lU z!*kYkhV8c!X&&FBB{%DE* z;`m1eJ8&4^iZ^byI4^@lO>QZ4Fkziko4K%q$W0^l`YDFb_e%dA(o-4&|3+ALwjk#cJ&XOIQI(>_ zF5#=*&v242;T&kUml;T`Tpv@%weutOYWY*sPt+a5dCXXnT6vaIeh@KWg~^BpOXrkB z$PvEjIO$LWT9P7HA+xKp>T1-JXY}Q3m2FV?3xVYH@rhrCU1u1$Ib_eg2|*5IAeWq~ zJSxTX)cJa(`rnxx$+h*ie-SA#?VcLtf2E&wz;km^0jdzh>GS5YxcPV?&Bq7mkBFV5 zLpf-LEyswjv#TbwrQ3W=+-b^8^JTn`ljYOb_k>%|I=ZWMZK%NY%&@B{vH>;`f8!U=zWM;ihuTv zfij$l(aGH=kKD3nqI}Z|;|Su$%touc2yU)5N$q{n0Y{X~V)$;e8dpb9+}3GfM~95u z*FkRuc|q~etY8pBxsH7{BAE@`RasUnLOFVMoWIt5YIu0FBQAw5KR_`c^%mz0xwA+D zx6*q1uM?@aB{l2g)Qt(_O*4=Z8szY%9?+oHWXFt?;+Y zg;o8tb;C<9-$3d=V69q_#Cs}WO?LGiVr3RIc<>G3#kanF_!M<5*1k{U22m)Zy6;C; fyWgM+Al;+8jb7GiV}C>qKFjq#`J^2cW#WGU`^|L5 diff --git a/resources/mmocr-logo.png b/resources/mmocr-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..8e14913cb24f7bddfbf6e9864578d0912f3c9022 GIT binary patch literal 31646 zcmeFYcT`hdw=YcZO_3%Y1(X_EXaWi%BF#pxArM3eozMwL6G2ds-sDjM>AjawBNB=r zoj?!-l@NkRkx=f&=Xsy=-fz6;yyuSl-9PRacaOnfXJ_rXX8Fys=30rrZLG)4aGrsT zjEq_T=JmT|WR%rpWaJ`rG{6&CW7AyVAHCO2OJ6cFnG2_Xd?GzDmbP)5k?tPE}P^79=k#FE0&@ zkoFDo@Ph3y zX#zmCMn-?n@h^FS!~dSb*H0G#i1C*~{^e-j`$1mLvUi<*J^g*4&bkPI%?tmK#@Fwz z^MB~`zwtUS{P)XVYC1m75I;|!`<|Ze|FH7yf50EyecfJns zbJh@%2g%DxgA}CY!T05q)#R1bDrz9mpF@p2om^o-|7xfL7=XwBG!#Ia6T}bl zZwEU;)nJ}Ja0s9)S2)DQS=P(LMMUUt3#jRMx_kNn69e2S{PS~t9i7`go-kK;;0xco zdfGzzx;kJvRWMjuUPkUWyGBN8`X0W15D%!c{&fuzKvo%7S0^<&XOJ>fPFYo2&e_RH z+EGE#Q5xdpEHAC#1cInQK`Cq<3HE~Lm~3YASg&0 zrl_DS?F3bXNvlGXou!qW6qS@zoaN;e9i9F`&BVtQ@MVbmKT|#B$_b#T>?j9w1}n)) z!$5KnX^5(#va~7`3h<`{kq1K*Ax^;mQ;+$Nng2oM8X%u9U~k{QJW?Ct^4FcatLDFE z8mgouub==?mUebjQkI4}0ZcoBoE)X)V2V(Xlajm~7zVl`d+Lk;386pl{v<2=U(f&B zMbvJ(p7Im)7Z>-Oz5nJ%=(ks>L7=DJt|0+IvJAp-O9gbV$RW+6XM zA&8fkyDJoODz+@b!|AUu_O~)c{?mDUA&7sYASF3@WoH$zw6YU`LPcjqdBB$Ps?uN( zfEbvw0t~Dy{|^QKD+MVisVOS|!*l=F74(nM3^TI--w5>Q=j(s!@y4lN{?4pw z|0>q}cku_LS~VaK{GA#8mty!|^Lzg9=7Q6R@muhJs-OPx2q?-_ zm2hI2lPN6n`;w=(;>D7k`F%ZaIp?T*{q%Bjywv;T@#*!x_x59sKfjGHV3YjwC3YQQ zv)|*ci^Xb}{T}z&kSWIEPcoU@4Ap-Tm-~MN|F6@bTkBZs!Dv5eL~0`@6w zR_)gwZ$_A{DofY?V9$vAr90is*e2*US2Qn&%O^uJm62&t9G~Ck#ZgqLrPXHiZ6?r~ z1a@M`7*Dby8~4KnkNaClL5tY9{a<&^EpCPg+B3pHTC52Pqj`}CUaNu54WegjJ7P`H zbOaf|BsK4Of{x<-D01e^^LMWS%+)j;)9q`K*;9HXY_mWO%A;v|2UmxB-c0-v&p0IjF-+roLNY}JS7SKD=N@GJ6PLo4vClKhG; z0L-6nHSzAt7aSZgYhN1rfkLCUQBqY=qwV|Q4)Gqv?I~;zH{?0p>C5H=rm|u%D z`MhC@TcnJmBj2U-IvYqAsM2wA92oL)bZ8&0{p?5-@}k&$8EPwzi7u0?2h=BTdaA#y zOkqF%V*B4oB%Qgw7Ww=Y~1>l^l`&6^aY!vNgr~NF_gX5L@p$g4Q79VYHo_| z+MdEIoi^ey_nZVksD^M#=)3Eg78b3W8Fk;ZsCO%Rg4CmB(3NB-7YZm2MYX@|h2t*5 z8llH@uw;kbn}=-y^uqf^D1&Ir=UPIEZ;$w>t$-+!dHYmPN2(6$knXoyWXGxn6fJiu z$dNpr5&Yfwgp4riT3IOKY!h>Bd&ZG440)5TR7)&+Rv~)l!Q#{89aS&rk$vo`=Xsqn zbTD^~d+)&hY=5E0-1O?tz;sa5Rwp$K6hOOQ>3D1|M^b|oMZ~D!G^utwDO(ep)4g}6 zxRLsFO`Gf`9jExp&H|#`&3bgiCRyi=4ADS^W6whJ(k&VyL%%L=#0%fwdLcuL`PeN( z8iEjq&4D~F*r!?-?^`3UT9m9^>c3OP9;hBU)1HBi^A@`Q;1_@5 zVmDsd>q^{K%F__~Q+8}mHSI-Pbv?s>e=mZPcP>Y1f_x#dv6HNk4k1pYu8Px6ofKq+ zKERsG33Pi45>F`h(z8Y_0AB!~s>R*8kpdlwq&w6@-Dy}jk+Gw-qaKbVDXYx!1V|sV z6FZst`tiwhwk0Rj#Du5rr`h7Y8L380gaXpP)D!(-U69P44B_!00h}i;)wr$Nv=_kz zBFS+c<)bvB8$BI&k^TEJ#F>7u>8bb01Ku}t3l=#SsYjnWRY}abDN_szFV=s;wcQBa 
zMfMzeU^?QrD2U|C0zz&yjN71b7Gwgz=}RE_J$G%SE&I z+x9eT^)xT`%>e-7z;%jZ5Lb?WCrJ4SKrM!ndKMyU%8^KxLk5VHyW0ic`gfUKYvw5u z?hV$P1{p_}VfKCab5&fMXp&9@kCfEa)~B$moB*@ofK8pw4^bwQo$wHuyIYEVd1wF7 ze@-=h7-kM4@xb^o2uD83aJuVIjmrLRAWPq52E>zx3(y?%5V^e`RVgNZ`uNb4TE7Pm z$1L>XWiZARA(4Ci2*Qlz?-cz`7YKfZef-h%*~jEWr8+0$u;CbiHgnA=LM}iD=&q zYO*D;r||}LzLq5T{~Ljgt^g4!}DtnU&P8P zdWCo8F_?%w(~%~oL;m54NPe0C@da2!>hsx;54-SuxN}ZGH;PNzlhPHz^3!x4=NewL_))@P!a`CY%GIM zZ8G$m!oU^CSmB8z5l7SL>{D+=>>?3oiV7HbCj=&gnFGnU7klUEF~($T5!pKRJZ{QIgz8Z5}I_+BmG0EI;WQc_mvh_ZCc_7f*{m)9H+Gz-D(GWINx zhF~W5%ypDWmu5endqGhAuxeDQ_k4l+_ThZ6DRl2_n#}`#M0Ut8{J!HlUHvA7 zAX|$ufYBnvX2McF%h#YwUqmBjX}Yxc5V%6L!?RBnu=$9uc!D$UCXRsScR}W7r71HF6EMfSk^1h$26LI7-CT!twH)FlqII-`h^4-6y?t74=Em@K8zhtabMj5c zT~3%ig}QOZ{q>`MJ*zoEZ7bBZ?v}0-sz9=R)@X@v-hCR#l^q?Fw}T+j4Ru!d8Iqyx z7F|7(w(Y6~`<__I7yxs36<|hJu_r=S%)-5ubHpyr0?DmPSI#+x3L}derR|9bEz$c z6%?dQt6b5Q&4oJ(D9~Vs343VLG#Jtb($BA>E;#HtHY8XHPojpRh13@oH9P1IS%gvH z?JHZ!5mUt#oc2!|^ZOfF&q!2@N63<)KX+~lhq82@+1xYGRtZ;KuoI`6Pq@$q_{ABX ztAJ-^`RXf`6W(ZFP>-0wVym)LT}(w)n47HMW0-Kb*)^{%|G+)C#XM)l$NiRYJyfX! z5Z{B_?C>%))XKH>8RzgaxKflBWqAJRMjIvsmCc+)GHyE(>Ii$u_A(t%4;4!UmT{hU zvzCHDjq@BZVe^066?6(g8UPSHb}mYZtu`e6U=~XB7%_|LlVUmtg_WGOXR3FdzVhe2 z#n?uf?%j>$l8)73icpHSq{dX@@*^y`$~tJQVe39*kFDk4_#VzWTGX%Pdi3|NE*Ek( zcLGV~C>acaG7V+SE>TaSzjo}`cElB3LfDHy>RUAE0YK=#{#$gJ=+~v`5VUlecb_k( zN}?}%EJP~hZiulhr-uQ}h%vOvD6KLxo7FZUfgF)jIfLhenai~*<-OgPeLo~oFUCo4 zI_gP|)Xwk^^g#J8P%b!x+R|uJhTkl%JKjUdB&MKx9Fp0z@#Rb4!Dx?x6IR@^T&_K^ zKpVUV;Bcwjge`}Xz_9*8Y;EYu!?yuWgj?Ehv7Y-okMd$2vfIF(?Y8A;>(urL?=!Wu zTgTUMWDv1M9~el%FUg(#(NccI(HWS1f3vk)qr&|==nbp3u$$bTq()R{H=Yb)G<))r z&Ee!SQC0Z;f<+Vpz@5A5X{x!Ruz$!PC=t;8d^K21#+4wakmIaxt>!U!1u%fr~O z%z+ieIF8MHHf7K5+_KpP)*s}dZJy?|_ zx4^i)u5i{vW#W9iZ*7y4;qFx~%WO95pY#x);wh9};t0u}U7`lbM5v@6JUA^Q9-pQZ zMYs0eBp0peuDlNqO;{m=C+qQ&rJ@^yoEL3{e~MM2MbTC%UmPaQT5W4`lA@wZQCIZ^j7kcfm)?2h zr~0W9X4>(`FG%2!#^sXr>LxmaWYh?fCS8-oV3Vnm@>|oi`W9mC678U`9w?jCN z5I*1$xO2n&gv_#cKK_E~-z;soX|4Miu+Myz8O1MKE)|ic#aJ82a#`@L%wAs0H12bn z-%0-Dc%8OR=GqVxR@=LM8irmZC-9=Q`ta-5j|o4|ti5t(`(;QCw)_6>V78OKZQ>1i zk1y^uTScqP*~u42aBb7mRzUx0Ah^-k!k|!#>*sUiSC3@J?f$%CW~w@rlsxp*P1+1r zJ2DV$DoG14j)H&=q}vXF?rM7^Bh+mc zkMk2a3-10B&p4u4Fkb!)%4o}X^?>UP#uWuJBCT7$TXkOV94PD;(X3AHPU9jk{lyOi z2B-VKO~RFYhwr4!zY3pYa_tSOx{H%s%vx zrVD#g(S{%HD}CSzW#AhBbT#y)puhrUIMX4^bXW%eP>h8_Mn1n^qAW!x`Jv1zl2^^%IiSzR~AuK|5%Up$>3kYKouc<+BWI$ z35d;XE7DpPs*+8N!Y|POG`q^ky5@1;UtK07NphxnF>p3d$+b4QeCv(5rGKk}I#&yF z(T5Q~%OO!)OBn@AEzM|8+!G{kq|_VJOC$yrcTfcY#bWaEk>DsyGdEF^w5((G+#f6Y zC}&;f!u7MkA(|lqfvKQ@vDwEWkJYeX=EGOO8u+~m(BgUjHHXolwaF|a9&u#}9?SLw zQM6~Fd;4oVeP2bp=-@Ytt)!J>HFR8Fb=|jZNs7%Zy-XVMrXY4gGwbRq4(+1N&5JOs z+@k46I|M-IpB6D+SBXNns&r8AQ$YA9kdJ1hg>za)m72tR`;m>GA9g06fFlqAkBS!b z^E8DM<2RW0x(_-Z{Bg;y;u6cq`md-MfG^I|+Gu&nrZ|x!WR1mB%PYtI@sOE*ExYW_ zPa0zhzh-sDks%sP;7LnY!{)_1ePIUuACj!c8Jc{Fzf7A*Hv{1(>#W_FzMfxX3FzEl z^~9MC)SM`3=uAod2{&G=2k(^e=CVifuTk4+saj{kHfKpq`Mz&#Onlypqfh`EYmv#Z zVRuq7+UfY=hq=|K3&0whQ#ZSk6E%??5kq+Q8ri(am_yZ)qV~2c)zCuvQtaH=?QB)! zgB-_wJiGhlxlX*O`;TYK8h(Fx+~gbPz@=v#u`CeCTWQx8WXkTfM))EhZFQpE>1%C; zUvg5GN=8q=7!r+aSdn9zWx zIl(K~<=C>%%X)I7vAE1Jo4G-hoiqJP^B&*2+Q9facis2cCiR{+qfy6fwVlZ`a&7s^nt_LYX{A5z3KV^yDFd!(BsenFLIg>f%kni^@-zFK1in*e|!! 
zwrPDjlf%fqP{mLVz7V>UPpQs?FJpRsTR#*UMrv+WbfB>VTRZVT8?&0826 z>^BqvOI+_HM;@hDg^@sdLF~)YZ5kAZ48(lO&V-pt$Va=-5Sq{?O#vZG3Gr>K7jOMa z#1K9*SW)}P9jt!76jqZ)2K2oKtnAsFzm}(L4j2gh3{XjguXrSqYF8K6NqPKb6fG~< zZ`Ck+AzDQ}dYP(9gG;E4O{_$_Ofq^5Z!z1Q|IPP<$GH%Z^e-7TTXh4&X`QNw_Q4N| zYZ{oLo_F__HuF0{827$(PzW<#tVx#QB%n>}Y6j-=xScl9u=`;!a%mIDoEpaR+1~Qj z-YYLayh~VBtMW4?Ro=G=lDT!uALrU(y0$ zj$Gz2tg{tiEwEM2Z(mVR;?uW~GMU~}{^C#htK;84Pi#d&Gc?W-5ay9l+_XY=;J0TO zA)-9&>i7WU8lk%>MRA8i+(JU2iOr10U)|%=mnM9WGZaeeCsTY+hhoxGA_jTtEwJp{%Qy2^=^5ej3+sCsQFEL-*=qc^phs_~&k zad&VViF@88(ru_B;4srDTnLohT?#~^)i-RCHT4)uR`O1o!!!MrYm;`&$%5ii@NEI3 z&OYn5Uj0_3*_NwNsY+GeJR%r?r#Yvj`Nc-huDGX_m0kW7mLl3f`$^yr3uToC?1YGWVfbm zJ*{z2m#V!TnVXp)8RLfgZVD)$c5mL7xT>3xU%8{`vP(W~4I2jewkxpcIc#(l_`hUU zz-V?iCoqlkhata^ z-J0rcvYWjrPe`4~r2G?R!k2Ui1VZ$E6aaYeTKbr?pAFyDL2dUeUCI7lu_^T+$ro3! zPtbxSo5xByBsze{uv*JLOb>=hV>8Ig$iq(&SYBB|fkI|{eNT=Ap01<_(I2~dprOHg z&L_1W?8Vl^82A*@;+z%t;C;Mzjyem1w!-}=1Ftf!SpaUYw6AY)vT=7-UcvZ2LEjp= zx6^})@UotJ?<&!px3e&U2+8nR;)+bST}h7s4SLHfRGako-Yw2ME8D7bEccbhep4R$ zAVsdG7Xl8N53)KAyHYvk^w3>lx`A?)^F!&ddO+92NVj|6c|o8AuDN`w!y>dCsWP(i znRRLHv;E*FPg!p2;M-Bu+R7h;G1|IrFXMADNlVkFnNv5iu3RR~>H#^6v`VNbnPzdV z`|g%-+g6L_kgnM{B*6=L_@zx%ssQIfEjH#(xYwS*uK;Yf)6QG5VPXooxN)8j0 zYydkj(&T#1x->z${mYPG((rlX^L8mg0m;7Y>3}HTb?=`G=>Cj&Q2&i2HYD4N%~D45 z0y`}=6YtJS%G(~3?Q^{1?`MK_zv)Z8<8t3TWvn7!K_!Wz@pj|7>n`_dNDtOQK}CQj zK+;sJ%DL8?AZoD`S+`ykbK5SkUBhU5DQH37rNY|Vpu(lP)zyb0UA=Gg#)yeDj*Tcp zyocNF2vMh%lN4m%b9}Pv#U0YlbPa}z!w}8J_ z-gP(JRpXNEW&R>DsC@MgHEDm%c=pfQhTCO!L{L&JMo<_dRukyJnE0alLt^&U}T>klRJtMbr;>Mb`e?37-DGELZ@uxPch^r4))(^4;@E{4a znKH#-d*s{S5}W%x>c44a(tZ%T#ekfr2<6$ABU)~NRcJk~?qPgmy$WMV{E=Ny?+-;SB0%J;Y6~9+A ztdt8nrPSq_O-J=3r;Y|Klmp^auRASF;Y-T0leYWfRsjtx30QJaX244|`a$0cd6Wo` zzDFE!2RMSQHM&e7l1Z)`VYX8dCp>|SwPq*KT>(vGl4fPIpSy>C$cJ6GHs(G$h28UB zXH{;mTTWF%?GMiG@-EVmSJ>g!*?Gmj?nn-niW$X3Y0LofrP>7(ydS@kT z>Ze$PfkO5x@VPVHM=kCC0Ec(q+$wv2KYr?Q^^5R^8{t>nJ0HRW9cY{Gp&DTD$E9!X zynY6X^WCFt^b1)`d#SE4WMXV8(UwRzpY5AnR2Dqtp%=`d|1_8Lm zjoS7M=kb>`&IQE!h9fQ?=bZMTgMUZfJ})z~p=d)E3FEYvVI7pyk-liU$dy$bO_K-L zC@iJ~s8>mHjaOx;VOe)E@aCy!&JNs3Zotm&JsW*DA9N{Q?AsKczHf@iTzQj9hUp7t zgd5qPE*=uTy~ycr2C3(7IU8WM-WKa4Q*oZ8t$rGUv8PFl-QeUxG2SCP zQ?9Ye{!4d@)B;WysI?dxp67nY=7trdjny(9Lz9GY6;>lefd!f~S`d&E~+?w+VYhfZa8Thw)*{{pPDjjz1c1n{R(8el(F5HzGHHNLf znRRs(fBx=8Xmjj$dcRw+oB=FmLpk}EtV6gt9TR1P6JMFzp_VkQocg0*KlI;?dD#Vq zjgqpu;XQatoHK|qMR%uJppQP!XR2m2sm5zU!TU~O`*PNVOIwhuvR6)vmeWF8{KjJP zQM-NIgq5k`1w*{wa}O_|6w|DjMt6NeFB>i+Te3&o`{oA15Y&S0v3vPHH3d8+B_kMZ zBT2L)a}gmA?YV#wW`+$YVea(>`&f9YYy`d9;~og>XX>ZqZk)^!1yfmWW}kBmASgC*aXhys1^*t}rmt4f_4EQp_D}s*$b-t=xTy~^g z_jB0uz~_Zz8BY7N@2eJ^&vuDbSf?3Z(R+5$udq?tz4?~=R$SayhK0@~%lp5A+MK!Y zSgy&Sjm?V?`ah2I7l!uiPWKf~_vFVfmdkVKyc|Jp8FydHYOLuhMMJ(LG>rP69T*Fb z?qGK#kpX>+%EzaCH-eO%Q~)O#6L)mD^RS&V4B#11PhDaCG3Viw*V-g#Tg zpzaxI?a5nLW}I+4SJSvU*hXw_p(PuL0*NueN=Z>)JEQ2fZP|9)ezHv}V}xqeJ=E|4 zvwpwQU6;9pEiYw@Nmt*ynS*BELeF|=5C!_}p&ai-dBLTRqNcnrFe{Nz}Z@YHRQy*lcUDkL^4t`=rM6h_KkL|>VG+e4v zeZqa)0ZYku!SAvFG2vP}D`Ft-bU&p3>T>w?LjCh;Pijw`(!;*7cRaz`jc*jXW`E;CsiUpCD6!YOu^6U#6~8o$R@+pumZ4 zQQ+*pi$;>I`IJ9>RrDuzZ*K zgqecbWyf_x3Jx$Wf@`|V=F(7Zs|CLR8y}{^aaY1iV}8kY)BA(0;A1-Z;!8-0@wZn#8`zq#gc}2L^(~gl zh-V;~-7i+O$_Xw9@D+}3?k^h7tg&w$nSC`YIvV6TrrrG;EaLqmPUr%o%sLmLZb;7L zv)V_=v;@oTy_fo*v&w>6Ls#$M{6gJWiCCH?(EqF=c)bb&mCmL2#NsqU{9tz zNbxb(Vbq&cl!{-=lZGburuy#AZym~!j4FHDt`U{)WutzW9|*@rZ3HSqv{spx=0w-@kAp?#1-57(eNkBBK05OF9KXM401TW9FgY{x4sIukOWWJFP-`U9{! 
z_O=_%g8l1>mI&X84KV8l7&D9-%08NHievj~?XB*XQFGV*@=vRuvxQUVRtsvp( zOczVLmO7R-Ti!vJq%#s_Cakmm%onGAxZ-%y((}G{k_w+6c`t_miMCnVvYhLA^;2I6X;x`uZofs^QU(ZE@Nn-xftRtf z+0Mt-$n=>GNisL+SY)?-5|PUHW7~gYr!HlPuY+4-_kQVUKeiQ){&H8QqAkm`AW+x`?FZxj8yLLQk4or%Qiaux4PGF1f37e`l2vG zVPbecgV}V-Q^AI7slG};>uOo*IO>~9qLu;O${Otzz3UBhM)-^JMlWP1X5u5BEbj$( zL~tP_LZtZCFZEY{6c@jhteoJ@HC^$OPh(B0BdG@MGpCCSE!TfZV>(tM_D&*C3+}sh zms2);fFy?jTjj6FhtL^pFdg3+56Q?!A0KT35(qu{5Jua26%5EO7oX=J^TtI9Q$b%q*3RI^Fjd%lo-eVdwYy;0W;)O}g?U@RG4>_kg+~{b z7<8tVRe}$)R-aScB_{QfvHIl9Nj+ZY@Z^d}@ zt@%~@Cj|$=u}^vST2eI_+!q9ut~=SD7JMo0{xvn(RePv4htdqo%?b{P%)Dmw%#vJP z9d!<>N!>hBnsK5J%t2|pw>jLB&2_8|@`v`qjp`zoqrIzl z{MC#e&FFSZ^_kOde7$Klt=}!-a0dHOg6$V?;0eqoOty1ZQhBNO@-z~JxoZQrKT!SI zk#e$BG|~_x@lD6qB=<-&g$vcudAa3fY;~o}R~e;?>+9UFo|L`kER4^+UKoEyoNTH+ zHw-l2PI1gvMY~J}O3!{JepF*ygD9IJ$Jz09UJ}q)WZ_k$@roQ(`ZerVb5ST4Bd)A} zw_3vtEcGd(5!F6JQhWG*7=f>Wzf-v}5q;zB+E3{D{+7aP2yrh~qWRULNrhcPOtL0R zmk*K3WQyL+%8q82`AP%Na8&&ZL8V}dr9r={c?{D1c+*QSSc(r~Oyu+l0 z>udU)d^rWJ;g4V?h5>z_`6miXU|YS0q#{erT5mT@EL%J0(#hG|yIHZe28dby3!zDZ zb6;a0zd!iBW*FGV+#lKV?y=~?`+Ma_@7Tj-Dv+WfEBOyI)t^S8j(~$xUaxET2dwfZ zIO!GmHw~kp*jMRw;`EsX-=-q$pAF|+!8y!KyBlOHyc+F4Z8ZQl0xNU2erE9rd;pnZRVz@H$V{Ar~ z)VH2ixIUz*8N2Oi_>A8(A}p;6>~S9nQzyrx49%H>EPkM-!+RZc@jT{GJoo zOqPcSQ&>q2-5#biM`=DtCH%Hdp2j&w>l6lG+X0XN2z6m6=TwmWAmMhUS{Lz9GG|CA7dRMNQ zJ^I>n$QZRVqfBdIhm-Ym02eVBMsk9#u~N1&cdT~H`4aZj>I|4F}` zr>34nMv{W?Plb2Yn@^ue5iuEQKZ1bRoNi$kjEIJQQqChIU4d+jFU|6$W$07Pv3obJ zjzb?z73aa54vhkt3yzZIXV-A8gm<+gvnfT{(GSjKdYxyNlsA~I%TUFbp~53) z-hPlPdb@mWC=yHE5aMNhOHA#%haPJ83t@c`HYW1{8(&xhKVqv**Kg@IOLCF^Mq_ow zvqB?mc5HlClVtYv-U1)gQTXr`PfLLHN;mhX^VSN26ta|pQ^+Svm^jdSdF#CK$_Uh~ zy#iiDnHQ3UD zj(W`Q?XiWk0lUA~Q-PKIi=#hm$}Yvk+0gw_7m=Sr^~9#D>P>KlYzcu{oyO+tT+(;h zjf7~_OF`b5dmDJw2VcIS+Ns;H;~XuEz6OjQGuvtHyX3Zyun|u-FHK6&y+zkqgVe8O ze0BBG_GhVBq?;HHcI&tMgV&KZIUxprcPWGH5XJv|7LG$K6o8!pNC^Wfp}yBYS}Jo)dIC z1bP`gG}8ts_Vv5cENg*a56>~vMty%jsI;s`*N@CK9)j;HnLZKg#NX=`$(La&YZ2i7 zQk3|Sj-}7MP2e2$JnbP{liAF(@3Q*?7quJF8}a#fr#n60Czj?z@&{GI2VQMiH20WC zf2{E}i#^D)jTxVsYV?nKDk<9swd&alT@9XPn%4JfWQVa6ko^;;cx5Ez@v5hnV3>`I zjr(-^F*Ab!>3xFujn~(GFS8A|xpBgure3AFZHAwu zq!ASCW8eB%@ee|;$UX66M_X)pJvErJEpCg2?Mp~`nqTD5f?&(p#pD^nu`M#}7YxA& z6PM%MhFaGaym})EpIKhY2Rc?~Y6rtw2{6wzJ3)y&>$V278iRtTso2`V>`C`^-yt`6 z@22jj-2F>_<9U6)Pu}N?{7{&%^;(82btmyfZp@LMs!9jv`~4`VVX|52dEl{W|AyUG z*j;z3jk<=!oK7sb>(N7RU$abl6KqkQJB@QHuszBu{9gG zO>sXh{#8a2+(P~x`>&%~6G@uJys31;Aq`A6Zt5Nf=V14PnR?-q^P$zk=Z>Xq8uMBi zAc#+7S-J3l>8CZ-xdNCG+>t)UcFn-8t~RAtOn>qVF7hweLX985zG5897%jDMaf zJrmYgB6z2Jm3S=V#c(L%1e4E`_g4N4>Ba)mz%gT@OKm-g8+@B9Hz0g?P}f)bG+=v& zW8s#zQnM?UJJ3Px}%cfT#__ zx%*5VrV~B$ZiWN})WRDJY-veNA*2ewuOIRwy|hg_X5GpkxmDr)(sA94L9($o;CvS za!!{}Jmh9SIp42d-%?Vz7SY7GD%XASD#tCI5orf>7vd-DFs1ebWuhJJFu8efWlzM~ z-hI_tl302%%=$w(=+a!kCo(tAO0L$}?8fD2LduSr_J$Ur?yH(`uU#1_8lRctea6!h zvRW#e($!&IfF&R@WlDR`H!g3;V62)2g(rW^Rww|`0y6pe3TvnvbJnq!(*U#c49)w@fW8B zx@*)|8b92cAjc58-UsN)lzuRaowI4R>&0_mJa$=Gog=+)Zc?h^98>ix+@`{QD?)X# z!je**e&TtgEex|%_YInaIfsf#d@r!8svraQyLK$9ti)W-%jd`=GpbST+V369Hm-Os$?oY!h3#ywWm-B|^Ux&!#`5cBeav+yM&Rss zrkHbqByWw6txdU62GcibpJn|7;j%z?%362^7U&;IBZ}APV2)5<60egX`?$m z-fnXRhH!Y^vC`iNu|1%|5d7a&JLjjlnQSTv1}=zMV`VzR8ZN$ni+xgyO=#~qwB7$I z99q{toMA!guXIEwbq%Hda9dnITcGYeem{UeuqzY$buM+9$;@nd-PY1@A$c#ZYs9b8 z00v&ZOjo^SFr{Ru;r8`Ssk6|l*kbQ+4k(U;5&wNwkw zd-NeY6Sgc*Mkk=;%XW#B+!Y9#D4iu$&X!G1N(X1%ChnE7-FAjH>-Cn4*@L#xnCeT- zS|J;^9=Rh<9LIe}N`t}{5FMnqeN)z3)5{zaAZfmU?wE7CK{m}`l;@dN<^LOmx z%Zt@`;vzS`ZLHkpj4P>m=)%M5klr;Mb<$>SB*~a$C}dfl)q|P ziK9mbt6QNQKj0WUG$y#a$ob@b>@~{TAWYI3vBwgww@i3YL)`VA0Y$k<2?Xm9MBD3Pm1qNFbC7rzRutmDa$mph(It+`Qegj*9%<( 
zW(^ec=@IQpaQkiNn6wclu4CYb^&4lAu0~eEe`4qUKy2>$NQwDy%39xy@)aE$Z+nJp zQ3%d>jK?lv%|MxJGlZ`;ro*0)u(mm)3?E^oUk-AMj;jG#cvs{#FsYcy&AUiWL9s&0 zY5vns4W}#z=UEh#L~RrImX2IQ>*9)a7B>!*@So`MU!SvytOq5-QtL~jN3FvtHX(B& zhyw^Qci-Qv&3syFH90bWb0NK5MyX+8B%mFBq_}abHK?@8b|~GNzMw8%&lTLW@vG^U zlv~lA$MxbSp1UK}j}uQ1nJ7vyd%UDo!e6LTMwtauSRzjHWS7AP`;WXSGB#9^-Zw1p zbY4^$lFZMXhVLC@F_5y3sPIR@-wN0s23KD51P$cXmdU;Cn)-oEYJZVVZ`0$EhY5MX z+>qJ1rH(?maw1b?w1g5U?m=~NZ%A5 z`H`Gf4yiZGafQn`Z0PBbfMYh|E!HMQHI-ht)m1&)F0Rnjf_l1sYDL4nhA;WCA#m&+ zC>xn$-?H|f6bfII?rMK3>Q-(w(2+@M?J75-tJb@WxQuA-?`5uO?b^KuzF=JOC^OdX zR_76K?3U(qQ1``aI~4}tvK7Lm=rzX&WgPBqVrh^b6PeE0$JSsJj4*h#VI9~?>%NG( zq1wG-oNJ6F^s=?kc2c&PLG4X4sJ`oVf1P;Zw!XAFL~xU2eYKs@NsA|gmCs){N;``9 zB!OnBF4q7u!nk6on5L&mp2uB|#hXvltP6s4TU@`IKDIRgdLAF~8aXdd&2jCQ1^7&+ z;f&YBcCKni563Q}CMu9?zF?j`#qjRr5a*iq`z5*oL-YW41{oEU*yE6(fgX0Ab$=!G zYfKPf+c&>RA^2tXi_QF-g`zJ>xeI)35@T_;Ct)EKE~{#Le4f2L@LFuVo|-XVNhqJT zw6EZR7J?V17ttveJ^WDfN&cs=JMAVJ@ck}wQV{L}PV^wgqSzPC8JVIIpuSqb(Kw~< zRUXJYniX!lq#Rdxt;`iPe{+O<(X9%3*8j{s?vi-#?a&2n5Cnrz}E7~9i#NTcN>(r zHKko^;rnnckv;MOJGbs%>K%(?YHYu#+xXEBVcIcD{otk9cI7q0Jk9D8hu$b(OJKHdzcfBkWrB-gKs(Hx0Wv#&?)b^yOTs3?EZ zP$vicc8nYYYl$pdQ=z1=KUkB1xqwLSUqN@ye|Xn~ygGM-xnaGXLnlM_Fqg{8b2~$W zyFZ!5Lb8GVS}U_}tx@-^c(gNuCN;H^%=0JqHTF@n>O-pi4F^5BMMB5Ie!GF@X8qI* z|7vZ6OqO3~o0CaiUfp-o6q(l{pF#Ka#)*}vx`sShaBE+}d+uWO>i3Fr39X;N$K8i= zOM^>ms9`Vj+4Lr_@k>9cHOX==CR!f0txPMRF5`qo3924G=LV1N^eqSS&dXj-iUYd# zAuT+6bIPp-`XMD*7QrW|seAdBn$MUAi;m)2m(+8Fd~Il9EKRI}{7hX9BXa8e`1cSm zmc3eg)S*poS{Vfwz1um#HwStXdy&2qdfyyKdk_QGn782vbICK2KhrD&6Zl4N+2JCN zduf^l1Hk!W5cjjC@h+~DLqEB99(kq^k!H5>Q=>mJH~)B>blr&z@auPuzpC!!JOEls zdqy}4FX}Z9*~LaDe93ESKHwCA?h7_Ir@4>)-V|tfge0quXynY3WZtbU``+dsvd@gP zb$#G9!WBM?cvXg{k^?Kv=wLhavlnhz_gUjKjSHXTXe9A8Y3uT|MuAFyI+T`9=h7Bn z`;UI=X5{i8^6e*%z75p+B8=b&%noP2Dw^_GbgFO`jJud*i&gmLMV64_UJBTf_nvKe z-)S>vsKh+QEGA|(Wv@(E)QqFE{wxK4oH>7|4Oj@BMLs+8{LE=PjbHLeA-(tm?70$& zZl7(useO9&td(_J?~3 zGF^d%h!O7Ew2GS3UHd+DS!VoP8HFc#=U1dhcgEClfe#mKZ+V{ZES=e^oM_{+6t*Kx zb25sPckA=0U^Cd3sZOylaz8Ib8+S1JC2+RqWR-LB=aImWWEGi7VzZKnJh`HnnbUWt z!B^nG^>CgOroh+1QuZBfSL1cRoqPHR)?qJd6qUD(mc3<# zYlUmC%*-Tvr4rd(`x-avhRZF&y)O6KBi9P|+P*J+e!sus`@R3beXiFTkMlT>BNh+7 zUw(mcIt*MBy)DXC;ek2jjV?#4Af?h7z6>a1_YQL?{i9ahqs4w!+={$KXd;_tBIt`3 z0wEG18EIc#`?>Wp0tDAJ9e6LTANSa@Dj_XWEP;*G)^vSz%8ybC@OX$B^F= zwWNmItO=nzoyd(_`8=dPw~Dj4NQAX47$G?AL+`&+@N|u^rCJQy5qhoh=1Vt|xREL! zVM5LBu19H4#5#;_+WRBhM4BISpV2=1Tx+$$!4x-rVWMk~d{wW3HRr!0GoZMp8pRrVj z9P!6k=@Lm$eSG)wxVSngwi)}+K=FP{cMFSB#hf*9v< zAuwaN2-0PZFy08uEZ-+(-JNEKVE*qZd|tNvAz|)6TQ`AbleM63sa>w!foxNa)vlXg zCI>BwEe;oRW-*qivAeY-C|jFdO{0(7_JXDc3fjI`uz$BW9xMeI>lZ>Vj>M@83f~!p zyjzbpv(>m{uq0mHXTKj3*hRI{8G*SzyP(HxdTA4Ze%N#^ud36DDP!}wav1vDJ&8X< zQKL$jM9bFqG=E4e^o{P+Mni3IUA?7W%5LV9}djAQ84y%4%- z+V1C%qtacjeII2aS?E-ZcY#%mV#4_JrmV{HY>259dYy|I29EVJb{1zTs z&7OIV4j1vFfOhAKGh|lNG6mHp!tt5vmcQtf{kIDTJq|t8lm<1y+3G|+60Z{x~6fIk`|FOCpUpkKw zVUf(AW@M8_DIMsqV$B?{bI_~}*i{I71!Q}e%UKaywe4RNf*$^ZJsc=a#yN)zY2Hi! 
zC9~{l$;PROrY}Wp2+U3#Y%M;Bi@-i6N`g%Nu##3U8rmtI8?#CBek2(xP?WA8-h>7l zMR?DI2SK>%c(#=@A!{jqLX!8Fg||K(AGNKtQ9|Qn&Pyp#8qYgWLUzM*ivBw!$Z`SmXThgGmBPuhnoB4kE;<5GZS+UWGGl zy7D*i=6JFTUoHfbFffV`<3TtNnCDF2GVH8=m-nhv&~}-rky4rQB!XG5yDh(0cim7Y zYx}lgnvXr}#+~8{pVof6Pw!+Z#J;)2OtyhCsjy&2sC-$|V7TEcu>zd{dI9$T5P!dt zEbO}W_6dj<8YnxCn$)ad>y-J|?N<+F4JAJr%g>ap^J@X6f6WRQAtXNZoR*hQ%pe_a zHAdZhqi1GvWkFyFLli1{OYUya=BvFBp!Lup!uc|*@;AEX_{D#fn6SC8&K;A5B3hU0 zI3k@Yr&i*qMnSv z*uS%n)+#^V)S)2!5Kug18hR3KWa%UdvoU?NrtIX;tj<5)9@j^8qD2}Fn#=N=a*cLf zSD9V+4Zj;0S4((Qs=E(+0zEue+Lfog@6;5N{79HCFuuEPaKb~f;t!?_3wJXL*Twe` zRs?tbi3)*~Tb_L5`I$n5vQP^$y z5kn1sG>(n|-!)viHhL$oPuW@6!{ZRyUl{Hd@L@?VeB^JA&S_G@5AJ{E79;YSKRdXe z>~@w(%?VG9*%xmZt`n}|i1ySeR<@6FQI zvj}?P6+#qiss+9}OTD3c^%dzQ(NR!Ccaf|8;rN2#&}ovx^SYlI2jj?%t5WT;WvnAc zje#(pg;z>RlOZ(|FD-(zxHmNd?J5JyT!QrfJgqh7Po=n0FZlfN%v+$!j^S9Ur_DI3 zi80ecBDWg2`Z^1dgGQ=j4sw1 z{A9XOJ{VaZYPEbn@%-~v=S?qf=7dN21)OcgVrJqB9Q6a!StlFh9#qHNxF$PD$A}7H z)ovQQrB(XE{?zC!Ws?)F8@<`LwAFf(@mnX)5AoKU4Kr`2mAzMboF#MKVl1->w_Cfm z)&=>`l(N>C>V~nF&JvDeJDG?GC_|ClMq%;ME@|&FLQVg2eX7fI5te}q7)&)s#`x%6 z22H{y$6p!NTFlC9lSy1nz4W$6B(sD6oQmGxF4?tu$v!8eMujXz8yYFYM#1Ph)uB&M zS>}^9|K8%)gAVHwhod{@O&3PLnx4sP)iD>gr`e!=v$guj6(4tp3cM>BUq|BYobc%Z z;baFb6E4Kf?vCC2GsafJHgBt}%}W>RLW65(Kz5B|8hWZJa~qf4wN;%z1l9&rp%>%g z7MmkScko=9X9y>GJ|5i;>Kbyof)f(dL|P11osP_>*cw{K ze>HCit++ST)bNABy^?B0$+be`x;0bE4H46jh^xgGHD4 zsekbwD5(C?vuRzd&PcZ zeb^PoR*mg9w^juTbF9%^gd+a=%c;;1r))9_!55D&Xe$n|h_B61n?F@2XD94-M5%4v zk7NE|8(G^J(36N$cC=c_+Aq;daRl$6ZlZ{-bTn~K7ot$DTc_*mF_f?555m=N=$}b; z7W(4fFPM)9=7D$UQS503Pvw!B(PN^8KSdcJn9w`W8!O#a_*jg|c9=8{HtlolzTZK0 z5?#AEGCbHZy4m||?({YohmSeznHk8RpM2iV-iV+{ZK zO}{!%tgh#Z9Z^=3wD9$!5PvJo>7fI|KO;J=+^=r)g(eP7qLri2ZtTkUM+Ngcb@7-G z>Y3hArxXHR6;7`;_+RGVl=L^|t?|w-3rfPBJ|;;je#Uzb<*7|8jyzqGL+s^*(?v?1 zpMens@oaotDzEy>*(P^#^TzXs^02{=%x>&&0`O$+Bs)kjDfIS8CoevQUV?i% zL)ncr7Hyhdmm56&!dXP}+TAHzyzop#9wFE~ogrD&(6NYfJ_pa^|Kfqeo!b}TY2oa{ z0poQ#QfYQaidpF%UBLb>Jv1s??Exo`1s{Fzn^(ojxVug@Q4E}hDlfBn4%)74aC3si zt=CN{SCyMz!j8iXoMd4%EqtC;sxuy^Gc~H>Uzob35h^O7t2qbr!(4$dAF8wmZP)+LB{`CrjfFJ6{mKx3Y zeZ3m1K85QITV)pMvUwR64=3kN(36{)zMxG=*UbIJR7#&l_4EPy?)`R#6NPQM*+)y( zBl}HIuv|DVG<0nRL^@jQbRKZ79+6G7suI!YRqqt8GUd_7PA!_tf9vgbfb`))GP61# z?xF}|<`pIn{?SJS?m6}hee}Zm+yJyAB;!lF-H#r244k>hy+6J?i$|R3W}nw?aiUaD zy=bTsUY2d}fOX$ER&P*_fL_0=;;N_i5Q=P6R#)V4fLZxFZ=YSm;f>ua!BA$46MABT zAmOJwdc?i9ZA#ib-cKvsx zi1CsDaHm|a&+jTzhs1wQ$7P?XJc`#(3=-Qs#91@9u^jNmHA?!hcwoz}J$4RpC$nKC z=!4$!n2#jg_qvHrhlD}QDWW!663*TOUe=B*^Xf`Xn6#sYN;5z7pA8M3-2w5Pe8<+t z9Ll6`mkIKV)kS3*-GR|TLraQh-cLRtb?BT2%VILn0k2Ef*10b@Zn^qpza>6MNf(qH zO7!KOx9Pv}+Q>^u4Fw!z+nJz6_61}(KwVn6^X5YwcJQ}z(VNq&gdmjnV87s3_g4pw zot*5jwmBV>PE$SOR1UaO2H{sD)Vt|q8>TXHL z0LO{fvD_@%Q59S6<`A=m8PAWd&imgX*|T?2nC1V-wx?w#q|e4ONh^K(v8=9}(-bH6 z1px88gHq)}@kP-jyVs=~iB4|IZ_; zu6Ca6js#5O=-?9E%V@MZ?7l?TmP-3M7tV$YT>>RG4tO|D-f+~#m^$vypJIt8n;8We{um4P)vupmD0v0#(Eh42GK=0GrN=Wc3NP@*sg8o~g}vorYSnoy(F$e$`;q6GSk&v~ zk?Bt2>vq4RIEtZmG`WR0!|#`@xXv*CmbMC9Hb8#)NyGqjerzHPz0}pt=Yf z4<*WG$IRTM$52oS@1SBQK?pLmiwmtTk6hTN9}fQy9a3b$Vs;_cGj@xEAl9j15;h=J z#cS0Uz_#$H%5y=DDU0A4MO26|;gY`&`Pr*ZX>=Ztg`ot$m8dx**E zGSRPS9dIsDe)ddpR%otTS@!wmaFMs4j^BRTXzcF`ViFfGo>$SZ=;C%`@Bh6yGtN+O zI&8~Ji7jYSBZV1ScC}@kE_R9MY4kC^FbeT~QT3gxAEd}2y23N^*iv4PEG*=ORkKbL zz^7n#$Wz`ZRION-uJ8Njz(%Df*P>1--402cMoC5XM3*yk-)?3>y$$Hf3UcdQ){9X* zhfM{hqU~RR=ElMwS&%lvU=B5ScmWMx>A><|J2xPY#Ys^c57A8Q0bs-?{Y3Eh&F|pg z4Fy*dwI=1%jVYFX#Kasg5_!*AVRMitCcL7#z|>cxpMw4TrycdPfyV!&s_!3}C4jdT z$psV(OIlnwBV^1*oYo2t)pCdbqwnchu4eueOIx3})~4SiGca}-i#nLR~j3O4AlW8to&$Hf=N5ejM!Jwk5K7TST@as zTI(9s4j0NzT7iza={z6EN+j2}9+>K9Mt&VWCLw6UYqXv-oUmTmY&*sr@Om4{LWA)wj6|-Cu 
zd-Ai89zR)Rz5|~T0EZ**Y+FE( z^ZoUL8UfiBEL%YRHOy2h=e{xtdv& z6D8ZLrx!XGO@$^QvtNw0MBPv_7SFx)ur`%anMOS6lUcz8K;;g8td{)7_u73}48EhI z4S4QiO|T|^#P5k5uPYvX*k|pHWAK8i8zO2~A8ll+4KfL@|0ukBD$yg^>XVdwxxR-6 z?X1m9hYfCDwlXgcp1ICvZ_!k47xrJkl$xKh8zKQr{=cvF`JYP-^;Ih>0K@Q}M+dq) z4D-P_uo4-%B=A!vm@a^lupIFyMf#~c1AhrXU;!uUOY%QaT{Lcntw%_55Goh9UQSNw zu_?l8nlH58NHACZ%@5eS%G)3gEu{qsPjokLlMkqai8$VUGIP*!CGTW)mf;gJn8oor zPg9#3(%EYvByO&ajh}L)Qul_1Iy7!c4)5GT1_CcNJPFQ}BFA(8#pnnHpNjC9tNQ@F zFK1JA`yItrITE7@i$=eb^{(lI{zn(l^d8-3omtoG2lUX%@9Jy(H`dflq<-%QTBD1j zFi&KTBigLqy4hq=3w-vzEY;pQ_|xk2>1X%F8(4Tc}MxQMJ7k@|$)N3)@Ho(Nf3 z$yr`n=#vNtjMSm$(QL{4)y<};BLGk*(-VF*#!#6<5tTb;aE(SN>Ck%|OaCKI= zU;Qy5MGv5ayvo|UvbAqL85*SsH->bbxg>cthX3a5bBO>kfa%Hg0`jk=pxy2CyZR^v z=CtSkyNI&YnLkPA;zjhU3biDl@WG5O*Sy!HA4*w3zAPIS-AvkXii$E#Uw=MP{eBYX zi=S6O;h%+7J;FThaUPT8LvhngfR*sm!3O3fNg)HW{}E@HruBQ?yAV6-`VKeW>)>oX z*z*b^4GdrT;H#2Jaodn5C;lWqqT#1>)YD~~#IK6I6zKHQBKwA%;4WabPx{TREY){K zhn&xN57Y%Dm=@kDgR7I7Kjo$jBZEFX9X`Y5`VKQrZXZYUljl~Kf^Nqi3fR3C|M^d@ z4d{78oMgzB@rEm-hd8%ZI+L=fq1}h?w{uVDZ6=nt53cl^%`TLy1vAA14{)|k z$u@wH$o7NeHXVRd4B2KPgJN_ic<#zgzDLTAda1w4gI}Y&*p+R=J$9!E{cb{SvBX^x z{q{=qJEcmI`FE{NKrl;{#QmX*A>aNGNzy(OusuXB>?p{0E97quBwaY?!xoP!KJhzC zPKs^hf_EMbkQ}uA$awaw+S&_=)cqhenl)ONm=y~QUh4cF8gJjuK;&-8AeRK32!qf4 zuXcTI3kf6((n$UKs^#IS(XntGuhH{U-ySwKEl~Y^wNvuQhDi0(RIh4Zd_Z{pNHkSC~!?O!+ z%EYdA);jCpUr;9RWHqHHSp^&%)LzdFT79PmQ;FcQb@tjEGoSl7?s{EN3_F|PzeuX_ zDV&;wJkV+M`of%dru|hj=Wzcf&%tO!-lb*-{9+*Ewq#OpL9-Sd73(oE4JIM7UOjy$ z(HQTe?IjV-!6Ncv1>W4Ipi zk(`VwKjkE(ll=ut?`LL7)rB}&-dFTi^{?D-g`~R#uH7ASE-c1;(I0U3LDRN9aA1Am z{}O=FFnQiTHcQEz0D`+mGmkfBCy^wCLV=eSTr>~_ygj5qChd2G;(lOqhJ_A~7^0oM zyh&}rR=nm}M`&CSAf^^)J=yRF7ojel-qe$bu5?aNa-1H+&p)xmOPm_?0&PO(5#)W@A&3g zHjaRhtKxN58ago%+70>>dw!caHLc)-*m&SB+LSh^bMycR1@pvd2A9>ZpX3r75Ec>!@0Fg38AiTYeL?+!)0@+8ZTvw}Lw= z9q{ibU$kyzH#cS%C^s9sLLSW?;D7jM;RZ$+H@*j^?*8I+BE5vT9!x5N5B6tIDRBIK z%5M-Q8wO#&*UW4tH6xNMK{tKc8lEooKI=ea&prkV7B(Iz4~vYLJwesC`?fo`Z#=qU z7xea*NXUC(;4>~cbT$I~o}R!3twB!Ilu@kJIGi?|4w|S|EDal9`!j_d0pv01Tb`dQ zF!Fr(C||aGo!_e?$lBi<@7(}>$zwq^aYyc47?INBdMD5jf|qwN=QU3c_hI|}A^h2Hids+#xo6B@6GYt?QLR;W@$#)W6D z6I;xnx_m%(VBwk^!*_@N8g^cHDxYB5j?Ze31!rv#&Pt9`#<-JbDt8v55}VaiXXcP8 zDTh>(jjOXB&J$vxRwXskttk%bJo?p=!_C7Ufom$6St+cV-H&21E0Lac?R)62<(%^|d4sZ7UnBdt5dd&C}>iK#ldsJ5{B-!^P zAjIkk2|xJnj5ZuJ)LAsM(u!2iX_BDOrj>hR?TV|cM;W-&cun7LcTI7u34Y*DTTr-w z@OJ{X}&6_l0fTHByHt90tF==vDj?p3??1s{N+8G8e5NorFwXz{>XBcLLaH6h;$ z*LE-2K{kZ#t&HBXp?6f<18^^tGjU4oPFyja)l*(q0hf$HjbbA8D8mnqCi zT*b3!q14WpH{X6_UKAv)&Whk!kucem_f^>t@n^C7I`ohHCSHeJZohJJMJ9bS`&1pD zZWOwcQkg1iO=0Z10!y3xPx279_ilorrWJ0%-7hyMv&(DZ*Ddg6rbC-eigRBKCXYjOEdjzh2K^bs)p1oO3kiEtGR_9|iC~ zIuJF;_xLzG?Z3>aABAzswhB;7UT^ze67c-WiN~U5Vt-lqowD`ZCU4eKpUQ za#%sw<%N#T*tFGYcRm|&SlzJQCtBKkq%B=Cxm#tl#9N};TzLH9P~5h_ohgCYvtQh3 zv?QFCCV14wC1llvU~bFullpz+H+8ALrnfb>=W>-fG!!FzW-vLWKi&z<-qp{#8~A!q zYT7H;M(JlGE6czIPgC6_0+3mzJ`0a_B>*b9)Wz%L9{Co<_{r@*+SOMxyIUFA7Ce2v zLuPOM_E9E5WeyFNRABGRrx*z4nckq zs0fY-lZbFi-b*QV%U;!}_lluiU%Kc*=%>wbr}ctpbK;|^&RtuM;{u8BRarlw>Yz|J zcJQ}CnTn=v0=$RcDZ8#(f7)Pm^lZ!YxM3yY6dsAAskAU9#wJ!&Wf~W*rKD|;h#5IV zpeYDwaa93~Y1N$YwsHIEW%OIj$~cU~keIbFkZrK^#>EBb6wCYW<>&BIFwjw->#pqMU6pW zwZ~^EJ=qIJvmqOQvV4AeFmYW^ply1Ri%f}XoUe}YDYXh70^W!St@h^;1K!lPd_wd! 
zp~NZevfdGj3b&AI%IF78x>(z9GD#5ELG!OwLzjBV_)?NENbr^|k0k*7q1|#P(d#A&jl)IO_9@J^ z{I@Fk0zaR<;OXkb!ud9cGn+Pi`iKGHX35qpJ<@jWu%Z9O`(e1yl++KLH3B+)7et@R z7Cjr_uCUr#Xd#m_uXwclfjUKV*4wXNi{0i0DjC;9D%krLxnt1TFwsPALpj3SZ$J5} zm0T{g%`vNukI=L^N28%qz&peu-8YsHUJ)N2Q^xABp87Ufd!9nZWaUEt`woD0#?vnd zZqqvfLI+Z-J@8QF<_>2R&T>vRjFt%59b?RyA60BQ3B_nUp1drXFRk*(JKi0$^B#dD zZGt|${no2#nt0#_57#Q|pRh$E1sk0HQ1?Ex)lk3VT)`L8KmkM$Gy91&NudzVPxGr2I86`iM zjSgGzgUZpbjF?;v3VLZCgkRHw$x6XPpX?$keD^EAtov7L@LZ-rR$BUyBq_@C zn{$Xle?*`y@KUNSMi}(3-6{UFN2#eacpOplk|+*X><(p;s@L^b>enG?W(#$#*ROwe zcVd{$?IqkXvX8McDPAAw>v+orHzG5-+wP4U4elIV-Bya`S2>c4It)ZNF-r%I#rf2> zwcSHh*={60c$W8LS*AI;tYg^8X+>7!$s$L*>h8g~`-2KP#;(i1z^fn+7sn|f4ch<{ z&qeRQ$nU|`6rPBGngsyyHoh_gAUD8=;OAu1jCbE)u8c4+DmcbU-7}xFxnk!Br|%Ap z+IT0&ujF{$$Hz2SxYBvbl7^75`%7%%JIf*S>S#_VqTr=-gwU+wtfZ@C;hI9s;5E;R_~9B`9hq4WZ4Vo*Okn&)&%QOJZcoZnBQP$Q|h5K3?cLjsp}z^J4k} z<6W<fbrqX*zE0UZuk)LL4})GGxa5hkmERljX7Yb~AH@_x*LJAo5v|>wE>U7c zNt_vR+Kv-w0aKDg7Spei6}=3J_q0z_YA-^Z@ZwOm1!mh&Q)lW=O+)$#AM$H|z2&oe zSqh_NRjXnQ=_k-5QJV5HoKm*a@MHg-u*e@M7R<}bbsI6vFWp%@6CZ@}s824Z&{|UbfqChM(Rkhn~5{jx+q3$H>B!uC5^i9UzTSL|1EN8nas;KNCDV` zXf-^yE1Klca_$+q=ALr1WHh8`cjMr z3kjV@8KdO`MU{UaJu;e{Kd2MdZ8rS-)1wDzIozig>>B~VRb*Ry?rwAL>W0!eeday^ z%R6#D0UB;Nl%{j$57eCMZp%2Ab6D(8K3>bZREXnt@MUGq)yLKa2E(@AEbPm3%kZZy zS3)!^{4#QK0z(?&AL=amrIo3rHmkHZ?%?J+L=sdRPOem~a^tLudyJ6A-X3f#g87N? zsyFNM70MN$y8lm02UxI_cgi)2D%7Y_Xn%w8zWgR_iPYG`*6fXb7F*iye$`YyKC^Lu zy4$W;CTM}h!R-DbPS?STNdoS}edJyv**Twwc{^DrtoDELPUIvbMn_&wKvInHZkL%T z5fNP__~#%wxt6SOTBhmLjcF>S8jjR-828R4J8`(elw6kghFSaYuN0@Ae52(x-@8uN zSKe1eeP-2Mm&tm15q?ktLfn}4(T}o*OLc^Mkw$kNVtiD}DjSww=b?B@%Y*-ZRAii` z%|N&2)N1Pa=m*K`U4PIQ?DKM*) z7h9YYi2BgV6m;`#IMx}e3%v+dnOsoNX8gDnIsGgPf58dHE+G{fL{^gfX zxuPck42y6G0*cE&mK6XPno-+rR-)T+rT#q)H3Zc3K2(?JiTpTzW#Lg?oatwfFhpR} z3EO`hXiL83tQGH$vi}vr`_frPgI`l>_n<}7O%58Mkyw=1_emx3Q;ZCCqg<+=Q-Pn8 zEZt1X*b5kIv{{12CUy`IL#k@FMOmY}_^Zch>o8d@EDKc^a>O0~TS$|4tzy>DJEL@Z z4w1cYd5OF^jv*7G+JXFvT)AY+doX>rVM_m;Y{dY~pSzpo?k2f!mE(KI7@TBGxAdUmg zC8Etzau7})<_?oG;VOl50TT}k+z*T>yEwosEu+mDK6EWgxJxUwas077O0j`e7qQ1b z_3Ek6D|g@S)O_!x4WU#{lBGn>Tdoq#xxrAkUiM9tSQWZI{VrS7hN$IqK|qV&%bT-u zNPtFK3)CcemsfV8%PUB`oI3FZeWvk~X$3tJzLEw+IxNMq@%9-tuDi0&o!@R#taJYx z2C2P#*z0F(ji@=JuME-yPiWFj`P8ZgxR_nY6a@i!kArvs&%q>|DZ+x*Eh~)gzz=Ca z>2oh1RB^N)E9wxAMY7I3U)dCiUq zM+PIovzu@~Klt*2m=n#Ya|!DPk+x~Tz7g=8Nl)Lp zH`3Pq~H(__BBijbqGgsdps>cLVU>LujCEg5i`W`m@5g zn)&G~k}!i0)Z-2})?d|h`p#Hg9KxTAL#Wt+`~4fBB(co_XPH%YB3%`0B+UUF#IJYt z9ED#Gj;wvKNQEQ`RH{*W+R%#+^0w2dYEhZ-d-9oA+HO*hgkBd}2t>})774~7<~ru; zo>D{jDjO4;GXjdu#lgTH1=lqJx7JiIio;Q=lZ8=$X&5P(D3}n%L{TpqFSo6T-+E!t zc}~-cvSr5aDlOi8KpPW;(b(jX+E*5*E1R?VlpbV^E0(6t3Q9js?qAG>QDgutP#T=| z%``)I(AOIB^GumoqVo1q0El`z7GrJ84)w_P^#zT#fj4F;?CFMN?~EDy(slR9uMXJowU@2T$Cgc8ZITreGo?ks-V`FMENU5_f#2k`}^! zEF@b(Xh&+gv-oN4?E#zepo#fn$Gqk1M5r4h#wPGMvDJnLdq3g&tK+E6O6 zA>gMMFI#mCh|FL7>WRS90R9%RX8&(A9B@u7Y5zA)!`ta1Ps=SNWKI^UUQE!|d1&yU JTGRH;{{a>RamfGx literal 0 HcmV?d00001