From 9f39da8859503425d275f33a298185ae279c4e0f Mon Sep 17 00:00:00 2001 From: WuHaobo Date: Thu, 9 Apr 2020 02:16:30 +0800 Subject: [PATCH] Init PaddleClas --- .gitignore | 8 + .pre-commit-config.yaml | 27 + configs/AlexNet/AlexNet.yaml | 75 +++ configs/DPN/DPN107.yaml | 75 +++ configs/DPN/DPN131.yaml | 75 +++ configs/DPN/DPN68.yaml | 75 +++ configs/DPN/DPN92.yaml | 75 +++ configs/DPN/DPN98.yaml | 75 +++ configs/DarkNet/DarkNet53.yaml | 71 ++ configs/DenseNet/DenseNet121.yaml | 74 +++ configs/DenseNet/DenseNet161.yaml | 74 +++ configs/DenseNet/DenseNet169.yaml | 74 +++ configs/DenseNet/DenseNet201.yaml | 74 +++ configs/DenseNet/DenseNet264.yaml | 74 +++ configs/HRNet/HRNet_W18_C.yaml | 74 +++ configs/HRNet/HRNet_W30_C.yaml | 74 +++ configs/HRNet/HRNet_W32_C.yaml | 74 +++ configs/HRNet/HRNet_W40_C.yaml | 74 +++ configs/HRNet/HRNet_W44_C.yaml | 74 +++ configs/HRNet/HRNet_W48_C.yaml | 74 +++ configs/HRNet/HRNet_W64_C.yaml | 74 +++ configs/Inception/GoogLeNet.yaml | 69 ++ configs/Inception/InceptionV4.yaml | 77 +++ configs/MobileNetV1/MobileNetV1.yaml | 75 +++ configs/MobileNetV1/MobileNetV1_x0_25.yaml | 75 +++ configs/MobileNetV1/MobileNetV1_x0_5.yaml | 75 +++ configs/MobileNetV1/MobileNetV1_x0_75.yaml | 75 +++ configs/MobileNetV2/MobileNetV2.yaml | 74 +++ configs/MobileNetV2/MobileNetV2_x0_25.yaml | 75 +++ configs/MobileNetV2/MobileNetV2_x0_5.yaml | 75 +++ configs/MobileNetV2/MobileNetV2_x0_75.yaml | 74 +++ configs/MobileNetV2/MobileNetV2_x1_5.yaml | 74 +++ configs/MobileNetV2/MobileNetV2_x2_0.yaml | 74 +++ .../MobileNetV3/MobileNetV3_large_x0_35.yaml | 75 +++ .../MobileNetV3/MobileNetV3_large_x0_5.yaml | 75 +++ .../MobileNetV3/MobileNetV3_large_x0_75.yaml | 75 +++ .../MobileNetV3/MobileNetV3_large_x1_0.yaml | 76 +++ .../MobileNetV3/MobileNetV3_large_x1_25.yaml | 75 +++ .../MobileNetV3/MobileNetV3_small_x0_35.yaml | 74 +++ .../MobileNetV3/MobileNetV3_small_x0_5.yaml | 75 +++ .../MobileNetV3/MobileNetV3_small_x0_75.yaml | 75 +++ 
.../MobileNetV3/MobileNetV3_small_x1_0.yaml | 75 +++ .../MobileNetV3/MobileNetV3_small_x1_25.yaml | 75 +++ configs/Res2Net/Res2Net101_vd_26w_4s.yaml | 75 +++ configs/Res2Net/Res2Net200_vd_26w_4s.yaml | 75 +++ configs/Res2Net/Res2Net50_14w_8s.yaml | 75 +++ configs/Res2Net/Res2Net50_26w_4s.yaml | 75 +++ configs/Res2Net/Res2Net50_vd_26w_4s.yaml | 75 +++ configs/ResNeXt/ResNeXt101_32x4d.yaml | 74 +++ configs/ResNeXt/ResNeXt101_64x4d.yaml | 74 +++ configs/ResNeXt/ResNeXt101_vd_32x4d.yaml | 75 +++ configs/ResNeXt/ResNeXt101_vd_64x4d.yaml | 75 +++ configs/ResNeXt/ResNeXt152_32x4d.yaml | 74 +++ configs/ResNeXt/ResNeXt152_64x4d.yaml | 74 +++ configs/ResNeXt/ResNeXt152_vd_32x4d.yaml | 75 +++ configs/ResNeXt/ResNeXt152_vd_64x4d.yaml | 75 +++ configs/ResNeXt/ResNeXt50_32x4d.yaml | 74 +++ configs/ResNeXt/ResNeXt50_64x4d.yaml | 75 +++ configs/ResNeXt/ResNeXt50_vd_32x4d.yaml | 80 +++ configs/ResNeXt/ResNeXt50_vd_64x4d.yaml | 75 +++ configs/ResNet/ResNet101.yaml | 74 +++ configs/ResNet/ResNet101_vd.yaml | 75 +++ configs/ResNet/ResNet152.yaml | 74 +++ configs/ResNet/ResNet152_vd.yaml | 75 +++ configs/ResNet/ResNet18.yaml | 72 ++ configs/ResNet/ResNet18_vd.yaml | 75 +++ configs/ResNet/ResNet200_vd.yaml | 75 +++ configs/ResNet/ResNet34.yaml | 72 ++ configs/ResNet/ResNet34_vd.yaml | 75 +++ configs/ResNet/ResNet50.yaml | 74 +++ configs/ResNet/ResNet50_vc.yaml | 72 ++ configs/ResNet/ResNet50_vd.yaml | 75 +++ configs/ResNet_ACNet/ResNet_ACNet.yaml | 75 +++ configs/SENet/SENet154_vd.yaml | 75 +++ configs/SENet/SE_ResNeXt101_32x4d.yaml | 72 ++ configs/SENet/SE_ResNeXt50_32x4d.yaml | 72 ++ configs/SENet/SE_ResNeXt50_vd_32x4d.yaml | 75 +++ configs/SENet/SE_ResNet18_vd.yaml | 75 +++ configs/SENet/SE_ResNet34_vd.yaml | 75 +++ configs/SENet/SE_ResNet50_vd.yaml | 75 +++ configs/ShuffleNet/ShuffleNetV2.yaml | 74 +++ configs/ShuffleNet/ShuffleNetV2_swish.yaml | 74 +++ configs/ShuffleNet/ShuffleNetV2_x0_25.yaml | 76 +++ configs/ShuffleNet/ShuffleNetV2_x0_33.yaml | 76 +++ 
configs/ShuffleNet/ShuffleNetV2_x0_5.yaml | 76 +++ configs/ShuffleNet/ShuffleNetV2_x1_5.yaml | 75 +++ configs/ShuffleNet/ShuffleNetV2_x2_0.yaml | 74 +++ configs/SqueezeNet/SqueezeNet1_0.yaml | 71 ++ configs/SqueezeNet/SqueezeNet1_1.yaml | 69 ++ configs/VGG/VGG11.yaml | 69 ++ configs/VGG/VGG13.yaml | 73 +++ configs/VGG/VGG16.yaml | 73 +++ configs/VGG/VGG19.yaml | 49 ++ configs/eval.yaml | 31 + ppcls/__init__.py | 20 + ppcls/data/__init__.py | 15 + ppcls/data/imaug/__init__.py | 94 +++ ppcls/data/imaug/autoaugment.py | 264 ++++++++ ppcls/data/imaug/batch_operators.py | 115 ++++ ppcls/data/imaug/cutout.py | 39 ++ ppcls/data/imaug/fmix.py | 217 ++++++ ppcls/data/imaug/grid.py | 87 +++ ppcls/data/imaug/hide_and_seek.py | 42 ++ ppcls/data/imaug/operators.py | 210 ++++++ ppcls/data/imaug/randaugment.py | 87 +++ ppcls/data/imaug/random_erasing.py | 53 ++ ppcls/data/reader.py | 275 ++++++++ ppcls/modeling/__init__.py | 20 + ppcls/modeling/architectures/__init__.py | 44 ++ ppcls/modeling/architectures/alexnet.py | 172 +++++ ppcls/modeling/architectures/darknet.py | 120 ++++ ppcls/modeling/architectures/darts_gs.py | 543 +++++++++++++++ ppcls/modeling/architectures/densenet.py | 204 ++++++ ppcls/modeling/architectures/dpn.py | 337 ++++++++++ ppcls/modeling/architectures/efficientnet.py | 616 ++++++++++++++++++ ppcls/modeling/architectures/googlenet.py | 237 +++++++ ppcls/modeling/architectures/hrnet.py | 459 +++++++++++++ ppcls/modeling/architectures/inception_v4.py | 354 ++++++++++ ppcls/modeling/architectures/layers.py | 250 +++++++ ppcls/modeling/architectures/mobilenet_v1.py | 218 +++++++ ppcls/modeling/architectures/mobilenet_v2.py | 230 +++++++ ppcls/modeling/architectures/mobilenet_v3.py | 310 +++++++++ ppcls/modeling/architectures/model_libs.py | 143 ++++ ppcls/modeling/architectures/res2net.py | 225 +++++++ ppcls/modeling/architectures/res2net_vd.py | 294 +++++++++ ppcls/modeling/architectures/resnet.py | 240 +++++++ ppcls/modeling/architectures/resnet_acnet.py | 332 
++++++++++ ppcls/modeling/architectures/resnet_vc.py | 194 ++++++ ppcls/modeling/architectures/resnet_vd.py | 293 +++++++++ ppcls/modeling/architectures/resnext.py | 195 ++++++ .../modeling/architectures/resnext101_wsl.py | 182 ++++++ ppcls/modeling/architectures/resnext_vd.py | 257 ++++++++ ppcls/modeling/architectures/se_resnet_vd.py | 336 ++++++++++ ppcls/modeling/architectures/se_resnext.py | 253 +++++++ ppcls/modeling/architectures/se_resnext_vd.py | 329 ++++++++++ ppcls/modeling/architectures/shufflenet_v2.py | 307 +++++++++ .../architectures/shufflenet_v2_swish.py | 293 +++++++++ ppcls/modeling/architectures/squeezenet.py | 133 ++++ ppcls/modeling/architectures/vgg.py | 108 +++ ppcls/modeling/architectures/xception.py | 281 ++++++++ .../architectures/xception_deeplab.py | 320 +++++++++ ppcls/modeling/loss.py | 99 +++ ppcls/modeling/utils.py | 43 ++ ppcls/optimizer/__init__.py | 19 + ppcls/optimizer/learning_rate.py | 169 +++++ ppcls/optimizer/optimizer.py | 53 ++ ppcls/test/demo.jpeg | Bin 0 -> 298976 bytes ppcls/test/test_download.py | 39 ++ ppcls/test/test_imaug.py | 271 ++++++++ ppcls/test/test_super_reader.py | 116 ++++ ppcls/utils/__init__.py | 22 + ppcls/utils/check.py | 129 ++++ ppcls/utils/config.py | 201 ++++++ ppcls/utils/environment.py | 39 ++ ppcls/utils/misc.py | 47 ++ ppcls/utils/model_zoo.py | 179 +++++ ppcls/utils/save_load.py | 124 ++++ requirements.txt | 4 + tools/download.py | 41 ++ tools/eval.py | 84 +++ tools/export_model.py | 77 +++ tools/infer/cpp_infer.py | 103 +++ tools/infer/infer.py | 119 ++++ tools/infer/py_infer.py | 101 +++ tools/infer/run.sh | 49 ++ tools/infer/utils.py | 85 +++ tools/program.py | 370 +++++++++++ tools/run.sh | 22 + tools/train.py | 105 +++ 169 files changed, 19860 insertions(+) create mode 100644 .gitignore create mode 100644 .pre-commit-config.yaml create mode 100644 configs/AlexNet/AlexNet.yaml create mode 100644 configs/DPN/DPN107.yaml create mode 100644 configs/DPN/DPN131.yaml create mode 100644 
configs/DPN/DPN68.yaml create mode 100644 configs/DPN/DPN92.yaml create mode 100644 configs/DPN/DPN98.yaml create mode 100644 configs/DarkNet/DarkNet53.yaml create mode 100644 configs/DenseNet/DenseNet121.yaml create mode 100644 configs/DenseNet/DenseNet161.yaml create mode 100644 configs/DenseNet/DenseNet169.yaml create mode 100644 configs/DenseNet/DenseNet201.yaml create mode 100644 configs/DenseNet/DenseNet264.yaml create mode 100644 configs/HRNet/HRNet_W18_C.yaml create mode 100644 configs/HRNet/HRNet_W30_C.yaml create mode 100644 configs/HRNet/HRNet_W32_C.yaml create mode 100644 configs/HRNet/HRNet_W40_C.yaml create mode 100644 configs/HRNet/HRNet_W44_C.yaml create mode 100644 configs/HRNet/HRNet_W48_C.yaml create mode 100644 configs/HRNet/HRNet_W64_C.yaml create mode 100644 configs/Inception/GoogLeNet.yaml create mode 100644 configs/Inception/InceptionV4.yaml create mode 100644 configs/MobileNetV1/MobileNetV1.yaml create mode 100644 configs/MobileNetV1/MobileNetV1_x0_25.yaml create mode 100644 configs/MobileNetV1/MobileNetV1_x0_5.yaml create mode 100644 configs/MobileNetV1/MobileNetV1_x0_75.yaml create mode 100644 configs/MobileNetV2/MobileNetV2.yaml create mode 100644 configs/MobileNetV2/MobileNetV2_x0_25.yaml create mode 100644 configs/MobileNetV2/MobileNetV2_x0_5.yaml create mode 100644 configs/MobileNetV2/MobileNetV2_x0_75.yaml create mode 100644 configs/MobileNetV2/MobileNetV2_x1_5.yaml create mode 100644 configs/MobileNetV2/MobileNetV2_x2_0.yaml create mode 100644 configs/MobileNetV3/MobileNetV3_large_x0_35.yaml create mode 100644 configs/MobileNetV3/MobileNetV3_large_x0_5.yaml create mode 100644 configs/MobileNetV3/MobileNetV3_large_x0_75.yaml create mode 100644 configs/MobileNetV3/MobileNetV3_large_x1_0.yaml create mode 100644 configs/MobileNetV3/MobileNetV3_large_x1_25.yaml create mode 100644 configs/MobileNetV3/MobileNetV3_small_x0_35.yaml create mode 100644 configs/MobileNetV3/MobileNetV3_small_x0_5.yaml create mode 100644 
configs/MobileNetV3/MobileNetV3_small_x0_75.yaml create mode 100644 configs/MobileNetV3/MobileNetV3_small_x1_0.yaml create mode 100644 configs/MobileNetV3/MobileNetV3_small_x1_25.yaml create mode 100644 configs/Res2Net/Res2Net101_vd_26w_4s.yaml create mode 100644 configs/Res2Net/Res2Net200_vd_26w_4s.yaml create mode 100644 configs/Res2Net/Res2Net50_14w_8s.yaml create mode 100644 configs/Res2Net/Res2Net50_26w_4s.yaml create mode 100644 configs/Res2Net/Res2Net50_vd_26w_4s.yaml create mode 100644 configs/ResNeXt/ResNeXt101_32x4d.yaml create mode 100644 configs/ResNeXt/ResNeXt101_64x4d.yaml create mode 100644 configs/ResNeXt/ResNeXt101_vd_32x4d.yaml create mode 100644 configs/ResNeXt/ResNeXt101_vd_64x4d.yaml create mode 100644 configs/ResNeXt/ResNeXt152_32x4d.yaml create mode 100644 configs/ResNeXt/ResNeXt152_64x4d.yaml create mode 100644 configs/ResNeXt/ResNeXt152_vd_32x4d.yaml create mode 100644 configs/ResNeXt/ResNeXt152_vd_64x4d.yaml create mode 100644 configs/ResNeXt/ResNeXt50_32x4d.yaml create mode 100644 configs/ResNeXt/ResNeXt50_64x4d.yaml create mode 100644 configs/ResNeXt/ResNeXt50_vd_32x4d.yaml create mode 100644 configs/ResNeXt/ResNeXt50_vd_64x4d.yaml create mode 100644 configs/ResNet/ResNet101.yaml create mode 100644 configs/ResNet/ResNet101_vd.yaml create mode 100644 configs/ResNet/ResNet152.yaml create mode 100644 configs/ResNet/ResNet152_vd.yaml create mode 100644 configs/ResNet/ResNet18.yaml create mode 100644 configs/ResNet/ResNet18_vd.yaml create mode 100644 configs/ResNet/ResNet200_vd.yaml create mode 100644 configs/ResNet/ResNet34.yaml create mode 100644 configs/ResNet/ResNet34_vd.yaml create mode 100644 configs/ResNet/ResNet50.yaml create mode 100644 configs/ResNet/ResNet50_vc.yaml create mode 100644 configs/ResNet/ResNet50_vd.yaml create mode 100644 configs/ResNet_ACNet/ResNet_ACNet.yaml create mode 100644 configs/SENet/SENet154_vd.yaml create mode 100644 configs/SENet/SE_ResNeXt101_32x4d.yaml create mode 100644 
configs/SENet/SE_ResNeXt50_32x4d.yaml create mode 100644 configs/SENet/SE_ResNeXt50_vd_32x4d.yaml create mode 100644 configs/SENet/SE_ResNet18_vd.yaml create mode 100644 configs/SENet/SE_ResNet34_vd.yaml create mode 100644 configs/SENet/SE_ResNet50_vd.yaml create mode 100644 configs/ShuffleNet/ShuffleNetV2.yaml create mode 100644 configs/ShuffleNet/ShuffleNetV2_swish.yaml create mode 100644 configs/ShuffleNet/ShuffleNetV2_x0_25.yaml create mode 100644 configs/ShuffleNet/ShuffleNetV2_x0_33.yaml create mode 100644 configs/ShuffleNet/ShuffleNetV2_x0_5.yaml create mode 100644 configs/ShuffleNet/ShuffleNetV2_x1_5.yaml create mode 100644 configs/ShuffleNet/ShuffleNetV2_x2_0.yaml create mode 100644 configs/SqueezeNet/SqueezeNet1_0.yaml create mode 100644 configs/SqueezeNet/SqueezeNet1_1.yaml create mode 100644 configs/VGG/VGG11.yaml create mode 100644 configs/VGG/VGG13.yaml create mode 100644 configs/VGG/VGG16.yaml create mode 100644 configs/VGG/VGG19.yaml create mode 100644 configs/eval.yaml create mode 100644 ppcls/__init__.py create mode 100644 ppcls/data/__init__.py create mode 100644 ppcls/data/imaug/__init__.py create mode 100644 ppcls/data/imaug/autoaugment.py create mode 100644 ppcls/data/imaug/batch_operators.py create mode 100644 ppcls/data/imaug/cutout.py create mode 100644 ppcls/data/imaug/fmix.py create mode 100644 ppcls/data/imaug/grid.py create mode 100644 ppcls/data/imaug/hide_and_seek.py create mode 100644 ppcls/data/imaug/operators.py create mode 100644 ppcls/data/imaug/randaugment.py create mode 100644 ppcls/data/imaug/random_erasing.py create mode 100755 ppcls/data/reader.py create mode 100644 ppcls/modeling/__init__.py create mode 100644 ppcls/modeling/architectures/__init__.py create mode 100644 ppcls/modeling/architectures/alexnet.py create mode 100644 ppcls/modeling/architectures/darknet.py create mode 100644 ppcls/modeling/architectures/darts_gs.py create mode 100644 ppcls/modeling/architectures/densenet.py create mode 100644 
ppcls/modeling/architectures/dpn.py create mode 100644 ppcls/modeling/architectures/efficientnet.py create mode 100644 ppcls/modeling/architectures/googlenet.py create mode 100644 ppcls/modeling/architectures/hrnet.py create mode 100644 ppcls/modeling/architectures/inception_v4.py create mode 100644 ppcls/modeling/architectures/layers.py create mode 100644 ppcls/modeling/architectures/mobilenet_v1.py create mode 100644 ppcls/modeling/architectures/mobilenet_v2.py create mode 100644 ppcls/modeling/architectures/mobilenet_v3.py create mode 100644 ppcls/modeling/architectures/model_libs.py create mode 100644 ppcls/modeling/architectures/res2net.py create mode 100644 ppcls/modeling/architectures/res2net_vd.py create mode 100644 ppcls/modeling/architectures/resnet.py create mode 100644 ppcls/modeling/architectures/resnet_acnet.py create mode 100644 ppcls/modeling/architectures/resnet_vc.py create mode 100644 ppcls/modeling/architectures/resnet_vd.py create mode 100644 ppcls/modeling/architectures/resnext.py create mode 100644 ppcls/modeling/architectures/resnext101_wsl.py create mode 100644 ppcls/modeling/architectures/resnext_vd.py create mode 100644 ppcls/modeling/architectures/se_resnet_vd.py create mode 100644 ppcls/modeling/architectures/se_resnext.py create mode 100644 ppcls/modeling/architectures/se_resnext_vd.py create mode 100644 ppcls/modeling/architectures/shufflenet_v2.py create mode 100644 ppcls/modeling/architectures/shufflenet_v2_swish.py create mode 100644 ppcls/modeling/architectures/squeezenet.py create mode 100644 ppcls/modeling/architectures/vgg.py create mode 100644 ppcls/modeling/architectures/xception.py create mode 100644 ppcls/modeling/architectures/xception_deeplab.py create mode 100644 ppcls/modeling/loss.py create mode 100644 ppcls/modeling/utils.py create mode 100644 ppcls/optimizer/__init__.py create mode 100644 ppcls/optimizer/learning_rate.py create mode 100644 ppcls/optimizer/optimizer.py create mode 100644 ppcls/test/demo.jpeg create 
mode 100644 ppcls/test/test_download.py create mode 100644 ppcls/test/test_imaug.py create mode 100644 ppcls/test/test_super_reader.py create mode 100644 ppcls/utils/__init__.py create mode 100644 ppcls/utils/check.py create mode 100644 ppcls/utils/config.py create mode 100644 ppcls/utils/environment.py create mode 100644 ppcls/utils/misc.py create mode 100644 ppcls/utils/model_zoo.py create mode 100644 ppcls/utils/save_load.py create mode 100644 requirements.txt create mode 100644 tools/download.py create mode 100644 tools/eval.py create mode 100644 tools/export_model.py create mode 100644 tools/infer/cpp_infer.py create mode 100644 tools/infer/infer.py create mode 100644 tools/infer/py_infer.py create mode 100644 tools/infer/run.sh create mode 100644 tools/infer/utils.py create mode 100644 tools/program.py create mode 100755 tools/run.sh create mode 100644 tools/train.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..3904a7ed8 --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +*.pyc +*.sw* +*log* +/dataset +checkpoints/ +pretrained/ +*.ipynb* +build/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000..1ab8d75f0 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,27 @@ +- repo: https://github.com/PaddlePaddle/mirrors-yapf.git + sha: 0d79c0c469bab64f7229c9aca2b1186ef47f0e37 + hooks: + - id: yapf + files: \.py$ +- repo: https://github.com/pre-commit/pre-commit-hooks + sha: a11d9314b22d8f8c7556443875b731ef05965464 + hooks: + - id: check-merge-conflict + - id: check-symlinks + - id: detect-private-key + files: (?!.*paddle)^.*$ + - id: end-of-file-fixer + files: \.(md|yml)$ + - id: trailing-whitespace + files: \.(md|yml)$ +- repo: https://github.com/Lucas-C/pre-commit-hooks + sha: v1.0.1 + hooks: + - id: forbid-crlf + files: \.(md|yml)$ + - id: remove-crlf + files: \.(md|yml)$ + - id: forbid-tabs + files: \.(md|yml)$ + - id: remove-tabs + files: \.(md|yml)$ diff --git 
a/configs/AlexNet/AlexNet.yaml b/configs/AlexNet/AlexNet.yaml new file mode 100644 index 000000000..fa46e6804 --- /dev/null +++ b/configs/AlexNet/AlexNet.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: "AlexNet" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.01 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.0001 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/DPN/DPN107.yaml b/configs/DPN/DPN107.yaml new file mode 100644 index 000000000..d44418fe7 --- /dev/null +++ b/configs/DPN/DPN107.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'DPN107' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + 
+OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00010 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/DPN/DPN131.yaml b/configs/DPN/DPN131.yaml new file mode 100644 index 000000000..95b6345a7 --- /dev/null +++ b/configs/DPN/DPN131.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'DPN131' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00010 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/DPN/DPN68.yaml b/configs/DPN/DPN68.yaml new file mode 100644 index 000000000..e1fa30b43 --- /dev/null +++ b/configs/DPN/DPN68.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'DPN68' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00010 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/DPN/DPN92.yaml b/configs/DPN/DPN92.yaml new file mode 100644 index 000000000..ecfae64dc --- /dev/null +++ b/configs/DPN/DPN92.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'DPN92' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00010 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/DPN/DPN98.yaml b/configs/DPN/DPN98.yaml new file mode 100644 index 000000000..51dab0ac6 --- /dev/null +++ b/configs/DPN/DPN98.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'DPN98' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00010 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/DarkNet/DarkNet53.yaml b/configs/DarkNet/DarkNet53.yaml new file mode 100644 index 000000000..e3610d9a4 --- /dev/null +++ b/configs/DarkNet/DarkNet53.yaml @@ -0,0 +1,71 @@ +mode: 'train' +architecture: "DarkNet53" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 256, 256] + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.0001 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 256 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/DenseNet/DenseNet121.yaml b/configs/DenseNet/DenseNet121.yaml new file mode 100644 index 000000000..2b13cacb7 --- /dev/null +++ b/configs/DenseNet/DenseNet121.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'DenseNet121' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00010 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/DenseNet/DenseNet161.yaml b/configs/DenseNet/DenseNet161.yaml new file mode 100644 index 000000000..c69376126 --- /dev/null +++ b/configs/DenseNet/DenseNet161.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'DenseNet161' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00010 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/DenseNet/DenseNet169.yaml b/configs/DenseNet/DenseNet169.yaml new file mode 100644 index 000000000..d8f80309b --- /dev/null +++ b/configs/DenseNet/DenseNet169.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'DenseNet169' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00010 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/DenseNet/DenseNet201.yaml b/configs/DenseNet/DenseNet201.yaml new file mode 100644 index 000000000..c848de2dd --- /dev/null +++ b/configs/DenseNet/DenseNet201.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'DenseNet201' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00010 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/DenseNet/DenseNet264.yaml b/configs/DenseNet/DenseNet264.yaml new file mode 100644 index 000000000..4794be4ee --- /dev/null +++ b/configs/DenseNet/DenseNet264.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'DenseNet264' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00010 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/HRNet/HRNet_W18_C.yaml b/configs/HRNet/HRNet_W18_C.yaml new file mode 100644 index 000000000..d74e16318 --- /dev/null +++ b/configs/HRNet/HRNet_W18_C.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'HRNet_W18_C' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00010 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/HRNet/HRNet_W30_C.yaml b/configs/HRNet/HRNet_W30_C.yaml new file mode 100644 index 000000000..c270a7f49 --- /dev/null +++ b/configs/HRNet/HRNet_W30_C.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'HRNet_W30_C' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00010 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/HRNet/HRNet_W32_C.yaml b/configs/HRNet/HRNet_W32_C.yaml new file mode 100644 index 000000000..97f748f01 --- /dev/null +++ b/configs/HRNet/HRNet_W32_C.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'HRNet_W32_C' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00010 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/HRNet/HRNet_W40_C.yaml b/configs/HRNet/HRNet_W40_C.yaml new file mode 100644 index 000000000..cf6d3e81e --- /dev/null +++ b/configs/HRNet/HRNet_W40_C.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'HRNet_W40_C' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00010 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/HRNet/HRNet_W44_C.yaml b/configs/HRNet/HRNet_W44_C.yaml new file mode 100644 index 000000000..2e435a635 --- /dev/null +++ b/configs/HRNet/HRNet_W44_C.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'HRNet_W44_C' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00010 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/HRNet/HRNet_W48_C.yaml b/configs/HRNet/HRNet_W48_C.yaml new file mode 100644 index 000000000..b63341d7c --- /dev/null +++ b/configs/HRNet/HRNet_W48_C.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'HRNet_W48_C' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00010 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/HRNet/HRNet_W64_C.yaml b/configs/HRNet/HRNet_W64_C.yaml new file mode 100644 index 000000000..8684664ab --- /dev/null +++ b/configs/HRNet/HRNet_W64_C.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'HRNet_W64_C' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00010 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/Inception/GoogLeNet.yaml b/configs/Inception/GoogLeNet.yaml new file mode 100644 index 000000000..795bbcb13 --- /dev/null +++ b/configs/Inception/GoogLeNet.yaml @@ -0,0 +1,69 @@ +mode: 'train' +architecture: "GoogLeNet" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.01 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.0001 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/Inception/InceptionV4.yaml b/configs/Inception/InceptionV4.yaml new file mode 100644 index 000000000..65c73264f --- /dev/null +++ b/configs/Inception/InceptionV4.yaml @@ -0,0 +1,77 @@ +mode: 'train' +architecture: 'InceptionV4' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 299, 299] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.045 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00010 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 299 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + + + +VALID: + batch_size: 16 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/MobileNetV1/MobileNetV1.yaml b/configs/MobileNetV1/MobileNetV1.yaml new file mode 100644 index 000000000..ff2b62b31 --- /dev/null +++ b/configs/MobileNetV1/MobileNetV1.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: "MobileNetV1" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00003 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV1/MobileNetV1_x0_25.yaml b/configs/MobileNetV1/MobileNetV1_x0_25.yaml new file mode 100644 index 000000000..12943b750 --- /dev/null +++ b/configs/MobileNetV1/MobileNetV1_x0_25.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: "MobileNetV1_x0_25" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00003 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV1/MobileNetV1_x0_5.yaml b/configs/MobileNetV1/MobileNetV1_x0_5.yaml new file mode 100644 index 000000000..14baaf658 --- /dev/null +++ b/configs/MobileNetV1/MobileNetV1_x0_5.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: "MobileNetV1_x0_5" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00003 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV1/MobileNetV1_x0_75.yaml b/configs/MobileNetV1/MobileNetV1_x0_75.yaml new file mode 100644 index 000000000..3563e1d95 --- /dev/null +++ b/configs/MobileNetV1/MobileNetV1_x0_75.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: "MobileNetV1_x0_75" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00003 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV2/MobileNetV2.yaml b/configs/MobileNetV2/MobileNetV2.yaml new file mode 100644 index 000000000..ba25a430d --- /dev/null +++ b/configs/MobileNetV2/MobileNetV2.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: "MobileNetV2" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 240 +topk: 5 +image_shape: [3, 224, 224] + + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.045 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00004 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV2/MobileNetV2_x0_25.yaml b/configs/MobileNetV2/MobileNetV2_x0_25.yaml new file mode 100644 index 000000000..25957a91b --- /dev/null +++ b/configs/MobileNetV2/MobileNetV2_x0_25.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: "MobileNetV2_x0_25" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 240 +topk: 5 +image_shape: [3, 224, 224] + + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.045 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00003 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + ratio: [1.0, 1.0] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV2/MobileNetV2_x0_5.yaml b/configs/MobileNetV2/MobileNetV2_x0_5.yaml new file mode 100644 index 000000000..4591353e6 --- /dev/null +++ b/configs/MobileNetV2/MobileNetV2_x0_5.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: "MobileNetV2_x0_5" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 240 +topk: 5 +image_shape: [3, 224, 224] + + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.045 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00003 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + ratio: [1.0, 1.0] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV2/MobileNetV2_x0_75.yaml b/configs/MobileNetV2/MobileNetV2_x0_75.yaml new file mode 100644 index 000000000..757c87831 --- /dev/null +++ b/configs/MobileNetV2/MobileNetV2_x0_75.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: "MobileNetV2_x0_75" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 240 +topk: 5 +image_shape: [3, 224, 224] + + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.045 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00004 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV2/MobileNetV2_x1_5.yaml b/configs/MobileNetV2/MobileNetV2_x1_5.yaml new file mode 100644 index 000000000..f23634721 --- /dev/null +++ b/configs/MobileNetV2/MobileNetV2_x1_5.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: "MobileNetV2_x1_5" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 240 +topk: 5 +image_shape: [3, 224, 224] + + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.045 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00004 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV2/MobileNetV2_x2_0.yaml b/configs/MobileNetV2/MobileNetV2_x2_0.yaml new file mode 100644 index 000000000..39996f76f --- /dev/null +++ b/configs/MobileNetV2/MobileNetV2_x2_0.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: "MobileNetV2_x2_0" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 240 +topk: 5 +image_shape: [3, 224, 224] + + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.045 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00004 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV3/MobileNetV3_large_x0_35.yaml b/configs/MobileNetV3/MobileNetV3_large_x0_35.yaml new file mode 100644 index 000000000..bc27a07f3 --- /dev/null +++ b/configs/MobileNetV3/MobileNetV3_large_x0_35.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: "MobileNetV3_large_x0_35" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +ls_epsilon: 0.1 +validate: True +valid_interval: 1 +epochs: 360 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'CosineWarmup' + params: + lr: 2.6 + warmup_epoch: 5 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00002 + +TRAIN: + batch_size: 4096 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV3/MobileNetV3_large_x0_5.yaml b/configs/MobileNetV3/MobileNetV3_large_x0_5.yaml new file mode 100644 index 000000000..1aa847924 --- /dev/null +++ b/configs/MobileNetV3/MobileNetV3_large_x0_5.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: "MobileNetV3_large_x0_5" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +ls_epsilon: 0.1 +validate: True +valid_interval: 1 +epochs: 360 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'CosineWarmup' + params: + lr: 1.3 + warmup_epoch: 5 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00002 + +TRAIN: + batch_size: 2048 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV3/MobileNetV3_large_x0_75.yaml b/configs/MobileNetV3/MobileNetV3_large_x0_75.yaml new file mode 100644 index 000000000..3d859e2dd --- /dev/null +++ b/configs/MobileNetV3/MobileNetV3_large_x0_75.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: "MobileNetV3_large_x0_75" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +ls_epsilon: 0.1 +validate: True +valid_interval: 1 +epochs: 360 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'CosineWarmup' + params: + lr: 1.3 + warmup_epoch: 5 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00002 + +TRAIN: + batch_size: 2048 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV3/MobileNetV3_large_x1_0.yaml b/configs/MobileNetV3/MobileNetV3_large_x1_0.yaml new file mode 100644 index 000000000..32d0fe6fa --- /dev/null +++ b/configs/MobileNetV3/MobileNetV3_large_x1_0.yaml @@ -0,0 +1,76 @@ +mode: 'train' +architecture: "MobileNetV3_large_x1_0" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +ls_epsilon: 0.1 +validate: True +valid_interval: 1 +epochs: 360 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'CosineWarmup' + params: + lr: 2.6 + warmup_epoch: 5 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00002 + +TRAIN: + batch_size: 4096 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - ImageNetPolicy: + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 32 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV3/MobileNetV3_large_x1_25.yaml b/configs/MobileNetV3/MobileNetV3_large_x1_25.yaml new file mode 100644 index 000000000..a368b8d02 --- /dev/null +++ b/configs/MobileNetV3/MobileNetV3_large_x1_25.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: "MobileNetV3_large_x1_25" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +ls_epsilon: 0.1 +validate: True +valid_interval: 1 +epochs: 360 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'CosineWarmup' + params: + lr: 0.65 + warmup_epoch: 5 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00004 + +TRAIN: + batch_size: 1024 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV3/MobileNetV3_small_x0_35.yaml b/configs/MobileNetV3/MobileNetV3_small_x0_35.yaml new file mode 100644 index 000000000..7fee09f2c --- /dev/null +++ b/configs/MobileNetV3/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: "MobileNetV3_small_x0_35" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 360 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'CosineWarmup' + params: + lr: 2.6 + warmup_epoch: 5 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00001 + +TRAIN: + batch_size: 4096 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV3/MobileNetV3_small_x0_5.yaml b/configs/MobileNetV3/MobileNetV3_small_x0_5.yaml new file mode 100644 index 000000000..4659bd52f --- /dev/null +++ b/configs/MobileNetV3/MobileNetV3_small_x0_5.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: "MobileNetV3_small_x0_5" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +ls_epsilon: 0.1 +validate: True +valid_interval: 1 +epochs: 360 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'CosineWarmup' + params: + lr: 2.6 + warmup_epoch: 5 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00001 + +TRAIN: + batch_size: 4096 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV3/MobileNetV3_small_x0_75.yaml b/configs/MobileNetV3/MobileNetV3_small_x0_75.yaml new file mode 100644 index 000000000..23d13b0ef --- /dev/null +++ b/configs/MobileNetV3/MobileNetV3_small_x0_75.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: "MobileNetV3_small_x0_75" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +ls_epsilon: 0.1 +validate: True +valid_interval: 1 +epochs: 360 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'CosineWarmup' + params: + lr: 2.6 + warmup_epoch: 5 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00002 + +TRAIN: + batch_size: 4096 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV3/MobileNetV3_small_x1_0.yaml b/configs/MobileNetV3/MobileNetV3_small_x1_0.yaml new file mode 100644 index 000000000..f6369ec32 --- /dev/null +++ b/configs/MobileNetV3/MobileNetV3_small_x1_0.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: "MobileNetV3_small_x1_0" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +ls_epsilon: 0.1 +validate: True +valid_interval: 1 +epochs: 360 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'CosineWarmup' + params: + lr: 2.6 + warmup_epoch: 5 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00002 + +TRAIN: + batch_size: 4096 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/MobileNetV3/MobileNetV3_small_x1_25.yaml b/configs/MobileNetV3/MobileNetV3_small_x1_25.yaml new file mode 100644 index 000000000..cb711f845 --- /dev/null +++ b/configs/MobileNetV3/MobileNetV3_small_x1_25.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: "MobileNetV3_small_x1_25" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +ls_epsilon: 0.1 +validate: True +valid_interval: 1 +epochs: 360 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'CosineWarmup' + params: + lr: 1.3 + warmup_epoch: 5 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00002 + +TRAIN: + batch_size: 2048 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/Res2Net/Res2Net101_vd_26w_4s.yaml b/configs/Res2Net/Res2Net101_vd_26w_4s.yaml new file mode 100644 index 000000000..2d5cecfd9 --- /dev/null +++ b/configs/Res2Net/Res2Net101_vd_26w_4s.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'Res2Net101_vd_26w_4s' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/Res2Net/Res2Net200_vd_26w_4s.yaml b/configs/Res2Net/Res2Net200_vd_26w_4s.yaml new file mode 100644 index 000000000..5cd51eb7b --- /dev/null +++ b/configs/Res2Net/Res2Net200_vd_26w_4s.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'Res2Net200_vd_26w_4s' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/Res2Net/Res2Net50_14w_8s.yaml b/configs/Res2Net/Res2Net50_14w_8s.yaml new file mode 100644 index 000000000..69d249670 --- /dev/null +++ b/configs/Res2Net/Res2Net50_14w_8s.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'Res2Net50_14w_8s' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/Res2Net/Res2Net50_26w_4s.yaml b/configs/Res2Net/Res2Net50_26w_4s.yaml new file mode 100644 index 000000000..2565bfcb1 --- /dev/null +++ b/configs/Res2Net/Res2Net50_26w_4s.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'Res2Net50_26w_4s' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/Res2Net/Res2Net50_vd_26w_4s.yaml b/configs/Res2Net/Res2Net50_vd_26w_4s.yaml new file mode 100644 index 000000000..9aa79156c --- /dev/null +++ b/configs/Res2Net/Res2Net50_vd_26w_4s.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'Res2Net50_vd_26w_4s' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNeXt/ResNeXt101_32x4d.yaml b/configs/ResNeXt/ResNeXt101_32x4d.yaml new file mode 100644 index 000000000..08c364894 --- /dev/null +++ b/configs/ResNeXt/ResNeXt101_32x4d.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'ResNeXt101_32x4d' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNeXt/ResNeXt101_64x4d.yaml b/configs/ResNeXt/ResNeXt101_64x4d.yaml new file mode 100644 index 000000000..8a662284f --- /dev/null +++ b/configs/ResNeXt/ResNeXt101_64x4d.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'ResNeXt101_64x4d' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000150 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNeXt/ResNeXt101_vd_32x4d.yaml b/configs/ResNeXt/ResNeXt101_vd_32x4d.yaml new file mode 100644 index 000000000..4a70e2e02 --- /dev/null +++ b/configs/ResNeXt/ResNeXt101_vd_32x4d.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'ResNeXt101_vd_32x4d' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNeXt/ResNeXt101_vd_64x4d.yaml b/configs/ResNeXt/ResNeXt101_vd_64x4d.yaml new file mode 100644 index 000000000..1587b425d --- /dev/null +++ b/configs/ResNeXt/ResNeXt101_vd_64x4d.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'ResNeXt101_vd_64x4d' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNeXt/ResNeXt152_32x4d.yaml b/configs/ResNeXt/ResNeXt152_32x4d.yaml new file mode 100644 index 000000000..d073066eb --- /dev/null +++ b/configs/ResNeXt/ResNeXt152_32x4d.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'ResNeXt152_32x4d' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNeXt/ResNeXt152_64x4d.yaml b/configs/ResNeXt/ResNeXt152_64x4d.yaml new file mode 100644 index 000000000..4cf492e25 --- /dev/null +++ b/configs/ResNeXt/ResNeXt152_64x4d.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'ResNeXt152_64x4d' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000180 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNeXt/ResNeXt152_vd_32x4d.yaml b/configs/ResNeXt/ResNeXt152_vd_32x4d.yaml new file mode 100644 index 000000000..5d89b7e24 --- /dev/null +++ b/configs/ResNeXt/ResNeXt152_vd_32x4d.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'ResNeXt152_vd_32x4d' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNeXt/ResNeXt152_vd_64x4d.yaml b/configs/ResNeXt/ResNeXt152_vd_64x4d.yaml new file mode 100644 index 000000000..877f4b6c9 --- /dev/null +++ b/configs/ResNeXt/ResNeXt152_vd_64x4d.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'ResNeXt152_vd_64x4d' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNeXt/ResNeXt50_32x4d.yaml b/configs/ResNeXt/ResNeXt50_32x4d.yaml new file mode 100644 index 000000000..f8a7e8dd9 --- /dev/null +++ b/configs/ResNeXt/ResNeXt50_32x4d.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'ResNeXt50_32x4d' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNeXt/ResNeXt50_64x4d.yaml b/configs/ResNeXt/ResNeXt50_64x4d.yaml new file mode 100644 index 000000000..4a5bf99da --- /dev/null +++ b/configs/ResNeXt/ResNeXt50_64x4d.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: "ResNeXt50_64x4d" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.0001 + +TRAIN: + batch_size: 32 + num_workers: 8 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/ResNeXt/ResNeXt50_vd_32x4d.yaml b/configs/ResNeXt/ResNeXt50_vd_32x4d.yaml new file mode 100644 index 000000000..b779b0524 --- /dev/null +++ b/configs/ResNeXt/ResNeXt50_vd_32x4d.yaml @@ -0,0 +1,80 @@ +mode: 'train' +architecture: "ResNeXt50_vd_32x4d" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'CosineWarmup' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.0001 + +TRAIN: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/ResNeXt/ResNeXt50_vd_64x4d.yaml b/configs/ResNeXt/ResNeXt50_vd_64x4d.yaml new file mode 100644 index 000000000..b79a63513 --- /dev/null +++ b/configs/ResNeXt/ResNeXt50_vd_64x4d.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'ResNeXt50_vd_64x4d' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNet/ResNet101.yaml b/configs/ResNet/ResNet101.yaml new file mode 100644 index 000000000..0ccbb13e4 --- /dev/null +++ b/configs/ResNet/ResNet101.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'ResNet101' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNet/ResNet101_vd.yaml b/configs/ResNet/ResNet101_vd.yaml new file mode 100644 index 000000000..c74b5b20b --- /dev/null +++ b/configs/ResNet/ResNet101_vd.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'ResNet101_vd' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNet/ResNet152.yaml b/configs/ResNet/ResNet152.yaml new file mode 100644 index 000000000..c7934a040 --- /dev/null +++ b/configs/ResNet/ResNet152.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'ResNet152' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNet/ResNet152_vd.yaml b/configs/ResNet/ResNet152_vd.yaml new file mode 100644 index 000000000..fbf08ede3 --- /dev/null +++ b/configs/ResNet/ResNet152_vd.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'ResNet152_vd' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNet/ResNet18.yaml b/configs/ResNet/ResNet18.yaml new file mode 100644 index 000000000..270cd8ed3 --- /dev/null +++ b/configs/ResNet/ResNet18.yaml @@ -0,0 +1,72 @@ +mode: 'train' +architecture: 'ResNet18' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNet/ResNet18_vd.yaml b/configs/ResNet/ResNet18_vd.yaml new file mode 100644 index 000000000..54f36b0f4 --- /dev/null +++ b/configs/ResNet/ResNet18_vd.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'ResNet18_vd' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000070 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNet/ResNet200_vd.yaml b/configs/ResNet/ResNet200_vd.yaml new file mode 100644 index 000000000..f2004fdef --- /dev/null +++ b/configs/ResNet/ResNet200_vd.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'ResNet200_vd' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNet/ResNet34.yaml b/configs/ResNet/ResNet34.yaml new file mode 100644 index 000000000..cfe715d87 --- /dev/null +++ b/configs/ResNet/ResNet34.yaml @@ -0,0 +1,72 @@ +mode: 'train' +architecture: 'ResNet34' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNet/ResNet34_vd.yaml b/configs/ResNet/ResNet34_vd.yaml new file mode 100644 index 000000000..39b9a3556 --- /dev/null +++ b/configs/ResNet/ResNet34_vd.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'ResNet34_vd' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000070 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNet/ResNet50.yaml b/configs/ResNet/ResNet50.yaml new file mode 100644 index 000000000..1fb825b58 --- /dev/null +++ b/configs/ResNet/ResNet50.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: 'ResNet50' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNet/ResNet50_vc.yaml b/configs/ResNet/ResNet50_vc.yaml new file mode 100644 index 000000000..233f00ce1 --- /dev/null +++ b/configs/ResNet/ResNet50_vc.yaml @@ -0,0 +1,72 @@ +mode: 'train' +architecture: 'ResNet50_vc' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNet/ResNet50_vd.yaml b/configs/ResNet/ResNet50_vd.yaml new file mode 100644 index 000000000..dbb52e32b --- /dev/null +++ b/configs/ResNet/ResNet50_vd.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'ResNet50_vd' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000070 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ResNet_ACNet/ResNet_ACNet.yaml b/configs/ResNet_ACNet/ResNet_ACNet.yaml new file mode 100644 index 000000000..309f3821e --- /dev/null +++ b/configs/ResNet_ACNet/ResNet_ACNet.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: "ResNet_ACNet" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Piecewise' + params: + lr: 0.1 + decay_epochs: [30, 60, 90] + gamma: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.0001 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/SENet/SENet154_vd.yaml b/configs/SENet/SENet154_vd.yaml new file mode 100644 index 000000000..72adc722f --- /dev/null +++ b/configs/SENet/SENet154_vd.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'SENet154_vd' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/SENet/SE_ResNeXt101_32x4d.yaml b/configs/SENet/SE_ResNeXt101_32x4d.yaml new file mode 100644 index 000000000..bd9f20e7b --- /dev/null +++ b/configs/SENet/SE_ResNeXt101_32x4d.yaml @@ -0,0 +1,72 @@ +mode: 'train' +architecture: 'SE_ResNeXt101_32x4d' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000015 + +TRAIN: + batch_size: 400 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/SENet/SE_ResNeXt50_32x4d.yaml b/configs/SENet/SE_ResNeXt50_32x4d.yaml new file mode 100644 index 000000000..c2a766013 --- /dev/null +++ b/configs/SENet/SE_ResNeXt50_32x4d.yaml @@ -0,0 +1,72 @@ +mode: 'train' +architecture: 'SE_ResNeXt50_32x4d' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: False +ls_epsilon: -1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000120 + +TRAIN: + batch_size: 400 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/SENet/SE_ResNeXt50_vd_32x4d.yaml b/configs/SENet/SE_ResNeXt50_vd_32x4d.yaml new file mode 100644 index 000000000..f0adfb4b1 --- /dev/null +++ b/configs/SENet/SE_ResNeXt50_vd_32x4d.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'SE_ResNeXt50_vd_32x4d' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/SENet/SE_ResNet18_vd.yaml b/configs/SENet/SE_ResNet18_vd.yaml new file mode 100644 index 000000000..9684c6b51 --- /dev/null +++ b/configs/SENet/SE_ResNet18_vd.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'SE_ResNet18_vd' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000070 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/SENet/SE_ResNet34_vd.yaml b/configs/SENet/SE_ResNet34_vd.yaml new file mode 100644 index 000000000..1ffe543dd --- /dev/null +++ b/configs/SENet/SE_ResNet34_vd.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'SE_ResNet34_vd' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000070 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/SENet/SE_ResNet50_vd.yaml b/configs/SENet/SE_ResNet50_vd.yaml new file mode 100644 index 000000000..8ca11f271 --- /dev/null +++ b/configs/SENet/SE_ResNet50_vd.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: 'SE_ResNet50_vd' +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 200 +topk: 5 +image_shape: [3, 224, 224] + +use_mix: True +ls_epsilon: 0.1 + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000100 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + mix: + - MixupOperator: + alpha: 0.2 + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/ShuffleNet/ShuffleNetV2.yaml b/configs/ShuffleNet/ShuffleNetV2.yaml new file mode 100644 index 000000000..5993e3f8c --- /dev/null +++ b/configs/ShuffleNet/ShuffleNetV2.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: "ShuffleNetV2" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 240 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'CosineWarmup' + params: + lr: 0.5 + warmup_epoch: 5 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00004 + +TRAIN: + batch_size: 1024 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/ShuffleNet/ShuffleNetV2_swish.yaml b/configs/ShuffleNet/ShuffleNetV2_swish.yaml new file mode 100644 index 000000000..e8ee5f446 --- /dev/null +++ b/configs/ShuffleNet/ShuffleNetV2_swish.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: "ShuffleNetV2_swish" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 240 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'CosineWarmup' + params: + lr: 0.5 + warmup_epoch: 5 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00004 + +TRAIN: + batch_size: 1024 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/ShuffleNet/ShuffleNetV2_x0_25.yaml b/configs/ShuffleNet/ShuffleNetV2_x0_25.yaml new file mode 100644 index 000000000..9ab65f0d8 --- /dev/null +++ b/configs/ShuffleNet/ShuffleNetV2_x0_25.yaml @@ -0,0 +1,76 @@ +mode: 'train' +architecture: "ShuffleNetV2_x0_25" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 240 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'CosineWarmup' + params: + lr: 0.5 + warmup_epoch: 5 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00003 + +TRAIN: + batch_size: 1024 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + scale: [0.64, 1.0] + ratio: [0.8, 1.2] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/ShuffleNet/ShuffleNetV2_x0_33.yaml b/configs/ShuffleNet/ShuffleNetV2_x0_33.yaml new file mode 100644 index 000000000..134d5b9f3 --- /dev/null +++ b/configs/ShuffleNet/ShuffleNetV2_x0_33.yaml @@ -0,0 +1,76 @@ +mode: 'train' +architecture: "ShuffleNetV2_x0_33" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 240 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'CosineWarmup' + params: + lr: 0.5 + warmup_epoch: 5 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00003 + +TRAIN: + batch_size: 1024 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + scale: [0.64, 1.0] + ratio: [0.8, 1.2] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/ShuffleNet/ShuffleNetV2_x0_5.yaml b/configs/ShuffleNet/ShuffleNetV2_x0_5.yaml new file mode 100644 index 000000000..120ea1c11 --- /dev/null +++ b/configs/ShuffleNet/ShuffleNetV2_x0_5.yaml @@ -0,0 +1,76 @@ +mode: 'train' +architecture: "ShuffleNetV2_x0_5" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 240 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'CosineWarmup' + params: + lr: 0.5 + warmup_epoch: 5 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00003 + +TRAIN: + batch_size: 1024 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + scale: [0.64, 1.0] + ratio: [0.8, 1.2] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/ShuffleNet/ShuffleNetV2_x1_5.yaml b/configs/ShuffleNet/ShuffleNetV2_x1_5.yaml new file mode 100644 index 000000000..c1fc3d18c --- /dev/null +++ b/configs/ShuffleNet/ShuffleNetV2_x1_5.yaml @@ -0,0 +1,75 @@ +mode: 'train' +architecture: "ShuffleNetV2_x1_5" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 240 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'CosineWarmup' + params: + lr: 0.25 + warmup_epoch: 5 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00004 + +TRAIN: + batch_size: 512 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + ratio: [1.0, 1.0] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/ShuffleNet/ShuffleNetV2_x2_0.yaml b/configs/ShuffleNet/ShuffleNetV2_x2_0.yaml new file mode 100644 index 000000000..b45b70b64 --- /dev/null +++ b/configs/ShuffleNet/ShuffleNetV2_x2_0.yaml @@ -0,0 +1,74 @@ +mode: 'train' +architecture: "ShuffleNetV2_x2_0" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 240 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'CosineWarmup' + params: + lr: 0.25 + warmup_epoch: 5 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00004 + +TRAIN: + batch_size: 512 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/SqueezeNet/SqueezeNet1_0.yaml b/configs/SqueezeNet/SqueezeNet1_0.yaml new file mode 100644 index 000000000..163bb33aa --- /dev/null +++ b/configs/SqueezeNet/SqueezeNet1_0.yaml @@ -0,0 +1,71 @@ +mode: 'train' +architecture: "SqueezeNet1_0" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.02 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.0001 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/SqueezeNet/SqueezeNet1_1.yaml b/configs/SqueezeNet/SqueezeNet1_1.yaml new file mode 100644 index 000000000..4b716bc59 --- /dev/null +++ b/configs/SqueezeNet/SqueezeNet1_1.yaml @@ -0,0 +1,69 @@ +mode: 'train' +architecture: "SqueezeNet1_1" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 120 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.02 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.0001 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/VGG/VGG11.yaml b/configs/VGG/VGG11.yaml new file mode 100644 index 000000000..d1cd5fab1 --- /dev/null +++ b/configs/VGG/VGG11.yaml @@ -0,0 +1,69 @@ +mode: 'train' +architecture: "VGG11" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 90 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.1 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.0004 + +TRAIN: + batch_size: 512 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/configs/VGG/VGG13.yaml b/configs/VGG/VGG13.yaml new file mode 100644 index 000000000..732695a15 --- /dev/null +++ b/configs/VGG/VGG13.yaml @@ -0,0 +1,73 @@ +mode: 'train' +architecture: "VGG13" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 90 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.01 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.0003 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/VGG/VGG16.yaml b/configs/VGG/VGG16.yaml new file mode 100644 index 000000000..78f46b7f9 --- /dev/null +++ b/configs/VGG/VGG16.yaml @@ -0,0 +1,73 @@ +mode: 'train' +architecture: "VGG16" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 90 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.01 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.0004 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + + +VALID: + batch_size: 64 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/val_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/VGG/VGG19.yaml b/configs/VGG/VGG19.yaml new file mode 100644 index 000000000..94aea056d --- /dev/null +++ b/configs/VGG/VGG19.yaml @@ -0,0 +1,49 @@ +mode: 'train' +architecture: "VGG19" +pretrained_model: "" +model_save_dir: "./checkpoints/" +classes_num: 1000 +total_images: 1281167 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 150 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.01 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.0004 + +TRAIN: + batch_size: 256 + num_workers: 4 + file_list: "./dataset/ILSVRC2012/train_list.txt" + data_dir: "./dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + + diff --git a/configs/eval.yaml b/configs/eval.yaml new file mode 100644 index 000000000..8edec558b --- /dev/null +++ b/configs/eval.yaml @@ -0,0 +1,31 @@ +mode: 'valid' +architecture: "" +pretrained_model: "" +classes_num: 1000 +total_images: 1281167 +topk: 5 +image_shape: [3, 224, 224] + + +VALID: + batch_size: 16 + num_workers: 4 + file_list: "../dataset/ILSVRC2012/val_list.txt" + data_dir: "../dataset/ILSVRC2012/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + to_np: False + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + diff --git a/ppcls/__init__.py b/ppcls/__init__.py new file mode 100644 index 000000000..3cee44185 --- /dev/null +++ b/ppcls/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import optimizer + +from .modeling import * +from .optimizer import * +from .data import * +from .utils import * diff --git a/ppcls/data/__init__.py b/ppcls/data/__init__.py new file mode 100644 index 000000000..72779cb55 --- /dev/null +++ b/ppcls/data/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .reader import Reader diff --git a/ppcls/data/imaug/__init__.py b/ppcls/data/imaug/__init__.py new file mode 100644 index 000000000..55be7a373 --- /dev/null +++ b/ppcls/data/imaug/__init__.py @@ -0,0 +1,94 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from .autoaugment import ImageNetPolicy as RawImageNetPolicy
+from .randaugment import RandAugment as RawRandAugment
+from .cutout import Cutout
+
+from .hide_and_seek import HideAndSeek
+from .random_erasing import RandomErasing
+from .grid import GridMask
+
+from .operators import DecodeImage
+from .operators import ResizeImage
+from .operators import CropImage
+from .operators import RandCropImage
+from .operators import RandFlipImage
+from .operators import NormalizeImage
+from .operators import ToCHWImage
+
+from .batch_operators import MixupOperator
+from .batch_operators import CutmixOperator
+from .batch_operators import FmixOperator
+
+import six
+import numpy as np
+from PIL import Image
+
+
+def transform(data, ops=None):
+    """Apply each preprocessing op in *ops* to *data* in order; None means no ops."""
+    for op in (ops or []):
+        data = op(data)
+    return data
+
+
+class ImageNetPolicy(RawImageNetPolicy):
+    """ ImageNetPolicy wrapper to auto fit different img types """
+
+    def __init__(self, *args, **kwargs):
+        if six.PY2:
+            super(ImageNetPolicy, self).__init__(*args, **kwargs)
+        else:
+            super().__init__(*args, **kwargs)
+
+    def __call__(self, img):
+        if not isinstance(img, Image.Image):
+            img = np.ascontiguousarray(img)
+            img = Image.fromarray(img)
+
+        if six.PY2:
+            img = super(ImageNetPolicy, self).__call__(img)
+        else:
+            img = super().__call__(img)
+
+        if isinstance(img, Image.Image):
+            img = np.asarray(img)
+
+        return img
+
+
+class RandAugment(RawRandAugment):
+    """ RandAugment wrapper to auto fit different img types """
+
+    def __init__(self, *args, **kwargs):
+        if six.PY2:
+            super(RandAugment, self).__init__(*args, **kwargs)
+        else:
+            super().__init__(*args, **kwargs)
+
+    def __call__(self, img):
+        if not isinstance(img, Image.Image):
+            img = np.ascontiguousarray(img)
+            img = Image.fromarray(img)
+
+        if six.PY2:
+            img = super(RandAugment, self).__call__(img)
+        else:
+            img = super().__call__(img)
+
+        if isinstance(img, Image.Image):
+            img = np.asarray(img)
+
+        return img
diff --git a/ppcls/data/imaug/autoaugment.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This code is based on https://github.com/DeepVoltaire/AutoAugment/blob/master/autoaugment.py

from PIL import Image, ImageEnhance, ImageOps
import numpy as np
import random


class ImageNetPolicy(object):
    """ Randomly choose one of the best 24 Sub-policies on ImageNet.

        Example:
        >>> policy = ImageNetPolicy()
        >>> transformed = policy(image)

        Example as a PyTorch Transform:
        >>> transform=transforms.Compose([
        >>>     transforms.Resize(256),
        >>>     ImageNetPolicy(),
        >>>     transforms.ToTensor()])
    """

    def __init__(self, fillcolor=(128, 128, 128)):
        # NOTE(review): the docstring says 24 sub-policies but the list has
        # 25 entries (with duplicates) -- this matches the upstream reference
        # implementation, so it is kept as-is.
        self.policies = [
            SubPolicy(0.4, "posterize", 8, 0.6, "rotate", 9, fillcolor),
            SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor),
            SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor),
            SubPolicy(0.6, "posterize", 7, 0.6, "posterize", 6, fillcolor),
            SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor),
            SubPolicy(0.4, "equalize", 4, 0.8, "rotate", 8, fillcolor),
            SubPolicy(0.6, "solarize", 3, 0.6, "equalize", 7, fillcolor),
            SubPolicy(0.8, "posterize", 5, 1.0, "equalize", 2, fillcolor),
            SubPolicy(0.2, "rotate", 3, 0.6, "solarize", 8, fillcolor),
            SubPolicy(0.6, "equalize", 8, 0.4, "posterize", 6, fillcolor),
            SubPolicy(0.8, "rotate", 8, 0.4, "color", 0, fillcolor),
            SubPolicy(0.4, "rotate", 9, 0.6, "equalize", 2, fillcolor),
            SubPolicy(0.0, "equalize", 7, 0.8, "equalize", 8, fillcolor),
            SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor),
            SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor),
            SubPolicy(0.8, "rotate", 8, 1.0, "color", 2, fillcolor),
            SubPolicy(0.8, "color", 8, 0.8, "solarize", 7, fillcolor),
            SubPolicy(0.4, "sharpness", 7, 0.6, "invert", 8, fillcolor),
            SubPolicy(0.6, "shearX", 5, 1.0, "equalize", 9, fillcolor),
            SubPolicy(0.4, "color", 0, 0.6, "equalize", 3, fillcolor),
            SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor),
            SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor),
            SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor),
            SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor),
            SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor)
        ]

    def __call__(self, img, policy_idx=None):
        # A missing or non-int policy_idx selects a random sub-policy;
        # otherwise the index is wrapped into range with a modulo.
        if policy_idx is None or not isinstance(policy_idx, int):
            policy_idx = random.randint(0, len(self.policies) - 1)
        else:
            policy_idx = policy_idx % len(self.policies)
        return self.policies[policy_idx](img)

    def __repr__(self):
        return "AutoAugment ImageNet Policy"


class CIFAR10Policy(object):
    """ Randomly choose one of the best 25 Sub-policies on CIFAR10.

        Example:
        >>> policy = CIFAR10Policy()
        >>> transformed = policy(image)

        Example as a PyTorch Transform:
        >>> transform=transforms.Compose([
        >>>     transforms.Resize(256),
        >>>     CIFAR10Policy(),
        >>>     transforms.ToTensor()])
    """

    def __init__(self, fillcolor=(128, 128, 128)):
        self.policies = [
            SubPolicy(0.1, "invert", 7, 0.2, "contrast", 6, fillcolor),
            SubPolicy(0.7, "rotate", 2, 0.3, "translateX", 9, fillcolor),
            SubPolicy(0.8, "sharpness", 1, 0.9, "sharpness", 3, fillcolor),
            SubPolicy(0.5, "shearY", 8, 0.7, "translateY", 9, fillcolor),
            SubPolicy(0.5, "autocontrast", 8, 0.9, "equalize", 2, fillcolor),
            SubPolicy(0.2, "shearY", 7, 0.3, "posterize", 7, fillcolor),
            SubPolicy(0.4, "color", 3, 0.6, "brightness", 7, fillcolor),
            SubPolicy(0.3, "sharpness", 9, 0.7, "brightness", 9, fillcolor),
            SubPolicy(0.6, "equalize", 5, 0.5, "equalize", 1, fillcolor),
            SubPolicy(0.6, "contrast", 7, 0.6, "sharpness", 5, fillcolor),
            SubPolicy(0.7, "color", 7, 0.5, "translateX", 8, fillcolor),
            SubPolicy(0.3, "equalize", 7, 0.4, "autocontrast", 8, fillcolor),
            SubPolicy(0.4, "translateY", 3, 0.2, "sharpness", 6, fillcolor),
            SubPolicy(0.9, "brightness", 6, 0.2, "color", 8, fillcolor),
            SubPolicy(0.5, "solarize", 2, 0.0, "invert", 3, fillcolor),
            SubPolicy(0.2, "equalize", 0, 0.6, "autocontrast", 0, fillcolor),
            SubPolicy(0.2, "equalize", 8, 0.8, "equalize", 4, fillcolor),
            SubPolicy(0.9, "color", 9, 0.6, "equalize", 6, fillcolor),
            SubPolicy(0.8, "autocontrast", 4, 0.2, "solarize", 8, fillcolor),
            SubPolicy(0.1, "brightness", 3, 0.7, "color", 0, fillcolor),
            SubPolicy(0.4, "solarize", 5, 0.9, "autocontrast", 3, fillcolor),
            SubPolicy(0.9, "translateY", 9, 0.7, "translateY", 9, fillcolor),
            SubPolicy(0.9, "autocontrast", 2, 0.8, "solarize", 3, fillcolor),
            SubPolicy(0.8, "equalize", 8, 0.1, "invert", 3, fillcolor),
            SubPolicy(0.7, "translateY", 9, 0.9, "autocontrast", 1, fillcolor)
        ]

    def __call__(self, img, policy_idx=None):
        if policy_idx is None or not isinstance(policy_idx, int):
            policy_idx = random.randint(0, len(self.policies) - 1)
        else:
            policy_idx = policy_idx % len(self.policies)
        return self.policies[policy_idx](img)

    def __repr__(self):
        return "AutoAugment CIFAR10 Policy"


class SVHNPolicy(object):
    """ Randomly choose one of the best 25 Sub-policies on SVHN.

        Example:
        >>> policy = SVHNPolicy()
        >>> transformed = policy(image)

        Example as a PyTorch Transform:
        >>> transform=transforms.Compose([
        >>>     transforms.Resize(256),
        >>>     SVHNPolicy(),
        >>>     transforms.ToTensor()])
    """

    def __init__(self, fillcolor=(128, 128, 128)):
        self.policies = [
            SubPolicy(0.9, "shearX", 4, 0.2, "invert", 3, fillcolor),
            SubPolicy(0.9, "shearY", 8, 0.7, "invert", 5, fillcolor),
            SubPolicy(0.6, "equalize", 5, 0.6, "solarize", 6, fillcolor),
            SubPolicy(0.9, "invert", 3, 0.6, "equalize", 3, fillcolor),
            SubPolicy(0.6, "equalize", 1, 0.9, "rotate", 3, fillcolor),
            SubPolicy(0.9, "shearX", 4, 0.8, "autocontrast", 3, fillcolor),
            SubPolicy(0.9, "shearY", 8, 0.4, "invert", 5, fillcolor),
            SubPolicy(0.9, "shearY", 5, 0.2, "solarize", 6, fillcolor),
            SubPolicy(0.9, "invert", 6, 0.8, "autocontrast", 1, fillcolor),
            SubPolicy(0.6, "equalize", 3, 0.9, "rotate", 3, fillcolor),
            SubPolicy(0.9, "shearX", 4, 0.3, "solarize", 3, fillcolor),
            SubPolicy(0.8, "shearY", 8, 0.7, "invert", 4, fillcolor),
            SubPolicy(0.9, "equalize", 5, 0.6, "translateY", 6, fillcolor),
            SubPolicy(0.9, "invert", 4, 0.6, "equalize", 7, fillcolor),
            SubPolicy(0.3, "contrast", 3, 0.8, "rotate", 4, fillcolor),
            SubPolicy(0.8, "invert", 5, 0.0, "translateY", 2, fillcolor),
            SubPolicy(0.7, "shearY", 6, 0.4, "solarize", 8, fillcolor),
            SubPolicy(0.6, "invert", 4, 0.8, "rotate", 4, fillcolor),
            SubPolicy(0.3, "shearY", 7, 0.9, "translateX", 3, fillcolor),
            SubPolicy(0.1, "shearX", 6, 0.6, "invert", 5, fillcolor),
            SubPolicy(0.7, "solarize", 2, 0.6, "translateY", 7, fillcolor),
            SubPolicy(0.8, "shearY", 4, 0.8, "invert", 8, fillcolor),
            SubPolicy(0.7, "shearX", 9, 0.8, "translateY", 3, fillcolor),
            SubPolicy(0.8, "shearY", 5, 0.7, "autocontrast", 3, fillcolor),
            SubPolicy(0.7, "shearX", 2, 0.1, "invert", 5, fillcolor)
        ]

    def __call__(self, img, policy_idx=None):
        if policy_idx is None or not isinstance(policy_idx, int):
            policy_idx = random.randint(0, len(self.policies) - 1)
        else:
            policy_idx = policy_idx % len(self.policies)
        return self.policies[policy_idx](img)

    def __repr__(self):
        return "AutoAugment SVHN Policy"


class SubPolicy(object):
    """ A pair of (probability, operation, magnitude) applied in sequence.

    Each operation fires independently with its own probability; magnitudes
    are indexed (0-9) into per-operation lookup tables.
    """

    def __init__(self,
                 p1,
                 operation1,
                 magnitude_idx1,
                 p2,
                 operation2,
                 magnitude_idx2,
                 fillcolor=(128, 128, 128)):
        # 10-level magnitude table per operation.
        ranges = {
            "shearX": np.linspace(0, 0.3, 10),
            "shearY": np.linspace(0, 0.3, 10),
            "translateX": np.linspace(0, 150 / 331, 10),
            "translateY": np.linspace(0, 150 / 331, 10),
            "rotate": np.linspace(0, 30, 10),
            "color": np.linspace(0.0, 0.9, 10),
            # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin int produces the same integer levels.
            "posterize": np.round(np.linspace(8, 4, 10), 0).astype(int),
            "solarize": np.linspace(256, 0, 10),
            "contrast": np.linspace(0.0, 0.9, 10),
            "sharpness": np.linspace(0.0, 0.9, 10),
            "brightness": np.linspace(0.0, 0.9, 10),
            "autocontrast": [0] * 10,
            "equalize": [0] * 10,
            "invert": [0] * 10
        }

        # from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
        def rotate_with_fill(img, magnitude):
            # NOTE(review): the fill is hard-coded to 128 and ignores the
            # fillcolor argument -- matches upstream; confirm before changing.
            rot = img.convert("RGBA").rotate(magnitude)
            return Image.composite(rot,
                                   Image.new("RGBA", rot.size, (128, ) * 4),
                                   rot).convert(img.mode)

        func = {
            "shearX": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE,
                (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
                Image.BICUBIC, fillcolor=fillcolor),
            "shearY": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE,
                (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
                Image.BICUBIC, fillcolor=fillcolor),
            "translateX": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE,
                (1, 0, magnitude * img.size[0] * random.choice([-1, 1]),
                 0, 1, 0),
                fillcolor=fillcolor),
            "translateY": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE,
                (1, 0, 0, 0, 1,
                 magnitude * img.size[1] * random.choice([-1, 1])),
                fillcolor=fillcolor),
            "rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
            # "rotate": lambda img, magnitude: img.rotate(magnitude * random.choice([-1, 1])),
            "color": lambda img, magnitude: ImageEnhance.Color(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "posterize": lambda img, magnitude: ImageOps.posterize(
                img, magnitude),
            "solarize": lambda img, magnitude: ImageOps.solarize(
                img, magnitude),
            "contrast": lambda img, magnitude: ImageEnhance.Contrast(
                img).enhance(1 + magnitude * random.choice([-1, 1])),
            "sharpness": lambda img, magnitude: ImageEnhance.Sharpness(
                img).enhance(1 + magnitude * random.choice([-1, 1])),
            "brightness": lambda img, magnitude: ImageEnhance.Brightness(
                img).enhance(1 + magnitude * random.choice([-1, 1])),
            "autocontrast": lambda img, magnitude: ImageOps.autocontrast(img),
            "equalize": lambda img, magnitude: ImageOps.equalize(img),
            "invert": lambda img, magnitude: ImageOps.invert(img)
        }

        self.p1 = p1
        self.operation1 = func[operation1]
        self.magnitude1 = ranges[operation1][magnitude_idx1]
        self.p2 = p2
        self.operation2 = func[operation2]
        self.magnitude2 = ranges[operation2][magnitude_idx2]

    def __call__(self, img):
        # The two operations fire independently.
        if random.random() < self.p1:
            img = self.operation1(img, self.magnitude1)
        if random.random() < self.p2:
            img = self.operation2(img, self.magnitude2)
        return img
--git a/ppcls/data/imaug/batch_operators.py b/ppcls/data/imaug/batch_operators.py new file mode 100644 index 000000000..aa18aedfa --- /dev/null +++ b/ppcls/data/imaug/batch_operators.py @@ -0,0 +1,115 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import numpy as np + +from .fmix import sample_mask + + +class BatchOperator(object): + """ BatchOperator """ + + def __init__(self, *args, **kwargs): + pass + + def _unpack(self, batch): + """ _unpack """ + assert isinstance(batch, list), \ + 'batch should be a list filled with tuples (img, label)' + bs = len(batch) + assert bs > 0, 'size of the batch data should > 0' + imgs, labels = list(zip(*batch)) + return np.array(imgs), np.array(labels), bs + + def __call__(self, batch): + return batch + + +class MixupOperator(BatchOperator): + """ Mixup operator """ + + def __init__(self, alpha=0.2): + assert alpha > 0., \ + 'parameter alpha[%f] should > 0.0' % (alpha) + self._alpha = alpha + + def __call__(self, batch): + imgs, labels, bs = self._unpack(batch) + idx = np.random.permutation(bs) + lam = np.random.beta(self._alpha, self._alpha) + imgs = lam * imgs + (1 - lam) * imgs[idx] + return list(zip(imgs, labels, labels[idx], [lam] * bs)) + + +class CutmixOperator(BatchOperator): + """ Cutmix operator """ + 
+ def __init__(self, alpha=0.2): + assert alpha > 0., \ + 'parameter alpha[%f] should > 0.0' % (alpha) + self._alpha = alpha + + def _rand_bbox(self, size, lam): + """ _rand_bbox """ + w = size[2] + h = size[3] + cut_rat = np.sqrt(1. - lam) + cut_w = np.int(w * cut_rat) + cut_h = np.int(h * cut_rat) + + # uniform + cx = np.random.randint(w) + cy = np.random.randint(h) + + bbx1 = np.clip(cx - cut_w // 2, 0, w) + bby1 = np.clip(cy - cut_h // 2, 0, h) + bbx2 = np.clip(cx + cut_w // 2, 0, w) + bby2 = np.clip(cy + cut_h // 2, 0, h) + + return bbx1, bby1, bbx2, bby2 + + def __call__(self, batch): + imgs, labels, bs = self._unpack(batch) + idx = np.random.permutation(bs) + lam = np.random.beta(self._alpha, self._alpha) + + bbx1, bby1, bbx2, bby2 = self._rand_bbox(imgs.shape, lam) + imgs[:, :, bbx1:bbx2, bby1:bby2] = imgs[idx, :, bbx1:bbx2, bby1:bby2] + lam = 1 - (float(bbx2 - bbx1) * (bby2 - bby1) / + (imgs.shape[-2] * imgs.shape[-1])) + return list(zip(imgs, labels, labels[idx], [lam] * bs)) + + +class FmixOperator(BatchOperator): + """ Fmix operator """ + + def __init__(self, alpha=1, decay_power=3, max_soft=0., reformulate=False): + self._alpha = alpha + self._decay_power = decay_power + self._max_soft = max_soft + self._reformulate = reformulate + + def __call__(self, batch): + imgs, labels, bs = self._unpack(batch) + idx = np.random.permutation(bs) + size = (imgs.shape[2], imgs.shape[3]) + lam, mask = sample_mask(self._alpha, self._decay_power, \ + size, self._max_soft, self._reformulate) + imgs = mask * imgs + (1 - mask) * imgs[idx] + return list(zip(imgs, labels, labels[idx], [lam] * bs)) diff --git a/ppcls/data/imaug/cutout.py b/ppcls/data/imaug/cutout.py new file mode 100644 index 000000000..1d80a36f2 --- /dev/null +++ b/ppcls/data/imaug/cutout.py @@ -0,0 +1,39 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
import numpy as np
import random


class Cutout(object):
    """ Cutout augmentation: zero out random square patches of an image.

    Args:
        n_holes: number of square patches to erase per call.
        length: side length of each square; patches are clipped at borders.
    """

    def __init__(self, n_holes=1, length=112):
        self.n_holes = n_holes
        self.length = length

    def __call__(self, img):
        """ cutout_image: erase patches in-place and return the same array.

        FIX: the original allocated an (h, w) float32 mask on every call
        and never used it -- the dead allocation is removed; the zeroing
        behavior and RNG call sequence are unchanged.
        """
        h, w = img.shape[:2]

        for _ in range(self.n_holes):
            # Random patch center; the square around it is clipped to the
            # image, so patches near the border are partially erased.
            y = np.random.randint(h)
            x = np.random.randint(w)

            y1 = np.clip(y - self.length // 2, 0, h)
            y2 = np.clip(y + self.length // 2, 0, h)
            x1 = np.clip(x - self.length // 2, 0, w)
            x2 = np.clip(x + self.length // 2, 0, w)

            img[y1:y2, x1:x2] = 0
        return img
import math
import random

import numpy as np
from scipy.stats import beta


def fftfreqnd(h, w=None, z=None):
    """ Get bin values for discrete fourier transform of size (h, w, z)

    :param h: Required, first dimension size
    :param w: Optional, second dimension size
    :param z: Optional, third dimension size
    """
    # fx/fz stay scalar 0 when the corresponding dimension is absent, so the
    # final np.sqrt broadcasts only over the dimensions actually supplied.
    fz = fx = 0
    fy = np.fft.fftfreq(h)

    if w is not None:
        fy = np.expand_dims(fy, -1)

        # rfft-style truncation of the last axis; the odd case keeps one more
        # bin (w // 2 + 2) than the even case.
        if w % 2 == 1:
            fx = np.fft.fftfreq(w)[:w // 2 + 2]
        else:
            fx = np.fft.fftfreq(w)[:w // 2 + 1]

    if z is not None:
        fy = np.expand_dims(fy, -1)
        # NOTE(review): both branches below are identical -- the odd/even
        # split looks vestigial; confirm against the upstream FMix code.
        if z % 2 == 1:
            fz = np.fft.fftfreq(z)[:, None]
        else:
            fz = np.fft.fftfreq(z)[:, None]

    return np.sqrt(fx * fx + fy * fy + fz * fz)


def get_spectrum(freqs, decay_power, ch, h, w=0, z=0):
    """ Samples a fourier image with given size and frequencies decayed by decay power

    :param freqs: Bin values for the discrete fourier transform
    :param decay_power: Decay power for frequency decay prop 1/f**d
    :param ch: Number of channels for the resulting mask
    :param h: Required, first dimension size
    :param w: Optional, second dimension size
    :param z: Optional, third dimension size
    """
    # Amplitude falls off as 1 / f**decay_power; the 1/max(w, h, z) floor
    # keeps the DC bin (frequency 0) from dividing by zero.
    scale = np.ones(1) / (np.maximum(freqs, np.array([1. / max(w, h, z)]))
                          **decay_power)

    # One (real, imag) pair per channel per frequency bin.
    param_size = [ch] + list(freqs.shape) + [2]
    param = np.random.randn(*param_size)

    scale = np.expand_dims(scale, -1)[None, :]

    return scale * param


def make_low_freq_image(decay, shape, ch=1):
    """ Sample a low frequency image from fourier space

    :param decay_power: Decay power for frequency decay prop 1/f**d
    :param shape: Shape of desired mask, list up to 3 dims
    :param ch: Number of channels for desired mask
    """
    freqs = fftfreqnd(*shape)
    spectrum = get_spectrum(freqs, decay, ch,
                            *shape)  #.reshape((1, *shape[:-1], -1))
    # Fold the sampled (real, imag) pair into one complex spectrum, then
    # invert back to image space.
    spectrum = spectrum[:, 0] + 1j * spectrum[:, 1]
    mask = np.real(np.fft.irfftn(spectrum, shape))

    # Crop to the requested spatial size (irfftn can return a padded array).
    if len(shape) == 1:
        mask = mask[:1, :shape[0]]
    if len(shape) == 2:
        mask = mask[:1, :shape[0], :shape[1]]
    if len(shape) == 3:
        mask = mask[:1, :shape[0], :shape[1], :shape[2]]

    mask = mask  # no-op kept from upstream
    # Normalise the mask into [0, 1].
    mask = (mask - mask.min())
    mask = mask / mask.max()
    return mask


def sample_lam(alpha, reformulate=False):
    """ Sample a lambda from symmetric beta distribution with given alpha

    :param alpha: Alpha value for beta distribution
    :param reformulate: If True, uses the reformulation of [1].
    """
    if reformulate:
        lam = beta.rvs(alpha + 1, alpha)
    else:
        lam = beta.rvs(alpha, alpha)

    return lam


def binarise_mask(mask, lam, in_shape, max_soft=0.0):
    """ Binarises a given low frequency image such that it has mean lambda.

    :param mask: Low frequency image, usually the result of `make_low_freq_image`
    :param lam: Mean value of final mask
    :param in_shape: Shape of inputs
    :param max_soft: Softening value between 0 and 0.5 which smooths hard edges in the mask.
    :return:
    """
    # Pixels sorted brightest-first; the top lam-fraction becomes 1 so the
    # binarised mask has mean approximately lam.
    idx = mask.reshape(-1).argsort()[::-1]
    mask = mask.reshape(-1)
    # Randomly round lam * size up or down so its expectation matches lam.
    num = math.ceil(lam * mask.size) if random.random() > 0.5 else math.floor(
        lam * mask.size)

    # Softening cannot exceed the smaller of the two class fractions.
    eff_soft = max_soft
    if max_soft > lam or max_soft > (1 - lam):
        eff_soft = min(lam, 1 - lam)

    soft = int(mask.size * eff_soft)
    num_low = int(num - soft)
    num_high = int(num + soft)

    mask[idx[:num_high]] = 1
    mask[idx[num_low:]] = 0
    # Linear ramp across the softened band between hard 1s and hard 0s.
    mask[idx[num_low:num_high]] = np.linspace(1, 0, (num_high - num_low))

    # NOTE(review): reshaped to NCHW (1, 1, H, W) -- assumes a 2-D in_shape;
    # confirm callers never pass 1-D or 3-D shapes here.
    mask = mask.reshape((1, 1, in_shape[0], in_shape[1]))
    return mask


def sample_mask(alpha, decay_power, shape, max_soft=0.0, reformulate=False):
    """ Samples a mean lambda from beta distribution parametrised by alpha, creates a low frequency image and binarises
    it based on this lambda

    :param alpha: Alpha value for beta distribution from which to sample mean of mask
    :param decay_power: Decay power for frequency decay prop 1/f**d
    :param shape: Shape of desired mask, list up to 3 dims
    :param max_soft: Softening value between 0 and 0.5 which smooths hard edges in the mask.
    :param reformulate: If True, uses the reformulation of [1].
    """
    if isinstance(shape, int):
        shape = (shape, )

    # Choose lambda
    lam = sample_lam(alpha, reformulate)

    # Make mask, get mean / std
    mask = make_low_freq_image(decay_power, shape)
    mask = binarise_mask(mask, lam, shape, max_soft)

    return float(lam), mask


def sample_and_apply(x,
                     alpha,
                     decay_power,
                     shape,
                     max_soft=0.0,
                     reformulate=False):
    """

    :param x: Image batch on which to apply fmix of shape [b, c, shape*]
    :param alpha: Alpha value for beta distribution from which to sample mean of mask
    :param decay_power: Decay power for frequency decay prop 1/f**d
    :param shape: Shape of desired mask, list up to 3 dims
    :param max_soft: Softening value between 0 and 0.5 which smooths hard edges in the mask.
    :param reformulate: If True, uses the reformulation of [1].
    :return: mixed input, permutation indices, lambda value of mix,
    """
    lam, mask = sample_mask(alpha, decay_power, shape, max_soft, reformulate)
    # Each image is blended with a randomly permuted partner through the mask.
    index = np.random.permutation(x.shape[0])

    x1, x2 = x * mask, x[index] * (1 - mask)
    return x1 + x2, index, lam


class FMixBase:
    """ FMix augmentation

        Args:
            decay_power (float): Decay power for frequency decay prop 1/f**d
            alpha (float): Alpha value for beta distribution from which to sample mean of mask
            size ([int] | [int, int] | [int, int, int]): Shape of desired mask, list up to 3 dims
            max_soft (float): Softening value between 0 and 0.5 which smooths hard edges in the mask.
            reformulate (bool): If True, uses the reformulation of [1].
    """

    def __init__(self,
                 decay_power=3,
                 alpha=1,
                 size=(32, 32),
                 max_soft=0.0,
                 reformulate=False):
        super().__init__()
        self.decay_power = decay_power
        self.reformulate = reformulate
        self.size = size
        self.alpha = alpha
        self.max_soft = max_soft
        # index/lam record the last sampled permutation and mix ratio for
        # use by a subclass's loss(); both start unset.
        self.index = None
        self.lam = None

    def __call__(self, x):
        # Abstract: subclasses implement the actual mixing.
        raise NotImplementedError

    def loss(self, *args, **kwargs):
        # Abstract: subclasses implement the mixed loss.
        raise NotImplementedError
import numpy as np
from PIL import Image
import pdb  # NOTE(review): debug-only import, unused below -- candidate for removal

# curr
# Module-level epoch counter; expected to be advanced by the training loop.
CURR_EPOCH = 0
# epoch for the prob to be the upper limit
NUM_EPOCHS = 240


class GridMask(object):
    # GridMask augmentation: zeroes a regular grid of bands on the image.
    # d1/d2: min/max grid cell size (a cell size is drawn per call);
    # rotate: exclusive upper bound of the random mask rotation in degrees;
    # ratio: fraction of each cell that is masked; mode=1 inverts the mask;
    # prob: upper-limit apply probability, ramped up over NUM_EPOCHS epochs.
    def __init__(self, d1, d2, rotate=1, ratio=0.5, mode=0, prob=1.):
        self.d1 = d1
        self.d2 = d2
        self.rotate = rotate
        self.ratio = ratio
        self.mode = mode
        self.st_prob = prob
        self.prob = prob
        self.last_prob = -1

    def set_prob(self):
        # Linearly ramp the apply-probability from 0 to st_prob as
        # CURR_EPOCH approaches NUM_EPOCHS.
        global CURR_EPOCH
        global NUM_EPOCHS
        self.prob = self.st_prob * min(1, 1.0 * CURR_EPOCH / NUM_EPOCHS)

    def __call__(self, img):
        self.set_prob()
        if abs(self.last_prob - self.prob) > 1e-10:
            global CURR_EPOCH
            global NUM_EPOCHS
            # Log only when the ramped probability actually changes.
            print(
                "self.prob is updated, self.prob={}, CURR_EPOCH: {}, NUM_EPOCHS: {}".
                format(self.prob, CURR_EPOCH, NUM_EPOCHS))
            self.last_prob = self.prob
        # print("CURR_EPOCH: {}, NUM_EPOCHS: {}, self.prob is set as: {}".format(CURR_EPOCH, NUM_EPOCHS, self.prob) )
        if np.random.rand() > self.prob:
            return img
        # img is channels-first here: (C, H, W).
        _, h, w = img.shape
        # Build the mask on a 1.5x canvas so a rotated center-crop still
        # covers the whole image.
        hh = int(1.5 * h)
        ww = int(1.5 * w)
        d = np.random.randint(self.d1, self.d2)
        #d = self.d
        # Length of the zeroed band inside each d-sized cell.
        self.l = int(d * self.ratio + 0.5)
        mask = np.ones((hh, ww), np.float32)
        st_h = np.random.randint(d)
        st_w = np.random.randint(d)
        # Zero a horizontal band every d rows...
        for i in range(-1, hh // d + 1):
            s = d * i + st_h
            t = s + self.l
            s = max(min(s, hh), 0)
            t = max(min(t, hh), 0)
            mask[s:t, :] *= 0
        # ...and a vertical band every d columns, leaving a grid of squares.
        for i in range(-1, ww // d + 1):
            s = d * i + st_w
            t = s + self.l
            s = max(min(s, ww), 0)
            t = max(min(t, ww), 0)
            mask[:, s:t] *= 0
        # Rotate the mask by a random angle, then center-crop back to (h, w).
        r = np.random.randint(self.rotate)
        mask = Image.fromarray(np.uint8(mask))
        mask = mask.rotate(r)
        mask = np.asarray(mask)
        mask = mask[(hh - h) // 2:(hh - h) // 2 + h, (ww - w) // 2:(ww - w) //
                    2 + w]

        if self.mode == 1:
            # mode 1 keeps the bands and hides the squares instead.
            mask = 1 - mask

        mask = np.expand_dims(mask, axis=0)
        img = (img * mask).astype(img.dtype)

        return img
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import random


class HideAndSeek(object):
    """ Hide-and-Seek augmentation: split a CHW image into a grid of patches
    and zero each patch independently with a fixed probability. A grid size
    of 0 (one of the candidates) disables hiding for the call.
    """

    def __init__(self):
        # possible grid size, 0 means no hiding
        self.grid_sizes = [0, 16, 32, 44, 56]
        # hiding probability
        self.hide_prob = 0.5

    def __call__(self, img):
        # randomly choose one grid size for this call
        grid_size = np.random.choice(self.grid_sizes)

        _, h, w = img.shape

        if grid_size == 0:
            return img

        # NOTE(review): the outer loop bound comes from w but indexes axis 1
        # (the H axis of CHW) -- harmless for square inputs; confirm intent
        # before using on non-square images.
        for col in range(0, w, grid_size):
            for row in range(0, h, grid_size):
                col_end = min(w, col + grid_size)
                row_end = min(h, row + grid_size)
                if random.random() <= self.hide_prob:
                    img[:, col:col_end, row:row_end] = 0

        return img
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import six +import math +import random +import functools +import cv2 +import numpy as np + + +class OperatorParamError(ValueError): + """ OperatorParamError + """ + pass + + +class DecodeImage(object): + """ decode image """ + + def __init__(self, to_rgb=True, to_np=False, channel_first=False): + self.to_rgb = to_rgb + self.to_np = to_np #to numpy + self.channel_first = channel_first #only enabled when to_np is True + + def __call__(self, img): + if six.PY2: + assert type(img) is str and len( + img) > 0, "invalid input 'img' in DecodeImage" + else: + assert type(img) is bytes and len( + img) > 0, "invalid input 'img' in DecodeImage" + data = np.frombuffer(img, dtype='uint8') + img = cv2.imdecode(data, 1) + if self.to_rgb: + assert img.shape[2] == 3, 'invalid shape of image[%s]' % ( + img.shape) + img = img[:, :, ::-1] + + if self.channel_first: + img = img.transpose((2, 0, 1)) + + return img + + +class ResizeImage(object): + """ resize image """ + + def __init__(self, size=None, resize_short=None): + if resize_short is not None and resize_short > 0: + self.resize_short = resize_short + self.w = None + self.h = None + elif size is not None: + self.resize_short = None + self.w = size if type(size) is int else size[0] + self.h = size if type(size) is int else size[1] + else: + raise OperatorParamError("invalid params for ReisizeImage for '\ + 'both 'size' and 'resize_short' are None") + + def 
__call__(self, img): + img_h, img_w = img.shape[:2] + if self.resize_short is not None: + percent = float(self.resize_short) / min(img_w, img_h) + w = int(round(img_w * percent)) + h = int(round(img_h * percent)) + else: + w = self.w + h = self.h + + return cv2.resize(img, (w, h)) + + +class CropImage(object): + """ crop image """ + + def __init__(self, size): + if type(size) is int: + self.size = (size, size) + else: + self.size = size # (h, w) + + def __call__(self, img): + w, h = self.size + img_h, img_w = img.shape[:2] + w_start = (img_w - w) // 2 + h_start = (img_h - h) // 2 + + w_end = w_start + w + h_end = h_start + h + return img[h_start:h_end, w_start:w_end, :] + + +class RandCropImage(object): + """ random crop image """ + + def __init__(self, size, scale=None, ratio=None): + if type(size) is int: + self.size = (size, size) # (h, w) + else: + self.size = size + + self.scale = [0.08, 1.0] if scale is None else scale + self.ratio = [3. / 4., 4. / 3.] if ratio is None else ratio + + def __call__(self, img): + size = self.size + scale = self.scale + ratio = self.ratio + + aspect_ratio = math.sqrt(random.uniform(*ratio)) + w = 1. * aspect_ratio + h = 1. 
/ aspect_ratio + + img_h, img_w = img.shape[:2] + + bound = min((float(img_w) / img_h) / (w**2), + (float(img_h) / img_w) / (h**2)) + scale_max = min(scale[1], bound) + scale_min = min(scale[0], bound) + + target_area = img_w * img_h * random.uniform(\ + scale_min, scale_max) + target_size = math.sqrt(target_area) + w = int(target_size * w) + h = int(target_size * h) + + i = random.randint(0, img_w - w) + j = random.randint(0, img_h - h) + + img = img[j:j + h, i:i + w, :] + return cv2.resize(img, size) + + +class RandFlipImage(object): + """ random flip image + flip_code: + 1: Flipped Horizontally + 0: Flipped Vertically + -1: Flipped Horizontally & Vertically + """ + + def __init__(self, flip_code=1): + assert flip_code in [-1, 0, 1 + ], "flip_code should be a value in [-1, 0, 1]" + self.flip_code = flip_code + + def __call__(self, img): + if random.randint(0, 1) == 1: + return cv2.flip(img, self.flip_code) + else: + return img + + +class NormalizeImage(object): + """ normalize image such as substract mean, divide std + """ + + def __init__(self, scale=None, mean=None, std=None, order='chw'): + if isinstance(scale, str): scale = eval(scale) + self.scale = np.float32(scale if scale is not None else 1.0 / 255.0) + mean = mean if mean is not None else [0.485, 0.456, 0.406] + std = std if std is not None else [0.229, 0.224, 0.225] + + shape = (3, 1, 1) if order == 'chw' else (1, 1, 3) + self.mean = np.array(mean).reshape(shape).astype('float32') + self.std = np.array(std).reshape(shape).astype('float32') + + def __call__(self, img): + from PIL import Image + if isinstance(img, Image.Image): + img = np.array(img) + + assert isinstance(img, + np.ndarray), "invalid input 'img' in NormalizeImage" + return (img.astype('float32') * self.scale - self.mean) / self.std + + +class ToCHWImage(object): + """ convert hwc image to chw image + """ + + def __init__(self): + pass + + def __call__(self, img): + from PIL import Image + if isinstance(img, Image.Image): + img = 
class RandAugment(object):
    """RandAugment data augmentation (Cubuk et al., 2019).

    Applies `num_layers` randomly chosen PIL transforms per image, each at
    a strength derived from magnitude / max_level (10).

    Args:
        num_layers: number of ops applied to each image.
        magnitude: global strength in [0, max_level].
        fillcolor: RGB fill colour used by the affine transforms.
    """

    def __init__(self, num_layers, magnitude, fillcolor=(128, 128, 128)):
        self.num_layers = num_layers
        self.magnitude = magnitude
        self.max_level = 10

        abso_level = self.magnitude / self.max_level
        # Maximum parameter for each op, scaled by the global level.
        self.level_map = {
            "shearX": 0.3 * abso_level,
            "shearY": 0.3 * abso_level,
            "translateX": 150.0 / 331 * abso_level,
            "translateY": 150.0 / 331 * abso_level,
            "rotate": 30 * abso_level,
            "color": 0.9 * abso_level,
            "posterize": int(4.0 * abso_level),
            "solarize": 256.0 * abso_level,
            "contrast": 0.9 * abso_level,
            "sharpness": 0.9 * abso_level,
            "brightness": 0.9 * abso_level,
            "autocontrast": 0,
            "equalize": 0,
            "invert": 0
        }
        # Fix: np.random.choice requires a sequence/1-d array; the original
        # passed dict.keys() (a view), which numpy rejects with ValueError.
        # Materialize the op names once.
        self.available_op_names = list(self.level_map.keys())

        # from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
        def rotate_with_fill(img, magnitude):
            rot = img.convert("RGBA").rotate(magnitude)
            return Image.composite(rot,
                                   Image.new("RGBA", rot.size, (128, ) * 4),
                                   rot).convert(img.mode)

        self.func = {
            "shearX": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
                Image.BICUBIC, fillcolor=fillcolor),
            "shearY": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
                Image.BICUBIC, fillcolor=fillcolor),
            "translateX": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),
                fillcolor=fillcolor),
            "translateY": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
                fillcolor=fillcolor),
            "rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
            "color": lambda img, magnitude: ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1])),
            "posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude),
            "solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude),
            "contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "autocontrast": lambda img, magnitude: ImageOps.autocontrast(img),
            "equalize": lambda img, magnitude: ImageOps.equalize(img),
            "invert": lambda img, magnitude: ImageOps.invert(img)
        }

    def __call__(self, img):
        """Apply `num_layers` randomly selected ops to a PIL image."""
        for _ in range(self.num_layers):
            op_name = np.random.choice(self.available_op_names)
            img = self.func[op_name](img, self.level_map[op_name])
        return img
class RandomErasing(object):
    """Randomly erase a rectangle of a CHW image, filling it with `mean`.

    With probability 1 - EPSILON the image is returned unchanged. Otherwise
    up to 100 attempts are made to sample a rectangle whose area fraction
    lies in [sl, sh] and whose aspect ratio lies in [r1, 1/r1]; the first
    rectangle that fits is erased in place.

    Args:
        EPSILON: probability of applying the erase.
        sl, sh: min/max erased-area fraction of the image.
        r1: lower bound of the aspect-ratio range [r1, 1/r1].
        mean: per-channel fill values; defaults to zeros.
    """

    def __init__(self, EPSILON=0.5, sl=0.02, sh=0.4, r1=0.3, mean=None):
        self.EPSILON = EPSILON
        # Avoid a shared mutable default argument.
        self.mean = [0., 0., 0.] if mean is None else mean
        self.sl = sl
        self.sh = sh
        self.r1 = r1

    def __call__(self, img):
        if random.uniform(0, 1) > self.EPSILON:
            return img

        for attempt in range(100):
            area = img.shape[1] * img.shape[2]

            target_area = random.uniform(self.sl, self.sh) * area
            aspect_ratio = random.uniform(self.r1, 1 / self.r1)

            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))

            if w < img.shape[2] and h < img.shape[1]:
                x1 = random.randint(0, img.shape[1] - h)
                y1 = random.randint(0, img.shape[2] - w)
                if img.shape[0] == 3:
                    img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
                    img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
                    img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
                else:
                    # Fix: the single-channel branch filled with mean[1];
                    # the first (only) channel must use mean[0].
                    img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
                return img
        return img
class SampleNumException(Exception):
    """Raised when the dataset is smaller than one (drop_last) batch.

    Args:
        message: optional prefix prepended to the explanation.
        sample_num: number of samples actually available.
        batch_size: per-trainer batch size that could not be filled.
    """

    def __init__(self, message='', sample_num=0, batch_size=1):
        # Fix: "turnning" -> "turning" in the user-facing message.
        message += "\nError: The number of the whole data ({}) " \
                   "is smaller than the batch_size ({}), and drop_last " \
                   "is turning on, so nothing will feed in program, " \
                   "Terminated now. Please reset batch_size to a smaller " \
                   "number or feed more data!".format(sample_num, batch_size)
        super(SampleNumException, self).__init__(message)
def create_file_list(params):
    """Build a temporary label file listing all images under data_dir.

    Used in test mode, where no ground-truth file list exists: every image
    file found in params['data_dir'] is written to '.tmp.txt' with a dummy
    label 0, and params['file_list'] is pointed at that file.

    Args:
        params(dict): reader config; reads 'data_dir', sets 'file_list'.
    """
    # Fix: imghdr was used without ever being imported, so test mode
    # crashed with NameError. Imported locally to keep the dependency
    # confined to this test-mode-only path.
    import imghdr

    data_dir = params.get('data_dir', '')
    params['file_list'] = ".tmp.txt"
    imgtype_list = {'jpg', 'bmp', 'png', 'jpeg', 'rgb', 'tif', 'tiff'}
    with open(params['file_list'], "w") as fout:
        for file_name in os.listdir(data_dir):
            file_path = os.path.join(data_dir, file_name)
            # Sniff the real image type from file magic, not the extension.
            if imghdr.what(file_path) not in imgtype_list:
                continue
            fout.write(file_name + " 0" + "\n")
def partial_reader(params, full_lines, part_id=0, part_num=1):
    """
    create a reader over one shard of the label list

    Args:
        params(dict): reader config ('batch_size', 'mode', 'data_dir',
            'transforms')
        full_lines(list): label lines, one "<path> <label>" per entry
        part_id(int): index of this shard
        part_num(int): total number of shards
    """
    assert part_id < part_num, ("part_num: {} should be larger "
                                "than part_id: {}".format(part_num, part_id))

    shard_lines = full_lines[part_id::part_num]

    batch_size = int(params['batch_size']) // trainers_num
    if params['mode'] != "test" and len(shard_lines) < batch_size:
        raise SampleNumException('', len(shard_lines), batch_size)

    def reader():
        ops = create_operators(params['transforms'])
        for line in shard_lines:
            img_path, label = line.split()
            img_path = os.path.join(params['data_dir'], img_path)
            # Fix: read image bytes in binary mode and close the handle;
            # text-mode open() fails (UnicodeDecodeError) on binary image
            # data under Python 3 and leaked the file object.
            with open(img_path, 'rb') as f:
                img = f.read()
            img = transform(img, ops)
            yield (img, int(label))

    return reader
""" + Create a reader for trainning/validate/test + + Args: + config(dict): arguments + mode(str): train or val or test + seed(int): random seed used to generate same sequence in each trainer + + Returns: + the specific reader + """ + + def __init__(self, config, mode='train', seed=None): + try: + self.params = config[mode.upper()] + except KeyError: + raise ModeException(mode=mode) + + use_mix = config.get('use_mix') + self.params['mode'] = mode + if seed is not None: + self.params['shuffle_seed'] = seed + self.batch_ops = [] + if use_mix and mode == "train": + self.batch_ops = create_operators(self.params['mix']) + + def __call__(self): + reader = mp_reader(self.params) + + batch_size = int(self.params['batch_size']) // trainers_num + + def wrapper(): + batch = [] + for idx, sample in enumerate(reader()): + img, label = sample + batch.append((img, label)) + if (idx + 1) % batch_size == 0: + batch = transform(batch, self.batch_ops) + yield batch + batch = [] + + return wrapper + + +signal.signal(signal.SIGINT, term_mp) +signal.signal(signal.SIGTERM, term_mp) diff --git a/ppcls/modeling/__init__.py b/ppcls/modeling/__init__.py new file mode 100644 index 000000000..e5badd50a --- /dev/null +++ b/ppcls/modeling/__init__.py @@ -0,0 +1,20 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +from . import architectures +from . 
import loss + +from .architectures import * +from .loss import * +from .utils import similar_architectures diff --git a/ppcls/modeling/architectures/__init__.py b/ppcls/modeling/architectures/__init__.py new file mode 100644 index 000000000..f1ff7d2eb --- /dev/null +++ b/ppcls/modeling/architectures/__init__.py @@ -0,0 +1,44 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +from .alexnet import AlexNet +from .mobilenet_v1 import MobileNetV1_x0_25, MobileNetV1_x0_5, MobileNetV1_x1_0, MobileNetV1_x0_75, MobileNetV1 +from .mobilenet_v2 import MobileNetV2_x0_25, MobileNetV2_x0_5, MobileNetV2_x0_75, MobileNetV2_x1_0, MobileNetV2_x1_5, MobileNetV2_x2_0, MobileNetV2 +from .mobilenet_v3 import MobileNetV3_small_x0_35, MobileNetV3_small_x0_5, MobileNetV3_small_x0_75, MobileNetV3_small_x1_0, MobileNetV3_small_x1_25, MobileNetV3_large_x0_35, MobileNetV3_large_x0_5, MobileNetV3_large_x0_75, MobileNetV3_large_x1_0, MobileNetV3_large_x1_25 +from .googlenet import GoogLeNet +from .vgg import VGG11, VGG13, VGG16, VGG19 +from .resnet import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152 +from .resnet_vc import ResNet50_vc, ResNet101_vc, ResNet152_vc +from .resnet_vd import ResNet18_vd, ResNet34_vd, ResNet50_vd, ResNet101_vd, ResNet152_vd, ResNet200_vd +from .resnext import ResNeXt50_64x4d, ResNeXt101_64x4d, ResNeXt152_64x4d, ResNeXt50_32x4d, ResNeXt101_32x4d, ResNeXt152_32x4d +from .resnext_vd import ResNeXt50_vd_64x4d, 
ResNeXt101_vd_64x4d, ResNeXt152_vd_64x4d, ResNeXt50_vd_32x4d, ResNeXt101_vd_32x4d, ResNeXt152_vd_32x4d +from .inception_v4 import InceptionV4 +from .se_resnet_vd import SE_ResNet18_vd, SE_ResNet34_vd, SE_ResNet50_vd, SE_ResNet101_vd, SE_ResNet152_vd, SE_ResNet200_vd +from .se_resnext import SE_ResNeXt50_32x4d, SE_ResNeXt101_32x4d, SE_ResNeXt152_32x4d +from .se_resnext_vd import SE_ResNeXt50_vd_32x4d, SE_ResNeXt101_vd_32x4d, SENet154_vd +from .dpn import DPN68, DPN92, DPN98, DPN107, DPN131 +from .shufflenet_v2_swish import ShuffleNetV2_swish, ShuffleNetV2_x0_5_swish, ShuffleNetV2_x1_0_swish, ShuffleNetV2_x1_5_swish, ShuffleNetV2_x2_0_swish +from .shufflenet_v2 import ShuffleNetV2_x0_25, ShuffleNetV2_x0_33, ShuffleNetV2_x0_5, ShuffleNetV2_x1_0, ShuffleNetV2_x1_5, ShuffleNetV2_x2_0, ShuffleNetV2 +from .xception import Xception41, Xception65, Xception71 +from .xception_deeplab import Xception41_deeplab, Xception65_deeplab, Xception71_deeplab +from .densenet import DenseNet121, DenseNet161, DenseNet169, DenseNet201, DenseNet264 +from .squeezenet import SqueezeNet1_0, SqueezeNet1_1 +from .darknet import DarkNet53 +from .resnext101_wsl import ResNeXt101_32x8d_wsl, ResNeXt101_32x16d_wsl, ResNeXt101_32x32d_wsl, ResNeXt101_32x48d_wsl, Fix_ResNeXt101_32x48d_wsl +from .efficientnet import EfficientNet, EfficientNetB0, EfficientNetB1, EfficientNetB2, EfficientNetB3, EfficientNetB4, EfficientNetB5, EfficientNetB6, EfficientNetB7 +from .res2net import Res2Net50_48w_2s, Res2Net50_26w_4s, Res2Net50_14w_8s, Res2Net50_26w_6s, Res2Net50_26w_8s, Res2Net101_26w_4s, Res2Net152_26w_4s +from .res2net_vd import Res2Net50_vd_48w_2s, Res2Net50_vd_26w_4s, Res2Net50_vd_14w_8s, Res2Net50_vd_26w_6s, Res2Net50_vd_26w_8s, Res2Net101_vd_26w_4s, Res2Net152_vd_26w_4s, Res2Net200_vd_26w_4s +from .hrnet import HRNet_W18_C, HRNet_W30_C, HRNet_W32_C, HRNet_W40_C, HRNet_W44_C, HRNet_W48_C, HRNet_W60_C, HRNet_W64_C, SE_HRNet_W18_C, SE_HRNet_W30_C, SE_HRNet_W32_C, SE_HRNet_W40_C, SE_HRNet_W44_C, 
SE_HRNet_W48_C, SE_HRNet_W60_C, SE_HRNet_W64_C +from .darts_gs import DARTS_GS_6M, DARTS_GS_4M +from .resnet_acnet import ResNet18_ACNet, ResNet34_ACNet, ResNet50_ACNet, ResNet101_ACNet, ResNet152_ACNet diff --git a/ppcls/modeling/architectures/alexnet.py b/ppcls/modeling/architectures/alexnet.py new file mode 100644 index 000000000..36f7e8678 --- /dev/null +++ b/ppcls/modeling/architectures/alexnet.py @@ -0,0 +1,172 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
class AlexNet():
    """AlexNet (Krizhevsky et al., 2012) built with the paddle fluid API.

    Parameters are named conv1..conv5 / fc6..fc8 with "_weights" and
    "_offset" suffixes, and every layer is initialized from
    Uniform(-stdv, stdv) with stdv = 1/sqrt(fan_in), matching the original
    inline implementation. The eight near-identical layer definitions are
    factored into private helpers; the public interface (net) is unchanged.
    """

    def __init__(self):
        pass

    def _conv(self, input, num_filters, filter_size, stride, padding, name):
        """conv2d + relu; stdv derived from in_channels * filter_size^2."""
        stdv = 1.0 / math.sqrt(input.shape[1] * filter_size * filter_size)
        return fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            groups=1,
            act='relu',
            bias_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name=name + "_offset"),
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name=name + "_weights"))

    def _max_pool(self, input):
        """3x3 stride-2 max pooling with no padding."""
        return fluid.layers.pool2d(
            input=input,
            pool_size=3,
            pool_stride=2,
            pool_padding=0,
            pool_type='max')

    def _fc(self, input, size, act, name):
        """fc layer; stdv derived from the flattened input dimension."""
        fan_in = 1
        for dim in input.shape[1:]:
            fan_in *= dim
        stdv = 1.0 / math.sqrt(fan_in * 1.0)
        return fluid.layers.fc(
            input=input,
            size=size,
            act=act,
            bias_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name=name + "_offset"),
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name=name + "_weights"))

    def net(self, input, class_dim=1000):
        """Build the network on `input`; returns the class_dim-way logits."""
        conv1 = self._conv(input, 64, 11, 4, 2, "conv1")
        pool1 = self._max_pool(conv1)
        conv2 = self._conv(pool1, 192, 5, 1, 2, "conv2")
        pool2 = self._max_pool(conv2)
        conv3 = self._conv(pool2, 384, 3, 1, 1, "conv3")
        conv4 = self._conv(conv3, 256, 3, 1, 1, "conv4")
        conv5 = self._conv(conv4, 256, 3, 1, 1, "conv5")
        pool5 = self._max_pool(conv5)

        drop6 = fluid.layers.dropout(x=pool5, dropout_prob=0.5)
        fc6 = self._fc(drop6, 4096, 'relu', "fc6")
        drop7 = fluid.layers.dropout(x=fc6, dropout_prob=0.5)
        fc7 = self._fc(drop7, 4096, 'relu', "fc7")
        # Final classifier has no activation (raw logits), as before.
        return self._fc(fc7, class_dim, None, "fc8")
class DarkNet53():
    """DarkNet-53 classification backbone (paddle fluid API)."""

    def __init__(self):

        pass

    def net(self, input, class_dim=1000):
        """Build the network on `input`; returns class_dim-way FC output."""
        # Five stages with 1/2/8/8/4 residual blocks respectively.
        block_counts = [1, 2, 8, 8, 4]
        block_func = self.basicblock

        stem = self.conv_bn_layer(
            input,
            ch_out=32,
            filter_size=3,
            stride=1,
            padding=1,
            name="yolo_input")
        body = self.downsample(
            stem, ch_out=stem.shape[1] * 2, name="yolo_input.downsample")

        last_stage = len(block_counts) - 1
        for idx, count in enumerate(block_counts):
            body = self.layer_warp(
                block_func,
                body,
                32 * (2**idx),
                count,
                name="stage.{}".format(idx))
            # No downsampling after the final stage.
            if idx < last_stage:
                body = self.downsample(
                    body,
                    ch_out=body.shape[1] * 2,
                    name="stage.{}.downsample".format(idx))

        pooled = fluid.layers.pool2d(
            input=body, pool_type='avg', global_pooling=True)
        stdv = 1.0 / math.sqrt(pooled.shape[1] * 1.0)
        return fluid.layers.fc(
            input=pooled,
            size=class_dim,
            param_attr=ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name='fc_weights'),
            bias_attr=ParamAttr(name='fc_offset'))

    def conv_bn_layer(self,
                      input,
                      ch_out,
                      filter_size,
                      stride,
                      padding,
                      name=None):
        """Bias-free conv2d followed by batch_norm with relu."""
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=ch_out,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            act=None,
            param_attr=ParamAttr(name=name + ".conv.weights"),
            bias_attr=False)

        bn_name = name + ".bn"
        return fluid.layers.batch_norm(
            input=conv,
            act='relu',
            param_attr=ParamAttr(name=bn_name + '.scale'),
            bias_attr=ParamAttr(name=bn_name + '.offset'),
            moving_mean_name=bn_name + '.mean',
            moving_variance_name=bn_name + '.var')

    def downsample(self,
                   input,
                   ch_out,
                   filter_size=3,
                   stride=2,
                   padding=1,
                   name=None):
        """Stride-2 conv_bn_layer that halves the spatial resolution."""
        return self.conv_bn_layer(
            input,
            ch_out=ch_out,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            name=name)

    def basicblock(self, input, ch_out, name=None):
        """1x1-reduce / 3x3-expand residual block with identity shortcut."""
        squeeze = self.conv_bn_layer(input, ch_out, 1, 1, 0, name=name + ".0")
        expand = self.conv_bn_layer(
            squeeze, ch_out * 2, 3, 1, 1, name=name + ".1")
        return fluid.layers.elementwise_add(x=input, y=expand, act=None)

    def layer_warp(self, block_func, input, ch_out, count, name=None):
        """Stack `count` residual blocks under the given name prefix."""
        out = block_func(input, ch_out, name='{}.0'.format(name))
        for blk_idx in range(1, count):
            out = block_func(out, ch_out, name='{}.{}'.format(name, blk_idx))
        return out
+# Licensed under the Apache License, Version 2.0; +# -------------------------------------------------------- + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import os +import sys +import numpy as np +import time +import functools +import paddle +import paddle.fluid as fluid +from paddle.fluid.param_attr import ParamAttr +from paddle.fluid.initializer import Xavier +from paddle.fluid.initializer import Normal +from paddle.fluid.initializer import Constant + +from collections import namedtuple +Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat') + +arch_dict = { + 'DARTS_GS_6M': Genotype( + normal=[('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_5x5', 1), + ('sep_conv_5x5', 0), ('sep_conv_3x3', 2), ('sep_conv_3x3', 1), + ('skip_connect', 4), ('sep_conv_3x3', 3)], + normal_concat=range(2, 6), + reduce=[('sep_conv_5x5', 0), ('max_pool_3x3', 1), ('dil_conv_5x5', 2), + ('sep_conv_5x5', 0), ('sep_conv_3x3', 1), ('dil_conv_5x5', 3), + ('dil_conv_3x3', 1), ('sep_conv_3x3', 2)], + reduce_concat=range(2, 6)), + 'DARTS_GS_4M': Genotype( + normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), + ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), + ('skip_connect', 0), ('dil_conv_3x3', 1)], + normal_concat=range(2, 6), + reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('max_pool_3x3', 0), + ('avg_pool_3x3', 1), ('skip_connect', 3), ('skip_connect', 2), + ('sep_conv_3x3', 0), ('sep_conv_5x5', 2)], + reduce_concat=range(2, 6)), +} + +__all__ = list(arch_dict.keys()) + +OPS = { + 'none' : lambda input, C, stride, name, affine: Zero(input, stride, name), + 'avg_pool_3x3' : lambda input, C, stride, name, affine: fluid.layers.pool2d(input, 3, 'avg', pool_stride=stride, pool_padding=1, name=name), + 'max_pool_3x3' : lambda input, C, stride, name, affine: fluid.layers.pool2d(input, 3, 'max', pool_stride=stride, pool_padding=1, name=name), + 'skip_connect' : 
lambda input,C, stride, name, affine: Identity(input, name) if stride == 1 else FactorizedReduce(input, C, name=name, affine=affine), + 'sep_conv_3x3' : lambda input,C, stride, name, affine: SepConv(input, C, C, 3, stride, 1, name=name, affine=affine), + 'sep_conv_5x5' : lambda input,C, stride, name, affine: SepConv(input, C, C, 5, stride, 2, name=name, affine=affine), + 'sep_conv_7x7' : lambda input,C, stride, name, affine: SepConv(input, C, C, 7, stride, 3, name=name, affine=affine), + 'dil_conv_3x3' : lambda input,C, stride, name, affine: DilConv(input, C, C, 3, stride, 2, 2, name=name, affine=affine), + 'dil_conv_5x5' : lambda input,C, stride, name, affine: DilConv(input, C, C, 5, stride, 4, 2, name=name, affine=affine), + 'conv_7x1_1x7' : lambda input,C, stride, name, affine: SevenConv(input, C, name=name, affine=affine) +} + + +def ReLUConvBN(input, + C_out, + kernel_size, + stride, + padding, + name='', + affine=True): + relu_a = fluid.layers.relu(input) + conv2d_a = fluid.layers.conv2d( + relu_a, C_out, kernel_size, stride, padding, bias_attr=False) + if affine: + reluconvbn_out = fluid.layers.batch_norm( + conv2d_a, + param_attr=ParamAttr( + initializer=Constant(1.), name=name + 'op.2.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), name=name + 'op.2.bias'), + moving_mean_name=name + 'op.2.running_mean', + moving_variance_name=name + 'op.2.running_var') + else: + reluconvbn_out = fluid.layers.batch_norm( + conv2d_a, + param_attr=ParamAttr( + initializer=Constant(1.), + learning_rate=0., + name=name + 'op.2.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), + learning_rate=0., + name=name + 'op.2.bias'), + moving_mean_name=name + 'op.2.running_mean', + moving_variance_name=name + 'op.2.running_var') + return reluconvbn_out + + +def DilConv(input, + C_in, + C_out, + kernel_size, + stride, + padding, + dilation, + name='', + affine=True): + relu_a = fluid.layers.relu(input) + conv2d_a = fluid.layers.conv2d( + relu_a, + C_in, + 
kernel_size, + stride, + padding, + dilation, + groups=C_in, + bias_attr=False, + use_cudnn=False) + conv2d_b = fluid.layers.conv2d(conv2d_a, C_out, 1, bias_attr=False) + if affine: + dilconv_out = fluid.layers.batch_norm( + conv2d_b, + param_attr=ParamAttr( + initializer=Constant(1.), name=name + 'op.3.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), name=name + 'op.3.bias'), + moving_mean_name=name + 'op.3.running_mean', + moving_variance_name=name + 'op.3.running_var') + else: + dilconv_out = fluid.layers.batch_norm( + conv2d_b, + param_attr=ParamAttr( + initializer=Constant(1.), + learning_rate=0., + name=name + 'op.3.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), + learning_rate=0., + name=name + 'op.3.bias'), + moving_mean_name=name + 'op.3.running_mean', + moving_variance_name=name + 'op.3.running_var') + return dilconv_out + + +def SepConv(input, + C_in, + C_out, + kernel_size, + stride, + padding, + name='', + affine=True): + relu_a = fluid.layers.relu(input) + conv2d_a = fluid.layers.conv2d( + relu_a, + C_in, + kernel_size, + stride, + padding, + groups=C_in, + bias_attr=False, + use_cudnn=False) + conv2d_b = fluid.layers.conv2d(conv2d_a, C_in, 1, bias_attr=False) + if affine: + bn_a = fluid.layers.batch_norm( + conv2d_b, + param_attr=ParamAttr( + initializer=Constant(1.), name=name + 'op.3.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), name=name + 'op.3.bias'), + moving_mean_name=name + 'op.3.running_mean', + moving_variance_name=name + 'op.3.running_var') + else: + bn_a = fluid.layers.batch_norm( + conv2d_b, + param_attr=ParamAttr( + initializer=Constant(1.), + learning_rate=0., + name=name + 'op.3.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), + learning_rate=0., + name=name + 'op.3.bias'), + moving_mean_name=name + 'op.3.running_mean', + moving_variance_name=name + 'op.3.running_var') + + relu_b = fluid.layers.relu(bn_a) + conv2d_d = fluid.layers.conv2d( + relu_b, + C_in, + kernel_size, + 1, + 
padding, + groups=C_in, + bias_attr=False, + use_cudnn=False) + conv2d_e = fluid.layers.conv2d(conv2d_d, C_out, 1, bias_attr=False) + if affine: + sepconv_out = fluid.layers.batch_norm( + conv2d_e, + param_attr=ParamAttr( + initializer=Constant(1.), name=name + 'op.7.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), name=name + 'op.7.bias'), + moving_mean_name=name + 'op.7.running_mean', + moving_variance_name=name + 'op.7.running_var') + else: + sepconv_out = fluid.layers.batch_norm( + conv2d_e, + param_attr=ParamAttr( + initializer=Constant(1.), + learning_rate=0., + name=name + 'op.7.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), + learning_rate=0., + name=name + 'op.7.bias'), + moving_mean_name=name + 'op.7.running_mean', + moving_variance_name=name + 'op.7.running_var') + return sepconv_out + + +def SevenConv(input, C_out, stride, name='', affine=True): + relu_a = fluid.layers.relu(input) + conv2d_a = fluid.layers.conv2d( + relu_a, + C_out, (1, 7), (1, stride), (0, 3), + param_attr=ParamAttr( + initializer=Xavier( + uniform=False, fan_in=0), + name=name + 'op.1.weight'), + bias_attr=False) + conv2d_b = fluid.layers.conv2d( + conv2d_a, + C_out, (7, 1), (stride, 1), (3, 0), + param_attr=ParamAttr( + initializer=Xavier( + uniform=False, fan_in=0), + name=name + 'op.2.weight'), + bias_attr=False) + if affine: + out = fluid.layers.batch_norm( + conv2d_b, + param_attr=ParamAttr( + initializer=Constant(1.), name=name + 'op.3.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), name=name + 'op.3.bias'), + moving_mean_name=name + 'op.3.running_mean', + moving_variance_name=name + 'op.3.running_var') + else: + out = fluid.layers.batch_norm( + conv2d_b, + param_attr=ParamAttr( + initializer=Constant(1.), + learning_rate=0., + name=name + 'op.3.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), + learning_rate=0., + name=name + 'op.3.bias'), + moving_mean_name=name + 'op.3.running_mean', + moving_variance_name=name + 
'op.3.running_var') + + +def Identity(input, name=''): + return input + + +def Zero(input, stride, name=''): + ones = np.ones(input.shape[-2:]) + ones[::stride, ::stride] = 0 + ones = fluid.layers.assign(ones) + return input * ones + + +def FactorizedReduce(input, C_out, name='', affine=True): + relu_a = fluid.layers.relu(input) + conv2d_a = fluid.layers.conv2d( + relu_a, + C_out // 2, + 1, + 2, + param_attr=ParamAttr( + initializer=Xavier( + uniform=False, fan_in=0), + name=name + 'conv_1.weight'), + bias_attr=False) + h_end = relu_a.shape[2] + w_end = relu_a.shape[3] + slice_a = fluid.layers.slice(relu_a, [2, 3], [1, 1], [h_end, w_end]) + conv2d_b = fluid.layers.conv2d( + slice_a, + C_out // 2, + 1, + 2, + param_attr=ParamAttr( + initializer=Xavier( + uniform=False, fan_in=0), + name=name + 'conv_2.weight'), + bias_attr=False) + out = fluid.layers.concat([conv2d_a, conv2d_b], axis=1) + if affine: + out = fluid.layers.batch_norm( + out, + param_attr=ParamAttr( + initializer=Constant(1.), name=name + 'bn.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), name=name + 'bn.bias'), + moving_mean_name=name + 'bn.running_mean', + moving_variance_name=name + 'bn.running_var') + else: + out = fluid.layers.batch_norm( + out, + param_attr=ParamAttr( + initializer=Constant(1.), + learning_rate=0., + name=name + 'bn.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), + learning_rate=0., + name=name + 'bn.bias'), + moving_mean_name=name + 'bn.running_mean', + moving_variance_name=name + 'bn.running_var') + return out + + +class Cell(): + def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, + reduction_prev): + + if reduction_prev: + self.preprocess0 = functools.partial(FactorizedReduce, C_out=C) + else: + self.preprocess0 = functools.partial( + ReLUConvBN, C_out=C, kernel_size=1, stride=1, padding=0) + self.preprocess1 = functools.partial( + ReLUConvBN, C_out=C, kernel_size=1, stride=1, padding=0) + if reduction: + op_names, indices = 
zip(*genotype.reduce) + concat = genotype.reduce_concat + else: + op_names, indices = zip(*genotype.normal) + concat = genotype.normal_concat + print(op_names, indices, concat, reduction) + self._compile(C, op_names, indices, concat, reduction) + + def _compile(self, C, op_names, indices, concat, reduction): + assert len(op_names) == len(indices) + self._steps = len(op_names) // 2 + self._concat = concat + self.multiplier = len(concat) + + self._ops = [] + for name, index in zip(op_names, indices): + stride = 2 if reduction and index < 2 else 1 + op = functools.partial(OPS[name], C=C, stride=stride, affine=True) + self._ops += [op] + self._indices = indices + + def forward(self, s0, s1, drop_prob, is_train, name): + self.training = is_train + preprocess0_name = name + 'preprocess0.' + preprocess1_name = name + 'preprocess1.' + s0 = self.preprocess0(s0, name=preprocess0_name) + s1 = self.preprocess1(s1, name=preprocess1_name) + out = [s0, s1] + for i in range(self._steps): + h1 = out[self._indices[2 * i]] + h2 = out[self._indices[2 * i + 1]] + op1 = self._ops[2 * i] + op2 = self._ops[2 * i + 1] + h3 = op1(h1, name=name + '_ops.' + str(2 * i) + '.') + h4 = op2(h2, name=name + '_ops.' 
+ str(2 * i + 1) + '.') + if self.training and drop_prob > 0.: + if h3 != h1: + h3 = fluid.layers.dropout( + h3, + drop_prob, + dropout_implementation='upscale_in_train') + if h4 != h2: + h4 = fluid.layers.dropout( + h4, + drop_prob, + dropout_implementation='upscale_in_train') + s = h3 + h4 + out += [s] + return fluid.layers.concat([out[i] for i in self._concat], axis=1) + + +def AuxiliaryHeadImageNet(input, num_classes, aux_name='auxiliary_head'): + relu_a = fluid.layers.relu(input) + pool_a = fluid.layers.pool2d(relu_a, 5, 'avg', 2) + conv2d_a = fluid.layers.conv2d( + pool_a, 128, 1, name=aux_name + '.features.2', bias_attr=False) + bn_a_name = aux_name + '.features.3' + bn_a = fluid.layers.batch_norm( + conv2d_a, + act='relu', + name=bn_a_name, + param_attr=ParamAttr( + initializer=Constant(1.), name=bn_a_name + '.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), name=bn_a_name + '.bias'), + moving_mean_name=bn_a_name + '.running_mean', + moving_variance_name=bn_a_name + '.running_var') + conv2d_b = fluid.layers.conv2d( + bn_a, 768, 2, name=aux_name + '.features.5', bias_attr=False) + bn_b_name = aux_name + '.features.6' + bn_b = fluid.layers.batch_norm( + conv2d_b, + act='relu', + name=bn_b_name, + param_attr=ParamAttr( + initializer=Constant(1.), name=bn_b_name + '.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), name=bn_b_name + '.bias'), + moving_mean_name=bn_b_name + '.running_mean', + moving_variance_name=bn_b_name + '.running_var') + pool_b = fluid.layers.adaptive_pool2d(bn_b, (1, 1), "avg") + fc_name = aux_name + '.classifier' + fc = fluid.layers.fc(pool_b, + num_classes, + name=fc_name, + param_attr=ParamAttr( + initializer=Normal(scale=1e-3), + name=fc_name + '.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), name=fc_name + '.bias')) + return fc + + +def StemConv0(input, C_out): + conv_a = fluid.layers.conv2d( + input, C_out // 2, 3, stride=2, padding=1, bias_attr=False) + bn_a = fluid.layers.batch_norm( + conv_a, + 
act='relu', + param_attr=ParamAttr( + initializer=Constant(1.), name='stem0.1.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), name='stem0.1.bias'), + moving_mean_name='stem0.1.running_mean', + moving_variance_name='stem0.1.running_var') + + conv_b = fluid.layers.conv2d( + bn_a, C_out, 3, stride=2, padding=1, bias_attr=False) + bn_b = fluid.layers.batch_norm( + conv_b, + param_attr=ParamAttr( + initializer=Constant(1.), name='stem0.3.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), name='stem0.3.bias'), + moving_mean_name='stem0.3.running_mean', + moving_variance_name='stem0.3.running_var') + return bn_b + + +def StemConv1(input, C_out): + relu_a = fluid.layers.relu(input) + conv_a = fluid.layers.conv2d( + relu_a, C_out, 3, stride=2, padding=1, bias_attr=False) + bn_a = fluid.layers.batch_norm( + conv_a, + param_attr=ParamAttr( + initializer=Constant(1.), name='stem1.1.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), name='stem1.1.bias'), + moving_mean_name='stem1.1.running_mean', + moving_variance_name='stem1.1.running_var') + return bn_a + + +class NetworkImageNet(object): + def __init__(self, arch='DARTS_6M'): + self.class_num = 1000 + self.init_channel = 48 + self._layers = 14 + self._auxiliary = False + self.drop_path_prob = 0 + genotype = arch_dict[arch] + + C = self.init_channel + layers = self._layers + C_prev_prev, C_prev, C_curr = C, C, C + self.cells = [] + reduction_prev = True + for i in range(layers): + if i in [layers // 3, 2 * layers // 3]: + C_curr *= 2 + reduction = True + else: + reduction = False + cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, + reduction_prev) + reduction_prev = reduction + self.cells += [cell] + C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr + if i == 2 * layers // 3: + C_to_auxiliary = C_prev + + def net(self, input, class_dim=1000, is_train=True): + self.logits_aux = None + num_channel = self.init_channel + s0 = StemConv0(input, num_channel) + s1 = StemConv1(s0, 
num_channel) + for i, cell in enumerate(self.cells): + name = 'cells.' + str(i) + '.' + s0, s1 = s1, cell.forward(s0, s1, self.drop_path_prob, is_train, + name) + if i == int(2 * self._layers // 3): + if self._auxiliary and is_train: + self.logits_aux = AuxiliaryHeadImageNet(s1, self.class_num) + out = fluid.layers.adaptive_pool2d(s1, (1, 1), "avg") + self.logits = fluid.layers.fc(out, + size=self.class_num, + param_attr=ParamAttr( + initializer=Normal(scale=1e-4), + name='classifier.weight'), + bias_attr=ParamAttr( + initializer=Constant(0.), + name='classifier.bias')) + return self.logits + + +def DARTS_GS_6M(): + return NetworkImageNet(arch='DARTS_GS_6M') + + +def DARTS_GS_4M(): + return NetworkImageNet(arch='DARTS_GS_4M') diff --git a/ppcls/modeling/architectures/densenet.py b/ppcls/modeling/architectures/densenet.py new file mode 100644 index 000000000..e8ba3818f --- /dev/null +++ b/ppcls/modeling/architectures/densenet.py @@ -0,0 +1,204 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +import paddle +import paddle.fluid as fluid +from paddle.fluid.param_attr import ParamAttr + +__all__ = [ + "DenseNet", "DenseNet121", "DenseNet161", "DenseNet169", "DenseNet201", + "DenseNet264" +] + + +class DenseNet(): + def __init__(self, layers=121): + self.layers = layers + + def net(self, input, bn_size=4, dropout=0, class_dim=1000): + layers = self.layers + supported_layers = [121, 161, 169, 201, 264] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format(supported_layers, layers) + densenet_spec = { + 121: (64, 32, [6, 12, 24, 16]), + 161: (96, 48, [6, 12, 36, 24]), + 169: (64, 32, [6, 12, 32, 32]), + 201: (64, 32, [6, 12, 48, 32]), + 264: (64, 32, [6, 12, 64, 48]) + } + + num_init_features, growth_rate, block_config = densenet_spec[layers] + conv = fluid.layers.conv2d( + input=input, + num_filters=num_init_features, + filter_size=7, + stride=2, + padding=3, + act=None, + param_attr=ParamAttr(name="conv1_weights"), + bias_attr=False) + conv = fluid.layers.batch_norm( + input=conv, + act='relu', + param_attr=ParamAttr(name='conv1_bn_scale'), + bias_attr=ParamAttr(name='conv1_bn_offset'), + moving_mean_name='conv1_bn_mean', + moving_variance_name='conv1_bn_variance') + conv = fluid.layers.pool2d( + input=conv, + pool_size=3, + pool_stride=2, + pool_padding=1, + pool_type='max') + num_features = num_init_features + for i, num_layers in enumerate(block_config): + conv = self.make_dense_block( + conv, + num_layers, + bn_size, + growth_rate, + dropout, + name='conv' + str(i + 2)) + num_features = num_features + num_layers * growth_rate + if i != len(block_config) - 1: + conv = self.make_transition( + conv, num_features // 2, name='conv' + str(i + 2) + '_blk') + num_features = num_features // 2 + conv = fluid.layers.batch_norm( + input=conv, + act='relu', + 
param_attr=ParamAttr(name='conv5_blk_bn_scale'), + bias_attr=ParamAttr(name='conv5_blk_bn_offset'), + moving_mean_name='conv5_blk_bn_mean', + moving_variance_name='conv5_blk_bn_variance') + conv = fluid.layers.pool2d( + input=conv, pool_type='avg', global_pooling=True) + stdv = 1.0 / math.sqrt(conv.shape[1] * 1.0) + out = fluid.layers.fc( + input=conv, + size=class_dim, + param_attr=fluid.param_attr.ParamAttr( + initializer=fluid.initializer.Uniform(-stdv, stdv), + name="fc_weights"), + bias_attr=ParamAttr(name='fc_offset')) + return out + + def make_transition(self, input, num_output_features, name=None): + bn_ac = fluid.layers.batch_norm( + input, + act='relu', + param_attr=ParamAttr(name=name + '_bn_scale'), + bias_attr=ParamAttr(name + '_bn_offset'), + moving_mean_name=name + '_bn_mean', + moving_variance_name=name + '_bn_variance') + + bn_ac_conv = fluid.layers.conv2d( + input=bn_ac, + num_filters=num_output_features, + filter_size=1, + stride=1, + act=None, + bias_attr=False, + param_attr=ParamAttr(name=name + "_weights")) + pool = fluid.layers.pool2d( + input=bn_ac_conv, pool_size=2, pool_stride=2, pool_type='avg') + return pool + + def make_dense_block(self, + input, + num_layers, + bn_size, + growth_rate, + dropout, + name=None): + conv = input + for layer in range(num_layers): + conv = self.make_dense_layer( + conv, + growth_rate, + bn_size, + dropout, + name=name + '_' + str(layer + 1)) + return conv + + def make_dense_layer(self, input, growth_rate, bn_size, dropout, + name=None): + bn_ac = fluid.layers.batch_norm( + input, + act='relu', + param_attr=ParamAttr(name=name + '_x1_bn_scale'), + bias_attr=ParamAttr(name + '_x1_bn_offset'), + moving_mean_name=name + '_x1_bn_mean', + moving_variance_name=name + '_x1_bn_variance') + bn_ac_conv = fluid.layers.conv2d( + input=bn_ac, + num_filters=bn_size * growth_rate, + filter_size=1, + stride=1, + act=None, + bias_attr=False, + param_attr=ParamAttr(name=name + "_x1_weights")) + bn_ac = fluid.layers.batch_norm( 
+ bn_ac_conv, + act='relu', + param_attr=ParamAttr(name=name + '_x2_bn_scale'), + bias_attr=ParamAttr(name + '_x2_bn_offset'), + moving_mean_name=name + '_x2_bn_mean', + moving_variance_name=name + '_x2_bn_variance') + bn_ac_conv = fluid.layers.conv2d( + input=bn_ac, + num_filters=growth_rate, + filter_size=3, + stride=1, + padding=1, + act=None, + bias_attr=False, + param_attr=ParamAttr(name=name + "_x2_weights")) + if dropout: + bn_ac_conv = fluid.layers.dropout( + x=bn_ac_conv, dropout_prob=dropout) + bn_ac_conv = fluid.layers.concat([input, bn_ac_conv], axis=1) + return bn_ac_conv + + +def DenseNet121(): + model = DenseNet(layers=121) + return model + + +def DenseNet161(): + model = DenseNet(layers=161) + return model + + +def DenseNet169(): + model = DenseNet(layers=169) + return model + + +def DenseNet201(): + model = DenseNet(layers=201) + return model + + +def DenseNet264(): + model = DenseNet(layers=264) + return model diff --git a/ppcls/modeling/architectures/dpn.py b/ppcls/modeling/architectures/dpn.py new file mode 100644 index 000000000..61f8f596a --- /dev/null +++ b/ppcls/modeling/architectures/dpn.py @@ -0,0 +1,337 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import numpy as np +import time +import sys +import math + +import paddle.fluid as fluid +from paddle.fluid.param_attr import ParamAttr + +__all__ = ["DPN", "DPN68", "DPN92", "DPN98", "DPN107", "DPN131"] + + +class DPN(object): + def __init__(self, layers=68): + self.layers = layers + + def net(self, input, class_dim=1000): + # get network args + args = self.get_net_args(self.layers) + bws = args['bw'] + inc_sec = args['inc_sec'] + rs = args['r'] + k_r = args['k_r'] + k_sec = args['k_sec'] + G = args['G'] + init_num_filter = args['init_num_filter'] + init_filter_size = args['init_filter_size'] + init_padding = args['init_padding'] + + ## define Dual Path Network + + # conv1 + conv1_x_1 = fluid.layers.conv2d( + input=input, + num_filters=init_num_filter, + filter_size=init_filter_size, + stride=2, + padding=init_padding, + groups=1, + act=None, + bias_attr=False, + name="conv1", + param_attr=ParamAttr(name="conv1_weights"), ) + + conv1_x_1 = fluid.layers.batch_norm( + input=conv1_x_1, + act='relu', + is_test=False, + name="conv1_bn", + param_attr=ParamAttr(name='conv1_bn_scale'), + bias_attr=ParamAttr('conv1_bn_offset'), + moving_mean_name='conv1_bn_mean', + moving_variance_name='conv1_bn_variance', ) + + convX_x_x = fluid.layers.pool2d( + input=conv1_x_1, + pool_size=3, + pool_stride=2, + pool_padding=1, + pool_type='max', + name="pool1") + + #conv2 - conv5 + match_list, num = [], 0 + for gc in range(4): + bw = bws[gc] + inc = inc_sec[gc] + R = (k_r * bw) // rs[gc] + if gc == 0: + _type1 = 'proj' + _type2 = 'normal' + match = 1 + else: + _type1 = 'down' + _type2 = 'normal' + match = match + k_sec[gc - 1] + match_list.append(match) + + convX_x_x = self.dual_path_factory( + convX_x_x, R, R, bw, inc, G, _type1, name="dpn" + str(match)) + for i_ly in range(2, k_sec[gc] + 1): + num += 1 + if num in match_list: + num += 1 + convX_x_x = 
self.dual_path_factory( + convX_x_x, R, R, bw, inc, G, _type2, name="dpn" + str(num)) + + conv5_x_x = fluid.layers.concat(convX_x_x, axis=1) + conv5_x_x = fluid.layers.batch_norm( + input=conv5_x_x, + act='relu', + is_test=False, + name="final_concat_bn", + param_attr=ParamAttr(name='final_concat_bn_scale'), + bias_attr=ParamAttr('final_concat_bn_offset'), + moving_mean_name='final_concat_bn_mean', + moving_variance_name='final_concat_bn_variance', ) + pool5 = fluid.layers.pool2d( + input=conv5_x_x, + pool_size=7, + pool_stride=1, + pool_padding=0, + pool_type='avg', ) + + stdv = 0.01 + fc6 = fluid.layers.fc( + input=pool5, + size=class_dim, + param_attr=ParamAttr( + initializer=fluid.initializer.Uniform(-stdv, stdv), + name='fc_weights'), + bias_attr=ParamAttr(name='fc_offset')) + + return fc6 + + def get_net_args(self, layers): + if layers == 68: + k_r = 128 + G = 32 + k_sec = [3, 4, 12, 3] + inc_sec = [16, 32, 32, 64] + bw = [64, 128, 256, 512] + r = [64, 64, 64, 64] + init_num_filter = 10 + init_filter_size = 3 + init_padding = 1 + elif layers == 92: + k_r = 96 + G = 32 + k_sec = [3, 4, 20, 3] + inc_sec = [16, 32, 24, 128] + bw = [256, 512, 1024, 2048] + r = [256, 256, 256, 256] + init_num_filter = 64 + init_filter_size = 7 + init_padding = 3 + elif layers == 98: + k_r = 160 + G = 40 + k_sec = [3, 6, 20, 3] + inc_sec = [16, 32, 32, 128] + bw = [256, 512, 1024, 2048] + r = [256, 256, 256, 256] + init_num_filter = 96 + init_filter_size = 7 + init_padding = 3 + elif layers == 107: + k_r = 200 + G = 50 + k_sec = [4, 8, 20, 3] + inc_sec = [20, 64, 64, 128] + bw = [256, 512, 1024, 2048] + r = [256, 256, 256, 256] + init_num_filter = 128 + init_filter_size = 7 + init_padding = 3 + elif layers == 131: + k_r = 160 + G = 40 + k_sec = [4, 8, 28, 3] + inc_sec = [16, 32, 32, 128] + bw = [256, 512, 1024, 2048] + r = [256, 256, 256, 256] + init_num_filter = 128 + init_filter_size = 7 + init_padding = 3 + else: + raise NotImplementedError + net_arg = { + 'k_r': k_r, + 'G': G, 
+ 'k_sec': k_sec, + 'inc_sec': inc_sec, + 'bw': bw, + 'r': r + } + net_arg['init_num_filter'] = init_num_filter + net_arg['init_filter_size'] = init_filter_size + net_arg['init_padding'] = init_padding + + return net_arg + + def dual_path_factory(self, + data, + num_1x1_a, + num_3x3_b, + num_1x1_c, + inc, + G, + _type='normal', + name=None): + kw = 3 + kh = 3 + pw = (kw - 1) // 2 + ph = (kh - 1) // 2 + + # type + if _type is 'proj': + key_stride = 1 + has_proj = True + if _type is 'down': + key_stride = 2 + has_proj = True + if _type is 'normal': + key_stride = 1 + has_proj = False + + # PROJ + if type(data) is list: + data_in = fluid.layers.concat([data[0], data[1]], axis=1) + else: + data_in = data + + if has_proj: + c1x1_w = self.bn_ac_conv( + data=data_in, + num_filter=(num_1x1_c + 2 * inc), + kernel=(1, 1), + pad=(0, 0), + stride=(key_stride, key_stride), + name=name + "_match") + data_o1, data_o2 = fluid.layers.split( + c1x1_w, + num_or_sections=[num_1x1_c, 2 * inc], + dim=1, + name=name + "_match_conv_Slice") + else: + data_o1 = data[0] + data_o2 = data[1] + + # MAIN + c1x1_a = self.bn_ac_conv( + data=data_in, + num_filter=num_1x1_a, + kernel=(1, 1), + pad=(0, 0), + name=name + "_conv1") + c3x3_b = self.bn_ac_conv( + data=c1x1_a, + num_filter=num_3x3_b, + kernel=(kw, kh), + pad=(pw, ph), + stride=(key_stride, key_stride), + num_group=G, + name=name + "_conv2") + c1x1_c = self.bn_ac_conv( + data=c3x3_b, + num_filter=(num_1x1_c + inc), + kernel=(1, 1), + pad=(0, 0), + name=name + "_conv3") + + c1x1_c1, c1x1_c2 = fluid.layers.split( + c1x1_c, + num_or_sections=[num_1x1_c, inc], + dim=1, + name=name + "_conv3_Slice") + + # OUTPUTS + summ = fluid.layers.elementwise_add( + x=data_o1, y=c1x1_c1, name=name + "_elewise") + dense = fluid.layers.concat( + [data_o2, c1x1_c2], axis=1, name=name + "_concat") + + return [summ, dense] + + def bn_ac_conv(self, + data, + num_filter, + kernel, + pad, + stride=(1, 1), + num_group=1, + name=None): + bn_ac = 
fluid.layers.batch_norm( + input=data, + act='relu', + is_test=False, + name=name + '.output.1', + param_attr=ParamAttr(name=name + '_bn_scale'), + bias_attr=ParamAttr(name + '_bn_offset'), + moving_mean_name=name + '_bn_mean', + moving_variance_name=name + '_bn_variance', ) + bn_ac_conv = fluid.layers.conv2d( + input=bn_ac, + num_filters=num_filter, + filter_size=kernel, + stride=stride, + padding=pad, + groups=num_group, + act=None, + bias_attr=False, + param_attr=ParamAttr(name=name + "_weights")) + return bn_ac_conv + + +def DPN68(): + model = DPN(layers=68) + return model + + +def DPN92(): + model = DPN(layers=92) + return model + + +def DPN98(): + model = DPN(layers=98) + return model + + +def DPN107(): + model = DPN(layers=107) + return model + + +def DPN131(): + model = DPN(layers=131) + return model diff --git a/ppcls/modeling/architectures/efficientnet.py b/ppcls/modeling/architectures/efficientnet.py new file mode 100644 index 000000000..082a14226 --- /dev/null +++ b/ppcls/modeling/architectures/efficientnet.py @@ -0,0 +1,616 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import re +import math +import copy + +import paddle.fluid as fluid + +from .layers import conv2d, init_batch_norm_layer, init_fc_layer + +__all__ = [ + 'EfficientNet', 'EfficientNetB0', 'EfficientNetB1', 'EfficientNetB2', + 'EfficientNetB3', 'EfficientNetB4', 'EfficientNetB5', 'EfficientNetB6', + 'EfficientNetB7' +] + +GlobalParams = collections.namedtuple('GlobalParams', [ + 'batch_norm_momentum', + 'batch_norm_epsilon', + 'dropout_rate', + 'num_classes', + 'width_coefficient', + 'depth_coefficient', + 'depth_divisor', + 'min_depth', + 'drop_connect_rate', +]) + +BlockArgs = collections.namedtuple('BlockArgs', [ + 'kernel_size', 'num_repeat', 'input_filters', 'output_filters', + 'expand_ratio', 'id_skip', 'stride', 'se_ratio' +]) + +GlobalParams.__new__.__defaults__ = (None, ) * len(GlobalParams._fields) +BlockArgs.__new__.__defaults__ = (None, ) * len(BlockArgs._fields) + + +def efficientnet_params(model_name): + """ Map EfficientNet model name to parameter coefficients. """ + params_dict = { + # Coefficients: width,depth,resolution,dropout + 'efficientnet-b0': (1.0, 1.0, 224, 0.2), + 'efficientnet-b1': (1.0, 1.1, 240, 0.2), + 'efficientnet-b2': (1.1, 1.2, 260, 0.3), + 'efficientnet-b3': (1.2, 1.4, 300, 0.3), + 'efficientnet-b4': (1.4, 1.8, 380, 0.4), + 'efficientnet-b5': (1.6, 2.2, 456, 0.4), + 'efficientnet-b6': (1.8, 2.6, 528, 0.5), + 'efficientnet-b7': (2.0, 3.1, 600, 0.5), + } + return params_dict[model_name] + + +def efficientnet(width_coefficient=None, + depth_coefficient=None, + dropout_rate=0.2, + drop_connect_rate=0.2): + """ Get block arguments according to parameter and coefficients. 
""" + blocks_args = [ + 'r1_k3_s11_e1_i32_o16_se0.25', + 'r2_k3_s22_e6_i16_o24_se0.25', + 'r2_k5_s22_e6_i24_o40_se0.25', + 'r3_k3_s22_e6_i40_o80_se0.25', + 'r3_k5_s11_e6_i80_o112_se0.25', + 'r4_k5_s22_e6_i112_o192_se0.25', + 'r1_k3_s11_e6_i192_o320_se0.25', + ] + blocks_args = BlockDecoder.decode(blocks_args) + + global_params = GlobalParams( + batch_norm_momentum=0.99, + batch_norm_epsilon=1e-3, + dropout_rate=dropout_rate, + drop_connect_rate=drop_connect_rate, + num_classes=1000, + width_coefficient=width_coefficient, + depth_coefficient=depth_coefficient, + depth_divisor=8, + min_depth=None) + + return blocks_args, global_params + + +def get_model_params(model_name, override_params): + """ Get the block args and global params for a given model """ + if model_name.startswith('efficientnet'): + w, d, _, p = efficientnet_params(model_name) + blocks_args, global_params = efficientnet( + width_coefficient=w, depth_coefficient=d, dropout_rate=p) + else: + raise NotImplementedError('model name is not pre-defined: %s' % + model_name) + if override_params: + global_params = global_params._replace(**override_params) + return blocks_args, global_params + + +def round_filters(filters, global_params): + """ Calculate and round number of filters based on depth multiplier. """ + multiplier = global_params.width_coefficient + if not multiplier: + return filters + divisor = global_params.depth_divisor + min_depth = global_params.min_depth + filters *= multiplier + min_depth = min_depth or divisor + new_filters = max(min_depth, + int(filters + divisor / 2) // divisor * divisor) + if new_filters < 0.9 * filters: # prevent rounding by more than 10% + new_filters += divisor + return int(new_filters) + + +def round_repeats(repeats, global_params): + """ Round number of filters based on depth multiplier. 
""" + multiplier = global_params.depth_coefficient + if not multiplier: + return repeats + return int(math.ceil(multiplier * repeats)) + + +class EfficientNet(): + def __init__(self, + name='b0', + padding_type='SAME', + override_params=None, + is_test=False, + use_se=True): + valid_names = ['b' + str(i) for i in range(8)] + assert name in valid_names, 'efficient name should be in b0~b7' + model_name = 'efficientnet-' + name + self._blocks_args, self._global_params = get_model_params( + model_name, override_params) + self._bn_mom = self._global_params.batch_norm_momentum + self._bn_eps = self._global_params.batch_norm_epsilon + self.is_test = is_test + self.padding_type = padding_type + self.use_se = use_se + + def net(self, input, class_dim=1000, is_test=False): + + conv = self.extract_features(input, is_test=is_test) + + out_channels = round_filters(1280, self._global_params) + conv = self.conv_bn_layer( + conv, + num_filters=out_channels, + filter_size=1, + bn_act='swish', + bn_mom=self._bn_mom, + bn_eps=self._bn_eps, + padding_type=self.padding_type, + name='', + conv_name='_conv_head', + bn_name='_bn1') + + pool = fluid.layers.pool2d( + input=conv, pool_type='avg', global_pooling=True, use_cudnn=False) + + if self._global_params.dropout_rate: + pool = fluid.layers.dropout( + pool, + self._global_params.dropout_rate, + dropout_implementation='upscale_in_train') + + param_attr, bias_attr = init_fc_layer(class_dim, '_fc') + out = fluid.layers.fc(pool, + class_dim, + name='_fc', + param_attr=param_attr, + bias_attr=bias_attr) + return out + + def _drop_connect(self, inputs, prob, is_test): + if is_test: + return inputs + keep_prob = 1.0 - prob + random_tensor = keep_prob + fluid.layers.uniform_random_batch_size_like( + inputs, [-1, 1, 1, 1], min=0., max=1.) 
+ binary_tensor = fluid.layers.floor(random_tensor) + output = inputs / keep_prob * binary_tensor + return output + + def _expand_conv_norm(self, inputs, block_args, is_test, name=None): + # Expansion phase + oup = block_args.input_filters * block_args.expand_ratio # number of output channels + + if block_args.expand_ratio != 1: + conv = self.conv_bn_layer( + inputs, + num_filters=oup, + filter_size=1, + bn_act=None, + bn_mom=self._bn_mom, + bn_eps=self._bn_eps, + padding_type=self.padding_type, + name=name, + conv_name=name + '_expand_conv', + bn_name='_bn0') + + return conv + + def _depthwise_conv_norm(self, inputs, block_args, is_test, name=None): + k = block_args.kernel_size + s = block_args.stride + if isinstance(s, list) or isinstance(s, tuple): + s = s[0] + oup = block_args.input_filters * block_args.expand_ratio # number of output channels + + conv = self.conv_bn_layer( + inputs, + num_filters=oup, + filter_size=k, + stride=s, + num_groups=oup, + bn_act=None, + padding_type=self.padding_type, + bn_mom=self._bn_mom, + bn_eps=self._bn_eps, + name=name, + use_cudnn=False, + conv_name=name + '_depthwise_conv', + bn_name='_bn1') + + return conv + + def _project_conv_norm(self, inputs, block_args, is_test, name=None): + final_oup = block_args.output_filters + conv = self.conv_bn_layer( + inputs, + num_filters=final_oup, + filter_size=1, + bn_act=None, + padding_type=self.padding_type, + bn_mom=self._bn_mom, + bn_eps=self._bn_eps, + name=name, + conv_name=name + '_project_conv', + bn_name='_bn2') + return conv + + def conv_bn_layer(self, + input, + filter_size, + num_filters, + stride=1, + num_groups=1, + padding_type="SAME", + conv_act=None, + bn_act='swish', + use_cudnn=True, + use_bn=True, + bn_mom=0.9, + bn_eps=1e-05, + use_bias=False, + name=None, + conv_name=None, + bn_name=None): + conv = conv2d( + input=input, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + groups=num_groups, + act=conv_act, + padding_type=padding_type, + 
use_cudnn=use_cudnn, + name=conv_name, + use_bias=use_bias) + + if use_bn == False: + return conv + else: + bn_name = name + bn_name + param_attr, bias_attr = init_batch_norm_layer(bn_name) + return fluid.layers.batch_norm( + input=conv, + act=bn_act, + momentum=bn_mom, + epsilon=bn_eps, + name=bn_name, + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance', + param_attr=param_attr, + bias_attr=bias_attr) + + def _conv_stem_norm(self, inputs, is_test): + out_channels = round_filters(32, self._global_params) + bn = self.conv_bn_layer( + inputs, + num_filters=out_channels, + filter_size=3, + stride=2, + bn_act=None, + bn_mom=self._bn_mom, + padding_type=self.padding_type, + bn_eps=self._bn_eps, + name='', + conv_name='_conv_stem', + bn_name='_bn0') + + return bn + + def mb_conv_block(self, + inputs, + block_args, + is_test=False, + drop_connect_rate=None, + name=None): + # Expansion and Depthwise Convolution + oup = block_args.input_filters * block_args.expand_ratio # number of output channels + has_se = self.use_se and (block_args.se_ratio is not None) and ( + 0 < block_args.se_ratio <= 1) + id_skip = block_args.id_skip # skip connection and drop connect + conv = inputs + if block_args.expand_ratio != 1: + conv = fluid.layers.swish( + self._expand_conv_norm(conv, block_args, is_test, name)) + + conv = fluid.layers.swish( + self._depthwise_conv_norm(conv, block_args, is_test, name)) + + # Squeeze and Excitation + if has_se: + num_squeezed_channels = max( + 1, int(block_args.input_filters * block_args.se_ratio)) + conv = self.se_block(conv, num_squeezed_channels, oup, name) + + conv = self._project_conv_norm(conv, block_args, is_test, name) + + # Skip connection and drop connect + input_filters, output_filters = block_args.input_filters, block_args.output_filters + if id_skip and block_args.stride == 1 and input_filters == output_filters: + if drop_connect_rate: + conv = self._drop_connect(conv, drop_connect_rate, + self.is_test) + conv = 
fluid.layers.elementwise_add(conv, inputs) + + return conv + + def se_block(self, inputs, num_squeezed_channels, oup, name): + x_squeezed = fluid.layers.pool2d( + input=inputs, + pool_type='avg', + global_pooling=True, + use_cudnn=False) + x_squeezed = conv2d( + x_squeezed, + num_filters=num_squeezed_channels, + filter_size=1, + use_bias=True, + padding_type=self.padding_type, + act='swish', + name=name + '_se_reduce') + x_squeezed = conv2d( + x_squeezed, + num_filters=oup, + filter_size=1, + use_bias=True, + padding_type=self.padding_type, + name=name + '_se_expand') + se_out = inputs * fluid.layers.sigmoid(x_squeezed) + return se_out + + def extract_features(self, inputs, is_test): + """ Returns output of the final convolution layer """ + + conv = fluid.layers.swish( + self._conv_stem_norm( + inputs, is_test=is_test)) + + block_args_copy = copy.deepcopy(self._blocks_args) + idx = 0 + block_size = 0 + for block_arg in block_args_copy: + block_arg = block_arg._replace( + input_filters=round_filters(block_arg.input_filters, + self._global_params), + output_filters=round_filters(block_arg.output_filters, + self._global_params), + num_repeat=round_repeats(block_arg.num_repeat, + self._global_params)) + block_size += 1 + for _ in range(block_arg.num_repeat - 1): + block_size += 1 + + for block_args in self._blocks_args: + + # Update block input and output filters based on depth multiplier. + block_args = block_args._replace( + input_filters=round_filters(block_args.input_filters, + self._global_params), + output_filters=round_filters(block_args.output_filters, + self._global_params), + num_repeat=round_repeats(block_args.num_repeat, + self._global_params)) + + # The first block needs to take care of stride and filter size increase. + drop_connect_rate = self._global_params.drop_connect_rate + if drop_connect_rate: + drop_connect_rate *= float(idx) / block_size + conv = self.mb_conv_block(conv, block_args, is_test, + drop_connect_rate, + '_blocks.' 
+ str(idx) + '.') + + idx += 1 + if block_args.num_repeat > 1: + block_args = block_args._replace( + input_filters=block_args.output_filters, stride=1) + for _ in range(block_args.num_repeat - 1): + drop_connect_rate = self._global_params.drop_connect_rate + if drop_connect_rate: + drop_connect_rate *= float(idx) / block_size + conv = self.mb_conv_block(conv, block_args, is_test, + drop_connect_rate, + '_blocks.' + str(idx) + '.') + idx += 1 + + return conv + + def shortcut(self, input, data_residual): + return fluid.layers.elementwise_add(input, data_residual) + + +class BlockDecoder(object): + """ Block Decoder for readability, straight from the official TensorFlow repository """ + + @staticmethod + def _decode_block_string(block_string): + """ Gets a block through a string notation of arguments. """ + assert isinstance(block_string, str) + + ops = block_string.split('_') + options = {} + for op in ops: + splits = re.split(r'(\d.*)', op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + # Check stride + assert ( + ('s' in options and len(options['s']) == 1) or + (len(options['s']) == 2 and options['s'][0] == options['s'][1])) + + return BlockArgs( + kernel_size=int(options['k']), + num_repeat=int(options['r']), + input_filters=int(options['i']), + output_filters=int(options['o']), + expand_ratio=int(options['e']), + id_skip=('noskip' not in block_string), + se_ratio=float(options['se']) if 'se' in options else None, + stride=[int(options['s'][0])]) + + @staticmethod + def _encode_block_string(block): + """Encodes a block to a string.""" + args = [ + 'r%d' % block.num_repeat, 'k%d' % block.kernel_size, 's%d%d' % + (block.strides[0], block.strides[1]), 'e%s' % block.expand_ratio, + 'i%d' % block.input_filters, 'o%d' % block.output_filters + ] + if 0 < block.se_ratio <= 1: + args.append('se%s' % block.se_ratio) + if block.id_skip is False: + args.append('noskip') + return '_'.join(args) + + @staticmethod + def decode(string_list): + """ + 
Decodes a list of string notations to specify blocks inside the network. + + :param string_list: a list of strings, each string is a notation of block + :return: a list of BlockArgs namedtuples of block args + """ + assert isinstance(string_list, list) + blocks_args = [] + for block_string in string_list: + blocks_args.append(BlockDecoder._decode_block_string(block_string)) + return blocks_args + + @staticmethod + def encode(blocks_args): + """ + Encodes a list of BlockArgs to a list of strings. + + :param blocks_args: a list of BlockArgs namedtuples of block args + :return: a list of strings, each string is a notation of block + """ + block_strings = [] + for block in blocks_args: + block_strings.append(BlockDecoder._encode_block_string(block)) + return block_strings + + +def EfficientNetB0(is_test=False, + padding_type='SAME', + override_params=None, + use_se=True): + model = EfficientNet( + name='b0', + is_test=is_test, + padding_type=padding_type, + override_params=override_params, + use_se=use_se) + return model + + +def EfficientNetB1(is_test=False, + padding_type='SAME', + override_params=None, + use_se=True): + model = EfficientNet( + name='b1', + is_test=is_test, + padding_type=padding_type, + override_params=override_params, + use_se=use_se) + return model + + +def EfficientNetB2(is_test=False, + padding_type='SAME', + override_params=None, + use_se=True): + model = EfficientNet( + name='b2', + is_test=is_test, + padding_type=padding_type, + override_params=override_params, + use_se=use_se) + return model + + +def EfficientNetB3(is_test=False, + padding_type='SAME', + override_params=None, + use_se=True): + model = EfficientNet( + name='b3', + is_test=is_test, + padding_type=padding_type, + override_params=override_params, + use_se=use_se) + return model + + +def EfficientNetB4(is_test=False, + padding_type='SAME', + override_params=None, + use_se=True): + model = EfficientNet( + name='b4', + is_test=is_test, + padding_type=padding_type, + 
override_params=override_params, + use_se=use_se) + return model + + +def EfficientNetB5(is_test=False, + padding_type='SAME', + override_params=None, + use_se=True): + model = EfficientNet( + name='b5', + is_test=is_test, + padding_type=padding_type, + override_params=override_params, + use_se=use_se) + return model + + +def EfficientNetB6(is_test=False, + padding_type='SAME', + override_params=None, + use_se=True): + model = EfficientNet( + name='b6', + is_test=is_test, + padding_type=padding_type, + override_params=override_params, + use_se=use_se) + return model + + +def EfficientNetB7(is_test=False, + padding_type='SAME', + override_params=None, + use_se=True): + model = EfficientNet( + name='b7', + is_test=is_test, + padding_type=padding_type, + override_params=override_params, + use_se=use_se) + return model diff --git a/ppcls/modeling/architectures/googlenet.py b/ppcls/modeling/architectures/googlenet.py new file mode 100644 index 000000000..9cf97c65a --- /dev/null +++ b/ppcls/modeling/architectures/googlenet.py @@ -0,0 +1,237 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle +import paddle.fluid as fluid +from paddle.fluid.param_attr import ParamAttr + +__all__ = ['GoogLeNet'] + + +class GoogLeNet(): + def __init__(self): + + pass + + def conv_layer(self, + input, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + name=None): + channels = input.shape[1] + stdv = (3.0 / (filter_size**2 * channels))**0.5 + param_attr = ParamAttr( + initializer=fluid.initializer.Uniform(-stdv, stdv), + name=name + "_weights") + conv = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + act=act, + param_attr=param_attr, + bias_attr=False, + name=name) + return conv + + def xavier(self, channels, filter_size, name): + stdv = (3.0 / (filter_size**2 * channels))**0.5 + param_attr = ParamAttr( + initializer=fluid.initializer.Uniform(-stdv, stdv), + name=name + "_weights") + + return param_attr + + def inception(self, + input, + channels, + filter1, + filter3R, + filter3, + filter5R, + filter5, + proj, + name=None): + conv1 = self.conv_layer( + input=input, + num_filters=filter1, + filter_size=1, + stride=1, + act=None, + name="inception_" + name + "_1x1") + conv3r = self.conv_layer( + input=input, + num_filters=filter3R, + filter_size=1, + stride=1, + act=None, + name="inception_" + name + "_3x3_reduce") + conv3 = self.conv_layer( + input=conv3r, + num_filters=filter3, + filter_size=3, + stride=1, + act=None, + name="inception_" + name + "_3x3") + conv5r = self.conv_layer( + input=input, + num_filters=filter5R, + filter_size=1, + stride=1, + act=None, + name="inception_" + name + "_5x5_reduce") + conv5 = self.conv_layer( + input=conv5r, + num_filters=filter5, + filter_size=5, + stride=1, + act=None, + name="inception_" + name + "_5x5") + pool = fluid.layers.pool2d( + input=input, + pool_size=3, + 
pool_stride=1, + pool_padding=1, + pool_type='max') + convprj = fluid.layers.conv2d( + input=pool, + filter_size=1, + num_filters=proj, + stride=1, + padding=0, + name="inception_" + name + "_3x3_proj", + param_attr=ParamAttr( + name="inception_" + name + "_3x3_proj_weights"), + bias_attr=False) + cat = fluid.layers.concat(input=[conv1, conv3, conv5, convprj], axis=1) + cat = fluid.layers.relu(cat) + return cat + + def net(self, input, class_dim=1000): + conv = self.conv_layer( + input=input, + num_filters=64, + filter_size=7, + stride=2, + act=None, + name="conv1") + pool = fluid.layers.pool2d( + input=conv, pool_size=3, pool_type='max', pool_stride=2) + + conv = self.conv_layer( + input=pool, + num_filters=64, + filter_size=1, + stride=1, + act=None, + name="conv2_1x1") + conv = self.conv_layer( + input=conv, + num_filters=192, + filter_size=3, + stride=1, + act=None, + name="conv2_3x3") + pool = fluid.layers.pool2d( + input=conv, pool_size=3, pool_type='max', pool_stride=2) + + ince3a = self.inception(pool, 192, 64, 96, 128, 16, 32, 32, "ince3a") + ince3b = self.inception(ince3a, 256, 128, 128, 192, 32, 96, 64, + "ince3b") + pool3 = fluid.layers.pool2d( + input=ince3b, pool_size=3, pool_type='max', pool_stride=2) + + ince4a = self.inception(pool3, 480, 192, 96, 208, 16, 48, 64, "ince4a") + ince4b = self.inception(ince4a, 512, 160, 112, 224, 24, 64, 64, + "ince4b") + ince4c = self.inception(ince4b, 512, 128, 128, 256, 24, 64, 64, + "ince4c") + ince4d = self.inception(ince4c, 512, 112, 144, 288, 32, 64, 64, + "ince4d") + ince4e = self.inception(ince4d, 528, 256, 160, 320, 32, 128, 128, + "ince4e") + pool4 = fluid.layers.pool2d( + input=ince4e, pool_size=3, pool_type='max', pool_stride=2) + + ince5a = self.inception(pool4, 832, 256, 160, 320, 32, 128, 128, + "ince5a") + ince5b = self.inception(ince5a, 832, 384, 192, 384, 48, 128, 128, + "ince5b") + pool5 = fluid.layers.pool2d( + input=ince5b, pool_size=7, pool_type='avg', pool_stride=7) + dropout = 
fluid.layers.dropout(x=pool5, dropout_prob=0.4) + out = fluid.layers.fc(input=dropout, + size=class_dim, + act='softmax', + param_attr=self.xavier(1024, 1, "out"), + name="out", + bias_attr=ParamAttr(name="out_offset")) + + pool_o1 = fluid.layers.pool2d( + input=ince4a, pool_size=5, pool_type='avg', pool_stride=3) + conv_o1 = self.conv_layer( + input=pool_o1, + num_filters=128, + filter_size=1, + stride=1, + act=None, + name="conv_o1") + fc_o1 = fluid.layers.fc(input=conv_o1, + size=1024, + act='relu', + param_attr=self.xavier(2048, 1, "fc_o1"), + name="fc_o1", + bias_attr=ParamAttr(name="fc_o1_offset")) + dropout_o1 = fluid.layers.dropout(x=fc_o1, dropout_prob=0.7) + out1 = fluid.layers.fc(input=dropout_o1, + size=class_dim, + act='softmax', + param_attr=self.xavier(1024, 1, "out1"), + name="out1", + bias_attr=ParamAttr(name="out1_offset")) + + pool_o2 = fluid.layers.pool2d( + input=ince4d, pool_size=5, pool_type='avg', pool_stride=3) + conv_o2 = self.conv_layer( + input=pool_o2, + num_filters=128, + filter_size=1, + stride=1, + act=None, + name="conv_o2") + fc_o2 = fluid.layers.fc(input=conv_o2, + size=1024, + act='relu', + param_attr=self.xavier(2048, 1, "fc_o2"), + name="fc_o2", + bias_attr=ParamAttr(name="fc_o2_offset")) + dropout_o2 = fluid.layers.dropout(x=fc_o2, dropout_prob=0.7) + out2 = fluid.layers.fc(input=dropout_o2, + size=class_dim, + act='softmax', + param_attr=self.xavier(1024, 1, "out2"), + name="out2", + bias_attr=ParamAttr(name="out2_offset")) + + # last fc layer is "out" + return [out, out1, out2] diff --git a/ppcls/modeling/architectures/hrnet.py b/ppcls/modeling/architectures/hrnet.py new file mode 100644 index 000000000..32f06df6a --- /dev/null +++ b/ppcls/modeling/architectures/hrnet.py @@ -0,0 +1,459 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. 
+#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +import paddle +import paddle.fluid as fluid +from paddle.fluid.initializer import MSRA +from paddle.fluid.param_attr import ParamAttr + +__all__ = [ + "HRNet", "HRNet_W18_C", "HRNet_W30_C", "HRNet_W32_C", "HRNet_W40_C", + "HRNet_W44_C", "HRNet_W48_C", "HRNet_W60_C", "HRNet_W64_C", + "SE_HRNet_W18_C", "SE_HRNet_W30_C", "SE_HRNet_W32_C", "SE_HRNet_W40_C", + "SE_HRNet_W44_C", "SE_HRNet_W48_C", "SE_HRNet_W60_C", "SE_HRNet_W64_C" +] + + +class HRNet(): + def __init__(self, width=18, has_se=False): + self.width = width + self.has_se = has_se + self.channels = { + 18: [[18, 36], [18, 36, 72], [18, 36, 72, 144]], + 30: [[30, 60], [30, 60, 120], [30, 60, 120, 240]], + 32: [[32, 64], [32, 64, 128], [32, 64, 128, 256]], + 40: [[40, 80], [40, 80, 160], [40, 80, 160, 320]], + 44: [[44, 88], [44, 88, 176], [44, 88, 176, 352]], + 48: [[48, 96], [48, 96, 192], [48, 96, 192, 384]], + 60: [[60, 120], [60, 120, 240], [60, 120, 240, 480]], + 64: [[64, 128], [64, 128, 256], [64, 128, 256, 512]] + } + + def net(self, input, class_dim=1000): + width = self.width + channels_2, channels_3, channels_4 = self.channels[width] + num_modules_2, num_modules_3, num_modules_4 = 1, 4, 3 + + x = self.conv_bn_layer( + input=input, + filter_size=3, + num_filters=64, + stride=2, + if_act=True, + name='layer1_1') + x = self.conv_bn_layer( + input=x, + filter_size=3, + num_filters=64, + stride=2, + if_act=True, + name='layer1_2') + + la1 = self.layer1(x, name='layer2') + 
tr1 = self.transition_layer([la1], [256], channels_2, name='tr1') + st2 = self.stage(tr1, num_modules_2, channels_2, name='st2') + tr2 = self.transition_layer(st2, channels_2, channels_3, name='tr2') + st3 = self.stage(tr2, num_modules_3, channels_3, name='st3') + tr3 = self.transition_layer(st3, channels_3, channels_4, name='tr3') + st4 = self.stage(tr3, num_modules_4, channels_4, name='st4') + + #classification + last_cls = self.last_cls_out(x=st4, name='cls_head') + y = last_cls[0] + last_num_filters = [256, 512, 1024] + for i in range(3): + y = fluid.layers.elementwise_add( + last_cls[i + 1], + self.conv_bn_layer( + input=y, + filter_size=3, + num_filters=last_num_filters[i], + stride=2, + name='cls_head_add' + str(i + 1))) + + y = self.conv_bn_layer( + input=y, + filter_size=1, + num_filters=2048, + stride=1, + name='cls_head_last_conv') + pool = fluid.layers.pool2d( + input=y, pool_type='avg', global_pooling=True) + stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0) + out = fluid.layers.fc( + input=pool, + size=class_dim, + param_attr=ParamAttr( + name='fc_weights', + initializer=fluid.initializer.Uniform(-stdv, stdv)), + bias_attr=ParamAttr(name='fc_offset')) + return out + + def layer1(self, input, name=None): + conv = input + for i in range(4): + conv = self.bottleneck_block( + conv, + num_filters=64, + downsample=True if i == 0 else False, + name=name + '_' + str(i + 1)) + return conv + + def transition_layer(self, x, in_channels, out_channels, name=None): + num_in = len(in_channels) + num_out = len(out_channels) + out = [] + for i in range(num_out): + if i < num_in: + if in_channels[i] != out_channels[i]: + residual = self.conv_bn_layer( + x[i], + filter_size=3, + num_filters=out_channels[i], + name=name + '_layer_' + str(i + 1)) + out.append(residual) + else: + out.append(x[i]) + else: + residual = self.conv_bn_layer( + x[-1], + filter_size=3, + num_filters=out_channels[i], + stride=2, + name=name + '_layer_' + str(i + 1)) + out.append(residual) + return out + 
+ def branches(self, x, block_num, channels, name=None): + out = [] + for i in range(len(channels)): + residual = x[i] + for j in range(block_num): + residual = self.basic_block( + residual, + channels[i], + name=name + '_branch_layer_' + str(i + 1) + '_' + + str(j + 1)) + out.append(residual) + return out + + def fuse_layers(self, x, channels, multi_scale_output=True, name=None): + out = [] + for i in range(len(channels) if multi_scale_output else 1): + residual = x[i] + for j in range(len(channels)): + if j > i: + y = self.conv_bn_layer( + x[j], + filter_size=1, + num_filters=channels[i], + if_act=False, + name=name + '_layer_' + str(i + 1) + '_' + str(j + 1)) + y = fluid.layers.resize_nearest(input=y, scale=2**(j - i)) + residual = fluid.layers.elementwise_add( + x=residual, y=y, act=None) + elif j < i: + y = x[j] + for k in range(i - j): + if k == i - j - 1: + y = self.conv_bn_layer( + y, + filter_size=3, + num_filters=channels[i], + stride=2, + if_act=False, + name=name + '_layer_' + str(i + 1) + '_' + + str(j + 1) + '_' + str(k + 1)) + else: + y = self.conv_bn_layer( + y, + filter_size=3, + num_filters=channels[j], + stride=2, + name=name + '_layer_' + str(i + 1) + '_' + + str(j + 1) + '_' + str(k + 1)) + residual = fluid.layers.elementwise_add( + x=residual, y=y, act=None) + + residual = fluid.layers.relu(residual) + out.append(residual) + return out + + def high_resolution_module(self, + x, + channels, + multi_scale_output=True, + name=None): + residual = self.branches(x, 4, channels, name=name) + out = self.fuse_layers( + residual, + channels, + multi_scale_output=multi_scale_output, + name=name) + return out + + def stage(self, + x, + num_modules, + channels, + multi_scale_output=True, + name=None): + out = x + for i in range(num_modules): + if i == num_modules - 1 and multi_scale_output == False: + out = self.high_resolution_module( + out, + channels, + multi_scale_output=False, + name=name + '_' + str(i + 1)) + else: + out = self.high_resolution_module( 
+ out, channels, name=name + '_' + str(i + 1)) + + return out + + def last_cls_out(self, x, name=None): + out = [] + num_filters_list = [32, 64, 128, 256] + for i in range(len(x)): + out.append( + self.bottleneck_block( + input=x[i], + num_filters=num_filters_list[i], + name=name + 'conv_' + str(i + 1), + downsample=True)) + + return out + + def basic_block(self, + input, + num_filters, + stride=1, + downsample=False, + name=None): + residual = input + conv = self.conv_bn_layer( + input=input, + filter_size=3, + num_filters=num_filters, + stride=stride, + name=name + '_conv1') + conv = self.conv_bn_layer( + input=conv, + filter_size=3, + num_filters=num_filters, + if_act=False, + name=name + '_conv2') + if downsample: + residual = self.conv_bn_layer( + input=input, + filter_size=1, + num_filters=num_filters, + if_act=False, + name=name + '_downsample') + if self.has_se: + conv = self.squeeze_excitation( + input=conv, + num_channels=num_filters, + reduction_ratio=16, + name=name + '_fc') + return fluid.layers.elementwise_add(x=residual, y=conv, act='relu') + + def bottleneck_block(self, + input, + num_filters, + stride=1, + downsample=False, + name=None): + residual = input + conv = self.conv_bn_layer( + input=input, + filter_size=1, + num_filters=num_filters, + name=name + '_conv1') + conv = self.conv_bn_layer( + input=conv, + filter_size=3, + num_filters=num_filters, + stride=stride, + name=name + '_conv2') + conv = self.conv_bn_layer( + input=conv, + filter_size=1, + num_filters=num_filters * 4, + if_act=False, + name=name + '_conv3') + if downsample: + residual = self.conv_bn_layer( + input=input, + filter_size=1, + num_filters=num_filters * 4, + if_act=False, + name=name + '_downsample') + if self.has_se: + conv = self.squeeze_excitation( + input=conv, + num_channels=num_filters * 4, + reduction_ratio=16, + name=name + '_fc') + return fluid.layers.elementwise_add(x=residual, y=conv, act='relu') + + def squeeze_excitation(self, + input, + num_channels, + 
reduction_ratio, + name=None): + pool = fluid.layers.pool2d( + input=input, pool_size=0, pool_type='avg', global_pooling=True) + stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0) + squeeze = fluid.layers.fc( + input=pool, + size=num_channels / reduction_ratio, + act='relu', + param_attr=fluid.param_attr.ParamAttr( + initializer=fluid.initializer.Uniform(-stdv, stdv), + name=name + '_sqz_weights'), + bias_attr=ParamAttr(name=name + '_sqz_offset')) + stdv = 1.0 / math.sqrt(squeeze.shape[1] * 1.0) + excitation = fluid.layers.fc( + input=squeeze, + size=num_channels, + act='sigmoid', + param_attr=fluid.param_attr.ParamAttr( + initializer=fluid.initializer.Uniform(-stdv, stdv), + name=name + '_exc_weights'), + bias_attr=ParamAttr(name=name + '_exc_offset')) + scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0) + return scale + + def conv_bn_layer(self, + input, + filter_size, + num_filters, + stride=1, + padding=1, + num_groups=1, + if_act=True, + name=None): + conv = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=num_groups, + act=None, + param_attr=ParamAttr( + initializer=MSRA(), name=name + '_weights'), + bias_attr=False) + bn_name = name + '_bn' + bn = fluid.layers.batch_norm( + input=conv, + param_attr=ParamAttr( + name=bn_name + "_scale", + initializer=fluid.initializer.Constant(1.0)), + bias_attr=ParamAttr( + name=bn_name + "_offset", + initializer=fluid.initializer.Constant(0.0)), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + if if_act: + bn = fluid.layers.relu(bn) + return bn + + +def HRNet_W18_C(): + model = HRNet(width=18) + return model + + +def HRNet_W30_C(): + model = HRNet(width=30) + return model + + +def HRNet_W32_C(): + model = HRNet(width=32) + return model + + +def HRNet_W40_C(): + model = HRNet(width=40) + return model + + +def HRNet_W44_C(): + model = HRNet(width=44) + return model + + +def 
HRNet_W48_C(): + model = HRNet(width=48) + return model + + +def HRNet_W60_C(): + model = HRNet(width=60) + return model + + +def HRNet_W64_C(): + model = HRNet(width=64) + return model + + +def SE_HRNet_W18_C(): + model = HRNet(width=18, has_se=True) + return model + + +def SE_HRNet_W30_C(): + model = HRNet(width=30, has_se=True) + return model + + +def SE_HRNet_W32_C(): + model = HRNet(width=32, has_se=True) + return model + + +def SE_HRNet_W40_C(): + model = HRNet(width=40, has_se=True) + return model + + +def SE_HRNet_W44_C(): + model = HRNet(width=44, has_se=True) + return model + + +def SE_HRNet_W48_C(): + model = HRNet(width=48, has_se=True) + return model + + +def SE_HRNet_W60_C(): + model = HRNet(width=60, has_se=True) + return model + + +def SE_HRNet_W64_C(): + model = HRNet(width=64, has_se=True) + return model diff --git a/ppcls/modeling/architectures/inception_v4.py b/ppcls/modeling/architectures/inception_v4.py new file mode 100644 index 000000000..a81d9f49a --- /dev/null +++ b/ppcls/modeling/architectures/inception_v4.py @@ -0,0 +1,354 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +import paddle +import paddle.fluid as fluid +from paddle.fluid.param_attr import ParamAttr + +__all__ = ['InceptionV4'] + + +class InceptionV4(): + def __init__(self): + + pass + + def net(self, input, class_dim=1000): + x = self.inception_stem(input) + + for i in range(4): + x = self.inceptionA(x, name=str(i + 1)) + x = self.reductionA(x) + + for i in range(7): + x = self.inceptionB(x, name=str(i + 1)) + x = self.reductionB(x) + + for i in range(3): + x = self.inceptionC(x, name=str(i + 1)) + + pool = fluid.layers.pool2d( + input=x, pool_type='avg', global_pooling=True) + + drop = fluid.layers.dropout(x=pool, dropout_prob=0.2) + + stdv = 1.0 / math.sqrt(drop.shape[1] * 1.0) + out = fluid.layers.fc( + input=drop, + size=class_dim, + param_attr=ParamAttr( + initializer=fluid.initializer.Uniform(-stdv, stdv), + name="final_fc_weights"), + bias_attr=ParamAttr( + initializer=fluid.initializer.Uniform(-stdv, stdv), + name="final_fc_offset")) + return out + + def conv_bn_layer(self, + data, + num_filters, + filter_size, + stride=1, + padding=0, + groups=1, + act='relu', + name=None): + conv = fluid.layers.conv2d( + input=data, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=padding, + groups=groups, + act=None, + param_attr=ParamAttr(name=name + "_weights"), + bias_attr=False, + name=name) + bn_name = name + "_bn" + return fluid.layers.batch_norm( + input=conv, + act=act, + name=bn_name, + param_attr=ParamAttr(name=bn_name + "_scale"), + bias_attr=ParamAttr(name=bn_name + "_offset"), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + + def inception_stem(self, data, name=None): + conv = self.conv_bn_layer( + data, 32, 3, stride=2, act='relu', name="conv1_3x3_s2") + conv = self.conv_bn_layer(conv, 32, 3, act='relu', name="conv2_3x3_s1") + conv = self.conv_bn_layer( + conv, 
64, 3, padding=1, act='relu', name="conv3_3x3_s1") + + pool1 = fluid.layers.pool2d( + input=conv, pool_size=3, pool_stride=2, pool_type='max') + conv2 = self.conv_bn_layer( + conv, 96, 3, stride=2, act='relu', name="inception_stem1_3x3_s2") + concat = fluid.layers.concat([pool1, conv2], axis=1) + + conv1 = self.conv_bn_layer( + concat, 64, 1, act='relu', name="inception_stem2_3x3_reduce") + conv1 = self.conv_bn_layer( + conv1, 96, 3, act='relu', name="inception_stem2_3x3") + + conv2 = self.conv_bn_layer( + concat, 64, 1, act='relu', name="inception_stem2_1x7_reduce") + conv2 = self.conv_bn_layer( + conv2, + 64, (7, 1), + padding=(3, 0), + act='relu', + name="inception_stem2_1x7") + conv2 = self.conv_bn_layer( + conv2, + 64, (1, 7), + padding=(0, 3), + act='relu', + name="inception_stem2_7x1") + conv2 = self.conv_bn_layer( + conv2, 96, 3, act='relu', name="inception_stem2_3x3_2") + + concat = fluid.layers.concat([conv1, conv2], axis=1) + + conv1 = self.conv_bn_layer( + concat, + 192, + 3, + stride=2, + act='relu', + name="inception_stem3_3x3_s2") + pool1 = fluid.layers.pool2d( + input=concat, pool_size=3, pool_stride=2, pool_type='max') + + concat = fluid.layers.concat([conv1, pool1], axis=1) + + return concat + + def inceptionA(self, data, name=None): + pool1 = fluid.layers.pool2d( + input=data, pool_size=3, pool_padding=1, pool_type='avg') + conv1 = self.conv_bn_layer( + pool1, 96, 1, act='relu', name="inception_a" + name + "_1x1") + + conv2 = self.conv_bn_layer( + data, 96, 1, act='relu', name="inception_a" + name + "_1x1_2") + + conv3 = self.conv_bn_layer( + data, 64, 1, act='relu', name="inception_a" + name + "_3x3_reduce") + conv3 = self.conv_bn_layer( + conv3, + 96, + 3, + padding=1, + act='relu', + name="inception_a" + name + "_3x3") + + conv4 = self.conv_bn_layer( + data, + 64, + 1, + act='relu', + name="inception_a" + name + "_3x3_2_reduce") + conv4 = self.conv_bn_layer( + conv4, + 96, + 3, + padding=1, + act='relu', + name="inception_a" + name + "_3x3_2") 
    def reductionA(self, data, name=None):
        """Reduction-A block of InceptionV4.

        Downsamples the feature map with three parallel branches —
        max-pool, a strided 3x3 conv, and a 1x1 -> 3x3 -> strided-3x3
        chain — concatenated along the channel axis.
        """
        # Branch 1: parameter-free 3x3 max pooling, stride 2.
        pool1 = fluid.layers.pool2d(
            input=data, pool_size=3, pool_stride=2, pool_type='max')

        # Branch 2: single strided 3x3 conv.
        conv2 = self.conv_bn_layer(
            data, 384, 3, stride=2, act='relu', name="reduction_a_3x3")

        # Branch 3: 1x1 reduce -> 3x3 -> strided 3x3.
        conv3 = self.conv_bn_layer(
            data, 192, 1, act='relu', name="reduction_a_3x3_2_reduce")
        conv3 = self.conv_bn_layer(
            conv3, 224, 3, padding=1, act='relu', name="reduction_a_3x3_2")
        conv3 = self.conv_bn_layer(
            conv3, 256, 3, stride=2, act='relu', name="reduction_a_3x3_3")

        concat = fluid.layers.concat([pool1, conv2, conv3], axis=1)

        return concat

    def inceptionB(self, data, name=None):
        """Inception-B block of InceptionV4.

        Four parallel branches concatenated on the channel axis. `name`
        indexes the block instance and is embedded in every parameter
        name, so it must be unique per call site.
        """
        # Branch 1: 3x3 avg pool (stride 1, padded) followed by 1x1 conv.
        pool1 = fluid.layers.pool2d(
            input=data, pool_size=3, pool_padding=1, pool_type='avg')
        conv1 = self.conv_bn_layer(
            pool1, 128, 1, act='relu', name="inception_b" + name + "_1x1")

        # Branch 2: plain 1x1 conv.
        conv2 = self.conv_bn_layer(
            data, 384, 1, act='relu', name="inception_b" + name + "_1x1_2")

        # Branch 3: 1x1 reduce -> 1x7 -> 7x1 (factorized 7x7 conv).
        conv3 = self.conv_bn_layer(
            data,
            192,
            1,
            act='relu',
            name="inception_b" + name + "_1x7_reduce")
        conv3 = self.conv_bn_layer(
            conv3,
            224, (1, 7),
            padding=(0, 3),
            act='relu',
            name="inception_b" + name + "_1x7")
        conv3 = self.conv_bn_layer(
            conv3,
            256, (7, 1),
            padding=(3, 0),
            act='relu',
            name="inception_b" + name + "_7x1")

        # Branch 4: 1x1 reduce -> two factorized 7x7 convs
        # (1x7 -> 7x1 -> 1x7 -> 7x1).
        conv4 = self.conv_bn_layer(
            data,
            192,
            1,
            act='relu',
            name="inception_b" + name + "_7x1_2_reduce")
        conv4 = self.conv_bn_layer(
            conv4,
            192, (1, 7),
            padding=(0, 3),
            act='relu',
            name="inception_b" + name + "_1x7_2")
        conv4 = self.conv_bn_layer(
            conv4,
            224, (7, 1),
            padding=(3, 0),
            act='relu',
            name="inception_b" + name + "_7x1_2")
        conv4 = self.conv_bn_layer(
            conv4,
            224, (1, 7),
            padding=(0, 3),
            act='relu',
            name="inception_b" + name + "_1x7_3")
        conv4 = self.conv_bn_layer(
            conv4,
            256, (7, 1),
            padding=(3, 0),
            act='relu',
            name="inception_b" + name + "_7x1_3")

        concat = fluid.layers.concat([conv1, conv2, conv3, conv4], axis=1)

        return concat

    def reductionB(self, data, name=None):
        """Reduction-B block of InceptionV4.

        Downsamples 17x17 -> 8x8 with a max-pool branch, a
        1x1 -> strided-3x3 branch, and a 1x1 -> 1x7 -> 7x1 -> strided-3x3
        branch, concatenated on the channel axis.
        """
        # Branch 1: parameter-free 3x3 max pooling, stride 2.
        pool1 = fluid.layers.pool2d(
            input=data, pool_size=3, pool_stride=2, pool_type='max')

        # Branch 2: 1x1 reduce -> strided 3x3.
        conv2 = self.conv_bn_layer(
            data, 192, 1, act='relu', name="reduction_b_3x3_reduce")
        conv2 = self.conv_bn_layer(
            conv2, 192, 3, stride=2, act='relu', name="reduction_b_3x3")

        # Branch 3: 1x1 reduce -> factorized 7x7 -> strided 3x3.
        conv3 = self.conv_bn_layer(
            data, 256, 1, act='relu', name="reduction_b_1x7_reduce")
        conv3 = self.conv_bn_layer(
            conv3,
            256, (1, 7),
            padding=(0, 3),
            act='relu',
            name="reduction_b_1x7")
        conv3 = self.conv_bn_layer(
            conv3,
            320, (7, 1),
            padding=(3, 0),
            act='relu',
            name="reduction_b_7x1")
        conv3 = self.conv_bn_layer(
            conv3, 320, 3, stride=2, act='relu', name="reduction_b_3x3_2")

        concat = fluid.layers.concat([pool1, conv2, conv3], axis=1)

        return concat

    def inceptionC(self, data, name=None):
        """Inception-C block of InceptionV4.

        Four branches; branches 3 and 4 each fork into a 1x3 and a 3x1
        conv whose outputs are concatenated separately, yielding six
        tensors in the final channel-axis concat.
        """
        # Branch 1: 3x3 avg pool (stride 1, padded) followed by 1x1 conv.
        pool1 = fluid.layers.pool2d(
            input=data, pool_size=3, pool_padding=1, pool_type='avg')
        conv1 = self.conv_bn_layer(
            pool1, 256, 1, act='relu', name="inception_c" + name + "_1x1")

        # Branch 2: plain 1x1 conv.
        conv2 = self.conv_bn_layer(
            data, 256, 1, act='relu', name="inception_c" + name + "_1x1_2")

        # Branch 3: 1x1 reduce, then parallel 1x3 and 3x1 convs.
        conv3 = self.conv_bn_layer(
            data, 384, 1, act='relu', name="inception_c" + name + "_1x1_3")
        conv3_1 = self.conv_bn_layer(
            conv3,
            256, (1, 3),
            padding=(0, 1),
            act='relu',
            name="inception_c" + name + "_1x3")
        conv3_2 = self.conv_bn_layer(
            conv3,
            256, (3, 1),
            padding=(1, 0),
            act='relu',
            name="inception_c" + name + "_3x1")

        # Branch 4: 1x1 reduce -> 1x3 -> 3x1, then parallel 1x3 and 3x1.
        conv4 = self.conv_bn_layer(
            data, 384, 1, act='relu', name="inception_c" + name + "_1x1_4")
        conv4 = self.conv_bn_layer(
            conv4,
            448, (1, 3),
            padding=(0, 1),
            act='relu',
            name="inception_c" + name + "_1x3_2")
        conv4 = self.conv_bn_layer(
            conv4,
            512, (3, 1),
            padding=(1, 0),
            act='relu',
            name="inception_c" + name + "_3x1_2")
        conv4_1 = self.conv_bn_layer(
            conv4,
            256, (1, 3),
            padding=(0, 1),
            act='relu',
            name="inception_c" + name + "_1x3_3")
        conv4_2 = self.conv_bn_layer(
            conv4,
            256, (3, 1),
            padding=(1, 0),
            act='relu',
            name="inception_c" + name + "_3x1_3")

        concat = fluid.layers.concat(
            [conv1, conv2, conv3_1, conv3_2, conv4_1, conv4_2], axis=1)

        return concat
def initial_type(name,
                 input,
                 op_type,
                 fan_out,
                 init="google",
                 use_bias=False,
                 filter_size=0,
                 stddev=0.02):
    """Build (param_attr, bias_attr) for a conv/deconv/fc layer.

    `init` selects the weight initializer:
      * "kaiming": uniform(-bound, bound) with bound = 1/sqrt(fan_in),
        where fan_in depends on `op_type` ('conv', 'deconv', or fc);
      * "google":  normal(0, sqrt(2/n)) with n = filter_size^2 * fan_out;
      * otherwise: normal(0, stddev).

    `bias_attr` is False (layer gets no bias) unless `use_bias` is set.
    Parameters are named `name + "_weights"` / `name + "_offset"`.
    """
    if init == "kaiming":
        if op_type == 'conv':
            fan_in = input.shape[1] * filter_size * filter_size
        elif op_type == 'deconv':
            fan_in = fan_out * filter_size * filter_size
        else:
            # Fully-connected: flatten all non-batch dims if 4-D input.
            if len(input.shape) > 2:
                fan_in = input.shape[1] * input.shape[2] * input.shape[3]
            else:
                fan_in = input.shape[1]
        bound = 1 / math.sqrt(fan_in)
        param_attr = fluid.ParamAttr(
            name=name + "_weights",
            initializer=fluid.initializer.Uniform(
                low=-bound, high=bound))
        if use_bias:
            bias_attr = fluid.ParamAttr(
                name=name + '_offset',
                initializer=fluid.initializer.Uniform(
                    low=-bound, high=bound))
        else:
            bias_attr = False
    elif init == 'google':
        n = filter_size * filter_size * fan_out
        param_attr = fluid.ParamAttr(
            name=name + "_weights",
            initializer=fluid.initializer.NormalInitializer(
                loc=0.0, scale=math.sqrt(2.0 / n)))
        if use_bias:
            bias_attr = fluid.ParamAttr(
                name=name + "_offset",
                initializer=fluid.initializer.Constant(0.0))
        else:
            bias_attr = False
    else:
        param_attr = fluid.ParamAttr(
            name=name + "_weights",
            initializer=fluid.initializer.NormalInitializer(
                loc=0.0, scale=stddev))
        if use_bias:
            bias_attr = fluid.ParamAttr(
                name=name + "_offset",
                initializer=fluid.initializer.Constant(0.0))
        else:
            bias_attr = False
    return param_attr, bias_attr


def cal_padding(img_size, stride, filter_size, dilation=1):
    """Calculate TensorFlow-style 'SAME' padding for one spatial dim.

    Returns (pad_before, pad_after); when the total padding is odd the
    extra pixel goes on the 'after' side.
    """
    if img_size % stride == 0:
        out_size = max(filter_size - stride, 0)
    else:
        out_size = max(filter_size - (img_size % stride), 0)
    return out_size // 2, out_size - out_size // 2


def init_batch_norm_layer(name="batch_norm"):
    """Return (param_attr, bias_attr) for a batch-norm layer: scale
    initialized to 1, offset to 0, named `name + '_scale'/'_offset'`."""
    param_attr = fluid.ParamAttr(
        name=name + '_scale', initializer=fluid.initializer.Constant(1.0))
    bias_attr = fluid.ParamAttr(
        name=name + '_offset',
        initializer=fluid.initializer.Constant(value=0.0))
    return param_attr, bias_attr


def init_fc_layer(fout, name='fc'):
    """Return (param_attr, bias_attr) for an fc layer: weights uniform in
    [-1/sqrt(fout), 1/sqrt(fout)], offset 0."""
    n = fout  # fan-out
    init_range = 1.0 / math.sqrt(n)

    param_attr = fluid.ParamAttr(
        name=name + '_weights',
        initializer=fluid.initializer.UniformInitializer(
            low=-init_range, high=init_range))
    bias_attr = fluid.ParamAttr(
        name=name + '_offset',
        initializer=fluid.initializer.Constant(value=0.0))
    return param_attr, bias_attr


def norm_layer(input, norm_type='batch_norm', name=None):
    """Apply 'batch_norm' or 'instance_norm' to `input` (NCHW).

    Raises NotImplementedError for any other `norm_type`. For
    'instance_norm' the statistics are computed per-sample over the
    spatial dims (2, 3) with a learnable per-channel scale/offset.
    """
    if norm_type == 'batch_norm':
        # NOTE(review): this branch builds parameter names from `name`,
        # so callers are expected to pass a non-None name here.
        param_attr = fluid.ParamAttr(
            name=name + '_weights',
            initializer=fluid.initializer.Constant(1.0))
        bias_attr = fluid.ParamAttr(
            name=name + '_offset',
            initializer=fluid.initializer.Constant(value=0.0))
        return fluid.layers.batch_norm(
            input,
            param_attr=param_attr,
            bias_attr=bias_attr,
            moving_mean_name=name + '_mean',
            moving_variance_name=name + '_variance')

    elif norm_type == 'instance_norm':
        helper = fluid.layer_helper.LayerHelper("instance_norm", **locals())
        dtype = helper.input_dtype()
        epsilon = 1e-5
        mean = fluid.layers.reduce_mean(input, dim=[2, 3], keep_dim=True)
        var = fluid.layers.reduce_mean(
            fluid.layers.square(input - mean), dim=[2, 3], keep_dim=True)
        # Bug fix: scale_param/offset_param were only defined inside an
        # `if name is not None:` branch, raising NameError below when no
        # name was given. Fall back to framework-generated parameter
        # names (ParamAttr accepts name=None).
        scale_name = name + "_scale" if name is not None else None
        offset_name = name + "_offset" if name is not None else None
        scale_param = fluid.ParamAttr(
            name=scale_name,
            initializer=fluid.initializer.Constant(1.0),
            trainable=True)
        offset_param = fluid.ParamAttr(
            name=offset_name,
            initializer=fluid.initializer.Constant(0.0),
            trainable=True)
        scale = helper.create_parameter(
            attr=scale_param, shape=input.shape[1:2], dtype=dtype)
        offset = helper.create_parameter(
            attr=offset_param, shape=input.shape[1:2], dtype=dtype)

        tmp = fluid.layers.elementwise_mul(x=(input - mean), y=scale, axis=1)
        tmp = tmp / fluid.layers.sqrt(var + epsilon)
        tmp = fluid.layers.elementwise_add(tmp, offset, axis=1)
        return tmp
    else:
        raise NotImplementedError("norm type: [%s] is not supported" %
                                  norm_type)


def conv2d(input,
           num_filters=64,
           filter_size=7,
           stride=1,
           stddev=0.02,
           padding=0,
           groups=None,
           name="conv2d",
           norm=None,
           act=None,
           relufactor=0.0,
           use_bias=False,
           padding_type=None,
           initial="normal",
           use_cudnn=True):
    """Conv2d with optional normalization and activation.

    `padding_type` may be "SAME" (TF-style, asymmetric padding emulated
    by over-padding plus a crop), "VALID" (no padding), "DYNAMIC"
    (padding derived from filter size/stride), or None (use the raw
    `padding` value). `norm` is forwarded to norm_layer(); `act` is one
    of relu / leaky_relu / tanh / sigmoid / swish / None.
    """

    if padding != 0 and padding_type is not None:
        warnings.warn(
            'padding value and padding type are set in the same time, and the final padding width and padding height are computed by padding_type'
        )

    param_attr, bias_attr = initial_type(
        name=name,
        input=input,
        op_type='conv',
        fan_out=num_filters,
        init=initial,
        use_bias=use_bias,
        filter_size=filter_size,
        stddev=stddev)

    def get_padding(filter_size, stride=1, dilation=1):
        # Symmetric padding that keeps the spatial size for odd filters.
        padding = ((stride - 1) + dilation * (filter_size - 1)) // 2
        return padding

    need_crop = False
    if padding_type == "SAME":
        top_padding, bottom_padding = cal_padding(input.shape[2], stride,
                                                  filter_size)
        # Bug fix: width padding must be computed from the width
        # (shape[3] in NCHW); the original reused shape[2] (height),
        # which is wrong for non-square inputs.
        left_padding, right_padding = cal_padding(input.shape[3], stride,
                                                  filter_size)
        height_padding = bottom_padding
        width_padding = right_padding
        if top_padding != bottom_padding or left_padding != right_padding:
            # Asymmetric SAME padding: over-pad symmetrically, then crop
            # the first row/column after the conv.
            height_padding = top_padding + stride
            width_padding = left_padding + stride
            need_crop = True
        padding = [height_padding, width_padding]
    elif padding_type == "VALID":
        padding = [0, 0]
    elif padding_type == "DYNAMIC":
        padding = get_padding(filter_size, stride)
    # else: keep the caller-supplied `padding` as-is.

    conv = fluid.layers.conv2d(
        input,
        num_filters,
        filter_size,
        groups=groups,
        name=name,
        stride=stride,
        padding=padding,
        use_cudnn=use_cudnn,
        param_attr=param_attr,
        bias_attr=bias_attr)

    if need_crop:
        conv = conv[:, :, 1:, 1:]

    if norm is not None:
        conv = norm_layer(input=conv, norm_type=norm, name=name + "_norm")
    if act == 'relu':
        conv = fluid.layers.relu(conv, name=name + '_relu')
    elif act == 'leaky_relu':
        conv = fluid.layers.leaky_relu(
            conv, alpha=relufactor, name=name + '_leaky_relu')
    elif act == 'tanh':
        conv = fluid.layers.tanh(conv, name=name + '_tanh')
    elif act == 'sigmoid':
        conv = fluid.layers.sigmoid(conv, name=name + '_sigmoid')
    elif act == 'swish':
        conv = fluid.layers.swish(conv, name=name + '_swish')
    elif act is None:
        pass
    else:
        raise NotImplementedError("activation: [%s] is not support" % act)

    return conv
class MobileNetV1():
    """MobileNetV1 backbone (fluid static-graph definition).

    Every layer is a depthwise-separable unit (depthwise 3x3 conv
    followed by a pointwise 1x1 conv), each conv fused with batch norm.
    `scale` is the width multiplier applied to all channel counts.
    """

    def __init__(self, scale=1.0):
        self.scale = scale

    def net(self, input, class_dim=1000):
        """Build the network on `input` and return class_dim logits."""
        scale = self.scale

        # Stem: 3x3 stride-2 conv, 224x224 -> 112x112.
        out = self.conv_bn_layer(
            input,
            filter_size=3,
            channels=3,
            num_filters=int(32 * scale),
            stride=2,
            padding=1,
            name="conv1")

        # (filters_dw, filters_pw, groups, stride, layer name); strides
        # of 2 halve the spatial size: 112 -> 56 -> 28 -> 14 -> 7.
        dw_settings = [
            (32, 64, 32, 1, "conv2_1"),
            (64, 128, 64, 2, "conv2_2"),
            (128, 128, 128, 1, "conv3_1"),
            (128, 256, 128, 2, "conv3_2"),
            (256, 256, 256, 1, "conv4_1"),
            (256, 512, 256, 2, "conv4_2"),
        ]
        dw_settings += [(512, 512, 512, 1, "conv5_" + str(idx + 1))
                        for idx in range(5)]
        dw_settings += [
            (512, 1024, 512, 2, "conv5_6"),
            (1024, 1024, 1024, 1, "conv6"),
        ]

        for f1, f2, grp, srd, block_name in dw_settings:
            out = self.depthwise_separable(
                out,
                num_filters1=f1,
                num_filters2=f2,
                num_groups=grp,
                stride=srd,
                scale=scale,
                name=block_name)

        # Global average pool then the classifier head.
        out = fluid.layers.pool2d(
            input=out, pool_type='avg', global_pooling=True)

        logits = fluid.layers.fc(input=out,
                                 size=class_dim,
                                 param_attr=ParamAttr(
                                     initializer=MSRA(), name="fc7_weights"),
                                 bias_attr=ParamAttr(name="fc7_offset"))
        return logits

    def conv_bn_layer(self,
                      input,
                      filter_size,
                      num_filters,
                      stride,
                      padding,
                      channels=None,
                      num_groups=1,
                      act='relu',
                      use_cudnn=True,
                      name=None):
        """Conv (no bias, MSRA init) fused with batch norm + activation.

        Parameters are named `name + '_weights'` and `name + '_bn_*'` so
        pretrained checkpoints map onto them.
        """
        conv_out = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            act=None,
            use_cudnn=use_cudnn,
            param_attr=ParamAttr(
                initializer=MSRA(), name=name + "_weights"),
            bias_attr=False)
        bn_name = name + "_bn"
        normed = fluid.layers.batch_norm(
            input=conv_out,
            act=act,
            param_attr=ParamAttr(name=bn_name + "_scale"),
            bias_attr=ParamAttr(name=bn_name + "_offset"),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')
        return normed

    def depthwise_separable(self,
                            input,
                            num_filters1,
                            num_filters2,
                            num_groups,
                            stride,
                            scale,
                            name=None):
        """One depthwise-separable unit: grouped 3x3 conv (cuDNN disabled
        for the depthwise pass) followed by a 1x1 pointwise conv."""
        dw_out = self.conv_bn_layer(
            input=input,
            filter_size=3,
            num_filters=int(num_filters1 * scale),
            stride=stride,
            padding=1,
            num_groups=int(num_groups * scale),
            use_cudnn=False,
            name=name + "_dw")

        pw_out = self.conv_bn_layer(
            input=dw_out,
            filter_size=1,
            num_filters=int(num_filters2 * scale),
            stride=1,
            padding=0,
            name=name + "_sep")
        return pw_out


def MobileNetV1_x0_25():
    """MobileNetV1 at 0.25x width."""
    return MobileNetV1(scale=0.25)


def MobileNetV1_x0_5():
    """MobileNetV1 at 0.5x width."""
    return MobileNetV1(scale=0.5)


def MobileNetV1_x1_0():
    """MobileNetV1 at full width."""
    return MobileNetV1(scale=1.0)


def MobileNetV1_x0_75():
    """MobileNetV1 at 0.75x width."""
    return MobileNetV1(scale=0.75)
# Bug fix: the original list was missing a comma after
# 'MobileNetV2_x0_5', so implicit string concatenation fused it with
# 'MobileNetV2_x0_75' into one bogus name — both symbols were then
# invisible to `from ... import *` consumers.
__all__ = [
    'MobileNetV2_x0_25', 'MobileNetV2_x0_5', 'MobileNetV2_x0_75',
    'MobileNetV2_x1_0', 'MobileNetV2_x1_5', 'MobileNetV2_x2_0',
    'MobileNetV2'
]


class MobileNetV2():
    """MobileNetV2 backbone (fluid static-graph definition).

    Built from inverted-residual bottleneck blocks; `scale` is the
    width multiplier applied to all channel counts.
    """

    def __init__(self, scale=1.0):
        self.scale = scale

    def net(self, input, class_dim=1000):
        """Build the network on `input` and return class_dim logits."""
        scale = self.scale
        # (expansion t, output channels c, repeats n, first stride s)
        bottleneck_params_list = [
            (1, 16, 1, 1),
            (6, 24, 2, 2),
            (6, 32, 3, 2),
            (6, 64, 4, 2),
            (6, 96, 3, 1),
            (6, 160, 3, 2),
            (6, 320, 1, 1),
        ]

        # conv1: 3x3 stride-2 stem.
        input = self.conv_bn_layer(
            input,
            num_filters=int(32 * scale),
            filter_size=3,
            stride=2,
            padding=1,
            if_act=True,
            name='conv1_1')

        # Bottleneck sequences, named conv2 .. conv8.
        i = 1
        in_c = int(32 * scale)
        for layer_setting in bottleneck_params_list:
            t, c, n, s = layer_setting
            i += 1
            input = self.invresi_blocks(
                input=input,
                in_c=in_c,
                t=t,
                c=int(c * scale),
                n=n,
                s=s,
                name='conv' + str(i))
            in_c = int(c * scale)

        # Last 1x1 conv; channel count is never scaled below 1280.
        input = self.conv_bn_layer(
            input=input,
            num_filters=int(1280 * scale) if scale > 1.0 else 1280,
            filter_size=1,
            stride=1,
            padding=0,
            if_act=True,
            name='conv9')

        input = fluid.layers.pool2d(
            input=input, pool_type='avg', global_pooling=True)

        output = fluid.layers.fc(input=input,
                                 size=class_dim,
                                 param_attr=ParamAttr(name='fc10_weights'),
                                 bias_attr=ParamAttr(name='fc10_offset'))
        return output

    def conv_bn_layer(self,
                      input,
                      filter_size,
                      num_filters,
                      stride,
                      padding,
                      channels=None,
                      num_groups=1,
                      if_act=True,
                      name=None,
                      use_cudnn=True):
        """Conv (no bias) + batch norm, with optional ReLU6 when
        `if_act` is True. Parameter names derive from `name` so
        pretrained checkpoints map onto them.
        """
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            act=None,
            use_cudnn=use_cudnn,
            param_attr=ParamAttr(name=name + '_weights'),
            bias_attr=False)
        bn_name = name + '_bn'
        bn = fluid.layers.batch_norm(
            input=conv,
            param_attr=ParamAttr(name=bn_name + "_scale"),
            bias_attr=ParamAttr(name=bn_name + "_offset"),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')
        if if_act:
            return fluid.layers.relu6(bn)
        else:
            return bn

    def shortcut(self, input, data_residual):
        """Identity residual connection."""
        return fluid.layers.elementwise_add(input, data_residual)

    def inverted_residual_unit(self,
                               input,
                               num_in_filter,
                               num_filters,
                               ifshortcut,
                               stride,
                               filter_size,
                               padding,
                               expansion_factor,
                               name=None):
        """One inverted residual: 1x1 expand -> depthwise -> 1x1 linear
        projection (no activation), with an optional identity shortcut."""
        num_expfilter = int(round(num_in_filter * expansion_factor))

        channel_expand = self.conv_bn_layer(
            input=input,
            num_filters=num_expfilter,
            filter_size=1,
            stride=1,
            padding=0,
            num_groups=1,
            if_act=True,
            name=name + '_expand')

        bottleneck_conv = self.conv_bn_layer(
            input=channel_expand,
            num_filters=num_expfilter,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            num_groups=num_expfilter,
            if_act=True,
            name=name + '_dwise',
            use_cudnn=False)

        linear_out = self.conv_bn_layer(
            input=bottleneck_conv,
            num_filters=num_filters,
            filter_size=1,
            stride=1,
            padding=0,
            num_groups=1,
            if_act=False,
            name=name + '_linear')
        if ifshortcut:
            out = self.shortcut(input=input, data_residual=linear_out)
            return out
        else:
            return linear_out

    def invresi_blocks(self, input, in_c, t, c, n, s, name=None):
        """Stack of `n` inverted residuals; only the first may stride,
        the rest keep the resolution and use identity shortcuts."""
        first_block = self.inverted_residual_unit(
            input=input,
            num_in_filter=in_c,
            num_filters=c,
            ifshortcut=False,
            stride=s,
            filter_size=3,
            padding=1,
            expansion_factor=t,
            name=name + '_1')

        last_residual_block = first_block
        last_c = c

        for i in range(1, n):
            last_residual_block = self.inverted_residual_unit(
                input=last_residual_block,
                num_in_filter=last_c,
                num_filters=c,
                ifshortcut=True,
                stride=1,
                filter_size=3,
                padding=1,
                expansion_factor=t,
                name=name + '_' + str(i + 1))
        return last_residual_block


def MobileNetV2_x0_25():
    model = MobileNetV2(scale=0.25)
    return model


def MobileNetV2_x0_5():
    model = MobileNetV2(scale=0.5)
    return model


def MobileNetV2_x0_75():
    model = MobileNetV2(scale=0.75)
    return model


def MobileNetV2_x1_0():
    model = MobileNetV2(scale=1.0)
    return model


def MobileNetV2_x1_5():
    model = MobileNetV2(scale=1.5)
    return model


def MobileNetV2_x2_0():
    model = MobileNetV2(scale=2.0)
    return model
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr

__all__ = [
    'MobileNetV3', 'MobileNetV3_small_x0_35', 'MobileNetV3_small_x0_5',
    'MobileNetV3_small_x0_75', 'MobileNetV3_small_x1_0',
    'MobileNetV3_small_x1_25', 'MobileNetV3_large_x0_35',
    'MobileNetV3_large_x0_5', 'MobileNetV3_large_x0_75',
    'MobileNetV3_large_x1_0', 'MobileNetV3_large_x1_25'
]


class MobileNetV3():
    """MobileNetV3 backbone (fluid static-graph definition).

    `model_name` selects the 'large' or 'small' layer configuration;
    `scale` is the width multiplier applied (via make_divisible) to
    every channel count.
    """

    def __init__(self, scale=1.0, model_name='small'):
        self.scale = scale
        # Channel count of the stem conv before width scaling.
        self.inplanes = 16
        if model_name == "large":
            self.cfg = [
                # k: kernel, exp: expansion chans, c: output chans,
                # se: use squeeze-excite, nl: nonlinearity, s: stride
                [3, 16, 16, False, 'relu', 1],
                [3, 64, 24, False, 'relu', 2],
                [3, 72, 24, False, 'relu', 1],
                [5, 72, 40, True, 'relu', 2],
                [5, 120, 40, True, 'relu', 1],
                [5, 120, 40, True, 'relu', 1],
                [3, 240, 80, False, 'hard_swish', 2],
                [3, 200, 80, False, 'hard_swish', 1],
                [3, 184, 80, False, 'hard_swish', 1],
                [3, 184, 80, False, 'hard_swish', 1],
                [3, 480, 112, True, 'hard_swish', 1],
                [3, 672, 112, True, 'hard_swish', 1],
                [5, 672, 160, True, 'hard_swish', 2],
                [5, 960, 160, True, 'hard_swish', 1],
                [5, 960, 160, True, 'hard_swish', 1],
            ]
            self.cls_ch_squeeze = 960
            self.cls_ch_expand = 1280
        elif model_name == "small":
            self.cfg = [
                # k, exp, c, se, nl, s (same columns as above)
                [3, 16, 16, True, 'relu', 2],
                [3, 72, 24, False, 'relu', 2],
                [3, 88, 24, False, 'relu', 1],
                [5, 96, 40, True, 'hard_swish', 2],
                [5, 240, 40, True, 'hard_swish', 1],
                [5, 240, 40, True, 'hard_swish', 1],
                [5, 120, 48, True, 'hard_swish', 1],
                [5, 144, 48, True, 'hard_swish', 1],
                [5, 288, 96, True, 'hard_swish', 2],
                [5, 576, 96, True, 'hard_swish', 1],
                [5, 576, 96, True, 'hard_swish', 1],
            ]
            self.cls_ch_squeeze = 576
            self.cls_ch_expand = 1280
        else:
            raise NotImplementedError("mode[" + model_name +
                                      "_model] is not implemented!")

    def net(self, input, class_dim=1000):
        """Build the network on `input` and return class_dim logits."""
        scale = self.scale
        inplanes = self.inplanes
        cfg = self.cfg
        cls_ch_squeeze = self.cls_ch_squeeze
        cls_ch_expand = self.cls_ch_expand

        # Stem: 3x3 stride-2 conv with hard-swish.
        conv = self.conv_bn_layer(
            input,
            filter_size=3,
            num_filters=self.make_divisible(inplanes * scale),
            stride=2,
            padding=1,
            num_groups=1,
            if_act=True,
            act='hard_swish',
            name='conv1')

        # Residual blocks from the config table, named conv2, conv3, ...
        i = 0
        inplanes = self.make_divisible(inplanes * scale)
        for layer_cfg in cfg:
            conv = self.residual_unit(
                input=conv,
                num_in_filter=inplanes,
                num_mid_filter=self.make_divisible(scale * layer_cfg[1]),
                num_out_filter=self.make_divisible(scale * layer_cfg[2]),
                act=layer_cfg[4],
                stride=layer_cfg[5],
                filter_size=layer_cfg[0],
                use_se=layer_cfg[3],
                name='conv' + str(i + 2))
            inplanes = self.make_divisible(scale * layer_cfg[2])
            i += 1

        # Head: 1x1 conv-bn, global pool, 1x1 conv (no bn), dropout, fc.
        conv = self.conv_bn_layer(
            input=conv,
            filter_size=1,
            num_filters=self.make_divisible(scale * cls_ch_squeeze),
            stride=1,
            padding=0,
            num_groups=1,
            if_act=True,
            act='hard_swish',
            name='conv_last')
        conv = fluid.layers.pool2d(
            input=conv, pool_type='avg', global_pooling=True, use_cudnn=False)
        conv = fluid.layers.conv2d(
            input=conv,
            num_filters=cls_ch_expand,
            filter_size=1,
            stride=1,
            padding=0,
            act=None,
            param_attr=ParamAttr(name='last_1x1_conv_weights'),
            bias_attr=False)
        conv = fluid.layers.hard_swish(conv)
        drop = fluid.layers.dropout(x=conv, dropout_prob=0.2)
        out = fluid.layers.fc(input=drop,
                              size=class_dim,
                              param_attr=ParamAttr(name='fc_weights'),
                              bias_attr=ParamAttr(name='fc_offset'))
        return out

    def conv_bn_layer(self,
                      input,
                      filter_size,
                      num_filters,
                      stride,
                      padding,
                      num_groups=1,
                      if_act=True,
                      act=None,
                      name=None,
                      use_cudnn=True,
                      res_last_bn_init=False):
        """Conv (no bias) + batch norm with optional relu/hard_swish.

        The bn scale/offset carry an L2 regularization coefficient of 0,
        i.e. they are excluded from weight decay. `res_last_bn_init` is
        accepted but currently unused.
        """
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            act=None,
            use_cudnn=use_cudnn,
            param_attr=ParamAttr(name=name + '_weights'),
            bias_attr=False)
        bn_name = name + '_bn'
        bn = fluid.layers.batch_norm(
            input=conv,
            param_attr=ParamAttr(
                name=bn_name + "_scale",
                regularizer=fluid.regularizer.L2DecayRegularizer(
                    regularization_coeff=0.0)),
            bias_attr=ParamAttr(
                name=bn_name + "_offset",
                regularizer=fluid.regularizer.L2DecayRegularizer(
                    regularization_coeff=0.0)),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')
        if if_act:
            if act == 'relu':
                bn = fluid.layers.relu(bn)
            elif act == 'hard_swish':
                bn = fluid.layers.hard_swish(bn)
        return bn

    def make_divisible(self, v, divisor=8, min_value=None):
        """Round `v` to the nearest multiple of `divisor` (at least
        `min_value`), never dropping more than 10% below `v`."""
        if min_value is None:
            min_value = divisor
        new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
        if new_v < 0.9 * v:
            new_v += divisor
        return new_v

    def se_block(self, input, num_out_filter, ratio=4, name=None):
        """Squeeze-and-excitation: global pool -> 1x1 reduce (relu) ->
        1x1 expand (hard_sigmoid) -> channel-wise rescale of `input`."""
        num_mid_filter = num_out_filter // ratio
        pool = fluid.layers.pool2d(
            input=input, pool_type='avg', global_pooling=True, use_cudnn=False)
        conv1 = fluid.layers.conv2d(
            input=pool,
            filter_size=1,
            num_filters=num_mid_filter,
            act='relu',
            param_attr=ParamAttr(name=name + '_1_weights'),
            bias_attr=ParamAttr(name=name + '_1_offset'))
        conv2 = fluid.layers.conv2d(
            input=conv1,
            filter_size=1,
            num_filters=num_out_filter,
            act='hard_sigmoid',
            param_attr=ParamAttr(name=name + '_2_weights'),
            bias_attr=ParamAttr(name=name + '_2_offset'))
        # Broadcast the per-channel gate over the spatial dims.
        scale = fluid.layers.elementwise_mul(x=input, y=conv2, axis=0)
        return scale

    def residual_unit(self,
                      input,
                      num_in_filter,
                      num_mid_filter,
                      num_out_filter,
                      stride,
                      filter_size,
                      act=None,
                      use_se=False,
                      name=None):
        """Inverted residual: 1x1 expand -> depthwise (optional SE) ->
        1x1 linear projection; identity shortcut only when the shapes
        match (same channels and stride 1)."""

        conv0 = self.conv_bn_layer(
            input=input,
            filter_size=1,
            num_filters=num_mid_filter,
            stride=1,
            padding=0,
            if_act=True,
            act=act,
            name=name + '_expand')

        conv1 = self.conv_bn_layer(
            input=conv0,
            filter_size=filter_size,
            num_filters=num_mid_filter,
            stride=stride,
            padding=int((filter_size - 1) // 2),
            if_act=True,
            act=act,
            num_groups=num_mid_filter,
            use_cudnn=False,
            name=name + '_depthwise')
        if use_se:
            conv1 = self.se_block(
                input=conv1, num_out_filter=num_mid_filter, name=name + '_se')

        conv2 = self.conv_bn_layer(
            input=conv1,
            filter_size=1,
            num_filters=num_out_filter,
            stride=1,
            padding=0,
            if_act=False,
            name=name + '_linear',
            res_last_bn_init=True)
        # No shortcut when the block changes channels or resolution.
        if num_in_filter != num_out_filter or stride != 1:
            return conv2
        else:
            return fluid.layers.elementwise_add(x=input, y=conv2, act=None)


def MobileNetV3_small_x0_35():
    model = MobileNetV3(model_name='small', scale=0.35)
    return model


def MobileNetV3_small_x0_5():
    model = MobileNetV3(model_name='small', scale=0.5)
    return model


def MobileNetV3_small_x0_75():
    model = MobileNetV3(model_name='small', scale=0.75)
    return model


def MobileNetV3_small_x1_0():
    model = MobileNetV3(model_name='small', scale=1.0)
    return model


def MobileNetV3_small_x1_25():
    model = MobileNetV3(model_name='small', scale=1.25)
    return model


def MobileNetV3_large_x0_35():
    model = MobileNetV3(model_name='large', scale=0.35)
    return model


def MobileNetV3_large_x0_5():
    model = MobileNetV3(model_name='large', scale=0.5)
    return model


def MobileNetV3_large_x0_75():
    model = MobileNetV3(model_name='large', scale=0.75)
    return model


def MobileNetV3_large_x1_0():
    model = MobileNetV3(model_name='large', scale=1.0)
    return model


def MobileNetV3_large_x1_25():
    model = MobileNetV3(model_name='large', scale=1.25)
    return model
# Zero-coefficient L2 regularizer: excludes bn params from weight decay.
bn_regularizer = fluid.regularizer.L2DecayRegularizer(regularization_coeff=0.0)
# Global prefix applied to every parameter name; managed by `scope()`.
name_scope = ""


@contextlib.contextmanager
def scope(name):
    """Temporarily extend the global parameter-name prefix with
    `name + '/'` for the duration of the `with` block.

    Bug fix: the previous prefix is now restored in a `finally` clause,
    so an exception raised inside the `with` body no longer leaves the
    global prefix corrupted for every subsequently built layer.
    """
    global name_scope
    bk = name_scope
    name_scope = name_scope + name + '/'
    try:
        yield
    finally:
        name_scope = bk


def max_pool(input, kernel, stride, padding):
    """Max pooling wrapper over fluid.layers.pool2d."""
    data = fluid.layers.pool2d(
        input,
        pool_size=kernel,
        pool_type='max',
        pool_stride=stride,
        pool_padding=padding)
    return data


def group_norm(input, G, eps=1e-5, param_attr=None, bias_attr=None):
    """Group normalization over an NCHW tensor.

    If G does not divide the channel count C, search outward from G
    (G+0, G-0, G+1, G-1, ... up to a distance of 9) for the nearest
    positive group count that does divide C.
    """
    N, C, H, W = input.shape
    if C % G != 0:
        for d in range(10):
            for t in [d, -d]:
                if G + t <= 0:
                    continue
                if C % (G + t) == 0:
                    G = G + t
                    break
            if C % G == 0:
                break
    assert C % G == 0, "group can not divide channel: %d vs %d" % (C, G)
    x = fluid.layers.group_norm(
        input,
        groups=G,
        param_attr=param_attr,
        bias_attr=bias_attr,
        name=name_scope + 'group_norm')
    return x


def bn(*args, **kargs):
    """Batch norm with scoped parameter names ('gamma'/'beta') and the
    shared zero weight-decay regularizer."""
    with scope('BatchNorm'):
        return fluid.layers.batch_norm(
            *args,
            epsilon=1e-3,
            momentum=0.99,
            param_attr=fluid.ParamAttr(
                name=name_scope + 'gamma', regularizer=bn_regularizer),
            bias_attr=fluid.ParamAttr(
                name=name_scope + 'beta', regularizer=bn_regularizer),
            moving_mean_name=name_scope + 'moving_mean',
            moving_variance_name=name_scope + 'moving_variance',
            **kargs)


def bn_relu(data):
    """Batch norm followed by ReLU."""
    return fluid.layers.relu(bn(data))


def relu(data):
    """Plain ReLU wrapper."""
    return fluid.layers.relu(data)


def conv(*args, **kargs):
    """conv2d wrapper with scoped parameter names.

    param_attr is passed as a bare name string (ParamAttr accepts it).
    A truthy caller-supplied bias_attr is replaced by a zero-initialized,
    unregularized bias; otherwise the conv has no bias.
    """
    kargs['param_attr'] = name_scope + 'weights'
    if 'bias_attr' in kargs and kargs['bias_attr']:
        kargs['bias_attr'] = fluid.ParamAttr(
            name=name_scope + 'biases',
            regularizer=None,
            initializer=fluid.initializer.ConstantInitializer(value=0.0))
    else:
        kargs['bias_attr'] = False
    return fluid.layers.conv2d(*args, **kargs)


def deconv(*args, **kargs):
    """conv2d_transpose wrapper with scoped parameter names."""
    kargs['param_attr'] = name_scope + 'weights'
    if 'bias_attr' in kargs and kargs['bias_attr']:
        kargs['bias_attr'] = name_scope + 'biases'
    else:
        kargs['bias_attr'] = False
    return fluid.layers.conv2d_transpose(*args, **kargs)


def seperate_conv(input, channel, stride, filter, dilation=1, act=None):
    """Depthwise-separable conv: depthwise (grouped) conv + bn, then a
    pointwise 1x1 conv + bn, each with optional activation `act`.

    The two convs use different TruncatedNormal scales and live in
    separate name scopes ('depthwise'/'pointwise'), so both weight
    parameters resolve to distinct scoped names.
    """
    param_attr = fluid.ParamAttr(
        name=name_scope + 'weights',
        regularizer=fluid.regularizer.L2DecayRegularizer(
            regularization_coeff=0.0),
        initializer=fluid.initializer.TruncatedNormal(
            loc=0.0, scale=0.33))
    with scope('depthwise'):
        input = conv(
            input,
            input.shape[1],
            filter,
            stride,
            groups=input.shape[1],
            padding=(filter // 2) * dilation,
            dilation=dilation,
            use_cudnn=False,
            param_attr=param_attr)
        input = bn(input)
        if act:
            input = act(input)

    param_attr = fluid.ParamAttr(
        name=name_scope + 'weights',
        regularizer=None,
        initializer=fluid.initializer.TruncatedNormal(
            loc=0.0, scale=0.06))
    with scope('pointwise'):
        input = conv(
            input, channel, 1, 1, groups=1, padding=0, param_attr=param_attr)
        input = bn(input)
        if act:
            input = act(input)
    return input
__all__ = [
    "Res2Net", "Res2Net50_48w_2s", "Res2Net50_26w_4s", "Res2Net50_14w_8s",
    "Res2Net50_26w_6s", "Res2Net50_26w_8s", "Res2Net101_26w_4s",
    "Res2Net152_26w_4s"
]


class Res2Net():
    """Res2Net backbone (fluid static-graph definition).

    `layers` selects the ResNet depth (50/101/152); each bottleneck's
    middle 3x3 stage is split into `scales` groups of `width` channels
    with hierarchical residual connections between them.
    """

    def __init__(self, layers=50, scales=4, width=26):
        self.layers = layers
        self.scales = scales
        self.width = width

    def net(self, input, class_dim=1000):
        """Build the network on `input` and return class_dim logits."""
        layers = self.layers
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(supported_layers, layers)
        # Per-stage channel counts: the split 3x3 stage grows from
        # width*scales; the 1x1 projection grows from 256.
        basic_width = self.width * self.scales
        num_filters1 = [basic_width * t for t in [1, 2, 4, 8]]
        num_filters2 = [256 * t for t in [1, 2, 4, 8]]

        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]

        # Stem: 7x7 stride-2 conv + 3x3 stride-2 max pool.
        conv = self.conv_bn_layer(
            input=input,
            num_filters=64,
            filter_size=7,
            stride=2,
            act='relu',
            name="conv1")

        conv = fluid.layers.pool2d(
            input=conv,
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max')

        for block in range(len(depth)):
            for i in range(depth[block]):
                # Naming matches the released pretrained weights: the
                # deep stage of ResNet-101/152 uses "res<k>b<i>" instead
                # of letter suffixes ("res<k>a", "res<k>b", ...).
                if layers in [101, 152] and block == 2:
                    if i == 0:
                        conv_name = "res" + str(block + 2) + "a"
                    else:
                        conv_name = "res" + str(block + 2) + "b" + str(i)
                else:
                    conv_name = "res" + str(block + 2) + chr(97 + i)
                conv = self.bottleneck_block(
                    input=conv,
                    num_filters1=num_filters1[block],
                    num_filters2=num_filters2[block],
                    stride=2 if i == 0 and block != 0 else 1,
                    name=conv_name)

        pool = fluid.layers.pool2d(
            input=conv,
            pool_size=7,
            pool_stride=1,
            pool_type='avg',
            global_pooling=True)

        # Classifier head with uniform init in [-stdv, stdv].
        stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
        out = fluid.layers.fc(
            input=pool,
            size=class_dim,
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name='fc_weights'),
            bias_attr=fluid.param_attr.ParamAttr(name='fc_offset'))
        return out

    def conv_bn_layer(self,
                      input,
                      num_filters,
                      filter_size,
                      stride=1,
                      groups=1,
                      act=None,
                      name=None):
        """Conv (no bias, SAME-style padding) + batch norm.

        Batch-norm parameter names mirror the original ResNet release:
        "bn_conv1" for the stem, otherwise "bn" + name[3:].
        """
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)

        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]

        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            # ParamAttr's first positional argument is the name.
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def shortcut(self, input, ch_out, stride, name):
        """Identity when shapes already match, else a 1x1 projection."""
        ch_in = input.shape[1]
        if ch_in != ch_out or stride != 1:
            return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
        else:
            return input

    def bottleneck_block(self, input, num_filters1, num_filters2, stride,
                         name):
        """Res2Net bottleneck: 1x1 reduce, multi-scale split 3x3 stage,
        1x1 expand, plus residual shortcut.

        The reduce output is split into `scales` channel groups; each
        3x3 conv (except the first, or all of them when striding) takes
        its split plus the previous branch's output, giving the
        hierarchical receptive-field growth. The last split is passed
        through unchanged (or avg-pooled when striding).
        """
        conv0 = self.conv_bn_layer(
            input=input,
            num_filters=num_filters1,
            filter_size=1,
            stride=1,
            act='relu',
            name=name + '_branch2a')
        xs = fluid.layers.split(conv0, self.scales, 1)
        ys = []
        for s in range(self.scales - 1):
            if s == 0 or stride == 2:
                ys.append(
                    self.conv_bn_layer(
                        input=xs[s],
                        num_filters=num_filters1 // self.scales,
                        stride=stride,
                        filter_size=3,
                        act='relu',
                        name=name + '_branch2b_' + str(s + 1)))
            else:
                # Hierarchical connection: add the previous branch output.
                ys.append(
                    self.conv_bn_layer(
                        input=xs[s] + ys[-1],
                        num_filters=num_filters1 // self.scales,
                        stride=stride,
                        filter_size=3,
                        act='relu',
                        name=name + '_branch2b_' + str(s + 1)))
        if stride == 1:
            ys.append(xs[-1])
        else:
            # Strided block: downsample the pass-through split to match.
            ys.append(
                fluid.layers.pool2d(
                    input=xs[-1],
                    pool_size=3,
                    pool_stride=stride,
                    pool_padding=1,
                    pool_type='avg'))

        conv1 = fluid.layers.concat(ys, axis=1)
        conv2 = self.conv_bn_layer(
            input=conv1,
            num_filters=num_filters2,
            filter_size=1,
            act=None,
            name=name + "_branch2c")

        short = self.shortcut(
            input, num_filters2, stride, name=name + "_branch1")

        return fluid.layers.elementwise_add(x=short, y=conv2, act='relu')


def Res2Net50_48w_2s():
    model = Res2Net(layers=50, scales=2, width=48)
    return model


def Res2Net50_26w_4s():
    model = Res2Net(layers=50, scales=4, width=26)
    return model


def Res2Net50_14w_8s():
    model = Res2Net(layers=50, scales=8, width=14)
    return model


def Res2Net50_26w_6s():
    model = Res2Net(layers=50, scales=6, width=26)
    return model


def Res2Net50_26w_8s():
    model = Res2Net(layers=50, scales=8, width=26)
    return model


def Res2Net101_26w_4s():
    model = Res2Net(layers=101, scales=4, width=26)
    return model


def Res2Net152_26w_4s():
    model = Res2Net(layers=152, scales=4, width=26)
    return model
index 000000000..5e3639749 --- /dev/null +++ b/ppcls/modeling/architectures/res2net_vd.py @@ -0,0 +1,294 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +import paddle +import paddle.fluid as fluid +from paddle.fluid.param_attr import ParamAttr + +__all__ = [ + "Res2Net_vd", "Res2Net50_vd_48w_2s", "Res2Net50_vd_26w_4s", + "Res2Net50_vd_14w_8s", "Res2Net50_vd_26w_6s", "Res2Net50_vd_26w_8s", + "Res2Net101_vd_26w_4s", "Res2Net152_vd_26w_4s", "Res2Net200_vd_26w_4s" +] + + +class Res2Net_vd(): + def __init__(self, layers=50, scales=4, width=26): + self.layers = layers + self.scales = scales + self.width = width + + def net(self, input, class_dim=1000): + layers = self.layers + supported_layers = [50, 101, 152, 200] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format(supported_layers, layers) + basic_width = self.width * self.scales + num_filters1 = [basic_width * t for t in [1, 2, 4, 8]] + num_filters2 = [256 * t for t in [1, 2, 4, 8]] + if layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + elif layers == 200: + depth = [3, 12, 48, 3] + conv = self.conv_bn_layer( + input=input, + num_filters=32, + filter_size=3, + stride=2, + act='relu', + name='conv1_1') + conv = 
self.conv_bn_layer( + input=conv, + num_filters=32, + filter_size=3, + stride=1, + act='relu', + name='conv1_2') + conv = self.conv_bn_layer( + input=conv, + num_filters=64, + filter_size=3, + stride=1, + act='relu', + name='conv1_3') + + conv = fluid.layers.pool2d( + input=conv, + pool_size=3, + pool_stride=2, + pool_padding=1, + pool_type='max') + for block in range(len(depth)): + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + conv = self.bottleneck_block( + input=conv, + num_filters1=num_filters1[block], + num_filters2=num_filters2[block], + stride=2 if i == 0 and block != 0 else 1, + if_first=block == i == 0, + name=conv_name) + pool = fluid.layers.pool2d( + input=conv, + pool_size=7, + pool_stride=1, + pool_type='avg', + global_pooling=True) + + stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0) + out = fluid.layers.fc( + input=pool, + size=class_dim, + param_attr=fluid.param_attr.ParamAttr( + initializer=fluid.initializer.Uniform(-stdv, stdv), + name='fc_weights'), + bias_attr=fluid.param_attr.ParamAttr(name='fc_offset')) + return out + + def conv_bn_layer(self, + input, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + name=None): + conv = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + act=None, + param_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + return fluid.layers.batch_norm( + input=conv, + act=act, + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + + def conv_bn_layer_new(self, + input, + num_filters, + 
filter_size, + stride=1, + groups=1, + act=None, + name=None): + pool = fluid.layers.pool2d( + input=input, + pool_size=2, + pool_stride=2, + pool_padding=0, + pool_type='avg', + ceil_mode=True) + + conv = fluid.layers.conv2d( + input=pool, + num_filters=num_filters, + filter_size=filter_size, + stride=1, + padding=(filter_size - 1) // 2, + groups=groups, + act=None, + param_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + return fluid.layers.batch_norm( + input=conv, + act=act, + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + + def shortcut(self, input, ch_out, stride, name, if_first=False): + ch_in = input.shape[1] + if ch_in != ch_out or stride != 1: + if if_first: + return self.conv_bn_layer(input, ch_out, 1, stride, name=name) + else: + return self.conv_bn_layer_new( + input, ch_out, 1, stride, name=name) + elif if_first: + return self.conv_bn_layer(input, ch_out, 1, stride, name=name) + else: + return input + + def bottleneck_block(self, input, num_filters1, num_filters2, stride, name, + if_first): + conv0 = self.conv_bn_layer( + input=input, + num_filters=num_filters1, + filter_size=1, + stride=1, + act='relu', + name=name + '_branch2a') + + xs = fluid.layers.split(conv0, self.scales, 1) + ys = [] + for s in range(self.scales - 1): + if s == 0 or stride == 2: + ys.append( + self.conv_bn_layer( + input=xs[s], + num_filters=num_filters1 // self.scales, + stride=stride, + filter_size=3, + act='relu', + name=name + '_branch2b_' + str(s + 1))) + else: + ys.append( + self.conv_bn_layer( + input=xs[s] + ys[-1], + num_filters=num_filters1 // self.scales, + stride=stride, + filter_size=3, + act='relu', + name=name + '_branch2b_' + str(s + 1))) + + if stride == 1: + ys.append(xs[-1]) + else: + ys.append( + fluid.layers.pool2d( + input=xs[-1], + 
pool_size=3, + pool_stride=stride, + pool_padding=1, + pool_type='avg')) + + conv1 = fluid.layers.concat(ys, axis=1) + conv2 = self.conv_bn_layer( + input=conv1, + num_filters=num_filters2, + filter_size=1, + act=None, + name=name + "_branch2c") + + short = self.shortcut( + input, + num_filters2, + stride, + if_first=if_first, + name=name + "_branch1") + + return fluid.layers.elementwise_add(x=short, y=conv2, act='relu') + + +def Res2Net50_vd_48w_2s(): + model = Res2Net_vd(layers=50, scales=2, width=48) + return model + + +def Res2Net50_vd_26w_4s(): + model = Res2Net_vd(layers=50, scales=4, width=26) + return model + + +def Res2Net50_vd_14w_8s(): + model = Res2Net_vd(layers=50, scales=8, width=14) + return model + + +def Res2Net50_vd_26w_6s(): + model = Res2Net_vd(layers=50, scales=6, width=26) + return model + + +def Res2Net50_vd_26w_8s(): + model = Res2Net_vd(layers=50, scales=8, width=26) + return model + + +def Res2Net101_vd_26w_4s(): + model = Res2Net_vd(layers=101, scales=4, width=26) + return model + + +def Res2Net152_vd_26w_4s(): + model = Res2Net_vd(layers=152, scales=4, width=26) + return model + + +def Res2Net200_vd_26w_4s(): + model = Res2Net_vd(layers=200, scales=4, width=26) + return model diff --git a/ppcls/modeling/architectures/resnet.py b/ppcls/modeling/architectures/resnet.py new file mode 100644 index 000000000..1480025b9 --- /dev/null +++ b/ppcls/modeling/architectures/resnet.py @@ -0,0 +1,240 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+#See the License for the specific language governing permissions and +#limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +import paddle +import paddle.fluid as fluid +from paddle.fluid.param_attr import ParamAttr + +__all__ = [ + "ResNet", "ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152" +] + + +class ResNet(): + def __init__(self, layers=50): + self.layers = layers + + def net(self, input, class_dim=1000, data_format="NCHW"): + layers = self.layers + supported_layers = [18, 34, 50, 101, 152] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format(supported_layers, layers) + + if layers == 18: + depth = [2, 2, 2, 2] + elif layers == 34 or layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + num_filters = [64, 128, 256, 512] + + conv = self.conv_bn_layer( + input=input, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + name="conv1", + data_format=data_format) + conv = fluid.layers.pool2d( + input=conv, + pool_size=3, + pool_stride=2, + pool_padding=1, + pool_type='max', + data_format=data_format) + if layers >= 50: + for block in range(len(depth)): + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + conv = self.bottleneck_block( + input=conv, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + name=conv_name, + data_format=data_format) + + else: + for block in range(len(depth)): + for i in range(depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + conv = self.basic_block( + input=conv, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + 
is_first=block == i == 0, + name=conv_name, + data_format=data_format) + + pool = fluid.layers.pool2d( + input=conv, + pool_type='avg', + global_pooling=True, + data_format=data_format) + stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0) + out = fluid.layers.fc( + input=pool, + size=class_dim, + param_attr=fluid.param_attr.ParamAttr( + name="fc_0.w_0", + initializer=fluid.initializer.Uniform(-stdv, stdv)), + bias_attr=ParamAttr(name="fc_0.b_0")) + return out + + def conv_bn_layer(self, + input, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + name=None, + data_format='NCHW'): + conv = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + act=None, + param_attr=ParamAttr(name=name + "_weights"), + bias_attr=False, + name=name + '.conv2d.output.1', + data_format=data_format) + + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + return fluid.layers.batch_norm( + input=conv, + act=act, + name=bn_name + '.output.1', + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance', + data_layout=data_format) + + def shortcut(self, input, ch_out, stride, is_first, name, data_format): + if data_format == 'NCHW': + ch_in = input.shape[1] + else: + ch_in = input.shape[-1] + if ch_in != ch_out or stride != 1 or is_first == True: + return self.conv_bn_layer( + input, ch_out, 1, stride, name=name, data_format=data_format) + else: + return input + + def bottleneck_block(self, input, num_filters, stride, name, data_format): + conv0 = self.conv_bn_layer( + input=input, + num_filters=num_filters, + filter_size=1, + act='relu', + name=name + "_branch2a", + data_format=data_format) + conv1 = self.conv_bn_layer( + input=conv0, + num_filters=num_filters, + filter_size=3, + stride=stride, + act='relu', + name=name + "_branch2b", 
+ data_format=data_format) + conv2 = self.conv_bn_layer( + input=conv1, + num_filters=num_filters * 4, + filter_size=1, + act=None, + name=name + "_branch2c", + data_format=data_format) + + short = self.shortcut( + input, + num_filters * 4, + stride, + is_first=False, + name=name + "_branch1", + data_format=data_format) + + return fluid.layers.elementwise_add( + x=short, y=conv2, act='relu', name=name + ".add.output.5") + + def basic_block(self, input, num_filters, stride, is_first, name, + data_format): + conv0 = self.conv_bn_layer( + input=input, + num_filters=num_filters, + filter_size=3, + act='relu', + stride=stride, + name=name + "_branch2a", + data_format=data_format) + conv1 = self.conv_bn_layer( + input=conv0, + num_filters=num_filters, + filter_size=3, + act=None, + name=name + "_branch2b", + data_format=data_format) + short = self.shortcut( + input, + num_filters, + stride, + is_first, + name=name + "_branch1", + data_format=data_format) + return fluid.layers.elementwise_add(x=short, y=conv1, act='relu') + + +def ResNet18(): + model = ResNet(layers=18) + return model + + +def ResNet34(): + model = ResNet(layers=34) + return model + + +def ResNet50(): + model = ResNet(layers=50) + return model + + +def ResNet101(): + model = ResNet(layers=101) + return model + + +def ResNet152(): + model = ResNet(layers=152) + return model diff --git a/ppcls/modeling/architectures/resnet_acnet.py b/ppcls/modeling/architectures/resnet_acnet.py new file mode 100644 index 000000000..e17046f0e --- /dev/null +++ b/ppcls/modeling/architectures/resnet_acnet.py @@ -0,0 +1,332 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. 
+#You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import math
+
+import paddle
+import paddle.fluid as fluid
+from paddle.fluid.param_attr import ParamAttr
+
+# NOTE(review): the class defined below is ResNetACNet (no underscore);
+# exporting the non-existent name "ResNet_ACNet" made `from ... import *`
+# raise AttributeError. Export the real class name instead.
+__all__ = [
+    "ResNetACNet", "ResNet18_ACNet", "ResNet34_ACNet", "ResNet50_ACNet",
+    "ResNet101_ACNet", "ResNet152_ACNet"
+]
+
+
+class ResNetACNet(object):
+    """ ACNet """
+
+    def __init__(self, layers=50, deploy=False):
+        """init"""
+        self.layers = layers
+        self.deploy = deploy
+
+    def net(self, input, class_dim=1000):
+        """model"""
+        layers = self.layers
+        supported_layers = [18, 34, 50, 101, 152]
+        assert layers in supported_layers, \
+            "supported layers are {} but input layer is {}".format(supported_layers, layers)
+
+        if layers == 18:
+            depth = [2, 2, 2, 2]
+        elif layers == 34 or layers == 50:
+            depth = [3, 4, 6, 3]
+        elif layers == 101:
+            depth = [3, 4, 23, 3]
+        elif layers == 152:
+            depth = [3, 8, 36, 3]
+        num_filters = [64, 128, 256, 512]
+
+        conv = self.conv_bn_layer(
+            input=input,
+            num_filters=64,
+            filter_size=7,
+            stride=2,
+            act='relu',
+            name="conv1")
+        conv = fluid.layers.pool2d(
+            input=conv,
+            pool_size=3,
+            pool_stride=2,
+            pool_padding=1,
+            pool_type='max')
+        if layers >= 50:
+            for block in range(len(depth)):
+                for i in range(depth[block]):
+                    if layers in [101, 152] and block == 2:
+                        if i == 0:
+                            conv_name = "res" + str(block + 2) + "a"
+                        else:
+                            conv_name = "res" + str(block + 2) + "b" + str(i)
+                    else:
+                        conv_name = "res" + str(block + 2) + chr(97 + i)
+                    conv = self.bottleneck_block(
+                        input=conv,
+                        
num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + name=conv_name) + else: + for block in range(len(depth)): + for i in range(depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + conv = self.basic_block( + input=conv, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + is_first=block == i == 0, + name=conv_name) + + pool = fluid.layers.pool2d( + input=conv, pool_size=7, pool_type='avg', global_pooling=True) + + stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0) + out = fluid.layers.fc( + input=pool, + size=class_dim, + param_attr=fluid.param_attr.ParamAttr( + initializer=fluid.initializer.Uniform(-stdv, stdv))) + return out + + def conv_bn_layer(self, **kwargs): + """ + conv_bn_layer + """ + if kwargs['filter_size'] == 1: + return self.conv_bn_layer_ori(**kwargs) + else: + return self.conv_bn_layer_ac(**kwargs) + + # conv bn+relu + def conv_bn_layer_ori(self, + input, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + name=None): + """ + standard convbn + used for 1x1 convbn in acnet + """ + conv = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + act=None, + param_attr=ParamAttr(name=name + "_weights"), + bias_attr=False, + name=name + '.conv2d.output.1') + + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + return fluid.layers.batch_norm( + input=conv, + act=act, + name=bn_name + '.output.1', + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance', ) + + # conv bn+relu + def conv_bn_layer_ac(self, + input, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + name=None): + """ ACNet conv bn """ + padding = (filter_size - 1) // 2 + + square_conv = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + 
filter_size=filter_size, + stride=stride, + padding=padding, + groups=groups, + act=act if self.deploy else None, + param_attr=ParamAttr(name=name + "_acsquare_weights"), + bias_attr=ParamAttr(name=name + "_acsquare_bias") + if self.deploy else False, + name=name + '.acsquare.conv2d.output.1') + + if self.deploy: + return square_conv + else: + ver_conv = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=(filter_size, 1), + stride=stride, + padding=(padding, 0), + groups=groups, + act=None, + param_attr=ParamAttr(name=name + "_acver_weights"), + bias_attr=False, + name=name + '.acver.conv2d.output.1') + + hor_conv = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=(1, filter_size), + stride=stride, + padding=(0, padding), + groups=groups, + act=None, + param_attr=ParamAttr(name=name + "_achor_weights"), + bias_attr=False, + name=name + '.achor.conv2d.output.1') + + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + + square_bn = fluid.layers.batch_norm( + input=square_conv, + act=None, + name=bn_name + '.acsquare.output.1', + param_attr=ParamAttr(name=bn_name + '_acsquare_scale'), + bias_attr=ParamAttr(bn_name + '_acsquare_offset'), + moving_mean_name=bn_name + '_acsquare_mean', + moving_variance_name=bn_name + '_acsquare_variance', ) + + ver_bn = fluid.layers.batch_norm( + input=ver_conv, + act=None, + name=bn_name + '.acver.output.1', + param_attr=ParamAttr(name=bn_name + '_acver_scale'), + bias_attr=ParamAttr(bn_name + '_acver_offset'), + moving_mean_name=bn_name + '_acver_mean', + moving_variance_name=bn_name + '_acver_variance', ) + + hor_bn = fluid.layers.batch_norm( + input=hor_conv, + act=None, + name=bn_name + '.achor.output.1', + param_attr=ParamAttr(name=bn_name + '_achor_scale'), + bias_attr=ParamAttr(bn_name + '_achor_offset'), + moving_mean_name=bn_name + '_achor_mean', + moving_variance_name=bn_name + '_achor_variance', ) + + return fluid.layers.elementwise_add( + 
x=square_bn, y=ver_bn + hor_bn, act=act)
+
+    def shortcut(self, input, ch_out, stride, is_first, name):
+        """ shortcut """
+        ch_in = input.shape[1]
+        if ch_in != ch_out or stride != 1 or is_first == True:
+            return self.conv_bn_layer(
+                input=input,
+                num_filters=ch_out,
+                filter_size=1,
+                stride=stride,
+                name=name)
+        else:
+            return input
+
+    def bottleneck_block(self, input, num_filters, stride, name):
+        """ bottleneck_block """
+        conv0 = self.conv_bn_layer(
+            input=input,
+            num_filters=num_filters,
+            filter_size=1,
+            act='relu',
+            name=name + "_branch2a")
+        conv1 = self.conv_bn_layer(
+            input=conv0,
+            num_filters=num_filters,
+            filter_size=3,
+            stride=stride,
+            act='relu',
+            name=name + "_branch2b")
+        conv2 = self.conv_bn_layer(
+            input=conv1,
+            num_filters=num_filters * 4,
+            filter_size=1,
+            act=None,
+            name=name + "_branch2c")
+
+        short = self.shortcut(
+            input,
+            num_filters * 4,
+            stride,
+            is_first=False,
+            name=name + "_branch1")
+
+        return fluid.layers.elementwise_add(
+            x=short, y=conv2, act='relu', name=name + ".add.output.5")
+
+    def basic_block(self, input, num_filters, stride, is_first, name):
+        """ basic_block """
+        conv0 = self.conv_bn_layer(
+            input=input,
+            num_filters=num_filters,
+            filter_size=3,
+            act='relu',
+            stride=stride,
+            name=name + "_branch2a")
+        conv1 = self.conv_bn_layer(
+            input=conv0,
+            num_filters=num_filters,
+            filter_size=3,
+            act=None,
+            name=name + "_branch2b")
+        short = self.shortcut(
+            input, num_filters, stride, is_first, name=name + "_branch1")
+        return fluid.layers.elementwise_add(x=short, y=conv1, act='relu')
+
+
+def ResNet18_ACNet(deploy=False):
+    """ResNet18 + ACNet"""
+    # fix: the class is named ResNetACNet; "ResNet_ACNet" is undefined and
+    # calling this factory previously raised NameError.
+    model = ResNetACNet(layers=18, deploy=deploy)
+    return model
+
+
+def ResNet34_ACNet(deploy=False):
+    """ResNet34 + ACNet"""
+    model = ResNetACNet(layers=34, deploy=deploy)
+    return model
+
+
+def ResNet50_ACNet(deploy=False):
+    """ResNet50 + ACNet"""
+    model = ResNetACNet(layers=50, deploy=deploy)
+    return model
+
+
+def 
ResNet101_ACNet(deploy=False): + """ResNet101 + ACNet""" + model = ResNetACNet(layers=101, deploy=deploy) + return model + + +def ResNet152_ACNet(deploy=False): + """ResNet152 + ACNet""" + model = ResNetACNet(layers=152, deploy=deploy) + return model diff --git a/ppcls/modeling/architectures/resnet_vc.py b/ppcls/modeling/architectures/resnet_vc.py new file mode 100644 index 000000000..36e7e5943 --- /dev/null +++ b/ppcls/modeling/architectures/resnet_vc.py @@ -0,0 +1,194 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +import paddle +import paddle.fluid as fluid +from paddle.fluid.param_attr import ParamAttr + +__all__ = ["ResNet", "ResNet50_vc", "ResNet101_vc", "ResNet152_vc"] + +train_parameters = { + "input_size": [3, 224, 224], + "input_mean": [0.485, 0.456, 0.406], + "input_std": [0.229, 0.224, 0.225], + "learning_strategy": { + "name": "piecewise_decay", + "batch_size": 256, + "epochs": [30, 60, 90], + "steps": [0.1, 0.01, 0.001, 0.0001] + } +} + + +class ResNet(): + def __init__(self, layers=50): + self.params = train_parameters + self.layers = layers + + def net(self, input, class_dim=1000): + layers = self.layers + supported_layers = [50, 101, 152] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format(supported_layers, layers) + + if layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + num_filters = [64, 128, 256, 512] + + conv = self.conv_bn_layer( + input=input, + num_filters=32, + filter_size=3, + stride=2, + act='relu', + name='conv1_1') + conv = self.conv_bn_layer( + input=conv, + num_filters=32, + filter_size=3, + stride=1, + act='relu', + name='conv1_2') + conv = self.conv_bn_layer( + input=conv, + num_filters=64, + filter_size=3, + stride=1, + act='relu', + name='conv1_3') + + conv = fluid.layers.pool2d( + input=conv, + pool_size=3, + pool_stride=2, + pool_padding=1, + pool_type='max') + + for block in range(len(depth)): + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + conv = self.bottleneck_block( + input=conv, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + name=conv_name) + + pool = 
fluid.layers.pool2d( + input=conv, pool_type='avg', global_pooling=True) + stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0) + out = fluid.layers.fc(input=pool, + size=class_dim, + param_attr=fluid.param_attr.ParamAttr( + name="fc_0.w_0", + initializer=fluid.initializer.Uniform(-stdv, + stdv)), + bias_attr=ParamAttr(name="fc_0.b_0")) + return out + + def conv_bn_layer(self, + input, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + name=None): + conv = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + act=None, + param_attr=ParamAttr(name=name + "_weights"), + bias_attr=False, + name=name + '.conv2d.output.1') + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + return fluid.layers.batch_norm( + input=conv, + act=act, + name=bn_name + '.output.1', + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance', ) + + def shortcut(self, input, ch_out, stride, name): + ch_in = input.shape[1] + if ch_in != ch_out or stride != 1: + return self.conv_bn_layer(input, ch_out, 1, stride, name=name) + else: + return input + + def bottleneck_block(self, input, num_filters, stride, name): + conv0 = self.conv_bn_layer( + input=input, + num_filters=num_filters, + filter_size=1, + act='relu', + name=name + "_branch2a") + conv1 = self.conv_bn_layer( + input=conv0, + num_filters=num_filters, + filter_size=3, + stride=stride, + act='relu', + name=name + "_branch2b") + conv2 = self.conv_bn_layer( + input=conv1, + num_filters=num_filters * 4, + filter_size=1, + act=None, + name=name + "_branch2c") + + short = self.shortcut( + input, num_filters * 4, stride, name=name + "_branch1") + + return fluid.layers.elementwise_add( + x=short, y=conv2, act='relu', name=name + ".add.output.5") + + +def ResNet50_vc(): + model = 
ResNet(layers=50) + return model + + +def ResNet101_vc(): + model = ResNet(layers=101) + return model + + +def ResNet152_vc(): + model = ResNet(layers=152) + return model diff --git a/ppcls/modeling/architectures/resnet_vd.py b/ppcls/modeling/architectures/resnet_vd.py new file mode 100644 index 000000000..8a9f99eac --- /dev/null +++ b/ppcls/modeling/architectures/resnet_vd.py @@ -0,0 +1,293 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr

__all__ = [
    "ResNet", "ResNet18_vd", "ResNet34_vd", "ResNet50_vd", "ResNet101_vd",
    "ResNet152_vd", "ResNet200_vd"
]


class ResNet():
    """ResNet-vd (ResNet-D variant) backbone.

    With ``is_3x3=True`` the 7x7 stem is replaced by three stacked 3x3
    convolutions, and downsampling shortcuts use avg-pool followed by a
    stride-1 1x1 conv (see ``conv_bn_layer_new``).
    """

    def __init__(self, layers=50, is_3x3=False):
        # layers: network depth, one of 18/34/50/101/152/200.
        # is_3x3: use the deep (three 3x3 convs) stem instead of one 7x7 conv.
        self.layers = layers
        self.is_3x3 = is_3x3

    def net(self, input, class_dim=1000):
        """Build the network on ``input`` and return the class_dim-way FC output."""
        is_3x3 = self.is_3x3
        layers = self.layers
        supported_layers = [18, 34, 50, 101, 152, 200]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)

        if layers == 18:
            depth = [2, 2, 2, 2]
        elif layers == 34 or layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        elif layers == 200:
            depth = [3, 12, 48, 3]
        num_filters = [64, 128, 256, 512]
        if not is_3x3:
            # Single 7x7 stem. NOTE: the original code passed no name here,
            # which crashed on ``None + "_weights"``; name it "conv1" so the
            # batch-norm naming rule below maps it to "bn_conv1".
            conv = self.conv_bn_layer(
                input=input,
                num_filters=64,
                filter_size=7,
                stride=2,
                act='relu',
                name='conv1')
        else:
            # Deep stem: three stacked 3x3 convolutions.
            conv = self.conv_bn_layer(
                input=input,
                num_filters=32,
                filter_size=3,
                stride=2,
                act='relu',
                name='conv1_1')
            conv = self.conv_bn_layer(
                input=conv,
                num_filters=32,
                filter_size=3,
                stride=1,
                act='relu',
                name='conv1_2')
            conv = self.conv_bn_layer(
                input=conv,
                num_filters=64,
                filter_size=3,
                stride=1,
                act='relu',
                name='conv1_3')

        conv = fluid.layers.pool2d(
            input=conv,
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max')

        if layers >= 50:
            for block in range(len(depth)):
                for i in range(depth[block]):
                    # Deep nets name stage-3 blocks "a", "b1", "b2", ...;
                    # shallower ones use "a", "b", "c", ...
                    if layers in [101, 152, 200] and block == 2:
                        if i == 0:
                            conv_name = "res" + str(block + 2) + "a"
                        else:
                            conv_name = "res" + str(block + 2) + "b" + str(i)
                    else:
                        conv_name = "res" + str(block + 2) + chr(97 + i)
                    conv = self.bottleneck_block(
                        input=conv,
                        num_filters=num_filters[block],
                        stride=2 if i == 0 and block != 0 else 1,
                        if_first=block == i == 0,
                        name=conv_name)
        else:
            for block in range(len(depth)):
                for i in range(depth[block]):
                    conv_name = "res" + str(block + 2) + chr(97 + i)
                    conv = self.basic_block(
                        input=conv,
                        num_filters=num_filters[block],
                        stride=2 if i == 0 and block != 0 else 1,
                        if_first=block == i == 0,
                        name=conv_name)

        pool = fluid.layers.pool2d(
            input=conv, pool_type='avg', global_pooling=True)
        stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)

        out = fluid.layers.fc(
            input=pool,
            size=class_dim,
            param_attr=fluid.param_attr.ParamAttr(
                name="fc_0.w_0",
                initializer=fluid.initializer.Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(name="fc_0.b_0"))

        return out

    def conv_bn_layer(self,
                      input,
                      num_filters,
                      filter_size,
                      stride=1,
                      groups=1,
                      act=None,
                      name=None):
        """Conv2d (no bias) + batch norm; ``act`` is applied by the BN layer."""
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            # e.g. "res2a_branch2a" -> "bn2a_branch2a"
            bn_name = "bn" + name[3:]
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(name=bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def conv_bn_layer_new(self,
                          input,
                          num_filters,
                          filter_size,
                          stride=1,
                          groups=1,
                          act=None,
                          name=None):
        """vd-style downsampling: avg-pool by ``stride``-like factor 2, then
        a stride-1 conv + batch norm (avoids the information loss of a
        strided 1x1 conv on the shortcut path)."""
        pool = fluid.layers.pool2d(
            input=input,
            pool_size=2,
            pool_stride=2,
            pool_padding=0,
            pool_type='avg',
            ceil_mode=True)

        conv = fluid.layers.conv2d(
            input=pool,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=1,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(name=bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def shortcut(self, input, ch_out, stride, name, if_first=False):
        """Identity when shapes match; otherwise a projection.

        The very first block (``if_first``) uses a plain strided conv; later
        downsampling blocks use the avg-pool variant (``conv_bn_layer_new``).
        """
        ch_in = input.shape[1]
        if ch_in != ch_out or stride != 1:
            if if_first:
                return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
            else:
                return self.conv_bn_layer_new(
                    input, ch_out, 1, stride, name=name)
        elif if_first:
            return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
        else:
            return input

    def bottleneck_block(self, input, num_filters, stride, name, if_first):
        """1x1 -> 3x3(stride) -> 1x1(x4) bottleneck with residual add."""
        conv0 = self.conv_bn_layer(
            input=input,
            num_filters=num_filters,
            filter_size=1,
            act='relu',
            name=name + "_branch2a")
        conv1 = self.conv_bn_layer(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act='relu',
            name=name + "_branch2b")
        conv2 = self.conv_bn_layer(
            input=conv1,
            num_filters=num_filters * 4,
            filter_size=1,
            act=None,
            name=name + "_branch2c")

        short = self.shortcut(
            input,
            num_filters * 4,
            stride,
            if_first=if_first,
            name=name + "_branch1")

        return fluid.layers.elementwise_add(x=short, y=conv2, act='relu')

    def basic_block(self, input, num_filters, stride, name, if_first):
        """Two 3x3 convs with residual add (ResNet-18/34 block)."""
        conv0 = self.conv_bn_layer(
            input=input,
            num_filters=num_filters,
            filter_size=3,
            act='relu',
            stride=stride,
            name=name + "_branch2a")
        conv1 = self.conv_bn_layer(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            act=None,
            name=name + "_branch2b")
        short = self.shortcut(
            input,
            num_filters,
            stride,
            if_first=if_first,
            name=name + "_branch1")
        return fluid.layers.elementwise_add(x=short, y=conv1, act='relu')


def ResNet18_vd():
    model = ResNet(layers=18, is_3x3=True)
    return model


def ResNet34_vd():
    model = ResNet(layers=34, is_3x3=True)
    return model


def ResNet50_vd():
    model = ResNet(layers=50, is_3x3=True)
    return model


def ResNet101_vd():
    model = ResNet(layers=101, is_3x3=True)
    return model


def ResNet152_vd():
    model = ResNet(layers=152, is_3x3=True)
    return model


def ResNet200_vd():
    model = ResNet(layers=200, is_3x3=True)
    return model


#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr

__all__ = [
    "ResNeXt", "ResNeXt50_64x4d", "ResNeXt101_64x4d", "ResNeXt152_64x4d",
    "ResNeXt50_32x4d", "ResNeXt101_32x4d", "ResNeXt152_32x4d"
]


class ResNeXt():
    """ResNeXt backbone (Aggregated Residual Transformations).

    ``cardinality`` is the number of grouped-convolution groups in each
    bottleneck (64x4d uses the wide filter table, 32x4d the narrow one).
    """

    def __init__(self, layers=50, cardinality=64):
        # layers: 50/101/152; cardinality: 32 or 64 groups per bottleneck.
        self.layers = layers
        self.cardinality = cardinality

    def net(self, input, class_dim=1000):
        """Build the network on ``input`` and return the class_dim-way FC output."""
        layers = self.layers
        cardinality = self.cardinality
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)

        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]

        # Per-stage output widths for 64x4d vs 32x4d configurations.
        num_filters1 = [256, 512, 1024, 2048]
        num_filters2 = [128, 256, 512, 1024]

        conv = self.conv_bn_layer(
            input=input,
            num_filters=64,
            filter_size=7,
            stride=2,
            act='relu',
            name="res_conv1")  #debug
        conv = fluid.layers.pool2d(
            input=conv,
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max')

        for block in range(len(depth)):
            for i in range(depth[block]):
                # Deep nets name stage-3 blocks "a", "b1", "b2", ...
                if layers in [101, 152] and block == 2:
                    if i == 0:
                        conv_name = "res" + str(block + 2) + "a"
                    else:
                        conv_name = "res" + str(block + 2) + "b" + str(i)
                else:
                    conv_name = "res" + str(block + 2) + chr(97 + i)
                conv = self.bottleneck_block(
                    input=conv,
                    num_filters=num_filters1[block]
                    if cardinality == 64 else num_filters2[block],
                    stride=2 if i == 0 and block != 0 else 1,
                    cardinality=cardinality,
                    name=conv_name)

        pool = fluid.layers.pool2d(
            input=conv, pool_type='avg', global_pooling=True)
        stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
        out = fluid.layers.fc(
            input=pool,
            size=class_dim,
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name='fc_weights'),
            bias_attr=fluid.param_attr.ParamAttr(name='fc_offset'))
        return out

    def conv_bn_layer(self,
                      input,
                      num_filters,
                      filter_size,
                      stride=1,
                      groups=1,
                      act=None,
                      name=None):
        """Conv2d (no bias) + batch norm; ``act`` is applied by the BN layer."""
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False,
            name=name + '.conv2d.output.1')
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            name=bn_name + '.output.1',
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(name=bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def shortcut(self, input, ch_out, stride, name):
        """Identity when shapes match, else a 1x1 projection conv+bn."""
        ch_in = input.shape[1]
        if ch_in != ch_out or stride != 1:
            return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
        else:
            return input

    def bottleneck_block(self, input, num_filters, stride, cardinality, name):
        """Grouped 1x1 -> 3x3(groups=cardinality) -> 1x1 bottleneck.

        NOTE: the original body overwrote the ``cardinality`` argument with
        ``self.cardinality``, silently ignoring the parameter. The shadow is
        removed; all internal callers pass ``self.cardinality``, so emitted
        ops are unchanged.
        """
        conv0 = self.conv_bn_layer(
            input=input,
            num_filters=num_filters,
            filter_size=1,
            act='relu',
            name=name + "_branch2a")
        conv1 = self.conv_bn_layer(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            groups=cardinality,
            act='relu',
            name=name + "_branch2b")
        conv2 = self.conv_bn_layer(
            input=conv1,
            num_filters=num_filters if cardinality == 64 else num_filters * 2,
            filter_size=1,
            act=None,
            name=name + "_branch2c")

        short = self.shortcut(
            input,
            num_filters if cardinality == 64 else num_filters * 2,
            stride,
            name=name + "_branch1")

        return fluid.layers.elementwise_add(
            x=short, y=conv2, act='relu', name=name + ".add.output.5")


def ResNeXt50_64x4d():
    model = ResNeXt(layers=50, cardinality=64)
    return model


def ResNeXt50_32x4d():
    model = ResNeXt(layers=50, cardinality=32)
    return model


def ResNeXt101_64x4d():
    model = ResNeXt(layers=101, cardinality=64)
    return model


def ResNeXt101_32x4d():
    model = ResNeXt(layers=101, cardinality=32)
    return model


def ResNeXt152_64x4d():
    model = ResNeXt(layers=152, cardinality=64)
    return model


def ResNeXt152_32x4d():
    model = ResNeXt(layers=152, cardinality=32)
    return model


#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr

__all__ = [
    "ResNeXt101_32x8d_wsl", "ResNeXt101_32x16d_wsl", "ResNeXt101_32x32d_wsl",
    "ResNeXt101_32x48d_wsl", "Fix_ResNeXt101_32x48d_wsl"
]


class ResNeXt101_wsl():
    """ResNeXt-101 WSL (weakly-supervised pretraining) backbone.

    Parameter names follow the torchvision layout ("layerN.M.convK",
    "...downsample", "fc.weight"), so converted WSL weights load directly.
    """

    def __init__(self, layers=101, cardinality=32, width=48):
        # cardinality: groups in the 3x3 conv; width: per-group width (8/16/32/48).
        self.layers = layers
        self.cardinality = cardinality
        self.width = width

    def net(self, input, class_dim=1000):
        """Build the network on ``input`` and return the class_dim-way FC output."""
        layers = self.layers
        cardinality = self.cardinality
        width = self.width

        depth = [3, 4, 23, 3]
        base_width = cardinality * width
        num_filters = [base_width * i for i in [1, 2, 4, 8]]

        conv = self.conv_bn_layer(
            input=input,
            num_filters=64,
            filter_size=7,
            stride=2,
            act='relu',
            name="conv1")  #debug
        conv = fluid.layers.pool2d(
            input=conv,
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max')

        for block in range(len(depth)):
            for i in range(depth[block]):
                conv_name = 'layer' + str(block + 1) + "." + str(i)
                conv = self.bottleneck_block(
                    input=conv,
                    num_filters=num_filters[block],
                    stride=2 if i == 0 and block != 0 else 1,
                    cardinality=cardinality,
                    name=conv_name)

        pool = fluid.layers.pool2d(
            input=conv, pool_type='avg', global_pooling=True)
        stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
        out = fluid.layers.fc(
            input=pool,
            size=class_dim,
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name='fc.weight'),
            bias_attr=fluid.param_attr.ParamAttr(name='fc.bias'))
        return out

    def conv_bn_layer(self,
                      input,
                      num_filters,
                      filter_size,
                      stride=1,
                      groups=1,
                      act=None,
                      name=None):
        """Conv2d (no bias) + batch norm, with torchvision-style parameter names."""
        if "downsample" in name:
            conv_name = name + '.0'
        else:
            conv_name = name
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            param_attr=ParamAttr(name=conv_name + ".weight"),
            bias_attr=False)
        # BN name is derived by string surgery on the conv name; this mirrors
        # torchvision's "layerN.M.bnK" / "layerN.M.downsample.1" layout.
        # NOTE(review): the slicing below is position-sensitive ("layerN.M" vs
        # "layerN.MM"); kept byte-identical — verify against converted weights
        # before touching it.
        if "downsample" in name:
            bn_name = name[:9] + 'downsample' + '.1'
        else:
            if "conv1" == name:
                bn_name = 'bn' + name[-1]
            else:
                bn_name = (name[:10] if name[7:9].isdigit() else name[:9]
                           ) + 'bn' + name[-1]
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            param_attr=ParamAttr(name=bn_name + '.weight'),
            bias_attr=ParamAttr(name=bn_name + '.bias'),
            moving_mean_name=bn_name + '.running_mean',
            moving_variance_name=bn_name + '.running_var', )

    def shortcut(self, input, ch_out, stride, name):
        """Identity when shapes match, else a 1x1 projection conv+bn."""
        ch_in = input.shape[1]
        if ch_in != ch_out or stride != 1:
            return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
        else:
            return input

    def bottleneck_block(self, input, num_filters, stride, cardinality, name):
        """Grouped bottleneck block (conv1 -> conv2(groups) -> conv3).

        NOTE: the original body overwrote the ``cardinality`` argument with
        ``self.cardinality``, silently ignoring the parameter. The shadow is
        removed; the only caller passes ``self.cardinality``, so emitted ops
        are unchanged. ``width`` is still read from the instance because it is
        not a parameter of this method.
        """
        width = self.width
        conv0 = self.conv_bn_layer(
            input=input,
            num_filters=num_filters,
            filter_size=1,
            act='relu',
            name=name + ".conv1")
        conv1 = self.conv_bn_layer(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            groups=cardinality,
            act='relu',
            name=name + ".conv2")
        conv2 = self.conv_bn_layer(
            input=conv1,
            num_filters=num_filters // (width // 8),
            filter_size=1,
            act=None,
            name=name + ".conv3")

        short = self.shortcut(
            input,
            num_filters // (width // 8),
            stride,
            name=name + ".downsample")

        return fluid.layers.elementwise_add(x=short, y=conv2, act='relu')


def ResNeXt101_32x8d_wsl():
    model = ResNeXt101_wsl(cardinality=32, width=8)
    return model


def ResNeXt101_32x16d_wsl():
    model = ResNeXt101_wsl(cardinality=32, width=16)
    return model


def ResNeXt101_32x32d_wsl():
    model = ResNeXt101_wsl(cardinality=32, width=32)
    return model


def ResNeXt101_32x48d_wsl():
    model = ResNeXt101_wsl(cardinality=32, width=48)
    return model


def Fix_ResNeXt101_32x48d_wsl():
    # Same topology as ResNeXt101_32x48d_wsl; "Fix" refers to the FixRes
    # evaluation recipe (different input resolution), not a code difference.
    model = ResNeXt101_wsl(cardinality=32, width=48)
    return model


#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
import math

__all__ = [
    "ResNeXt", "ResNeXt50_vd_64x4d", "ResNeXt101_vd_64x4d",
    "ResNeXt152_vd_64x4d", "ResNeXt50_vd_32x4d", "ResNeXt101_vd_32x4d",
    "ResNeXt152_vd_32x4d"
]


class ResNeXt():
    """ResNeXt-vd backbone: ResNeXt with the ResNet-D tricks (optional deep
    3x3 stem, avg-pool-before-1x1 downsampling shortcuts)."""

    def __init__(self, layers=50, is_3x3=False, cardinality=64):
        # layers: 50/101/152; is_3x3: deep stem; cardinality: 32 or 64 groups.
        self.layers = layers
        self.is_3x3 = is_3x3
        self.cardinality = cardinality

    def net(self, input, class_dim=1000):
        """Build the network on ``input`` and return the class_dim-way FC output."""
        is_3x3 = self.is_3x3
        layers = self.layers
        cardinality = self.cardinality
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)

        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        # Per-stage output widths for 64x4d vs 32x4d configurations.
        num_filters1 = [256, 512, 1024, 2048]
        num_filters2 = [128, 256, 512, 1024]

        if not is_3x3:
            # Single 7x7 stem. NOTE: the original code passed no name here,
            # which crashed on ``None + "_weights"``; name it "conv1" so the
            # batch-norm naming rule below maps it to "bn_conv1".
            conv = self.conv_bn_layer(
                input=input,
                num_filters=64,
                filter_size=7,
                stride=2,
                act='relu',
                name='conv1')
        else:
            # Deep stem: three stacked 3x3 convolutions.
            conv = self.conv_bn_layer(
                input=input,
                num_filters=32,
                filter_size=3,
                stride=2,
                act='relu',
                name='conv1_1')
            conv = self.conv_bn_layer(
                input=conv,
                num_filters=32,
                filter_size=3,
                stride=1,
                act='relu',
                name='conv1_2')
            conv = self.conv_bn_layer(
                input=conv,
                num_filters=64,
                filter_size=3,
                stride=1,
                act='relu',
                name='conv1_3')

        conv = fluid.layers.pool2d(
            input=conv,
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max')

        for block in range(len(depth)):
            for i in range(depth[block]):
                # The 200 case is unreachable (supported_layers caps at 152);
                # kept for naming parity with resnet_vd.py.
                if layers in [101, 152, 200] and block == 2:
                    if i == 0:
                        conv_name = "res" + str(block + 2) + "a"
                    else:
                        conv_name = "res" + str(block + 2) + "b" + str(i)
                else:
                    conv_name = "res" + str(block + 2) + chr(97 + i)
                conv = self.bottleneck_block(
                    input=conv,
                    num_filters=num_filters1[block]
                    if cardinality == 64 else num_filters2[block],
                    stride=2 if i == 0 and block != 0 else 1,
                    cardinality=cardinality,
                    if_first=block == 0,
                    name=conv_name)

        pool = fluid.layers.pool2d(
            input=conv, pool_type='avg', global_pooling=True)
        stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
        out = fluid.layers.fc(
            input=pool,
            size=class_dim,
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name='fc_weights'),
            bias_attr=fluid.param_attr.ParamAttr(name='fc_offset'))

        return out

    def conv_bn_layer(self,
                      input,
                      num_filters,
                      filter_size,
                      stride=1,
                      groups=1,
                      act=None,
                      name=None):
        """Conv2d (no bias) + batch norm; ``act`` is applied by the BN layer."""
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(name=bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def conv_bn_layer_new(self,
                          input,
                          num_filters,
                          filter_size,
                          stride=1,
                          groups=1,
                          act=None,
                          name=None):
        """vd-style downsampling: avg-pool (stride 2) then stride-1 conv + bn."""
        pool = fluid.layers.pool2d(
            input=input,
            pool_size=2,
            pool_stride=2,
            pool_padding=0,
            pool_type='avg',
            ceil_mode=True)

        conv = fluid.layers.conv2d(
            input=pool,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=1,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(name=bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def shortcut(self, input, ch_out, stride, name, if_first=False):
        """Identity when shapes match; otherwise a projection (plain conv for
        the first stage, avg-pool variant for later downsampling stages)."""
        ch_in = input.shape[1]
        if ch_in != ch_out or stride != 1:
            if if_first:
                return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
            else:
                return self.conv_bn_layer_new(
                    input, ch_out, 1, stride, name=name)
        else:
            return input

    def bottleneck_block(self, input, num_filters, stride, cardinality, name,
                         if_first):
        """Grouped 1x1 -> 3x3(groups=cardinality) -> 1x1 bottleneck with
        vd-style shortcut."""
        conv0 = self.conv_bn_layer(
            input=input,
            num_filters=num_filters,
            filter_size=1,
            act='relu',
            name=name + "_branch2a")
        conv1 = self.conv_bn_layer(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act='relu',
            groups=cardinality,
            name=name + "_branch2b")
        conv2 = self.conv_bn_layer(
            input=conv1,
            num_filters=num_filters if cardinality == 64 else num_filters * 2,
            filter_size=1,
            act=None,
            name=name + "_branch2c")

        short = self.shortcut(
            input,
            num_filters if cardinality == 64 else num_filters * 2,
            stride,
            if_first=if_first,
            name=name + "_branch1")

        return fluid.layers.elementwise_add(x=short, y=conv2, act='relu')


def ResNeXt50_vd_64x4d():
    model = ResNeXt(layers=50, is_3x3=True)
    return model


def ResNeXt50_vd_32x4d():
    model = ResNeXt(layers=50, cardinality=32, is_3x3=True)
    return model


def ResNeXt101_vd_64x4d():
    model = ResNeXt(layers=101, is_3x3=True)
    return model


def ResNeXt101_vd_32x4d():
    model = ResNeXt(layers=101, cardinality=32, is_3x3=True)
    return model


def ResNeXt152_vd_64x4d():
    model = ResNeXt(layers=152, is_3x3=True)
    return model


def ResNeXt152_vd_32x4d():
    model = ResNeXt(layers=152, cardinality=32, is_3x3=True)
    return model


#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr

__all__ = [
    "SE_ResNet_vd", "SE_ResNet18_vd", "SE_ResNet34_vd", "SE_ResNet50_vd",
    "SE_ResNet101_vd", "SE_ResNet152_vd", "SE_ResNet200_vd"
]


class SE_ResNet_vd():
    """ResNet-vd backbone with Squeeze-and-Excitation blocks."""

    def __init__(self, layers=50, is_3x3=False):
        # layers: 18/34/50/101/152/200; is_3x3: deep (three 3x3 convs) stem.
        self.layers = layers
        self.is_3x3 = is_3x3

    def net(self, input, class_dim=1000):
        """Build the network on ``input`` and return the class_dim-way FC output."""
        is_3x3 = self.is_3x3
        layers = self.layers
        supported_layers = [18, 34, 50, 101, 152, 200]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)

        if layers == 18:
            depth = [2, 2, 2, 2]
        elif layers == 34 or layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        elif layers == 200:
            depth = [3, 12, 48, 3]
        num_filters = [64, 128, 256, 512]
        reduction_ratio = 16
        if not is_3x3:
            # Single 7x7 stem. NOTE: the original code passed no name here,
            # which crashed on ``None + "_weights"``; name it "conv1" so the
            # batch-norm naming rule below maps it to "bn_conv1".
            conv = self.conv_bn_layer(
                input=input,
                num_filters=64,
                filter_size=7,
                stride=2,
                act='relu',
                name='conv1')
        else:
            # Deep stem: three stacked 3x3 convolutions.
            conv = self.conv_bn_layer(
                input=input,
                num_filters=32,
                filter_size=3,
                stride=2,
                act='relu',
                name='conv1_1')
            conv = self.conv_bn_layer(
                input=conv,
                num_filters=32,
                filter_size=3,
                stride=1,
                act='relu',
                name='conv1_2')
            conv = self.conv_bn_layer(
                input=conv,
                num_filters=64,
                filter_size=3,
                stride=1,
                act='relu',
                name='conv1_3')

        conv = fluid.layers.pool2d(
            input=conv,
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max')
        if layers >= 50:
            for block in range(len(depth)):
                for i in range(depth[block]):
                    # Deep nets name stage-3 blocks "a", "b1", "b2", ...
                    if layers in [101, 152, 200] and block == 2:
                        if i == 0:
                            conv_name = "res" + str(block + 2) + "a"
                        else:
                            conv_name = "res" + str(block + 2) + "b" + str(i)
                    else:
                        conv_name = "res" + str(block + 2) + chr(97 + i)
                    conv = self.bottleneck_block(
                        input=conv,
                        num_filters=num_filters[block],
                        stride=2 if i == 0 and block != 0 else 1,
                        if_first=block == i == 0,
                        reduction_ratio=reduction_ratio,
                        name=conv_name)
        else:
            for block in range(len(depth)):
                for i in range(depth[block]):
                    conv_name = "res" + str(block + 2) + chr(97 + i)
                    conv = self.basic_block(
                        input=conv,
                        num_filters=num_filters[block],
                        stride=2 if i == 0 and block != 0 else 1,
                        if_first=block == i == 0,
                        reduction_ratio=reduction_ratio,
                        name=conv_name)

        # pool_size is ignored when global_pooling=True; kept for parity with
        # the original code.
        pool = fluid.layers.pool2d(
            input=conv, pool_size=7, pool_type='avg', global_pooling=True)

        stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
        out = fluid.layers.fc(
            input=pool,
            size=class_dim,
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name='fc6_weights'),
            bias_attr=ParamAttr(name='fc6_offset'))

        return out

    def conv_bn_layer(self,
                      input,
                      num_filters,
                      filter_size,
                      stride=1,
                      groups=1,
                      act=None,
                      name=None):
        """Conv2d (no bias) + batch norm; ``act`` is applied by the BN layer."""
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(name=bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def conv_bn_layer_new(self,
                          input,
                          num_filters,
                          filter_size,
                          stride=1,
                          groups=1,
                          act=None,
                          name=None):
        """vd-style downsampling: avg-pool (stride 2) then stride-1 conv + bn."""
        pool = fluid.layers.pool2d(
            input=input,
            pool_size=2,
            pool_stride=2,
            pool_padding=0,
            pool_type='avg',
            ceil_mode=True)

        conv = fluid.layers.conv2d(
            input=pool,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=1,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(name=bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def shortcut(self, input, ch_out, stride, name, if_first=False):
        """Identity when shapes match; otherwise a projection (plain conv for
        the first block, avg-pool variant for later downsampling blocks)."""
        ch_in = input.shape[1]
        if ch_in != ch_out or stride != 1:
            if if_first:
                return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
            else:
                return self.conv_bn_layer_new(
                    input, ch_out, 1, stride, name=name)
        elif if_first:
            return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
        else:
            return input

    def bottleneck_block(self, input, num_filters, stride, name, if_first,
                         reduction_ratio):
        """Bottleneck block followed by an SE recalibration before the add."""
        conv0 = self.conv_bn_layer(
            input=input,
            num_filters=num_filters,
            filter_size=1,
            act='relu',
            name=name + "_branch2a")
        conv1 = self.conv_bn_layer(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act='relu',
            name=name + "_branch2b")
        conv2 = self.conv_bn_layer(
            input=conv1,
            num_filters=num_filters * 4,
            filter_size=1,
            act=None,
            name=name + "_branch2c")
        scale = self.squeeze_excitation(
            input=conv2,
            num_channels=num_filters * 4,
            reduction_ratio=reduction_ratio,
            name='fc_' + name)

        short = self.shortcut(
            input,
            num_filters * 4,
            stride,
            if_first=if_first,
            name=name + "_branch1")

        return fluid.layers.elementwise_add(x=short, y=scale, act='relu')

    def basic_block(self, input, num_filters, stride, name, if_first,
                    reduction_ratio):
        """Two-conv basic block with SE recalibration (ResNet-18/34 variant)."""
        conv0 = self.conv_bn_layer(
            input=input,
            num_filters=num_filters,
            filter_size=3,
            act='relu',
            stride=stride,
            name=name + "_branch2a")
        conv1 = self.conv_bn_layer(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            act=None,
            name=name + "_branch2b")
        scale = self.squeeze_excitation(
            input=conv1,
            num_channels=num_filters,
            reduction_ratio=reduction_ratio,
            name='fc_' + name)
        short = self.shortcut(
            input,
            num_filters,
            stride,
            if_first=if_first,
            name=name + "_branch1")
        return fluid.layers.elementwise_add(x=short, y=scale, act='relu')

    def squeeze_excitation(self,
                           input,
                           num_channels,
                           reduction_ratio,
                           name=None):
        """SE module: global avg-pool -> FC(relu) -> FC(sigmoid) -> rescale."""
        # pool_size is ignored when global_pooling=True.
        pool = fluid.layers.pool2d(
            input=input, pool_size=0, pool_type='avg', global_pooling=True)
        stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
        squeeze = fluid.layers.fc(
            input=pool,
            size=num_channels // reduction_ratio,
            act='relu',
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name=name + '_sqz_weights'),
            bias_attr=ParamAttr(name=name + '_sqz_offset'))
        stdv = 1.0 / math.sqrt(squeeze.shape[1] * 1.0)
        excitation = fluid.layers.fc(
            input=squeeze,
            size=num_channels,
            act='sigmoid',
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name=name + '_exc_weights'),
            bias_attr=ParamAttr(name=name + '_exc_offset'))
        # Broadcast the per-channel gates over H and W.
        scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
        return scale


def SE_ResNet18_vd():
    model = SE_ResNet_vd(layers=18, is_3x3=True)
    return model


def SE_ResNet34_vd():
    model = SE_ResNet_vd(layers=34, is_3x3=True)
    return model


def SE_ResNet50_vd():
    model = SE_ResNet_vd(layers=50, is_3x3=True)
    return model


def SE_ResNet101_vd():
    model = SE_ResNet_vd(layers=101, is_3x3=True)
    return model


def SE_ResNet152_vd():
    model = SE_ResNet_vd(layers=152, is_3x3=True)
    return model


def SE_ResNet200_vd():
    model = SE_ResNet_vd(layers=200, is_3x3=True)
    return model


#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
class SE_ResNeXt():
    """SE-ResNeXt backbone (32x4d for 50/101 layers, 64x4d for 152 layers)
    built with the PaddlePaddle 1.x ``fluid`` static-graph API.

    The object only stores configuration; ``net`` builds the graph for a
    given input variable and returns the classifier logits.
    """

    def __init__(self, layers=50):
        # layers: network depth; one of 50, 101, 152 (validated in net())
        self.layers = layers

    def net(self, input, class_dim=1000):
        """Build the SE-ResNeXt graph on *input*.

        Args:
            input: 4-D input variable (NCHW image batch).
            class_dim: number of output classes for the final fc layer.

        Returns:
            The un-activated fc logits variable of shape [N, class_dim].
        """
        layers = self.layers
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)

        if layers in (50, 101):
            # the 50- and 101-layer configs differ only in the number of
            # bottleneck repeats per stage (originally duplicated branches)
            cardinality = 32
            reduction_ratio = 16
            depth = [3, 4, 6, 3] if layers == 50 else [3, 4, 23, 3]
            num_filters = [128, 256, 512, 1024]
            # single 7x7 stem conv
            conv = self.conv_bn_layer(
                input=input,
                num_filters=64,
                filter_size=7,
                stride=2,
                act='relu',
                name='conv1')
        else:  # layers == 152: deeper stem of three 3x3 convolutions
            cardinality = 64
            reduction_ratio = 16
            depth = [3, 8, 36, 3]
            num_filters = [128, 256, 512, 1024]
            conv = self.conv_bn_layer(
                input=input,
                num_filters=64,
                filter_size=3,
                stride=2,
                act='relu',
                name='conv1')
            conv = self.conv_bn_layer(
                input=conv,
                num_filters=64,
                filter_size=3,
                stride=1,
                act='relu',
                name='conv2')
            conv = self.conv_bn_layer(
                input=conv,
                num_filters=128,
                filter_size=3,
                stride=1,
                act='relu',
                name='conv3')
        # identical 3x3/stride-2 max pool follows every stem variant
        conv = fluid.layers.pool2d(
            input=conv,
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max',
            use_cudnn=False)

        # stage index used in layer names starts at 2 for the single-conv
        # stem and at 4 for the three-conv stem, matching pretrained-weight
        # naming conventions
        n = 1 if layers == 50 or layers == 101 else 3
        for block in range(len(depth)):
            n += 1
            for i in range(depth[block]):
                conv = self.bottleneck_block(
                    input=conv,
                    num_filters=num_filters[block],
                    # first unit of each stage (except the first) downsamples
                    stride=2 if i == 0 and block != 0 else 1,
                    cardinality=cardinality,
                    reduction_ratio=reduction_ratio,
                    name=str(n) + '_' + str(i + 1))

        pool = fluid.layers.pool2d(
            input=conv, pool_type='avg', global_pooling=True,
            use_cudnn=False)
        drop = fluid.layers.dropout(x=pool, dropout_prob=0.5)
        stdv = 1.0 / math.sqrt(drop.shape[1] * 1.0)
        out = fluid.layers.fc(
            input=drop,
            size=class_dim,
            param_attr=ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name='fc6_weights'),
            bias_attr=ParamAttr(name='fc6_offset'))
        return out

    def shortcut(self, input, ch_out, stride, name):
        """Identity shortcut, or a 1x1 projection when the channel count or
        spatial stride changes across the residual block."""
        ch_in = input.shape[1]
        if ch_in != ch_out or stride != 1:
            filter_size = 1
            return self.conv_bn_layer(
                input,
                ch_out,
                filter_size,
                stride,
                name='conv' + name + '_prj')
        else:
            return input

    def bottleneck_block(self,
                         input,
                         num_filters,
                         stride,
                         cardinality,
                         reduction_ratio,
                         name=None):
        """One SE-ResNeXt bottleneck: 1x1 reduce -> grouped 3x3 -> 1x1
        expand, SE channel re-weighting, then a residual add + relu."""
        conv0 = self.conv_bn_layer(
            input=input,
            num_filters=num_filters,
            filter_size=1,
            act='relu',
            name='conv' + name + '_x1')
        conv1 = self.conv_bn_layer(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            groups=cardinality,
            act='relu',
            name='conv' + name + '_x2')
        conv2 = self.conv_bn_layer(
            input=conv1,
            num_filters=num_filters * 2,
            filter_size=1,
            act=None,
            name='conv' + name + '_x3')
        scale = self.squeeze_excitation(
            input=conv2,
            num_channels=num_filters * 2,
            reduction_ratio=reduction_ratio,
            name='fc' + name)

        short = self.shortcut(input, num_filters * 2, stride, name=name)

        # relu is applied after the residual addition
        return fluid.layers.elementwise_add(x=short, y=scale, act='relu')

    def conv_bn_layer(self,
                      input,
                      num_filters,
                      filter_size,
                      stride=1,
                      groups=1,
                      act=None,
                      name=None):
        """conv2d (bias-free, "same"-style padding) followed by batch_norm;
        *act* is applied by the batch_norm layer."""
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            bias_attr=False,
            param_attr=ParamAttr(name=name + '_weights'), )
        bn_name = name + "_bn"
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def squeeze_excitation(self,
                           input,
                           num_channels,
                           reduction_ratio,
                           name=None):
        """Squeeze-and-Excitation: global avg pool -> bottleneck fc (relu)
        -> fc (sigmoid) gates, multiplied back onto *input* per channel."""
        pool = fluid.layers.pool2d(
            input=input, pool_type='avg', global_pooling=True,
            use_cudnn=False)
        stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
        squeeze = fluid.layers.fc(
            input=pool,
            size=num_channels // reduction_ratio,
            act='relu',
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name=name + '_sqz_weights'),
            bias_attr=ParamAttr(name=name + '_sqz_offset'))
        stdv = 1.0 / math.sqrt(squeeze.shape[1] * 1.0)
        excitation = fluid.layers.fc(
            input=squeeze,
            size=num_channels,
            act='sigmoid',
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name=name + '_exc_weights'),
            bias_attr=ParamAttr(name=name + '_exc_offset'))
        # axis=0 broadcasts the [N, C] gate over the [N, C, H, W] feature map
        scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
        return scale


def SE_ResNeXt50_32x4d():
    """SE-ResNeXt-50 with 32 groups of width 4."""
    model = SE_ResNeXt(layers=50)
    return model


def SE_ResNeXt101_32x4d():
    """SE-ResNeXt-101 with 32 groups of width 4."""
    model = SE_ResNeXt(layers=101)
    return model


def SE_ResNeXt152_32x4d():
    """SE-ResNeXt-152 (64 groups of width 4 internally)."""
    model = SE_ResNeXt(layers=152)
    return model
# NOTE(review): the original __all__ listed "SE_ResNeXt50_32x4d_vd" and
# "SE_ResNeXt101_32x4d_vd", which do not match the factory functions actually
# defined below (SE_ResNeXt50_vd_32x4d / SE_ResNeXt101_vd_32x4d); fixed so
# the exported names are real.
__all__ = [
    "SE_ResNeXt_vd", "SE_ResNeXt50_vd_32x4d", "SE_ResNeXt101_vd_32x4d",
    "SENet154_vd"
]


class SE_ResNeXt_vd():
    """SE-ResNeXt-vd backbone (``fluid`` static graph).

    The "vd" variant uses a deep stem of three 3x3 convolutions and
    avg-pool-based downsampling in projection shortcuts.
    """

    def __init__(self, layers=50):
        # layers: network depth; one of 50, 101, 152 (validated in net())
        self.layers = layers

    def net(self, input, class_dim=1000):
        """Build the graph on *input* and return the fc logits.

        Args:
            input: 4-D input variable (NCHW image batch).
            class_dim: number of output classes.
        """
        layers = self.layers
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)

        # Per-depth configuration only; the stem is identical for every
        # supported depth and was originally triplicated across branches.
        reduction_ratio = 16
        if layers == 50:
            cardinality = 32
            depth = [3, 4, 6, 3]
            num_filters = [128, 256, 512, 1024]
        elif layers == 101:
            cardinality = 32
            depth = [3, 4, 23, 3]
            num_filters = [128, 256, 512, 1024]
        else:  # layers == 152
            cardinality = 64
            depth = [3, 8, 36, 3]
            num_filters = [256, 512, 1024, 2048]

        # vd deep stem: three 3x3 convs replace the classic 7x7 conv
        conv = self.conv_bn_layer(
            input=input,
            num_filters=64,
            filter_size=3,
            stride=2,
            act='relu',
            name='conv1_1')
        conv = self.conv_bn_layer(
            input=conv,
            num_filters=64,
            filter_size=3,
            stride=1,
            act='relu',
            name='conv1_2')
        conv = self.conv_bn_layer(
            input=conv,
            num_filters=128,
            filter_size=3,
            stride=1,
            act='relu',
            name='conv1_3')
        conv = fluid.layers.pool2d(
            input=conv,
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max')

        # stage index for layer names; matches pretrained-weight naming
        n = 1 if layers == 50 or layers == 101 else 3
        for block in range(len(depth)):
            n += 1
            for i in range(depth[block]):
                conv = self.bottleneck_block(
                    input=conv,
                    num_filters=num_filters[block],
                    # first unit of stages 2..4 downsamples
                    stride=2 if i == 0 and block != 0 else 1,
                    cardinality=cardinality,
                    reduction_ratio=reduction_ratio,
                    if_first=block == 0,
                    name=str(n) + '_' + str(i + 1))

        pool = fluid.layers.pool2d(
            input=conv, pool_type='avg', global_pooling=True)
        if layers == 152:
            # only the deepest model applies dropout before the classifier
            pool = fluid.layers.dropout(x=pool, dropout_prob=0.2)
        stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
        out = fluid.layers.fc(
            input=pool,
            size=class_dim,
            param_attr=ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name='fc6_weights'),
            bias_attr=ParamAttr(name='fc6_offset'))

        return out

    def shortcut(self, input, ch_out, stride, name, if_first=False):
        """Identity shortcut, or a 1x1 projection when shape changes.

        For non-first stages the vd variant uses the avg-pool projection
        (``conv_bn_layer_new``) to avoid information loss from a strided
        1x1 conv.
        """
        ch_in = input.shape[1]
        if ch_in != ch_out or stride != 1:
            filter_size = 1
            if if_first:
                return self.conv_bn_layer(
                    input,
                    ch_out,
                    filter_size,
                    stride,
                    name='conv' + name + '_prj')
            else:
                return self.conv_bn_layer_new(
                    input,
                    ch_out,
                    filter_size,
                    stride,
                    name='conv' + name + '_prj')
        else:
            return input

    def bottleneck_block(self,
                         input,
                         num_filters,
                         stride,
                         cardinality,
                         reduction_ratio,
                         if_first,
                         name=None):
        """SE-ResNeXt bottleneck with vd shortcut selection."""
        conv0 = self.conv_bn_layer(
            input=input,
            num_filters=num_filters,
            filter_size=1,
            act='relu',
            name='conv' + name + '_x1')
        conv1 = self.conv_bn_layer(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            groups=cardinality,
            act='relu',
            name='conv' + name + '_x2')
        if cardinality == 64:
            # SENet-154 style: halve width before the expand conv so the
            # block's output channel count matches the 32-group variants
            num_filters = num_filters // 2
        conv2 = self.conv_bn_layer(
            input=conv1,
            num_filters=num_filters * 2,
            filter_size=1,
            act=None,
            name='conv' + name + '_x3')
        scale = self.squeeze_excitation(
            input=conv2,
            num_channels=num_filters * 2,
            reduction_ratio=reduction_ratio,
            name='fc' + name)

        short = self.shortcut(
            input, num_filters * 2, stride, if_first=if_first, name=name)

        return fluid.layers.elementwise_add(x=short, y=scale, act='relu')

    def conv_bn_layer(self,
                      input,
                      num_filters,
                      filter_size,
                      stride=1,
                      groups=1,
                      act=None,
                      name=None):
        """conv2d (bias-free) + batch_norm; *act* applied by batch_norm."""
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            bias_attr=False,
            param_attr=ParamAttr(name=name + '_weights'), )
        bn_name = name + "_bn"
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def conv_bn_layer_new(self,
                          input,
                          num_filters,
                          filter_size,
                          stride=1,
                          groups=1,
                          act=None,
                          name=None):
        """vd projection: 2x2 avg pool (does the striding) followed by a
        stride-1 conv + batch_norm."""
        pool = fluid.layers.pool2d(
            input=input,
            pool_size=2,
            pool_stride=2,
            pool_padding=0,
            pool_type='avg',
            ceil_mode=True)

        conv = fluid.layers.conv2d(
            input=pool,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=1,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        bn_name = name + "_bn"
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def squeeze_excitation(self,
                           input,
                           num_channels,
                           reduction_ratio,
                           name=None):
        """Squeeze-and-Excitation channel gating (see se_resnext.py)."""
        pool = fluid.layers.pool2d(
            input=input, pool_type='avg', global_pooling=True)
        stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
        squeeze = fluid.layers.fc(
            input=pool,
            size=num_channels // reduction_ratio,
            act='relu',
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name=name + '_sqz_weights'),
            bias_attr=ParamAttr(name=name + '_sqz_offset'))
        stdv = 1.0 / math.sqrt(squeeze.shape[1] * 1.0)
        excitation = fluid.layers.fc(
            input=squeeze,
            size=num_channels,
            act='sigmoid',
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name=name + '_exc_weights'),
            bias_attr=ParamAttr(name=name + '_exc_offset'))
        # axis=0 broadcasts the [N, C] gate over the [N, C, H, W] features
        scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
        return scale


def SE_ResNeXt50_vd_32x4d():
    """SE-ResNeXt-50-vd, 32 groups of width 4."""
    model = SE_ResNeXt_vd(layers=50)
    return model


def SE_ResNeXt101_vd_32x4d():
    """SE-ResNeXt-101-vd, 32 groups of width 4."""
    model = SE_ResNeXt_vd(layers=101)
    return model


def SENet154_vd():
    """SENet-154-vd (the 152-layer, 64-group configuration)."""
    model = SE_ResNeXt_vd(layers=152)
    return model
class ShuffleNetV2():
    """ShuffleNetV2 backbone (``fluid`` static graph).

    *scale* is the width multiplier and selects a fixed per-stage channel
    table; supported values are 0.25, 0.33, 0.5, 1.0, 1.5 and 2.0.
    """

    def __init__(self, scale=1.0):
        # scale: width multiplier (validated in net())
        self.scale = scale

    def net(self, input, class_dim=1000):
        """Build the network on *input* and return fc logits.

        Raises:
            NotImplementedError: if *scale* is not a supported multiplier.
        """
        scale = self.scale
        stage_repeats = [4, 8, 4]

        # per-scale channel table; index 0 is unused, index 1 is the stem,
        # indices 2-4 are stages, the last entry is conv5
        scale_channels = {
            0.25: [-1, 24, 24, 48, 96, 512],
            0.33: [-1, 24, 32, 64, 128, 512],
            0.5: [-1, 24, 48, 96, 192, 1024],
            1.0: [-1, 24, 116, 232, 464, 1024],
            1.5: [-1, 24, 176, 352, 704, 1024],
            2.0: [-1, 24, 224, 488, 976, 2048],
        }
        if scale not in scale_channels:
            raise NotImplementedError("This scale size:[" + str(scale) +
                                      "] is not implemented!")
        stage_out_channels = scale_channels[scale]

        # stem: 3x3 stride-2 conv followed by 3x3 stride-2 max pool
        input_channel = stage_out_channels[1]
        conv1 = self.conv_bn_layer(
            input=input,
            filter_size=3,
            num_filters=input_channel,
            padding=1,
            stride=2,
            name='stage1_conv')
        conv = fluid.layers.pool2d(
            input=conv1,
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max')

        # stages 2-4: the first unit of each stage downsamples
        # (benchmodel=2); the remaining units keep the resolution
        for idxstage in range(len(stage_repeats)):
            numrepeat = stage_repeats[idxstage]
            output_channel = stage_out_channels[idxstage + 2]
            for i in range(numrepeat):
                conv = self.inverted_residual_unit(
                    input=conv,
                    num_filters=output_channel,
                    stride=2 if i == 0 else 1,
                    benchmodel=2 if i == 0 else 1,
                    name=str(idxstage + 2) + '_' + str(i + 1))

        conv_last = self.conv_bn_layer(
            input=conv,
            filter_size=1,
            num_filters=stage_out_channels[-1],
            padding=0,
            stride=1,
            name='conv5')
        pool_last = fluid.layers.pool2d(
            input=conv_last,
            pool_size=7,
            pool_stride=1,
            pool_padding=0,
            pool_type='avg')

        output = fluid.layers.fc(input=pool_last,
                                 size=class_dim,
                                 param_attr=ParamAttr(
                                     initializer=MSRA(), name='fc6_weights'),
                                 bias_attr=ParamAttr(name='fc6_offset'))
        return output

    def conv_bn_layer(self,
                      input,
                      filter_size,
                      num_filters,
                      stride,
                      padding,
                      num_groups=1,
                      use_cudnn=True,
                      if_act=True,
                      name=None):
        """conv2d (bias-free, MSRA init) + batch_norm.

        When *if_act* is True, relu is fused into the batch_norm layer.
        """
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            act=None,
            use_cudnn=use_cudnn,
            param_attr=ParamAttr(
                initializer=MSRA(), name=name + '_weights'),
            bias_attr=False)
        # NOTE: an unused output-size computation
        # (int((input.shape[2] - 1) / float(stride) + 1)) was removed here
        # as dead code.
        bn_name = name + '_bn'
        return fluid.layers.batch_norm(
            input=conv,
            act='relu' if if_act else None,
            param_attr=ParamAttr(name=bn_name + "_scale"),
            bias_attr=ParamAttr(name=bn_name + "_offset"),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def channel_shuffle(self, x, groups):
        """Interleave channels across *groups* so the two branch outputs
        exchange information (reshape -> transpose -> flatten)."""
        batchsize, num_channels, height, width = x.shape[0], x.shape[
            1], x.shape[2], x.shape[3]
        channels_per_group = num_channels // groups

        # reshape into [N, groups, C/groups, H, W]
        x = fluid.layers.reshape(
            x=x, shape=[batchsize, groups, channels_per_group, height, width])

        # swap the group and channel axes
        x = fluid.layers.transpose(x=x, perm=[0, 2, 1, 3, 4])

        # flatten back to [N, C, H, W]
        x = fluid.layers.reshape(
            x=x, shape=[batchsize, num_channels, height, width])

        return x

    def inverted_residual_unit(self,
                               input,
                               num_filters,
                               stride,
                               benchmodel,
                               name=None):
        """ShuffleNetV2 unit.

        benchmodel=1: channel split, transform one half, concat (stride 1).
        benchmodel=2: two parallel downsampling branches, concat (stride 2).
        Both end with a channel shuffle across the two halves.
        """
        assert stride in [1, 2], \
            "supported stride are {} but your stride is {}".format([1,2], stride)

        oup_inc = num_filters // 2
        inp = input.shape[1]

        if benchmodel == 1:
            x1, x2 = fluid.layers.split(
                input,
                num_or_sections=[input.shape[1] // 2, input.shape[1] // 2],
                dim=1)

            conv_pw = self.conv_bn_layer(
                input=x2,
                num_filters=oup_inc,
                filter_size=1,
                stride=1,
                padding=0,
                num_groups=1,
                if_act=True,
                name='stage_' + name + '_conv1')

            # depthwise 3x3 (groups == channels); cudnn disabled for
            # depthwise convs
            conv_dw = self.conv_bn_layer(
                input=conv_pw,
                num_filters=oup_inc,
                filter_size=3,
                stride=stride,
                padding=1,
                num_groups=oup_inc,
                if_act=False,
                use_cudnn=False,
                name='stage_' + name + '_conv2')

            conv_linear = self.conv_bn_layer(
                input=conv_dw,
                num_filters=oup_inc,
                filter_size=1,
                stride=1,
                padding=0,
                num_groups=1,
                if_act=True,
                name='stage_' + name + '_conv3')

            out = fluid.layers.concat([x1, conv_linear], axis=1)

        else:
            # branch1: depthwise 3x3 + pointwise 1x1 on the full input
            conv_dw_1 = self.conv_bn_layer(
                input=input,
                num_filters=inp,
                filter_size=3,
                stride=stride,
                padding=1,
                num_groups=inp,
                if_act=False,
                use_cudnn=False,
                name='stage_' + name + '_conv4')

            conv_linear_1 = self.conv_bn_layer(
                input=conv_dw_1,
                num_filters=oup_inc,
                filter_size=1,
                stride=1,
                padding=0,
                num_groups=1,
                if_act=True,
                name='stage_' + name + '_conv5')

            # branch2: pointwise -> depthwise -> pointwise
            conv_pw_2 = self.conv_bn_layer(
                input=input,
                num_filters=oup_inc,
                filter_size=1,
                stride=1,
                padding=0,
                num_groups=1,
                if_act=True,
                name='stage_' + name + '_conv1')

            conv_dw_2 = self.conv_bn_layer(
                input=conv_pw_2,
                num_filters=oup_inc,
                filter_size=3,
                stride=stride,
                padding=1,
                num_groups=oup_inc,
                if_act=False,
                use_cudnn=False,
                name='stage_' + name + '_conv2')

            conv_linear_2 = self.conv_bn_layer(
                input=conv_dw_2,
                num_filters=oup_inc,
                filter_size=1,
                stride=1,
                padding=0,
                num_groups=1,
                if_act=True,
                name='stage_' + name + '_conv3')
            out = fluid.layers.concat([conv_linear_1, conv_linear_2], axis=1)

        return self.channel_shuffle(out, 2)


def ShuffleNetV2_x0_25():
    """ShuffleNetV2 with width multiplier 0.25."""
    model = ShuffleNetV2(scale=0.25)
    return model


def ShuffleNetV2_x0_33():
    """ShuffleNetV2 with width multiplier 0.33."""
    model = ShuffleNetV2(scale=0.33)
    return model


def ShuffleNetV2_x0_5():
    """ShuffleNetV2 with width multiplier 0.5."""
    model = ShuffleNetV2(scale=0.5)
    return model


def ShuffleNetV2_x1_0():
    """ShuffleNetV2 with width multiplier 1.0."""
    model = ShuffleNetV2(scale=1.0)
    return model


def ShuffleNetV2_x1_5():
    """ShuffleNetV2 with width multiplier 1.5."""
    model = ShuffleNetV2(scale=1.5)
    return model


def ShuffleNetV2_x2_0():
    """ShuffleNetV2 with width multiplier 2.0."""
    model = ShuffleNetV2(scale=2.0)
    return model
class ShuffleNetV2_swish():
    """ShuffleNetV2 variant using the swish activation after batch norm.

    *scale* is the width multiplier; supported values are 0.5, 1.0, 1.5
    and 2.0.
    """

    def __init__(self, scale=1.0):
        # scale: width multiplier (validated in net())
        self.scale = scale

    def net(self, input, class_dim=1000):
        """Build the network on *input* and return fc logits.

        Raises:
            NotImplementedError: if *scale* is not a supported multiplier.
        """
        scale = self.scale
        stage_repeats = [4, 8, 4]

        # per-scale channel table (see ShuffleNetV2)
        scale_channels = {
            0.5: [-1, 24, 48, 96, 192, 1024],
            1.0: [-1, 24, 116, 232, 464, 1024],
            1.5: [-1, 24, 176, 352, 704, 1024],
            2.0: [-1, 24, 224, 488, 976, 2048],
        }
        if scale not in scale_channels:
            # BUG FIX: the original raised
            #   ValueError("...".format(num_groups))
            # where `num_groups` was undefined, so an unsupported scale
            # actually crashed with a NameError and a message about grouped
            # convolutions.  Now raises the same error as ShuffleNetV2.
            raise NotImplementedError("This scale size:[" + str(scale) +
                                      "] is not implemented!")
        stage_out_channels = scale_channels[scale]

        # stem: 3x3 stride-2 conv followed by 3x3 stride-2 max pool
        input_channel = stage_out_channels[1]
        conv1 = self.conv_bn_layer(
            input=input,
            filter_size=3,
            num_filters=input_channel,
            padding=1,
            stride=2,
            name='stage1_conv')
        conv = fluid.layers.pool2d(
            input=conv1,
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max')

        # stages 2-4: first unit of each stage downsamples (benchmodel=2)
        for idxstage in range(len(stage_repeats)):
            numrepeat = stage_repeats[idxstage]
            output_channel = stage_out_channels[idxstage + 2]
            for i in range(numrepeat):
                conv = self.inverted_residual_unit(
                    input=conv,
                    num_filters=output_channel,
                    stride=2 if i == 0 else 1,
                    benchmodel=2 if i == 0 else 1,
                    name=str(idxstage + 2) + '_' + str(i + 1))

        conv_last = self.conv_bn_layer(
            input=conv,
            filter_size=1,
            num_filters=stage_out_channels[-1],
            padding=0,
            stride=1,
            name='conv5')
        pool_last = fluid.layers.pool2d(
            input=conv_last,
            pool_size=7,
            pool_stride=1,
            pool_padding=0,
            pool_type='avg')

        output = fluid.layers.fc(input=pool_last,
                                 size=class_dim,
                                 param_attr=ParamAttr(
                                     initializer=MSRA(), name='fc6_weights'),
                                 bias_attr=ParamAttr(name='fc6_offset'))
        return output

    def conv_bn_layer(self,
                      input,
                      filter_size,
                      num_filters,
                      stride,
                      padding,
                      num_groups=1,
                      use_cudnn=True,
                      if_act=True,
                      name=None):
        """conv2d (bias-free, MSRA init) + batch_norm.

        When *if_act* is True, swish is fused into the batch_norm layer.
        """
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            act=None,
            use_cudnn=use_cudnn,
            param_attr=ParamAttr(
                initializer=MSRA(), name=name + '_weights'),
            bias_attr=False)
        # NOTE: an unused output-size computation was removed here as dead
        # code (same cleanup as in ShuffleNetV2.conv_bn_layer).
        bn_name = name + '_bn'
        return fluid.layers.batch_norm(
            input=conv,
            act='swish' if if_act else None,
            param_attr=ParamAttr(name=bn_name + "_scale"),
            bias_attr=ParamAttr(name=bn_name + "_offset"),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def channel_shuffle(self, x, groups):
        """Interleave channels across *groups* (reshape/transpose/flatten)."""
        batchsize, num_channels, height, width = x.shape[0], x.shape[
            1], x.shape[2], x.shape[3]
        channels_per_group = num_channels // groups

        # reshape into [N, groups, C/groups, H, W]
        x = fluid.layers.reshape(
            x=x, shape=[batchsize, groups, channels_per_group, height, width])

        # swap the group and channel axes
        x = fluid.layers.transpose(x=x, perm=[0, 2, 1, 3, 4])

        # flatten back to [N, C, H, W]
        x = fluid.layers.reshape(
            x=x, shape=[batchsize, num_channels, height, width])

        return x

    def inverted_residual_unit(self,
                               input,
                               num_filters,
                               stride,
                               benchmodel,
                               name=None):
        """ShuffleNetV2 unit (see ShuffleNetV2.inverted_residual_unit);
        activations are swish via conv_bn_layer."""
        assert stride in [1, 2], \
            "supported stride are {} but your stride is {}".format([1,2], stride)

        oup_inc = num_filters // 2
        inp = input.shape[1]

        if benchmodel == 1:
            x1, x2 = fluid.layers.split(
                input,
                num_or_sections=[input.shape[1] // 2, input.shape[1] // 2],
                dim=1)

            conv_pw = self.conv_bn_layer(
                input=x2,
                num_filters=oup_inc,
                filter_size=1,
                stride=1,
                padding=0,
                num_groups=1,
                if_act=True,
                name='stage_' + name + '_conv1')

            # depthwise 3x3; cudnn disabled for depthwise convs
            conv_dw = self.conv_bn_layer(
                input=conv_pw,
                num_filters=oup_inc,
                filter_size=3,
                stride=stride,
                padding=1,
                num_groups=oup_inc,
                if_act=False,
                use_cudnn=False,
                name='stage_' + name + '_conv2')

            conv_linear = self.conv_bn_layer(
                input=conv_dw,
                num_filters=oup_inc,
                filter_size=1,
                stride=1,
                padding=0,
                num_groups=1,
                if_act=True,
                name='stage_' + name + '_conv3')

            out = fluid.layers.concat([x1, conv_linear], axis=1)

        else:
            # branch1: depthwise 3x3 + pointwise 1x1 on the full input
            conv_dw_1 = self.conv_bn_layer(
                input=input,
                num_filters=inp,
                filter_size=3,
                stride=stride,
                padding=1,
                num_groups=inp,
                if_act=False,
                use_cudnn=False,
                name='stage_' + name + '_conv4')

            conv_linear_1 = self.conv_bn_layer(
                input=conv_dw_1,
                num_filters=oup_inc,
                filter_size=1,
                stride=1,
                padding=0,
                num_groups=1,
                if_act=True,
                name='stage_' + name + '_conv5')

            # branch2: pointwise -> depthwise -> pointwise
            conv_pw_2 = self.conv_bn_layer(
                input=input,
                num_filters=oup_inc,
                filter_size=1,
                stride=1,
                padding=0,
                num_groups=1,
                if_act=True,
                name='stage_' + name + '_conv1')

            conv_dw_2 = self.conv_bn_layer(
                input=conv_pw_2,
                num_filters=oup_inc,
                filter_size=3,
                stride=stride,
                padding=1,
                num_groups=oup_inc,
                if_act=False,
                use_cudnn=False,
                name='stage_' + name + '_conv2')

            conv_linear_2 = self.conv_bn_layer(
                input=conv_dw_2,
                num_filters=oup_inc,
                filter_size=1,
                stride=1,
                padding=0,
                num_groups=1,
                if_act=True,
                name='stage_' + name + '_conv3')
            out = fluid.layers.concat([conv_linear_1, conv_linear_2], axis=1)

        return self.channel_shuffle(out, 2)


def ShuffleNetV2_x0_5_swish():
    """Swish-activated ShuffleNetV2 with width multiplier 0.5."""
    model = ShuffleNetV2_swish(scale=0.5)
    return model


def ShuffleNetV2_x1_0_swish():
    """Swish-activated ShuffleNetV2 with width multiplier 1.0."""
    model = ShuffleNetV2_swish(scale=1.0)
    return model


def ShuffleNetV2_x1_5_swish():
    """Swish-activated ShuffleNetV2 with width multiplier 1.5."""
    model = ShuffleNetV2_swish(scale=1.5)
    return model


def ShuffleNetV2_x2_0_swish():
    """Swish-activated ShuffleNetV2 with width multiplier 2.0."""
    model = ShuffleNetV2_swish(scale=2.0)
    return model
class SqueezeNet():
    """SqueezeNet v1.0 / v1.1 classifier (``fluid`` static graph)."""

    def __init__(self, version='1.0'):
        # version: architecture revision, '1.0' or '1.1'
        self.version = version

    def net(self, input, class_dim=1000):
        """Build the network on *input*; returns flattened class scores."""
        version = self.version
        assert version in ['1.0', '1.1'], \
            "supported version are {} but input version is {}".format(['1.0', '1.1'], version)

        # Each version is a stem conv followed by a fixed plan of fire
        # modules interleaved with 3x3/stride-2 max pools ('pool' entries).
        # Fire modules always use equal 1x1 and 3x3 expand widths, so each
        # entry is (squeeze_channels, expand_channels, layer_name).
        if version == '1.0':
            conv = fluid.layers.conv2d(
                input,
                num_filters=96,
                filter_size=7,
                stride=2,
                act='relu',
                param_attr=fluid.param_attr.ParamAttr(name="conv1_weights"),
                bias_attr=ParamAttr(name='conv1_offset'))
            plan = ['pool', (16, 64, 'fire2'), (16, 64, 'fire3'),
                    (32, 128, 'fire4'), 'pool', (32, 128, 'fire5'),
                    (48, 192, 'fire6'), (48, 192, 'fire7'),
                    (64, 256, 'fire8'), 'pool', (64, 256, 'fire9')]
        else:
            conv = fluid.layers.conv2d(
                input,
                num_filters=64,
                filter_size=3,
                stride=2,
                padding=1,
                act='relu',
                param_attr=fluid.param_attr.ParamAttr(name="conv1_weights"),
                bias_attr=ParamAttr(name='conv1_offset'))
            plan = ['pool', (16, 64, 'fire2'), (16, 64, 'fire3'), 'pool',
                    (32, 128, 'fire4'), (32, 128, 'fire5'), 'pool',
                    (48, 192, 'fire6'), (48, 192, 'fire7'),
                    (64, 256, 'fire8'), (64, 256, 'fire9')]

        for step in plan:
            if step == 'pool':
                conv = fluid.layers.pool2d(
                    conv, pool_size=3, pool_stride=2, pool_type='max')
            else:
                squeeze_ch, expand_ch, fire_name = step
                conv = self.make_fire(
                    conv, squeeze_ch, expand_ch, expand_ch, name=fire_name)

        # classifier head: dropout, 1x1 conv to class_dim, global avg pool
        conv = fluid.layers.dropout(conv, dropout_prob=0.5)
        conv = fluid.layers.conv2d(
            conv,
            num_filters=class_dim,
            filter_size=1,
            act='relu',
            param_attr=fluid.param_attr.ParamAttr(name="conv10_weights"),
            bias_attr=ParamAttr(name='conv10_offset'))
        conv = fluid.layers.pool2d(conv, pool_type='avg', global_pooling=True)
        return fluid.layers.flatten(conv)

    def make_fire_conv(self,
                       input,
                       num_filters,
                       filter_size,
                       padding=0,
                       name=None):
        """A single relu conv of a fire module (squeeze or expand path)."""
        return fluid.layers.conv2d(
            input,
            num_filters=num_filters,
            filter_size=filter_size,
            padding=padding,
            act='relu',
            param_attr=fluid.param_attr.ParamAttr(name=name + "_weights"),
            bias_attr=ParamAttr(name=name + '_offset'))

    def make_fire(self,
                  input,
                  squeeze_channels,
                  expand1x1_channels,
                  expand3x3_channels,
                  name=None):
        """Fire module: 1x1 squeeze, then concatenated 1x1 / 3x3 expands."""
        squeezed = self.make_fire_conv(
            input, squeeze_channels, 1, name=name + '_squeeze1x1')
        expand_left = self.make_fire_conv(
            squeezed, expand1x1_channels, 1, name=name + '_expand1x1')
        expand_right = self.make_fire_conv(
            squeezed, expand3x3_channels, 3, 1, name=name + '_expand3x3')
        return fluid.layers.concat([expand_left, expand_right], axis=1)


def SqueezeNet1_0():
    """SqueezeNet v1.0 (7x7 stem conv)."""
    model = SqueezeNet(version='1.0')
    return model


def SqueezeNet1_1():
    """SqueezeNet v1.1 (3x3 stem conv, earlier pooling)."""
    model = SqueezeNet(version='1.1')
    return model
class VGGNet():
    """VGG-{11,13,16,19} classifier (``fluid`` static graph)."""

    def __init__(self, layers=16):
        # layers: total weighted-layer count; one of 11, 13, 16, 19
        self.layers = layers

    def net(self, input, class_dim=1000):
        """Build the VGG graph on *input*; returns un-activated fc8 logits."""
        layers = self.layers
        vgg_spec = {
            11: ([1, 1, 2, 2, 2]),
            13: ([2, 2, 2, 2, 2]),
            16: ([2, 2, 3, 3, 3]),
            19: ([2, 2, 4, 4, 4])
        }
        assert layers in vgg_spec.keys(), \
            "supported layers are {} but input layer is {}".format(vgg_spec.keys(), layers)

        # five conv stages; widths are fixed, repeats come from vgg_spec
        nums = vgg_spec[layers]
        stage_widths = [64, 128, 256, 512, 512]
        conv = input
        for stage, (width, repeat) in enumerate(zip(stage_widths, nums),
                                                start=1):
            conv = self.conv_block(
                conv, width, repeat, name="conv{}_".format(stage))

        fc_dim = 4096
        fc_name = ["fc6", "fc7", "fc8"]
        # two relu fc layers with dropout, then the linear classifier
        hidden = conv
        for tag in fc_name[:2]:
            hidden = fluid.layers.fc(
                input=hidden,
                size=fc_dim,
                act='relu',
                param_attr=fluid.param_attr.ParamAttr(name=tag + "_weights"),
                bias_attr=fluid.param_attr.ParamAttr(name=tag + "_offset"))
            hidden = fluid.layers.dropout(x=hidden, dropout_prob=0.5)
        out = fluid.layers.fc(
            input=hidden,
            size=class_dim,
            param_attr=fluid.param_attr.ParamAttr(
                name=fc_name[2] + "_weights"),
            bias_attr=fluid.param_attr.ParamAttr(name=fc_name[2] + "_offset"))

        return out

    def conv_block(self, input, num_filter, groups, name=None):
        """*groups* stacked 3x3 relu convs followed by a 2x2 max pool."""
        conv = input
        for i in range(groups):
            conv = fluid.layers.conv2d(
                input=conv,
                num_filters=num_filter,
                filter_size=3,
                stride=1,
                padding=1,
                act='relu',
                param_attr=fluid.param_attr.ParamAttr(
                    name=name + str(i + 1) + "_weights"),
                bias_attr=False)
        return fluid.layers.pool2d(
            input=conv, pool_size=2, pool_type='max', pool_stride=2)


def VGG11():
    """VGG with 11 weighted layers."""
    return VGGNet(layers=11)


def VGG13():
    """VGG with 13 weighted layers."""
    return VGGNet(layers=13)


def VGG16():
    """VGG with 16 weighted layers."""
    return VGGNet(layers=16)


def VGG19():
    """VGG with 19 weighted layers."""
    return VGGNet(layers=19)
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import sys + +import paddle +import paddle.fluid as fluid +from paddle.fluid.param_attr import ParamAttr + +__all__ = ['Xception', 'Xception41', 'Xception65', 'Xception71'] + + +class Xception(object): + """Xception""" + + def __init__(self, entry_flow_block_num=3, middle_flow_block_num=8): + self.entry_flow_block_num = entry_flow_block_num + self.middle_flow_block_num = middle_flow_block_num + return + + def net(self, input, class_dim=1000): + conv = self.entry_flow(input, self.entry_flow_block_num) + conv = self.middle_flow(conv, self.middle_flow_block_num) + conv = self.exit_flow(conv, class_dim) + + return conv + + def entry_flow(self, input, block_num=3): + '''xception entry_flow''' + name = "entry_flow" + conv = self.conv_bn_layer( + input=input, + num_filters=32, + filter_size=3, + stride=2, + act='relu', + name=name + "_conv1") + conv = self.conv_bn_layer( + input=conv, + num_filters=64, + filter_size=3, + stride=1, + act='relu', + name=name + "_conv2") + + if block_num == 3: + relu_first = [False, True, True] + num_filters = [128, 256, 728] + stride = [2, 2, 2] + elif block_num == 5: + relu_first = [False, True, True, True, True] + num_filters = [128, 256, 256, 728, 728] + stride = [2, 1, 2, 1, 2] + else: + sys.exit(-1) + + for block in range(block_num): + curr_name = "{}_{}".format(name, block) + conv = self.entry_flow_bottleneck_block( + conv, + num_filters=num_filters[block], + name=curr_name, + stride=stride[block], + relu_first=relu_first[block]) + + return conv + + def entry_flow_bottleneck_block(self, + input, + num_filters, + name, + stride=2, + relu_first=False): + '''entry_flow_bottleneck_block''' + short = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=1, + stride=stride, + padding=0, + act=None, + param_attr=ParamAttr(name + "_branch1_weights"), + bias_attr=False) + + conv0 = input + if 
relu_first: + conv0 = fluid.layers.relu(conv0) + + conv1 = self.separable_conv( + conv0, num_filters, stride=1, name=name + "_branch2a_weights") + + conv2 = fluid.layers.relu(conv1) + conv2 = self.separable_conv( + conv2, num_filters, stride=1, name=name + "_branch2b_weights") + + pool = fluid.layers.pool2d( + input=conv2, + pool_size=3, + pool_stride=stride, + pool_padding=1, + pool_type='max') + + return fluid.layers.elementwise_add(x=short, y=pool) + + def middle_flow(self, input, block_num=8): + '''xception middle_flow''' + num_filters = 728 + conv = input + for block in range(block_num): + name = "middle_flow_{}".format(block) + conv = self.middle_flow_bottleneck_block(conv, num_filters, name) + + return conv + + def middle_flow_bottleneck_block(self, input, num_filters, name): + '''middle_flow_bottleneck_block''' + conv0 = fluid.layers.relu(input) + conv0 = self.separable_conv( + conv0, + num_filters=num_filters, + stride=1, + name=name + "_branch2a_weights") + + conv1 = fluid.layers.relu(conv0) + conv1 = self.separable_conv( + conv1, + num_filters=num_filters, + stride=1, + name=name + "_branch2b_weights") + + conv2 = fluid.layers.relu(conv1) + conv2 = self.separable_conv( + conv2, + num_filters=num_filters, + stride=1, + name=name + "_branch2c_weights") + + return fluid.layers.elementwise_add(x=input, y=conv2) + + def exit_flow(self, input, class_dim): + '''xception exit flow''' + name = "exit_flow" + num_filters1 = 728 + num_filters2 = 1024 + conv0 = self.exit_flow_bottleneck_block( + input, num_filters1, num_filters2, name=name + "_1") + + conv1 = self.separable_conv( + conv0, num_filters=1536, stride=1, name=name + "_2") + conv1 = fluid.layers.relu(conv1) + + conv2 = self.separable_conv( + conv1, num_filters=2048, stride=1, name=name + "_3") + conv2 = fluid.layers.relu(conv2) + + pool = fluid.layers.pool2d( + input=conv2, pool_type='avg', global_pooling=True) + + stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0) + out = fluid.layers.fc( + input=pool, + 
size=class_dim, + param_attr=fluid.param_attr.ParamAttr( + name='fc_weights', + initializer=fluid.initializer.Uniform(-stdv, stdv)), + bias_attr=fluid.param_attr.ParamAttr(name='fc_offset')) + + return out + + def exit_flow_bottleneck_block(self, input, num_filters1, num_filters2, + name): + '''entry_flow_bottleneck_block''' + short = fluid.layers.conv2d( + input=input, + num_filters=num_filters2, + filter_size=1, + stride=2, + padding=0, + act=None, + param_attr=ParamAttr(name + "_branch1_weights"), + bias_attr=False) + + conv0 = fluid.layers.relu(input) + conv1 = self.separable_conv( + conv0, num_filters1, stride=1, name=name + "_branch2a_weights") + + conv2 = fluid.layers.relu(conv1) + conv2 = self.separable_conv( + conv2, num_filters2, stride=1, name=name + "_branch2b_weights") + + pool = fluid.layers.pool2d( + input=conv2, + pool_size=3, + pool_stride=2, + pool_padding=1, + pool_type='max') + + return fluid.layers.elementwise_add(x=short, y=pool) + + def separable_conv(self, input, num_filters, stride=1, name=None): + """separable_conv""" + pointwise_conv = self.conv_bn_layer( + input=input, + filter_size=1, + num_filters=num_filters, + stride=1, + name=name + "_sep") + + depthwise_conv = self.conv_bn_layer( + input=pointwise_conv, + filter_size=3, + num_filters=num_filters, + stride=stride, + groups=num_filters, + use_cudnn=False, + name=name + "_dw") + + return depthwise_conv + + def conv_bn_layer(self, + input, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + use_cudnn=True, + name=None): + """conv_bn_layer""" + conv = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + act=None, + param_attr=ParamAttr(name=name + "_weights"), + bias_attr=False, + use_cudnn=use_cudnn) + + bn_name = "bn_" + name + + return fluid.layers.batch_norm( + input=conv, + act=act, + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name 
+ '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + + +def Xception41(): + model = Xception(entry_flow_block_num=3, middle_flow_block_num=8) + return model + + +def Xception65(): + model = Xception(entry_flow_block_num=3, middle_flow_block_num=16) + return model + + +def Xception71(): + model = Xception(entry_flow_block_num=5, middle_flow_block_num=16) + return model diff --git a/ppcls/modeling/architectures/xception_deeplab.py b/ppcls/modeling/architectures/xception_deeplab.py new file mode 100644 index 000000000..b76375ed6 --- /dev/null +++ b/ppcls/modeling/architectures/xception_deeplab.py @@ -0,0 +1,320 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import contextlib +import paddle +import math + +import paddle.fluid as fluid + +from .model_libs import scope, name_scope +from .model_libs import bn, bn_relu, relu +from .model_libs import conv +from .model_libs import seperate_conv + +__all__ = ['Xception41_deeplab', 'Xception65_deeplab', 'Xception71_deeplab'] + + +def check_data(data, number): + if type(data) == int: + return [data] * number + assert len(data) == number + return data + + +def check_stride(s, os): + if s <= os: + return True + else: + return False + + +def check_points(count, points): + if points is None: + return False + else: + if isinstance(points, list): + return (True if count in points else False) + else: + return (True if count == points else False) + + +class Xception(): + def __init__(self, backbone="xception_65"): + self.bottleneck_params = self.gen_bottleneck_params(backbone) + self.backbone = backbone + + def gen_bottleneck_params(self, backbone='xception_65'): + if backbone == 'xception_65': + bottleneck_params = { + "entry_flow": (3, [2, 2, 2], [128, 256, 728]), + "middle_flow": (16, 1, 728), + "exit_flow": + (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]]) + } + elif backbone == 'xception_41': + bottleneck_params = { + "entry_flow": (3, [2, 2, 2], [128, 256, 728]), + "middle_flow": (8, 1, 728), + "exit_flow": + (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]]) + } + elif backbone == 'xception_71': + bottleneck_params = { + "entry_flow": (5, [2, 1, 2, 1, 2], [128, 256, 256, 728, 728]), + "middle_flow": (16, 1, 728), + "exit_flow": + (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]]) + } + else: + raise Exception( + "xception backbont only support xception_41/xception_65/xception_71" + ) + return bottleneck_params + + def net(self, + input, + output_stride=32, + class_dim=1000, + end_points=None, + decode_points=None): + self.stride = 2 + self.block_point = 0 
+ self.output_stride = output_stride + self.decode_points = decode_points + self.short_cuts = dict() + with scope(self.backbone): + # Entry flow + data = self.entry_flow(input) + if check_points(self.block_point, end_points): + return data, self.short_cuts + + # Middle flow + data = self.middle_flow(data) + if check_points(self.block_point, end_points): + return data, self.short_cuts + + # Exit flow + data = self.exit_flow(data) + if check_points(self.block_point, end_points): + return data, self.short_cuts + + data = fluid.layers.reduce_mean(data, [2, 3], keep_dim=True) + data = fluid.layers.dropout(data, 0.5) + stdv = 1.0 / math.sqrt(data.shape[1] * 1.0) + with scope("logit"): + out = fluid.layers.fc( + input=data, + size=class_dim, + param_attr=fluid.param_attr.ParamAttr( + name='fc_weights', + initializer=fluid.initializer.Uniform(-stdv, stdv)), + bias_attr=fluid.param_attr.ParamAttr(name='fc_bias')) + + return out + + def entry_flow(self, data): + param_attr = fluid.ParamAttr( + name=name_scope + 'weights', + regularizer=None, + initializer=fluid.initializer.TruncatedNormal( + loc=0.0, scale=0.09)) + with scope("entry_flow"): + with scope("conv1"): + data = bn_relu( + conv( + data, + 32, + 3, + stride=2, + padding=1, + param_attr=param_attr)) + with scope("conv2"): + data = bn_relu( + conv( + data, + 64, + 3, + stride=1, + padding=1, + param_attr=param_attr)) + + # get entry flow params + block_num = self.bottleneck_params["entry_flow"][0] + strides = self.bottleneck_params["entry_flow"][1] + chns = self.bottleneck_params["entry_flow"][2] + strides = check_data(strides, block_num) + chns = check_data(chns, block_num) + + # params to control your flow + s = self.stride + block_point = self.block_point + output_stride = self.output_stride + with scope("entry_flow"): + for i in range(block_num): + block_point = block_point + 1 + with scope("block" + str(i + 1)): + stride = strides[i] if check_stride(s * strides[i], + output_stride) else 1 + data, short_cuts = 
self.xception_block(data, chns[i], + [1, 1, stride]) + s = s * stride + if check_points(block_point, self.decode_points): + self.short_cuts[block_point] = short_cuts[1] + + self.stride = s + self.block_point = block_point + return data + + def middle_flow(self, data): + block_num = self.bottleneck_params["middle_flow"][0] + strides = self.bottleneck_params["middle_flow"][1] + chns = self.bottleneck_params["middle_flow"][2] + strides = check_data(strides, block_num) + chns = check_data(chns, block_num) + + # params to control your flow + s = self.stride + block_point = self.block_point + output_stride = self.output_stride + with scope("middle_flow"): + for i in range(block_num): + block_point = block_point + 1 + with scope("block" + str(i + 1)): + stride = strides[i] if check_stride(s * strides[i], + output_stride) else 1 + data, short_cuts = self.xception_block( + data, chns[i], [1, 1, strides[i]], skip_conv=False) + s = s * stride + if check_points(block_point, self.decode_points): + self.short_cuts[block_point] = short_cuts[1] + + self.stride = s + self.block_point = block_point + return data + + def exit_flow(self, data): + block_num = self.bottleneck_params["exit_flow"][0] + strides = self.bottleneck_params["exit_flow"][1] + chns = self.bottleneck_params["exit_flow"][2] + strides = check_data(strides, block_num) + chns = check_data(chns, block_num) + + assert (block_num == 2) + # params to control your flow + s = self.stride + block_point = self.block_point + output_stride = self.output_stride + with scope("exit_flow"): + with scope('block1'): + block_point += 1 + stride = strides[0] if check_stride(s * strides[0], + output_stride) else 1 + data, short_cuts = self.xception_block(data, chns[0], + [1, 1, stride]) + s = s * stride + if check_points(block_point, self.decode_points): + self.short_cuts[block_point] = short_cuts[1] + with scope('block2'): + block_point += 1 + stride = strides[1] if check_stride(s * strides[1], + output_stride) else 1 + data, 
short_cuts = self.xception_block( + data, + chns[1], [1, 1, stride], + dilation=2, + has_skip=False, + activation_fn_in_separable_conv=True) + s = s * stride + if check_points(block_point, self.decode_points): + self.short_cuts[block_point] = short_cuts[1] + + self.stride = s + self.block_point = block_point + return data + + def xception_block(self, + input, + channels, + strides=1, + filters=3, + dilation=1, + skip_conv=True, + has_skip=True, + activation_fn_in_separable_conv=False): + repeat_number = 3 + channels = check_data(channels, repeat_number) + filters = check_data(filters, repeat_number) + strides = check_data(strides, repeat_number) + data = input + results = [] + for i in range(repeat_number): + with scope('separable_conv' + str(i + 1)): + if not activation_fn_in_separable_conv: + data = relu(data) + data = seperate_conv( + data, + channels[i], + strides[i], + filters[i], + dilation=dilation) + else: + data = seperate_conv( + data, + channels[i], + strides[i], + filters[i], + dilation=dilation, + act=relu) + results.append(data) + if not has_skip: + return data, results + if skip_conv: + param_attr = fluid.ParamAttr( + name=name_scope + 'weights', + regularizer=None, + initializer=fluid.initializer.TruncatedNormal( + loc=0.0, scale=0.09)) + with scope('shortcut'): + skip = bn( + conv( + input, + channels[-1], + 1, + strides[-1], + groups=1, + padding=0, + param_attr=param_attr)) + else: + skip = input + return data + skip, results + + +def Xception41_deeplab(): + model = Xception("xception_41") + return model + + +def Xception65_deeplab(): + model = Xception("xception_65") + return model + + +def Xception71_deeplab(): + model = Xception("xception_71") + return model diff --git a/ppcls/modeling/loss.py b/ppcls/modeling/loss.py new file mode 100644 index 000000000..20825f82d --- /dev/null +++ b/ppcls/modeling/loss.py @@ -0,0 +1,99 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. 
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +import paddle +import paddle.fluid as fluid + +__all__ = ['CELoss', 'MixCELoss', 'GoogLeNetLoss'] + + +class Loss(object): + """ + Loss + """ + + def __init__(self, class_dim=1000, epsilon=None): + assert class_dim > 1, "class_dim=%d is not larger than 1" % (class_dim) + self._class_dim = class_dim + if epsilon and epsilon >= 0.0 and epsilon <= 1.0: + self._epsilon = epsilon + self._label_smoothing = True + else: + self._epsilon = None + self._label_smoothing = False + + def _labelsmoothing(self, target): + one_hot_target = fluid.layers.one_hot( + input=target, depth=self._class_dim) + soft_target = fluid.layers.label_smooth( + label=one_hot_target, epsilon=self._epsilon, dtype="float32") + return soft_target + + def _crossentropy(self, input, target): + if self._label_smoothing: + target = self._labelsmoothing(target) + softmax_out = fluid.layers.softmax(input, use_cudnn=False) + cost = fluid.layers.cross_entropy( + input=softmax_out, label=target, soft_label=self._label_smoothing) + avg_cost = fluid.layers.mean(cost) + return avg_cost + + def __call__(self, input, target): + pass + + +class CELoss(Loss): + """ + Cross entropy loss + """ + + def __init__(self, class_dim=1000, epsilon=None): + super(CELoss, self).__init__(class_dim, epsilon) + + def __call__(self, input, target): + cost = self._crossentropy(input, target) + return cost + + +class MixCELoss(Loss): + """ + Cross entropy loss with mix(mixup, cutmix, fixmix) + """ 
+ + def __init__(self, class_dim=1000, epsilon=None): + super(MixCELoss, self).__init__(class_dim, epsilon) + + def __call__(self, input, target0, target1, lam): + cost0 = self._crossentropy(input, target0) + cost1 = self._crossentropy(input, target1) + cost = lam * cost0 + (1.0 - lam) * cost1 + avg_cost = fluid.layers.mean(cost) + return avg_cost + + +class GoogLeNetLoss(Loss): + """ + Cross entropy loss used after googlenet + """ + + def __init__(self, class_dim=1000, epsilon=None): + super(GoogLeNetLoss, self).__init__(class_dim, epsilon) + + def __call__(self, input0, input1, input2, target): + cost0 = self._crossentropy(input0, target) + cost1 = self._crossentropy(input1, target) + cost2 = self._crossentropy(input2, target) + cost = cost0 + 0.3 * cost1 + 0.3 * cost2 + avg_cost = fluid.layers.mean(cost) + return avg_cost diff --git a/ppcls/modeling/utils.py b/ppcls/modeling/utils.py new file mode 100644 index 000000000..b239b9e41 --- /dev/null +++ b/ppcls/modeling/utils.py @@ -0,0 +1,43 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
+ +import types +import architectures +from difflib import SequenceMatcher + + +def get_architectures(): + """ + get all of model architectures + """ + names = [] + for k, v in architectures.__dict__.items(): + if isinstance(v, (types.FunctionType, types.ClassType)): + names.append(k) + return names + + +def similar_architectures(name='', thresh=0.1, topk=10): + """ + inferred similar architectures + """ + scores = [] + names = get_architectures() + for idx, n in enumerate(names): + if n[:2] == '__': continue + score = SequenceMatcher(None, n.lower(), name.lower()).quick_ratio() + if score > thresh: scores.append((idx, score)) + scores.sort(key=lambda x: x[1], reverse=True) + similar_names = [names[s[0]] for s in scores[:min(topk, len(scores))]] + return similar_names diff --git a/ppcls/optimizer/__init__.py b/ppcls/optimizer/__init__.py new file mode 100644 index 000000000..9a192d9a1 --- /dev/null +++ b/ppcls/optimizer/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import optimizer +from . import learning_rate + +from optimizer import OptimizerBuilder +from learning_rate import LearningRateBuilder diff --git a/ppcls/optimizer/learning_rate.py b/ppcls/optimizer/learning_rate.py new file mode 100644 index 000000000..197f8af14 --- /dev/null +++ b/ppcls/optimizer/learning_rate.py @@ -0,0 +1,169 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. 
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys +import math + +import paddle.fluid as fluid +import paddle.fluid.layers.ops as ops +from paddle.fluid.layers.learning_rate_scheduler import _decay_step_counter + +__all__ = ['LearningRateBuilder'] + + +class Linear(object): + """ + Linear learning rate decay + + Args: + lr(float): initial learning rate + steps(int): total decay steps + end_lr(float): end learning rate, default: 0.0. 
+ """ + + def __init__(self, lr, steps, end_lr=0.0, **kwargs): + super(Linear, self).__init__() + self.lr = lr + self.steps = steps + self.end_lr = end_lr + + def __call__(self): + learning_rate = fluid.layers.polynomial_decay( + self.lr, self.steps, self.end_lr, power=1) + return learning_rate + + +class Cosine(object): + """ + Cosine learning rate decay + lr = 0.05 * (math.cos(epoch * (math.pi / epochs)) + 1) + + Args: + lr(float): initial learning rate + step_each_epoch(int): steps each epoch + epochs(int): total training epochs + """ + + def __init__(self, lr, step_each_epoch, epochs, **kwargs): + super(Cosine, self).__init__() + self.lr = lr + self.step_each_epoch = step_each_epoch + self.epochs = epochs + + def __call__(self): + learning_rate = fluid.layers.cosine_decay( + learning_rate=self.lr, + step_each_epoch=self.step_each_epoch, + epochs=self.epochs) + return learning_rate + + +class Piecewise(object): + """ + Piecewise learning rate decay + + Args: + lr(float): initial learning rate + step_each_epoch(int): steps each epoch + decay_epochs(list): piecewise decay epochs + gamma(float): decay factor + """ + + def __init__(self, lr, step_each_epoch, decay_epochs, gamma=0.1, **kwargs): + super(Piecewise, self).__init__() + self.bd = [step_each_epoch * e for e in decay_epochs] + self.lr = [lr * (gamma**i) for i in range(len(self.bd) + 1)] + + def __call__(self): + learning_rate = fluid.layers.piecewise_decay(self.bd, self.lr) + return learning_rate + + +class CosineWarmup(object): + """ + Cosine learning rate decay with warmup + [0, warmup_epoch): linear warmup + [warmup_epoch, epochs): cosine decay + + Args: + lr(float): initial learning rate + step_each_epoch(int): steps each epoch + epochs(int): total training epochs + warmup_epoch(int): epoch num of warmup + """ + + def __init__(self, lr, step_each_epoch, epochs, warmup_epoch=5, **kwargs): + super(CosineWarmup, self).__init__() + self.lr = lr + self.step_each_epoch = step_each_epoch + self.epochs = epochs 
+ self.warmup_epoch = fluid.layers.fill_constant( + shape=[1], + value=float(warmup_epoch), + dtype='float32', + force_cpu=True) + + def __call__(self): + global_step = _decay_step_counter() + learning_rate = fluid.layers.tensor.create_global_var( + shape=[1], + value=0.0, + dtype='float32', + persistable=True, + name="learning_rate") + epoch = ops.floor(global_step / self.step_each_epoch) + with fluid.layers.control_flow.Switch() as switch: + with switch.case(epoch < self.warmup_epoch): + decayed_lr = self.lr * \ + (global_step / (self.step_each_epoch * self.warmup_epoch)) + fluid.layers.tensor.assign( + input=decayed_lr, output=learning_rate) + with switch.default(): + current_step = global_step - self.warmup_epoch * self.step_each_epoch + total_step = ( + self.epochs - self.warmup_epoch) * self.step_each_epoch + decayed_lr = self.lr * \ + (ops.cos(current_step * math.pi / total_step) + 1) / 2 + fluid.layers.tensor.assign( + input=decayed_lr, output=learning_rate) + + return learning_rate + + +class LearningRateBuilder(): + """ + Build learning rate variable + https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/layers_cn.html + + Args: + function(str): class name of learning rate + params(dict): parameters used for init the class + """ + + def __init__(self, + function='Linear', + params={'lr': 0.1, + 'steps': 100, + 'end_lr': 0.0}): + self.function = function + self.params = params + + def __call__(self): + mod = sys.modules[__name__] + lr = getattr(mod, self.function)(**self.params)() + return lr diff --git a/ppcls/optimizer/optimizer.py b/ppcls/optimizer/optimizer.py new file mode 100644 index 000000000..707fbe9bf --- /dev/null +++ b/ppcls/optimizer/optimizer.py @@ -0,0 +1,53 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. 
+#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle.fluid.optimizer as pfopt +import paddle.fluid.regularizer as pfreg + +__all__ = ['OptimizerBuilder'] + + +class OptimizerBuilder(object): + """ + Build optimizer with fluid api in fluid.layers.optimizer, + such as fluid.layers.optimizer.Momentum() + https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/optimizer_cn.html + https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/regularizer_cn.html + + Args: + function(str): optimizer name of learning rate + params(dict): parameters used for init the class + regularizer (dict): parameters used for create regularization + """ + + def __init__(self, + function='Momentum', + params={'momentum': 0.9}, + regularizer=None): + self.function = function + self.params = params + # create regularizer + if regularizer is not None: + reg_func = regularizer['function'] + 'Decay' + reg_factor = regularizer['factor'] + reg = getattr(pfreg, reg_func)(reg_factor) + self.params['regularization'] = reg + + def __call__(self, learning_rate): + opt = getattr(pfopt, self.function) + return opt(learning_rate=learning_rate, **self.params) diff --git a/ppcls/test/demo.jpeg b/ppcls/test/demo.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..fd91ab95eab4cb7e34590324c050f5cbd400cbad GIT binary patch literal 298976 zcmb5UbyQnl&@LRLSaJ8@6qh2!3luL<+%;&iLUBm27S|v}3KVxjaHkY^w+5OPD^76@ zTz>EOt#{pj@2quBCfjD`ob0{ld1fA$AAbXg)s@wh0ccOci^dB8JpKX9Dfv4)005er 
z0B!&PfCIopBLbj5Y0sW6Sv2DR(ea*i#qjL^DMNeGHKUvXm`|Vo=>|_1Ku=rC1n@>r zkx$oHNKo*Pg2F78DYb6cm#b7Ge?-l@xg?DfFLJLdet2?hye{1Yn^5C;#0so&*yM z^FP7C!g_{FXISVM z*l0Ka+^1^wr)Q3ij)wjW9TO7+8v_lU2n`(r^BD;#fEkO7MM$1p2V2<2lY%uYMWFzW z!}e0Qcjh0Z?H@(2@Dp|sD3x9z2k0ZN=$rb6r?KeJ{y#(iYYFf)6xuTY7WPx61PK5Q z1O3Sc=F@bZY(7otX_6Ssn507TIyV2vSUkg03gFLrXa0~2pJ1^nQ0Tt2WebPaKmG*Z zWBgB35`Y}wIqCA&0xSy`+9Knk&DQj(VPp>WW$YAh zaxL&>ZS?NDEpO#QAddi;{A#bIz3Ke#06Q=#oQut)j=?6nD7A{^h7L+L5e4SI^Lxl` zZ~xbAgb|uKCriL)tO%oh!vD`mfH9noHV$_1$j7L z?(9+VE`F}0{r(6LA>r@f=sFJ>tLhw-1Fo~)zbF06%EQf48D*Rf?qEb9M^J&;C)l68 ze3mr=-oVKkgh!?#?(PWtYTOLxk2(zYr3~8m{;nt6aSaw0DwB`Voy`R`q+BiOFY@uJ zfB3h+J_3e)p2x$#l<69ecfZAk=G`&@$5nwqnBgzrx0}^qBHPUg za@XJl=y&+^3TNAaTi;>n_%S4^Zxxw(qqZm|%4vf8WEk15a53D&_&OPl{k%&lqQ^~0f z)SfR`;+k$6OCWS<>OtL?1W2m&DidR7gd=nCKyx9AZAT5J>e@aT4By}t4M4*N5 zo(xeylYKv6xq03+M}rV z639>vWi{or0?Gs_?*!Nfc@1we3{7fQK9q$(Qg3u~7|jaD#%W0O?>Jkf?#_QUb1T5r z-V1QE9aIf_IJWZ2dGr|3f3paB@%;HqLl@}V8Ijm_JIs8YyAViotu_sQw{uv3r(7ms zW7#1rFJ7fag~{igRypRk$*9<{8() z=OuwRoWri|MwG15Jf&~&VW}XJixu%bULyH-BzAXVV$tJ6*{#vy#EKNCjePbr;; zfo<0Ji=)j7C)foH7J8U`1boiY#+q(emm`0d-R;5D ztg4PPv!W5a1AU=nN?zK2eca{Wmuc5wc0ySalF`{xLWLL9|^mnL$&`q zwnQKeE0;o__9+wC%|*3K5JMoef3}gufWK#ti} z+?pD8&=tzL{0Kn8qJ@ERzv#QZmgP?Brk5ABo0z??E%A2}a{BJe+TxMAG}n{VE%C3> z>a)oL3-N~)t{%V2vXY(4%`@rRF)m>29l_o+Mec#h~w z*CSvT)xD&s2()?V;?6FKb{X}oS9eycAEAw4w%R|OmAZ-bTja6~gPF23v#Mo{Pjvr6z0X^_ z6JVn_knsoUb8}jVHLVg&`&5IjWUCa-lfr+=HaV3zsix4SoM|nCqwg5AbSLz0!{~Lu zGP>V~5t{+P-2F#@`21FUbh5lorJ|HL9Q*CWZD^nerBQ#nkN)C=u0Y0KG~WJ(>0vjD zZW*WD#y&&O#$^oJAz3Vp19lKkjmV#~pm5-R1bjbdqDT+P8hkNgj|w1J^{7Gi7tD@1 zlzZF4mjhUZIvxd3}>6^EGS zTKm(iU(8z#;K(7+tH4s;y&6i6p(g5bODp&4Q&uuEsz1AYIfO-ai{ZFSljF zI1yl6?#%Rgk;LC^~(F+t_lH1i$ zpja-(-&Ykv{(y~r%{IwPjJk&|o*_E#l9t4gQG81$!ad((BnY$_Vi{lARN7^;$v1#C zUuWPI>O?F*4J>WP{^D+waL}?sqA^GbJxB>v9Ker7vj#^mvCdvYbIs%nwE5LAls3&$ zzu4bFt z@8c1CBNJ7Og;gh#L*(HM#gS3n$<3Lm|7GxCyjzc^>&}yJQHeG|N|4e>d6a2pd~(S% zrsZc4H|CWn4`RByq-BL5_TMa3n&vGas<`~Tw6Qk&^HXO#1RP9AapI#Q8o1Nf4OB+l 
zzvm;0cDfV?n-yl_R8l@N_CazO`>H*eScIpudsA9Og~$z`3n#zxI_i4_B(^M$S=NGL zHd8LuaCX4m-jFq(ng}bQ;B0}|pP@;?78v7DM9rhK6VbyQYbp^P`x!nGI?Fc1l zvT6=r{luq~Cf~+GX2p?W-_uD{6U;r$EV7FRR4eSv@!e5h!CO`;FURg>*-MiX;y0|y z%s!-N^s=i-a0V^2@>U*)=e*DYCErbO62gL!{$wfq>`f8(GE`zJ1pQpRUa@x@HV)noLq*k2kMGp&o?WaBWA@U-&iv6gAyTq?+2RC{&;D7x3L{rChmSrvhb@|= z7RuDWnQ;QJ@Rs6Q9A0gE6@0(&spMhNVi>dw-@x!i z@$22;G=|K44@8as6sa*B!!Qr;kgDg5_2PGqQZmtT~lwmmk9mMSCInW?G! z6GfBrq`cJkA5d>ix0E*GiS#+=OM-Q~ch0j`77LPqXKW`TN7E25@ya+ZFLM4~Mr zU0LA8MTwnxHnv-Zi3%y?0p&3$AFYyUoUFyZg03VR4w*Qceua?w%a(4NGc_yZgGms4 zT=UP?z>CqRK;(na5vjpqk9=(@A0gXY_FNw`uY3)9p~do9k=R$qiXOAuFc$r@>``eC z#Ofj40fb0vBjUNG1GLI?dfCKMGxurduvE#eh_3bAqTqq^Q`!?s+#Ms3)u1)4iT3Y1 zb*6p(@4J-BFNhXC9KU?KzqfSX#cOsmQV4BwnyxXr`0&om>Vkm|GRfKhQeJfb1B{sD zI>`iR1Dh))`+G00kh1}A9kw<8zWJMY^Wov|?Zwe_BjGOF$%;Sqi2}bikLjz4i$sFf z`OoSZhgULAaR2SiQ|mL%`f;$$#X!it3{~<8_2|0g*MG&8#Q}UQ`SMHc`8}#x)gXEn zUF+Wo>g3_t55X7lFZn-2u{a}r951Xi6HHmsJeNODOCQ#c_w{%M1|&^^PKi-`?OVmt zP!oyIhmU|E^V=X}ePll-&=8{lKpI``%(4OF@2FWraV3JtO}rJ85G6YH2x#O?Jh=8bd4;6KAq`01 zavpve(c|NZwD@p$v-bcTQWRcP$ym?VE$6M3>6Kv)Zc-;@Yc}}?ea8-f7y_3Ng~dE7 zK7&~KKJkQ-{C#rk{LWq!HEKS!qUduniAQIY41KA`MKk_Z1Ne&|2CLpwM~zWmrNF6R$57mkhEiJ|o1jUyxI{8NR;0t%VKo2B)ah_v0tX7QY*aHBO z{TK{4olP5(s!e;g>^K!pPoAjS>C&z@mbq8==C8_ed zSoC&ru`NTFk}Gt{1B~kM89K72-4IjUf6*Ec5yDK(fQr7}p|91A(8tdRg8$q(z*zJj z^Qro=SZ!~oOBQS(+A~*Pr2`hHt)4ILiwdAig|-j24!><$<_{wcLdEQvo;Sm1hsY1< zJkC_hk{?i1{05u}%ir`B8~JXY+I?Jz^ex`@WW!Y% zrlXc^g9w0$l?U5ZBkpt*e2ufObhg3{UN4zn@>+bqX9g>EC0bqX=Cbky7`_H}Hx)0- zwP;r^w!Y?hPnldxCc`bBZtK)5_xocZ&d#3{3Da0sHZ{g`mFA%#T?6#ttbSNsid(8QaGu44huWa7=v7i4yL8 zV61*@Of~9~q}8Eb?>-62wJ{=F-X`Mowv#c1bJ0LYUvid;dagd?Fi;U3%&Ji6II*EZsKAiTCA)?>aR8?RhUlwwFld%#{IHqAcm&#Fh z-jwFivuPyi)<;`9!PIoyuS!ZFlSUhUeYrngxu>FrQ!vMK8@1j;^{12XbB=i6IS;wD zAagJB@M?cEY2XoXnas#BY5)s&W(qG7hFh+CB-Q8>U?~}@HBGEOCpiBZ~6(Nn9@#3L1Hl(XFny!_4spF*7 z5W1ivLs2u*Xa& zBWrt>yF4Z8jQ(nAN|pKO>i`>6=~6pioGDg0gN{@+h#Vgx8)DgFaB`%Ot5TKJ&T>Xv z9o^RobPwxL*y8>-PGW0@oJh!4EX8Q?*q>cZf9uLYhTmzilvW80l=JLy|6FaoV2xLs 
zA6HM>OQF_~g#OK%JbbKPX0RItR(Tlv_3GqH>~>zB>4M4-$lACt_GKB+VdGK`f(p+1 zqYj~NNr##7I7}EjoIl5tH~}3PgS(=3j~A{YJ59d+qe|GQ`IA**#^Un{M?15jK+XdW zL=+V&QDM6p)@6J}2y~mLoGnHk&#xk5AcVzfzegP_d%echf2n;iCOcmCR>l|*yiRlp zQ9=uBDn4BtePx$?zkj)3kx6E67ZampeB9l>I0Uv^yGwG1%WW<483Pkhw*5XS4_zOL z27z?j_pF~>W_k@Jg(drsniVF+d`85d0NcrQ*N;qS*dS(Ab+bb^-B|XG@xHi2dXBla z(vXPWHz*Co{1qnSH_uB!S5_{q6PCLT(WiV**d*-UESu44ltYDunR zt}l{3`ub9PH|!$SM9F81qE5BCReBx)QLx-jzH8We50QU{Hi^O5xI?1>Fl)2k?*K3L zO7i%yJL*PrD($(au_fAP6b4fgBy41J$JU0@W7#;>+AXnbvSc<^%a25NK7$9W zKV3CK?-e;m(g2%-Wrh5t8oH^<<7xT>6;-4!v^z!4t1%2ST>)ioeN3|e3F@HSG=Z34 zgPiPNlJv%vtQiutYW8|fUmkRBT@GjC-4)(*TUaMF)qJtElDSyGGNqTHm+bY*O{#J5 zq|K7yf*;9^AuD!tlstJD&3>K$8NAB3c%$oS?I=qPUIs{oo7SDq!){1?R`2T_C=}D?-b_kgI73oo zoak$_&-Xyq1banw?zzZrprS12|fRjFGEt56XPb zU)PIwgxKV@J2DuyihKm*a~C&^?Vxoy2YFZ%Hg6rggGD_8=2}dhxl{ylB@5047zyclsnwT5=!DWePiliL&`wM@6eenR+?yIa$96a4zY5A9pO?-J62E;n{6 zvQqgUiX^W)9jbGr4e49Hypd7C3abU{m$Iv=Z8IN`-{%t9EHq|viwm8a(lMK9gJ`tA za{JkfskaLmA`ut+h-2!of?FA!@7q^5v+sAA{7oUHo>$gj=n1(|aXa_^b8nCsH#RhYzpy4P@ z!eQ%@3wFl*(baSM+=@op5<$KjwP9n%WBP5|Al-4FgKryTfnl_lPe{gc*x_01nzy#i zxb26tY)Z|J6BBpBAMT9bb1P9(nsoi5W#e;sU8+S;?iu~FzaN>6b+j7GT6QGk%i>k1 z=KZ04O9?5_BuoJkkfFN6nGEd6u$Wy__{cleaEslJM?my(%i(l3NMkK18rwka%MZKL zrR`L-v|7(g5&V~=FEtm)eI* zucFx55x?wle0UQJjhxX5QqFoj=c=aG%bn0!-sQRo5v8D1TGWGZ&`ODk zT}dn%$Lu$Yd~?5k9ga<$hCRd6dIa!apyb>`M;(g?HR;oDVu_HV`~%bXmLthP>&6NrmYOYG#P@CZLHSF^OQ6A!<`Ng0^2Mbn4UpS# zbOI2B2NgR&T8pVN++J(-HgRxHrVW+~lVpWQ(lK-d<@Jbh_BVjCw`Di$2V={pmFep5 zQvdriwWq;F`@G;kRuu-@`cxDi!msC0#T+e$d)L|wf`dKOw!>kCHAf+~L1?!=CZF#8 ze$-l#{i&WEz-;Dm5907);R4X&V>jPk=YMZQh;_#aAy5o<^iSLCR&b6L? zku4f6ND`nMepg>{A$TR7)OMFw>mVhCQMl_!df8!cbzmv{Z+wN8x_!mTu>p#|un~RA zuI~ze1f)UAN`NuH2ujvnc{A+BMnR!ce)anyt*CNLRt+I z&#lC`y*7KNaFaS@Y(+j1Kkgs#<&f5 z*%Ku=QcQ~9towHLl%WD|K|?(;*Hu=L635Th?{z*EXtSq$;gi@)V~ zyLY>cZozNV1MolP(N{5)$2(&zg}mTzHi

vOIl9_-PK~Hhs?xlOsU7R*He%yK+7? zigX~QQ3L2jcYHj5WF>AnFKP>w<0HB6+a_b4xqMoiLN|MX!RcAi879=cE(0O3WuG0V z-$NpcB?4Sb{|HuzmWt>4Vn1kA6lPL!#~Zvqx4qSnbw0z7o&nIQAd+R*umvYJ?NY;s z?5Y6cA~-MWX9oO1g&A&)Y@TwJOXuUA+yq_BmfAiQJ8Blu` zde|MR=S;#AB=)0hc)Q+U6I!VFB9`mb7o8T?<6Cd16Z(_c+ratiPSQHtt)TLmh0KCd_KK}7sE=h z-F2TpJP;@B#KCAc9p8qxH#pelmgFApvL(-BeG15Fix6% zn-8uh{I|wDbL0b-qt(09@0_>$1!1|I1{FB(4vV%D5i$>uK!28gu+*|kgOLGtyr(il z*o`~t`>u#MM|QvOvrWI47=`aJfrS0iy68IPWhnuVAu&ys)2~cb)U`BNZ7$uDT_bmS z8L(HVmsFSxA2`7esFxinW5w*NZn&EYp;YIRH@h-9MD9kpBf25t9wECk%@&nHocj9e zr$Wy`(K2^Huut1JTL8Js|C;<-UYa9!ZdytNh&Ndx3fMk>=$J7dqBOL zOXN+I8_QUa8QqbH%e2b9I-D=*%4tx4gGH0hOq5)+tq`HuQtm6$qm`xW*uov)z+_Z+ zSc`1Y@Dk;y@KT?hWV3z*6sPF2(oXGHAiwU6=%-e zCg3ALKk^9a0g0?N%|>${;s1MD+lYO*NNQIl`&$WwIPMnHpgkE^lw! zAk`p{MLmrcqP=vr>%;qO^;6zQwJnxB#pDaf2AC=M^Nw+&Zw~xC;aIUy?t-8=tT4dN zVG|GTk<_AjP}xXdrmvLb9^2e3MpW_pRQZtRnMq{JtxxeJ&uY#@j4gAci0aJUj*kqM zmWks_HF4{2SDYI~yCyZhG^MShpIVCZlV#C%wMs872HzzsSAm<0WRmAr432wjRUQEh zgctC^zmoM0Tr+eSR^_{qjFFOovnUM$Ey9ip)vH7>76%_%zp$ZY!1q^cK7aE zij%l6in%0nNn%^pkRk7H=Tt?uJx!f^q1hVw>8*ts+XvpFhqvB*%?%&NB9$DipF7nZ z-}=a1km=S}y+SLp{%LbKvs@*{XP1;>r(&zCx^hm0tdydUc&S5a-_FlXl^w$WDwY)c z5#asMWn(K{NtL&%r4M;xg8w2Ar7JGBnYx1i<|CcAm^Gd z$t+bT7YUzfTwUrN)@DyAUs7`9ISNyb+Ns{H^TkXiCc5I36l)UB_k0me}vO4Q+}OzJX7}D=1YjJ1|zkL)x8c{G0zQ`2#d$gLtQJt;`Ix`Y!f|rPVUZa54t$MK?kBBdkcOU4atQqKI*H zkvQI{z4U3AJ55oA#rjt*)$i&|byjm1dy$JiDytk#1KWRCO-A5sP?I^02MXD~tKCl- ztZ>qIAc0h-=?g!Ct3AILC%Adjfhw)30Xt3(rRLW`H8H?Pio&?krO2Fz0j;fj?sE52 ztGGsiPcl%?Vc~N%+NRuPYIh~MedWfqnjRE$-DJs{ux_AR%r(cNLTAzUSE_Xt@YY*d zr9W$g-u7Ttb;AggSghEG=a>2K#m0b=b~^y6N&O{ybXFVFdBj*9N)jADGDbmSsl$ce zoj~9Oi7N5iwYASR{ThSs6p|gU_N?b{X|p?H&DsSgMcEs@dFV)zQ^bH0$9#HAYvshk z#vn#slkhEq2{ompfAW07(s&K@w_eSD1$CRzm6(CI-5YDDEJ;u(FJprxzxN?`&4$fa zK`h9-%m%UjsgZ%CK^v!zezcbDfEs2eBUr(LQq5CgRMo|RA82*;xTo^ch_Y9OJcQ*n(jD-IWhD;qlah&5I{sRXcsCfO@zHHvD>@S0JW{bb`IZi`iWX z?GpHHNrQVsu%SO(*+1T`7xBdx6{3}CTozd|HUgWJ1%&^#%WswOkNsd4R0v;jO6~Xj zm8aer+*(5Gz}f*!jD@k|YzVEwu+PqQ6*-n65r@;I_}+ODdT~T1GUqDCv&!1seVwYu 
zq|NjBo=@cWo5jYJ<~OXW+QQzm+$R>{dTh+s=`!DUG$&Es1)Fc}i>IeG_v$zWb(+P-E?8CHw{@Y4%lwRlmM@ zdBnVdUN8speo3#FZoXn#|Nd@PBKD0=|6!)@)A0cN6PK`gx(a%+L`)dQxlyyL$f>R^ zWTQ{8O1u$+Jf4lqeiqwD$8xd~n~t4I5^I?Mz85L-E5BbOL%>O=`^N&ka`Z_g{ti+iJT<}mk}b)!T2fFSMi{6jWGTeG zo!SUnO|o=GW@~<#*tcWfgtLtfX)exn-J1@TPNO)&aTN{QLOY7NgS79L`#OK-iKnG{Xk1y4a$B#d=cN1A;F1H?(Q(c9+K%($7q) zV;Xp;*v+_uqX$PY3GVhSx^8cyPC^DOnZK`>!T-d@f{tV_`P|eC$cS%by>3C_M&1|Cp2gAiEpQZh2KjAqI zUVY21km8Re?xsr`r9VL2#1xR{qPhRpu8;o&Yl^&mZ5cemmShbf(lzYVN$y#vlb~CC zn>xj1k~Mhax|2Gay?sQw0}bfhv$>=2a8DZ5Z}3|GS%9fYR?dx>;k-ZvRzx&-lQ9c5 z^Wcszr~7vPP;E-#?dDoGH%fV1OpR%(!^>^xMD4`D6J18TB0p!l1*@XoDuZz(^zo1F;DS+*dbvIy8If(()GX-{?0DUWS#|2Wd zB<*VEh*4TZ+6EEJbda4aj^0Qcnpnq9sBxy4>5fkID~o&fQY)%Ev}Yp0E ziI>i-yxPpeDGz03Vhy3g=uGRF?2AkTwDkd^+PUThj-2oHfo_mBr|b~I8Rwb>sEK{D zmzK^6N$SZl(&Baj#AQK@GZ*&diJ(jJy*rO7HRB)7=8pokbUy@gH0B=|h4MHLo9F%v zGPFn*nqJyM1^#olBcg5aKsYL1$+%Zw>c~m#Nyp|DH6XqWcK<%N{oFLD`s-DA0dK|b zVz;EPznBsedArP8`j=26yB&rnr!*H-FRX~J;x)hBF;s$ohu60pHv9YYCQiMtB!1QL zQj%8SY=>5@%Ju7&nf*!TMN3Z6a0!pJ6x03&({Reo%Aq!^+lywkjA7@3VtC}t8#++I zmCSQ`U*-b3lyS!XXDc#YQ3k%>3L|M7ggbbhnf-y#qVyGDZFiuFw>c&*d#rC>xY|>o ziMaY+mA+>SY51zm)oe(XpiM~z^Gw%x^F`t(KYEBt?>=6gOtnF$&RCP_S^u0u4Y1Tv zzOEYkgLdz}f!PDgz;DihxoX4QESrppFlt95C3by@BkYfe<@5GWhQy+le*Vmbl$_OE zrbAJQI@^XOsi`iT6?Z8!enQ)6qDSw@{=KFymwF+8cvb_+6&;+=@^@Yw{)ujAJ<|!v zB6nH7cghQGGGnEmvce^g?=Jtv!Q4pXD{pt zxefYrzwmx}r$V=Nc4*rx?&hm{!~@wSN>U|S_$Q%*D%9T~1%qh3dsQ1HncKwIX#-`C zcB+&1Kj6ZKFX>C+|EwA1doCq-g60>@e&Fj)RPecYNbR|2W;l-Ik;+lX{@i|^K#dp| zYk2(mddz+KN@5bp@($tJ#p*?`P*!lM@VYVdFTlc{^u;0PWReA?>$n(~n(g@TNoWmK za`z{s7u{dG{esemi66Nd2~Yyw66vP({g-ykFy=425g{VdV`~a`F5qYDt5=_^bs`b# zwoQTU|2DsQ;vNyXBlh#nho>MK0-O|jM-k1pRu1Q$Z{P;e4XRxW=IL@?fi77bEF^e( zg+2)RLF zqd8sxP^Bt3^lB$(3=@Yv@`Wu0lGRIYW`_5s(?Eh<0+dKXPWoq8u$LoLW=>?+z0yO~{Pn;SmQP*WK;c%tyg) z%3lF3p^1J;N&deL z1MasawW4$W=Nu1Du&X?f-mHIVk~km)lLIvy88? 
zFn97jLc$jKI`#eA9x}`bhzfkzKbA0|+I%H1k*amAqyOwEx7AKc?MXf z525LLV~l=Q9o_L9@m?EQvUnu+I=%KN=JV&b-=Gy$w1?e<-H^Locb$?7!L65`J}VcD z*(D~&t8gPKa|HyaQWN}&?iwY@pA=R9QqzjvF+TatB)uBVL0?6Ox_8IW%!{rX;gX+M zoC>#?0;E)M*5k$N0~HD-<8-^M`M4$ zVN8Vjx^j}i;N6Y;H;*r-*F$VG8kP-{Y)rmhz%Pj6j8vCT4IiJmPHpVJ8x;|y9vP9> z#{egpR%-ApLb-P9llQyV<~=rpys@Q7Vh2M*HpWK)_|E59T#LKuoNJ|Yprq{a9bs0V#vUU_NAl_7cWIPd z@LSq?i|w2fb1pEjWmS3IoAwlkmC>kq`$>#l3^S#kv8;^1rDq)<(-d z3FUj4pP=8wD4Mp)rJfQMw8GI>bu+U`>&uVSj{}USVB@ ziv#AE18*v@UkqL7^5r|3q2qa9akxRTCB6Q>h*_i^JH z^C90fFFDd<#?}4b`^yix%#~+m-)0PIHo12@ciApdPE}EX-$sy;6B$DT;)?@<>$Yq> z%y*&SM}T7Ymz970A`~UrcO&Wwbi3vhSyT6ANrShV=%cO#EO?Z!M63*6I_@S5vr?so zWL`1wyw9LDa+EqgLsaslnc`BaddBE>CiG&6J>hMuyrxyy`QK6hG59;h^5v&zRAymn z0Kofpp5nQpUwv6neQHE7@Ri`R9B6TGZqYPcynKu&sDmLAg>B$sm~Hb+%E(U5Y>7VQ zQUUVwW=}n^g}0&P)uw~4&lN^>nYjT!wtd2f;6m2nChx=-c1|l+F1178Ab!)WQXju{ zTMz4?)WuF^B2gDk(#(}1+i|l~qSVEM&s?5`jvzX&v@+J^o)FJz+O&o(Oww%O)Q>*- z3%pGPAnclp`-!_JIsw_&O+TO_todHeKKbhT1H;3bmW~)_>Nwi`ew#jLGwQZ#h}U{B z%mJJbgF8cTwZl#NT~S@h3Gb?LUL-W@gvtJRugQl!oDUjz~C>U>NbX3D)H-< z&>1ya_)Ed9Hj~6lh@@p^=PsNcrln(LK7w7RotKbENJ=~lQgls>RIMCj#CdC9pN=4GWj#?%_P4$@aFN*L||4Hj;?h&^hI-H}$TziF9 zPfJS)O2{A?DVADsBU`*2HtEFlJtX)ME|@(tjq18}1EDbl2O59ju942o7C}oRs+0YF zQ7Yl28#SsL8)>=PaU8-4cB)*8p)!%C9DNlhz9zBkQ){Fc=eMMY9QZY0s%PmToYR8- zoze#J*TStJkD}VpcqK?e`EcbaFl{0Y<#Lhrw7MxoGNO3f4#ACB?&8}+npS1d9sVZ>Tp4LW#+V-OMWbhO?;%a$cb;2U@^@Vxg$*xS4rpVZl zs8SkTA-@sa@}uq>_TKX(&&6B+_^+d9eKgjw#>pxz^3rFfA02}5$H6go=a-R?_o_+w zB%vx)AM&1pW2UG7_$u{^ouYn#__u2$-u(^32<{*(e>q>j+$wJCYaDw}$dC)3GV@%t z&!&09M?qt6^)j1)3JCVDrCV{TXFmOcIY!O?+I^g86_{A;qtagbL+tfI?<%zQ$T&kk zx<`m}3nf`h^{aiz@+01-Idrp(rzLwc%gMmj2n9#+Cyl2$Kdk@<$ABxMMJ zDEb)m8sDj|MRdW%Khi~ydbZbgqdDqr)t1|G+%9r5jp>-$UAjHLCV_&5QaM9S?A-w| z_V8}J?ytzv$uKtog0rMq>>c@=U58YhBN%0P=kUn+TvZ==3OU{S(7g8k()8GMfn0dV zGbexh2GK|r?r{I2aN@7TJIP1~x~sdC!=(uc^TQa2MCU!}u%IfELLx-XkA>AJV2yC4 zaA;V*3pL}Trsx>tpr{t~6*LkJI2Na^1515%Ak+qlp)HbmLyn-Em-& zynhsNn%e=KC(6AzV~8_+8;AME1Cp%G_ml>JSrYm4i%kJ@f4Pst8#yVZ%N0EFXtu-VBrl=-4Aw|7$2h@P*O_MHS`MP`E 
zUaDg)-u9}d^1nvEeB1Bb48s^9szdnwYCmBWG88X^%JTbGO|=7?+nIw!m64Q*{V76B zpLuZ@{2zd|e7sya2}L1O!+s1Buu{acddh2d=+8=HCsHxXH+5I25X&w`78rfLk`3!0 z>h2>R7JEz6dA=6X>gL*&zK_%0f z&aEw_Yh@uJ$_~_*_!YFnVsSkB)^@=C>8i|;-8FifYlSb3con6daQ;W*OoGW zFt8&AB&(S76TADw(I8*>1ma;$MS z2fvA*bw0;qa=SwHSjLGJM^bWBD|Pnv%e5}G?j$x_ip>zve^8!EPC9dHcS} zex4%D*l)>N>>H%)c+p4r!x&qJ{}4s=6x9~Bw;Sr}aL%O0aAYI*#oXyBiAo!R(~mOH z)eX|UF17WwHXD7WXw|j`ms6kE`ZOKS70_lWS(J{&NNCt>?8WyUHxJ7v=g%qLKT5Ed zp2~PAz>Z!ys*bbN$JyV3ol8u;&-gSzwE{@4`xGeYF_w34;XzB#S#E!xJdml|?^E-H zn0;pO(2#Jqpj;p4KS}5g`F$ox3VZvQYTE}Wby32Gt)a5L!&2L{&f)FxM)?cZ4X!0< zqprxmVS~Xtae**cVtL)){$>`Nq;%`#Q{z8S1BcWku#*Rv{d*%BT*2OH{1HH0^8{FV zaC9k5h#4(9lBF*40bhYGVK~^XD`;yY-3fk^m!^| z^N`@6^F_rGe4!UOldkQWte=OK>=`PD-rG-Wzj)y#8vYcykv#(hQ4!5`<)zbJ%m(hwQrVytai zoF|d=w!4-;`S$2a>t1mb5>izvn*L)Dcki#z_2uuWNESK!vdTziC$_rwt=ody$6}3H z(pdHwDGoLAeos%R;)rOM*|f5Mx~ws2b_%y(2LAs5J^E8LRU>zGKqgf8LRgLb57VZ3 zDobivS|Rl6y_@a_NTrdgSYM-XUO)o_-2UIER4`k*n$RN1HJZqC5U^uiorTyeK6lpX zGAfD|Cq{Ar1Pp>|PZJWqX{9-PteO5C0_X&F&Zsrhl&e{PDkdQ^nGr)Fz) zNTrfzv*nTW9UC8;>5c+6x&41aW}cBVXb_zf+jZ zT(Yo{=S?y1{vtY63`IOeJnhjIb~s2+v6dBmFhs0JVpV>6&g`$sV!AOH^uPi;{f}6{ z;;X|DkfANLD!Lo}`ZCC^W+$&Kg#jG42<}1a1EJD4NH9#om=IL~UQ%On+bhR?x*^PB zJv7V#I#nR_c<}Bzc>Kxl%C5U&fOq0L!^)CU{7O1097erlew|Vr4uwA2r7}*kg9ET# zA5)&>{{Y*gY0MQowU3&(*Jjb{RzkMqiZK(Z9UXa1F8- znX$buv2ia%Qb?o!0E|&|8He0|=K6=Vl2%4F z9z|q6lm7s2y~cQhB@~7^EUNjQL=KYq67Gm=}71(HvmM4!yxbJX$rh9sE@_}{qe zr-x_#V=zR+Yo|r=SwfImc;RGn{l}nxPq#{~;#Qm3b!1}0N@4?!iT59;L@qtHjVub? 
z!wz4UzeEaBL}h`X8%EJp_Sfmr2yTTjrzx>YBd;V#3ac<~+bTQ$gQB2U1((!>dxQS~ zUY*pn1V@WjCH3EAb(N^GM`jq1@*wOIe|`SFA8D}*&Z&J65I|U}AntzWKy>#U60(G` z>L79;5J2f%mEww3(l{&u9k;XAZ8!56I`<>g0QTuM0eyl~rPfXD3pKg<6sYMn>dQ{?j z3=Y*)Z(q0Hqoarb<%nKuwo2>cBbpHSfk73YLQi^!tcM6WJ?oV%jzeYR;DMggvLyipI`x@$ZvY7%!(&4+T zqR(H76oTzY{+?gSBgek~03)T@+O=Vf+JHhMm=8|)=>DA>6w0zk8{2szi1P@V*kSsf zwC9-Fth3&)B#Iaiyt}YJW7nvT&Y&PfuQtR&m3!7JRxmU{!yo0b_~_hi)`%&7Oa39h z`M*wxu-LY&s}&SO8-jU~PW8}`T3GowiZ+nJoao#0(T|# zt^S>58_+PcD_v72C^chW|duS+3fRCA2 z*b}|4ef7GOq9l$*Lp17pq4p#7>-CIx8jWDeRz>auXl#D{XsD3IEK~W~VBX9>G5c$# zh;tK6I4tF!P|`&5q8S_#fB+PJoo=IM)ps$=R%upK{I)1aVfu7U=8jtxuNtvxzLkQm z_xfx906qG!yOe^B2%i^LNEynkjW)EPdxif1l=VGIg`f= zK>~$8Z@K-tM3*wMTbgL+SkqtxCyI`>U$nL-CYo)Pi|iANGwdpm zSN1#j>(r^19Ka>yi70%VP4%*?ry{ILVB7D|TxDWqnVEIi4qrdFM&*oBw9-Z%B<4_o z?aQy&{3=>4Yn_o5Wb-AEsVcfXQd|RMrUMNN)XE^5C~dEn%BT6VC%^rA-u_G+0?ej; z7Xg%%VfO>vbS#*K$;&C4YbRj&4pap!J;wh4>DQEMUWvn7oy&04Jd|jS_vuBf*xg_a zOYGbht@4uOdkds~J&5~th`B9UO0?=j`hg=~(0#hwlflC^l4{wC0!GRa=?C;9p)YuT zfk#n{2u-KrGlG)Y^Q?M-yN5NEBdEESe|Y=Qr&?0?biIo>sIGV(32n3 zKGrZ!f&G5{E5P85j#sYUYAX=rn?ue20Q)^VTR0m`lF9WeAdo0-Bxy&v{rX*2F|U@1 zgd`1-SX@R@x*HTKK1xC9{OF3Gf6eZHZk=QfU9oZfIj0?hcPvAZZI8JI9fFOZNIz{3lX!c>enZVJSb_!G@C;)z-j-o5UZY$ym2u6?oAY-xp4^M4fM~*n+f=PmClFhvz)AZ^l z;5#~^qe{a_gCMO9TDCpfDK7?81Sv)Dzt>vW>)B$hQa|Dqt_F5Z7>L;E4YruhKb93( z=Ts>7?6cQ@GnZmO>_^}9=(}$!)ez8)o6gX2uVeS$RrYO*1m-|g0jqfqEfk(A+inN8 zkK68d)-h4V{K^mKK)To7C#}3nk!wTs-PsHj+6UzqVq;K3k!>4E~jeMidwCsz8mhR<`@araI1}iDE~#z4{@m)v@YW6GV_mu>!uqKmd-syJmkmYT1QBRop}`VmcbW8qQdN zhlxh+2>IzD{n@CnffrljyET~@LI`M3o@?KT*SA8f_9R4{^Das5!$Y_Fo~nE?;0tCO z7Gy0X1~RJ{T^3{Q-|f;55@WAnWm?K)XH+Ip$FjHo0E48{#nG!&)23vvG_aUjZDcHD zQ7YM)8Yqu&x>66=@26PBp(QDy#n+5j3B98L-{0q{h)gyL`IVgLe=(G753oHQUTS#s zX(5%7-1dwO=zaPs%rOl@=1r3-cc-j8tB9bZ5Jt||qvXgm^;MvgfmYj;?sd{*k%nnc z)B`Co_ib_i0K?Yk1!&C@Pc>(`ohaJt7fHAmgbJdiA91o5EcHZOuN>H@78AyOPLEe>`MR)NK7vBKgsoCyq}wf#E4`4b2xZoIIDNY(2|BVaU2pMI7hi6nB>%%w-E$=!4c~i!2B#lT@8tMLdzpgMOa-{l`wcF^9=k 
zo$pYbD#$iNuC{*N`dv=060N9;WS%zz)!TdN-|@&{slq!3Z!dg7&?lrtg(U?Cgt2IJ-c`J=>_>+V2nEUQ&^0(?0tvFiV?iG z2vz!kPsb1k>9g(k=;$l1F$@+NA;^pec}bhL;NH@FI? zYJXqfs!BJnfjrZ=db=9c>o?$zE&#sFLMrF|qwsXERDjjpA^u4*kga?bhiQ z8K*UGG!dk=yI7o%V+VTn_d6YV9V+wIicc?u$ee<@l_NxGhWmZG2gFg$k)l>PT6I)e zAP&b-kbQ{*ZTIL~1i}#{!q-#8)G^NnJVi1($->!@xTJ~)q-*2vzJ~E;N;ym)z-&0w zgvzE)T4H^d^w-JhHH?hb?Flk96`25uf~OW^@LT-4GQ3vpO&Zm$<*4nVy5!$K`SI0V zi&)^~c_UW*w2 z5Lm{=HR|@Iw4}Usfo~{%nCrRMTPtGnl2hduh2Y^aWJ4q%$f13kd+QET0Tv6uIM@X- zV5KHWYl8AJ@*{tg>$h6uCDkBxA;#GJd;Il9W5T(o2_!g%`iV=iVgUgF-*1n5|63nycI~`6sO~-l$+S+L@MeNoiTVC3p7uKc`qzp+D6`p*VmKB6Z*TbnC;I%X3=E zOp#S)sEXQ^xbtG#3WNMd-`n7HF%6Is;Giifn$t%VV~InhDt&G6039^&^w#iMIr5KB zYgnSlgJMIoq6hrC@4wTgIP4{;FRi#DYLX4ei2cf+xcdNlKPQaJ+3`B%YtFewu#ry0 zrxIk?^g8=Dw!Xw3u(&y}h_=hzy4`~0Hfb9(iBQiMub-D6&i)AR+pj6(YTvZ;t#Me( z87lAss7|}@zjMESnMrTN*(}YBmP)*J>`g1lC2=2@97E&*{t@~Pw%o@+)sodCI*@sy z0s_QXdvW;$brcr&ebSF~!AWBJ$ReJiLd`TxBu-~qKRizTyLGO4QzbHiS>vKN6d`3AXnh)p{U^Lg@`$0s}!?Zwy{WJV(hV!-d_ITdyVy`j#Cws zPxt83x})+*BVw+n_V**-eS0N&Qc-@ZlmWQt9XKC7crozH1hi%Heyvy)eqGys+>h(f zJZApI6^^S;K>bCDR#rzuygCJC2iMrmH=NNkW!W zp|jld((QS&7QU`S##Z#PQQPCR?bk|}4P+9Y&;U)%h^xcM*W^GO zC$^98(1XOBW)OE8B$K|x{{3l|+?QlXSZV|VmoCHl^`hQ3n#-XuUWQ!nT;oG+xNinkGUP$FcI!m>m+s+xm2Y&a8KpPd~_apgJiO!4H@jIFYqd zd;b7lg?K#l2Zon=&>cRNFxf=N2-@VCncd4MLN?9V{+(wQYJ@JHWL$>6J01OxLM%2V zR;-``qkGqL)+>ysAzn_(H>U01qB%*rVQ?Y}J#sKi&CXv?8&CUmx_esYozywl@4mLl zj$<%2QTRqZ$kE^H))@+N1XduZW!T@(S{m082w)YEnz%7T9DMr*b{`+6x@lemu!gB zinFh)N9SE3u~H~nL~rI8n2n(S0A7moB8GK(hIB31_uh}+stg3Uq?$+ai%8n%v+4fb z01u;kke7DZ16CMr2qW^i5;xp^y7J`*7GLvmW3W_v^#0A9U1&^6RU_hAwol3T*Gayn z3Ds6fJwbukBprVJDUWlE#gMd}R*{EEtwStjm>%p8zxC*$h^rj3D2{Kas&>?_UVprs$hkM+4WgP36k0hbe4ZK9s`z`AJp_iUXF$6+G;Bl-{3NzSRbP= zK-7}L$^59~HVh#RUob8yE4gAxvBlW9RL z+W9?Bwdfn1a(hU2J9g;nF-IF9o~(*OaUP;O^oJz`iz9G}fd_lm`WiH8gJe|N-I4h_ zFuh{5DFl&MiF7|dw_eeb)v5rysz@XQfjZ^-^{SlySVK(~Spt=E`q2G*^{1Pv`37cY z4p@(nb?`c9rgG&(1));Z@b)#PQ$4Ll=zQj5zWVsrN3J%z2&u%-Pa3DBbJ6Yb->JHi zWMigDfrAF@7B--c5AsQz|G1N#8`_k$jmAEFp@Xw 
z2S@&$PON4tPa3Zl*2gdN*H!RZU!amZ^1$Gdy7N6;htE+n7qw-XW0g3!Ze9ES{ac4| za4mMpw9JSomn_7j$`y}qeQ0!DSsJZlB(_y#P(lVB&q?)VNtRAav&3)Q>!vlzyP#lEElXNNEce$vY*O&5pPtQC-WgxbROg6{{YjbQ(dbA%L>2> zx2H@7zp(2%HSm&efudU;Vp!rU2q;dD!TBFQzfCK=*C{Nr z+LChZ%0*-B-D-!!QMN(ZUCnfDsFETJkCFOx>S$zI?_!-)Sm9E9ur=-PuDx2NG5})7 zIBbKCraBZUA|EzlR4SjIy`RIdJSe8Qg3N8}kji~P_xI~67|YpYTQHFb3`#pMGEcxD zdvs1twAjT~zitWUJ((gUfIq4B>U_jJG&<-ByaRMchp`kGqM6|`88k$X$yoc3f7`40 z@tz&XB$(j}i0%IXGS>mekRRYWKHmLvOf)jXU|E$Vj?Q*36vqW95*W z4QWDZ7}1DPuiM;p&&SZKUGT>r+5*z&Kq{lh*qBol6qpGsK^{Rc*0pDTWug(8_)u#hUo00Zs6eRQS;zd|-guBuq=yLbNmbUvSk zb{dvPx=MQ+0WnG~AE~F0{%=wL0507eTNU)+G%U_k1Gdm3_WA4eIO3cSej>eXYANgS;=r`u$nj_~a~#jQzhw~+uIwsG(MI#WVK^Mfio zFdzVf->S;iZ9@c`jmom|(l*Ms>JLhL%`pK_Qb2&0_^TO*s4BlXznX=Lj_MBnetMf# zn)IT*c?zq0F16dIIcJnKHLQ`ihbGzYueVF#v0iCiD#4@KyT5QgdT7%yTTO>8%0C}b zC@G%A+?WjoA3bZxWY#p6C?W46dQV#9w(-tFK_-V~R{8DLm}HK5#P(Bzj>Ud8_UM|~ z?>RRt1-7nc&2a&l#B2FmUG%?xi(B!?>|Gqf80m@EW7eF!m7$6jTymFSc35#gq3Kiy z>gEDsV_)$xJ%{Pnhz>G>NeS3U?8*&N+`Y!zzPj6y`iZLXhCe4m`K6oOefrT-NGPB% z4>C=5{RczjGFNO$@I(T%iQFG?)?AwfpdD7T;m5Nw`BzlL$_QpB{W|?77C0C}le-pv z8V6yz_LAL}SWw7Mm)(IH*!vEPbWwO&4o;tvsn@6UJ$cm&OiJ{|>1oF;c^M*=qLDTT zVXplT^>cZ~OBNq`;!gT|m!NJcS*TnzpqC8W2VZWNHZtqc{&jCJ)B={*arYg0wVFqG z-5XzJzn|9{?Jy-%YB&!1>bDnT15prpMH{eU!2A3jq7B(7NU_F4G=80q^@{{SfWBj>lrK)p$4o$UH!=5`d1f7hb%%GM!sD0gNdSBUrPu39xu zF{%byHf^kO*M?1x%)GRB_xp9eO08bRX<}KTLZh0I?tk-rH@^g!>OxA*B&ARDeLa`= z=?%H1y3t6C2_DKj_2!)32xm5h9BKKkXQ~;98wcn5^|2{R(Q3id1GVk0g2vpQ0?{iF z0q&>cqlKbm3?H8#e*4hGb;mLc41cHdjPixwu1tSFnW+&23W_Ser& z4w6FfH%Xh#sVizY$P2x&?t6T6ZVx9~)s=_n+2(+WBOrY5f+|p$W>GuZ-U>q zLe{4h0J1`)I(G$IOgGVjOJ**#@l)9uY)x)he}@9K&_Ku-1F$!}e%jvp1HBkHCWE+Fq>@@g-YD+PfyAYvzAdJ5Zu2|PfalPWyy?wCo*OFT1wT)tq#Jj z_Bj>#*zS57&I;{JoarJTA}I}h({yD=x;<1>))b$I3^lW z6-SS=#*GYq8kLA_*RTx2W>F-H0pb^t?mK<^^p?C;<&9)m+*nj+BWH?_fIsu!tg&7n z@di*vvrq=SZMkOl;7IpBeyQttUhPa=k;h$`<6kmHeFZ(o9y{;sdR}%@Xka%B%%n}y zye%AssiAM9E(W*y(+h5;Z8A}y+9a1^OOBg4SglxBS z+h_Xz-FuM9<>*DJG3^{9Xe2B73wPIVe{Q37LibrcjAbF1rZadm2X~Bi;Ra}RERr;V 
zOpl^A4!e8x}yRdiqpdTby1}$@VX!C)2e|cIcvXj->nw>1&;FMg(Ctm-y0kD>^1xKU5m?oRiO!<7D$xG!AEjFNd1pS z!;yzFuV%}A9e7Ki0Rl7V_9y;cpfL8(h?=u{fQgp=^c9RZHRS5VQO!FCF$9nQKd z3E@GB$QugP(PmFAl<^D{>qCF9zQ$KwvJlo^K_I6l*gO3+IzJ0`wa+0g z$x^D$Px9pM-PcFc?hb&__8aN>-;-Gj%`}NH5h;(V!R3%wXrm7seqTuK^YS_)AK~0{ zJklBCFCP}#5DPHx>FxV<=0^>Or%f^tW?Cg7Taw4_hp-$^>#|SSb-3oL#UyhZ$!a+f zWKkb>MF&bq*JISB?%5`#R?~Di1YABFpC(#wGB|j!P+55%Bj0=XJ^B|b;S=OCSBHd( z+dLP$54a!su8OCfSe&I$-fWIr^KVXj?`QVuh6lx%>$XNF-MDKstgjJpPjlFf`*q`0 z(Fe1%Vh{z^bmGpO@su#Zgdc(vt#Bl^{HMNmYNw}ih5OuBh_B-m|CE`qLT3mf5 zks=6HxJBtFd++xf`<|(Bd0c$06I~Ug3lUK0z`F0)AN=js3`7ue`>!&U7r3#%Wx`?b zrJqs$7bK4+^%6I6zi!^%-#s<0_2j+bS>45fKyp0g5tj>y{*I_g#4Os-7FkbSC+uV7Ar((HZu{Fv3AYYg+3 zf)>zK_b2W89+&*8qf(UIINQN|^^p8IxlLTh0|4h_1q5`a$APU&Cy=qbf;3Z(hhJ5# zJYHMr#BjkL*Ve28`G?>SkaR!KsJzO>S0eua?tCH>`5nOQee@4hH|mdVmQ|oB0N`XV zm#p|lGes<19lx>o z>K630qxv{Z0TxlByg(%H-}?1iKCt2^DjKd7gv#Mi;3cZws2j{i;C{=zK4ek zPMo~viV^8js(=k|qr|xEl1MC+?0leuKn;6${rbc8pj|m^i1uKX&DY9_gv->m#?4mp z)Nw4y*arFQLSUw>zH>zWrq_P!`;NYnIO@Vd6^cSca$A61k^%SE+oqFXaE(dtWN{Fx zx`P@jfw8}xckSDzpR3UqEh%)IDj1k6<|9idQF%)dDnB=*kjj2LeD$yR7GpFLC^3_9 zc03z_8y~-4QB?4^hOcSrM2@hpDG?W5Uw+|<`;VTR(!yfuS}fI;Fu$0MunVrmM;?LS zTBS(HYI&d;a%FHY4So&e*e#pU%M?z$S48ib_#M0SW&Z%fTbSD~MT^SY;(f?JJxltIqK(Q4m<~)JBwR_33>70EV6+r6iKZSj0}4a3JsP*U+=!>?ES& zBQz-rfx$m89moUYT^C~R9=<*+lE)jVDoE_Tzi+osA6juX3mnQ&Os|>l_&f6=iYwYz8_AzJXgN? 
zF(05kQAg^P9^9;K!p32#)^=J%mGIV61(BmPiE^}xF&p>j-v)oOCNIRk9h1d=81dhS zq=7vtnxE)B2+xOEUL@q5l9y#M1%p9n<0YUlBKl^7U(^aXigp zUgi-F&o7Tq?mDEY;9nHgq9@=KlvZ?SKA*pSzKP2CXT$yunrJ*r;><=LDkbt_#LthO zMq(RkJmuN~!`0umzIs`bY+b6hDB7|r%@)KXi2&^PBj;bgQ9J6n&QRS}R{CS`?V8OZ zbe~o8uZa8~;)*rSLTbKbaj!AJA8)@>5#*Y1@RZ`mvkpDL{kr;|)#~`Hcv)5pG0K5o zP0MOK_W1t*@b%7fejo64n{9d=bXEM_BeZ?dS-rQ_IDe?LjI|r+kyi>B22!~;Hx!jF z#XBEVfbdcVpZEK8=2Wc>`0mXVWVWMZ3O&B$`*hzk;o8ee87jsiJV;(Ivm+Df{dKO9 zcy|Yow|+{%t4gdn#1~#yACKRvjd4_Gn*}rJI1xLh_UdEb`C-#+Iq*~5H^AwA{C!(> zyo7rZNuUaOK>q-r+p0Wog>or+?mq1vL@<^+MTzoL{$fYD2iSE>L&KD-{{Y0W;oTKv zckZpe!~380>z@2zrbXpAU@8y@D}>}cKbLu4ybCLkeJ6?TKK&nG@c#fWXM~nzF!3OT zcGkM?bucc4WR{#s7+tAtl1BS$rgGr?rH*M98A%~PZSq0;dmpz)Q|j-^GKEDv6Q{mm zaa;cYh882HCylvCqkG?^{{U0ew4M*k^D-n(#CvG$d-e4j<6fn9SqyF^1fD(hzwn*@ zoed5eC@S!lgiNzJUfSQczka7@)?Z8<*F{o;TLGnf!($CJGc#JK`c(CH`PaVs3JY;i zw6XQ@rWrN4jXqLa!1KcTo>Uvwgr-o=bOv1*~B*ZQw;nSFlA2gIC zc*KqW01Z=TUpqZ5o>Pvu8MRu^7IIH9*pt`P-XHKxRb)!lXhY>N2_StE2i%X-p*%(S zec_|#v{?0a5}h+Hhi|ay1M2jJLQ8B@jupUXb%Oc!eBETAX+vMib|AYk*it=;pWCH! zAiY4v88WecYRtz-e{Q;mi9ZOVyCT`HVuCg#GMD>)pB(|=-@}P0-*wv*Zp=M^AEx@I zmNN{-XKT5n95qJ&08qJ$Rm>qOHKk4HAAYn;;mpITWDP!1%0SrJApX659Z&H802w2T zRv?Z=&{S4&$e+`y1n{;pwP%~ijgk}uD}nRv?b03f>NLp6B$UR|cs=L~=6qj*GtFR1 zwaE6~zJIS)>G)}uHKwy*6y~5F4f!@Z9=onL0gs={niYVukkW9EL)*C@8tB{9tK;d- zD`d&@Thkapp})W1sbBTlwM;Yt;=CxAG`KI6M}#~}22j?oTFm=xZ5Mop>#o0egTndT zQ!LhN$q$%2DH=V#$t`ENHVJ>TFa;x{v@ve>)&K-bgvuW`7%q+-acaJ!}Fn2uDi@%hq(HTe6nK@kmM(* z1Nwu}fBIH-j7c0$@o#;X*nQ8~buaU&fLlo1?yQfg_U+y zPq-aQc$dOgE89q7rzona#a&@N!1>=@zR!hi*0dxG=AL_1AOe1R8vY|8Jkm|{(2XJV zk>C4oN2Osay_HLz>6M+GMxJYoQNxI%4+G3tHi15!euRDcI+M!_NYmGNo0f+neMA0k zs!l5R)NqVz)w8A2{{0y@hi_!U$1HBD0V>)6Z}#f9V%t6PfHn0A5h_8kwKBnF`fg+m z$jj}a{(Wk@;rcMw`ix~M5vC-U8zn~duiLAo@V^@IQB>9!RV=dT9gKmfyK1OtBc7;DAu^g3IcRgudp3z;c6G-G=5~fwaI&u zdh8VZ9f*|1VQ(dzX^dxU?b!F|R{T3UYbfM6-(&~d@6`?Ztq4FW2MJuw?if>y&SNS! 
zq0=l@J}c3*rOJr(-jEL=)g_<9@2Z6?MDHjK#zyu&{{H~CR22LpQnUs&5+1-a4&AyZ z&98VGe+KqksJ0syg<$18ERUL-= z7XJXhUSRwqJ!dl7Bq$r&9c%aM96_U8XAW9vX1FS5Uj-~pEr=`?0BjdkJ-?vu(!)}t zy49B77UFh&f}hi_ptyS1qO)2UPoVpaeY^DymhhzTPSeK31Ulco$4+J?wQOAwyy|Bx z%9^=)Txm*`rAq;OE0sTgo|&7>&^j^30H)2ovf1Pt-FWoIObZ&x4Dsz% zbglmYQ`M}iN-l1RfJBsn%}DLf^#G)7yjUmreY(fYHK#C?;@boT{eF7cE(bGe+>-=l zNSBE^*C0K*cSpjOEF^PUVugqvq2Ko%JGG}wcbtgCpz1(pvgpLBNTpavb-&kLC9#8g z837bTI=SB=*GsA4PpFyNz$H;Py>{vEhddKUAzDVTi?wYghRNw4&!Wd_yw>J)HF8RR zA9*E_(z+E*v2UKVxr@noqpAdp+q(A$@4mZ?{{V;e>bznV3Z$OJb-uET@QS60e8+@^ zamj%8=m*qj>Pjxcp)f+=740-LsF6#d?8;9}rD&ucd{F2Sv)5F;_%Br?5?9+HZo_^3 z`rVNH8M_@dk4OvlAy0jMx@YsS&LSL^Xg+2aFs#w}OCj@I9cU@}`RinG+OaqSyQ;sW z_x2ri3@_nhUA%LCs-<9|Iy{{XAfW!e*)g>J4Zj9C0Q%feA9Q3))mu${>3WIu-)YA+hPa(Mu4 z5x-Z+>$uMZRT>#4f=JR8CTZEbH|hKIvk~~K;Qs&-W#qzQ)-R~8JrcKY`uE~~Na;uF z?1K#@TSw@w#I|XIRDDmuyV*Maeo+-c*ATl0_U+N}eg}$K*`=l#;^L+?li7aY{rc+F zGLpl^;;OQOe=yk^=*hfLAP4GUjwL6GmO9@_A6%&00cF3$qR!)*Y`8%1Rt7rpiacIA z@8+yxJ}Mj^e%~7FZc7AKdeEhXfg+V=C`Q?ojS_YL0AIgdO9xLKkBwn9gp*haI5H^V zuP~`ye4P%Aq2WA!N=x}jl~4(I6xiOwY#;CbL#VrAOlG=Eu#Iub*J`=MULNqBpE@ge zH5S>ADOpG8dwhHjl7Hw5h~LDa=9J8i=bs+OXI_vyjsE~`b=oO-CcWibqVh^hG^d*A zlfCiR*ngi)&w}vDi%8l=nG=d}C%G)%zz@{)bn4V@g*;(i0E1=0BJg$k(Q`@RjwIB| zfpz8$_bfg))2g*`@===Gtbb5{>hrwLCVJV7Ul zn}vB4DA@ur@_T&s)FtpYg=+a~%q5WVQWvrVy`SoOf$_&GHzdZ?$1_!`?hL9AtONOX z_t^vW>1N4Q=o~vUIVHzDCMI>j+?)B%v(RMlz%Km=hH0pIb8k!01q)2V@!53yToAQJG*gKVg|MC)YezSej|d!lqtlo9HGm+ zsCy{i_dWIfy6+QMsapEgsWovk1IgP#Q2gi*)BQTpVwN`}EO_L|M)DMIGEZ_8`?kM- zVb_}VLBp^%Smn$y{t1=}sTIr1YH*6}D@LbXv_EYRo`%Nw2Mdt9X4NY*nUXWd&Lxx` zeCtE?>$5}PY;+iF82Z*08JaIA>IwCocIkCL2vEjTNaeCxM2&%pHaa^!y}vQ}>s(+- zDOGS`&|P)N@gIhkan1ZgA#NK8eLP~?g4@=I&bO}pIFs=9lsNfmTAVtT23I4No9v+e zr|0e0L!ZI2@mS|~iMT4K^EPxyAoly~u<5Pu4pOVkR-8MrapKNM9Tx2mKK}K4o4oxWA@&=b+Ug2VVdN5EBM;iZh^#E0LFAXc2V1~ z-@o6ksp0PoQ%uCp%obwJA&dd8>NVW`zw6T|@Yu=hNmRXJtLri_9fxtH`+c|3GPAP8 zriwNKLitni88c~f=&9nYMMagOf@9rC)__s-vU_zsa|z-v4N`^&l-jWL(p@5GApZcA 
zvH9M*`s*Qy!9!{~;+=V&f0mx1qS`*d`+W7L4+!{9LfH$H(e12TF`rOf`|IuOKetb2 zEuaRGWfe>dD!910eDC04bt=g&M;P+YEAimUK;DPt`}_QLaZ~WZpt`oAqV@itjEv>9 zSL0v(k>C3D)hN`!Heuvi(Mo*6$m}#%iuNOZ+Wk7&WXR%p!@T9q%n7eN*5+LYO7>L^i{{Ro$x&Hu8 zfK?3;iLPj+*|hrXrS14F=C2>{TP8W;QNx)AE+m4${GIpS{{W{&W4{mTRK~ZOiz`af zGOR<2NJDnp9{TUqjoeH)=x4ba=|CuWupqbmr+(i0>kJk`TRADD`gdO|lA;G2L!fpC z>Am#9RnU!&$n1_6DhV+b>Rq*t#^GL|umJjp*=ZCHZT#t<1QgJL%BpaGwfrMlz(e6K9{R^r) zRBob4yR^}8T6Xd^=B<3yV;|;oBJ8;D@BaW!w7}P_#%p3=u>Np)9>X?(Z*CwDZvOpu zoA}IABCNHda2zWjjCbsH)=7)R$x|M)%qI*xqRpC2U0 z&7C5Ul`LCrC+pkeuMIe@*KL zWSYc~%z;@3)abVOgZDbwKhveF_)(h6jd?dJy;)hMcaP0>{{WNKzi(}g_Uq{h@ce4L zscIlVBxn*3@6v32=_-Y*9l0bzoQWuSk?uyo{rcbKI*^hQP8qClYua>NhZEqx5zr|_ zvL8rSl~>q%Y!B*xw@p(ME-k)-+-6sWNsF53v|P3|@&^0$((k@Hq^3zBSBaO*hfnDQ zkJGvL`Rj*%I({RxH)HUoA*8C5(0zQ-MjB_I+(v-*>CO6% zBlztw1&s?#H$k4|ti!8OQ$ro@*kzplA?2gA@aymnEx%W0-sM7jw3kys#ohI;@1p2E_ z4*16_e!5q+G>>soSzHxB%mc7h1xA@b;-!0QvmcYw+I}g=WFc!=1d3YIR#xX$1X(yU zaXfo^y^p8!d;PW74yog)vDsSRUPJ)Miz<%sg*pX@DhTwAZ(Y3|*mvlq@if?L)+I0u*z;_2~-L!dob@o0oEKK!9%M@dpq_L!syk5^64XFp<5J&gvpqK=O@PAtR zIQniOuA%)Mr!Go1)LHA$N&H=mpUY56Az0LgKKuI-(rTDIotdPP)5G3g5h}M5dv-eZ z{dL!2mTJN{;~4UhlCdIhU5~cD{kQMZ>wX_-0pr(LV~ zDEePLs)dt$ky(k+_$2g^{4qSzq>>I?xw&@qf=~MP>^c^b+qFz^EM}Kn%P06_i#4H# z8!4$2(ns|Y88%28=2c1g8UuYHt@v3JXDryGWs)Gy(Za|Wx`fgJ@T7O%&%a%|&ww%# zs+iZ2An-pL`To1>eGdm(Pc+QQ=Ej|Zpdv{U4;y0};*28N$jMSWP|5Xe3nWC3_-o&kgdWP; z(An$h(fCD*#=^PJrLT6>I+iN7BL)d)17TdIn}I(1KHKZ&Ovm8=0OGBinLiQu^6!T{ zGhSOyJkJb<#E4msq8NYx4R%1!*bSbHp2@XE-|s?N95^3 zF7>g*k@3m9b@EBi;5-!rraD_zENH9WvQ?&W@2<;5+Umg0J1yZkQTHCbiEAMe%PQy^cW(*7>-~a{r!ltb^h$_(p`@GsfdaIt#Wd-vKFJo%^^e+l=aa z@g1}WW3IU$gS=Vb>;^(V5_nRUS09?jOJX}u&E%01QVOh!q%%Yck50AydhU2H_LQgM z9}(B8xGq$*H*QIIdiA2o8x4>)c78Y4cj@>XGa5}t0#C;U^Em$is@1CCDOeAlE9P9! 
z;LcL#;)%v(irD@iE7&i|wIJddBW?SVzT?}XKN3F=aJRf$mhk$?TBVwDG-)IVRa0Ov zP^Z}U9epqOv*Cr0HI4BW9wrsC6PfZ>5AzgpRT^IV9Dw}?W7jhASAuaFA01uI{duz% zPT$mNLDBTDZr$~v{#{FnpfHn7nuBiz2h{3S`hytZdx>8&_=m!I93C2K5Q%Qi=^D2E zMEZz4PPC(4OGV*d5mmJlPlTF6bR>`~Ye!)~_5^*-y^mjIN8x-Ner#f@1(RU3ME3h{ zr7hr?e8O?QTNxZPb0vTa>^J&!fmy&^a@~S^Vo#F!-y`^Ze}NcsbfyhE0V70%agBOT zz5D(8BMsrKP7|Ga)Zvn03MZPI6LJ3l5CnJr{e1~Hg;g=L3sT1#PT)@}1}*mZ?Y^|* z{{Rep8z@GsL7Pm@?-(s39@}HC{kjoV&Ja};=*gI0K8kl5;g262lQ$nFqn)?UTP)td z0o)GftZ;btlH{;hgPD04n1T*5{gf}i^y}$KKMt^lnq%uyiWppIh{t_=_xRsh@c#gZ z*j!Z1BCe_goSFEJME-~Vy?DBIMA}Y}BFlkWrAr@@5}acgB#f~1jttA&THgNvpzD55 z2MxI(`bk8Q2+Oh!o(Hzae*Jgb-U`Ino;OO3E}4S*eYf9bk4G-7;h6VDQR`B8QCu=gmjIZ*lSZ^hGXN40F!a6#~p6Rwr^R zk@f>X{{4L=Z^9UiooLq0RaJ|L{y_kpea^e}IVXUt*T@-Ztu(30^-@WVg4Kyt$kB+CuXD>@ z-F+TB8R7E@V@kWq+fwTux$D?#qAK`%4O#;gOms`4c{caow)g)4&sas#fARU9t(O7+ z0Min>Ly)p1iCr2(oQ~9e$oSt)aoBoPs@Q7y+es|IFBwp_)Ba^A0CYi%zTo}(?6cx& zM)AvaBK4??vW+*`0rva!vVRGyu z_AFVBr2c03qd6cDeV2Rhu7IbH%iXyhD?)e?mzxs{9whvC{d)RZwmO{97MMcZqad@U z)RKD=54q?W{68Yrk_nfaIN)T6jevfCPPDhX#VP*)P7XPXiR8X`zu>!3%^&d@fC+fHS8l1Ja6`Kmq3Rl0v?= zPRC+L+oo8|TvZ`=w+w8P$gz2dG@jkNeY)$md>@9Ev~kZgzDkoO#GG4a-)uimzWV$; zIpHgErF4p0DjYdxK7{SBZ)4l7Z3c+sjD0$rU6>oLbCk!$CO%;T)%8~js(>($N7e1G z^6Q>w{3n@}tJSCGIF-?h1?Q#Z+;%;P9rSwoUkl;i3*$ga7}Sy^ZKQ6*4HMt`9-qmX zdi5HX8yi-N$0JUFL^P!KC3o_AF+hJ}Zj9iYP^#m*33q(HoW+F6Y5^Yws5VkOb|vGpKrfZm9cpBvTR76 zXPl(JF3eaR_Sfsu$YrsN$y%0`>FfEq;O ze-+Pjzl2|k7_SfB@oYG{*xXfTGTEDvm^=f0*%%2BAGsa3-?v&kHIc32T81aWPmDuN z1Om?xlM*+{SQG8`Z63aj@IQ+3%{`dBM{wH0WMgE#T4m+TIuJ-FzKHkz`sS4U4F(~b z2My~NXbGh;Jm?j~CAO=-VgS(l^va@`U~>+;9YOpmUe*?dWF8~nu6ZAYWoq+Rnt?LR zycMHTzM<{v`RF;QuZO9izT79UoVd9BjJ=BX9moFw9d@R~c@G!UF+fJR=y7KFKijMZ z20M2--jq5H{CJM!_8|0-U|9gev>5^l;MFlLZBf3KDIJF(+yLeY{{6aTbHH?LMOGtK z_aYxLlzvmcZvA!Z-WkI(EnhYWUb*B-F1RnqKX2Eq*#7_y;;%H1UUIR-ycR>Sr2PK< zV_f#Q-7H|t9&3Q#{3WLrNv%WX8p;#Q0Bw}-x!3K{kbW3bx0S9jb(Um1)VxZPKF58( zzh6KvgeW7)&mpSz0B>zDZFlZIIu7mu%c;jr8X_>;g+0`My=&a_y4K*^NS81!Lrd`2 
z0Bzo@R+6o_4=GwnccZS_2e)p8gTNS>9!X`W%(BQJ4uFEK@^#m4Og(s%wYP~(idmX9 z2V?aox#(yxn65e=X*&+CU(`?NHPW3jj;mSHobItmqv`{vCX=^e zzmAf@_)X!D5VK>=t5KL3NL^QmC+)wtUA`)jIv5q*nY%IG{Xg_QVf-5%PGa%uHz0C& z`<;H>P$|ho{2e!um=86@fBgsH;>gWzIoe1>eq3<3xIZU-d;b9FMhh1tidc-bqfilx z4%_$m`5ks!RBQ1`;Ns*+7i!+)eW_h2Q3cZe_p&94qg~K0M1ajqdzaN%vhc{GC0WKfl1uz#fJVn5wYK?A4>Lgs^5m9 z;ofTJGJXzYXN6?PMJQ+o<&j31{0^cr{{V#93U(xlI_Gk2nNvkU>*-On*60y?uAZzvgdin%r)^Ze8&^zJ^d+YS;AL8FxFO=BkRvweI?tiCV6(G!| ze-{p0l&_oWegI#Jb50oAc@ssXAD3S`-%Vuy0B6}gwV zM+m?2udw@Xpr9CO2lZ5G3!Plz}?G68xI*u;SzoV*p@V?k}SIHTrQ9G>ni^M zfSEeCjB?77!4mBW8t<=R*M6aHDP}1gEg4a*xa@i+)c#v-W9`_NDch!+<-g{3--Y zFQ4)M0EJg<&u7kq%l2IpqDR5kPjO#{(_*JzrG)@1>fdp{zPjvj%^dLwI4Yj~!Rr7b zmy$^mddM7)V1KVkIgW*Sm>P75E!A`BKMq$Q>otkcDv*128~gR@cpgQTctylmit!A4 zX#SmAtS26_Lkv$WK$hRX^y^i+@Y#u`lB;>c@}21Z-DCir*-{2#>Kf1C#i>Pl(ce+6tf3LSx)bchf0|<#Xt$TMJZId-=<1zCQx%k)pI`dvN zC3~}@sZsOzDi~=`C7Ir4MIZz0e{Q_P_*)r3$31u%jEl*X_XWCpTHT8COfFdxBb|WY zdUy8g!H|wn(Y%Oww$!I?wn0nFLokFocv6+dclreZK{`KvlUwj*l)REcPS^7B><50U zV91E}BP^=g zNV{+(9z$dL`~Lu^PNV>ky4Y&1qR`fj3eWK0@dq80mZyhumR7v>C;DWVodEveAK(2) zT~j^yY?(6zGtcNo4tM?_J-Xs`{{RFti0_)P97!d%kvr(ew%YrBy6xT%sp4F1X_q;X zdD)8To%h%Q{(pY8?QY39kxLSLNWSV{C*((Q>_=i)@XDCixwofvtOfoo&c?PTaMt$5ID6myD7I3CRb3{{VfT^6B@7JS9@C z$eLt24ATi0A6_S65V(2*)LEB$#^h+dPp&tbPy6vu!$6?J`WuB}Hq4fk9_OTrS z+x7kb0A8I8MVWQxq^j!tj==tBZ;#XU>lX0&u~~s0TQMm6Qd5q;e*O2?i-}39%9Rw^ z&e4@7$VP%mKoRWr^7(tPA3p^|Kvqm9N>w;iY#oq_^(@7#MH{@(p5%yD#}W*y<-#MwhV9PNz3RI2fD zkCd{IKjJwfh#UI?dZNOt__?K@AuJ6!92rq|bUnNM{`%_CA*w7#A}n&q9JF8$e%d7c z_1C!!MmlAdSt5-kECTvQzJ3SWeL2l8x_QDG?IlUi;z&aMGjEw*+kJq4U+d6TvdLC) zZc7|T6=Y@X2tAJe`}P2I=Ffw?`E;s-&FniJAAk?}{{ViDvEf{O<&vcFqsCLn3!$Li z+Zz4Pew5l~byJ)jO0ry1TusF>i&f>7;g)YfVZT8Kx!+qd8A}mTk|SIQnS9P8eVCr# z*F&d?h{t12xA=E5vCPmsN)Zr7vO9Z|?g0b8PiLu8C;|$ojNA2ZxdYg1 z?eFi=RWURm42cqjj1j(F%O7G7>((}zX%MxzS2HOJXE|BpwGE2lrg#4U5FvzMf02z`GJ*{IPv55AFo19Sp}WUsL~+{#ZP4) zARp=N*PTx>lCjt@U!5SbUMQuMAy#P$F;xw<9>8zgd*AixjW(O<;#NYO!Q<=qAa?zU 
zJvNUWURgZZLhvAzIyxQxN6$sij*8ciTaR}1wza>2N87Ds0W!Ty0@jlLDmMJG)`|&Z z5hU7B)fA!J9H1G;c{1o4y&95?vq@O@R!WV*k7Sx%q+U>PtWK-L((7p zU*LZZ85YNgsYF$|_nAm1=jYq6oPQg?9(+lYziS17o9XFCxTJNHe8=tF@2{G&Sqz-+ z{{T~!wJ{|Ac{?7NN{w3MG{9eDhyMVMVW_k}5=n94vi^(9`0e<0J%Jr;^+t5kq^Jof zenCI}4UV2*e-UsNBLc69DcOERg#|$Yj{AJ?d-wgi{#P}-bnL9r&muaIdB*<7{JZsC zg8W6jQZSbCQ%j;hz}&U{Kl~k4Qv+BG&2;%HTlCMTKG$jEA%9M%&L!GkNk&B4$c5zC z>T zKHwdXpQ*nIKN~zT@@y_C4TY)+5~B%HUn?7Jg+92DqlUC(~L{{Z|tz~a0;;QU<-MP!Bp zT_w_}9GQS6yLQegK`UP&XTK-J1q=?Ev-~GON!V18LX%xRVCv3VTZ0q}cb=YUn=rzYN zBy;^Ks{a7Uv2_3q0lowCE8_gO{{Yjs3tJS=P)C}+RDtzbSFVV|+150JQaAq6=s))l zkDRjq0D-glV2t{M z9&6wC{rchG?IHMghqvLz&QZhBOSPS+mfX=cmLR0Kc}loM>@*10&ut$47B}fss!$?` z%0ljk9=+e6v+wE;|f~73h<&(#pDK(E~*69WML#>)|P4NF+IZmOBj0brVg& zcf_qMt_gLVaPL z+uN^BV$rDpnDnft*n&HJk8b^Jk*d@36Wc@Q`gKT%l2lBf=A5pX zO{y8KZo!&Ggb&PpJ3sa5b=>9`!uBg!%wA6>Nd&Q@%ic|Y%7bisjr(i2$6VjVUyRma z3yko1^IbL#*&si$^5Z|7xc6bN)2>&4IdV%fW%D=Wvm@=Kc?p_V`<8w7{ksA7>Rd)D zmQu!CucN^K0LJQ8ikP10;lJs3j~V{}v;`{hHM-R1io8S6@*NJ|S4!y`OG_@U{`M znQG}YLy2%0$iyA{`nT*gvUleDeQ57>q5Iu@_ub`K?D zS*NirIaQA8$r*KEr{R~~es$RX{dP_&iHIwb@1$cIa~4mSU6FIn_-JvHO(d~Gq-sv! 
z1s{|SkTb_5a(QvGapJL!ACd9*>O#MdDA=Y_4Ap6#PfwL1yg?WAaBd#P?4 zQF%+nQblq|9|Ruf@2`%Z{D$HP1gWaw>d_tM5IhqKbWBxtS;7&Zk`b-#4E{^H^KQJe#fBiTDcOmaw^B>k_mrKn2y^&ZFH{XD)U;3>j{LBsv|OhstEk2 zf8qmYw))(pXp64^k7y-o;F zst2$i2jilq%~a-*G-zGHbz%8}{ra29V$G?bMxI2h+d#?*4#e-c`2G4ZvAE|9@y{n1 zNm1(V0*>K!JD<~i2oJh8oVuzPw-_jnm(US8_c#xWA?%&h*>2gyRG6HYr#a_0b)W8{a{{Tsn^qaG?>0A%Ii1JNN#*4`Rinwu!Oo zU|feJfPId=yM4OG?Ukotk%#7DByqAEUfUlX1a7k~Qz@Ky>X!E<)KJU5BoZ~&sOrCjd$)-gPCEOJI!La^=|^&{h;Bgjimn4Svg%)k=N zM)pSj`aK0&OZge5K@ngi1sWy)0EhGazIE54Y&A?OtxPLe`9p|w2S98O)OD`OR%oJ$ zUQXP}QP>T8cj!w~TGTXRnN}8X@<=!71AV~X&sdidJ1R;_Aypwk?cd-30G^szus~gC zM}1PO$rqBcw;d#8509ReT(b>liWF8?lasQ8#Ax(Q=3HzGJVjb>I2Vwy&;%WVKetGy zdvaMviYS&qS&7%DXWe(b^bZaJEzck(Fl8%jFm?{wL*GDuZ+?vys+4YW5!#==kM@6fc^UR^-JDbvRs8ZIU7E<-@w?<(_`ksCh1d-nVEFaDotP<=b9$U){xl?cFle*XZkK-R+E zOUA}VHC2W)8ctZKa8JJdiRfHv7!hUX2WAixuR2I#GRJUe5y?T`hhMpgS*B|fDlfMt z=7$6fn=r=n?JryR2r zX`~SV2$#1#{g3^6CWA1V4buMr7FV!ikk+=s#iEh&2h=uyDc8QoN8;d{8H<9Xk?9ss zED*=B{eSuB9FeZiSIV%e(Z46;Lcbt;Z{+l3QOPA67gBAQ*t!*BLHPdw&fR802`DqV zND1WD?WJjBjviBm^!M$**X_`Bu3B2g69Xv&o3L%{`)jPRqje0_|u;>$h91 z!m|aS{{YOAKvn(M<9q!(>!N~gv^o1P4eIRi$td#-YE*^nRQ0ajN@p=aQW(?&zcIbX z&&Rpw41`j~(WrfCf}Uqx{z%uj>)WdfGAWqFB!xiohJ$ha59!yWuXcw->Ko93BNL+I ze0b=Q_ujt!No2fO*xOc;Sv^1&Je!DA4ksNb+ z42-;i40OMzS;Imql`Gex@evw^NYO&4Z*kk9@)Iwfi#0{AtTtcD;=9?|Q}TK++@n+m zTG%B&hcEQS0PmyCbPmV=03B<;;tA!oB#QfJ{Z0Vb(f0oROwA?RP^|dxREAN>$>MZB z@2;?R7u%7fy8`H5|nA_ZqbcX&~KR6RqIE-^rkF9&_xBWU*V#VVT zk*DP%uw_*4R3EtOTtOB>Y6*g>T1;;+;J*I=Dgo`k+pj-}tP+^vMT?jqg1cj})c!90 zsqx|FnjMw}#=-vIJsl=Wr6~a%iB-`Dx~|W`=v#AT;^5;gvLH0mE~bAqv^jhNI@{&c zS$8#XWnr#b)`#1yYEH9Ayunlln<*WL{W{le4c9heX4fqtE#F;uumLOGm=_BJUQaoe zy@-$j+tu^;=y{&}fuL#8Kp<=rrafn47ivFWvhd$6_avVE3gN2C8ha;guHp88f(EwAtj(uuoMPV2LP&&`24#&)je0T`yY|sz}h4-@h+`v(T17 zl&|BoKQThr$jV_eOCJ);uu0c$wY(Q?K=xvtqtO~(Uzqi{wRUjcQUF(x2jqU8WS1cv zSVED5?4#gytv7_mw3kBi3GGGz1-S+8ALFk+xKs$Pqs%S6h3rR0M=e_l&RuppcJH(Q z0B*F+j0?Ha$ERcbKHWA*5}sR8f@L0=%F{2F8Can8Z??J)_lPSjhA}9AxL?0Q`7*M| 
z@Jg+~YigeTFG?1dl!&13ux>r~^V{_39a+vW5>02gh)?0M0X}F^wU?3Bv_`Z)>DQJX zzmp)6IVB2JP!E0kpYD2+gT&Nio$AXn0;9jLxC34P0KYSJomB=d{{vw`RZi=F;&)93qjvYp7%^2cI8e6RxX-rRsr*q^yR-57BTMNO3o zJZ2V%I9XOhu)lNn{@rSVylD}c#GSU@rP1beH^1}fX=Zd-WQ&YMt1yfoS#Q7e=*PDe z%Fw|hl~E{FP`hmf?%&^AJ!mqCpuDW$jg=kl)>z&+xd#uRPclan^Xb=b*RRvxinOIA zm0O!TgROnOJ_mhtE%}7A*H>*v0uoj+rswWK{Rd(C^*xhyf^lRPncG?m?uRIurtgQD_7F37CHR(v{EX9}&s3-G%zhTm-JV^No3<7Z&+Od6c1HSuf=ePr@ z7%JPg)#IM*kooX4NF8?~Yt^z3!5aSnA=8Q!YOjOhujhL)M&-JpWvteYL%Z!F{=ZJIqb{hHL-=gDIFt+3>T?kM%NE-Mb<*47H~BBJSiMC;i5_5)izcsP><8*661>&%~uYr&XI zTO~e5I9_ZVTuq ze{#qEpKi1-i4A$=l4OOO@-LQ~3^myP=cvCEX+jv}busvgJV19Z{;tTB` zhVMyfOsMt2F~bihU4z^A={{t_w<{YKV15=g<;3jVTm#shXs-T2JuP$7tS^3inaKSF;uzqh|#r~RdW46{BT&dK6@eR*FIWxZ@)kq&pU+uOeTd-ck( z@m8%^+P%sUM{ZIT(f#`B(W#z@EnlHidn=Jp85;6K-NRu@>N(y=B(f^@V!!)=9{%6h z^*58DRw+ElUPj>lgIzz0dE+b`@6fcy1rjj>`@ z6V-}J^0%f)oz$rVL=NLkzUM=@-(%MQ0K|X5T-V{IIvmv2{-kn^CR*o-hF9(riD1p6 z+mTgLLD#YBV+mq%rfU~0NYQo>Esjk7+*OMDK=ESqs|}TGgQdI5w=toCue70 zZe7O4{{UX1ser4OJrm{lLx{uRac+vkE4YhKj?4OwKaTL#!MN9=uoX%lDnP(6J;C@N z-?=&)>zeq}#+Gw=2x3)af{GFdPk9OMN4YuyM=gE*yY`5n`Cz=vJIS5r8hSafgG)k3i>4uq4U(y^dK9)NjUJPVtkgRK< zy3-AQZo^yo?c1;ARi6>VMrDf~EDH=%DoF!X9G%=AEULUiB9tJh_6LyeypP#mfgkN@ zTgA5U-Ujh53P>^&fl)FMOd1K~14;m37!$9`eOn#5Y!meV07J!6pwmV6B-@(ff56And$SU$11c9#SU-lhjG&_y==nT$t6XD!^*__@=3RLUH(n~DP6+(fs zd;Wm->-qiu(7)}6;!hggv4rpyyoMvgmD!L<2cp&X3`cDps*$g9J^i*i?_zOOu&vDV z<+|Wta22ud%Sc;uuwO;~J%6?Lg}(+fkAkU>Jg&2F}6kN`@fp1F%n5cnGGl$!{|>X2IECEeITU`B8SnDJR^w^gVpOH|qF@ zHQ7#9n@2-c_IN*`VJT&n=5V~p*TP~IBvfXt0!L+=%OhutZ8lH0ZGOMs@6t@&h^yO( z!*W<$ogX)l^FBd6zi(I@(eJU`P*3GFAk-iet}(`^mdNe49sWO2*F5p&LfstAI?YI| zQ$^-4n9L6_$FTr!e<6prx3N8ZplNWG^hN=KtGqAbD-|syGf5oLTGEeSCFDo|5(!hX z>ZI>}pnbim?p?EtiE_B%H<34}w#e6ip8b#S_Ua24cg}bXkt9{_7@@Kxot%iI^fL|o z0NjD!_($kP9DXk}qcw8uu*lHJ((Ly0QHso`@9SP#{{Y|8-VILhOJT@iQsO*caygz# z)}^dw{PII90(Mf-;Sd*mTRsnGX-uts$i|hCRy4yny{ifZyzT;`g#c z%`7=IOi$%dH{at#b=JUOJ5KQ$De)M<%SxxkWaVjm`6iIFlc08oU-av+ei42n_zww@ 
zo&05Xo+?Vwae`^3a~w!}u?P{KM?xlpv1TNRD&l~^*o=CIPAs59s%(5mzKW#oThd? zMHhbUR){=fI{yIbKAiloUO|$|-jVNLyEoO%sZ6YNWIwR&ucpS^)n5&D;Gub)?S~t0H)&D>>6`KqF(^gYJ85e*G6~!luMxO03fG z{K(_D-?$s@f3MGP*QHN~o{|aDH$?yfTaZ=&cN_Qz=j0!L#Pr_Px+)DLvfWPowh|$o zK{7ti5()kNj{g8(^yvB-Yp~80?#C4W0PzrU^pn4RkDYs;@6uP40BfOW$z3DHKq1Dx z_4;*+O+2igddapYmh=w!dwgsBxAE54=uo+?3$9~)U6@F;m!(!{*u0FyBRK%~R`0$4 z0O!X?*8Eejh>+x~Q(>+isL0tTwwXQu09|k2sO*39D}6)`rbW_oQUUb-Udyj>`)^*| zERq>7Y`BP6PT^WH&A!Kx_}_2arnak_p;12#POY8+U4oB|DaZ`1ja85#cT&y^cJz_o zTzj1oeg{}h_>G3E7;6}mFBfnOvZp2AW(-NzzfrCGc0F?nx2KAF@jJ3gP^62?YunrW zKR<2!^{+ZsNR{N#SkV4iwxhAu_x(XXKR<4mE50>I{{Z8#^BD)C>-79_;aku&t8Vlm zm(!sXfPtV}k=xaxM<*j!;y}NbI zW{S;v#yV(=87#5}nfI;e76Y?(2f6$I0A7Wbw94YVZ2os;ouecyi{D2>h|a zxN3@@({Sy!Rn;5E-V&U^ruDSo;TfaRROCjxY?t2m+ef$0S;ykv4#|*eDRQX0VSn8c zX}B$dJ^ZF^D=-6Pe=z%e^~!PdVnvZGvb2(dBq@v@0yozNLK?y<0xPiTn-O1S<{yOpN zLKZ}h9?Ud$#Z$Js==+j>ch@`c%vY)wf}TmrqN}GcJh_4H2~*pDuWO3`dsWs(CZKX026^6?fF_Kh%AuHAXvuFGOOtz`}ZC9?eEsf zau7n$(?Ud&cjUSo+1L4gy=sQOE=3HHTi|%Su&sq<1b)0rX#MxsmKz&Nc`*pUhs_$8zol7#zL47-NApW3hx9irib0FjSxavci@ek#wCr4m=5;T8L z(DY{($(E`=_)JOfNFM7M^LXZ6M9KL=s|Wf20Is|C>m;7M6}Vn#R4D{8>{xGlB>QwN zsIq1jp1d$5`s9I~jwTAxA1(|onT z!_gL*nA6IHN6Y|u75U%g(db;o_+go4fFnBa4d#*%{`&U*y%kFdg?jBRX)mIZq}H)2 zN~p&`klF|B@9p!~kj;q4@)vP&YkWB$<_QSz-H9qc{NL-<4615(WoQ2Y7mIN3K#sOc zGJYmB<&K>VO3g2sEAr4^(`WrU(Ecff94ykWAnZ#pc%?h(n6Pq| z&owifuqo7;Z7h5y^^nqQe3#sBw{DW*aCr*}I#hJdJs=gNs zLDG1*U9uRM<*-zhI`>VDF23Lo-=OkdCAUr6ywB(WahH5a_9OcI{{3Z2qGef6=#Hy* zx)KUL9`P(`9cgi&O9v6oTdt84_aHgg?a>n8GBAXSgaUPu*OYL~vND0{3__n`fBEao zkBjVbTLxV-=+3U)+x0s>`so|^lupc)<#_mz6}H>n{l7u)?b6O58!MaiCT0G{e32J3 zl0Zxa-DZJ^oDe`$y?*1rTNgDO+$${3=%am(-}YhhH_>mCsh5H{GqC+kJ=ct}0N|17 zWtm8Aj{T0uewWRPz~kmJ){V=$XnD@;qeo!-=>GtpUSM?QnNd6G7>2tmTqDhGmo*Ia zN&0XrofIvw%E#c2gMSivRw~uzimc4Di9a}9e}w+p>t&1dR-u9MH0Cl9vGnLe{-@{m z=t(j@6`tZrKZa{TJAtNHV{b3)2PHZhKHEP1Fw`i&_*_T-0AcU=TP)<{RGQ2)m5wpp zH|i_BZ1(>EK8CB}nOe-kVo`t#AE@hJaC-6N##oYgX=G!F$+H!CV^moCeozy zXZm$NmBM(J86OmTHNs4PFdNzX_Vztt 
zN?Mt_QKd_yMIdC>wmx^iw_L#FgqWyowA0OMQhP4F!Pj7azf_(bt7jUkDr8g5mLPn! zQh1M^{{ViLD!HyCi!E=|9U@z0YeqQmGkI%xOuQRhmxxb|cK-lgh?gs0v{PG0k$8g| zDH1q7{{H~$*EF-@YYQGio7f!IaJp^nj=}7GwbN+)NZ9*ICa)Z^%oX8wA++BhliYOE zTtt*Z9>&;ME1UK)@FbInv?AhE=oTY-(bsMDv20YGB5BNOmNoOm*aP<;Zn@S&#gpEY zK(QoD)2Q#E$o~L{pY5^Hc^?(S0#{f((;`LOgV=rl0DipMpp}CZ!*FF>5sYp9JSY?s z>4^H254h-yIecYkk!>g{szNia_I^6%PscmAb-)s3ZxO&~L{ zcHeeAyL@zJwp)aM%V3x{Rxo9mAI_FZ79!r42e{wq_UqB&7}27y9i_1}PsB;B9UqN# z6Bmhsm-9QS5_+}N_KnXbLKYz# zOaioi;lD%Ig=r~A8GiJ_Z-=7Pxn7pgu0qj8`=>DTHxD>GM=iIm0MqXV|r>(q^Gf>~Wtnjq4T%t$}4UZsgu6z!BSAgDSV zzo%a9PX*zKGYXC6xneTwDlr^E-=}}4S}jSDni-*j!oeCRB-!Lx4~_JT9gvn~l1WPM zL$?0_DF>#hJQ5_(^B~E#EPc23A7R!Pk`W5-bO4V@jI7h5|{G+sY&P9liOV>NkxLvNf10A*lD%)>Yk2f24$q3_P;0N z=dEdqsIJ~#^gYh^zmEL}B}$cEH;O=(cB8g0o zvAvJ!^VaBcmCQoKXb?yT{6Xwb&r55!8DtTitbvz*;UEvcOJu(pcZ?YsQ9v6#f$PGd z7e>eNP!+r-a^07(uE}B90!+ugi2$p700({bQ%Av}he=ddH+t(jb=F98*(!ugU;08Q4r)cFUbQw8&k-vW6YxL{bvNfyB4Y?&@?w+M{uHX$1*GF9?w`v5j31?W5 z$D9$)%W>_m^WRe*A@N7>>c+%Un5IT!vI8j_(yD*U?sk7pkoQHJr~XSRT= z0xE~*g+Na37@YtI-+Jq9pAc20Bx)opD?76*A8-fd{{H~!*A~X)Ggh6Jotc#H&WZQm zd*1i#0MP4L$>(OwNj=J|7=k2nWjugsACGW8+AY6s-FQ9GV2YpAXZnp7a7@frE5&YK zEtk!9U{~+lf0)ocVD=!$SDh=sspkIxEK42Z^6e^(GY zF8zrl^wa+UR81~68#5APUnqpv0DVC~{r%4RXKWi~uEsQy9&}a}s#B`)Ey={vyds{o zISuG&AJ<<$uUo7LmQqpTf?Dc)vItU3d~ExlZlI>jWb#*#Rj<#;EhLN^v~jPylkK6g z{aAEHGc6y~R$r>-kj!}(+j^Le{{XN6^`fgbUS>9tc0rzuU3*Zg8DU>e2CbN+hej% zZ*3mkzfO=_$7D>F1~%ZUV%j{Mo+V=;-pdl(8rN>OxO>Q^ke42z8F@bRW$pTJt#9ER4njHFw z@9algsQ8N)bHl9)&Kgt-=?rJ{C=Iq^M!+SP-}-cwau#x8Y<*uXok`&-Z5BiUNKkvR zC$RbHga(dfKBiVcYGrRwPaDf|RxpXoB9C%3KR`Zu-s?vu>|`M;i3BQ#{6$93>^6FY zT=cQXV7RgbuW4bC2tV;ScJ}S;56^8LsbliIiekG-=j3e5r%+gh9e~&T%->tgzkAgf#ak&cW z)!sr@bvr;l04L+G=p~D`;nq5Ia`HH4jid_9%Nhf)J^lT^UjE%h{viGyVtjGosb3*k zVw04Lr-HEz3I6~VwtIX7uBoG3&~&t0b@iV``kh z>y{7xK1X2v^gaH1uv0ZgcHMpBaSZ@)L-?y1;!Kn=oSsRoBr2&GX$ue)LV@38=xIj% zhhd@A?PdB!HB||HDG3{%Ko9XTACwYLf9=&jh&)Y?wvkEA6_gU(2F}RV$kzAU{Q8H; zWa~VSBt-3wo>m812e8@J{{Zjy>bxr|xI@&~w_)iCq`k<6wxmYEI3HvK(m(c(zv@Q6 
zbKj}66C3s5sRPY3GNf^+!g3Peg=fzF0tcF(yfUwddlZO=!yr4@ktGpYC;b(<7eTxao^)PcnO*pg{{SYCo*?)9$o!P=+ojXzqPT&iERqx?FFkMc zQQV&T?f(FSK>KUAOffk- zG%_*du;r_=@hnR4B|!ucu)Ki$z@M@2zCBApnwevkvdX(IrZh;t7r5@ry>FW%t#dWiw2ER~2@2=gq(5qUdXM*!E zrNd$AVX5S%ASG1suN|-AGtaqX(y|o_yZ|~-J^Z1q0j(Wq0!iI}h7tuTk&~t+j|U)Z z`?2}oxBmbgPvY#okkPZq>7ygG$6-c@*$Mz{dbPjHvD|BBuK{wj3Sfd1W3BS|_U)p3 z4}I_FsPUC0uobv=#mcj*ZK>8-Nk|y$O(E`2=uXK$usu$GB4WRd&5g?=eKfGRj>Me! zu}^ezr?>#@Z|p}zeSLUlL|IJMWQ??NMByB$_iksjQID2A&&OHU@kIn#p}gX1^2W|% ziHvZvkMSq~$uavokfe^?x|W?F=8!jDtl|u%@Kv@cG+JpDV|w8Sato;dgZ*{-_Wene z@i&O-U%*X%?CKnKCHK4^;%uy}F}9_6q9i94 z?0frvW7q6e{P%_5%C@eOg=EwABxrwMp1LQ6ymgH5^#M=Kg`zx6g#K@_JN>uYsBG;_ z&x|tCw}!;GV1*gk#D17m5>DIh6a&ZKuGI&=uHvT~eN3NUU-@yi!vQKnBCLaI;+j}3sVt>f~jNAR1 zZrqqHT+LWvk=aVi$R{o(y8sCr3VR)Q?fUg``i35pdp4L+w+ULKyGJOx-Xo=ogv5_p zV#~*Zw!gUdJvQ*4hO)ja$;WA{ven+kmqY%YTjM|3g0><^=f9YW!jDsNO(FN$-@n6u zZo1Yd!`QD4Vxq57FbHl8bOyEW{)gY=u2v%)!Xl3QuEh#DKs6WaMj{MGGB{z4fkfCU z8-2a|Z|&b*IK^aPu?U)^k;sQ?!3ZN@`i76l-{0e*9(FVNY{iu5sNdh?`W=q{0Ix<< zK!Jz>e=7o`xIX>=0MFy&t|N(+S%RLYmI$#MFBT(x_oMUHR_j)f^KC0I(||SbdJ4Sj zt{jJ`c0QiRU5>rC*7dTo=sX38(IdW&x&ql3LLCVO%rc~;z(|dW1xWOc!}9BYJ!t*_ zlBk-rTu}BTfO?IHW^T9z}~cfuD*K4xbkL5(b=|0U{3Y_09|y4Q*h9y zHmKYsIKQsPN)3@Dk-F)MF2er+LGBOq_xShdxuQv=YcZ>W-p|XqVe#Dd_}0($>D@Tt z<;4+4tJv$`^#1@)w?^eW5mPTz&yJss#a)m99L~Lu#`@M)sz0^T-pXXlY@wmC!p5M} z&mZ#NiWHq?TvKl!#z9a@X{5Vb3F(dz3eqJVA}Pp5gM^fHgLHRyjc%ocfdi*>3??08 z{Gao@9sBGJ_qoq6uIqbY8H8Yx2HF&66VMkr+9JlqIaevCC~og+cr2dPMl53DXyr@z z_rMmAm^)RKrY3QlZ^I#C2lWrUh)(JA7C69+m&=3DM0fBA{oM{A&5v-$;Hwqj6=q%O2`}q7r4ler-2O=Ng2aGt z|FiE}dtcb*@glwd1yF{9@$@+#E(zv$Y zTy2XHSY98Wk%V?yl(mG#u}7xF&>>g?T?f!;3Z#5MPGfexacg;OS(097t_EYaTjBaX z$-SJv2U5KSBX&-D$fegX$^-{6p(E>ofI_KhKB@+>LYctE?SI4bPPNUA^qTaN_a%S-h>=!U57ntU*hbc+ zNYC28wwl2m{^ximnZuCoN(k|fwyBdL4q)%>{+)PzzWkW^ImqkL z;HYkY)q7lut0gyyBY42dohF{4N2TM>fq}?o{$Kd#=W=EE<=L))a#f)VVgAIff6O@5 zmcNSqJkk%1QbfToxEuUUMsKTjTuw3!pTm4S6a)7?JLh%~6CQ~{JF#qMb-LEmYV3(Ls$t^3$Co6g!tbW0EXKkyUGxd? 
zkITBFfIaWKeO1>oUllW!xSePR(Y2F7?fn7%1hrb6+EI0-nsOi>aX}XGB_tuX?ymp@aRnp$ z4=eW=>QOMQKW|KE2nSy<;hfTo{ELsjJUyG(jkF?;Jh?|dkckH%h{jLSjGEmaA;$tQ8wR3O26uC49<}2?~q}JSFJa0 zI;rTV*_OYEjJe{-CkcM2ow!C9^zn9GD`|`U+TswdZ2k``ZgI6QR*;6}>6Tl~V#gG) zHV|&ob!SE`8(Q)I!&F7APmHilmwq#RM$54eVP=F6Oke_T3Mw}2Eh_3gBEcTcccuJ|2E=lm^+?7fdj#A9@y3-%l}gaX+p)vip1w=N z?tOh$0$a}OaDs0Dtnz75xDp-9{uR5$k+hY_m=fQz+pHJ2qrvvnXu+=&YUuUR)=_A@ zl%MyRX0Vfn)a`(3?AS<)&}ul|rwLF10FJ+~WFEiM6Nr__>DK(ij`(zfg+jrB3+sr$ zOK!RTy>EXF+MBx8jxuUMxji#O8#in@7H9n*ch&&f84Zppn?%u9K2lHbEPI$}%=8LP z^nORh$Ee!;LLt3+s{xI$M{zJ29cju}TLldSycXGI0MxlmaBF^45S zF`jC_Ece&hpF|)&V)Ql&mCT2_=9T|b5OJBFM6C}P?ue391njOX|UP9b^ zDra%iIn?fLx__@7Uu>0E+-)6R7dk1UaGIiU=SF}%|BQI{N$1Zt$%8e8jk1(Qxd7!b z=+XHSqtQNO#C^{Xf#t4kP%lr8`p>ejtxKh73npsZ6U7f9FZr$S{c#FxI3azBRSMNOrnPh^FGncPnZ9ky?gXa);ehp z@xGanxI>&>P396i)e6UaI3K$i-P zRMhbn@|?x*C10*&vaw|vl6^uS&%2x6`}{J>-TO5bwD%KO3L{7RU>_p-tb($L8~j(t z#-ze=jSBC@I^P4v3{F7h0S}C9UCje(wDBqz^6GBvLqM+Yl2W4yE z)90TH)uBk||nbjV8b( z)~dg>z|0|yp^Wxz;R|-Rh}DRdrrC5750K)K>d6n4>1LZ-jjt5-jyPTPcy||grx!Vz zml_M*4N?@PjsG6r*v@ABn^I$ zUMBe((#TAS;@$1bQIxg`wO*={E&u9km%6$$RrV|NU!Bajwk%R*dqbLXQUq;$vhZ zRD$)St-4m!Zh|umg~bKH?es=CdsVWR7=uI_`_*sZs^Ilg_x*={;oAIGj;TG}A*1Pz zFq{D;>qV8BD)$U``k2P8@O1X; z@QQk|Wo=JAanJU^qf?wM#}@XM7VI3VTn?}+pk7;~UcU5ofxqICGOMA|hbNP&ow;dfMA?RaZ)XZlk6uX?DB4}MX z^{Y+xDwAhEl{w7|&zcd>Zgi1va=`p^&sM$f%^D`)T2M`Jp+Jg;-|aDgC_4nHXnrln zeY`u(uVMd)1+_MZnJ`r%CB-qKHh-5kGpwl?N7fW4CpR%?s#<#cpt;ErUzr)0fGS4L zq*qJlhSSR4Q08|;4X#XR8L)b3?hX1YdM_NKu8)5BKYx9kM~^G`(|b)*$i#V@-!3U< zP1liw33R;_TB+n0O|bd&w^seHVpk;x8e@4y$_4t5*sPkKo=xoB`G=}UKkC`oj3-lW0gc#m8(wEOz-NB*yBEw>PHQ?>%FvvJ(i$5_PZ2lM=V zl27OJ86&DVGYsLqZBwSLJd3Hc;ZY|5AL|a|v=RFq%1TGGiC-t%1bVg))K(I~2@lZ~ zkOnr}4?#JM9{qQ>cCX@Nofu8t6EM6s8Xup=OtE0e?*(H=cfV4mmMD9yMHM3!@_7_u zj7}VcVD8?FrRq+y?yXKU%|E>+OgsrgFOxr==}El^L94;K=;eUh zs-C}|!#~|XdD+ZtBjeD}o!$gOuHrp@0UzWCV)2OGyzA z!{ag+`V1W3k?yDbGg<+eMTw3^WKEmOUhGYBZ3;SHyT8_&jAY-(=h1V-xyKZKt-jV+ z{(0s8$O5#NWSDjXwrk<+<$xf)=H)K>uQYMdslojN0c*Z^N$>J9J{V^m9Pz}e@OVxu 
z;)L8OREA7cEcv^gd;^iJQFpnP?A^@xOV{ub(X2+2Y6C=fotnvSC((PK86UQf%Khi) z2;2WE@fbN8F?=iJAb%zW47{%xEf?-z_1;^_jy9JONC?-;5(o}hV*0NDW+0yDKNO=^ z>37GYm9xz{)~HS<_BgZz_93B#h5h<9sE^J^t)*i4h%_A^b=<<4?F$)^n2uYl(nQ;B+U%Y(C3rjt5*z6+|UW* z5c+@p0SlAPpDaEy7u8&9TJcmp?US3%Twub4$=%u7R@pC(&Z`N=hxP8gNsbSvQ^!3v z3%mGC=PSB6oYu!ghx_Axt6_ea*m+vD2ILv%`ed@!!IZD)SX@fBQ?^i zM=#BuBh3^$cyp-mIWvV+i9vrl!sc&QFcYc_xn*xV^9sP-)MS`7~!^hJRJI;$!IxJ76q?=e7n1#TbfE^$(j9F|q{s$IkVK)|P`5HKd%FCnh zjx&Fs-#iX^6GnLvyHGpd<^%5Sha%6S7uzH#a_SrzGkmAiycMg`^{QQ5CwSP{+8RXe zqUe#_oIzF=yOn`a^O2xS^SEhEl*Y`HltK39VC>fQgv1_0Dg<`{Eq9!haV3ShChIwy z@~iry-*+rz0~)(b|O^`TS~>u>MNanvq{I@H^N46!xU8J(Knq}Zm=MW~a=3&vN-nySW>5zjqn%^_#kfisU&lZ5I4yK51y+7IzFnpGu zxgA8<1>b#$|F;vz*G-=?^_0lmlvUX%KIL{a95lWZ%Z=!Qb%K-flSn zUi}_LvXBG?->nHE$`=&kD9WuJERg zkC&t~TpC2yf0BOrjE6Nak!hHpxw4uML1Rf9n0c$Ee;)exERHR6rqXRFBxXW{^+CYBYIl?RNK*H zQRT)5`MPdfbHj1 zT_3s8juk?g2}Cs6P2*#i^o(cSe;Zj%umiOW)0ux}(4mNgmkQ|%@JfvFv^Nkjoy`m;=xdp{4_F;mek`UcEXr{{D zd*OfGRVnYrI1Kz%NlkQHAfN|eoT5|bxoq{l&BXBTfu=3`74=vF&OPa*gZoPAn5Mqe zc5Fo&lqDW+i`cH1yR_~0;nQ$2>jPw0jV0x|emH0*Xjc(nd_!mW4yeZw6Av`hb zam^gl^dX+Ct6GMdaJX8ol$gB##Swv>f0g<@p?eut`WrVtj53y%;XeCJzzvK82YN)l8Pq06>T0Lkc3?8T9@%ts4sbg#{r>&y z`p6;k^n!m%MyGX%?5P#wBqzkCNNAA5dodva!tDTl>B=JtE3R817NLJ?9GGOd{z9GF zFaBX z@J7vRQ4u$Sw9{Ge;h)pH&60E>e)K>11ftesJ(N|aU&)B76$?IS#%XdyxNZ0KTJ9(8 zUme3kes9%L$Y0b(yfksInIJ5@Nt|f8f_XwO@dP~`ItSMP zg;lqc=a}`Vdw)_DKDM*14Y=VB@&HX|4hAQTFertUOxV$@wd^dN_-(9wGCFIY`7u_f z%l;)!Uw)hUM*CJ7{gTVEb$h=j@7EF6eG8 zpd|8IsLz$kIsV(rD1N>qfWX^?vq}=NWc~DkeW$5A_8+aQp^JcpQ?ik^h+BTfF8pez zpBF11U4Zd%4wmfGe)aC1^|FQLtz{4Dq82_j{6N?4#XQ;ZzS`}D-(Dkkv$4VG^;438 zn4|6c0Tce@=`|P3Y$nGhsIwXly}jJ1oZH(TaX4R4(X3giYtLO4CmUZWf1f~L9isY3 zr+=8F^Bt5v${_jK(?GBh84JBY@IXNHGo>yDHM_s|WY@QbcKi%rLdPiL{iKo5;UHL| zD6iIoj@B}p9_N8x=wDS026fCvERtl9d&@G|G4|gB#pB2CAmr)b8a3SHAxrgTrR^cj zTltJNxhanO3+PA)b>*Kty^(759OWqwBf0Xtn%;rIRjV+UpSI_ zv9uv?Lv_$Hn4So#gb+d4{D<4uBepq;7DfXy%dQ-k;Wd;^3;$smzYV>`;2+U-i@qB@ zJjC@d>ww6SR}MTJ*2(|I=o(K6)+JQMr9Wr=vM@LEaSM7hkXvtdqQV;8)WSWF06S?a 
z9Ns+?hLnG#uQg{eGkp3vpqAdPi7%-g*xiwf+rt0Da%;T1K9HN467Xb*$4;*T!#>WD z8qWjMpqJ2@`cd9EwG<`IIi9?0SqDH}Q0UtNrW8UIGJ!I_ zCu^vpa(o45jJTY&jOKGWhAO@W(q_4dPf$4gAz$#g$JYB_i)et1_X6H={lz;K*ls5a z)&iY;A@nX)8dss*^aXCXSRghcl)0F_^q@)LFMQ?`jXr1&Uc{j)5ZN#T?eXbE|oLjT>rJ>{v-5sty#}FGDbiXaH(EVdT#rKb1PD z@$ttu zvRe<9hbCOzoP_FzB1#wxgx=EP*Gf9OiRn+CiaeSnPQF@+MKGd6dYLQDIU1?N1%5%r)lazijS`R!1KvkXu;&JMVTikXAmL>vZA+x< zHd|BkGt*!K&fV4Z@zOgkyGx>=#pRgEB}NTDXBT!G?Vg;lbFkxQT??%i*FVCut774yoP=j~_SFK!EUcSWEYD(d{cVQ0s3&jO;y zX}7)-(w(Tw)@Fodehr18^4>=W z=~`Az1+&^!&V0arT53Nq;VlkLf=(U^v)}mVk0`jk%i>dqaMc+5b)ntDI<0=+e?}ak z=2vnrB?vy;OwA-CS|PyjedU`KrWbv6bI>6(dhI+#d|fmIo{h3hLx+ZX&V%RPc|u>9h$ zK6;HGJgeh-Yhi&ZL2&#^^cR9uy9;=J6yo+JKcGyq_!Dv5ATVLW?Cu5}o7)S@j1|-I zEoD4*;hZ&)dvb_Wa8l>>;dGE7oSOc7{CLeEFK!ATir{w9xVc@}Ka{76=sSN_o8&Fg z0_^zC&!M3n1m#aO?+3&{Pj>6yj5w8Po^2!6vYlIN;MigUL#=8YA&%kX z32fzjegnq=6)#)<9DLqs$FYgH0{Zc#TK1d#fO=DXA}!i9?^bf7NnntU_aD|UNPFua zQmsm zB9R->(Rt?y>^zzj;=6erb7?mA%oKQ012RaT{(b@%6z=z@VqoN41j)N))_*1?bM<3lI8=tEHG!OtI9 zkylIBK7JIZCM2RDV}IC8I{sP&)5MA9?-&diP=)@WYd^TPEo(G4BhKVn9Ao8P(?BL5 z%sCn!)W;uW|NREP)Vtl?$;wiS7JQZDeue2d9>BWStIVcy))q^7<4jW;f*n5nN-u(V zl<_e?YCR;MA`m%!`PiK6TdRTPb2o-&Gru}mv$73H{?`IV>6)>B{7#y0+V0OT?+wJ* zph2aynTfws+$iy{_RD{dlk-pI&=FVi7^vQg@lLfJt3ADR>O@Zl&hmd){H}W)Ve%N> zp8(@dKhn_a#h*%~ZMBA3ZWrIcb`_`ZU1DX+1KpPh8e8eLE0#2LAmnhjyv)~D?2*Wm z-k_v=Ahj6WuxPYtjKwAtEc=-~)EynFv%#~J!dqWuZu^Pev^oV$m+tW=t+}~z2_1Nm zz9m>5l?nDuPC-~kqXv3}RQudiMvw0J+(C#7n8vYDTl8!~tZs%^F*@idm?y@teccdw z!`TJZyZF5=URMU(mUSM55{`-TIPtPE)eF z9O<3`H7-IFo^MilF^Q2Meh^%nj$1ArRrcqNA z<@Dz``#z`Rn~GF1v#TanA&bm{yfQy)N|?(xvb@v|c+;j!=BKg1f>QLo0kB=IBhXO+ zlVV5slr7k#-aBP&K#HwG3kWr}yhCwc8Bu({c?H&zULpEX1nkR*h|1 z`m)O*?N?P5{#0FZF`(;&l^O91x(ITlaTpn`)CBmBrrpNg(p@9We&+6k2&W_nDB9y! 
zNc{bdlX7|ti>isr_S|`#Sez8Kw5VUwCp@Kc@2b*yFwj`D6@RWu37Qro{xNbcd=fnI z?l9g&@Fpy8C{g5_ps~za`$PIsNK5(w>P7O|R&^ zVnm}O7GJ+6p&0J4&6O0v`_AxfEq0#$^6(_+jhjVzozEMNSX#Yl*z$`VCBe)xl%(jC3(#2E2rJM zUooGI1Zp9TaU0MJ8!wi#3~I)l^25V<7KwECG)Y&n3WG_I0S)Z|p_O)~_^TiFL-ppn ziPZ78YZfBxfJp8m`&eLQFzDu!XjU0iX2iw{%^uRh&@)~>^m7*Vail(j??((zIP#DA z+mLfSA3C^aKK-tl*<;`}bFMpLheXB--m#dNYLf`aS z8bffTP#*Bc?hAM3h!U%{SR&8NKZKpML|D~pSB<`!Ivp$Rb8#4xqc502hP1!EAKWTT zowq1G~%@*@b#frAw9gC8(lslQBTw6VG4qKv_w+N7@(8nyd0 z<`)!YyB!k{xul4HtHMyArwK{kYSpvTBw>dn9uTD>O2wF!<`gB>HIk(Et3slj&fTb+zC1hSo8@%4$mUA#Bwr zgipXu$*f*xGG#O_wUaz-<{AIX>q{>>*biX;#U=VO{onD|bbiiSDcYTIBMa>NBwb-K zvV%>XXtFc$g{U;#Kz`p9aqsdeqAR=13dWM2K6ng8vzQ4D$AhYr2 zTcPDUy^F!JVK{}vW^D)bG9tJCv%i6ZLfE(51phLe8eA~C=?x%ze{LqaB5nV zZPF#zel3CbqHCM@VVfT-ROb$1DEMefmxRh_?{O?kD+4*N0S`S+GG6l^4f`{V13?>q zE`gGU`}WsF9}Qa!$$!;~`~6ij{i#MIsr&;3Sw+;EAK!s{`n59DGPOF~G#}v?o2?V; zn1qa6>}~+FvD7I!&3alE&5+DgnoQFn!cmF=z}q`_3r6$jiM(!}O3`4gdK$CDNIK@D z$qwDe3kjS3UdDPnN-c@&2B|6kVQecq^j;c!JRGIgkGaI?nhC5J8T~OeE6J$vfBm`a z%jGHwWPa!PE)`@T#{b6M{N7h~#6Qw$whEL*x}<=M(a=MnmO1MfZw`1T&PBE{q^)-a z;__L$1TyxcuIpZy9@h`y>(w`J5C8ll02h)uh)|4wJeBM9@z;Su-|&7?cf&e zQ2Z_XEq#O}%f26s7-`$eSM%+6vO~t1XjIXL0gWs!fLtM${XQK+`!X+&YUP^S`TmD7 zH-wKO_p|bN8MM+q03ep+`o+W?DN)@%tNL3jefze`fIdgp+oX9_x%Zki@h+@68h~3e zJ0Xp@!9ialQ`+xWAo3MBO;(7CDb$K|amDj?9E#+0bdUB z(#E0u>}R8;EvHZY!v4w4!HXSV>`~`C_&16d^}${x%?-av$lQr@8U=GkL7%=^ zVv%B!>~!Gn7y&i-&C&iy1>Y>T!4do_2eSAWSCq6r8|z7XeHy0V4{rxyH}s-O#&T=S zZT7dD6;-*>6B#W2air|Hjf%6Yn+%q`L_<^krWR?vMsBXQ7EmbnwvyAQY|mT)>4^$>cMw7Y}q<3N~bV4auxMbG#;|?u0T; z1T1h{-dPu(zAK;^AvAz{Ib^icjnNvJ$3v%uB+kq<>{n5im}!nesmBK&oJ2&YSwE1O z!00*f^K2PFjf7E<%iA-kJ&34A3a#Fm-a2@o0;ubsv!_f|an-|>OR9c^;V69_% zc6%7<)3;}~OIkhn)~TFQE~f95AA#1fH0gi$Cg99>-zW5TxtiT8L^qB^$HzX6QqQ{! 
zJafk~bcY1Xf~F4bN3++sPM>6ll=Bk@LksEUyg~A-`%^G?R*?w zs!J{`Jq%^(=!7p9MW}}DqfWzT9xXJs^$g02f2Tm(#1URw(3umN>)WdX_fP9K*;=30 zPjQD%5BKj36ffTd#MsBP31g$(HJN~3Qt?4{zIu~nkl!^?*7ih!}SNIA?sqhyLj6 zWSqA$s{9j96J82OGhdIwMN{283Gn^XDspB%En<#aPn~(dM^!(22o=aJpfs zfjWh=p_FJW8Ta16d(S&0_WA{r3cvBV+}}5kGs2_n0*lsc6mM6cOJzNpZ<49m@6}IV zR8s#tZ0t>aLNXEeQE)vR&t}x?xEKwdb@p>*&1_=|Br=n^W;rkn3MuqFf|;BOM?Z9O zWL16+)@9O{I&N;5Kn-jtz31EYHki5>qw|A3PBrGBjlQ30Zjm^KSre+zW^~A!P%{wa z=M39B{FQzDIFGZ=&5VYs;^pld9w*|!0#=i54p$&dTp}`*s9HDMvwB9}=_mfen0`Aw zsT%#WLkojxN`VizT|#TqH7M1Bh>_j10)~L|j1O-JREj@ptn@#OblP77`At#Gr*XT= z!h$cFl?O}(cBb?P(~zMO6ZYrJo)dZetO*4{_taE{uOULk!64OqDS^ftJ6Uz&OtzXY z8mR;fWED>*XUCv#Eu#;P7x$~Cbh*x}^cPn2%Z|>yw}3i^qX~5f)-$peR<+6tU!37b zqEENlP^VOv= zeK}(_W)q6AN$bhK3j!2!&5VFx|9IXE&v`qF)aPlH?%Z$#8gmiaIiWlyvMtN~Wm1KO ztzT8R^rS$~p!m*<{z!`QSHyQqVv$LSeu{~2DaXfSy4Q!Vj(NU%WSJy5`l(_wzyFRdD^R z0?oILoFIb6u14HkwA-yc6khWhIp&&O77Q&Br(22mKcFKRb%`*b=}1(pP?xW=$x)=k z$`*q&2Ujx^xveQLfc0VsGpVQFu;k}kKzDZQ88mfpEk|mQKI~)oY9ny_n1CElVf#Jq{!I`Scru(n$@YW>=ps^&zOqR- zey3yC{2}srKUL=F5-~S%wg1ICeo$I38A%Il}=pJz$`n1`;(pF24g{_^H^uFC`UKK>gJ?+hh~a z2}3hla#yBUO49W(<7;;KB{E>Xqi<;_;=?GHlnngkwWIdq zA8C$?h)?j4ErcbozO{;>$-Zw8m`**tB9z$&!sQUV(n53wIF|%8f2TU8B@kAC6Xp2M z$E#@QTiOSR27AZ1u9g3=&M)>&H;uc73k%YTX#U_asghGSuf$&Cg*nWyi!RI!>?Z-0YzmYH$*qc*oW5|4QFdH*Ha|O{ zM~4SrD1>CS&~?yge93Z?`Mi|`4Zcr8UrY&+#wdWdP2bM&!x{|t)0i(#fK5^r!opST zAG3J|e&7pk8`teCM%i)G1~I>|(W@K0tF>d`=Ru94Z%1rejU&QXJV8xpWKuzdFr{2E z7rAzm(~NYy=i` zp2_kU*MBDQCpS)})$n-0BRjmG8dNeHSb_cwc_lXhf|{Ifc@?ng=roVn!eT%dVSvBY zMpJ>iYPK`7emPTXHj*s>&n843e$8BUuvzLN!QZYe$o3a($_^;DWBC~eW8PhheCR4b zxRU{}U@p)|h>HKmuG|;fwYjkLz;mqB2ljC&mQMOgf!oH13PCVGt?Fs-NF3N4zuyED9;VG#r%lAJ+8Cme&M$fz~{o z*JtZ!*RMg~@9ui7=9;d*YEIu!d`x+2%cvo@oPLdm049l+38(2zeR?^YVd*WRYWJ({}s1ddkv5Hm2nce7Yz@hgCnl4_9N1Ucfrjglpq% zFx39e_vrKsAX>-~b7l6hP5cb&7}&6{86KSC6J{r^(HfzD0iI{cM+b%nEc{&;)fRp8 zAx~($o7qm{y!6iJZq}zoB3`lUtzRQ+e?A6tS0-s%vt>kv2VTq2_zhJ#ZM=sWEY-L$ zY0mwJRbR~+ohW@zuFI|{%$e5YiE0XbD0;|u9&_;cJ5O2Q#S0Po5`LtG<$>5l>O6!l 
z+jg3_SFiXY(q|R-ZqfJXqs|AWcA}g6dN2H(0v*S{7pcnmkLrRax>%Syp>O!gmvR3^HQlqm-e$YpThcTM(z)B2N= z1BAbeokqz*4#CBU2fcN$2kC!U>$ICDBo7*jcNTkJL0v`z4F5*OgASh4?D!S2ZaADM zWJ;icAi3!e0*Go!1KJ$DjB6Tp$3RK6bJ&7pH&Z&;DRq=SWmkpe_&bf;a|LeI2mmLY z5ZT~;>GT)7;6?JAdTM>i=O+s84-*Y3SoqetdbHNRpD!g@x!B!n8@s%12a}t=Z>p!+ zHZx6dz2FE9$&nW)PyDdm&75O}@2=5&+(EF!-7(+`ud87Rnn)3iWp8}jHapjm${E6w zE7w1CbXi zGuY+@cfN3^FndP8YW{2qNGwho^ZcTvY{(X78?45VbiVoE_%cVP_4}Lnd06;MB3eR; zCvt#oM;sGCJbb%4Q&n!PTW#dL{)m#I7_kmH8r*%+zQyif2^xTGteY=XO6^Kgv$k3K z?(bm01J$Sg(b2qCV<}^5`nkBF6S&^`3c3-%)=PMyU*K3~$$ynZYnT#$InWt1-s7&^ zmw7s1>HI$Nr+LQP&+8px8n*yu{HZvXcIldkcAcnBaw{p<`^RCIqFj^gTU+&S?2X@l z7=H4BA~(sKHhALp*+nl~EnWGrwL4w(w(i+3)1R;tv5-)Yh<7~$`b#`!|6y$yso;!0 z!si%|p_PP`H?go>@*RZVILY9j_JwG@&{RgL`eG%8+`%l^c;-}C$s9{r7yx2+ z9?s~)Mj&>x=aEyQ^Jc0>Y5a5f%w*44T(ol#I8m5coN>aA!=;0jUs98>DVc2a2z?o; zRguP^`njrhwfu3f{z_Vgg<8>iE`oKH2c2;5^B8!+waj`dmgBlj*68+Jufkd(`;uOl zoBme3lvTDozIKZ4$W%ON=Y$2nL+zTLm3GW&Nlt`%*+Im!gy8mv{>L$2G;G25`q{2C z_VOLudLrtCx6&N`Aj>JVFEhN}ELVTMm`gp!w5~q9afi`Y76g=jP~DPbqK*P;wWv`g zvkol%%0HO)hSuloW~DW$D!c(F8(BfwOm_;`M{A>sS^^Y!K8zF=ycRrd!zx%wy=P1% zNe%yDd6Rf#>1LX0N4RC{tHrxrZ#8UnB1#0epV7F(N-4FL#!;`LpyLEUf`jq*;Pt#p zjMR3_^hr}kTzKtm$e)LT4f{{*jN<1dwgRq?B2A-W5s>4neWDH*S8X>2p`K@MK?&-# z1s@a>JM#s;uIZ$o(xVf6-Vn9%E~aKMy!Pe&K1N0No@{uhTUdz08!1zmpOlz2dYw_^ z$EUG+!>5b$I8FN{&RSMTiS9>GrxI*t!!);FvRjK))1311rK4pEd=-Bfjp+V+tyRQtDW>h!-*C1Cm`K#bUyLyFg*~W3V zc|XU&iMwfQ4CFZ*+|&3E7E|JEwUce%GH!mAUTNS-Js(0sN@fPr7G9)7~cj{E-0jkD#Lrs)T0*GfeZ zrI-T$F=n^7Ar8?$HNcEF;r*BnT}``x@Rbk6 z=1l~GAAOpq%%vXTYy>8Y^{hN>cvL#|J4R4(xzdY$`6UkWwuqJCjCri|dy)=wr_C>3 zs#cF{E@TjM2G#RmH*YfdUiyXb{Dbd#>pISfE;KZx-b>FkH*a(Ht64UUs`EYEh7{sE z3Xx0qMZv@?c@upcx|xtbEL7D`{`TS;2)EWm&Z$aQR-7(PCJH84qxZY>_X&C z?G)q5ZU+JrLbI&6ya^Vy6o)%LsGCdS$j|PiQz1btgZU^R65ONji~-+kNwp@)^^$)! 
zlZ_|-jB~+-wchLEbgP9d{Tdkvcs*Y`_6fV0rE&dSaqdfxd(u>s_zU}6lgxtL;4$ia z;Y`y`iv5B05r@a|9*FsD;pY_+8xR1amIzU1Y+^@~OG;LXY#FghvAad7&>9l2pM!BN ztC5x6jT{l7l({6efeIrb`TLJ3GtJDjnC+-97vw~1vpab0*vfN5tIywQa>UbS@gy@kE$TK9~=;KKQxXraM6}xb_eD;kNX3{1_-_N$8&S=vp0iKrSfqQEC5V4-v zqRW;^o?(8mxKX`2~L*SrIRGuAYD!fH$pFRNn$_ z@@I4o8v)Ed>Ek&_ZL7Y2-%TIK+NUCLK6I{XnLH@dCQWqTGKvFiY9p>3L7_koW28~H zr@eqI#MmVH)ZZkCPG~)vX{#k%M%Yfhb0wpbswZ{Mo>`yT(2%T)JlhV5l_=som1(o9Fd7AtfV6m$KY1N3GnQF!mJg*FG9fNNf&Q%OARh z?YQTQbyEzY>Mmu;T!k8i(x`e?6#7yjrEgxF(XVd2Hu&{YUu4}PD(yM@gC0T=4WxQh zd$-HC1dRR?G>&YfjZ>thl}zaT(KZ6$XO9;T4L`jW8Pha*u3a3(wE=F3>-iE&GWVtS z^7LIwqZVqIo``r=)XRyzTc5_wEzp-6UL z>VS)zKlosb(L;6Re-vGHSW|Bs9s&X)sURH!qJ#(%(%s!99nvKo8=*7|Myr5ycZ?ng zI8ss?C*2@5VvOH+zJGUJ+d12N_Pp32}8Q+YXya~-8e;q^# zsUw%XszaECO6OIj_MC3!Ru$t%vHhe<{3c^lgS$syg;1B=`1ZA=D`w6$t}WqK^G=^< zi%7sUn=j$4f2$&I`Ncq9-@?@)&8_hv(VmtFt%aNM5I2(W%cw_N0xXY(+CvdO1QQfO z*&n)E7}|Uo{PX2~#-(9qZ_qnM8h37{fk-*ngHX(6;&o*TuW{s|k2taLb3cosU}eb) z#?}E6e>SLPpyySxa|X>)sl~>>vLjvfOuWKHCFqzD6!LHW!$L@i8{>Vh^^1GbALGN% zzhr;lNqdR2!6w6;VR!`?PGd#0fY$u!6UjohbvnaL{(;KNi<1YfwvlfssRQWN2O7lC^GHZ? 
z%WL8v=N7@H+-FAFU6f4<`qRY0a!IZV-nh!uV>1;U#vcs3w>u90K6-aicv_{@W4C$N z>dzQX(l39-ynJy`&J)5fE1_(}xG4d3#7pZwu?U|BhTeZ(@dVy_k`2;jKjRa$OFh}@ z7iCXFvl5|xb--g~+=9+jhABVGjo(wEaN!F;w|cNgvwq3}zEKIF3&O7rzcs?oi9DHm zmp58aaC>**zE|KJr9dXIGR#tcO&|m4UQo8uT2bF-Tk|x$Xi^?bRrurhp%7kSmvyXKmV zReyFGJOpz=!ec30t?tzDx7zf_ zZr#tT+d~pEkDVG^u701C>nw&jhZx$|Q*PlijVG>jXS_wH_3!=zT|t*iRO6$q&nGO{ zV!ciD%?SmC_Rv#wKpe~1)8u~%apw+7@nLtbHdO&t`#n=;vjn%GpS$}k8+V?fs^n$U z=?Rbx9!!^!puiFRU{5z;ZPh`=*OUT^vw+r7b|+nJ4Y{yXCQH06cinKlv3a=7*t@B{ z{MQM+pGC`P@_wj4Tj%{}Y^VPXuzdnt>t_2yWuZ@vL(jsXVP}%9sAc7glX-E9cyGV7 zJzXdAd>c!eAH7fiWk1!u0^TrzxVcCeEFWIWXxbV9!;GNswS7{DkeHBzjiQFLH^VZ; zhakbCHZovvikL5uko$?(u4)b&7uOKK^&P1~)GxK2T+QpODPzBL8ciE(-U%meg|gkT zgfqR<;LqK~;7jj|gS(w0mPBi8H$4Qf8o%ek>q?$H!P+Q*wFo~Bwz>|1MR?cK!G_75 z&4_68t;Lm+$&e#txfdI!&oagCnApO@Gwt3=d%aVqA)nKHHkcw9% zq=@mJJ$l?KzsV4aIY9@-!C%!G9gpVZ;J#!ByX?Kby1jug*C}nMu-eY7cd9Z5S8e06 zS8rB$eo6(2VFksp05WRE&+;;fyqwD(L;{vsn@1n9-mmhV-nI?JuKTQ2D(x@(em4=h0Tu+Xc$@nsnMb0w3D z0`q%4DY9Ek?gfmMh_b_1J%hbgN%fvTsO1Yu&)9Z>#;D;onC!_R1TmrRFf6FCrRq4vI)8NQde~(`p%XZHhp`8dx44 z9i84f0aEBkwx&lvKT)5>qE;e(A@2V7tGLu_BiW-*T78Wsk804Un~R?hN#5yzcF6RSO@*qk-H8#$=@XN&VIQGOB#! 
z*92b%x%eLpn(pN2{>y$8y$I_-`?~lppZi_JBsF=%q=znu!_UT6pf{PFoPcS04%zVk zDZ|O+6-#w*md??4B5iHEE9Pq{F{z2yYfs7j3RR4NFsX82MF_q-nl8LN|KvuB;B)u1 z4}n;n%oVs#qmjHNZ_~+2<}Pb*4>U}MR>&)HmBQrB7v|7p=)ID+Jd}bOQPyV++OK=> z5zB=fiG%n5gk!^ANd5Ix0N^k-6r$}w?lkD()052lF-BZiQOac=suaNe59X{4PFPwf zE5pTcm#q&h5c6RlzA1n8FHhUX-Iocw?Afq@-vWT|cdnd8!c=+O6@o)wFv{K6_O>vi ziq)(e{UJ4w0E9%H0qCc8J2N;n{-ewt&66$kYyFg~tE>IW!4I4hXSB3PFNhWV9s+Ji zu6B7aPGfBj^%wwSZWuczeRdP26sNWD+eRttwf(2WQ3&sQt1``lg#?yJZkJMrX`ZAS z_%1|yHuM5{t(-w*4^~GwGHtq5V5L1WP3xYhEG1~<&bP+SPuLxyYQl}9<`PT_7!voM zWh%{{(lOs0&Hb3LZI4rnWT0O1#%WnTsaN*gyY-!5eydWNCi0b;k}A5p<>;v49E|_Lm?=O`+zz6{cu=m zm;9PvT*7a=r@Q@esY=NIKq2)vbI_Z-Rlc8pI%D9E)jfU4i)LQwDP69B^O<%wEZ(aJ zc{a&4M*bq>2=n3Ggm|CV+0ABxX<*jS5})FiVsw(7B*~~cyV|7W1get84r+Q<`GJK+ z@0)_*l$%toCtV5{fxKk`%Mvup9l4Nb*1ydQABATAwyVdvzh^PzV8Bu4by0(l+rorJ71A1HdBCGZ>yO9G~Zl9z{J&}I0^T$@&$`xnQhSEc7uU^D2-}5}9<|{LL=iR|AZpM}7;JLFqB+YStMHu6D_zt|4>Co*pqn zb|SXQ*f9@e78T=HH1o=TppVmitUB|rh%Yn0F@DQ9mW3ktv4Brz?7p#}?U#oF>Jp0K ztjgYigOX&&z9?7A*0e-Ip;|;O8+av(WcMDkPcX41Ma8fG4G2pKU**hL|0S?I`V9K|u%P7dreYYKS6iGrsnC-<0FU}P~ zK;RnpFI;3k{hEDXW<0YX%|OH>Hg2EJMetlHD#X)@p1BTbAdSrQI~!QDe8cpd9<&C7b+hp2n(L|UAUu+=m0Iz~#YMQ^CFf~N&ehQO zv5-ATwug@SEns8W;KyX|(OI$g8K3xXLa(D5rA0j30qQ`H6|KPU#6iBAJ|&>4pe1_% zjaayPq@lyNE=Yg1hJFH%TX*<^&9YTF8&+&{p$bcrTsPq*~*?)lJz`H5`kQ&cacnIN`d92WTfp52E3Z2eR0WWZ4ope>j$ z`x_A!Kz)d6(+yL9Jf=mn2B4R99t6VyO9%Zl$tRYEu;+$4A78!UG-LKy+9kecDZ6BH z7*!sh7wCOZPUU}qo@86z2tN1_>qv7ba|WOkL;GSs`Tqlv5G~sLBf3LOUXGt7l;6F#^*ao^2)Ug6IIewAc-L4$z4m8ch-a4`5M6g@|Ino2(YCOmg+kQ;xxdT} zf!Wa%zdUbz%Bn*;_H9<+Z&}V2t_UFT1arY!a;4(MBWkD6PbL&;An}OD^z$<_5LapM z(NP^Zn{tQPb?>o@y$79k(h2f#WSR^tU9|RL2EqtOk2Xc=`FLGXFI{ABz*edc|DBV z_vmFNNeqlbOfJM{hhYk8N7@I%o;>tO}aiD8^)B?m3TiyxZZ~jGp~eDN{Ny3uwR~?HuJO9WPd4+ z?;yVE6oJ?Zt%O1w4#J4_c|EIpwALLSQ!_plzxxYK@|V=G}`QxjL_d?Vz%4NUI(H@Z*H1Q^ARfN^SMb82B`oRvexv?T$&&TQ2QB!**)sEzbq-ibpaxi`;fW1-$fWtsaDu1J zN!SIHECygvKyOAibami?uUAcWIoUN?;jI0Tp}pw|aK@}y^O&}`FViIXBC_#B;VcQ4 zo+Og2pL7F0v|q&rg?)DR-ZLM2kna4n$R=1W$zYnK1h^q5H5ISaW 
z)8?hCNa+*;GZmPm@LHU9rI^c4~G{p;pX@|72{^6XH;>3*olx|Xt;KPTjKOMi#$>RG}90$xR=xIj8v-`QZDHfak z#a)-%4wb=~|F*NgzFhek_k{H^+-;OobT7u(Ai3)l)NA?F$cF!1 zznOn0)%jBbbu@;|*6&zo0TU9i-!bN`zs@?LDxvqxULx?{t9xoxC(bh9K2~Q6@X^0C z@SNZHXwsPmr5eZi$c6VTZ$j!mPHP3wtl}HD!C$>15us7xZ~IE2-YYk$vag=3372SD z3_RpYyUkCXq#U$ebx}(VN|CM}V0$&Fg_f24*>^)Hg_URu7Ulbbt{L0Uv-9!%%$eMB z_7Hv>fbmPu4lx@0eL(zNXDDje&l@l|S#WxSaQB7ne?D9FmDw-3a#ndZ@pFES&87M7 zaj5y2HAvj-RAE!|<=QnA;3xvcX<&${uMSC_pOpv@!Th;+?a7x6!z8i@+0Nm7>okFnRcNQtpvl z-N&AtU`DR*(OVo(LBs)a=0lc#FqQpLN5iBKiSTW&&6i@=mKap;ZoIx(O>!gJb!gfv z$wF6EZvyE;#I(@{Xs|T**Evc+O;oAx%!Vz(vlP{-Jhg}J>I%@3By{pZ=(V#aQl-p&L=qiu_E!7>bK}*#CDKW@p^763J}>gApAp46rI2A*NbmMbgTT<~wNvdl9}iLC-%oG&X1VTnJF&yYXd( z2zDsGZ9Ys4G9fy6miCw_!W*I%)r%AR!*(s5hG z{+ilPDEq(?v;dEfhm>2N%dx_O4AXCM*2CCP*4Q2b%gv9CMrZu975X&}J9eB|PkiKA zwd@5y?c)DI+Nw@6;(UcargCMA)iBKAOc^sx1$m6gi247y7g`R(b`YJ-uHA|>ycPWU z3%*sp`6ssxP+MZ5YC?^%`9d6Ky^m}7Jz59HM9Qpn@vpfJeU(l^WZf>EhRL;zwm5`4)%HG$H+RUg=aDoK z(cBobvCTHJOh|(X4kWV>P-Gts+>W|BLZq7%EW>B_DmOD=Un&GB|rejte6 zkB7l9fa)T(W0cva$QqV+01#ys|NQF6EBZ7zTS1~G>|jFuw&}UYyXF=60-040=8Ajn zc2TriY=ditodrmaNgJKc)Ds4iQR8B;9RzJNptQ;*g_*Zvmsf_vyt0;-RC^jJd_+AR z_0J)y^Gq^n$5N|TM?D#%2?b$xAy3Srt=PsvjTIE>9EP%oI1B4VYC@BSU8TcPuLF*^QMT!bzz-jWA_!ZvIhix9RK(Yi*;u zo4!o+SD(o|gm}S0AXwj0BFt4vU*U7)Tlf4B6f4mAT`2uROSVk?2kJfbwi4m|*8g7M zm~2dcwz!Eh1ie6ay-UOcpDpf9C!=hTPNCvk;aedOxx{dtKGwXsVJ@k&*jcbtqYxEg z+G)Ms1!L8puB<&*(ZgN(mg;xLE}Ut_7?>c0r|{_au{zDsVriCaoV0|))gGoTzQsvg za8U6x4$($8sR9PK&Q_`O$zgEDzh@F%mh9ck*>_1Q6uzW4{1C{lbUh|H-(g8nAY10+ z0<|Q1AH-YNc!e9*D%o+%Bd(?M2)o&=k5lBz^SQk!z(Wt=N4lA3#v?os_8*87$ZPAC zcXXs=yL)HTeJU|g+o^IlIqM9D4HFLc)pWs z{T#a3z3-F+8j5UUe{)Z~XbJ|s+bm1_D1}J+Cbv5%Vgc~28*!i~TGMx6BX*g28Yhdv zDU?0=9u62=rzw(Dd1P41^_eGrJ!&NS1DxB+vB3`DV`j;m1bQC8FXzYaR^-$+|DR!O znmL9k&0ql3HT(Fq|0Ayciyfr_vS)An$0T`#t{9hrboGkZ%{X>l*F-0KsSmcDizqKb zVBN6p_tfcc4ETtTEhuB09$03%u|+O zvgk%m=;nM)u{gyqO&p?Nm!(fDaRAafLOgFT$2fj@|1$`GV5d7x{Cg*FGrkt3Ny$xF zs>Bw>NOA~Lny9x!AWXJxM2;iyiv8ilFzJ^HHFYQZz}+VeC_Ytt+9^`Z=+7p9agFg6 
zt4ov2*`XIOX&lq`97`d)V1w@l^YEW+uU+ky7*dpC!BqKcM+Pxe;0M|?M?Z|AZAO+1 z5p$j7Vij=5o{uE2rTkXxiz|g%Ep;RNmmBUjtk^0h`r97sE|)mxSRAlNw0u{~`v585 z47tn`SgYZkdRxaHyc358ndY(m2RiJ4E+l{HV^4?gmxd||UXOU2D$`d9`UjmTbf_}K zeE1L4><-Caxl~{4iZ6~Mg;RYf;CAz@&l_}>#>Qx$?TN&FhdCWj` zx;vXfFu=*7sK1p5NpF(b0R2_|Qxc&6@>B`;Vnu*RgIPzHV5e|$!Y7qQr@awA&GrF* zX=qdco9-_u!ct*2z!vcW`Bs6oRUVF~{Hg}l0IR{Kr}lK6w#uoC((cRM+QYnJQkVYa zUSgZ`XTm7rUl_#qN)I1yg{1y8*s5EQaFednqwdF1F3>*B{IO(sk;V`O#jR7h!C+iV zP0hKUM{}e*hYoP50(qlWWev;-Jx>ppoaZpNmv{q9N6mOt_eF8{-69R!S}B^ZHz^;` zt1brv*thn<^w4TB$Gbi&=yhzo$W{+8E$#}%a>(`d(NS&33wdp$F2wn3l#ClQ=5pLg znbBNVP$P)1MBW{Yv?`zX+?!Y8*eYZrHSk(^ED60$UfMI1VI^!=6L=^>wi&|`Njf7F zmV6KJYP$a2)ll}P<(biA4iK$CSlz8}Em}#;e@xgm$F@;A>qRFwbd2m+Oqy}f1W6hn z`7S90>hXX1365*2sO0dX0j&Aa!#JLRlR`fLV_Y`hE2SLsT8wS&R4G%qVnhP7fqhqA z8_n{pSz4WkhEsEKj6EA@teA8B7f};-Xy4Ez&j3XYCJcK^6ZMifq~e8b)hyg8gCWzW zx3;B6!Hqe2NgBH`IcXu~#Gbbkw|F$<3P&G{g{=nrpKD6@gLv|{XTJ%h2Ar+>s$ zIU=4=SOoj@x8FdC{+8gu<8|h_xHD|01k`z)(qc}`~2k!2}jvS zkyDK+h+NWMcVBc%8>^F^lmItaFW^j?Zx?us5OUk!vkiq_oTo*#&PUjib|zsUJE&@R ziBhHM$Rf>?#TbAR&9zmCyz%a5u~s1z>0yD6C|@vN2fNC1QteUc@KKBFqGqL(ZvYN$ znJn?7;@q-OBR&X_UlVoxud{!BJ_&;D|1uVnfoO2s`?yT-$tcw+KSQ^fJSKY$JDPG0B|A-fGR0BCjZB(!G|EM!irrGXIF}H`# zSfb{uw@eNyO+THQ%2jdt2!~)XDPc3!lT+UlA9DmnD^JHPK{)>=17`jk?zNyw2LpC1Vc!$6j+W0iP# z(idJ(K?IbRjwR0}FHdXy|NnVSl%p<)nU2biBv*?oME=T?x$1G>SlwRt<&oSQqpvMI3Za9xKc)VLb&@=(y}jNGo%=?uz^H^L)(y-wNV1MKA;Tu1g;b`IKhgPzCUS`+J-vU7B0Pkh1kVsDL= zc8A)b7v>L6=kBB|HQZBb&-fiS%J4vlauj$O1bk4(Om>y!ukB1B)o&3){0mb@wbBHT zc_59G!#CRJd3~TPAtp)O=>F8>=aFm5=@ifQTeKwut+ze!H9USlG3I7GhcHhnm zrVf=j)_#{7!T1nZ#ODU90Lng*+5D)t6=s2Ej!?Z9Q3c0-3-<$|QP>baQ&{BKlShT` zeuEy@Rt-?i+u|IHhjoxl?%Hs)wpJ}V^OApq@Qbs;5C1HocKwddC(oBFtW{uRwT-R{ zZy5Uy0~Vn33Ye<{zo9(8Kw=^VaMB@P@fE;NpFr--9?b{C&OWOq^3`;&nqM$W_6S43 z;mz;LTGuEtlU(Mh^FEv{FugBg$mm^y5o|9IEF@tr`z36$Lb9Gi+moQrRSzYVGcQr7 zyXN4%Jg(E{veBEP11e7+o?V|=98wA`rbiUjO4TJ}edOs^&)wg!+WK=<;^+_&Gyu!e z5ym3)>b1NCr>mQ@sA|cK&k+I;@NLvgu_-0ijHad64fTb~|MlKlecYg%K|wA51a 
zVG~cfeJ{QJ-!5`f+Y#<ZVJe#Hep-lbG;uns}rf4cNsTUm`?%h{%T zOsErbvgx@Q0tVO$M9BH1)EAT^AWJ)C`4vpaf1vG-pBjeoK4+hCGw>qAE>8goy&rct z$D8yY6N#a%;UanV`b9|1WklQc)zxkXm|cX)pCQFaGs!7D3`lja{TNoWn4RVMO4vdz ziJYwMKBoF#j%inVTw=p#G0w0HVXcV!T)S)=kQa`>#0xWit)b@5cm=VADVsvbq-$LY z|KeT7x5Jw~OI^p1GfnH(n;2)4aP6MHBdpl1I>9&BBnD@`)J-{4d#AB)L>CoY-NTEP zt3Gvuo<#Hika4H?Ty?hJE<)83UqAB6OmeSn&lAHJCFLy?f8)_1txDpWFP1A&5_k**ROJ4%cKq zeKgJVK4(*=D3z%f@DYcq-6`i)Z9YmEDWWFle0k&ZXY|d~q0ss7A75=pHyN%RUa!HSD~_3VUFPFRDaQVa7>_Qa95ecQ^Z9*{#ojK zNpaG%Hry068~k$b>QjnQ`et<+hu3Fo2XUR9Haf;uu(p{(tjW(dA_Kk@(^}KpAO;!1 zXk^aN8DsYI)BI{81WfQP-}|&W(bnT|8Vm@ltOB zZkZ8a#QRYl6I=C2AsI5lVfGZV)|iHqS%GZ^q~eCfxNn+usSeUZ)LUux_daacHZX#f zu@kC#MF59SXjIO-`N&%Bvp1O9C&sWTMgvZocuc6Rxhdh-y)Zhx!Wi3q`eShn6wtrI z;tIUzI-9*~hb%&`q;D|3Za+Wp!=Gx_Ezzu(+fe8d8*z=Y$pKczaJEvE_pN8e-igwr zP|#P0P5G_lmG}N)sEwlx_AdsXBPk%eEoYc+M{3$hmq>Oxc|YGtzqp{}8}?&{=&3eV?U~OtL2OxPl}F~ckWkTLA%m?-Kyu( z9_kppQCZ*#y9<)mR%GJi(k<27aFZ_kh?p<3AA3_JM6XLr`7(tloMSSo9%2zyn=M;} zpXit1Vz~Yx=|`riDj=AXyVYUnpoRiYBMSRctTL$2^)CG`sBd31ZB_;&Ao4L_;>U@HY8%rEv_$x=2$25M_FXh$zha>}=n_l3e4k!LC8H4SDH(g0#Cm zxxJitJ?d_lv9y6x=Fvq>xjHfne?<5Jt^2(sof5tgO~}2{Y2T~9wIw)OG7yQ=(IahWz`QRBa@#OYbntzcL4v@fj$fzb8!3gN{%z~$50Y8 z=`Zuv-V;<9^kUxfXzPc+zSy^~H|)APsV9SYe>z%UI7sEFJ`!-k2WP?)6%ZW!9*rA|Egc zXXkrp;8GDZu+?*x42Oi%{lh%}__42)uW{Lt+iOmUB|zU&h(qX}B>>pSg#mk1>pO%6 zVbyBMFACiEW=HHFNnWk@jpUD`8l+Gj%^RUQ3m82+?_jl4Mv23rGopYVo(@*hW|_GQ zSroW?*ddKoZna;Y&0nX`tbQshv8jzS>n-b~j-bC3@^atBGz#+;+eYtNGBKk6DseI3 z;_B6Jn#Pp%Jn_KGCu7EwjX#lNa^m4MXu=TB<6v_^Y17Mm^G7p5&wE8gdq zXAonD#Y|(9wgC`GAbRojlAa6*w+>uTaEb@JZW@j3YM3h{ONrF#jY;he!%pC3A@%^7 zJX`9}R>m2nah;{8%h03_vP-Wz)oX*n-151=)2olIjs(Ykyy3m|k#9Zv`=8@S*zs=T zaFVN2vlDEO8RXlbEc3oE|gn z>rCHPzb{lw(-_ghIwhU$gsMA{C1F0b8`s%M%2sq%D#D$N6g@O#)_apuWO+w7>Tghn zkFOwc9M$$WFKdQvc7zL9SN#yWw(k!4>gRuUSmd8FT&;h3wQI)@8{2%V6F&chRMyJI zHzpsf0m8eQM_2~pmkg7ZGix+6bp@YVs$crmHU~8RW{3GIFM!pG%WZ}GQx1i?-N=LO zpg9Bd&g9tzsWskgaXUkv3P+u!n!wd1K z9;~p{dQx&M630jTJZR4r8rCdw@?8q-PoOV)GQso6#&A640mPik@yiQgYI*WY%6BP{ 
zmvb13dLh)w+)D1PpTvl7kd|ojg^ZGp>dJ z7AJvRa%GmM6tz2WWlf`30+jE~ne5c^-WNKHDi1vf;&abCLWMPp`+gsZDDJYbbCDd+ zu#o$0dirzW2@O!cIf6LP$%N@~j8}G@DE(@{s}dD=O1WX(3(m$3!)xIWh24}JyE5cp zp!KByE}E0%Uex<}n&bB1tm3XSxjtB6u{Mv!$lWLFlSVWsG=OuX^fa*c?$?~p9Qjg8 zGpwp1h){o!Dl05}UB$9b#v-brgEocY(Ue88>Z6Psqc0zY+H-AFo-&|mQHuUi_48nq z1O%K|ASW*Ho|m}laacQ1TC5}N<=3dpYsTR{s?eH+GklLZlak8hHF%BJW|;Yvt?J^{ zBumHz5+971DpjgJZOKSFMW+fYBw9192HaCW#wK@nOe!inCQ(d#d2!A!+8IAdbK*Jo z)qfu3=s>sBGO7@X)Mxm%Pqy7`O3fOMzPwVqQ3_<8ca8I8;mo={S5>zPO9 zs!`x5pteD<5F8*=Sw4S4k~@Q~+YD0DJlV&y|CM5x(3q$uwH05f8%(y$3FVc@V2iK0U6U|Vht!3a zlk3j_9|2~s0Bo?Q`*)=6-eiKM9AEYsh^zwY2}x!P1NyB!{DcPcjG=6PvT-b6t3x_) zpVxmWzHqQtAsA}~Pjlwx2_E{z&{c&(JZUU;!h|KaEU&nhz0ugM!f}e&WP_+I@+`6B z`-r}j#3JhSFY=lhn6%iPRTj~V_XLPu^fflPHD1_y+^nJkI40*Xb((J*n(>$rP9vgtKS^o!#Dxp+%70LfH=)kN0 zRg?ae7+!^XE6o_53Ui5c zs^B}{{q0h#vEsN#%VFxj^9JTP9pVht zGkH+!k9?&e(DS`gvz;6lwd3j1|xC=Ejf zF2Tbotw`SJ6GV#Sfl~yUp)Cu?(yv>kE6|wKDWmr)XKE#C6)-In+h;Gbg)4d>44{l( zY5e^sf1VL4b3H1{&9@?~h?MmG&qc90fUP z+Kl`Dy0qc&3(~nQq-Vc_sveylc0aAynyhQ}z~k%09e|#a&A`q2iC|P|p()-|3m5eS z;ML9u>@5cy(NnRtCRT0m-;-bz@;1Z|*M2{74~IYg`5%p~(7xwouenc;>ZOzcw9stL zP4J>+n=!3{o@qqp`ugNdEWT!huFDp$@{n{jrU4gq0d#I8-W-*60v$&Yk4Y~?4wbRW z%in|he5JJTpDP3oGh$qOddQ9?Z?X98wrR0=XA68aXJY?HR6Sa^mShI_sJ+NI>}ME5 zqiTT*#=zBz!rJ1jw0kCQ4}zs{PA8jJDLOu6=FAN|EAmZcvm_bt$HM@lGk~5wx70e| zPA~Y~9fNZwi2)bEFR&ey@UK#HdXG9X07Jru1gMi5QY>*8d$3+Vv1(z=ENPzbwe-%h zxL@f~;jCQs{qsz&M<2bjJXeWOtWDjJ87qWdPKW+nmX7;6r^^!kJmMq3pQ0YjS+f(X z`0}=0@L@KZ)9RpZB{^>NnGM6`Y8=sE57EtkpfJg6|-^BF5r$KOP8Q$}wFDo_q~Mdw$GM_2dV z?8SAD#0O}EV_(XE!ul}S?w-}hbDy;Ciq}6m(~!%2TzLQ~02-!n%`_gj4i1eowM|tX zn@8m=BmlD#0XO?tZls#mRWf?$+0Gc<31I)|eP!ZcS$01%W&vd|SuKV+N8~mcAYzq8 znp(Dl>L|4q+|mvS@B8nuLF1oT>x~|Nu&6xq*;fib>2?Kp?>oyi7kM`QDO`k)RnT*t z2j~MO$v{(2K9woaLn-n5+#WKx#?ge)3)2kka!<3lZsaXQC%Cr-X}`h9W7Mz62HJS9 zGC6q-m}YeQ@H1yQ48Q(7XgNpDY^C2pmE@Juv#Vw77DtkN%cHP8k6*^nLfsfo*bmK5_a*{O%CEy#5{F z=?vy$4ca#}r(lx7ICoMk!%=qkxsT62%X}a zIP08>-Te>;g=DCbc^~p|=2^uSod{LHZ-Q)Z*#a1!fHO5ST!i~G=_d6=PeIHL7y$C! 
zvF#~eqW;#EfaCNB@f>^iJ)i)KK z(#NSmxNwZ{fNgS{l_K|v-)PVBvm`wM{-WCxGBc;@8#ns5WyCKn@V$hk1f_Q&W#+sN z{5L5PACg}w*eNbITpenAqIzP1AtDhmDE&1v2W_5Carp;)RS(t{;Aj(yK_4`1@YdWh zzK6k19tHXMfM5g-lUEWdfXM}|bFoY$C+V_*2Z3GiNz>^S`UW*&!Py%Gd-cWqLVMWY zT%5FO$SITzK(;phpbZ(x?x{wOne=>kPo5^F1#c|+hXAqFKqabEdmD&(VmDqn<<&rZ zn!@cQ4Bf^HVCvrtxVqW<5a;tzJ;<`Enva;98kEx4-Fqw!0)$?{CsPNoFQqSnO5}_b z{ZjFA`!)(i&TFC&K4Pa$4#5Xm_biTSY{|E8X``JvjGV3yw3Ph`)>NR=^LMRU8xv;~j2Pnk(g zsrzS4>lL~bP(6~m`yXg5`H zEoUKkF(+Xi;B5bR$g9v6+nxc?Kxjw^z&1@NQmy>Fax=Pvo)Upa&!J$Iri*P zYe~3F?=Y>_!5;CkABHjHl0RJ4V+podjvpv`wy><>{{Yyu${;g!I{1H!jYMTEh=0^~ zi%oObvAuN>Nv~sDc~W;K$$t9lo&6oZOQLT<9j`5~Ql0dNY)U_du!$EINhdoN`n6m@ z=t;knTDr^#duH*}kXhdpZbuSX{&JXb$(mW<%4dTt=EtXtIh@s$mO7CQXk*R~tMHnD zn}deW1$r6Ovm`k%gvjkS$FJL*5ao+-vh=%*F=-+Rx!*qjSdL-%p2n@QbI{9!p6h&q ztW)Zb%vQKOI<1gF>X+=#Ry2{-6as9|tyGa_v^O>^xCmyX$#h zmPLUPM()-2Bv)~qP=c9MqX!m}zvT=q)4Ossbr@`G`MjB$)SC+L-((EEXnI^HVLTnC z8MQC1Muor5AkFky&TP!a#W4D@-$j$Afd1?~ON$xHw=`X?O8-$>8-#HfBXe>wpy@MV ze)cf9y438Sy7P8*!Nh~WlkmG<4Aib@MncKBpI0RIO><)qk#}I=r*aQ(0j<~qNN&j^ zdb<=WuMWIizlkp6b>kM?Etlmcq0ctM*}GhLtXeUR(R6c{rHp2C??l0&UR#GPYvy)d z=M4O(ileDv9G60SDlqahFZT4m0R!c7G$MV6A->aDu}8dt^Zozks9YPfbc)i*Qp|(x zin^1r>a)X}I|FN>%OCqpD>Pj?|Dh;1<`N0E#U76UJG42<5Ou43?KjwGe=z5~5lNp> zX^`Q-%@A8#s>LoaG!*1L#b)hxQsnlByz5=uLMVU;*Y4TU=9wi{Y-P#Oq|;Ab9Guky zn>;}na|n*!>Fr>`SnXfVcMH`K_wt|+#{WR6H+v*1w*M}!8d_>6IAT(`=f2@P#e3SF zlPmQobrkk4B^{1rTIZS!ZsWYKik3YV4e>Oe!=UF)_W3Hln`?cwWbMbws765PGU-Ox z!5VHoDXRFfSHR(Ar&_Pkyu1pxDJUlLChxl-7w8NwGkM zcgha+Xs{xq6J8>69ci^q_jqRU;0e|>muIizHd`BlVu^DIIYN?09$q14EQE)UTID`aL8Be6k{9dW zWM@1#nN@d}4g8NjJTro`D{dAlfIM$5Zz0mzoRCkW-{MfahM#w>AdtZxd9*GS3;7iC+vIEv5kR?0~RS zKTrZ-EI+h|(aQC)Y5vlYAMQ>GIMfChrC-f*Wwfazt=GPHRJUaHwh7fv`N`nGbOfW8 z5uSGQ8Fhi+19c^u(f*qUolfuOJsOdJ?-mksh~{AhA7Y_i50NKR8TBTg?dzR6z`7}w z+TcI;t7^DQ4!_iJHoDa4BqYraD@di@na9%cc7suH`Naxb8g?ow6{ec{>%H7>+pk6R zCfccjsOImARt`%1Lml*r3yC&VzLnb>9Wt8i`XyJ=yx5|z%;O+j7>g)lAAdAVOEh+H z&fIpOv7pNL3%&Cx22Ch4&*;xwe=-@Q{dD;&gU?f*^7t3N*5Wyja-AV|%he6--|h~c 
zwpP(vr^-4=X1m>y&FmDqyJX;hwP*eH0`#(BjZTDXser?Zmm?wOC9-S`b01?76!~ww zT7t}s+v<34m!dIP?Z8YL(sS!BY%Au>kWtxq%A!)%104k;2nvC-*X$g_(~1;#dpXh} z;Q2k@DbuUdEu<3sW8z+kD7_S*UUJ%%eFd|75?hK-3nH-m3&cJwv&^+#=U z1RMcjNKKI^87i@-8HZz`gCEw_{|BN$UA~z^4kt%oe0y}8>V)>HO(b%1c36DyNL3_v z=A`)Wl_x`5+jj@! z->NjCZP;qnp0J9c0I?Z+rPN$QdY~+ibw}2bObb+i}F%a=Cc+WOV_Hkh+L~v5+I5A1G0pT`huP(YNQ*1B^=d_gU1Fybg<;}45z-Ey%tw^@BBilu(i z$b{{<608vD?_0q<5uSPJ3T`4uxb8-ia!A2}WgWf9Iw#}X@6*wWy;h(5+-j0Z9|fKT zS)^px0g?BhdoKP%D|`;Mm77J`vdQK?#Q?1KypZ0HiV}HocHpyG!w|Fo0J+U#Pl<8h%YNF@LtbN>Jnutg+}Mg3fRo~oLxpIHSw zq!sDUdaRPiG;%Ebe51vI;(L3i9d-whKHc_PhUI)(Qg#r`@F|mZ!=%ydo-7F-<3OIr zUHfcxI3tJHBT@@7dO{{V+P zSz{wsTD5u7W|1XVkjAl;?gwGLNW^j9Zp0qIb<$?~96Z)7!Hi{sdBoDqF%3H}s0L*t zKy7uRHKM&p!aO0h(ZUI99D=#%HK<~wr=(hV2*540Oqbq$R^=Zu>aEYP|mN7l=)IP zlB2QMPeSTm$3&6G_R;z1jYDSc0fjy-~n`+z~$w@1?OwR!E-yI#8n#;=^c?-)77PFK`N|ls!W}jRJj$>`zs;ZSjf3YsNROgVvplnqW@b zvmLK<_h33rJkj3jQ)xU=aB(xgUv&EmJoy-F#}&Z~MO{lWAy6Ay2SY=M*xt_jcj{O1 zE5uOvbHh0*c-vo5b1}N6$RS8}*p*{DK9xYNv+}@u`!#I7KLtl4n90a-_3I+bEzT>- zNY1?+ecjHs2kY^!xc>m+t-L-$?lSH#Gi7D7R0`sF%DgIjva0Cp0DF_N2|t^zrV|L- z=d?O6p!zq{#xl6X)&79bi5mgKpt<&CaX(s^>#r+;?uxyGdj&e*ZrU382j{I%gTzZl zOEl-RB1AoCQ~?`z&?UWzAAj3>>H{N)%whaU*%i%X>dR&1Zua=B{ErgcPh;GTjeg&+ z?;CipGNQFfv$-UlS)~BUY4!?7Fdulz?6A{l@Vch4S7~wvawkUM#OJgBmzkH_SpS3(<)Ll?&6A` zA+@A7%TvQFZzFNvlzhmadNIhP6(Iir%6kG2->-ZrmzzxpP#ROsk%o@J?fr(wZ-LXi z$A>d`85%sz86%C6N=&bwpE z`2f{^n=XDo>H`k$$-3^k{vzAjy4Pf3>X%@U2|N`5B>wLI0O!&9IYbvAV!PuW!t1dE zzi-!(AKdjn;ii(sR37F@ByIe0C64#MeX#!BQh1IAGt_9;n#>rIf8|{d=yt$$6;K6y zimC*J9g7Zq@V1q)im>qSx9D$l$!{f9{Tegi+`T~0L!Wx0MQhZZlJRaMNDy4 z(fVq;DWFK^%1`vr{kPMsMa2nP33x1R4QmJp?0Cqm!2ExR{{Tbtu9Dt{38alfbA-l_ z`;SiM{m?}9drKxuWH5#T(6m0aVZV#Lc8+! 
z_VlR!{a4`c5Ya^;?rQ<+8zgO)dmsI!AJ?vSErhmJRE*hIt)!2Vt9AtO2V`sGx9rEJ zA5_!Tc}tV25%!>sYq9V4_Wu1%MKf7c(~mGB@3=kq{r>==>W2c$G%pPOmD7tTIw0@d zeE$I8>y-)cj~3s_=PH8IFk|0Ge{g@l_UgX^;6RHh8so8}t?2vy-SyNJ^%2)7`YNMq ztpd9M1FpmVzu&5#4CG#QNcQTx1D5xoN54YhW$hD;m}eq3EJAv@Y=Q{(JNE>Cr%9d6 z@)sI;LJg2N$Pe4tjp+X4rdcD&B!H0KXdzetPjB@A?|<~^C1on6Iax%DrxGg@rUSA2 z`*tVfefj{76Cl!5lTk>fMqQw8z@7eI(|s$=n%&>RZJ8E!~BNh-1rN{{Rq;Z(nVb-{Y6*(K)0v z6S~F_T?yZZ^Dz0cLSbf)kZ1Ibs}OlEzChXTJ_k+gOB}9Z9Drr*%zGZ)$Nqh~=l&bC z>2#?icjBOcJ-*+!e&74G@!sS{k{IPyPcWltiS|5BzW)GjmTjUpi^@}G%hT={08btJ z?mhM+_WuCCO-^BBrWAx$Wob0T5Pna8zkhC|GIpVhCh*51tD@}0Td?2TUf$=wL0QQ; zwGRn*2>$>9?V>=?*T=WFMnr_1pqr~$u0`klN~}gV2Sa0e_Z`04{{W|chr0_ch^1n( zD|*J8eqs=PI`-dU4*qr9w^IVl#UoU3RQo7xo?6fY`|JAj>I)ylaiFUj)+-KDAs^uf z+;88w2e-)n;shB+Euw1-(JJ|%O?n5n=4hoL`+HObt zkDt_b>zvx2Y7BKJnv+JQgTH(ci|s`)mHa0b*ze7ZIQ(K|D5nKp)ia+kb9`09es=xZAHhv7?XXHqaLNZvOz{ zA8-%(bmzm@9zQlCJ2YlaDj56h{r>>(`u^QYS+tPYGP0{E9Ekh+zsL{R{+`_)nP_Oi zPTEbyT%EBSUPN!)e}}fa`+rWb@I>KdPnMPG%vHgUu{x4J%l0JiTG1UwXD!VJj+u%v zP|c!yHiDD$(`?R7a&%pw43fhcDx`u59f&^v0NfvKe%(P@%uOUR{X~VrIMa6=PwVme ze{Qmvy4DC6M$)-3ozYWji?Oslf$!-D*n9TIy-GvYidF{YSR<{$TWGC&bfLf@-@dD#(tA?0bR;ApZbecl#0V(YX{| zG?p0PMmBa=pXXm>1NHXTZ6AO0=r698R(N;k1q21*u;W|T>PNWw?fdn+CzZ6Zx1uSd zQOVt--ye;gexJYV(u$VklZk2gxK2TopH{vB?epJ5TJO-zgwP1pZj$$RWT_*TR0xp` z6d*9{KTzJFf8_*gUBbI=(F##m?DsG30Qug&{@>T4-z`k4M=0{08`+6%exqIVcl(X? 
zqV?K^RLG7&^z!A{b|c^S*Z1hlonWvuC3i9t1(9b&2S>a9ZhP&c@7w(U0L!B&Bgn_k zl0y@05*K8T@SXkl{{Z3cdYHfUcBN&r7y>6Oy7%IL%=hS{cEy-H^I%lrNl)3P@YHBzukMXzS;z>9k}pl=+qIG&KT=gQ=HexCdkH->jg3 zM<$*ydB_R-fO~KKKet|F7j~P$oY zt+MGjuGL~z;UJ9w8*bxYZi%Z96Ff@nrAHm1*dX`W*T+EN9Xz32Ldp-*Ual{-5T0f8hKSxhvG*fH8Zrq#zG{mIJr+ z*IgVq0-cmqkA6;pb^ibbc)G3+Ed@N)MV|+1tk^u&XpvfJs?IsAsmr&U6u>$VLjr(r ztQW(6AjU(AN(9=wmWn?<86;jj*~lRPJ%Isry=!D0cIwB)J`l&&hP8ThVKwc_i&h!O zAtk7fCy?tzfyja1->ikGaJh_Qc&bY<2`R@rf!B2)^n>trWvx8WI9FKuGJ1o#)X~k-V#;G)!5bCWA z0Z8(dPJYPAt##1h@bIr4T2tbth3lE+m{;SjQgI4E?gth4@7t{WJ>a*KBQ5TB z*P?j*9PEJo@cB z0=ng1jP>zl61E)+H>$FB^M7Cg+kBVM-&a__4#k*)=3<1o6u%{?tJI0&UQs_{@@$@9 zDrh5XxIX>5^GTW6G9Jc54a^@dzQ~^#W2;~BE>j~zF4l2BR^MgDTa9_rizu`AVe#mqfeuv2KU?((H<3tBQ2-K*^(=XJd%iIXC-A#?Z+>k<6xhj zpVG`!mX*0!JDBRv3}nVV5rzPPzp?CkYbD>>zy&F)U*9%KaIYnM`5a`m+{ZNr;K-m8 z!TYZMB;KAW0P3 z;V0R)SC944>s(YU*;)-^zJpiUBp zm8wS=23ch&#D(=NTK3+DzfP`2emvuf)T)`W%quF$+UD$gYnNa^){n6r6)L1WgT!$B z6f-V9OQ2u1gSi5l{uZ}UCXp*Im2N%?sDGNpM}GWIN;B6ID$+ux#AZ|I{%V!uz4q7X z){59j7X7f8$R!xM;lGzEvxahXNJj6zBm@2W3WhH!a%e;_nB_@?cUDILE4Ol*`1l7~ z>3F;td6EVB55QilhiTA@tr-i@z{)AJOE5ZM-kmuc*X{mYc|kAcD!t1#Y}aUJMM#_t z6(hg&Bd7CRHRgC`o)H`)(x_0WO?FfIgZlKQPY_qgpQlL{7KKc281KjfvfkZ~>-OvC z>rte_a^4p?>}w6ySNuI7nrZARkuq^QBS8xG-oF0;Jq2gMSgX;Ya*@i;j%EJ<5(Hf+ zA7FlZEwYwoh8U}PO(MRwUt@vRkN*INpzm3#Co^4@AI%{oRpNs~T2KQi@7vt#d+A}K z;GU%o5C|aZ7Kq}_K&v6ETCiD!UsL3Uoly2>*7U&Ze%-qc{d-P$ZDb|4RwA-x-j$(@79Co63C`Roa(UQ~8t?ZhQ3a?z-qaADxfJ zWaj=okg+uEumNsl)3`j-hJfUE$FU^05x(S;MIge`TuhCj442wS^42~Tjf`!%Wst^c z*^L6vUcrr)c*!jvpbhPaW)1QgNgcXT2Z^4Jb{v_=*0UrtBbeh~RAefUgeci8P`;0T z=il3*F**C|JUI1J^!5>)*iT60!TW}C8MI3RpmxyKwa_r;vDEV|ji{2=GEzi|Qmk(b zXL>!k03U#Dd;62`zNa;THnN<&45gMc;;1iLwD)XOhV+9T_eq9DEQf&}Y10ihz#yG} z%16&b<80PjmFZs1%IRs`2_uCiVIqOE$FL-V2AKP@op3SHB&x;gxEGF@5K* zOU55}x-Y}4CUSVk3^FT)v-n+W!|QfKLat*um1%(7n_DO%?$Nq}+#GV)01nzajzab# z-RNe;Qn1z44cTi-9k`HrNWjL$AYLe=i7nYqhatUK>-Im0(314>$d+PuLea=eOAOM9 z9FOpV1b{a5LxG{8$u1AY&HPqee}<@8b@66Qz|kbk(#XO%)gyIZ%&g3hpjA_karV0t 
zjHg|JrGckT?SyNf-gi3{L-8Q60MOIR+w7fWrmvLo$0m%)llpN>EPSPSs&H;TVkux1 z&X93+(IH%^407 zqE9oPfzeXOzhVS#v&lD;3O7M{o0%ikc{;&V6unYQ(#brLPgxxhuQ@qJ9lLT206X<^ zKN|MN?}t1&S1AT^iamMi#%x8xb!fzfG9(0RZhdlm_V3$ZX>(Zc#uleMdAmFq$rH?U z41Dk;kVN3O{6-wWX7?vs_&sNoyChap1gnvk%${BjVhzDc@l_+2r)Ask?0T0t4S{&; z-3=lPwDrL|#9{Ek5Y=JTOS!aVsJ-L3eCYrUG znT$hGk6#o~uH<_T+vzob5n^FypC-#(REfOS2Tmp$<;%Xg0OL!od!M=K&C3xsN{(7| zMujR?GtVaxBy*~T{$CdL9!n<49-ne|)UI}gtZp*h^~YVEN-%+Hm6w2jRAj5^n?!^p zgV_6%t*@~rp?E_4G!pMMLt)yE|p%A?DzO;wSI{%BZzBVO77uKN+a^ztnJLjGF3 z73#DT%V*5sVgbnRsvBL7!8-kcKYqL>0BGa#k=SpwlWP=12`*Qq=Hpgl6GZHM!!EeD zCSSQb?b6JThhAW_SB0i?ZYt6Zjf7$VQ?JZG=lf`Nl-a7%K^{5n389(eqNxeuT0+}z zw%8u~A8qfVaz1?1p~g$ZIc>)f;$9+xI9z-kxe2lCbba5f?K6^sr4aMp&?XPzI#~Qv zveI;p(<3p0M+{XXW#a4)zQNF4=#YNhh0hAruSB!RWwA}8iDcp%kx{N74&0fUyMgWd z^jtGbYSl{ch}y(9rHQM|TmiDmP9n5FoU1A8BW0PZ{V z%9n+Y(45SWLjqNue3XViRB{h6yYISztqpf1_8ZabW+EE1Jd>r>`fO`UGzDVe69k7q z9qVWhzW4b+9{n=@KQiG=pICyaNNbnS4yfxXb_9NA8;|eY4R#yrDbkpfw#H&yU~G|M zTD%g$kE|=KX(&n9w`e2SUF)|JM-%-=US;n801rmoVO3!g4IXiN-ZGp|ZRl&?Uj1Qf z83kBxOId5!y%cW60>7IujQU6&`zaum*1*|69WA$ZLz%558H}+j7!!#~@%wb>_FT(kYBa<;Pn0EUMRON4eU4?24>_61|^2X9bfb?isD z{(#rVQt>7x4-h2(09RJr9G#tkU>t8+9^?)7J3A+PJpuT;2k~rHKEiqHEN0ZN=6VAs zHsY#S1E3UW0DZUYdqI8`WV3z*vn{D9JNSM@5zPmGGsp z!_xj6X}nJp&x-p8_LwI&n+!k$$B(_$ep?Y%*E&;G2kE3xfjF^^TSIEU01fPDYhys@ z4yI@D{C6f}a)99@pgVPNzOkSI_ap(W_R@fL)sAz=o*QLstVN5ujfG}LT_1g`ZBKF7 zZ(aRgE1nz17|f7Y$=#9Ne88~@I3kB$I?|91kIQdcAd#`v!vTlT6kpD-sZljQ4DH&O zTcugz^H6JTI=OM9R4DB&+%65pTAfXn5u_!g2mS;XE!n9QI6hx{Oo{{V|*4}Se8 zxs$U?EYxwq3%=NawNF>5?0lbo!{?}c%(m=Vd8|w3N6eB*_deSr@A`ao%lp6D*phl^(Uoi5>~qzkqKDj^BeD_1dsFR>V8uY z{+%!8{d&r1BjjF3AUY5mUdMjg>h`!Zx-?wJ16RIH)lY?NGRGRmzGD?d>^lZ+6Tk8u zU*{OlUMcD5h*e?%^{tlxe{=KfNgZ=f4bHI79~pSFHzgYE-Mb(1{{Sw!4dztGLnE&* zrP)rjLRiOqye|b?li_PkjV@y|o-p$NvEOgRZ<>h6b8KpOL8Dhc1;f@}6YZW*29qK#w>ki#O6 zPq`3l-{Zde=+nbP8d3%(Uf`TPjySu_#BMdQFK>`!S5E%`el5N9Z3$3g@X$PsU-RmYyH=blYg!v4dhB?y1N{F0w@_CQ&=nNS z5}e1CXsy5Am++Lu7|iFe0{b*Uj1`Ba?cHj94RW2 z`Fi8sXX}WHBQX3MAAgc0Oq>y`fMNW 
zI#2j}n34&ZEIR25M94`6bba;kJ~#VyC}w67Hd^lKUBdL0`HK!lopwR_*ZG6`buV^V zD_oO+U2qK_t3dK3cGtP>uA7?^*sB|%CzB3pPiE{kHh%v5={`~=k}!)HK4O##K@Hq* zK>VJ^-{)VyN@HN77gib$R_H-2VVn~Cg61Z#h&JN>#2#c3~tdRdS{DQ`S$k;BHRIwa+1a#Vn*!EG`$A4FUoo|28{B>Q9w<}szN%=<0>{I{_x9m0FXJhp1 zoy8=x$^7Db5>h~R2Oi)a{{TJ4{>S&~mj`&(WPk(Z?@9q3hPT`F?g$^-w@V@uNU}}h z+14)-+5y6{1!e8OPLF@@q5JE7c;c+JeC|%F?XACk??>Ox;oikaDPS|xBGO; zrhdyAAb$HYFxzOIk5Yb~-M8<)j;0)uA!czfx;2-1(PEWRe5nqCfOZQW#C?X(zwOtP zt0XW_43b7r7@J3WW%nN2JO2QuQrym44eK_uh{q+vnT&@6&JMw!W3(ft!QI z7GEuPb~XVY+>h6&-7Ggun<{*6SW5y+EbZuG%1P`%(BJ8?#Qy+(u+CYH>ggqD*d9vU zyQn(Y{^P&v)N;=~I2IO$a#$-XFadSPUl&|00zwg!&x@Pd6LE=fOO0}Gj zyWyl@cdY}X>^3|90OtA+#5bc^9?Wm%v!3IZ!TtM>+rQ6JmM#gb3Ar6arQDt90od>L z*G=NF1?Z5((W-ILEIYW_8Yk!5_vnL5OsyOiH7rctY8hDrCnX(-4clP$(d>HGv&g}g zSbd@g^gHV*BvC0!;asYQW#zU}uYG%K zx48M=w)*sfyhH?5T9{Wa3ebtazd(M@2MRs)ba z1b5K{`)mIIHP$e3LgTVgYbj#0$t`d~CeV@Fu{zj4ZR`8=Ux)3zvFv)+e!TL?k*_5}K1TOCa0v7kh4aMOZz$TmI4sB7QYd;Puou7+8HmsA); zd}$gaovV5W|XgYUAZxA~6U6q!LGRgA+qk&7po3F0^KN4NQol5>#ETZmaAU8a}< z!{8r&+W!Dge%%9d$e>AJ6(#pnI?(UmK>nll>2BZ=3$3}|5Xlt5T^UwFRkX)P`~JO1 z$1_coQ46t2c{HPuD1JBnxBa@&kjYPO!BE9Yu-GGd@7VX~iZMor?HC&%peNiOz1fC~ z?z~R~XOh=#4k|(pL@u}NefsJjg*f=^`1cD*WmucfW@bL6PAkZ<{==w`52-OrWrw3h zHLt(_06lfD!Ax|f##6)#0C$L`NZE(8KFc3}aHynf{%^NWeN%@3fut@U)-eMOfvZ^X zk27KNyN$s^DVoI1Oz2~Zn2`FFz4;9t_hNm=VcV@uD*=?N46jE{{Y0;^tCLJrWm?09Fh4bX6`+; zy%F2Le{Q~D{3#*}5Hx!u`D16;6Zh(8)fHG~(7yNnE?g39U*_`~AshNhn zx{*I(=!n0QUnGIs_v+gVh;iY|`0pSUgBubo@w;ryfP#Nf-G;wTyzpXC%w+7jnIiI} za%FAnQH_=Ze*}B}$ER1X(8tn>HworBl6j1MywElfp8o)F4$oqDvC&hdNM9P9AtIoy?x>meM>|LhO$Hs!sxj>^!j(Np11fz!?ohe z&5ywgPF8jm?8zc-G(?gn8(u-|5mTej)J2D=$OFw>&vkuwFknbLNgc;es`#mCXK}c!gtbFn4OaZsBNBy*Q!zFnmu(jr4@_5J;C||)J<9L<1#jJ)Nu0ob|UkyOkN=+c-y!N$6T~Ykn7X|AP$X}Ia3LW z@jK!1@WR&u+LE-B5R9?1`hgwywJrzlJ~|tv!?q_ZWlz?LIhL~i3mc4PgzLx#>2@V;lp zi3CZK`lpZxiQKG1D3jIhpceFN5-nVWbzl8S~a(@$PHHEN_JO3^W2$~B+~IwbrP z*0QE@(Y?xFX_Uztryd=(YOPz@I*{|bdh5Yeis(id1q8SL)Eu?f_=5Fx=tE}Jf13j 
z(FY`waZWR60U6nl{OAP-Uvv6(*}>5+WRjfM0MRR%LE&@t^C24`nOO}&SjuQ5$5`DVsnT$q0?O5lbUU|~Y+j-r{MA+;Le~Z7s z>1%jOwJUZWYgVM5IK*Hy+EgTaroDmJTK@o~(-Is3=DcaO#^G|@N8w}7)NCOMC&asVofREWAI4-06w{O{+EVxxE@JegI6e)9Oxz5oRjGA zKTZRoU^|bv?beHU+|`_>k803U9;UNYT^vxT?e?0V9b)+yRLVOa73 zS%-a=LO}=Z(3zXQFchp&G@+KPZKGl+$SA>x8zB09y}w?$V*`XP)Q>L&uEv>l<#QRR z=PNkjkI26XIL#2QdB#1!Bl;fWw^}l@+OJ|;&E`hw{{YZsAPDvEukz};XyKtW6WMHx zrS|W}+a31T7Wwz@`g8@@JUepwCHSm`DauI_$qk}EDc<&X);|MArI_zu@e!iJAQjbb zLkE)twdavWK1O36fs^wGiB*0Sw2q+v0PO><85q1x6p%?n7kDW2A!_cc%0NpUkO2c+ zH}T)^(-s$TvD>`W6@IF$6UQ1ca};DqRov)rO5_j+VxKzb87|vtUY%=}Yr%i{3PU*& zopEkwdhft6KHJ~lu4w_*X@XZ|QqI{nB_W5!F{PkDm|jc zPo+Zf+Y$)k8<)2FX7SL$*r%VC#f#+Hk~+XsaR6RmBt`jp*m2vx6B_LCE!$yXJQG(9 zmB~vcCXl;o>5*2dko9UzyrGPyz}vG2cUAS5jDA%F)Z4LMpPg>~YIL6+Sz`h?xWvFG z63zhhdUe@$Hh(Vqrn6`Fn1qYQ0}75=&}K3MsW zA*PMN3IPX!bpdz1j^ulFrn4;aSA!6%XxV=r5hcm(Y%G4kIfKvk5f*8MVU@7#g&->JxL z&4smM${WQ64WdB<-(c&x?m_8JYQ_krI-K`%H!--EH$JK`BSMTyp+HX9 zyo$`H)z}gTt3baD^pUy4gLs;wzD3(iOzo)p_Rg7ma9rr5AtP+rjjbv z#BD1x0>LCevvP2*G@k9UNz#tA(8GUZkQhg2$9x_W~5wQJFS$4+&@WH z+(LOV%TBw-8GtRr%K(A7yK7y~9qE{BzEyJhDX6r0x(RBvxm2}flr(esY{*sBH|i{6 z-rJD`@AGS^JkDDc6s?xY;aVD3Ol>sLG9RUjkyew6u#Gz@EwTrreaRhBTgu?#d%5Va z_~?PQwsM7L@;2eWQoPqS8hA2nJar$*L zEqSx}sc2QnJTcF0Br;Awo#I6+wT(|EVpM^@eR6K8v8g2teMvFx1hIvVHikJ7WOQE6 zAO2?jM$U%)heb`L&ygfp`KqY2x~C|fy4?_J#?)?H#d$?4O8oq!DI#(u5FTHY5_EO# zwI4kZ#wh4587B|*6OK%!fspb8u|H$}d;9bx7oQUgS*dB2=Ze5uk^=Hc8Foby5Agyx z@){$s`){u#%U7e1ju=^%wdn)`I}@o|Hax=2fCHj|ROsu!Js)n1FbR^!`U)PgnG0yox*;@)3U%^5m4& zjZBBCWi9#-N{ zqt;b94gUa(x3^BB*3%$^BU8h#%4nn-NSTY6_SeVXRpfQ5Dq6Dzs9GmSnI$aC8gYIg zG1n%3L$?(J`Da9rb$%*LWe*Sk02HrN9`aVLG}BzMkUWICB}`SwRg8`7jT7(O>~!#_ z)4@2cEJV~+Gl?SPlOg~;3=Xv01TX9Hu9sHT>KPghPD35|E2NRB05QEx0A9eB@5pEXHSh;dyiNE^_@Ch%fV1Hk4tEhw z(^snEL`zT-HkGAW7y>x^I67qvSZok}Yy3fr#@-gcE;~PqxkUkKAz2@=Q0#|9Y%+!# z2bXXG`Put#rsJuac>&i?HT3wZl(8kXshw8q8QG<2V}@tq{+0J9zt6!Z{{TDnzJ_L2 zM2bncuE!Dy9^U%z-?z{C_0FTs8Rl6enlK3C=#&ma>~wec{r*Ae)r^|MFFGX8S+vXK 
z%BAD`uOYHW=zDcxsv1Bk0G(BppTn!ZJDRppCxXQ|2@*ti$-T=ERDcg{5!jD^Zmh?I zG2R};{Vr$2SQV`-0V0znL`f&N6*0)Hk|PelfZEsu`;NIz6Y(DI6}TfZaU18O8!P_+ z%n18-`w#Nf=Z1d~)5O^e7V}pbnkZHnAr@RDfEI52e=jB~G*z|^i5dV>Yq6B%uQi6N zgHu<1g=#K|hh z7!jt_$ln1^A9M0O&wjYiX{`LfFQdY}xO#bIQ}~1M=6sLg7CZ4n2MV~y%{^SBBl9b( zawJph+ySIU`2PTgPP_HU^T=jqoFM*g$zD3*``I7ndj5oevnPe|o*(=kt>K>;W7+FT zhO=ujYR9*|4D8d*R%n=kCwEBWJ&z?}+z#I$f3yeTWZ#8eILP>N2DL6E%Z! z-=$buu zKa{TOImwK;i_7ig$VR_%zTdZ8d|YAB=cDOZ%0F4 zKm7FX%@HS*NIHr|DhSg0IR5~q`jn?ISB=9R@dL2_{XT+Us0Jn4D=PuM{^4~y8HTcH~#>;_v;KY81OZwjDW~wDgZvcO3YjO zY=iwi{V$^{%T`tnP)v}lS_9TwN`IgXf2lunrWGN0@D%E%);12TLGIwlcHaL0Iy&ll zmuz*S>C)DDCb>29ZOZD0ZmD2=kR*(-2fzH$f!54!LzJw2zn0Ftz4Uwk0Mz^RB(~!c z&h0WZkH8>8cX5 z?eD$!-iQ4^&~)1eG~tyaU3s8y`M={D_8RU^-?zVgbu9(as5YuXCis*2hwJz4)9h5y z!y*kSRb%QWuu?l7-o$o0>KD_>2vt*jnO!0ab**8ch=LtG$GKHx*2mw!xIeGYT=x>w z(#_7$q>L6{=7j(qwzOCK4*mZCr&Thz5xXLVeqLJ;-Tn8o`~Cj?C`cAOzn>?R5?K|c zRXxIxM=gKK$G6XNH`L5*NE@nLK>>8EE={iEM3J*PgypUJzB@1*KYyOx6OfdcBa$P< zUG`hsThKfEbu*5dOiz_&GF2qvSTWEb`-Ao%pKh}89$pye{X`J77C9(kzGzsmP<8LE zkWR+C{->$j05(R{o?Etlv_}&%c__sQ8y;Y9QvU$?+HcsC(>VM<^V52gn8G@OpnIbb zb_TogJiB~rW8fZ8DfnFuR3;`!!n0EF%J#NQDlf043A}q@r1|5g$ ztOA{lpO0_MItR8}DXlj~-7x$_iq&}*)G7pk z`H}DS-#+8me0{t9k5U->^Q;CrY%;2A@}6GGdw+E!<6vm@*TT`wL=d2={H{mXjng3! 
zC0;2RiBgj(BpY%x@$1;{ z+gklV{^0y{6xSU|Gfq?aheUP0&0OzM%MaZ5846B#3$YK{zx zt)3+NoqOnSe-mhe!Cj}-Ex|@_}h@kHY8ppzcz>iN8i7< z-?;sI^vbogvpjsG)5Hgr>tVOo9g=igx5s~;wM$m@|k$&P-~I|V7XoT zO8oxAvv&P@OCEaVX^_dwi!kJ@bj0Xv5%<@}OCZVzi8#0=mqZV6+t}A{*Zcf?^aZQp zSCSSBq~hJRy)f;)eY%#O8!vQ)M`$$#SqzD;5WJ+hMgS4NZu;N$-$PT$+7^7s9hj-w zt2Xq$=U;K7?fM>(!D2b-NoGl$`9u#dN58=x{@VwpG_opuzC0CU(vBn&e#9TR{Rg(U z(v27eFgt>2D0X&tjbj@hBSi5XyZ*huuR_(fYeXfBYLc<^mO2~vAM5?P&h7Xez-}Nq zFJ}1~_Sf_yzkRNc-=xw{Kl0iFO6;$dE9`!4as>N#8rSd8GM%oAHSDY)$sy;;MrLEc zy6ie^9gUA~{yuufOK?H8lh#JK0r12Azo`EJJ@r18nzJJ}q@4n*y_MdAk_UZg=>CVS zr83#Ltt5o1vjTUpdv`nihxh45o$iiIDmd~`iTKIEyuu1QC|=w4KfZ|TMfrk9dmr@c*repiIG!{_VH>gRIGt&?*aO@SrlLubHnJ8n6p{k~J~T9Y z{QMu+tuw9rGp43wSycL4+gjGQ>_Hv>0H<3{j&0K3=(@UV+-4I>={n4sa5f5&pxf8a zeg6QT$6D`HuN`S2krKnCzGAz!g7zC;{{VlUkYcNuS>kM&LOl#mW@G(F_B}nS!p(T# zZy>VrSlNhCzFHeU+-Uaob&zSBEIr{Xbt_^?CXNWXlBuL^awHDnNj~}=fj#}V?0U{r z^Y&Ydc|{oMNZ-c(>+kcf$%8hGEEaL9HEq_3`uA{k!e2eIk}xMp-=2 zyj98)SZD#VJD-l@<40coV}#r$G{O$`xm?KQ3GSi!9{rAq_}_hRUG@jxx>B(dW@u6Q z793aeZHOD}eUERq!07y*O0-Dmu)82de&bz;(=Ptw{J|X}xq53gr;U)g^9o4Lr1^?< z#QnkT)f5^^%A_=NUghL`c&PHn9J9XGDA=}?1KHburn_%@==?55@tbdQJu038I?4}! zx%nH{Q&}l(O>PRR=@_pD+fs7wN&d$}+xF?E9n4XZSs#he44d!o@4wrtT}%ea)pN~R zbSq02u1O4Xo6;R_jc z1CGci`gwUpbE4!|mV{BM0oS!;yw{%V@WVbj+ue=)$&iZiWu`8~(^^)r#n ziQ<)+*HN%7q5;tw+1IznLu9h@I|%Ez2zx2sj=ui@`To5kX=JY(M&6>pDBrQ~_Z?+A z8vJWuQ*lUjj(=5JMGtX z@SSmA!1ydJa|BYZHmPQquQZYW0GRE>_s}ojw)-C4`kxh5?K0h0$o~M#s+u?!F}jz0 z1NTqq2Cc}K`i#$=MBkE=1!?`DCTN@|Xbh>J{qmBj3?oK{;T2rIP+u{yBg&;Bnf?!f%^1orDZt|9UG)t>C{8bM$@rE7&y;5Ly6 z>^9xT$07mW&q||B#+WCa*XGRvCC5oUb5Z^_CQk?Dsy!hRNlsXV#Rw89*RdY@9^auq z9els|QGO{e)2t6;BxvY8^~iPfSB!i>_xvG#)INCUj3>%@LhxV+?4Km;_Z_=+^1tC^ z?>>JY6e_CkVllEky+gkGy9#sBdDutYeaHMxqn$~_o*xVD=xgAsV`irm3@pzPt|Y9f z=E5X9-kA3%TJ}27=vJqQnyim*n6Vny75JAc$$1|89>-++@87KuGc z`n!VN`)lpco+p|qvWs8pC3|^QHY~#uljQRuf|)jZ1OsFC9Tt}2G&nV)WU(w_ILuA! 
zP4Q+r@1lK&&wjAgaV4Z#V{;0Q&Ee{Rv5o!-4ceH=B8c38C8IIe@`&1^N3a(}>#^C_ zzPjf59|_HB%#y#-*~~8Ghw81yOt1d{(rMJ5&ZB%(dPib+*IY2c&xF%Q9fxr84giS0 z0c1YyquD|2u9IYNbu#y6wMh@si1uttU(53(-n;Q9=j?yiq<=WE*3(7B#NuC^z~nh^ z>%^`tiu^jxR?OeMnV#K>2CSuIk%XR8Nyrjedj|w)pM@h`x=WJ8+f!!k`f&z|idT^p zloldgF(-Q7iu?EMdhT9T8n^t|t^_>USVgc|zb|joW9Q$jpv2-!*;avXJf*p-gRZ?GiCN!qtWj|TXnl$Q08%^k)$=|ZuLY`)e;p(qUwEdm1ERLieR3N1KHU)>I~8iX z%pcB%6T!ldOF4aOUm)v!zv+g4{{T+Cig+r6N#a(nN0%f@NaxG3J08cd{@q`L`pfA!Nw7r9@!cz-T=A_~>^2q$ zjjNY+c?V@(_fP=rIG)6hjdY*HxuGQqpi2l$Az64y-IyH%zkP3i<<$KeD_ZQQu zDc#)OLRf3G$jS9-!1310p)KwEZ~OGe5q{n}9(#01PnL=0sA&oASf7Ofhkm9p68V$6 zOBh*VC6yF!l7;S6{m%V|zWN&7?3A|o2&G#21X$xnaHJq$8FjwLTK)CcEx4RS_SuhL zwcz(gY3uz?>)ps3GZ|I5yu9NKb=dZAe~#S0UYoayYty4&8c}Wu8MU#YGZWbUzo%0& zPbtnlSnNd7T9hg}w`pJm>M(!yb<8EWgsM&t=%#D`zpAN=pqdzGci8ct~`fRQ_op(Fz2aPYIUTqAfB{!%Qb+=@wXOVhjFqA*FPzM1@+Uqw59Pr zA}X1i)7{A9mvby7qlWYvFRlxqU04poMYXfsXoV;)0P4H6oMdJ)QEIkf&ztc705D5b zSOLRvMhv54)!DczfCDbb9^2?CB&$ANcaFR(lgU~7>oLV$)m4@ti~uabH^~9fI$^MO zdRJPtyAKe1$%~ZK`gNH=kjWP?k(Y~nS%Mz@hh}d5V^72lh@J_hWEoklH`ECIUV6p( zIB0LmZM1-PVejyCy0wUNgxXTEH8Q;HCy!T5v(~fNptoBaHH%edIWdx@WhCJtV1_9a zs<##=*1f|OC%3pyZx?SVlZzt-h+Rjb8uNcG8bK!+bbFnCQV+g|>Il=~TiM}d@$unl z-)W(g%N{P%$ty-(c}d1)*JdPk(cb&_0Be+9##XJAo!+)b4_R(Xx8Kqkm@fE$+B+(w zp2Px2u{}jurqQD3)j6AafR=cG=Df$r=BdS6Hh8KDBY#UWCe?r&C*Rooci26;YxsFy z!uWn1sHC>;WM_KO+j2t0j^P$9GZ6bO+UzyQ=Y21d&MJSasges$Z>E!+f<8uB<$V$e z zOK-=bijiqK+jvxY3RwY+N{B04n#Cw7m|7JnA{J-!5s-j5d53S2M4ulm%ze7)DmsRhA{+vv@$(NLz>q|pj{7;*wogcYrqnp7Y1+r+Y};yR!o&{Z zSmROvMd7f)l>_nYJ8YiEX4R?maa8)Odc)_awIaVQqG>qtC)vGZkUNzG5u!R(ABl2R z^FeXxD_5;8u_=iYfPo0+unm1Cr~jdfzICm@al@{A!3zFW~=fvsNtBu|5kT56_Gr=DfTEDoMxa9xl1T!Sb& zQpa3?9rf0lSvXm3;-jvLm*z6sbRt9&Ym)$xjwE`Rk-ZN80B!Wj#yZ`ZY502>m0^xp zQdvt|0yLH-JYy%8ke#Vr_uGN7zJ=ZKm7>7M`j$K3H-fLlX8b=wkjGhqJ2GF|eJM^w zSriUnjRD7=`FoxA?V-{BY&fA(%oz*PBl&F=G~=qr*Ts?85~;;Yki-puc6ZdKDAuo- zYtm4bIT|4>Q?rxw@woFsa$&?fsM!n4N>pt}ZntJF%XS&XMy%DZOtHl*_CVv!y$i_f zod9+9_Ev;Xk%li2R=>_kY)SmqkQoGWvDPS5SECkp!N_f%YMf 
zw?X7`jgXG5i=;;xSYfi(X2@8>eKbYO+Hze-1 zqLjnVAx+p1Gn}><&Oq!_pFeZ?xQ%EH^$%L!N-7aeFNd=d1&8Tn(+L-WEJ@sClttpe zlkmy&zPHbqwMPq$sIxECIEpwwnY1Dw%-Uh(Sfvmiy2Nyso!$(VKpLa5>a^&NYHdZf4FT*f-Ndy<*> zJE?`(0V{-z_CdYz0E6G%*Wewxl~%kJ*u&PQa{;7o7VNZ(X{p7H=Q6~y+VZ6u=HY@c zBz6VNdOS~caz|d`4Txs7PF@W@o>wSOmh$ZJZSO#HJ%RW8cO4_Am2sVi%rZ+cpz)Jc z47^fACz_GHl_h~ahJXX$nu`*)0~uQjjm8VsFWinn_P5R978IH$F zh{;_dYckEnIY```MnKzYKDUe=`b!Sl-%=lmABGfsYf0Y^Ja%Q39#|{P!DfxqipeUn zg1h7gIJOD%HUT>Zn(jqtL3#|esAq-cia2H@_5F*-uSL0pYJ6K`t8hHx+vuj*sdrG?h`Hi4LQ73`cg@Ro~_;zR8%UTHMFoXA&+B!RL!5!=7H@4mj!U-nD~LEr70Y#D)3bRt9$RWd#UQbz<=z~_&4CM7k?)enx0jHOEgXE z5!$x_^2@I^lq_oM0WJKn0MR4Zbo!rIZcNRd9UhAHDzu#>4LrTq7W^{*0A=3*$AptN z_=3||`l!-c#$;wn;E3N$jOeC9&IXx);z|Din_U;fU+nYvH{kr7UM#`*Dt5{xq(~#5 zjF489wjl_TNZr~+;kE;V6&vEf_17}=sZ+(?$W~hHJjAmVhPaXt<%x7cssx3dI=^&C(%FgJ#56ka{Kq!vVpxHpRbRvQmd(#oUY(bhtMf% z_MNe7-4o-dU2sW)rEdXFw9`c;fpYYC+lvpD8mqZe!IPGibj)|(@}FXR&;C1p7q{YP zg6`D4ZlhakvCD?F1d7NWSfpj0iDAYG4x?>s={bMMI=Z?kMPf{inovOv%8LmTzni7^ z^)A$aSnf?7>2!7nSS@^he;I15@YIMdELN*}I81ZIq^U;;STCiSO0VVvup@mPJe0sK zbZ}2)GT-QO#1ZfEzm=XQ!Q`-ci&!k>DKYoyOwd?{Pjq4Lzx4;?5z^15vrblmc!*>q zF#SQ%9esiR*1v^AC3g|xs<^hLVp7~j8NE9WWN{v93j z6Nzh+6@??YWzZ*I9-b%q9=?MKi*6xBoXC;o906|cDAF=xm zx`&75OJHeE03wA;(&h5}_ACxVG2aj^6wIy6REMFz}XMWk3Ss ztc@r=w;5e;_dQ9DXZ?&ENc%4D)82bG9!rD8!X`!0)`F5x&WFEFD$c3pup`pS{{XlL zM1Nv>8cGf%aEaSxhxO}7o9S$MQgQ$u+V|J}`uaxz@V;@S)ZKMOMne^}S7c={ykxI( zp-*ie-~A6&_H8fHkc4fLFo1!z9f$+<(f)*=u<9om##>1ug_X|uGwgRgyDzyr-?!hU z_bkql^DIL*yLKJ5pbzSQ%UwrSoWkqStqmy4*L>>#08w8<%l`lfZ)5B~eGl~O2`70g z33;8O4&-(^Bj5Dx(zxs&DU|7z_6Hu`+R+3506TR@ja+nf7CCtq+U)*^+;ufVDTPfk zYAk7AQt-oRkAFiSxBhnh_BwM;_(Y6FDR&zV0q@7yf%@xu>qv(k-QZLb1H?BSYeQrD zogR=^hTIC4;G33<30Hq4kXQV=pEGKS(NW%}M(XAa<+{x)d9rYk8bZLG4G!Rawn_fr zbwv(9T#gChF{I)-BrX_oP& z>T128WRbD1-{1Gx*GcS)woetKRoI};kEaV_9#NJ;6zjNjFRlKEKoRo2Ph_(#VPdAg zlg?xw{*ue?Kd9IC`*j~5iLY9TV>@`(c*?07A~jx4SMAsU2kLq&!?xQX^2<2i9y2H; zlicr*>-l<;t{*APtD{snLa%g+KM&QZUKI1ufo>``;kTy!PV_%bsOSdC${<-HkP8t7 z1d-g4+4AOr&IF)j}{r&yF@N1}^)`Y;~miWF4HXv7U 
z(h(6=ADEu!eZJ#=zkU35&0tA+myOJ-i?>ug$sP7KJ9`7|*z6yEyIxNga#-n19G*yA z%ft`Uxd3+{>st36k6f=a3{-B*3{nWEglMjc?Hkkx14XoZAAN7GkJGByu}6v#4oV{n zIM^&d{LoZ}R(rTlwp(IIeq)*({9tb1`o% z+k4mtN3qt&z&*$5{WsS3d0rnVL{>xqCXb{LJD&dD-{<|9O-?B;M+kx0Sr`x(M1ni( z+qi9zJO2Qlx*1YuuCU6o@;-#^t*F`iZ?|#>>PNP^EZMGx2-OuI6VK{cva2fWf)5?? zJ0H~ky?MvU=3O=Ascn@TJ@ih;b{^XQ06ws!wfw;p{$NQoyqIz*Pi^-72|qs_G`C3B zh#8_2qDn#Hy5$)fC3o-af4cQE?C4Z6($)Heb|gs3XE z`HL&Jl*sF#hh3PT)9yXSk4W+IPc(}JCSNEyjg@;HpM8IDdLvRpBgJ7$AWafZ%4m+` ze;)nEw^KcYl~Z42W$UV?V-c2frjVqa!Q?}4x3DAp_t7+GSrK9rm1fiP_c|o@KK{%; zJMXD^U5_M=Nm%j#op&dEMu*P(f1g{aU9lq_ar1j{QQVMD_P?&dKYxzAaPrs-!mtjy zt%1j!tpv}w$RswHs{&7N>A(8*<&2eqKQ@>TS@O`KP^Z3z`}RHm0MDtpvT_Mb^<-9Z zeZ%|wkMFH-_19bAwJfpEGDI1Ks#HwHSpnA1u??fY@B8$JVa}yERp6aXX1-pOl;3lt zgHk53I;oqngU zQ)8TNiS7xAvv~giF(1r{$&Hyn0QLt&e#86p6*zA^`zTmNr3+VD}{s_Da3*h_XocBqC0Q)Jx`#tHua;_ z1RXj{{{Re=y>Tb7-u2k29WjR0*{Znm(Wizop!Wtv2VUdYfPKff`|GTz!Y+oJwoG!l zbwmy#{&x>CWI#&sKI3D)wX!|_-#ydVC?xZlq~{K#EC~cgPPd>pt$=%b5x&1$ntG+@ zZ#f|&n=HIZQ@-a~*1MhPXy~7j*1EI9EW$}jfQk=dK~g*Dfvto09UwO;ke@Ok6O^$n zFZY{VixC?T=VyPEfI$0q{l5C?btuiE=@4L!{8=0Z05`Ee=kcz?=da!&vol(aM))sn zjr(ukUu_lZJSr78CovB)yiUW^IQ~*c*y#TNUvKNvX$-B*hM~-1Dzg+PFD&KTiR^?D zJN2%)m+DF(4vu@N-og9t->+v$Ac`j?y#yfuYfJJ^Z`be8GUT9w-}}4oiPI7{`+Ie5 z7MF$1#1Pjemf^^*(m)b7I^O>PU%5X04K;|Xp@}8s#B7}tv-kUcoiv7O_aZZxXPQnt zwnz#G_3!V}JGP^F)#a`cUQLx(_8!N%AJlXZW)TCGvnXk-x{X2y6RtZu8vQ;xF9nRd zleOi=845~(-re=_(+nqu26E4#`R#T&0=!T*y~od8FB9QNY6M7OBZBcAyX#*+uekks zT}qJHZm8MbbAaHfB@tU`;WiJKurvl zWoqCRbSiX5U^F{}{dV zyLZOEABP=)k25WM-h7m1jJiyz$@L=qrhz1VfgN=0%UaE6i)N#$_RFuBnE6i;Cdd%P zpnoX<(yH%34{qn7sK&k7oC&)45BZae>rtc*fS*&pgd)5%mYWL&%*IkeW%?s=rYLIHP!r;Q>#owu&w$zxh4L3C`{!)H+_ffr{Q6r(NWv9wAt^4!t zNLaj(Mdw0dgz#a#4e<1WzbB8u>8@f+RB|%GhPssJu4IL71}e!DF$4esAd$XW?hf>N zaSdfWpxBsS9w-c^rh8~S58ZNa#cY+lFMzW1WGH$kj%ropRt6_%4+##(_|=F9Uc~(O z>*sI4nCsf{t}-h?EKxt3GoNKZJGQ@WyI=cYrWeD$7O#!O2~f32xkR#$E+=!z^cO>I z>1HHsvmN!uKiRT{i{2o&d5kL^YOV>Co;}BZZvNd@LX&7=DnFX~{{W;kU@(t+j}m-6u*p67Cl!A{Mu=PIs 
zYg_Dn_4@V8Wm5Ak5wiKt-?V(2b#>(kPuB7>&cr}D2|Xvc8YF(e9;k6MKv^TmP8DL2 zWN^n^LX9r9-??r708XbZv~7^NlA#z|83qA|o5>+hWmQl#pbc-n{rcNw>QF?-eK(hR z*$kzeksN*52EBtey^r-C-4LkW@H;GRah$o@ZK`#af+=m~@8hjBGR(0_Uy^_}qz&-{ zvi9HKTj^pw^@S~3nY^`?Vj%59Yu~W@{$a2WS>-HCW@xM9>q+IqsbM(RE)tOj#~vJcNzjkQb5t~+ymd|w^0XQDQe?ab80-fqy0fok;Oh)WAgt1ndcu$ zFXaJ&9^XCp-|l)c8&#=DEPZmLi8&Hq*Q48gy^rjFoOW{H~z!Z`_VwuR_J)_N$D7I8193SXrYDvL$p@?lgN3Uq`ME zohaU0S5{>c378#`A>r-6aG>kHx4yqm^!W%yiwi0{Gjk~;9ce&Nsx&p*+xO|?G#5SH z;+a!3-ft1gHhW?NERhwE2GEX0_F?z!t$$vZ&{-`rqO&x!kP~|C#ftX+zWr$*yiIuFgzmyhx@O`><$s>U{{WX-@^IoH zv-Pvvu^g-eBYtfr`?tqq-}`h6ScfGP-%T`)^osgjX`F178b2Ecs!dWsVA&PHAZ~`m zPiCzbQ0wMN$b$WhYCXt5u^-!_aoG9teD1{yO^^(_x}3Q$ z)flXqWQxJs%L!Ah0ypuoy#cPDc#6v7JSd(Nr(uN1(d2S+sqE?)tZ?t=j=?{!x@9Vn zcN+~ncKo5tN0M8Mo~(D{l1P~Y+(wPr_^f0${{YPNMpfxeG}499iomqKU)(tZ{Etd2 zg||g%M$wmoaVkl&9H#qr{m(#Vyh8+tD^*1k5>Wy7Z65m{ze4BBpxhtQ9?}FWD|M&I z2&mad25^u_{{WPgUf}EY?Y@a|QU*30#~&bG6j88LFj6)5*T47aE(Xo`tCU*^(wZ{5 z5RJ5xu{$T@K=jq}mHfX<#?*+Q@-@ork=;-2?fo~=2M|eAvg0vGs$}uL3X)p4$T!QKO9eSf*u^m&*dmE0PSZd*?l~vL|uM)~Rwu2Va9xKQW!~yTU zk-ml4NfeTK(a{{Re3#PZ~;C5r~UFeC;ESWm?O^sz1J_uw_MKy7GyPQjeB ze+ys9a}@aXhB@Lb&4RE4%x*(!+av&f3X{F{O&&=9f>u9X z*f!aTZF>%p$kngWpPhzjY}ST5B)}t7Uphu^5fj;ps)6mle!%Ocvr@##4S|=PWR$s) zl#sD1%Gz~xZSrR=%RqmJk7MfT?}l(#%nbPnqP;FdA0%oNtPQcZC3Ld`-;g@j!k%4- z9Wnq0P4-B9G!RUnb67a3P`h?!5m|H9{38d(!F76Rk1a*YS$?xvRR{#M8=+%LI@*6k8P8ndf4w4lQGX! zRNlt|-W`9aE15fqiIT53dlA-xYR09}KC7X2b!S!zScuEUHUVJX`v48#Ygt(tatY!I ztkae$B9gl?mI(?cDdA^2D%|vYo;pSz$3%(o&`@NdiL>;Vn;x;Fyv079<9SNh=O-7~ zVq1d^9Vi>w>wTN}2q;3awOmzGVIh?&0bV%dBo-n-JpM4LVbh{OS%11`tUQEA-?beD& z>_=hpaFnem1cl%j^I*QM$IX3@eQ%~O;riAwGF`Wttu&dbcjXHT5g>^gCz0K?z!@EY z8ygA9Cz9EEsDSh=PPH!p>jbIhhH+L| zOGLm(z^ey33WTw^b_3a%zt6?bqBhb52Axk83iDI2l}t;Hu8jbWN!~e4j!&=_HCEu! 
zvw14az#T8|)dpJ}llZ-{xO@Kq%EkGG<+2_s^y7r_GZKZEM8yIIKBi)eqE5O?E*_Pv zwR~1rE-Yqco;d5{t3e_NCM2TAH0!8KERnEaqif~1*q={F=du+pU3^!CtVChRPxZ@S z3js2gULb-^4Ua=05hL&l31T@hJvXvz84lZ`KR!u7Kr)zDwe)K!@A0Xmdlsgy(nk!2z|tMb5D}IH4%%R&^8lo%?X$k5>fOcHjpR4% ziE7V1OES`dB#b0b25AtMRVR^-G42T@uWfW>HnVWstp)}bveQHFC1VuGtfiJ#CAJsP zHo=%NIzHg_BQWo$B<{S|J-1d^T=Y~VuX7<@jFH0_i+lcZabo2cKoCl*sMo(OZ0z-T z^{m+AM&zTEM~Mk9c-_5HF8!O|fjZF81K9P-@LxqenDZ9qf@v`TWaNe!SBZGs0Shk@ z0C`$IDhc%O*bb{PxjCVX(DNE%o@+=84s#HMu`Dz}BsX!c%r(|XYe;tg01C=g!(pz| zWlcvLA;_D#OOJ}KCW+uvDQV#*PUPru4a9pM`eE2_t#vXos$D7ska=wpRxB1Ry0GHT zHq5+0^r#vn^6lRowsQVLox7Il)RmrBm=KE_7L(Sly}?gcu=da!-nzx6rw#b!f@hU1 zD#XHNRI$&Ds5!9MJ9YpapO0>`qr~?*+n^OSE3jIa^W)^Uk|7IWELf8sS>Q0U4jfUG zlKg(e_F=9{NzmCH2KH@YvQTC5%LH{|CSh45RT4*_Sy{c&f`yQs?^^!=3F{1X`BCzi z_-2lHtJsy99eDFn!0|kfZLY_@yAP4o4r2qEi^DN}J}Vs2m3Q+Z$kdi+hkzVWL-S+{ z81(_KPxkdPO+3wQ+slI5Ra|b(xLy>+zY<`nWU5q!FY&c$U~2#ukNyWy{d6h z-^>{7xV+K zZRImuYQ>DnQ_0id97eNkejt(eBAgL4QxqTYQwZ6dGH zO3mm%8FW#&07m%@D-8fgLeKbZgtcbP>Ne}OGa*|ZMT03x5QpYi{!*qc%h^jE`nzg( zF~O5c^Up)UDUN+D3C`y#iLllYDCu!kP9~X zHuv`II^TxLSHk6K!5Yuy`BGP~$s+)$8C(>Q0j+s#o%@2rUAqjiDI}R1xD0f(D48Ob zA2LQzBY@0bn3T7uMio^-Z3Wmp_12rx&q5)IneO5)JOoc(?R8@vp&(-Mzqn68Iv(V= zbMiTe0pIx_JUu_fMJnzfkS!ywp(p%y!o`*FhFVI>wjQI`&eigwnK;E6bd^7v19pl> z_}9L-uD&Ai1T}o?6O~yObZqM#9(H+Ob{yN24@Slxs_;NDyR)Wm5 z>5C9&uQl`^L&gfseJ%z^f!E*Vb9`DSn)DyJt09bUw$vT3{W|PeigM)qIgzdcubEC# zsyk~@_XF-bd-ca}qhqtRXL#5xxV*PN21IQi{QWxaABRlVDg>3eE62q_2frc5{->+{ zsVxksID^-k`cJ1=xrf1U+uDlZLWMYy$K-pSr}9Q-kUs+5_D}28w~eu{M)Z-k_C1Lo zJO2Ri^oUoEWDbLthW36({(q-ktUX{_9J#ov2Qg|{kGj=4BND3M@^7C10Jm~FfAvyj zQ_O~BCyuxzVL?8`@dNGi?ccEZ=>@nvkQK*f2V>+OiNM~BKhQcAG83sbNLBYBDEs?+ z^-=FOLY9;AJ0p#j(9K?E4ts;|zMVsIG>KJ16UTcezy5#o(3j`{JoCQ5=zD*CeE!?$ zD6A}O%2SbU+`hwo`*kHo3V~lu=8=M$E9qy=m6m7N_8y+c zL)f3Uzf#WRu$!<}KF8yySnS1m;78_g453flp8fv-vFcj1j$Y_hQLG7v6jf*dw>5iY zw0j-T+y4MQM@}KZBE5EkUob}Og$wP8029Ce06282(bFW+FwtENciz3X{X2C-h?q}s z2VN+=8K2ya+VB0oI+m-QOlDTn4ROj)bn6v11fVqGH5xaw9k*}Sxjj6`RaA~?;37ni 
zMlc6(gl~x-pmo>(0DGr73RUc5mgO~+$s9qOh&)b?{{Vks@BaX_be@fdl zwPpoJ{{VClpRcQ_1%BD2s@lwi1P*TG)>%=d?HUE-r?;nn>JIyS^fgE$nlK&RrHm8| z4vzKM{=XmR)~hJc%7)|b%ur7I9^?H$>4h58JaNff4#&S1_8@=wH~S8}A*`||8FHiW z^p0bR$T7%NY+-|((f-Xzy61}=`$R0;B z2?x0hJzT#q?hkYNefs($*cM!J;7T(qlA0*Lok!0L11p9ephIjDclrU#?bnHxq*_TV zV#-9v*?%cz9f|Bn?oRt}N9SE?vtx`1;FsEVECAwuRDt^U*SEgCy8T|%bP4clp(U z?muI#ALvI|Kyg|L0ui*bPYD5*HrH-sb^whH{{Vdt+;xqtvsjuYQzFVVvX6%C$A6*i zqxbLg(%BQwQ8YVNU6P@B56k*%dp)~-_pY=-VVy+tFC)yAVm~?U-+u@H01w}$3Jw0t zJ9mFlnUS)xus%}|;Ok$Z8}^mL*0G|HQV+Mb{(o=V`Wj}e_1-rTBk~NP zp?6*HznHqv2mWo3*Yq5wG)xr)>V~Ca+KvskgGbDa4~-q2kKew(PMK+9KT@*0Mp!dQ ziL$OXccI^U@7v!;tz{6YFHIOmdG5cupV<44e#G+q`qI{to2{C!>Xsr3fFH-{N16-H&10UFyooX`_RMc?dpN zemr-f?eY)LeRZQBVn;^?QzHfQV?@%lfDYr2%Mb6{xb3sB$z7bJa#)f{XZiL3i`M-_xe$op(W+G+VAptQmj=kX;pDMEi0YB=KND^tWO`@22PQ_aMkV2t z;A)MM<8>F9#pNtqo}~!XSB!$vF+XkS50j(csar7F$6N}~hEwc7*MHOR*JH}${4iivY0BeHg;;Dz6%_&uD zXXwwBE}Pqk9>?Qj=lAF;IQXW&>}a(;Gyr?*uYe0nhwNP}718c{3PlaL^!ez@E3#GNqz06&rK)>f<=Id@8H97K}5@Qwv3R)%jW%)d#^4xx-x9{!x zA;(!tlgTT`w01l~p5cy;xB3!4uTeNbSG34i--Gq)WPE!hmf?`36y#{1iEt(|M;uI3 z-GFB#f#0#$U8BUfdcF~q;;PSdnUQ0TR)N*$l#crc^2WSQm(s+LPX0MJ;X=(F``IjA z>0?BWxbvgfWsti*iS6HLzdiNWW^Wv2{{RoMH7w=4ACaC+!)Q-#Y1lMi{{Vx{^bM|w z>AHc5WO&RM}2D?niU;(0(QHHMi$> zPYc_mDnu)#d2)Vouq+vzGa|B%I?!%kJ;wUV?m`N)o{j48)8_-n~&-a?HuDZ$KW ziaVC#o}X=UJ;!c`5a!f)?Ck)14tg#d@`#P2!ob=(KYx#Z+ONdfJe`U(vLyME&m_*K zB8O+ca6mslZrD0)$A*2$71eiR^n1(|$dbHHiR?Ld zI^S-)II2dM)0oC@d+=SF?>K*_ma|g>ue$14Ule#!tTklF=WpZ1qIgM`sMA9pwX>*+ z5$KQ$2+p>DFF{+A#rV58yH*~fL1IT}XQYu$Ri9myOrz9}t;j2dJ-4lnhVdQ>Jl&{m zUc}S2Ya0X29L8jgKz3a!@F!}dd&qaa{+&}B5n|~_6lfOdNR=1Ok_zwsCLnfGqv;#o z_xg3ktyPR@4Hgi-S-`kC#K5=j?+RB1kMbv%DV#9W$k#`B*@=oX5FuldO|FZbs7-tO zp4vSKaw2QiY@~CUXJ8sh2t|3@YN_l=Zp8N9_rIR4qwuBA5J?>i`3TJ!CSGxH3^_aO z2Ugzn4`IHolbKL=CqcA5!nw zj^K79=cwCo*`tcHIFYWXGD4{o$XYP%ze{|syOZ<1^`jA*jw>~*HHl@Po5IF5Z7hLQ zl?40fp6B%)Pg$-m0k;y6WOp7dZogl3*Ce+`9?tZvSF=*10SSdmubNn&IXCP*K4Wnq(Vv6LJTV7(w;0m7I6p z#Q7c(%VNlMjTPW@1JnZoHK4qCe%&2{vdt!cieuv 
z=zB2Ga_V^rp&nTB<4muhd(e19?&&Ij{20{70_Rd7M zGCAh4UKM0y4yfT;PGp@h1cu)G>s>3cY=N?#tTgvh+|0jKS*D*z-du66yF84&$?fgi zKp(eRQOD#X#h@N#mbkJ7Kmle~eWw2aMO}~6tus}z4HmObO1U6aW|(8rx1viG*JJV1i0)#u5wFurNhJk&utT+2gg(P?9XCKcCIX8conY{1fqey=U&B+^ypbh z%p^vO#*v$Bx8icUcgov#i2XaKmQ(Wb8XR@@-jD09m`ibdi6e6=kOIWSYv)JX=XxD3 zizh-F@<#9%No8oBCS3x@)7`%N*T+L#j`K~7u9ErBBFSM<4#TMY-u~Jk?|nJ6%bW{E zf@>OP8jSF(S-IkCHF5^b^SzI&f>5P|FmU9iQ0IY1P(bdz*q!TLS9mYOsb@V7OE;0D zW`vSi#M#9Qka;(2U0VMDQh+PyF!IS#IX9UEM?e5aJH>dLDQm2CK{XY4O?Ka_Wfd?Vs-3Hbj2Jz8818ELCMNc@RIi$W;mZX~mEGH_bp{{Z&E@349b ztz2b1+>|28*}nCmf^f?pIBPuf@QquysEFc_JM1QyjqI;h*-VfArNqV!67W{KfQ;z-ML z^{J50#P$WerXKz3bujsO9pAl|st9zqO}ye2U2}@_DlIAnnF%o%yT*=NNC9KZkk-H; z5Pvghp1uAWnY)10p3BQ%D@8L%M3KzOT&lA-r`5xL#eh}c9)QbJp<0BPNbgArc5w`r zVqRg8G0{PEBOfQpvqK@3fd_oq zgnD3mv2E{N2jWk~N{@|$ErqEK=DYP+!!)sZk}O*Vg2}^?1jPQ@`2d~lx|5DjOM3H! z!2Zsa#nu!&M^h(@#`updlYuL*C`62cV$*Ok4$M!ik*$-hk8b^VGItUW_Tj8?_l?^NPs#GpB6dDW(uj*`M=sja79A$uAiHlbX4~@+h%48Qxj-G9r~}-o2e)DVB6rm7aeK^&f`3Y8kS0yh*0b5V{vfxC z^3%+cHjV^XOhm=cjA723WRNz%sXRc}a(exDJ(8d5CaarZ=^?HqK(Ue(FW7=fDC|y# zxeafzIpOVF%g2{mQ~gbxOL3HmJc#F$(GpiOvXw?>nH5fmX(Um->!#AaGconJi*wnX zsY5k-vDlIr(n!@+q%S0jM1%<9-z6oZQaugW5zOe$EPCX;0WOydEj4o)OSrrE`7hj` z%ytVjZqg4mz?Ycgu*C2Knvizipy>L!X#7QHC#xKYxOk(EvO^=uG>Xx)%IOHtinjcdzNNx6&e9`*6$DDeumTKEnb1@ z#U!g;B#?;MGVtU|vF<{iTtV&>e1p;p+Xb=9_wu!*mnxinNbK2`cyV5N$JTIkgO@!j z!Xri-3H-7TW%EBN?XB$b7lm+jt5R-4uZk`|my)aWil{6xVg%^22{>uL{GB+fkU5gO zL$?{6x#26%nW=8+mn0JVPZR}Wc#srWlVo!W=X{2oZO^M*dZ)#HALTFQ?7;bZNFx!5 zEv$SbGm`^-N=vRRe7RaO#dcYP4HMPaXPC=3;kWzZyvhar(XU@Ck=*d)srZWrfaLKM zmY)7b9D3fK0T-4WfErDb5i~}C1P^5aI{|7zG`a=nla^>iWNc^BC)Qal*ka6gDB@jiCBvZV@?)VDaKS z)3OfP(2s6_!G0IDEZm+TiR%n?TJy*n%PYCG;bV+9uv4!!%KHKV1QzWh1F8aM4!@<( zG-^oMd7p}^!Wq$|PoP9JG=Wr$AUB0?pD(?^_>=v6Y~L zjmTM>n@o(6!{37z2%x8!RFS?tyKmd8tlx#RQsONvb5}^|W#!Gv_MmYZ#6)p2Y?6$~ z;DsYaotN9W>2`O)_{uo=@c0-qjZJD!@z`tOlG~!I$qcX<*zyR>$V2JA+?bxNb4@}g z#X7wQUBtz`*EP${Qip}>u(f_bip0uvV=Kb{V86=ZC1U%5IS6y=9Va9b&HAyQ?c2no@OdL{{ScKI-&61 
zCn1n4OBGg~>kAg6c`ri*tavhj6v@YtBZiKKf`WG7l{uI#Lp~~vC?&Z$s}3=1(}|f^ zj?u{Ob{g&foHXA>M}VTHCoo4-e}s?b|q@ZK@>!cbV)`#W3i%iI*O$^ zBGwim8z~1JCyK`_ddg&{t$l1%XwT6YYy%U-$=Wl;fCvM85!>K<^jgBd!t76O)c_+_ zmO!(4lB@@PrdWaY_wVXDW32xsR(DvtGsN=emYuS>mh|gprPsSm>_7 z+dBYsKn=2|$S6aK82f8nyi%1|A`t@wFuX^M@^563zAB)vBgu*40DS`{x1^b*h8e6L zb>2pZGY4)e9ypG|>~inNw_*T2`Z1Q9<=XW=sHr)vcm0~GtXvpOiN>Ge*#!C{XZ_{j zOEK06!cgxbkT?SR0S6C&2X2t~TmJx0c)|x3vZkg?(JGNyttTj1H5+>d^{+~!?w0POmsFndYK>MbaJ=j$JlA)XrX0} zd6&$MWoD8x8H$0ZV;-FnId5l2sh%md*BA5a$xZy4FD0&Q5##2osCZ8*xZ#nt;Q8d3 zGWnCoYI^X;7Jyj*e#OtHdL>9+<9i`Q*=m@Ywy1cU6>_UNl_9O?s3B0Xl3+M!M*M&T zo=kPGW4XK8oRpKbi8A>sk6V;6B$uQJj${CFUO)h-ng9@>b~^{Gr_bKRQcDs;7F#4i z=WfXcI&y_wt#kl{I`8H;wt-MjtF3eJ)d4ib5#jIzpH)!BVcgetb~};*`$};~7k9)p z?yBM}K-J+!ilCy_?6GzpQi#wNUP_M6(s$sznR|oMi+S91Fg8CQfXEi6LfAI!4>g!+ zLm&e|SCK8I_8!3WirS)5pTu{s*#k)%~AVmVMhl)4fECG=@PXI|%}yAEAU76@_DZzJu&FFs}r zqWbaw02LgWh_Us~GbbOJV>5LWKpT+?>=qP^0m)idk--dD5UO>cIVzULTeu0$$Gk1U za%UCcDz8qG`LRsy01=QRRnoI>T*oCl)>zK~XFNgU8g#8^rHd6yC0-j@HLxYI7A!;S zfGQU9)j|jJTS1w|w52(<&t-f+EHyAb9l^FE!uZ>e#SyU4tl5HPA?C8(nf-Dqh>yV8 zO%OUS;(*`#93V^%Gt1pLG4z?9u(6m2fAfyYcfv*~W}7!Qdc@P#iu~~+#IM>L3(e>li{-f*jZ}Vsk8%EA>(?gnzlUmDpw}uVl`VJt!`*k+usDz#*}FwfDv5V|YM?S$ znN}c5oj%AUe2>?uyH(vJDBy-2#`?6am?>Rx*=WXt4NLL;KHX~0;G&c@s!kpfIVkxb zasGXE=;94CmZz%Z*TV{p&reta&603@?S&~htQpdM$tb97*M@_|+D8)W^76&pC;gqtf@ZbpS!?@r1^n&y$VIEN0Jo_(iZ*%_upQlTf#L$&tv90?K zlKLXMNK9pOhH#x1UVjlo205b|XJN!_{$AT3uj%&ROFp@b5wf>E@nar<9+vy-XJcJL zu4-qAQdVLHv`^pPtj=Cl0aW&Fjc?$OZlD+f%LUT@dmnX1wx`P7^f}pJppo0{uKib2 zl`-|rC6Kucyz~L&zT5l${X^kj>El^WgEre_`w{;Dn(Cgtwy@#mZ$}<_-x0lkev^o1 zUg*(2gUg0vs@)q2UE~k6Wg0s8<-hOm)eZnh8?0Orxyj}2hREKJA^LBrfH+8~n*l*wKsQ~wTNs? 
zzvD2v1=#!OvXlBB)1(G>!%8gHD~kCC8DvmKC+THawpYIe>`D0ExB7pVOljAGFDr64 z9&Ve6jWY7kJN*d%0Kw2cDR}BNXpF<{{SqV4WxtJ zPRDP!-?$`nV4ZB9EZl(yiP0dI_9woyPQSlGzCu)WdqGE-(Z?rqsdRO=*T^alFW0!~ zYy=~58l}(XQ;jMoCygD1e90-ryAVFZS~~rDA~%p%7FUpNLy7I!sTyJja6#?9vauG> z%U@2s`0!vy7VqhAZ@=%ZvPleZyl|4TwzOYNvH(Y=h}Qdj_B#0QzMT_wIXO%(4BJPJ zHe`_-Tzs-MzjY^F#`oX*{rb~P?7F;5$B+%6x^0gA$k8C|clbSfV&p63v%;>-!Br5S z&;hgVeaCV0)?ZT;EGyT9MDAPiqle|~-@j%a-G}ed20)U8zDxp{!)m0^m#H1NT@IN% zosdUv+F}@c`hMQuZihsZfs!;4F0{flCvRUhRy*1um(b!!^ch>&Fa!hrd+&YzIxsyr zPbpwl3;qwO+JJq|f}M`vWBc_pojNKS082%7@|r|MBflf8c{lV)ibH?(H1Is0Wk3a&%o%^2ZKVS0bc!_PSYOdfbn2-yrfpOOz=v`YzNU{{r9*;iS(#I63R z_ToqN9{o5r4bq(OMUf%aI1(w_Z)o>CygSh8dD7>`)=( zpqT+vtupBIQQKhXfJbn8nxkzA)ao5jwk|}h@~e4a$D7JBTgxM1oB(!5ZR~r3c02X1 z8@7-KhU`ItGpUSh0k5C=m$tQ_z;qZk8AX!dn3(yb=}PGLB~P*Aqp&smcO5sy!fP?V zB@aHNd|BL-bSg;!imP(g#*bsI{k!xfp`gk>(73HKy*cZ0mE?xhOsGJNDup0=2l;>x zZ?Wz=CjGc;<7q4dl+3_L6mwDl(&+D9_>=9wkJqI4ZhsTcNot*Q5*Ywk!yY>dG&TJ{ zzggFqs=V^YrG;3c?DzGJf2sXPNOK`MkIb6Tfu^zbQ-s=bARy5E+W>!F{{XS-vi>I^ z_%TNrX3aY`&lj;16?BP#*jFEwkLBOM-nw1j><&{mlC6z~xi%&%$BxKV<+hQ(_JDhz z*Y)e@EMJ0PdaX`6YVSyZ0p8p7n;s@Lj@B3_g_4Fs>58?T54A{;rD(|ZptH)r4Ihh*U{ne7cSkNAtPf`tCZrVUMAEE+Ii-eK45f9k8ZOAj~t+TK?cGw-a*q?LQ7uBZ9xJ>7Vy5!Pxu=uXbarQ)Gb59k-GeaZ7<%6+( zooI#M-{1R=mD}^GXC#70JWR_d1p9UcyZ*cAmKA1#D|Ff!78WlnL9iH)Zas(QBm4L1 zABpl2*zpWfK{`6j3!VN^vHt*1{PydES)>VHNr!lGh1-7xEm5n4$yk#e2$D;jMb2&| zt`$P@P#cNgA+4z)fFOWRSBY}{h%MJkwgi0ESy#_?ieiWPPQ%#0Af#*!Yxf;<{{X_c zsxODDo}Iy`h|0|N%asu5Hdvttc}H;YP>O!CAT>(9}p9tnf@kHN$sM0 zpTA1HL7KtCPE4L&DY4D;Qpl36aIsZJV37h3slSyQFj7Npz$ZhkmMXNC0=>9pj?IDR z%?an`DuK1%w{yg6L*x2zANIHWM)NJn;i?nFvIJP_$>^txG`QJ;xQFc1J zXo0DdDi5YkEWy3^;i~!KdlGo7$F}cHGP_&0bs+Z9ZESq|ckB8u_)#V=!d@MxRtW`( z*~NI8N8=o76MW-cY_ZVp}W{eL>Y3t$s?wwuZc9&-DA_t)QeHJ%K1CDlZhthUwI z{{X{{0A?XiZbf~G_V2F!eHYj92MXpffIthS^{VF&N|6s~?dZM3QColf`@c>yaDx(An!bn9;IQypoB0 z!ex|5beu6JNq*7HoWY!$qZxQ;tJrn5y&h zByFDI{g*>Sx#<=gA3V8(5xW(5sT0V#Djg9E=qLx1p2|PZ+n};0dkr1NRt*T(6dF)Z 
z$v({8yOIgn>%sYG%v_Vhp}{QG?Ar|+@YktEQl5`5)l^FbN()Z%ptm(!o@{K#)+A+a z40~%^Adr2>w_apoN;vqd*o*5VnItlH0yw0SMJ=$slzK<%dR1~OmaoNbxshR>HFAbQ zpx>94kG{g6Z;{gs^5!m0E7%JiQc^!a3N!|kFTU9O{=HEzZ9etSSNK{gmNbqlRyKgP zv5nD=K*K}KRS1J$mwm^+!RvL3k{o!gQKdHGupY~y?!C{+@5BHPZTI_hjWSOhGDwn2 zlSL6jva8>)Q2zh}+hp~w&QX%wpyU#>L_{Qj((Tl3f|0HFBj>Mn%>aOH#Sd+lLuA80 zfw`9uWa-Nga1rH2P|ocUZf(cm`0IW9?4JGlm(KWiB((KLtlL;5nUXQIfRb59>KpX- zBoC3*P75&BT4^izf|6K;nS*^Si5tkCrEf4P0k`k*(?g8E$LrB;k<^1ED{IZF=+DHn4XXkUk8a;O>uJ_t)o61@s#e*u zEi+h}Sxmq0Mp-2CQ<()>0bcFyJO2LwZj^X+@52VOD;zUHeG5l3dRigrJ3nAYSD2BH zyF@{55yT@y3mAi-3<(8?-d zKLh-_GgP!_jiv;`TjE*ZsSQYPEqGCqT%57jM*_5Sc||4Ok2VNZxNBfBAPsl@;Cd7j zFD^1!sT+?IPO-BUF|#K%Z8!yhSK`BcfgYWM(rVs6#Q3Z68F2F9?F^0kpHEB5a39PO z`8C<}G|?<$zQ(>=Ky|V|5L1SHR$8qbacerDm&JyVhm8saPh-fV7ajC=J8T|~y)xSo z{wv<*yHRjs%c841ADOLMeBXrXWGhQvR8veyyqz!B5=@36%s+*4G28I`b?e)jYJOyPF|w;P7VEzjQX-TH91+P-!2PtuP56H+jKf!~wPxpP zc^YXfM_w**IP@tX>^hJp_Z#d9_#Gg}{w`!_c*ZXhVlvn5M~}zJEVzb)%&U1^e>S=o zAz2+Hb&<8D3`&wlx~~sS&z9x4k2Ik0PC_lqhTfl~<|}f45n+DqXUb$qvwlXm=YrgJ zWa6rQOnW!5fRO3gm&4sy6eRpMBU_Y9gRvGr($*F)qy9b>~6 zEMq?osinL>(bvdfZA7X(`0b&W&N6G&B~&sa2K20mHU1&ixadQTl;ZQ3MeE|dpQ?@n zLZ&HA_rtKEH&MAD24-?{duf)BG5~^{HiIixX%(+OdekyW;%&8PZgU{ zrF3;}Dv)t6vJ?V98zq4E1oV!EUd5+NF~PEOHaSZ*SSqlO%TSX?8v1<5_{pGo4jegY zwn|?BWT|ERNiT<~;&I!=2gohUbR&Ke59Y=ck{I^{D0V!7C%0L6yAN+Z^*m2bgu=bp z@67njN;|z-WL1r$RAzIctFyOYtXtsgrV3WeZ%;cZ%S+oN>X}~rV}lRio3!x|P|2lA ziTqCN(pXfUNzatXVcwOR6lZQly}%yhq?bHh;q;RkQ}FUSSnJZ(opX5i#m4^tO)QL| z9yV~oYUHaDbRBd_Rm6-z{8uRP#49>I%oJ-_B3w@GWL z@Xv}D#VG_+Gzuz38wMoZbumX79_0c|XK+Wb3-jK(IA$K^@z*XZTS!Rh63cF+(3T_b(Lk{Fjkg z5z4VDc{4xe)tLo%(fYA(}sfAiTwBi@g_-8cF!7mQ;Km1WS{15XWyu*g63v}-5>8?;>UtyjTKLa z>SJey3?%qBwT;77`n$eaV-f?fB9W6!!Ge}8$P&yD0DY?PH#|LMK$?a|(3vH(TN!0k z3lkPdqVwht$`lTOm2I&Al5|I0x)@@c8IhxqW%Dg$<29i3Oio!twA_yLI4kc&b_5Q@ zp02zmv&9umULzG9n4_lBC@_u%OpeEw5L>Y-b_RyFSPk{h?Rz1cdmoNHc%@SA$RS)| zv3R`pBg@U+n)XKg!Yd03Yw7F6$^rrgf)!#95Dtg{_dP$u=jHIr(ZE`@VJ5VuDY7<# zO%=#EqPnb_AI%QBk89W}er=AWVarQeS#p)8DrKN1gQ68*P 
zM7BPmNerdkRlP-3NDK#V{4@t+*S4=k1vZ!IC$kJvG*4Y6^43`yONU*Od$Vs^*QEOc z*Mdl@S6h);iV4E3b54lJ8dPK^CL8qvUB@rgkIuRu9R)+0QjUP}S4X(pL z^!BCfTjOk8cu}s>w-v;O#(-u)t*O^-0EB`zM)ljbOJBn{*tweXJecd!2--nM5Pe5BGV%Sv1%OjmX7|dk;RRe5vcqks@ zn**(PDxHe7EX$S0W9M1yfFQP12#m4@Duy-W6qX<{9R)a~mT<*I;c( zvP2}}k*Qu!z=F^5*lTB}6~2QP5x;WBlZcim=K3s+XILD7U1g25+g@67KvJIGmtqQv zGFZDVGarn_&uV!bvW1mY#+BiD9hu*>X~&+}0q>$gI|}modUtI@;jDB6GmnL=#~h5u z`I01~%$*MWbiX#fa^qkRlY+!isPNqg#EfKc86w>TD8xuTME_v@@E&8)Y z@yN1PIs#{P(DKxVQJ_b#1QHaW%Xp3qawPQRogA@--G@LLXqbiH5$cV0OC6s3JdeWb z$YpR*Lyz#wzp_>T)k<)H8F!;&)ZeffT% z%>MuhOS3gPSXZfT<}=BiVUL7NgnM#xDZkTwS{T~twmyJs?3{8m2Ys#Nl} zAlspPGXWF&C^@YKj7-F`1_NB^k`@eipGq!~P{$ZDlb^ zC5I-^JJx)3%^%aq>;dxj&oBk_i?F1P5D!YS*YN&OTD5K86_JifEY^VXn#+k#k*|U| zBc15+jhoa4i>#e=7MXJ%B%O9B|w+G&fdV$|yWf!~%D|llW z6|9~Q2+uUN;8OA>zyUKBQY2LeO0c+TJg(fAKpP)ImNF_dz=7ss+&oVh8cOSb0-a`d ztT1By_A1Rl+>5DlE03d7K2xG3O-ih>A79+E-o3UrtZF!VL}>qX9|MALvZ z>&vZnNb{Il)oeioaKjyl*Xt!mRd-oKvZJby+~m}f0qrL_WgUq~!rH=l2;a>X8by>s{g0{nInUOLjM5I^SMZX$lD#&Sg}%hZ_(Lx-1K#rTtRJ!k>EIct3QlL zYnx&vPvd+*Va8m+dGJeI&@sy#PonXxM7xDT0|Gdp4d2o>zS?2pPBeT^ki}!6hV!Jh zEJ-}9Ryd~|6ckd2O{UW@17wY8DJ1cP`a}Fq3bj%=+sTFepIqgAd<&mjC{!#o!+HSk z+pY0p{Om*Lr>$H=V2>~PMZT&cRdgMg@m-PF_9OP}1=)L%f45(x6(3fx3IFw_j_>X)ahh;su8^3@^yO(Ji?j~0#~Sx4tP^3^zi>#|>Xv^D z<5v%kzAvnF?j)3H8Bj>dimLu=$~=Wd4F!dr?^iEzBuZ{2gk6A*VNtO4`c6Nk>V<4Vap>7^Zz@47|0N15gXyq_0 zN2+SwnRb1@PuH%8Ljv}#yRbf5pHFXo-}nCjUY=U;)Hg9n9Eb}LOLG4Jn0$X>{+(0& z;Z4wP5?_N45-e92nrwae-J_4`EFQ<~-M{m{Ni5c}S5G1yB<)*Wo9;U3ekAZPMEa;> z?5&}t-}(OlZlmtOdXjxTnAzb?w|{Vd>DOZmh-E?CK1+>@!#$ted4>5^EZ0j=#uZrb zkOk0J_#fN9Z@*hS(h97DT(;ZS!0p&{1q~ZNN{mLq0S>|5hPHpF`JRZCB$32qh6p#y z;D29^omGI8id9GmjiHTI-9U54XZ~-4`+Ibu)>M~eisHCo zX#@8<=DwC5+`i5IdKzrXwyVS8Wf?PR3j#;C-=`SLA=Rex`4k+9`2D(5k5!d|u^s;a zPOYidO6Y@4xNRaaB7p3Y-lt(2`f^5t_EhD4dvg@89(4ttV+?3PS*@OA+n? 
zA8*_B>&fc3kqWx&(9B1+o0h-n(;O8U9>(?Huvow;sl$n%(wMiLP zUQ9^z@*d=SXn$U_v1f?#z~UKMIS&5-l#h?wr&p=NSc#+r1z%85Hp6;7{`=`ORr@U9&$c_AVI_soel5aoiKwdK} z$Vv3~3WNjSu;Sn3KHYQ5lvXPNb`mKa9wizfi6cOM=fCVax{B3#asE_|=y;f#ekk~N=sZ&*#~J#22_2|M9^;Vps-KV_mu>Ygeip41Nf=@{ z@hm$N{{Vzb{{Wr7 zdJ*fg)!E*ehRUC95<7mMKmH!DSmTSZLo2HD0ed?SeFZ&_x5rP)36z580SJ>M#?xAB zZpvX;-V-6wUyYIn^lc9R0I$ES-dw~+g!4~HEv{Pc#F7Vo{{H|z{rX?^64#X)HQKly zSvuN%`2F|tr=r>^19_A3iYM-%AW^Tj^`X%?AZc1`O5Bib6gYDMp zG+H^uIZtQTq=J02uHWhThw1UYi~^^Z6k;`#deKRPC|}n@x6Z*D_wUx_$F$KSByv2R zlt&z~oe+Nfh~M=a*X7>NLPp0#ZOb{gMpJ>zhC36+rAQJ7ms}6V!*8+pI???PTY|$6 z)EF1K9XxJF7W-+oP>kMsb3n(Iwjj8Ukt z(Smj5aO~PYulT!<*Way^$vrn$i6Q67S%OF0k8ls}H?i0e(SaH)E}c<0;*9c56nkcV zFBIs3SohbdIcv9YdnxuHj){sp3L+-DNX`N-xS?%(aXQxiHOpPK+qXeglAX1cnt0+> z}Zuy552`qb)cW&oFJHi5J`v+-tGz_fxIw+vF)_6Wfx* zX<{I5J|Vy!{DJo!{=?sWZ=U8pOhmFs&kU%4kuhTZ*S@_V5;gYI4&SdyGXXN!LRBd( zrk;0s0%_S@2m}vrZ4f{td+XohqOzHsaG@d>d4*vjNzg3h>UNaK6AUv`E03bd1{{XM+(KiUw39!fB4F z5W16q%CR!I@|c2(Nje|>klOFtzPh5u;QprRQj#=*)x5MDEZg@VW4#hRyL6JFu`@D2 z#4|pNXg6(xqJL6HU6=6h!qUl*t|Su6B!mGHfwy8ued}ZA=Uw{e;%XM$o0V-c1%O>M z@b|&6cvA`W)7BDSzJaHaek0gEQV;okn4R}OpdEA^G%&$Bg(T>9aGEZJdlB=k{{TWd zZ{nW<*~H=C$ySzK#S~&tSP;BGUq~Qq9dw%vWjqp4d-4I0=xF>8)NB0v^~|kJ%&87T zO1OJ711OIbcy_i+3380JmDbd5ju>bVz^V58_3(aHwANyVoV5(rQi`{v0)D2eJ!w-% zTMQ!#fBQi7{TQ`UIq?#qDCa-`PM8D!d++^q*TEl)IL5+$Ao!aT6#oEEBpI14(hL6p z)QaVGm=Dw$;&J*CdZz;{sNgvtPxK`@+3Zv_5#NF#UI=ApsEI6NV^TolYQy>;Z@$M* zF|Z5M#S_OXK`GisTqhUp+rIusaq;ibcz_EQpIH<5)pnZoAhY!GZ7g z{+(RQBsqxVm8&_LBjw35&BSs&$Q(7b17fy3u>MPcYIQCX=Fo5aYXc6$g zJMG)Q$5);isT77u;(|LIUrz};erj~Y>-XNrzq!{?8qn+Lwxul1k*Y+7#x^N3D-^=D zCMA_Z>Oytprw}*rd+$IVxt+;fvkYdktYU<)2eD^WVt;Z@y6m1Xzl(y5=CqY77$Td^ zUo%Cf16)dMo=Sdu1Fzd$eugcw8S1c#;hJYe;-I$qFaUddlG^^gClgz?HW6q}z*Fp6 zNiw@e3p?USv32o+VHG@Rj+9lNK!U=oD$Yslce3E0Vf*#7!dL$Q6g*zp2gYM^_SVw} zs?Y~1%{ZrR5C+s8Yu}E~UBeCF9}jpYG`Hf?ZRP=z%hFf!;iC~<6q$kBcKplo$MouV z#$Sw={(?ww_ZE$1LaLAtEbt#18sfhJM{;|y_vk2nDbH-;TtVOrmHAd5=y;5NYSr9X zn3EnVZ^hmj%=in!658=bZjF1fMtD~QO)@)oV<`uK+4v;4VeQw#Ux-;Y$mFJ$2WjM* 
zLeoz!jFPwX?4#@lU3c+o@h-*bl7rrsQ^2u4;BViL!2M3PdgMMD@RoDOd1+$DRtpLp zyv#{Rm+pV<*V5o{*lYt0MvS9!e=E034MXXyEMQ#Nj#~Ai`RsNm;@vDV27_5buZeLPJcY_vyg5}B*{(x9 zH)t9N;~NpQVEKGbgk?zC<-U6zdxf%@D;Ba+x^tSx9!2yNY;s}xaytIqcp zAhlw4O)N<)urIr9h~iYXkGWq+Jt`R*(B-S+>ErS?YY>sF?QJCnMbU70r~nc<0k&^{ z&<~yrbwXb^7VGw+eK!g~9gLV9d^l?-im0B_)+WqcLxCcYs1Q!f0+FCKvJY}c!TCJ^ zVWPBO%}C@(M+i$h5bO?_j=sld_1EdvaboP;oU9VeA+22?iBOX?(=jE03!(>Z>~*2O ziMKs?Zl$}o&6`#VAEpID?PDJ7o|RPqYz{>K0H0Pdj(c*8e0Zd}rS5^(G$l$C{W1z} zew`Q{ATQ>HCx{1MakJLj(=AL}U9(jpSDG=%r(mV|kB$6yAE!ZNyi01@P8QsWI6{bl z{{WT5cLh^mZ`=X?eJrOeGA+8Tk698`lVFu~W@gyrPhs!9AKRm)O|;uBGduYy%No-S z*%KZ5g=NfC$f{M27gd*o5TKn9JA5~9Z=SrCY=zn|#$js79CEm3LK#W)CeP|S{{U`< zB7)-K#nfIj;~z-M6N_>LdH^HbX}!MvAAc_$da%}(bPqcKflj$94|DUNd;5Fzy10iH zmO@mxh&nDc>Z=V8z6aIggz<0LIR|LZXe1Y*>@u*baoZa~&3Dz8dwcjCTYt zZp@7+00bVgSdU2GeV?5HzPk24?grG41a%QnC28~&k1}$0d3=(49eaCq1>)bsoQ>DY zXCAFf*D^Yex~y6-z@u(92tNSpdeHB))p$yjoEpbyJtwX-=!)Qa4eAsG_~D5N1%zQDe=KHW02 zDH(MmBd?W&c7N4$DPlyW6W2(RX9YFvO8)?e4SO-`41PxKcC=!Y`r0H?Hp*k*OM(F! 
zKHdKP3y93)t;Dpf%^S}s?+|}b>~+3BKHd5(Op-s+#L+F6kqOq$!0ZA2Na;+dEaII_ z(F7E;F_8;r&mcn0886B92FTxY-}>}zdrgYUHM+~o$_6J5zR1b|a6PsL$J_k+7UOCk zKIKiA(p4vjB4EtB-u=MV`d?!_(yX}n=GUM3yv+fiNbmXpefrl+UD_R1=a*)czV1?{ zHt?@heL6@Tv`}w;{D!?J`gFQ1&03LKin>d#Y1&5E3c!sK@ISB5QTbfV@}%{zz!K4-b04ziu~#_za`>jK zH>JNuOPJPmqa~lBu{<)t=EvpjnOY|Wfbt8y z9_Ouf{{RqtU6h)oi`e?+BO0uhA+D(~20_XbAGl=S>p*hY>O564!rOS?<4-l& zr~$aw^1R4Zu?}_!ZJ6glQ90VO{P-)w<&JH=g1ofBwn*ddPg53rIA~=qO@R7w1|bc3 ztt^H?UArnzR#n(}IeO#KDH#k&W*bXS9pa?MXDd%)$+HGl?4ET=RbqG}fHdY;E4@qQ zuqHoPK%q$lo}_agBTLs!jBR<=whB>TSAdw>&u%NDu%H5bvaUz~Y=g2oA=JqAU{V+YfrThyK*BxUvi=88g< z^>Jlo>_)n4srI8Zd8O7g+;cQ(uU_6<)n%;K4-(O3DdH$KT)d`8ty4#mQ1Oo`pp%0E z$}Vi1I&BX~@7p`s40QLd;4oOLGnlJ_7BXWVS&Ww;E0Up8%?w7Emh4MwrXCo`;;&Pu zmz931kjukwl;qbEM!{B48ey{BeV2af3mp#TRQx=_V{>(UPl_1K(5nhg(#CkbsQjl` z9v?6>DJ2fT0ztt40Eb1SE)Yw$(N9UVsy<($lYDdngCSqRnJeN;_}tziY<#wDgDp9u=2W7aJlkyRArTbxu?2Yl0EKy5 zctaZ8E0Xi~XirB}z#^mAG=T{OSQAqj;j*s< zyhnqaJ#I{i{6gfi2Z}ah$6!ULW4&;XTHCKzc=+phMrktnd=^hHmX>=&G%%ACRsN~` zsbyqV3Qof=_3!Pq3+J;^L=@YsyT4-(j+VaDzP~C z=&vUz-a5#wc@guds|7gmw_X`{3Zf~Qj@QWy&&(-Pqu*q9O;-N^Etw9NwuIkL&WKzb zU6gKsy;nWsB&~PCmMfb#rnMj-J7}T=6}RKF0FQ73QOWk>4yGImc?4~rdTD8sZT$Q%EmP^t8((l<;jZ; zYjEOaj0|Hyh~IA;&xg8O*w#*K9lU)Z_!()r2LyOZnki?mB2hk%?o` ze=<1cP!7AFf5;D8@U$e=SDR~YY5d8il1UjN=FS#9oo~9oGo!z5+Ui>gkjz>sOT;q& z03w7^SBiLIjUkQ#S=s^=4Q-v*h#bF}*bt-&D!wNe43c;gasWW?;%Dg*EMTI%%70w71Z>O3#HqM>%3aWPBLNgPfc6NcMvK-(RL z_AYqn>%RTEcOBtQRkxCOpq{;WVG!~KgE$)ES7-@NB!b%@0Dnl^86#6G<;#ApdlpMP zUseG0KnuSXsSI0zk&8nJan`dEShH;3l`7-Bl~T4{5Iu+64f%1S#+`uM%m?@zMZL-LREBHAmF--p zY+XxC5Sbu$^8AoT65inPiAP3LdGt!{v8sC(xphHa7v(>G#3BTSf!ULlr&#nzYVp%ymtp-9pQVS-@n%y}xKJD@5K*lXja86|nr zIA+A!o_OXJRtnMnX@r0n{Gg-EV(-}Y-|fDf)VaQF%zFX4Qhn$TmW$(KsdHmV7jJ)4^-C1pHA zFjim(J)rEMZ@X+(C`Annb?Vm*8qu@Xo*r^C2Sq)jeYsT(RG{QT7AOhWY^Dg6skUD9 zb*7WZX{1sFjUuwKP&O2{-F^uiShn@PzhUw4%{97pcPP}kDlqWJm`&4LS<-M}7K*~&tszROf z*wG`bX3a&9%u%Hc`KyT{6GbGN5<@eWaVMmX;IgUK$vW20V_Q!37lJO{wB|g50?NZ7zDb8&Y1K7%gm6bGj`xC~K-1%&|CCJbN%aT!Xec 
z(b4NFVF9~Ns9l3&qb2-02*s&tU#XofBxcoPib-_JO35E7Sy%z0Q?ri1nMb)A>SG~? zj~8-fnu)t}GhNyxaPd3~c-9%_A@1lPp!2=TJ?mY*|i_& z`WMkG4a#6hJbKl6_-31R)%RL4G2;9?8{zP9VuU#dT2_3j0vP_LIsTq7KjJr(1KF2+ zxq^dyfg`P#@UJbGOpVHoV;f;(f;O#EL}VUYBuu4s9HF}-At=C&6SWGISn*wKh02*) z@c#fQi`WYP0Lx@YV;OY-1a?BjWMA=Vy+D$8_>&$p`TEOV1I!S|E~Fj#$(ymgBZ`Byv5rI}_R6xt_Lb z_?|Wh1eB^vYsoIki6cQGtFasH=1jX@<7@U|(o98LwC?zV#!jU3TX#f~NMSlDt!`}(G~qO**i&3cYnwSFW?537g;fd{z*L0?1et?$rBB|#}L zmY_5+b=JmBnTY1YZ4?_DJexuwccIEg^nus!2xM*KmP&C*CKfAKj(V(&o@}xyFoOKF z$D0W^=`H2Tq<@R7JWZ62S}BA2qde4B52~6vLTs0VXQU`1)(Y z1cGzszm{cRG7s8%Y`lE`V|}m*gHc3cLd@_N)%ZiRe7f zi1IgRLqjKJlhMKv2uboe|jUNo!_2 zLy^W~E8}fT9SEv2LK9CEa&eE$n?!Pm5f>n<$d(DC$m^pq)n{J};*ETNn;J9)rbxwq zH2Kp?kpu(XcCRD8VA!`r#?l?s#p0J)+nPwT$E-l1w{sSR|7+$)b} zV6n!crJC{to)3@AvPfeZ-)KFD&&yx!AB#x;01{HBw2tkO>K7QmN{vxeX}JIgsCFN>+uN%AEqc;qXvZHP zhlqUjxj8a7&}rBmERIOcKd7ooiLt^L=wR^pbq0(Ab+3z zM?gj56h-1>RXZfHIs<*r`SrFO?N*d|k_8*ujScbre&0Xat~sa*yh>G-!wDm5&J;iR zdy(Jm@BaX9pz~f2iq|&g6^CsG>;U~b>6WfRG-fh8zV*LmI~vjX?0Q#ftyy-Bu;XpA z2EOP0dSxokre&ph&1>xC^ZsYUmhm^Elp_{nwU7cf!@j>>-rW%GkCLjw@f4Hf007_H z{W|HH`n9;lURXTE^9cm^;J_V`zhkbI$@%qA|2mFlKiCd<+o~`#Aab{-4{U zXqaP!M(h_rG8HGkW54|UKVG!4p$=w0I?AnUD9GfIrIUJHpN*d1)OE^yn=YdxR9)&1 zYCkm#0PV?#Eo)$Ge~>@FOHCiAs||y{ORuQ^0L*qb{zI&m&E%TOMJZ^bE&2V*?0f$J zP<5}=bmJX0JdP;HssjM0k1g-uj{g9^P}~(mfP}V4B2|Hi0njW#m$A{&?e;p?f7hls zoPm4RX*5KbZ91s;F{Zfu_>1;8HuFq05D$YTRm%s_J+vk&${kmM4G!6W13c>|d zwPXX|`E9}V_dIwuzh0{B^N3No;kPPx@k1vwXXS!fH}ud!9==rhV`uyJ*!y)IPc5Qk zk#!;wp|0ND{{W%){XcHK@m6{Bx%>Hk<~ikJidadc_E0j z{JQ#D6a^|RYqbGzDq?#|q_;^&t&=>997OTr4+##mK=>!y*ndxcw#Ng`1Zy0d-=||? 
zPuzdzkLlK$kj2_Lp=R}HBQI{>sqOxq5Ys_aBxSc4Cx*h zNOp_{y*HV3tP0an;{kNuzJG0vaKi5OU6q`VVr5a*o#I<_HUgZ?0WQh@t-a> zR)L2w(aK0uv)BN3Kg@UQHM1-bn$X=A*;oD?gefcTN9+BG=ti}ReV|V!LP2fvMF;0w z*gv?}Ly(gwg-`|4d|fn=)ty>95Rzk(73IYD_HBmO^*v=iN?4hz+7^`+wp1?1{{U;; zF#estLDGSctccOqoXX(rs;F>2R7WHStRMXD=;eB_v{bm1Kfaj`g8}1vJsmeM9`ox4=>G9K$l07FdoO`Y#!VF$3rcn z4mwAaNb00=NgJ2%qBKDrj*o7V*tpUnO3TRbQ79Yk-TwaoKO^t$(pyWtD4koo3n!3H z!4#=6`vn@%I@kVdrdX_XxtT;G`KlB#_ygbipP%pc>sA*UZ5OpWtPrUR&dab4y}w@H z1FGsjO$2Kl9(r{==8$@%KLGuQORZ2c$mr8G4U=CDEi7HQW30<&MA;|oHh(W}{{H|y zc7Fn`sFYY~K$V+-A%Eenv;ic2h~HdC!s!fmWSPe{AOhvPmM6Zx!{e_10N}d}agiA$ zSkw?1h&x~Af%E#6{)etEC=&Ty5fs}2eKYt?dn@8hOuie)Nz5AT)ueS-K;He;f!}Zr zzpqpIKMYvGL6kQ~a+$)R2fDH8tx zGC;915F}yhUgJOw?C<=xe@?y`{{U;)q{sdu<8n3QQT15)8(<#qTEe<){{Z$}{{Z>N zueO%#YZZq!6e2S!p!~bvZIk)|*U2C4%US77;f$(AI0>hYU3>7EQAqyi55Gf#KmPzN zqm29?x?`kME+?HszZ%colT#q08M!d-?WL`p0DwS_UfB?zA*GETxuU!wro+4-@2E493@SG-5cZmicKc;rK-2&6kMwa4G*WAs1R^mKV_UN%3YuS%*iwDKudVpWm9 zDc|PVAJeX5=TeL^Q=c8nki94qY`v!eJiY-G0to(r#3s{!&gq3`TZZFNQXX%(ya@YNx6GbFQZ zN3sK<-{1U0`i_N;mw>jZ6llH2fn6W*3-JddJ^OXOsmwD=%2U4bci(gRp8ER^xh1pX zj}<+ruvn?=K`B_{^4GSuet%xu{=4g=*zndm)vI&n~sJpsw?CBBMiA;o8x}G+&IZVN#BR;ub=3D zv(@X9LE;}B=wREcvr7%0j zS!9)yUSo0qab0%rz36q-d{1*4aB$`9A_0uY8JZaDg=ve-0=#2HtCOS!fglDyC$2$0 zM%Rq1_b7NIvh6vBo`h2k~ddmSOf$!X?>;NOBcJ(oU3eJ`tw$QJe#p7z@ z2o+!5D#O7!3z<(2=C2yIte8tyZ$kppF+f*d`i%n}1HJJlw)%UF&*x-Y-Wac$HN4vC z1?NNPwP-m32E8O8b{w_&x9_d>ojhJ+0XfNHlEn*Z^0Au3C1r&d`O;WUS`$3{whE5J zf}QJK6I%t2&eosAc*$qSN-N2CB}X3@)JqM;c=ui?yB$B82yR_~ajgP+j;;he<)$Gh z(I7Mqhh1v3@U~8+J2`ZYr48RuTXRmcgm)YYD#)blDI-7?I@fc)g|#!2YsHW;^738d z*ScISEtR=eTs1X+OSYa(o_jhaSa((U`}Ktj_~|kg@kvfZw`y-7PO<`JW*)6_Qd>X` zkaRennd7n4rk>Smlgl+*CS?qqqmUjxv9919u_M1?2j8QsVsYZNx|Q)Tq?WZ^WS(+$ zTroaUlwre|E5DEw{=FRs5-e@W6FbkB6+M`qom+KXxy35P-dEJd$c|X|eU3^SLtEr~ zeY)9ZrF?|7txUnEfm$HkH^i~z2+`~`NZ)<+p{dr2oMK5DO$CMP%OkvgVD7P_4(DV6 z>I3AH_iJjk3lItoRjM-MK{ zSbWr$4Y+H?B1)+$f4r=O#@x<{>{X7t_U*)vj;SF9h^<df z`OHQRtt>NGsSCcsI?5zi&`It@VTzE_4KeNCUp+I)Sfveq36ZF+=6^CI8jz{b*zKXd 
z{r>=OZkzmezDY1Vm%oh{NUUeRlGse8qPAoVW2%v;@<(q|k^UkG&>z(Gjn~FxRtl|E zXGkjQSqRsY$dXk1aVNI5_C0Nj9=2hb(iw=*B!Yb)duyk( z>*ev8h^00xS*RdnxmG!S7FSKrmci}VA4mic2<|#ET9smwyor8RSmG@dvA-qq$UJua ziD9wsJ@?g=st2$Vb=436fa0VcjVrPVB{kKp!3}4EbFNWulWdXi+voP_p{0q&2?csU z){GeC5RM&?1Frr3i2(h_TVe3Pk!i&RnV>+kOJOWV43hAW=<+)nZGTPlR&6K0l)Y9e z(8cu}B}mMJL^}2x*KeQl=!+c2X4w7_8ZQjaMY<_{n5gO;OtH0!(Z-V3e(A^$xcxQU zACP+6;!Gs;-nCqHvX$qrUgEc68=7o@KH!D}eQ2e@@vUa;o>7izqb%~4R$@uw3EtE( z`}FfON@jx^&o`AMc4gUb4s8+xAwwouDg0<=`V400Il>Y!Ig<+vK)_i6b_yiA#7@TMhO!qsfs zxg$7~q4O(3vPRD8&Fo~771$@Rjk})R5myOvl{>aqHypA?_8^DP)M2L*GBi6QiH1hx zq%bObKM~(`BNJR!lHBIp5YV++SW+qcu+Fa1#v~dgz*KfBPJl)G^rG$xe3cruaQ0-W zOym`;Mj~l819L1HN$ku4W{GY z>2X%2W5jrru{KW+630sGrFcS#L&UzBCp!gDgbWABA7VPyh?Wl!OH;(tP{=K7bcFdB zgi^atQIked8E(=@sN+fpCsl3fyS25lG|b#}Kcv9+(_v$^Dp$Z$LE&EwHX{8w(m649 zHn7U22bvf=u?J0|?!&H&QhHT!^eAL1h#``lO7G<4SW5Po){G)MD+QJ~ zl$gs933I*oB_vqP-Z>R-?r&x@KnAc zgCP=lR_)m`5Wx)a*Q|*PDspn%QD2LVs08mvi0#~Tn$KxBiJA9GZ*}qnlq9h>BE4If zZ1!f~)=pK0xPz5viB)2d0G~|cvl6^?S%?gKbm}}^bnz7{4Q}Ook%zHY8(!3*=P?dT z8c0t88<8TFV8uak{LS%F;Eyfg^^UD7^BW4^LmYWYzMxv4ut4#JP)Dm_k8n?NI@Ftm zA0t|!EhWipJd-lX5)cXw3QwthLv8Kd0pDi|72TM;=sw&&@6CRrKt#mQs4C@D3U{sfY0u#k>4k)UO40Vj>dQ ziQ%4fR@eZ6Akfibte|mTOzXoDOPjMbEUr#xYZj%h7Fo=^S)4~2fTLm06jlmI){kc1 z#PK!zG;&pM<0!`R;(nVKRv3wlOd#4Kpdf2;!19gv8}`{d%D9I(W^T!DdmQ0VY=Sj@ zpiPyD#hQ{(N+vmx)JIk_)XB$71Z zcZ`bs$2JT@YnW(v7*IoOuGFQM&iKa9hp}lK?U2j|X<~0fjl_xhwuezj8-CjO>CQs! 
zJ9(*i6t7x(@xfiz#{pUrRqI6_hp72}TDy1dTe0T*n4F#Waf_{MoF31fvA`O0Gy*c(u3`1T+gy=JJbx)8mbAfj z?jHa=J3`GIn=4VKX+UZjx!6WWBprnYXiGNLkE?9pg*#_sDl)T9P$X%=dY~neblNoQ z-;!Gohw`??JZ;H7mYuptX0c)dty0qzSehhe1=W=EF%6Q#Tlp+}GlRkSPJ6iuu-ll$ zS?ziA7lJt-$y8;HBP1zf!16jeE3viZ)B2ILo+O&57;9Oj6qa#_pfp0XlLjjgAK_jS z=n#K5Z&z|PY&X?GYL$8Af(h(DC|UWkMn=qP-sfd_@3{fLeud2xTmwbt@Q)3VwL^xY z{+)HRe0|$B@Mh~*uZ?J|G>H{hROoxhDaCI9F%1>UwHC#W2^B1hvj`LER=%^b<&5mmo>iJRmyuBqsEZX%h z)j%3!qM)9(D2#?7JlUj`zLy%xWsa|^KPJJ~Vo#c)DBoZ|zaXj^^#K5PtsPsR1je2V#i3aL0Fj#`5p6K`?JP#ikPsCM*m7m`=r;60CI=Te*~j%kQRSqD8(Kdp zR!;ic!Q*v7WtIxal@OycT9#*dIk6*Y7#@)A>>KsjW#Rj;j-Hth#?%hek2yS`RoF3Q z-*Ux9#~#5?+>WxAl6jG;{{*;^6 z`$$b{87@(=kjF~(E=v`e=9XV6g+DN(jcjt{M4?FRID&rNQQ+^*MwE;EWty~@*l5ik zn0WwL?Ufh2XUka!9_5vW_3hSCYs1{yw-ODxNgS`H_DiDNuet8>^`A}oswlEpuOeFT z_{M8j78IJ~Z-|wu6`$3{IaWaaV(f)wjQRoEjg3#hXv`$7MiiEvh^snFI-xPb%jLGC zbp(*g#B%n~2St?hqR@G>mGN_#O=;nGFBDuH#wK(O7_a~X<%1#7`wol8O*MF>lER3k zjmRFWCzy}e4i|YK4zP5JZNc_jDdrs7MngnqDsvn4LS$2o79!q`u3nY-@TmwB- zhFbPn(glhjB{IV#XmrJppUT=pC)}_pd~rrAk!Nhwy)@CqhNmd=l2X3T zYB~umViSViK1-A%d<(i2BW1jW17nX%V_lWTq!G7*3?)NZt}eLyDja4K`&!K0y^)1! 
zK}~7-b5}pi+`JWw=r5QRwxLTg9JZ^qJqw9^RZFz#SFsP$vdf z98{8okDXzE#V}+SIdDSLz_vwt)dX>{c-F#{MD(eSZHHn)faBQmQ_5Uhrw5Df)XYzI zy?3nzIjzSJHbq%N`ZezAl1!u0PpI#Fk4_aSwWB5f0NFnYvc@z)-rlOyD^tw;K5r>2 zi?;Gp-UeCq8_YEnVOTELXn+FBz-gI|x=SWHPYur=9`%HhdSz^7Wlu-*Wtgkm`#A=fz9U=A>wH#xFj&u z%1bocg!z!g9I-~B4`ov%lBz2)*4KJ}ZkJM+E+yLO;sEPJ1G1=Z{@lX%6P1I)G%I~X zZzYLhOdXJDqs9=TmQmUF$R|p(H#5|e8)7+NS!Aya zbfmX7`J_>*E381XNbKDBl?8_W{lHc4(#kB|zbblyT1e1o*Id;iV={NK)7Z#SwL4gk zfqI;zp2YSPup?nquy&2W-*N~$*L{(URTsvlCPe2wo=rMi5D@m#GX?~z`rTVw z+U?SBkhdq{vFCFo(2i@vK}((vn_%1p`MwfCjn_ za~>&AnGJ`%^$1{s1H*td(OOwshyo$W_^2b@n_*Y4H#0x%GXmb-Stg@0oW${-QPzoM zmS>@W$KbR1$SYb~lTAD|tM1L44Hv1LM%+TW#T>4!<&a2Svbh~-#MXF%O0`#xHVTy* zOK2W>UQr>?PypEyvmV^^no;s`%2Y~rXyFjeIk)B)sqzwfgd2 z$mjn6*hPop-C0&=Z-v0(YKewEo%Eb*a+#t3UcNo^5z`CJ3WK2 zVXuY#hf?2-xT=|t!Ot00PpOpnbs@=evBpQ>lNDklFX#-CFaddN5C9(C77C!Jhz^ZS zxs5*3uKFG%)5S8Qw=v{x`(F{|>%$qZo1y`!W-%&)HN{VUC-yrZ-|f{l4*3}H$Oai5F zw(tIMLY%-H0N(TuVc2~A`j9yTHp@sCeYMtb zT8+^{uns{|zi+ot92fy~8Hkl$-XeZ0Fm_T;<>l<%5WXVw0uZh_`bVo}e@%V9-9#m_ z72y2KKd|2PM{s{mw7yNMwt*+O?Y;G_!GoeBTsKuwnallF*Ln=&-)EQW(0^Rf?H*4= zC65vF{{WimW^C+W6=_Ldp8Ed%Y>ziaR4ld>avI=Hzxe8=Vcx(jl5s3?)Gsr%*6JAq zKr*O4uW)|b{{2N-hs%&7ldylN>e76KGQ$2_^78;2{eS!&PG;(oT23Gq+0gjwt%RsY zbgoW5kavBl*MW00V=Vsw4Vk~)r9NAe@EcuOW6)yl*o4TXRgZt)XL|jQZn=&nnDLjn zWn|xeEBMOT~8_7MQav92~k!zK5D1{ zkAgHge_alf<)ju7pqFt8y@~v*y>|V^{{W}->0UbgX(FAk4IQEO*TFy7bWhYI))jUL zBrdck56yu)X%01$qInu zlIprzv&i-BM<^1OhB86?;kmZIx(>OAjdIcWey!|OUTeLZGAvG~Tw-Nb_aFVy{r=r_ zJQjMOFs!nW^{q!L1|5oed4EC7e?WTRpNM%&S3F~tNnv=!OAk$B@1+qEo7d_Wf7h;; z(x{r%``Q6Ng~j^4afhKJQekVaG+Y1x79+7ei2lQUFs+f&d1=M?f!W8;+fNwyt>nNK$@%K!06(e`D9tzLc+;W+N(pYD;S3LfwjjST2D6-N#w( zRvg|VD)-oxOov~i4*vk>{ra62r7SnI@yH$j0NbMBy&wR{&&hu&`TdVTX^<0JS)|=H zSqhE)DB-^Pb#bq5!H)j`$n~`?*M#|8q>O-9jE9mwkFi!7{+j7>-gFGcFv|Y?f%g5+ z{{SPbY)>176FOHVp3HaH@onqppB36_Z=ibV6X#`8w3O=xAy4D`;iHd5a>bJ3)pYDDoUU2-}LA^ zIJz-dn-tY0nCTY$gly=8#YgoAuYR^f;?hn70k1QY9%JlwK>oe@UJYoR!)6c*_tzr3 z{{Zv9@6hs?NaB&YYuyWm@AudJy7y;wrnCgr?!hQ&B$Z|gNhh3SkUvdttW34#LK4%E 
z0PJ65U)!aqVn~FBM^Fc>l;Q{W9^coYS0z;>V$h`L;lr|n{l@y<$ek8{Ab;qm!ql9+ zrJv1Dkm{@l-|T;}=uDKD)?Y5Me=wbpcf@{x@7w$ImV2o-%z)^30B?x?{{Z&rNv@S0 zkM=s(TG}R1KN8r8Piy@^+@+QvjqXW2dUsC`F{b$xmIA1(L5(|xU3~AjAdh~aW6N5B zi$gI}t~vv;A8x5JSsS)-=B2A>Rm#YMG~(Ot2^#IacKF|3t3|*_D{%ORIOT$ln=KYV zLZMw^cszG*2rfesUxy zMZMjC@1gsDr$E(fIT>14jpQ!d-^YKB{{X*J1~ktAsBv>ZCgiIOB%j4hQcn^r5q5?h z=pZM5-|kOy)%7~K*y*^OqDtVbajEaI*+>0)T@ ztyC4(uah7O3`QsP`g`%%Kd=7)9do=_pE*fov&B6+@_-uG$o)6*`t@gnzjmU;qS;`8 zw;31wJNx~;I`;YfdgH!aWU5_LHJ3xiWac>I1eMw4b@L0bBhcK&_C2)9n)&bFvFzH1 z)yNE$(dBtGrO~}blmH3#+0fT7u=naFw~Y=qqeFHWmKgTR0`m0o8`k@50!RnnXQtTC z#jI92NoXgGN6mcIjFKB`+zoqcTK#r94NcPW#*3w6vGPd-t#LR!%Fm)f_B=@^_XGU; z`Lq4DamL?*qOd{b3nM3|u;AQ@AJew_?l}Jd#vce?$F!?&jAeZ9Fl+@ZeY^MfBoU$C zUAp7H?Gch+6ZlIa#sbJL?l{~fkL`pUvaH>2Vd*gQxn{I1yw}mrXfD7W?3r;jfRm4?m%Ez{keO8UY}!C z!!4;oyng74IFdj=Z*%w8f1iGn&_s?BzDbEt0Ap>1Q}O$I{C?nc%Nq671c?lDBN7xA zDq$e|ss8{!*ZcLxt=@8_z0j%YQH)!z^#BWCNvS0Dh520(uY!B^Q}|z6#y2CKj;7XR zqO~D-;qzot&I#bU;#dVAn?wB!?iGN$M#TPGiQH+Bs4hMB;1l0puk`A(!Impak?|_q zx@tpVS|codm}^W*5!@Xge~on&SDGNWY`K421>a1sAN^0Y)n!^SW#XP$Kr-YjijuXJ zi3<>c9po`8fxhH%XZO~?*Ht*UsaMTpXKZY;LWv^6y9K0aAu)yu2V_ylj?RX^uUaZx zp$g33d}a7 zmnxsz<79QteP_O>Xwv|4+=&3M#hTHW7MlZU^GJUZ@y&|FfQvmBo|JJ`ks@U%13P@3 ztE{pq*1PN-g;%1V%WogS5YwBi5+SC9MAR;goSg-|l%6sp7}%0gZ}_JpPyv}d<~NPJGW;ch?=%)8dPgit$I5#f#zuisT~0YRxuj| z1$KExe&?yitCy#8l`FI+xje6sdd4VHSFcV7jiODGJyJ;Jw#+#V9=c9KtT`tbm5QKB z@Nj}PkIW&@F5B(0!(E9w)`wbgm}R*X?Jdy}F+UWH2;)UxPc}bg;(q?X4}Pa^BAc@S zHP9$CcC{g!s6140`KFFue3VWhc?!<3w6)|y4uZz)N_&r2eX!B*)nn6+8Ve=+G+J9M zC50=s^kO5j~Eu)xf(OhnG#%@0$ z!-qW*pl|o@_Z={kC0;UNXvvZlGB5}j1t*V~-z<4{*RThnDO%wqFpCV^m5iHqEUTmG zPDkzW^R4`K`d9j2E@|UIAd%GY1xY7<;C^Cv^Y`jDWv0*t%-v3EHLlew$asQF60}&T z<$XB49x~>`u7+4|Be#BXqee z(8h$-f->=fMw$5c(LK8ZuKxfdp{2qk(@R!3RTjiLo=taT2fo<<0GGGP>mA-ONrJp9 zI!Ov9E7k1E_2$RBtnS6;knxMk2p9mQciU^*N7x^3sWG{Fa#=_xjz20X0!$J6k)lXX zf4A-1zf;*y3tU@mTMoZamZfGMWR2z5hBPH~eTXOIZ{Mm4UKf&;t5FLnJJ(`mbqYM` z6e)f?_Kiy(`t@iJzf`i5T$)$Q5^Gux0NDf%+935R7VU$SNoxdJ_PEC(VYEfqEC@Ru%70<{^z*~noV3p# 
z4r!r{UD&L#j!Xymeb0S%ApMEylo~Stf3S$=jieM^iqhDnYQf;ese!_A2;yKWw=$?`EpGpK4C`LcfX=so}v8Xw3ws z+4v@`hLmX}+@n|wkxB}qX!0BR41dpk4J{#%u#l3`%o-6Kp?Q_s@jKc30q^?$-E5A^v&z~!&n&o2P zyo&_^N8dnbclYaL5J8x*trZurTWp2<4QvM>4@nAp5=k2y=vjt7tR`yFMGUl>I4wFz z++~>wStUYyfgmb$J8T|@5YPb=Z!{5=#Wa$G$qYVLL1B_xX=)!Q)6L2-3U%C($$M*B zAn(5V*Nw&87{_^@86lb&k)&$wvB?VeV9)G(2JSm-AK#^~kxY!y)}-@ua>m4xff$Yu zeQ`&`_EXqwcOzu%>!(w$OifOgm(By_$kE24IP}_)LH_{wYP?3fZ+ai%=pqfdCsQE4 zb_nbBpohcQj4k67uT~OrVdLegq$=fhbV1-%2_D;F-+Sm<$H>P`bjFpaB&ijXUEBF2 zN9KNQ^VXdcUgR+M2d~v^WAtLPWLc760o8pg65Bqe?cA=(B#!N`eut|?%yqK2+Cde& z2Bm0G8;_m;01ipK#!wjVhhh17d~EAn=jO~uhr(fTV`35YJU@=f_;VRjMpt^0*~`_I zX+0<;U;*vfAmGQ->|84ibaiY3_+o1J@v)-AV%k`cN_}!vXw&KY1_Crw94aFn+Auln zlh&J!4Jc-Y8J0+G786$myuguc!yj#e5OHoQe>op+;$-hlD)4Iry!I9pWnssPuF4&E z-wl93Bh#&d2DZI(PFQv8{YfXY8K0`vSHYOM@)BLnE!RmE?)?&Mzyi7=Z5kaT;>}c zRjP{KA7yDQJ)-7UC6&Xj%J6iOL&JLqfFo3`b5dKE)u~4pGv-Yqsw0(W@(0Un%}tdB$@J?(T{Ey z&aLRQ!{Ll>IVY!%EG|$<4LF_FM3a<#nMrMqJl(+V#Gr6?4^MIx>SiC!i7VJk3bM;! z%Or5|c-9wC{eh4qOun6w%iFo?PZyi;{Tr04%3-%+<3m_YW@524+AI`8O7eayzghgJ zTtPbpoXXtB+{>fIVp^>0U?PICD#gGU#=Ri4iLf*QIwQE~SV_4(1nO*KKycTuFVblQ z*bEMGrdtzGS-VRWGbG{sxYksGn8_OJn@BvqUa|=E_U;E$wlCFcaaVM#hB`7ti&<6( z>$sQF5fkinAt&LSdoKDWoeA=`;m6aZ8`-r&)C!FK$Yrp+Rflk{h9SUrCC>eT=>*AM z#fdFgk*n9OZD(Z>`HG9~{fd-rvlxINFR#C0u8ygw_UUd~ZPAoCiccVur|0ge>Xm4! 
z8lUbz#2sv`&k&tZw7ljGw*;u<6fU-Rq1+u6a`oF8m?MWFbO^O=1a3;dHY((ekh}tb zdu%H<{m!K8vjmOvnsKD@%K$@w}->qKNKtXF&ATSU3@lyf_9LJdICPM(^Z_lloAQnw+?-IYVCkT!LrzmwDHE!V489f;)|DJ8p^D3z1UVdimCKl>peQ@s)2 z+uZd7Vv|8`xyI+PS6)$8OKS#EqB5Rfjwv!lwI`mpq6h$wj+}U5<;>ekmzib~D@kpl zc*$CdRpnoO`owSDeYgJryG1!Fx}#ag^Zr6c96UUS37tJt|z zMb~kohHsNrEjXm3#!!-qE_Uf*bK}#@&#oZ2TBL9Uu5*~bJ|>gUQ>u6MC z{Fx9tKNryEq;dtHxnQ6WSdWV97YDh%AFjVD%I+`Ba_gnJKoRPkUr1uHM#ed(F~yNy zHYa2WCar4RmN-nZ z30Ht<#>hG#GyecBxe!Y4%pR^Fbhgth7Bd1o=G|REOhds`P+gYa)J;k$W0E!+EUOfS zTS_DW$k!9H0oN~ZHQS<2O6{-Y;?mWP)n-WHQ0uT9kllg&&Y+$9wzdaIDq=Hp_#1c%T=73$QW2pldkH0}Xu{@7Y5NZ|OT5?kmOB!<%d8T0zk#;}=SqEZOxae1Ku_Vr3;z_Zw-eDcQ4aVMPau>!_7W`r}h6;NYWJV5Twnr(J*ho8)` zkCkuF<{K^b4-{jJ>Np3Bmj3|YksO4+y+HRGP(rT~=Gh^)QyR-HDUwQxzsyKcOp!<*^G6x80toe{;z5WoJ=Kdj8w0GfGr>QJ;;D$qMQV1gYraM%MvXYm zURgPB)UI^o`*8CTK0xy6Ux;p1u_D)uTCW5FR4hD!5yvn=y=eK0j>nH9wr1A7`YeW< z9@*;Zj5~AEbn$`CMC1X(2O9J+YIP6U7mH_B=r>K;*F*T#no0pf~SJXB^m{ z6g*94Hm?pns#LKohnr7Ys6`%GQq%d2@-~L_Q@)7mdk7$k^_hb$hn^EBq!H!=3dCaw ziUCd{amWGec6LGNyyi+6q{QWoPo>lX-0IS}b2(RK*7h52RgZ2u?W3;KVcN=6iFXvfs(BDlkIcT#uif~69Rba6JuJT7}SXLRHHE9|#$(fHfU6%AHWenTyX?Sa!ayYLs zwCPxE1kWCcT9+?V13P^c86%A&u@oXDQ6b?GE(564V2yTNvuQy(==?4+e2z)71>W4a z+Y0bZLYS^iGX>)yC;2XtN6nC)8sfkJz?AZ=Et;axXO6VwBmK=D9Uzd107YUp592#IeGuDyxR&8_3)oa~ootK9jD2%NT4v zKIK96G~@HI@Wad`^GiA`lSm^Ukql~!$=iAdUk9gFt>j{?X+WlErmXdg$0Tw6Gq?&P zkZb^=Egv(Fe=Wwq8t8h}@zeOGtq2_3$k&1GqOvHj=hy|BLPWk~kHR?uub>ri-;WSc zNsgynI13+({sxl^X@N$tbZh>nR3xE*@l||YH4;|G$7Z^j!TMyltnB5twF{(l*w7_= z6UYFo6@DC+X&`&^*~w)`IMNu4DyNefk*97U0FEDWSPc`SuKfeyJpH*cUssZP(Lka| zOdH-*F~t;4rZsTA#6cqD zdlAS1uWpU?897~{I!Bhtn2OoJ4c`8S79Jq0Pa9fX#cYhz)RGDC#MV@frImd$LBWgv z05p&|wz%--babT8$2AEoWUkV)Qy#c6x|M6Rn{IOzZze&=@hqz$M_@a!?hhWHRv4_l zP-GJ4mqNBeTa_xxzCjYEg4&c88h%8$RRF0Dc1oyO{Oa4WjhdBtxa6|3ybBa9u>50) z2?Gwz{{RkBTW7G>wz@nLahUz>Q@vQYHM+6m~Cn|Tb@=q@R0Ds%97iOtuIB&qEex@PFAMla?08#Dz z2j8yA{?*uqABcA{))-6tb01qECP&&&G)wfd%im-d5Jw>$&e-?w*Uzw9C>n`w%*CSz z*tgsC*#7`dzP|?gc>FtR(lRzYm*`Ezii~C;Xz&l3VqJnZpJ0RTKHqMzhT^X+{&0J5 
z=lAOzWhryfOBF6pZZqhJ`?iMl`W+74B(Y(`kgRzrA=QS8us#0(uT-{%Vq3u(#2R2c zZmE8)BuCgFDC~Fh@!O@At{D-Tfa&mK6-sTAYqi6 znC{R(M_Gokl*i_)sQ`jW?se_Z%^hRyw9xqWJz*VZARka8w*ETWO29~xHJlIdcmDuT zM!KLj4V2mD zq-s4N@?U>QBmV$Dw@Ume;SU>UF*kf)Q^MIR7;HpBOSv&!g=o>3ASOegPbN|6-|N$7 zX!bA7wcwD7g29Oa2a!{*w?mOraX}wD(D@yC2!?pX&_+3q z@+P!bq25y&Wg|-|4y61rKk3&f0ODnL!7XSZP+O6vvVt2Q%g|*cd#?L-`}fybR*}Lq zGP`ZuvXVIh+8*cj@BI%*21JKbuIS$f;QN8o>|}w*lO(PZR|SCfXF%Lf=yd1z{+&-% znHzOQDw)r9G=UbzLlYu{%11E2eQ(gwFwy@2`KW)VT%Yj{#FhM4n}a=wF@e=j=qNcC-AE0{9}sAO!3QFjhW@U6Coy5ej=q}l&w;S<=W+98 zJRc>S>LXeZr8KuZ&k};xS)NWR0y2=Tpm?2-6rP=c#X5yqYjX&KW+L*wXY2Ta>g~d? zb3CnO{JDy=T4J?9nU`FGDPlix2U@*Euto%raILaB{;qfruEImka9vz<%Zc#mm z*-`CAQhB+epQi;Cy=DBswc&SvNp3!&JaypD!tWT~tp|#H83tm+Qwph%`dZfRR*z;$ z8K*jMII<9m;Xy1FPTO5t-&kz}E@_S-e9cOtmClR#X$zE3P&|k9Jr@n7Q>me6LOsK1 zkL%apSi0FC_j>gr!o!B=i(R1wZc%qKS(!+hO&u6bU1wij+Zu05FXKc@Y@JO2QY>a!VI zqB(y!+4k6!a^LJp8}0HB$4c@40E9n{Rx&csr}%T8vun~GG^z<*?~805Xz_0T$46Zh zzlZ+-i}={tEdCmJkq{x2`BfxkWB`_JsqfeyZvBs5s{A|b+JpZ9^gk;6FVyMNJ@_b# zwQ*Bzeu^{_e2BpP$?dcLzkY{@4~$UYt#YsJrQ`Fl@6`5h#5w$7S{E_5G1A63l(`c^ zd;b9KU_Lre4~a6o8IoY=NZ14s`*du>)FMGuyB$QC1KC#J@U=%n_}QUN+2lpmhDFNA;D>+xH~$}KJ~3@v#Slzg1tRE|er1S8f# zKO_OKVtN|*rqFI`t_|@>eQY%WH63pi&7TblJW<9;EPj4^b4MeN#x!ckT1O%^RU2Y{ z0)gE1)-3-3vESnTa=8PM!O4qh6&mu`lU%r~G6?u{X9&pLM!PWC9rf6M48H(8Dfl&% zTmB)xL&VwZCF=5e=jhNKKB6pn0?F!GQ1S$9duZ}8-&(=YrJ2vo$Zx&)BtPpoTq|?Z zg6<3EUlOKc8;|qsU@gI7SkdDW#>pZh0z{lh>u-}LGpEWSsFl9Dt>OuoXu*mE{;!A93HiyT)HSjD|@)K6I zUR-V1TT{T%fwunuoGH;=>*KNa8tZj7EXgM)$+8BkAhLp+N$xC<)s*1s|K3N$`?2`%-}mqSU35%c(C_rJaz^y*BZ8~jd*Al{-F($n8f1mp zWmdikE23l8IY;FVjQb$y@m|_t`uuC-sw#dTseU-3u_7pD^BC)kGZxr8`0cOl)J`ig zP9UL{96~ibWn?U<_?g-KSKfhB9+be-=C${`C;jCmn zWlVAzh%z&U2iz0CzsJXJqP%w*$V4HTI6xz2c4j(^@jbyma8LFfTjgS&r38(uI?KqN zA9edLZ|nWK=3XJqM?-OC?TGQ#oe`wqSjKnm58K=U*mu{@MAkUJy{g8!xqG@Vm^?Ve zCS{4rG*UO|+!6=G>SEOjIR5|>WNOmA9qK~dh8g2t zDK<`JswX@uHL5_h|E-f8{>IGgg-~79OPP=%xd60(;NB|XIje;o5L9GDMjtroc z;r2bpasGp+m8-nA3>u6{vHt)KfFSRD{^RYx-{YzLWu!!plv5Cl1e!Wm001?kz3Zy_ 
z(X?MtBnMI{*pWXm1pI@uy?l??->w}%bHP^X6RbTN^A~rJHk*q+xa-^8e0To=HC|N&9<$)2ZvTJ6zGYNW|0asJ@r~0ANR`h0!xILt^X@*YcxS z-*GY_3M5`fK%a54P?PRX&u*4y=f}*TtRh)vL`f75^E_?k@c@2Z$f|v`2|shu`D)Wo zJ4XOWqKYF5KnJBfzBotz zSc3`tS~+et+mxS_%+ZLxqB#{3L{Jr#oCC|Y`6(UtdY{DjI`&({Htyyz(&BMg)s|s) zNZ5(11PrU(cbvM2)PIZODI0q)gsvjhMRy^B8UY&i#Qu0009H zc9Qu1*YN%77rSQ?>V$t{N0(F3QrN8eVJTl*p{BH1;jbZJ8RrP98At;_630idZYp)M zdTo!t6mr-^y7xuaiG0GXw5u8u<+7j1#q=9Q_f2f51f#R~V^~y*qyneX6KX>=tp0o3 znr}8|AxUmT@z)XgKmhNpx1g&QVdy-wypcp=HN$X7V2_GNA970{P}u`}4ezd-SW{~< zu=j-cpSmWs?qLnnWX89M{JWAwlR7=djw?iwz$h&G1Pz8d(AT#8{rY=nHyrBicBD#t z(IPBJ+&Chr3dg?JmWV%pe~!7$nCHhlxiqGgw315CGQXvX;~ygXDgfiX5Ahv!RpH#! zP++R5u8uBqcaBK*va+!N>smm~>h0gsKVF8TjTVAUqunKi#A^eu4vK86Zg-m1sN2rt z!3)Q?Qb(lkTG`k=kH>z8vmmFbtyo23r^-^?N0?tsLw)zHf$gpQ^fgNfeyjrAG1s)Q zG4g!JF!xeEV z!a+pmW@*dvm)Nk8OKSI?o^WO6+(}nwJXH{#0()$qw?bXb%JHSSY%{Z=VIV>oTl60L z?mto1R#noha93z$ltvVpA|Zn>W3jH<**~DyOC_z7lE`V|W#B^tupN}1>ObY)hja1v z>3{hQZrYVQ!4TGj`i_2CEUdD9J5A-ZvjCw(@zRn)jVUDW_0c^Y{Ar$8kOvb_5%iCJ z9qows&>jB(Zl|bJsU7KR(`R_>G?U8H*#I*Wt_O+i28Ors&=&J~dy`woJ{rwfs-%eV zLh=SO2|Tb)xflG$zrv05*3fItvQYsB%CaA#OE9I2X0>wS8D>NZM0*^x0r&XVVd?{{ z>)wHK7ji0iCCJ_+Szm-n6AiB-cfEW5{Y~R3%St80T#F%6#hAp>mQOL#B;=t$V55&9 zSb%mwQ|-~$aJJ*lEn5n1M2Q@=;O1j#5S3k$KQ6od{l{bSPs6#5fN%L)?9GTrSNaiN z#1v+3E}T`F!2t2@P?NLsuG{a>Qu(z6kij(7;7Y|L@LAQ{i^Y`lFmYkw^p_X0|$tN(O$!_Ege~7RjbMw?)oZZ|0CNse#Urh*U zlUqAdfsK04%|{^nZOEwk4Rz2 z?2g^Kf1B{%;j%?yFAI2Y6CPqonqhiSOsZFk$qX#brW*i-*1Hfp_1F|x*{J7U4Wo|3 z{Y_&(^b!mgf;>l*$FGSd78aC}2wHh0jtcU!c34m|a(*Xeim$AneGZ3{mL2||CtlUN z&m%;|rj0_7`%rlR2>9*WUB2B5J6wtwBDj|<&*n{Lv5cQ7d2tOl25m;nKwdv?y=(QU zh;7`e87s~u`co`m65n8UH@@fhJwaC)k-!0Hh&Sr#&Ms^x9`;tG)q`4e6Qwfw6BAF)h>v_o{^4|grFKvAMd;9yIwy1ql9_H9_ z@IYMRXJvVV!N=vL+ZU?KG|?5SjO6j!bs#7jASoa?JVO_45Eqt*LBV?c!6lglkt4w% z@)^Bqgem33{&G=w-H#m=9lP|d42{gzQ~v-^S$c6*uQsJ+k}`bN4#R@3*%?=eRN#5P*=wdjRyn*0t8LJ$16KlMQJU z_UqLStBw%Yxl}j&6|qr%q)EXQ&mT1FLq%9?h$N^d*!0$o32oDtH#l2shG?p*82}r3 zNsO+Bz4ypyb|Cxox@W`Jpd$0KSrJk7+9kSB_*a&=5}T6L_iYi6%?w6f 
zwi1+8r~?#H`8EgC&eKRA%uytS1uLk=z#1w~Zj|>CVdK}z*ftI}>#t-@be}7bmI&dT zRH*Kre7 zi=URwrS4ab$?SLE+o@edy|8uHuN$CtS4&0uD}5>K#gsO(5t(5`^2!C$4KUa$bWbgw z;B5R2o|$9q*vHFO!rrX}sc*#vHzsay#O7!}5hJ2X2J0?P(%Hu+}X)LX=M9 zb|4R1J04&SsM#I;N8ENYjaco}^MzDM#PGp5u@>0!=8>un+?XrM{JsMB8um6*dmY{y zes@IL83@ds_@ZiY+O#33EYjn$Q`nuEP?(gS!azM-fZLL)RIvx=LVn=01xfLAU2oow z!x-Rn3<(Uy6u2ZLfj}Gi(byxmLwJwG6r}M@%L!W}EIBNM-%Du4%R|_(Vc*$+-v0o? zKQZ51F}Rs25;3uPf+-{N{*=hRZygrZs2xy-4@pAT z$K&qVhW0YWWUCdmbY*^AXN0Qh9myT^%Af_>kiWCIpu^&`7BRF_>FM4{qD4}UjU=+R zv=6v`Wj>?P328wxGa|1M z9*J%3Q3)eMV2#=NF4W9d$Ril4;_QFU%5)Zz#80?Y`w0Z{G^Yz(Lz((Wy+ej&TwCMO}*YDslMAwbspP zc+V8h;Fy)xB@xDSbfshUuxSWo-w=5a$A~Q&6)0yO$%6PaS)vQcD^?XE3&^q29G&c!T~B=iNf|KZz>AW6AM^aI`Q} z^XHT4%NCA*tdtU}r&{hwEv4hgf(KIuwa+dB0oZ767c>Csm)~wZa8kJTG2?37e=IIP zB_s zvNiB^@f=+(W+t9FuQ+yOj!Np>3p0N-#yq3Z`)VgV`u9>=R}O$h1Gtd*s&8^KZZfqM*tY zkP5EO&eySc*jC-gua%)b&hDcUfI1sGkeum$xfNhomx(^jk6ex(W)#Uy~ zC9YwnrOQVw#aqf}UGP)S(o1PUE65%}t(sF~Y)58=z_*MRWJ#)%D@9zX(>EVWGVx%c zeL-A;N2C;VD�tt!62zD7{X?S6HJcft3_S954z;77wsnu|GY!MQLp?0N?TXQMs?C z0U~ehWM1h*62&b@8khn&ctm{3q!I>14C}HzA%G*WW3jCcw0&)gmSeV;<*f(MJ-OOh zBVlmuAqY?}$lCW&+rH0!n8h#gJS$6PLuycsRV`TpHFx^73PbFNA%hJsbuYyFIG&kR z#njGtzV&=f3a=bqXsHa05EYKYPOR-MF@obHiXBm(5|UT*y&$tzXHULtaB+qn1$^ z0ELsV8;rPA_$Oo&@4Bsgeisj3jCW?R^`1E{ypnIsREW-@$=NK3n(y1WKI}E46*cn% zK}6g7yr-2Uq81!xLf+uUeM}jO_tuAf_RwQ1O?I%2w(F96nc9}ph_u_DtM`tpv-qT1 zp;94Q+Eu;kkR)a3n*0JX-jt>t|FQ)X!GZ9WJ_PojJMPg)E zm?*CaGAw?Ugw>rBjh**A>!o2Yi6(m8zKIipRyA@m4a$c%B#~haClr2Gy9()fbD#K?7WL0A?yk zJX9WaLe~%+Za4$+s)WT@lX)iIUuC8|RPoTg;tcKvR$0Q$j*86}jZ#Z)V`7qJuxo|glC7X^CE z?n+}1tAdp=qDvf+lO$4*c!ei^{{Wbt<9Cdu!(PRNmJ8l7q5%Mb(8wG!(l?8PZ?>RqXk7|aO*&G|Pp!CH{9NlLeo5k*g!RkMVErc` zpa_u@oS??Y%g2bNUJ>>nDD!n?60;dkV*FeYQDr7QL?nq#fZSt;8 zwqf%s@sEO#tLaxNJNlG>c6)&(vy-umyIJE;slAX`9i)aorb{lNVtHr{35xL~lra?^h1#%3MdqqG zd5bhp=WU}PFJq=}-CyJ1r8~8wawLAfQowr}0sjCkbz6zY;&Ay%B98h?UX1jngd|+0 zFv2AcRB5%$o+?S!!=Hc({{RdOjo%e&c#KKm>}xC6LaJo7WQh2dP!BIu$7JnvsyiqH zU2GtovQk&GPI>TED^-fCGYH9%3~XfLLoyCUdGD~0_Xnofh#;>ehseEKSuBQe=aLv( 
z&RFDuB}CCsQA!2}i#C^JdXH|X4|{-oKm%@aRn)3E&ufN*UV8gV({|7CS~($AOp@4= z7qTL#hDUJ}ijF-;k#71VDGCZOZK>*l{j-qB%(&>{g3?t;>D*!?hMIDb6t`l=J<5zu z_6~xn=mS!wGZW>PicjJoM-0vyB4U5h#LwP%utaSB%rW4cRO#2a}-*%+`V4=1oizH z&EmiHhk|bS0+c^VUd|fq&s;YhIx#RSIrR`GjxI6pTLq8hBzzg-9Aix~fSNu~EC}}P zvO4em0~bs;vt@w)0Dmj&eveRNa4x3b{2O=>56r3DJ>-h~cRcovV7|URx#;hIzt8mQ z+a=+g7meU-zCPSla(#`2*j12@#fbfW{d39SYVs8z1>bM;>DySAt$1av$x1H$C;VSJ zBd?;bOqU9j%oTPTuxm{^jgYn7@NQQbWh0w}Ovh+La`x}mF?eV|N=#Xs+@FRz_~^=> zE6YmUEY8y^4@i&(*Y)@LJ#L@I5<2;E<14!_ZpoqjxBK<((zb*nj4q;l(Ct`20+`0T zcOJ*~=n2wBWsW#_mpo7b-Otzo+@IU8pg#*g0&xB_@SR^5XM9DK{{VccZe+*ONFX7i zm7`zgNEkRJ8<*rGX`{2QyQhGE2s{(#fStH6ntu&H*;B_~h(AYTE0D$bR*m2#hFr9a z)+UE#eAw#8>nq84ub9#DBn9QyU;I7)0A|m^>>f&b_i+~dLztW8u^BvO zKN!?JJ6_AddVBa@0lGuLaG- z)^nQ##OQ|cjSAOrSx>|Ia`~9drZ!AsRA}Sj8u@InuK}&Je1x8s0ShP~lcD_cu_0+8 zS8rP)7XWt+p}xPOm5|fD1XQ4`kB$ETf+VdF`b86Z+4N*qk&5ZNfyvuti6^h;{{V~X z#RrbF)vLQn9elmjMqmLTSdb|?0DEZm1NP}ZPH-jTYahz!{Q%^BN?<^->(v7pkz6uJ zB9TUeZ(IEjSYNxvWBRys;fUmivF-N${cLn(<0k?%ciZjISrX3)eGP^kbXWm!mGzG# zQ!t5!C<*41uM&1Pc1S+QT{^3J4=z6?46Zk)`298d==Cu}ik~w>>__N-^WUd+A4-Nc zVdgTANcj034k3u}wJ$m^rT+lg&k>)Rwo}CT<>Hj4ju|HsaRN6`Hjac{)qsxgOfJwIr_t;a?5C_F_%g%2kg zM$LF{w6oayWRVIs$@0+yXwFpV%7k(t^{>Kl($1yC@ePbdQ6iEl9MP2$#5M~^uuD?P z>PqDUA1%NUmL+()yB@r~7mR1KE+$$v@%3efs5Hh-7@>+KRv{r`$r(6oi!-k zwink!snG>{50#*6V`sHKLKrU#vsf^y$c0H9=EJ;3w*Djh>OBsI;U}b^| zMkTFcrD&#xN#Ka2{i2dWKbBeKbS1d$nLysVZHBg9?rG(VA9~gWl`KId)mTW?nFH}D z84k*TfYKFb+P5Wt%r@XLsflV8u{V~_lVL8jSoA4X6|}}hHW(wZJM}3W2eK2}sLMfN z?(4YU*RPcu+TbHC_@aCvNanBOu$k{FxPZrESZryP>A?hsRx7jXRE5WJ#hDn8dI~JH zyLe1)Qza}mzllX0(=D3mF|RuIBb;mxmXk2zp>-b(4^Gpjd3{aqV{QYo!wS|d97X=e#G~Rl@r>)eDi7jE?77_s3_N?kj+mWlwlN+y`U@cR@i}yaa9fFD|>*vzcTulqdZNHT9o`f zlkne$q-)lzPTZ`_@(OMB(kv`g9RP_74UOxPu?%#2*NdcT!+vXdD%LU|K-J>6I(bzY zhRFs{tfe=x?aR~bN$n$W?sY*>Xioerd?sxwM#BIuGD)wE$=sS!lykOzaC8PYow*>EiJU-)n;ykmO@ii5T2Uh0@?xiIw%gZ}`N za!E0#00iyjWT#Z@-dfi|9zAH2;{O2IukgZ$iYPCJe-AP6X3_>!f_#M2!yV-T#~(F_ z=MwNTNUBg4lz@UFMNUfXitkGej=S|0@r@;mR+?fX$>pRm#Md2}xixaG78slIth*zV 
z6Vcuf&SmL%YFoD#&+!L}YLvBZqrmS+6xu40g)E`$0L)M(k^rNQq;2fr^l*0XB^#MG zpd_%=>_ZaBSUMtd3Hd^<^vd0bza1VuH`cF+Yg<+1yFy9xkMnL}RMArz++i~s>l^%p z4ZB$mYAn@}Y0jacwv3@HJ2+7lRpbF;jL1rFfNyY4`rqQXvc3t=$%D_+uML{O4>cKQ z4rD6Kv7+)-ZsB{9Mu`6a4@6>oVUoGwDtW7TT9$9FYH8@gqCs9_kyi4IU?2YgC0(6I z$JRmF`!BtjaBTRuH-}l|)k!{1yr;@&7cpN?af#S!7{W9GFcDp_7SR5c~3W_dUXiV#PV6%~u^e<(fn zdU=M(ra|=VHI+D84&I%)VT9j zZL0HVLqc1y?$U8xsZ%+d{5kwT{8s!r&n29F41QY`4AyH}n3WMTI8;8({{YqqjSylr zV3MG!o|wm%6?|saE+uNhkY~ue{mqr&!c=g8ldxD22X;Nihiy+^89?Q$sE+kp70eWD z%I>^s&pZ+=%&L5{0n?Uk1>@Wi+o%0I5NXxUL{Bm0(AN7(+$C(agWG8RFX#7-ydwB( zmm{kS%N0o-WMv!R%f4UJU3Y&F{{XVyOT#~gczoO0nY=?+9R=*wBI-yXjMFTduw#eP`CObnsPLXc^p( z74Sws8*T}jAnL>ry9@~5zx@XK={_9C*`Cjt3{koW)VrzdzRSq``vQNd>raWi9)~HB z$YLH57_}skLeeaLN|`pQ8-0ef2KVjn@1msXqH)(4wP>iCn4#!VQoW{kxl4$l#A6prCfu>8bgL%2puuC)j9vXb_x}F?_39hMzlT|$ z!+#Lg!{;&)eRcYaep6r6kU5c;%M5=%9s_JUZ+h*bZ*U$jmfFWhJ&b_L;4F8bZamL@ zb6MK6b2VuD>$@us-(mi}{{T*p!C~*qENQn?jrZmU0YM(X zpO0^2*9pW2U3LbF;{l5n&6&hpwHGAsvJbbh*M8sRHPvmLL9rbx{Dco?qejQ_l_)pw z`hfocuWp+7I~`9KU(C$Mu`)*<)Qy02K7Kh6KTm#`c)o0P3|v#xm1Kssh4S3`cD|t9 zkJ#^}IiwuPLa5CJZ;K-pI(}ra86dq}30OJg{CzyWz-hT`5#Qh2qi7pd;#{QCs!e61 zlnG)XnkCTX%=-ctk8f}fP{T0UKMNd8s?{!1$VW;!S8AbTPHvcDTY>vep@ z^WqchX15Hnz>ooM&@QTyc~2Q7wZxse>$Zov-n!-ESjui9VEGMtEAlU|Q>TT;Rckjg zKmOM~2fm1iKiWSDOPZ(Sn)uLo7vZMpulYXx?K$^Zo_sOw^9e17?Yo#7rKoq zkO8AnTF7?Bas}tx7crBcO#x_KRI*c&o7Qaa4NmSRPPHc81XYhPD_(VAcmt# z9TUxEBav0s#~?k=@77S#$5N~fD9gx`NA*t2$~8t{UQM3rHQbMF@grRhxf;mkv~KE* zrTH?kQh5Lye%s#2*G(~*3b_ZK{8!_FB&xNeMg#(`q%hMj9TjcppONgIve-s8xy)hmg@Lk8xQ4rW|BWb4kOY)&}0Cn`~LvfqohqH z<#CdAUSsm5gBe~csdh`k8J)tkJ1xY1P+g9;KQ74k(f8jf#sw&)r z2+ysEB|-zW(IoUM5wGBT`zm}3|SgJ1&6dqpzdv@Pj7a2bBl`YF& zM|4&($^7pnkElp=eI&NX`R(@T9%N=)+1hP#rj44B#5lEfUaE@2Jo z_t#(vKd)j>UT-H1)5{KJUXwXW0c*fZ7MK3cJ_F&i(qS!(==^e>+1h;!~8zWRo8X*N5p<;JWe3Sg$RH zUP?gh2pStYJtB>hovR)~tcx$6n~Z3yZDe2%a(4S9dxaewo-Lmm_zIuNdRFTeo|7ne z^=v!& zTVkagC@idfw!2@@`~36(j)rR+XD?qLH#=y&#CV!``k2h5ck5kF zRpt?r!IYJBnl9m87;9S~{$O-^i^h0%Sh7baEsF0uG%-;!#=pqiWHUy;eQ$w2%y;qC 
zUOu#M;C$zYYroUojyW=vOBH4^!Vb{u;gFHr8*1^b3XCMUbroDNzkd-vu|xDlGbJ#mt!vNb>QEa0oKDuuG{Uh zK6=L?kMX~WF|y_=WoZbhI=tBe(b|?#$ROIF+-bhRu-Q8A-=@AE@IG!V!SN&HE5lbA z^zv1LIQ-cxah(H)ByALO*wOBLp2Ma&Zwg0B^qFrCLh2`)%~{}yd8CN)A2Jf8jiGKk zO@rzL5$PhDH5eQXuOB^l1liAWqS~WFJKnL1f$cQXR@{!ruE7!1DgqdgMmn$>vD}QZAXaH=*kLoqqj$5n0ZL zaCek_(ShA9-D|brXzAjTPYatO^>G^kc@fC4KQQlg{&mH4 zg)>T)X4sXZ2RO+N7?FNU73D>*J^iYXVr6s^0mY;b9EFjA+JF=Sd+&q8@?T3ioK{lS zHL?;TQcfRM(#;SaVnP8e=SUHxRVr6b2QXMEVEAgd)W~J2WTU9FyrR@%sBa<%VI#(l zqi(`Q^7%0b`O&!?xu{i4OJfmueJxs-=eG>9Onk37BZ_C@gIefOus| zDT6qc7@~Y-vn_sn<$~(Qox21+Tk$=&*dLwsQ);L2oL)xN+jX(8a-?Q)6NsTkb$4ap zci1s;wI|e}fK}H+GR6EpLzyXYR>bnY)Nx5YL0y^no=1v$wXRE|H{Yf(HlP-rkQ!n4 z1CpAW)S}(&oYB|4=IYO$Dof@_(@M!79k(taxb2U)40OFdlB_aVfd2sL<$>&l6TD9< zpQ-ZVX54Jd3O2a%*gpVoeJI65Z&k}djH>qxmBhxqSsUu3l@&v(sP(e{06CcP1-SwL z054g1I2(RWyq}M@ZyV8o|M@L;zx=IkJLPc`$Zrp(pU|4Zu?=} z^s3K^rn3WMqp$=vraxUQNU*!r3X*?3k=O!Q_V_#Q#FcqVH4W1RY=vc(Yn6qAGcZ@0 z!^V~?mK*f2iXq?Oqdz2(V6NAf>x9uUzKpz&dv`ViHl(#a-JhSDQ zBXvQ)PajBakNN$!Pg~a$PK@Iy^2cVR)*ictoJA|b(JOtqXoe~@NMg)2zKNFIyuLBY zs?T~>79n{~*}#HG+hptl7w5P5wY?6O;IGuDn~H>z@<=Pl_2BXd7Nv+DNfYdbDj69> zgIW#g^z{wm0hoDBS~X_T&?NEMWiHGK1oWUg zUh{OFxIs|8&3nLZgMtSOaVkk&k|L9cVyPlW2djl>crYuo>P2>4w{l4%+kF?9$y37I z@a`_P6;wx3jE;F8 z5#fJ-Bxi|n50bH7>fCFUB)H1ejd8eAF&JV~M5=9_SdbgIC$^77czWD5ZF0<8X((c+ zSjU(c5#@ryMU(SBs9gpdFk=3) zsysbBuZ^!1_ewK)mS>4qLRBJHmL>{$mL*CJY%6H?AQLy2%%(fTSekU^^DNhqV`W9@h zN#Y!Iwz72Lq9jUF*pB1cX`?ejJt-@<5&W&A`GBs-GJ&J{fzzsSbj3E2GCcw4C$X)p zVUe!fx`nMWqTU*16q3aA`5vJcDJxfsAlpKu1XglS4nE-iQ`@SnRkg=>n#JjKZBmvY zDSarC7LrXBkRIFK%CXvl7ye))W~3^}a`Y;z%`lwTf>Y;&Crum3TYR)gJV@=x0tp}r zxcpgzuas-J>^?&;ZVLj+&wnOkiW3}Vi!FE+c@8I*o%T!jA0?8?qzM}DJ9tUjQC4$Y zMq&hV=<9SQSl6+MND=0RX1ev_4H0n!OT;)(3wekV8Av}WM`OK^Yt=a1geu(1;)6T1 z3^_#uE4nO5>RE^t7({QvnPJzdhnL#y^x9wRB1HTl^S%BOR0FED8Z2S@`LQVGX)=)dOGWEn3KgG zEX!VlD_km5A(doTjbR6yh*hVBF_L)~Eyr4O(b)<=hSTP<8?-PC{ zeBh2| zF|0rd%LE%%8!A8x@Ck0EGc}jQw*07RdZsn$SNN!k>-eU|LfM4Y>iX$SPEEYy3 zdvvx3R%4zmc=G6}9x7rRm|9J^C-hEVCbPv#6Js9@Mx!O4Fbfe)9IWh|+tQKmf`CgX 
z=I+F>3)4K7LcMvZK}Mu={c1Fj%_0Ytz*x}|5<9459oVlTr(00K5ilMj$I|f4b+HqU z7BV;dm}8Dqu+zm9ph7tDF@RKZA$y=IS87MT7^zPsJU1muaNJvR`Fcm;5{>at2QD!Q z3aA?-u>^t!!m8zI2XHKS`TOxrXl!O9Kmg}uLynqUP1&zPlv0B_GEE$c5+#ye9AZ$a zd665Kz}J>a_Y2hTiLS0Y&*>6 zF>%&xenM+)j=O4d@j?S1qf%74ZbP>)JzHkbP^Y(UZtVdUlxly)So5yKlF%1a|{2cz6p zUhNB$Q?o3zrF)i9U`WJAZz=O4^3R|E94kiP_6x`!{d4gzEboDs8%zn~9z)ve=BiaM zIjUZLd_2Ao_&M7p33Bw_;^9@A&k1ev8Bo`|5axo=P>T)voy*t|&tBtCJR)`tX{!0rhu#Qy+?ysmS`87ebe&(>=fMD@{!!1ecsCu^Ljzmvj8&05Xlb{?9{r>&>`i8DjgJ&xVCu;TMuP`6U z;=(xNQODeY%U}cVrBU0k>*r6$d7gj7e1z}9Ic-B;X(L7iM(q;rGp*>24gCH3v*{(& z>SY^)?GV1-{v%KducS0B_#^UH7V#z_;E@@ZYwy3;tu+9#Ku*8v%GLP-bcaOis|>|9 z$xx7h3&rVOw{88k@zNX>OEWB${MA)W^4ahI0O1{dl`7@bud@1jlmlw=DO@zkeR)mW z=UsT!wP>jtJ3WWM>Y^HxRj{dHRIauehPxk~bR>~grId!vs6_R-@tqgDG9890f93$~ ze^5HsR?J~0tsJ3!UH;DXlPmZoUK3JFG2`1K2!mlf=}h86t0uW1C{<*UIT+BWQO(eG z(yGj_4oe|w?RBt_eHCkB6yq8Q=LoREG_~U68z_%^E5x~Gegl?%2tvLY{5<{zdP(fv ziz#$cjtCiGn{h0)?MW*U$sHYJiX;SWxiJLwcbd0GN#>L4rDUwd-b&69!$lcZcN|ER zutssrmhMPy$FG$7hF9XA$%B7C!Thg_#MNQOJ(HyU>^kDMcv8k{c(OU`lxa(n$-@L# z)pQ~+CNV0-3xU{fP&ozJ(>nI;)_8c~8M`4GOod8SuDF&-O0@RpJRVXD2;v0{jKx6N zXC&md0E92$DPGhW`WUlVpmSG>6{vfD6} zoO0Ms0!r}~aF7pWMs{tn*8c#&s-Q87tx{ZCcwe09_w`Dvhz}RBAE=I6mA06#CQfS^ zCYF_HWLr8(f0*T+mNnwt_D477!~jnF_3>wl*IqX9JeFCSZ{ruad54n&=egNGI|O&@ z?0jdJVy2QnvQvs_ZQ7nOBRz>^U?-3cPE2boqk1EJb-n0(rTE*7{tfu8;;A5Lqyp?x z$YPCA8Q#23`c>=!W+(e^xjl3~miD?@4x`rp0KbLwfAPBE^;{05Prung+HwHm+bGW8d4RIU^gQ%!5Ibm+XFeRPj5h;!cV98rb*iy-Ffi*3={z3v@ga(2b(Q z(tdrmI&q4PeCR_05EgPf>#*Mc0J-T^YI8KGV*$Le$;P+6fAQ5$8j%?)^#OMCKa=@= zxe?p<*KU=Gcxy~1;i-~jud}=#L+HFQ;d})uv0b-5HYw&-fV{QhAV;Ms+$(L#@30i> zT>DbVYMl#_re`4bXRRDjNi|5z#H|S;HgORf;A9FyopOBw3b7M-YGgqiSvVjKW;TU( zbtaSrMqVT?fmGt8H!m=f4`w9Dkw=Njm&fq6JYqVZ!`3;-a?2c1F4P_OI@v+6Nj-eS zp61rKMSfpf6`H%q+@BNrRv4GRj>X-~Q^?D9mQJ9*jkQ?Wpgx)=^2#()>~cfN*&GsLY#-}|ik$E{6o(Urc ztcAhauH{tDH+|U9tZF`8<3*O5WMTR1nIn&CAPfI${S$I$|;3zV$!@)aIXt~MhQC;$mpmhAbV(Sdf?Z4@bw%BnYUhxmm;H^O}ZU@zaM@nM~Nz9 zuvPLf*MsUS+VKUL)9Pag?ZwDRBadgWatU?deM-P=?7FQU0_`THW$j9}WwA#oSb3uD 
znB5Xa@?AKxtDh(gWVOx#y;)1-4(4>U3~K3k}k0vAky&X^7o2II)LxjnVh zsHs_@jfy%sm}u9oSz2_mK(vbyGI@(9W{IJZxRvyRc@420HwwspZ~bwL503(L+hs@b z^E%*i9Tguv3b~IJPxW(89%^fm-4iS*zM&I@2zAGTM=^~XvnnAW`wpJXHFzSD3G78; zDn3*O#EL<*$T%s0owROBhvas|F)iJW*w;p-g`X)FYHL)Zf&=7BEMe;`i6lr6xMDe} zE>E|me=lY_DQmEDcWY0cx9XGR@=zwJW`k8Byu1em}nIss)7&`>SN1&QU3rMz^8`c zN;RU3>uvCB)4*LNvc^V|O#(PPX&(_IF$?^?00`L2fnyFrwVaGps9AH498=AC5#$ZX z!LBSy6(9ntoLD;m2HQoRl9hQdUKhu~ik{rlKBNlw3!v}((o#5^NdBnNRYIW!#))%= zS9X;hVmwcmLx54%bLs;kB0AM8`Ww_7=HgpF!t%xLbIu`_xcYd~2;qerfY6PU21oTd zCzx=+V4fbeo}rk{_(wh}WU}aI8qqKs-`F3l?4dKpPCCfc$IY3 zr;2<+)6aJkUm1*rps{j!ZZz^pT+PG~3-a8KS9{X@_4@;Q2Fb^X64h*`YP9%^1-L6q zB+zh;04Iwuk!zFh$PI!D?9E!U8Bw9lmbh5+&=}!AgSks?T;MxH_fpu83ufS&GysPP z{>p%C4IaSs!yT8Yku{<=vGk_1Tk9uD;4mzbET#4LGL_Svf_%>~ohjc)toXjJEo|c# z7bLc_lwTQI6}0g%676D8x^1r);=FhP<|sQSeIMe2o>v!!?WcOnAC9<&vaGc&tq{s~Qem3b;fEPRz_kly&Z9b9p=+d(Q2XQ{B}3l!~Jrpe3%R(IsPx9)CgZuPwRil#$~JnX;>ZN zCsWT)1;Ttu;b`!lA33RJT5;E4tP#tWLF^H92G3!~zPokKv44wMIx;D-i+4plfLXha z{l75x{=Izz_|f5t_`d*Stk$<$#gx3yZd8!B0W?g=fm^Uq?tPdO--sX&mv7OL=41!h zkaRn5uKxhjA4eHXz1O<>7vgB+nkwOcsF6Hlt1Eeb7#k`)qlvIKfk<;6hPUo}pPs*X zZ#|QfGaa)Sjr_%pKqQ_U1o!$bi6j>KcrZqM=n0Q~i5gB7V+{&dPCmM=@5z%V09 ze%tTW6=_kV2&NP0huFBMKaUbw%lM1KSy-iwjJQtgy}Gcl`{X^w_t^f2r8r36%hj07 zdeGAoqxz2Dannx{8&^C7G9y-a&2m&alEGq)Dkf48^ar+E4}=^Uc8m8K(21b{N{ z-_m}4xA*D?RSl-rF0B?^yNY|^kF9vb?IF6m7&>-&td<>JS~!wD zG5hixJ09QX)ZQAJ(Wn|yfg7=6zT|w5>#nqAvSnImxVxx=28GCs-FXHlR1WwU2B);qhwg!i8{{W%ZiR^lL;|~=Z-VmW;wY8W%z!98w zk(DF`SP$`c?me~${W{h7drF>KyWgI&$q$eo8C?nBz;8uC<-X+Wzqd)`#kYvDwco?o zi`huAISh=}E5l^R^)CVu$TI@k2upyVmf(w`dv#5AQ>7f27QQ@T=)VzHKspKEg8VU` zh~Z$!*Nf>>K(gu{^T)1Nohxi zr*w#*tt3v<2;F$I#JdGZZ4>S6eLelU`Iq9HY7p=*!^YFP{Iea3{(d3&hwrDfFL6It z;s|WulPzVk79d4~=`9A5L@FO@CG^DOx@>o0zfQ)_m&J-1y0xW5$IfPr(w*4!8o3My z7SZC|v`4qd2jG=w0g2A!sje!_c*crAFN;z6(nSKqT1SlfOzKl!k;RdYn2to1T;|Bj zk?_=+MzV1dePvqqQ1OH-I1|C#`7Y_c%1WK-_71?}X^MfN`ts9HFJEOFsh2~Efu~#f zuW4FJRpt_0lr=QquIQnW%V|js?%U8C(wz-->RVE+m=)l8VvWC@Euq~{ux2aRl0Z5K 
zw_&c6*u+C(#e9?0Nn^;VfI`rA8?Me{Z4Lft*?=T~Sb#={Md2=`d4^KNNM&CoThBb9 zaHIt+y~8qu01m+SQ8%tH`2`GSoCH}kR53Uw|ciSseUw~smGK_2|_$n6Kg8vO5wu>yOHho%{AZ6C!};V0h`&qPsvPF=;2X zp(_adxuiYE5YFRg*dJm3T?1<3P>YhVCUy|I25rc!cyT;^!+U>S{B^#(=<>CS>#FiA z#S_Tf_<^oX828uMpMP)Hpr?Z>EzMSx5;MGu3~uVA0idC~>tnkTqrSoI+pR1P8G$p` z`n`REqp@B5K{g_$3^emyu+b%FVf7J!JE%b0IvXK}zO+CmT?k&R7VJlEDWZ5+$>Q+< zLa88z2fq8Ce&eK=oM3wqW9Oa<%T}MQ2xikZ_aGlUnF?rk*wFUtEdKzA~7Ah^ZL=qkc4}vF8==j)w*>ov|zLJ zHl~$iD4<6Y`J7jKcOzi-CtCRhK0l<+ z%+qm?CBF46USlCze+v$HZwwK|m;pHgxhl}5Ook8ZG<)I!Z1+;(D%$&pdw zZc4@{0a=QXqNJW9x$o0Vzly8pvga*AIVn|^Ra$gl1ImiJ4JR@G05DyWe&_eYkLEJ!>!bRhrx}XFKHu` znJo_NR3YKpTkcO1HLsKUYot}O$p${UW1muqjgcG^N-bUv7^fa>c}ZnpgZyNAx9`M|KK}mzuTCmO z!2J`kA_{FBClu)OUb>xzCHSk{v%;zirkW|ijNvVhnKNw0c2o%s{y{mZ$x#q zaxBwvt7k{Z^UxOB$r${=H)0qNJ0E?4^U~=WjcI8%O=xuGAy6(PVDQpW$J#%{a&8{{ zv4Q-!m>>6OiS-6xNq!?of~S3+hrLq^Ry3MSa?B>jax>Nj2_RycbZOa12ZBgFJa%MI zNae^SlN}Z<;KvEpOERj?(;^m0WE*4r#UX%JXWM>L%h(gXs;XFQrAaTh(rl>uf?b49 zGVGLKJ8~_sH{7UU?t17s?FLA-hx^wsyEPizkm15Q=7`77E<@;XG^MXvGb7iUM_;Fi z#~gsl(f~Fz?xT?;?i0skf!3^lhVh_Rg<3I>vtCPne4BQ7g zJ7+nprEFBkjVrWTc05QjAoNEdQRrYFi!Oj00FL|Wy5Ii*jPjJSO?@Z>3>C!guDUNX zgilD*xaF=SsNw?>G*3-?OZ6HlGd872i-e;aY)SQUW>VEzDP0sKtZf3So?qqj6J#Sf zC)?zLdvyMddGj|X@Wq)*M@B0kMfDHGV=c_^*CKWid$C|Z-oPhwb6dyYGl+|17-Ws$ zSuF*U1Y{g}v_NnzwHhqVrYc5=8evY|s`#9Lh$_>WGM5=H$kHa%NTkaiw!o^si7UMi zJPmwxjd2cx5vhaiqHD}Jf-hrjhfu#!jv4Lj(?=OPv5D-dVIYO$VrF9Akf^Msfji{k zlrh$d#928@`Ab(`#6Ry6Ts8!N*^U+B?9BTnR8rhTWQK7d+j7=}vdxXZVdjLy8R2So zOhz#1{K80t{$L_kAn5m2I|X_M%(f|HEYYhJP+7ZL9IR0ZTUEqTmh#t+ZcP^!2;So*qn+e0&4#4cj$LFY>~dHIA5 zA0!F6A#h3NHt2YJlHVaD_x(GSdrBoQ6|!&NXW$b59(vEk0}Lu zRZ0(Yqz?}bW8n+f9y>!LU-PddWh^aNW@e7j!`rR6r7K)LmR5}M^3jJD8XjbBN`}*~ zzQ7YHVulj6Oa30OJpTYlszsZVX(o-FZ;*>*GPJHW+x%c=1a=W)Zrv~Jd|YD6`HS)0l!8$zZszyb+=Na2Ur z4*viT6hnqlHa&SMTSc#?T~5%I+?LOWd_x5&B!)orhj#fVXaZROCqg$7rpDEd~^ckAjcc)Z&vYSRPPUCo@1g6{_5}_Ow(AN!jUG=Xk zm0J-s1*U?WlSgjMS|MG_aAD=E`Uex;y)Ww{)2*F;6UhB#sWG{FHSXGu-iQ;>X 
zoK25)%~zw7QBNIetV)*}aVqS{@ogpSto@n3#1%U|_s(fvs#?S*-GdYmEvTe2E4*={ zh4nKMN$kVyRlmdS)gbX5+;4{C#z7^)T?YF8}#T=oNr`|o)4o#MXDzzC+dzIuymN}W`DISB0)7?JUre64lH)v*?Fq$4Q%XQ>HDwhHVvGFbYx{+@rQm1nUerRyxQsqDmpbQGO~ z+&Bkd?#sb-aw~e}X=>Z5uLbH7$dm$oL-%DW3lwO>MGJaIa^qSD((E-SQ>a|s?bnWR4VLZcBp&12tZUpE0RFnVMt4+bXw;s*Q;9C- z1*={xn#|J23^k**=h$}hoV2n-6pBCMDzP%Mx6E<&@6dTWce04LVo@~KX)4J-9wNLD z$W}?%C?NqW=1HTXB#i((xPh{nsbG@Rd5mgA!f30!V0qKKa^y(v5G-->ZIkSv07>eP z@b-8yxohTFEG=tQ)}@nWxixC%%96A{3M67)R~k}0#6|$Fg;+vrQ6btMKD~Obg*oiq zn&V6HQP`}F)t1#7OKLdmpnIj5yt2fFgB05Di!dOBJb4Zy?be)rRT??)(aIsC8nKuJ zGJOb@-OB|Gpvkk$_dUmb??uOoEkl6tn@nuWQnjm=R{Jp2ZjbOsK9;n31pnOO=tF0y`qbvydgN{MxyYx^n8<~-x)#rS;Jus)v`PVDt;`a!tUyQD>qyOfRh zugn%*cjMMIZ-lXH!kEm9h z8?Ra_h||wvXyuY-kz9!3KnO1>d12Q#aD>R`NfwO08z+)q7946+d^d;TX(NK9u*VBB z$sB?wZYYGyrXG-?RD-Y8HEW6#o>*t?Vd>)un74`yie|<>7P^$O3zWuFvWZa*7`dCU z#b<@zHb*@jDxujHV)Ov=qMi(R-Goo8!>%6rIi>W zTdL(ypEW_*){}ChXlMc#WOSVEuIBZ`afK&uIa}29Q8zE(TAdltRm?Uw8D6BmVjnIc z=7pWbbp_T<^JS3pk3+hH?IDe&p2+xz%+5e%yK)zTS>DFfL#826N2HBaRm(Dgv$NZM z&+#oBv9T|QtY_qkv=)jFK3PMfDi(oNb?_EK0%!sihQ`lD$&OrfE^sMw{MWpZGtD$Y zoqCf+>u_V+m38EH%nR~G}!#Yi6$zRE)7S0PhpS-s`DTT<-4Q>^lZ>2IwlO&sy)v_Q3RHvtj?higG#3}K2hWv4d zu{J_%9(NxcFo}M$YI4UFXGLvpVw?j83`j0@zzqT1rky7}fB;CjHa3&DhvaCacyX{3 z8pQETfLSsYQd3GhtsRJ386@LXB#ro~3-2J0)&|Qd=du)y71QA|l`FxOyB(OZu+;&% zI>{vV<(N$H!~xh>M~ybaOojWOR<(X9@wwtG#%~{&g`=woTXdATdapkcj~c3JBAd-8 zw`kqC6Q)hI%-Dm%izXUbY1zl%Xv8zCHHMOLaKwVNNWHx;I4S_x3a%SRTXU$@q|q0W z2`u|O$=o_7c+3;MJrh-@ZL*Qt;WP{$525qU(QdIRmlpaWO7cm(r8epRs}#o z2W?Dk*P_Qe`5oJXZ&>T>hsWkH+5Z3w<}CPI8E~{xCHe5}+GLiv=JAIo`H1kdgk2uFPkip zZQKR`=$(7${GW5a<5MoUETtShEaSFXybCoV;#4wO^Te|}n%s=C~R5uMG}>UGsF_HjSeUnkO#omTlD%Z732tzO8VdUr2@=Dsze;7 zeMORtT(vSaW)-0=8?SIqv^(sd=m*FB`k1do*38+d$&p`guljzyTUNy;FT>RGwyhW} z~B?Z<(%D~2TqC5M1{@r&3(k^MyeY2vh z5V6Ko^W|vd+=_#03hb%<$G1aeYOBNK%O|K4q8D6#$G7(ChM+a2Q^m)a27z!j-~B&+ zhqH$n3pRY0B09)snrMxJp8o)t05|Gl2hT`lL!ElF16w6_55U~gC*l2hudFRM zh3;8cmNjoMt3}GQ$U)Z#hn}Eo)0#wuo0~>_am7ht=6t 
z&piYrNB$A`mbOFqjv|#gYf>7n%2xNh5J1uC`^@WK{y!EHXJfe6>~O zip-n@_9xV6m0v}W_a#ijTN>>Ws;+}ST#uZ-+9D zxe4*~Z(^WH;xSvkLP;FS<_2JZf+c|ZN~3@z0J9J{q-ImbLE?;KOX2G4PQ*C^`5af2 z_JgdJ|d_WOIAJ6Qq1|-ARU;rOb84 zz8wiw6I{OnP_i`bIFdv7WFSpqHJe~p^AG6TMv+yLS(rFI;|lfXh{S}e53ia(2hO8G5-LF zcIrL3Lt1WE+EHVRi=HevZTa$2m7;1hv}QI*8FZxo0H^Q&0MAbRB@0X9>||-S<6&J? zjgomO1NxEH%sp?f%SVil8)p2rk&D4zCYoI-^*9I%5IYi1fY$am)pz0dhhnGWit%J` z(}q|jJYZ)s&KX^}gB)Rkp8n3tM{eCgSe&_t*;+_zTsSW5lhW}13OQ}cWs;Q4Bv@0B zY0NIM1c-l$Tp9hp`dcSu2S;4DTAnF$`6?+T+@mZF1jv$+C#883z5LmfC<+Nx+|8m$ z(pwqK4A|QK9;uMC450+KG7wjd)_`^dGI_qz%Ap*92vk)*&FR}yJ%{lP&*O8>Vp>&7 z$0|rd!la%n#PC&f7z$ZP*Cz+N5_puD=RgMWd+)a$dHC$V4P_V9c5?42MBnjUNMuKw z7TF85>7=0x%LKO!bH=a@B*jo{v`Po0@nU>UF?N3JRe~Dg^0kYrR@Lm3_L33i((<9N7o|`U*n$EF2uaZQKERos z#Q5p;ew{1tTD;|@VH(0@h|D=J{4zn=Z3XofBYIx_Hkp2GuD!*;`~JEa9bC4h2QyDvp#yf+Q+1XaXJ=~p3JEow<8l^h-UF*|U1 zPQ0H?$V0O*cPtl6zO3xxKaOFmQpJa@Ha8JoiLeo4%i>up!C9(Eer5sv8tVi528-}PEWqqai)tf<*c519eg~jH#|vM ziECX&%yyi4i~J>6`~E6V2}$^gYBh_+mL$h#a#7LdC370b1ym8gAgQR49xqZ7!9YB< z)djooy~}ZbqGqGb1&l_b17d@tPYf&NIg6T<;J#T-K#dh` z2GKnV3T3g!Gg}ct#T<3SlFa5t4@xN}hyy4Ncq^eEW0fEhJB{=u?-6Hn6n=_4I(-eQ zXBA>$C@|GX#L5&MfT~NhoOYnUBHWLsr*^F@%Tbn%VBbsg1Co6=i;<|`{NRfi;bIoU zippx?X^9|6nOSU25C}`_yAvv~8y&!XAGeQ`j@}-|X`Fo|zFHuP!7R>?5D}6FgAXme zTy^f--=vF0>CuqVjyANn(x#qMPti(VuhG2bA}c(A7MiP+Wxc$!}g;)#NP5>%b> zlF-9PW6b6ptGwq)QZ$%pq*Q4eYupc4QQjef8`&DSvopNb@%$&2u%v7iX7bqjv=TLU zj38ESE5~d?vu=k4<7<8^{5bIij|5eR>bw(De3I9eC6L*ICn9>Nsuz}=#QlH-Fe7{G zzlUdQNIEBnqI0QN0K}Qr_7ncBaWTi@_^bXQX7M(x-M+T4*<@%^Audul)W>0+)T*_L z?9NK)8S6^qfai}n{5`?_c1JOft6a&F@YMK^slUjt4?S8tDP9t*oAvQ>`!oM)W$@_+8`NzF#+Q!rmgfxq99!#?55Obf~E%@&i+VN?09E zk+fWwL`gJF$BJz7>0x_d=CBN-PEohxkD5biQmP9Y44J<@hv)Tr&&2#aYu*y^9f4lW z>X1%2+l>`#DxvU;{pmJec2UnAymGgZmTktZEF1aG(N*W11p!duSZGTB&P zEn*jhP!+NeLludlu`59BF5?B^idiFVpc__?K^wncBKXpbmGc)67Uf!Ijf)QcldYY5 z_XndT7NH(e5lif#8d_7GkemMmNG}}Y%`;Lai zY2WGQ&{yn7w`2QtUE%m*5AQGBXTTwSGaDx9ZdH{$Lyr1mE(&hNdxmE5B~tz{@pL| z?R<_N=hQ0)9nTWG+WhwFGnLHDRg}pZE1cD+X~OWyZDW&*dnxsH@1gVCs7tw*v1#lq 
z>n{*vG62VXfGgN{-ofpz{raWKyjG&~o<(j$K^YGHC%@_aen(KS$3i@fkvpBp%oGTT z*L{+E`*;4G_0VC1Nhs@*8oPIed_xWc!LjBcmP>LoChIPvc49)u8iDFi%s?(ihP(W3 z>+sVDIH(rD> z`Hw~;I{?5K(SZ_5Lv~d}-u**Fje)Xy`nv6!SnM`h#x~p47*ZBYVQCVnCM1offFLUH zZe<#OW*?bX)MNUEVo0Xlb^I@c{{WNK0j!}qnLb*3sp^>e>63Y(o<)M_N#%+_lQdvN zIeuQjh~iiiy`OS=$0d@hkFRQ_>U?~~b>)6dx+!m02>0bgL2f*l5;h6Z+3IJ-b#mBD zjzh!QT#Y+a>r5&|a~{oJTze`btgT7dkfllH(eqXui91bofbk7DlIA}-jCPn;7JSHZ zAQp>=9QIRApdEsYb_oYsAbj<9I+LS9e_C05Q7@wHBdTFS0gxX`5c91LMBzCEKvzVaQU(3YAtn(c4)jDDlXk zq*BWv+b)^}3Z})=aj!3l@!1K8E*&1a-3K0h>e+>BO9hCgtu462%A!Onp6&+uDFm+| zNILyB(!1GwibeDh;~|K7iySLi!>MTxVmo{LkGCHG06j1hMQ#|WrCP~eDB*w=-v@IwxIB*S4mkmkI0a zfW<<9Z0<$gz;a*;W`-E%K#emFAxdof?2ZTf-Z z`Cqy2!?8WP^_`yyG&OAFt7brwYYS!>WSP_}G@$5}LO>vqtsRgH71wQ&R>k2iS`?tJ z*YbAOa>&JkvGvTyi3|MAAI-fn?W5nTyEe2A&&$v5j8k()Zn`@r6ywLqfR`y|yZu}b z5XUWOk@(D2ip{Avwuy(Cw)B=i8`oPc(ytCR=9bM^F2@R0my8Ir*j`?Qb_Ys9J@?yM z&^><12&&`j$3iJ1fX)-ldDt{$@jQOy`)hvtKHVx5qoT6N(|MI&%r&4`7_?3aX9bVu zZ}@G#JMD&*i5;((-4Q-CQ~3OAw-i;~^JVtafJ zy@L^2E4=bD%Qci!ZR2u6!cP4jz(0OItwMdN#6X`U)>~z8vrRm25}Zh}9yD zc}iKh{{VqW9f?+#XHHAFw^TSq#KU@<*?KjiR9GZs zX$7X);N%SyRqxxk#)io1Ru2(QYu2?r2`EOT<_?m1s?sOsK?4#_{y3H6uKMYgKM@VA zFu4@-LrxLr*orz*Nnw?ayulIUpwFSMMC|tLI-#`j70k2)V|YwsUiQmE-ESWqm{}fU zHBvmwFsN9hB1Rmz1MD>2%KLltgFQzXUozFINn%<*Pal>{21zTQ$Pr5ER0j6h^?RO< z#%5|+$UKp(vPl4pOtQtoSk=y>ZVj?j@f-UcZ)2h|A6FbAq^Q!?kO3Yas5y8v(1|?T9t88zW^fYWgxlnqTgUwu)Qy(qcQp+MVk@9cP?5(l#r8u9y z)b*O=W6X%^tchC9UN#vKit&Cr4iEm*v%T#}-%O`b)S4epSw%viU?h(PpNg^>XU+>b z4OxT4He<*uFJ=oS-%P{Vm4g6J!s~wBFU8^JS!x`l#>IL`G;$*X9z)4mFn_9*KYquj zRcDti5wG-D62y>qy)26$nM1R)Os8Agldd}My}i#^OOKz=rB0>vzW3vHgHnJ*aLD1g zmOh;YCq(-Z?Y(s?%c5E}E5JB+nyRr?GFLIHE6xF|i0Vv2bd@}!1Nm$010Vs3T@kKb z5%qXK_e%A%lX8Ww{do z&*nI8Qz{k&gwfYvzLwVPLfd=SVZV~MC7-B@#c5=PRV7^nc-Zq|KnodB=^1Txf;{EUl>#7OlPDszb^eoxB8G4Th786zYe4(td6@|}@M{6@w< z>gS-fKZ#@JfkaK@yb96~BOS`4EP$R`=j=&404tTQWO21A@s_Q7P&y?k_@wd3cFI;o zk$^qlWy+6H*JUamWO3K&MTx6os1lTxIp=8>CNdb^80F<_0w>mB=E$ zPK&h>dFxuUXbT9?xQYklRLD4i%}_uF%NCQ|Z1p77a=H81?3~lsk=998-MOP6OC!8; 
zkqGyiAo6hJHms=K^{%Y)HERlKik`d)4WPA^W8~4gN24h0K-ZZm*JJ+x0YL706YH>3 zPi~z_iu!0E+yT|6A@Tuk*03h%KdL4Py8s6yGc;C`{abgbRBpn{Wbm~>RxlX!${Xa@}RV)zm z@|on|wPO)yD%kbmbqdJ27}iTL(%IF1MU|NZfI6=t29;ESzO~%V;&Dw+!~%`EptXsP zKkQl&=8W{9X71APrIjw8ossY#csU{%VgB`0AFtiD<#ayUGTs8EDx zD!2uS_&>0@lQc3S%Nrx&Axth!V_SA+xnisGuAz`dn^={Y6bJcmrnMk$p4SSapE1Lu zWcoeRoyTqO%>$6DoWfkj(w3snZo5%hTm6I~YW1x0l&zqjt=>9};9LSC+m@7aVybJ13VDh5=+d1S)P=O2|n7 z`I{he9d7Z?Vkx{txcPK!Vljn8Vj&B{RDz0QL&PVRU(>y+sMg0~eLwv(9o+StjaJ6h z%;Tnv#SE_`COPe?m0g{W7B3>Q=m0ds@`X?}=`&eMc6qB@#w1e2988{c5)MqUzSt2j z7>M@Pi+5w&sKH~U?a=-ABgrgO4QU(x)J;m0t(>gEOp9I-6pVw+4>uBC2-DoNa98dK z(e^%&U101>W}UW%t@#}wjolJfNn^SL_Vo@LAmqa-z=U#E(>kBwG!<+vk&=|dJ!UB<%z ze=i<|kr_pVDLXRIFW35-uG8AuXpaLpw~o>(cn zM@?_Y3%qM0%0U5>$t8hP+m^u6^|QeEyp%U9Qp~|hc^=Js_M(<(rI{vsF*xrFn9r9A z`C$H5-!GD*1ty>D0l(fpQ^g#vX@Gcs$`d(a3KT6iR+$&-<*Q3xLc3_oD=d=vqtxuA z`J7Jl$7%;I=TpjLs@cC^He2#d^5n?M7HHhVZGin+o1&eRFRrL1SaUI7> zd_hwoM+bt)(+Oq~S`|wX89B{7vKTB%c<4O5AyNTdfye+k6VuEnads;e{D|d_RMD1{ zQ^Lybtso$;sgsE+3E&yIFeLQ1c6k`bT1MS-Os2+~?aCv6fA`rRlkl?Dw7K6C1*vWs zNnA{oqn=2V7I@6SI+&FCF(F4`wHssuv2kXC*1d?#czY#mthnmZU9)PuYL?PTrg_}B zm@orMrcUQ!JGeU%OPR>4)~8zk0Fzf6yVmzIvt6=TI3ovAtgE!-SP|+~0EHu`UM`aF ziLf}h@b;?I&Q0O>$i-R?LFPeX$2lQgISb|%%A6CEBjc1a3UI z%83^dk`LO%{=s^aU`xpHcAZo#+M-Bu7Shw|9*00QgmOL71o zy#u>1Gpq^d#l-VX0FOX$ziZ{Wj{SQ6RLt>Yb9n0bdp5FA+Np_Y?!iG`80#THBuQ?| zKNyi$lX7=rOGrBn?R@tzn5?8RQ>z?lET=4twfW0onFF-GVPFwmTx*WU9RdcHx`ZDg*CF*A(&#Lt@P;_ zvod;OuHj=Yy_BZ3$3&2K@zK7Mt56Jm&HZ}jq^wKfhfyk@#F-Yw!&U{YR!J67D)0^F zyCFdgYs3S#9~rr1`L%1fdiyO4YRKx%>1?HMi6aP z*}d^K%XL=Kjbdv|w=Mllb~f;@(zzv85qaxSZgz$@OhEKM8NvjsZn-dP0lDO{JY}9Q^hL2>IW|FZC-lA(Taq8{Zvz`p_=YVdCsdoWOFu3UI+^>+xNJ)j<`BA)zrDJDg zk@$IPKmaoOi=NM%=(FnYEw>sb%Kj8i+%4Bo7yKeNWdA6)Y$=ul`&zAzQ{I4m}f^-!X*!NHm z()q6r=KlZ>vHt+XBJi$K)<*U?8E;p|%QP?^EKoB9DH|CTf!P`yIS>fz=Uim-Vmuq; zI>OYgMwJQ`=8_Ov8yBrzh?bSd{{T%^v!V~5%r(jStLb=0HT|K|1P@vDvihH)5k<8i z*yRJ2@8)OA$xUW*bL8#)PD0U|_DWAW*^L4K00|l>AC(nRRaeG zD--;6g?kBJSFbIEo@Y>ANTubX!(eQC{1f``sNG4;Ze$fIHmyv&K7U_+O55;8{XY)) 
zzI*c|cA|n2ZU`I_qS8na8eM_uQ7bu7`G5*enYyl}PGORt3v&7L;&)Q<*a8^@jw9_S z5Yax_$nJSCP*`yQ^B)D)vo>|2p~0+AUPXmuMIuM+ReeCN+gwJ$8(>J*`t8;#V`t0c zauVYNF4;fgNn$C)NT72o=BEZoqX{Tt-G=`F!|Cr1HNr!Q{hpzGH~fvQZXzAH7UFNi z!A;k~WZT9xan&*T86D~U6&YvZ(X)Aal1F7GuVYNm&9X!wFY2z}Lq42G#rpMP= z=u^8KqCQW~WhH<-hd1RUt?6Uta07t#X!_hohGoji0k3AuRmVeW)o~-sUNI9`l2Qsd zMw3y16ocj1()Su#arA6YUFE%P@s(;zenXLk{O&A)Oj3EMG$WB@kJJ^|5Ov6~2yiT8 zTp}$#ljZy5xma=hc1?xuXMmXVF(|t-Tk!t?h3r>~lTB%>pCT2Eu$FlP7{LqMLHKD- z{{SH;W3c6ZIgYLm6Gy^TW0DghL6OK2BS>0rr%@6<`zw;tOi&UOOA7DMdg#6*%36!V z`220@ic*!@JMtKT+)>x_tPsTV_L5mTujawDz0Nnc1cEmS&NO zVm5S7zP3Q#wsqH6==gv7i@1`$kLUo?qG}VVpGyx*taEg$(8FNFLJ1zk7H3vfAd+l6 zWIwTDJdWDf)`s`61)+#qbYjTo{<<1>y>)9u3{zwy4#?6(?4=SQ>QKr!swX!*>1Jehl*$+$ zAOHrvw7VQfW!8p=ZmfL}C4EI4Q{F%Z64cIR^0aJW;hm{eRaJVF)I}VFV4pazs#Cw2 z3wPJB01k=xJ5WuOua>$aQ(MdKP!!|~2PAxsI_>k+b}}d_m0rSY5;O$p9fG^!e{-+z zueV)e@Z%NyK6a?pT4+3{EEs8DYW;v6{{TbRD;V+{m30}EQ*j&$ag>_9*ReROVeHyd zVpT$pk_kvfkjbFDnLq+CrD6&_UX=>Z6Oq5+{9Sa#<7rc^L911b=80`7mD!acjyGpg zWFfgOiiK$z+i|fA>xio(y=Hrrp$hD=PYiV>>h_UUvQHzakacNx7PJxjg)J z*Mc5j65h9zY)#x;k-d7vS+b#_SD8QtIHXhDvaljOa$ibv_5l9?tj8reVN?cbZZZ{s zvl$tPEX%1hShknG&YXa{Jx62gT87=~FN%(|D)KOe@z!H!e^6vbV;XY+5wKisNMd)s zoYeAaOK_QW&aIO(b#>ZRaJhIHC!tq}J0O9IdZhOId&4q_B zbsRv`PbIXfyd3T@I`i~RE=LuJnj0e83aMT=>Xtr5H6CL!_Tu4*Sd=>`8ZrXj+xCo9 zp~>5@VMnobo)Q=WpG&t-FPCH7hu7Ix-__DQY_|n$i$jNaG1jLtGsr>puP;hF7B1X4 z{$vC<5`D-eMV^ALZ#QmAQ6$SauJOk<;sg#zBLhJ3XHBUm+qV5Dp{VEAd>28>r-q)X z)t+m?7LAjPD3|znirgoaV2Z+h0LTIuv zRygRTvPE|*^$#d31Z9}(%zgG8nScYgMa__@n8C}FTT5E4X8LI4vjc~j0-X+O3Moz_ zv17hK4ky1$aE{hrN@}$E#jiRmpMF|7wubm!~0 z+r=t@A)V7F!~NM`V`^Y?vL;&9&*AolSuaRk0@I4DWdqnybm)B0>Z4&ncK~Q9>I(~6 zc(GV{B4aeR&3MYA7m^tQkA6ULMv>V;8`E;w?cK+SF5|3tejg7|fp{8;WgRUS5VDzF z5m#P?;GjQXIkS=3dx`=seURj2y#+uWkE-YvZjEgh-YX_<@>+MXBnIsmND{#TESaw<;yk8m4Z4Syvuj~kAv z4Ds8s4am}2M>!*M3z4zNhe+5TkQ=x^Dk#G~YK3uT)q4?9QEPU7tQfesP~V$==0@YQ z>^Io=V`~*+k*;dv<`~=6>`jc85-iN-GN{p#j9wzqDS! 
zHZs44@`k-?_?djzDwi@>vdC0QjOofwOy)rR!zmmzK;%|qadXwFHcA|{SFOz(2RP5>skBkXiX19gqgKuCRSU z*m>EB+cs<7#bY%(m_y$zr1Ar6bOc5f=mJ?lwgQy)GF? zH$SK=0r)eB{4K8rWAOg~!+tN9Clr%qzlZ%=O9qwsWitc}cuBReK)G!WCb4n!KLkETD$ zWgzh?<7Af6*QE7RKje;RG}^*MfFpU*XF)oi7KLRmsAqGVU^t%*6ebh!mO7b8vT{(H z7X%p6uK|&fB8qEA&W%SRcF{71BoKLx5*J-H%lK~!JW*b2b`m*8G7*!r3Q735ET8#^JFimf`>Tq&KdeLS8;R(3>*Hc#`N*acmHJB{mOWS+NUuEA0y zNiIQUq)04FV`ZdyWGX->M{LL!N)HjW0Z0R-V!o#Ot^|m;U2c37c&tcngRj<$kKz13 zlc(XCVZ=Om?2XLsPa{B!^P)jF@f!Y5C4pV_xFli3ZFqs~-MUclWG?soRc9T6#o5UZ zI_woINixG>xihfySR$yE$CP+1ilx9IN`U<3WSMKt1&C!~!Z&qh*b2p6n6M=G+23$S z#`o8iLCE5A)gJ7gWcHK&=1DlT{+K{^Z+#FDDjp>A_$2gB`lG7TCYXzpz2y0Rc1abB znn-S?GQ~k&?sBev?TKv1;)`>~Zp4hzOEt(LQOlG|D(hq2 z00W5M$X|~y$1IVwscJ~%@=oI{r$>o9I@s;M)2@%wDjY>uIBD1W=DxfBH&EBD zQl`Xn)_Oi_S$vwy&Sn90es(qbcIe!8Rtc`d9eIN(W6XE#KW>JGn8?w1IQLIhm%ks^ z{{TMSV@Jahv%Ihl5x_jc+ave(_}5*EnMCF*>s$`Xuf-Okvj#gM1o2ybZyF&|8C8>f zkKbHC>vlHLwE=doDoDp;jPF=)fwFB1ndUYz@RDULSZXky1+-4Jpnlswr~d#w zPeVcGkXx3!syX5~ogiV~zkdG!9a~?aX*ky@q!B9x5s#CgJ9~Hk0IyY^4)7z(6qyTB z#`URkl%o^MM+4Hj_}SOrw^DY+jv=bLfw_Ciukc61{vPoD4&RFWO!LawDb}T7T2hiT zDCWQbK;jsnB%P8wdv(=i$YOF>XqaOpmc7|k+O=Ik)@!&Cx<1v2+t8%*`Y1)9ww4#36lj&8jwsA( zva$aF7-Jzogeg23hWFP3vtM4ZtD8e{unY1ps$%~DmyC5m5%$n~T{ZFc8v}OC`EL-{ zH>+HYY0Dj$(S&Nz1T74Jcac%aR^K8C(DvMJ7r?c=ITdP{9G{&gK5wqaMQvkwV_<)p zs6!C3N}iTo1G3CCJ)YdhTKc)_#-QA1Jtra zkW8?=Ax)^<7hYU>_W58RL}>lTZpg&fsZ^M=K~c5sdA7Y>EXR0mmMyZDXe3bAzddKL zmgVNLnAv#_Og}k9>;UZA(LVh#$i;H6hHf@0Ixx*OsHxJ1LRplkX?*Dd_ig~7-u1G0 z{O|ToOpPh$vo>fX4MVTV)8HgVuF*i5Y{HP^{4~_=vI%A)Ymvp}O3be;jlJvu ze&b;F-?vJ0o*z%+SzDH@RmM=pD>;(Hs6->c;q+BGiX?n`Tq zNE-#75E^M!)+qJCceEa10B>65-?v&kL0;P8?3^veOst$*84RWq6gme=-*N#u?oa%q z-=#xDn1Dt6;ye;w#+$WC94lJG(~9ldmn++qqJs9omb8UKAq2L{urdQ57D7~%BjosT z;yV+;6qBM?m72|tq#EtuQ>Wd|Y!pjoau?m^z`gyo13&)5cNW)}o zazB)UROo)F+RQlH&_N!Ec+HD3wV5ZPSrm)XX)+NO9BM0p+vHA1SkZa2zc*clcB!Eo{-R^nMyTUYf;{pAqvSnO(|&=8C=Fa zd)X!ZhsfysJ}OFZR;7Hr^Hqn>lHQqImH;yMKbJs|HLy?E>!;|LlftwZ<(^8AqQV+J 
z1oj}mF|C!-_SUus>N=shX%6tY=%Z+zP&M(F@-=0e)K+ar=STkHlLI0zen2^6~nd?W)S8nHtQ~nSFq1UJpzj30b46CAOXu> zrR;PF@5{O9=qSl*)R`-euvoaW+LlyO2NK7LRaNfAm~Y?$JA0n8db2@F?RR)u$3H4I zSqkxt5y)(QZ)2|Gv2TDn^+!GK%42)^=#t{#wqt5Mo=~A$7>st5I+iNjLjYVa$qc{~ z{t>UYT>Hhfyi+`Ha{>~~L(Hj4sEn*DVdUd6U^{3k5*VLveV*N2Vthv>Ow5?hGW|We z_A4;+IQh!nt~);7sKLyydyZm)sCom7|KM1Vh3M0j4>-A0k|>PQO9JH^VXSsJBrCu$jgeu*u~3}m1MUv z&lHj;ms`u`JkBez8Y+8(>Fw(3>@{|%%B{7OjTvc(Ljfmr}B<@Rv4#@7ncG>i>2lVT~ z;V%!?!7U74O+}jVy(NXJG;@f++Ll%49~}Yz0NYhl?lww`8%E_iHE!LG!&9y!3(TUs zc|dPO0Cq;k{tv?222*1L^oub;Gx|#7lKA6nGX*20wKx2&+Vr@$s#zwROAnbxW zk@q|4eT-I%tBKa`Dp=nkqb_%Fm}c=FQM7N&aWWt$%~b^@Y; zSrRzdnAw2@mez*<0B)n9@XkWB81eOd%2UkXi@8eiz$9=OYBJ(R*0dQ%E$`oFsf{S^ zk$!%uL5~Iix#XPLp>pLaej>(Jrxh1;GDRGuc`eC-UJ=bIp)E>3e4L0Tp&8FZc z6>{RQ(61!EAHPlQ(3_hkL;c>l+-yMdm=tx;J=Bryy=-*$8W(?z&|9_Z#~r+dRX$s? zH#LY#o)3aR3DJAS?T8^iRhQO3@Md6wjNjf`MnvKPZr}mieIblv!~xgJxw%7bA!7`dDwWAXs}xNk zid0Cn!cqs8%0|0-`54F{TbhDHb>;i_+Z7w&-6EM~u^Cw7GNO|(JK?WUSb+tP$mBQQ zrx{Fx<8n6e)vIH{uM9aDq)N>u;xM6<#?lf1Wt6wSVyEAU*wAD0ULmP&cfRJmWtQ?c znDL7lZq(nVHPE^ZYGtIb*&Io{#zM5 z(oqL67qDH9-ueU@-W$SX@R5u6*23135#(7$NadAg47w@Zj>5=G=oCtKDqE6yZAB5gf+sS7oHLxuXs@l#7Y)a^f+UaWkU6?q+-K?n0R zU;hAVhSs)6Sl6lbR-kNkH1gd9mYEo`NYTooB<@+1f5l$=9fwp-*#7|IRcS`{V0#3a z<}4ABrDY+xf_wIe4R7pv*)2ztb6>~ZcaCW+R=Z>@#YrVf7DB8DXJFjRXg4oVDtc3C zwUP&2b6(ob@)Frj7QSNWSI zN|l5#U>Qm*qer&-kInx85X4UQgjQqA)3`XJhTNRPGQ!}J&fG}#ownm%=c1|q082x) zBKGIS2@a#yDdAxS_hlnx}+RIp?Y_)W+JQ7JEjUUTJWmbJ22xU@9BYHaS0NJ$E zYam*B`BXZVYHn~A5r1FDfZM!<$umyXo%GAcR+ zWeNOK;k%hS(q#4LRLMy^NeTAPr|9T5N_gSqNV+n4{+ElMTNX8w=_q z(tnml#>iu<7|iVmj=0ZIriQ*VPL}oENZiFh{vFBvOcU^x;#l1*85#=BYC$6eBo}UC zxo_~Pk%wRhS}K^@-HaR)-g-uYTiL86`g-gkC{|wOTo!j;<4PO32fs~%2F+HU7?Bp` zikS!(tjdjS$$GmLI01Qx&Jih4a3iUBd!_5vtdlvGL#a_fK4V8`&AJW!w-~Jx_ z#L}NEpmOZQ4Q;fOnxjdk*(dXTc_RM+t?=i4ywJDuRxv&fxgEJf#b%|7XUZO0S@UBG zvJk|M24xx}L=ZRns%p9Gn9M{NTa&|(wOYN&&BD!IDCHsv;SaA%_Rv$sog@6*l!EKT zZ-KELY&3YQAHx?PG~7(3p23D^B1d)fMjkhbf|WaAqDewoi)1W#d&3mr#MMlkm6W&C zPr(gn(})_x%DoGW5oC3M_Q@1mxCdj%_J`b&{&@vRxUA3B{za~ 
z5DgL{WmZ)Vj7w|&Rvjp;_HtS4m~S;CZ}%%n1!)KS$McaLl2$Dn!Kv65*-fct`67nz z@XQiTXByGkrjw-Ywl*VAE<+t?V$y;YoIc-3WG+s$G#bjLlMI-+9LTiCkwyvBX`%L* zLg2h{W5Wr8xNtbknX!+WS@HLAgvDaHWRfUdBmkqJDZ~@BW;$cPQT{uJ5#hNgWxOG7 zTOJkhK@#Gmoh6VOn{TM2Hpa`OjNF&L(G|UqE~GKuB*=J{@T-@hR>c@?neKrLM9xfX z##pxJWF2VfRH$Xp1z0huO9cybc7%n7B{{RU~Y@^mijzFX^ z0SNq{8mWewWv(&>#}WEJAzOtlrA}OQ7GBFFa>tLWlg3Goib$frC5el>*{M3emRh+R zjHATOw&Tmsk7L=5KPHnQQSvzP22&3Nv3*U58aXKhyE_dQJdko^3^?*`JUe#=R*VoE zDV?bdo^*>WRiR!BB>Wt&o+V#Pad^X$-lQ?GV!-y%7m1lMlpuTWl&z4VX6>2eWiLFD z88$4_1S8o@I4SG_A>F^U$Eu^C3Bs2OTM0sRyqzuS+GFmbHs$V#&ufT9#{svSNpehmj*J zy5%D9^)MQdf$RzDLmigN;H^@#k5(VUmE%WN;~T)n(q{^8RA$2{OHJ8%CHLBkJZcE$oxH4p3*Ka%B8oEsrnI)3d zV;zF32?P#)XyP_C8=Vby)p0j~rq?B@1Q5pdM3!CPk~))s@wAFRm|8%LLV?(VOCjJ& zTvjU}WwVjY-h!~Z3s+i!0|=Gv2Uab}DQ1E;PzeEi!?W+zvS}8YmT7~}ZM&|FJS z-AZ5L3NJs1Af=4V;iDX;iaJew62ezAyM%#J)Urnt5Z+?|$J9cD(|mR+#R)7)e8x;H+CMs1agU(xUrm-FbhD zWp=JW0W$tQV=Y)nhRVffgUX&!WPu4{vS|=0^7yH3k_c0_+22ma*Q9c^2|vE2Pl?Hx z09u>#htw$4)np)@hZR?hc)s>tB=0Jpcqfx;_^5R^by6{q>cRNEh;!Jij&xo;6 zT8k=>)r5;IVvZ$eR*DpffIa7BRsg8uvRI89d_75SOJ+|JlVZA0a_UuLylnYP19|39 zJsT@C2JFrzU_Cs#MTxIND63N|nMBUyrfiNPg-9i;YtLFnX#CkE9$atblL}Tvc>=S! 
z5r$3GgJT&^S|U#1c<;WQN+!cuk)j8ZXa% zF4Mh}oA^w^I@e~&c@WC6#89I$_q8s<11DwYEQ%gQ0QF3}f;~>DB(G{W%r5*p^>Hz?>i|r-&=cW*a-v@6%}VJ~_)xi=QIb zUl_vNcJcGZBZ|C;(X~0%9XQ6w?h5t*`AOUocBXqVW-O*BFD*5RvENW@`QB-WD+y1V z6Y?rm5H`phxSfV0stSc_{{Rz)jh3YiIwvOdpiv|6nOWkRO}~Fuj}QU{3_$JQn(R?7 z5C|jE;1jmT-D6rOIFTNg6MkMRN_Z0u<6D@DH}6drJHozn{*asu@y3-(b+el;i_j3202AoE2Fe_ zC3>pzeN1h|BVI9xoFtL#yC3F9`tH`${?h0gtD#5$*PRDmX&X+T#WFT8V<}#{ zoQX2_Y~HM}tFxG=+SPCf47=uJ9F?Ug0`OqVvh_2WKNQffPN=jf-L)D@@s;x=tuHkj z1eS3h7bmj%)DLsED+JMHAhTw-3pZkFc^gnc3{gF`n39pV6n*(}M5F?s4kUNcG&z40 z)XZivxRZAzys z&Vt;2RK7dI7`ol6*$sO6$qq1oiy3VcIHAAiIRlA4t9HJXd$%~I^L)s*cC zP^eppG3-AkAO$D3_tq40?<4(9J4V5&U8`4?bd`$6BajyaGYJ^4CptUiz#jYPo?T1~ z!c2{PPaawk#Wkm~X?Ot6oc(xm7bPjmWAW8Dl+CMGv1-TC#cCX(StLy`voMp%W{m`!({~zt*~>snlqyD0sfY~OD%zW6Qk(tlB1VE}iUY{>yw)*RRKk8KbW`_=58h&^#h6@^6@WZ2^Uyh-CV@AyM zYo+o@8#^IB488dMyHhKMAe(_J#jOEv+j)G3Oq=LrdS=*bnsIUqI!&J5t)F7JHX3O)#YB@KzAOC1_Px@j50=y6K#xG5Qr% z+ilm+--H#Q$#~j~7EOjp#DFYLp<<0AX(e{pB!!72Zr%0ffF%D<}0(1yx0Dj!D^ghL%p{l!DAhx!gUyK*(^AG}m7-&uQPm zas^J-Y0Sr=<>C8N-;22Xe3&`g9b$8S*th)#JO7sIMdWvm|1m}v#T2Yn8>7nG|Ip$4#c|aucCZSiO1)06J5s5TJ@!?GEE=V zMm&LHD*?Ewf@EJYVrEt4{{ZBuKpvvME?zgV!aN0sf2c9TC79kuf1_6ZWOYD0;8&M7 zLD)#dd!g&>evm`3;lKJC{m)ykrNCDrgK3Rbz1-zI--dG*vGJr)Ve?g|tq}nvOntas z9vnk3)N)s>yn`K?Nn^L{6nR9jR=|~%xg*f3bi`{~KIgDI*Xh!q1k#fyn7`r3WLKBO zcMWdqB=3`0O2kKRtF2 z0b{WF%t)5v-Ih7blq^)LNPAJq$Z!k@DhiDf4BWV!9Br70VHT=I!u`zbyrBQgFNFHXu+YX4tWh$?R+CLKI z7astKS&+hvC#cQt@xHVOP9j6VD}J1IJX+a1*myGWU@v`3#GYMuW{D$!@l=hNfSy{S z6)}y!5gvfwE`z(_&k{#~e=Fd%A!CIxDYzj=4^5Aib^t z!8%{Bo-6W_%I_eFBg1ZfDys*Bf;;veDOxjPX`_b4rTV(Woy8g{02q-=C^4_8dy)%j z*<_aB%jYDuJ*LJ(0z;30)?|_>qlQ1HRYU<2Rv;Za4k$@181@gbxNN_NvC>q}Vx-1n z=Z;xrsWiN-7~_n2Y~m$ij@*b6DNS&FAGWCR)J|KCC}gujC`35xm*#?`b0dKyE|A0n z&N(<=fH?5dk;DyWVmmV8BTEtK^JSG8RsbS7LOThOG3Bxc0RknDJvH?uS*!&06%v-_ zr6l4WKu~jxCn9h&?5%;#eQV zJ|J3H#HDsV5aKJCRvP6JAk6@cScH+7O(aA4hC*Fiyox&g^AD;Dc6(uJ-mrhu^?EW zkOHp6F?R@k*r*(fdjjW?2e1nxWpVioaJ%(K{q|c@EGiuNq0@;74y2Hk1A9K;4FSHg 
zfn})-Fw!j@d1U@vvA>v-ujeeBb@1S@Ugwu_vCwv7X0495S`9o@YZzxew$71rCv(>FMO=cAQu0%YZ zx{x&P```)IySIIgmw0w-a6?}umT@L;CuQFxA&*srlE(i4$&BpkD1szty8NlgkWR|i zsgO5P2E^KWwPKZH^3qZs%*d_g$asyo5b+=mE42XobPP4TOtsasvHc57@p%y@xT2LM zw7$dzBoce}CBEHx*@Y~zsJ|cc^ptA8n?Pu_*4z1{t(LcxV-NeiR7#cORacu@9$;1M z0SJJ%$@`vNJiuLw#zxjkY*~0#vb)!25+!I^QLM?JHr#cy#E+dF`^M#PelZZN(ZX@n z>&h%NwxH#>xuU3bC`XnDBhN**Ay9OqRrD;yiFBwVHu3VQTWTXQl$d|R<6%i?&jn6M zRh5I1K|1M-pvn&!;^V~I{pOj5aiaeIk#4-!S|CBznxt|s)I}Nv6H5}1#>{d0Tg`|U01iPrYj211H7mlGEo02-9Av!AtV_ER%#oy< z-~eAzoiSpgifnYs*dp%zO%DrC8~b}LG|ON(*d2B!^pA=gBBWP-muAZd-Nc%RNC-Si zgTxZrK+NSIKiDdnsnM+ifCE5kfbt| z3c*InX5>j?c?41hx303j0A-ROiJdl!55FBbB-f^79Veo?>t4ky9#h|z!H#8V;)Qsb z2~a~9aygxqIs@)I_8op&@nmgL$4bc(-kxJL=zPg5BI}!!RRMo1sMgDJ-un~RJjcwq zi?KI}vgnh{648Q2;{-}JjzQke`0+eIZ4w8z`lF}fh;laOtwLpS6eq}7iG#~SC{Von zJ84=}@2*E%_|kCrV@28O0dIePKIpK_c3^4*`!R~6lV-PYTB$wjC2GR6vssTF3xUSS zG7W9RNQ)Z3sgBiGM0jygR(SGxOpI8#tSOpAuFEM&PZ-cdqkBc-plo*7B^HOnFr=$*s&pi z3J53j9rDl>RKzzwb+^-(`*ltttrxt&k_>XUR_G@5%oAiNK@K`=2+<&(#ojS_Fsqde z!X)HnJXKWzBWPInZH8rGjtOeet5NAafg(rsvOhUeIMy-Eok5j$p_xXzcKp4Pt-96X zd}S+@UNmQG436GelT6GJj@*IW6q0ql=oahr_U;+xv5}I*wxo)@fFwnTQwoKBk)fp> zyJ!L5-(4)%%;z=3P8*VMX#h$x}_e zNwdXyZ))GEKN5Jkn<3&0_acLePik3}Lu?>tBq%4`4S~PYud4h}m!pR8ZcDAo|O&vn? 
ze6t{xBMYwW*@^6aeY<~7raU)KjG}0Sit-T4FCBz%c>}+KKkfGGvv_;MQC{)HSL~=X ztR;M;eZQEp5PqpxwwKd7Osac&LX)lZ+*k)Q$`i6a$`5`8Qa2x@oq6F5)-S_!hEPD#iz5 zKQxi|>_#ZhcYpd$L6lW zgSh=)1N8*|0Fda<6nXMjip4By%qzz7O5A+krDjqU_-LygYrFOwj=SrwMOwXu6H(Xj z)hqr(j5u5a>Niti=lYU?xs=4@tI}EHhRB!85=!WjJd$_r6I{Em5C`1;niLq^oVRS= zsYc3Jj{8@wII$ACBFDE*$J#(&h~u(7UBNxM{hHs!r;=JiTY+ZsW2}g=vbv`?EbGWK z6WMjXUDwh`>wEa3WWXiZXxMoMNJfaH`OhjvA}Do0?Vu%5umMxwU2~{C?PhKLY;E_~ zb@?+ZRAs>H@}1oAOJuLrrAFLT*37b4nZtUORULpNogmtS_t4|lmtn=$gAl}CQxu>q z#E~r12*F7pb^*sC8-np4o8P{=jT8i_2o|WUE=4N$3mYR%0pSHA68f#e0ClCMrg|vix>EI+hEvinU#R{O&#x zF~O_=i=U3QRydmeY8-^QtaNhOk;_NTm&$@dIfJzbeMd*REX1F<>BeUvN|nm*ky}dK z9${Lwbr7IvR4$%I1fTe4eqO*)(m9SlBXYGIUNR^muW4ZNC3i5w;D8KVcgxgBA<5DC zAP$!2^LYG)ucU-d#cTw$T2d7d1&^3x*%eeK%I|tR_8kpE^G|4z$7D{a*|dWPpMs#h zlg8Q*PG_}RIxiJhvpd5qeA$N~7*M2E!xXEHZR&R*Fl%UKAiOx3dyijO=-v!tXl`Qk3% zb{o?u9lg&Td-RrD(Nmx4?No)LYkx=0wCxun+#~J;U=6=>$m_EmXmo9yrDz^3hE66G!7CMBGyfHX%{{U6Rbix%4wp49`-H~*|?LTr5&~3m~1m*&q@2Xiz%(z#aF*p8o&?TL++IXBfo8(wbP__nBhXluJm!FbIsp zM~@wzDt$zHyJ!u!qz`W*E3B=Kjwsn@c@VUr!FL>nxd6g2Km*j;*Fapg1zDiSu%2SC z5qTqJQ0fMc4WCLce*XY{^@gFtk_P_(mAS|ew@}Drs7>_tK3DG^B|%G|9B*tTvgF{%k;uw_1omwIA* z>#_GAC${?69%m0$O6JWgQZ(^IBUWVc_^*F7fxY+d@4lyN$tN%|K7DS7F>!@LCWbl8 zx0>YhmQm%zP(`8Y=EM$UY-`+qPM3wVM$L$6K@$~_NQ)3vjSCU}A^Eg7?t1((I17;1 zuTGv;H^-zNF|9I zCtdZ`Z2;35wB^TwHh}F-lAOkRNh;U7j+rd3J`y91Vo@eR-PeKNx2L~;kCOb) zhlp3M>^{6jDv(MQRP~S3lW6|{_j(%O!qKydu~!+7hA7~bZJKh->ZVOdQFkLL8gG4m zZ(;{b@}Egx2VS-&k&4{*PbpbRjn~TGHs8840Dk?nJMUGv0VZKIHc23%=*u=f$;8WL z#B;b<7A6XOpufvLP(G2!gSI66fO>m7<*^vbmn50%LuqA=S_@0JMpr|yLC|BceU7`1 zq_Ov_<1(^jtkiqFUL=Ji*bg91Lwol7{@NY7W0%ObT3jtlHyJA&e0&@(%Sa0n6b{-Q zfg`_u-#s#smS&wI2LaaWL!3v#-vzmA;_u`tO~;gq{nRti{xcn;b>W3Gp% z45ag0F0ahGwT2rDuH~1vsQ3Bpus7}3=_bcPBp8S%Sx%bWrjhw|3@Mg_FQwXKeFXo9Lj z9z$og`|sHH>qbAr_*9VBHB7~42#E>M0f!~hfOxjgW83;~tnsWVK~^T7MtD?tg`;D# z+SZqQe!o^htbFw~CDmb9YP<>j z%mXp_-y!{ab3u6-#jPHxvo4sj=}}folVil$1dgSwlr<`q*j*V^orN3tb}l<9Wd~&T znw&>&)r0tTjxpNtP<_B#x}jIT~*20ji^$jqF9+!#(b*> 
zYUF~7cFDc=KHa*;Ty}RgkH=&#v{YoXcEwb36|GJ}iBIM%0Ry_oSN{O29F{7e*;&Qz zfpcNy9egI!4irP8XVzsUtBc5GZ(GfHk<3-&nsls@TY^a^A<%>AEA8iK49~XN{t%91f%fk8ctmEhgmq1qtRhNg8S6Q^Yqd;ua^kQ7a1g z?p|MHS(HYgY{~+&4_?cHHM7T&7=<`=wWbgxW_c=EHYJtXD)A{muMiEV^P}4DhzwUu zeU(sUZ~#kB`n_4DgaiG0vGqyb%#vki`h!~Bb`pCEl2{7q{HA0mRFV~d3$@yYQ=mZX z{?k!=_vul{OIA0QA0kr#QCc~oEd#SFv73C6i*aA^4X*XpyjD7_IU5-{aChrhl`hD) z8<2!d)uqv>0DC}$zlM1X40yRvHW)gAzzu>j_&hzhYfzUNl=A0?$@+L+63JZbB$EbE zs_Ipw(zm4+FU&~LiftlGl9@^YaWnYnrL7EhJVE7TY&^87TD5li34G9M)`jfLA95KP z%u5#%2mDoC0C&>w!4DSV{{R&6w{HF;V`wzk%b6?r)wVo`NS>3+Eb7WZCL2{`K&1C+ z$0heYKds>^z96fc@+mc%;~NRGEJBjUDk8SxAIu(qGg-Ez=!Me)ytZZvo*%>B0Lt47 zcNcTSttK|4b!n-z$b>rvf(aQVS1}|YXlu%HO91WHMvJM2jLo<+;w@tIMDuVxH-{Zt`h9O6|*C(PMA<=FI-8nTe#x$U6Vw1KxD*;BAQG`2{N zcKJY0Qe3VszAA4HRkF=5m6xRy-ek4pX;r+)$RHAS1d)PH_>u!}t+o6ka%yb^ej6hj zjxb|iB|^f;>_lcIVu86x$k7PiBBYcMvYy*H)xC)D!YHdk!JflQGnv?Pi2|TLS|RN0 zks=brfV{o?lD`VJrk?G&;u!i?365IDcNM3PkI)p+D~gpR zv3`}CHz$+Hilp+gR`*jPNJipqA$I_j3J)!v0b`b-guzKi3YPLQHbo7pmo3C4Fv13=u(L&2RItt}S9u)SkrzO&;{*omRa6n` z0FbJCH;Qf>{GT4RLRe`Gl?T7=4$CDpu@#Fk%ptL22`tn>F-4HX46IpLA_D`UeheGV zati43bBe;lBSltrsa(x9xFD%J`m4x@Tgr5W1Z~A#!)b{i{GAS#(7kWO3k}4#lY)BC z#b_*cO;IE|R8|f-kkA?1v!fFn#129|b+Iy7e-WhBJT;O-kd_l>YLMzV=P@`27$<1_ zxf!GG5HCMVY9=R-O=TGX3vk<5=qlJ=Bv|q>Fow@1%vE)-PGvP~E6GoVhhwE-#Wc)7 zDvXMB2dW3y=`?fS@W+VjoA~9D$CCP5*tqgCS(W2ehZ$NU$wb!Xkyc3v<-osi0R)c{ zzB>LA+GcU2*R5n(ghQ~Jmx`E3A(~EZfr&xKh;SQhc%<+LigI&eZTP}DAw`oY*y{SP z95JPW40BAphU5?q1ZeG@(0B6Kb+l+SRqYOfC($90UMj|%c`l1vEbWV*!5Pfl7qU>m z(In3VZ(hBQEZ$rJPSP@Al@&n=BQSESwQjU$tyfDi$zCj_IICA!8Z%T1^D6{c*-E=P zBW~&#AYM`uN$fP;ui%^jm+R=u6(b?BR{P85Rs>fm92|LUaIfU>cF^0g({d$Rc&orC z!@dKOTJu<%(nW~KHj0>}{vfl>HFkB`c;fQF=i51adf|sONf{sJATuu)2Uiq z!Eg`9`{Iy8YZ9+w8L8Z>X2h}#h(j2TYZ_E(c3W+z$YK?}Jlu_!(uG$U;cDI|#`u0b zn1ku8A1)gcLf}lXdILi?n?$OyC>x7NSP%;XA!9X}7IEKM9NaCWwkCpd>-4a!@*Qi8_9nK@O))P92;N_r94n}bv1JR~usnO6 zH{R;_bHmoD(ye|xmE6Tk2~t?B$3G{6Mrew2QbSmf1TJ(`k50y}sXQ_+ z)oK!C>({d~!KTwZk%Uzn5s4N~F7k3Qx}G~caPgOkay}f!{v9?hYPI6IJ*ruJd?X6? 
zrdXn?(9a)qkyw5`l`R8X`5@Ac{0y$WM$7ZJl0tOPzva?(cvFQk+HBi!tnolCKY5kKkW z!DHT9L=tE6tGq9mT-D61aN-)?D*h71xGdBw5soUetcw(yCHb%lW}*{jNR7nKyD1Z}P{ zIF)y%*gQCu&^-=5kgb$5*N%t=+L-#QAhRQ%~W{Xhg40S6duomIEgfRiaWL z8VIc1w!?a3+E4LzWwG9*`C0Db*Uoy9q59aF1Q@gstZ^Q55QzGS_R%ZJfgQ+Z_xve; z4W7B-yO*n4%-3+LlU-Pn=NOpAVP;aU8Z_k}5;<^iq@C>F4E`2=AyLQAp76(pCGh-r ztV!qJz384v7f}x}XDd2Kq6+a5gQn9zexRX%uIib9Ih!pJFfY7m=%95v)oS-amfG6; zBeArp!{Pg$ZaP@=GQ(aP`+(Q2SIuTE9boL+XFRwt*g{@4W9BZPv0e_(_=&OV^hsxDeQ8x; z^AI>dpfGLhatb%s>G1a&!q0kpQ#|lUZ_MgxypjmsNav1UGB$?!f?}}MtDYsB3bAGSIFUsNUW1yog zxGK`lO}u@pRpUb=&YZcHWNo^mO(7h#N{@XI-4m9})5qW^ij}ymwVJ^uSV31XEH4d6 z-(8WNBe5orvdb9Q8rc3j&SECSL!QXuG7{ORkW5=fnMjgLPSut)c@L1lvXuxlb_oD> z&>78t3F1_>fyd*r7p_TG#PPr~N#;%|f@4_qTSUK;DyGKWVsq?AE^TZ$?Mt(Z+Vfx_ zpZxOo3w%3B486#?i*;gee?bXs1G_7ZR;yvqFqh z)z9Rz6530Xt%w#Cl4K6VM2rD(=m>f3y)Cd$O8iFt9mUqlWjrrigmH1XlBm)o_S47; zoQoMa2sB~z!ljgt<{h^#wSlx++CX}LZ0LN=uC$9MsOlG9QBrwIVpRbr#WZw78{L;1T3M!+8L%OWNlSK!AMS$CGDdnV# z#fkM1uKL;v-XlshZ(6Y&vRBMinj3J}engd$Adquk&i-1M%joeeLY)HLI<+@7?GFYZ zjd`C9pei!uc4rNsdj26?QL{$}h-RRgt5&r3eA!&IXxv=`EL#ey3&eJNlHd{Z)I9gH zIjpUGI(SFULx!&`w=Js*@w><8sx7P0qW&BrH__kw&^rwRfWKc^K z+BguLX;LPOV~jga9#+Y$)e?hK1j&i3aMUfqyf~YfP69Ge-U`w5@_dc%`HDOH8o4M3&~|FxwSs!Mr3Wl z4DG!sB}T`n>*}N7o4HGHp9)clqlIN?9+ZtN5RW5_Z7l7z<*r_p?i*x)HMH&i%^nrP z_?o|radYHryUpZP&}b8)wX5ZulnB8L~ObQh1WPH|9AQi^zx%zT~R5Q(xj~WBU%Zcn=(z zysbKt*fpxpX0$eMwRe^paZM`5C6F(&26gu%e#5?pR#{W{H{g7RFT{{zKM`!*ujE+6 z-~Jy7)tNN$>pGsgmp6YYh@svk4LW++QK=^A~j9?1K_U^csIos&xmpn zVwRg%m7YtH#B5eKVFAod&Qlz#9??Q_Vq-ceNj*jn;8=eYnxDi@k4J1C^KnCqbOSh+9ZtXBF~u*PYH_yqCTYYb%NAeG>GB~MDo znrQaEW{!_;xz$-vMK+Re>3#h=)o^L|WHj|XdM`3QC9C2YF|t*)W*c}ICz`^T)nKe7 zkg16ZDF6@U9y-xr#dH|&gZ?T099{6oip-&kCUQwEh$Jr01fZ58SqKB0oxcxoc_I9} z>%aIf49z2^e#J{uQiREq`Xd*VmRXR)SSk^vjS8qSZg(M~rL$GVGFi>is^i@#_9bNp*vWXJ(9pNgfI$7 z(brX9ikEQBo;hC{-I`1#%5Y?A;c*kCdG5(AXq{edD;pt(W!P2gxX%+{u+|~M;P5kWFvC2@erlnV ztkNq;tjCiE1DC70CuDc)k@W|X_-3Q7p&la;XkiH_`&*a$Wv%h}YDKdiGVvo;M~{?( 
zJ8y3M>=chJU;Y$2QcJS(Kl>ZldxnCZA5r*zwo4O@c;wafLtaZut<4IzH=4o{0X|a3 zy91c`=_iOWGuX*B{AN-&OIBh<^DHca7xMSzMk55VV#Sp|KX&~3J!r?8NC4Q;1CM%t!M344h`yUGFu9EJ5dQ$v9(c9I6>f4c$c(&L6Etpjn5=R{W-R(ZQ{WTC=qu~@@zYsMy&L%CbrYzI zE5@Y8c;e*zeN6{9L-|2a$B!>N18PT8l7uYBIaM=UajkTQ&nt?K%R01xH1^%?+l zfR-hl`ejoCm|P?H!o_(g<7AZ9#oV++32oSMW{F)xN`*_GGy(?3n4JL`tm?-mv8;Z7 zE6k{BcX%R0Sdx`1+1V!gyO_6Sf@qXT$rJHc40~{4DiEpw?nuz<9}P%^QH~adK!|2S z%#4OeUG>uIz97nGvC{Z*)Uu>8SCZv8opzBsLSpjN4|d?O@9(e& zaCSZ(#I>t6TWc!^h6-w0%R@RJDSLvakpL?L@1j(V^qfnZVK(Ya_40HHpH&TLzdew_*PVTEHf;s45l*2yc=&CcyeY?$s&T<^zzb!vH|%-k|=TW)q8QUf@xwq{%KWm5z%$rva=3Hd-R`R zIs?O%MLNOej#FzPQhL!zY1T_+mQ;6Xmq5o81RyG?8_*jdjLh8VP!Z`r^+&v5Say>l}VRHj19i)S&`|-sKPj@Iie$Uxg-^G>i^3Pi(43NBu zSl)q1DGRd*Vq}nViceFi^E3w3@^5~<#HCpcxDlm~4NZjBe5*Vf!<|PIO6R*5W^`5r zX*|X|3P{(2u<;y*SHN=7zlpsFizAnn-Weh#j|kceL4XRu0YV)${ymL#<*QihQ#XcT zhxj&X28zYp6q>GWNg=5WH$9S8?(%Sl&SrM_;>fSIc*(aOMc9vPY>0|TVQ}XT@#=v3J)^4|m zaX56hHynmnBF|0I{4ncdY-wN$KY{^RvT)vOEil)SpNW+yolQt?5&_xMz%>C zI`-@I85RCTI18gZgb+r=d{~xXc235ZzqikRpu|xj%ztnZbIKs!;ILt+e^!h+P+u2>7=S2H_58W#~aG;!diW6m$HFi~09KXiQ zLF~KkdyqO%CGVO-APe)~Tqx>;5kxE4A*7>yscxgNhm`bc&U1{+gTB>0v3pVF{Sp@XI${f*4= zKcz+CEJcQ*fhzgNKy{}IS-pq9u-{!b!xgMei_B4La4XOT6|@GV31lgh5(xkXhBh1C zjIF%_2T8HhMTf>bF{l!k3mZnP8>dPxSYEu*~~cVs^??;QR00d!Ct& z0gMc0g3GstV5LYgng9U!RdnKjRXCPVsKyjYJO2PLZL&vk?x=nH08j7Is~K!=W{zBD zvENCKssf8`0TIj@_gpuYcF)Vo^qq25IwOfaHObtI0S-d!kO{nm5%EFD@_hmi$RaLm zK09{r(rGUxDksC~Re_ej*VsuE$j!D@g&c#X+z+rVchAT@8OCrNKK46Vm zy+=*u=<8%6`)GAleqRyMmPnyhj!)td<19;VQU^T{+R@v~*@y$NJJ}ujhqS3p^6fpR z*?ds0=8{5rBv|MLY*V`Aq^TH=NJW%zgriE^*2viGy@A+JHSQ4NYhC&%ZN*jLf=JA$ zlTF4d-e)f?Ip4Ri17(SCu-{0rxy$g8a!YkGl$iNGX+coLA(4=W^1wX9_Vkj39UkYG zOGk(?*~WRTOzlAz_;PPYiEwoBlia@`NITdX_5}57@l9o?&*D)U#jPD5@m%7PRi3#y zF?ho{nVmSvGfoh>U=V9B?y|s5AnC4)zFAJ;%BD0Cb-_aq<~NNa+}-ly)o_ zp^PfC71;zGF(hrh{>%v3Nk!VX%h-5fhCy-USecC8uGQl@*Oy|ns^yE9I?4S$ySkrKD59EUr{_xzmvC{ zg63JJNS-NTRw5J&Ia$?ep#C9>@WU>$cCnj)%WS#g~-C*0d(IU6B>k>^lt- 
z9S)ST`p09~f$m3A_=T~LN%FBpA!9X*5*3k2VZ_80nEwEQQDi;+mwNBksy2Aee{p~yHK2W^W`nuxP<)aiJ!wq?*jo6YC zjHP>@6wMG%aMyC6cX8o>E>%5Y}HUSam5lTUHp5v85d+iJ9QRCeODxZshFbB@g_yOue7a%%b26+-%O{n;VkAb}f~SAk z=1&uD(^SvWk!%-rtvun=$%aGz0J`o8CyS>g^pXA^mR|7%3dM1lc;%^=`g6+5t4O+j z>&P(Swh=K2?WAGfT^&wSfX~-U-X+K6FHIPwap8^E&XbhRQ3EqC82rA+klRK|jy}dP z?kq4MbMQBaxVFD!eVwFewqWOi$b$3PXRj-?jItufGCG6XLtd?KVEcv#T`*bTTNxWQ zWVDk`QW@Qs%_@>70HKQyPc1I~0Xpes0+l>V%F$bSey+hSZ<4r3$}#OOgD&}K#&xgM z^{(ZZakT!SmPp%Bkfd@GGadPT^|CZh^mX5*JjG7T4Hg(3&=X;Xvmq@tMp0tuwLFP7 zl_E{X9eefzg0Mn1LDynI6g9Hhg~dySt#UAlB$Bd8#!(KGfxtTJ@3q%(#Gd{7n#tv% zkIbwjjA}Jz;0m)gl=J1i=ypH8x7Mg~k1U^2F2qsRd5C`A@5Da7N&dg*)B9;TJ{1A| zpK-eGns}MSIoTws;jCnp!d97{BxnzGUO*I+N0SW$qDr6Bvf1v?k#R{T64Wdp55HX+9hhNpIuBIl#1}bbimQ#V1l(?!{-^0+eo?;% zR15=pM%O_6Y<}G%${LvLjoW};nQ|}Xyf$qJl^F6~DgqJ@E&J>pj;X1`cBo63>n0&` zTQZL<@hn3pA6C2fW8c5Fx*D|^?lqxptwj?CF|~WE41|W!VaV+L`vcUWz`d>j*qOH0 z>&assAQ(;jD+R2bMaBJ!}f=orqe>2RT%AT~bde{F;J>r~dIhbvv^|LxNvT?u zG2a%`*R6OW4sKXmd{>T#Arv>S-m@Ha)cIm?lA7@s9Dpc0L^*oBIs`G<+YYkv1}8ZT zH?g>i5L{bQv;wSf83`*~^Oi^B#04>);D^$CC}4UzmUSo`$6aEs1Tln8T6Cs+rguj}*1zIrKTd>rJn@?$D z=lrPQlgg0nC?-cxfuq-`j_5J@C^H$jT9!Jz)w1gLt=!2f+P7%QC_RznW$UD+^b65$t1I1va|Ai8BzeU1L^nY#1a*VR?uw@;YC=pmFAXfIh%Fltz~a5 zc^~SckwQ19EAX8Wcsg;8MI4dG4#b9VWiB5Oc|G41AW#gixvGY=nw28ZQI@7e^(WMhOeX9wx7L9Cb8M z8hmt7s52FgCJ4oul|j%k%do4xme;}1z8N`*1-jAGT2v!7CYB2E0I+I!;SnL%eH%=m<6Sc@m({+lp~UjCmDFFW2u|AZIQ}b3__j<$U)eMKbE^H zJjpokxCA_QV;3fJ$BO(>iTd00ZAnrLZ8j3YjISQRRbrcJ_Xr z5~eO1Qzl7l{bpuVgiRDO?gWmrN~DqHubk)E1d=}x-J7ZU{vf!%9E!azI?8=T@dUCC z5?3LIsC($DawlK~Uy=t#*E*CkFS+ut@ZoUIF!Xz>`MN0_Z&^2uFd?y&B^7`@#3yE!15t< zy(ORcgm-Y2Yhq<)xeZ2Ho?;BJvc^olSsNTgE6=b|-Y|IrOG~**AI0$Lj%kpnJT(kvCL19QiLXmE zkYns5?OqT(xKU;&ZWg}B6R`3ZZ~eee4SxlPA1*y*vMIq4vw0=C^#vyGD#nAqvMUxU zyCjjm`{_J%o_`EsWuudm%$B6mVsDx$98t9Y0L~4#fgW!edqe}X6=K9`&s82G$AKO? 
zmMR}K$^=#t*jHmPiBN)^muF?qb{Z?DE8nG6air|vYhmH|{HbcNGy3vC$C8%D3dL;w zTKPuvq=8{ z-~dUkaxfYp7xOlm(z9L<53Xk;3{cmO%%}}lSiYMb*zb=Y%v5o4v#s?-N(m}a z%Ds%7SzF&p1hIW$j#G!5aW;mzFFhTTwJIBrvve(ad&8bAJ$%p_J)rOH>-kWg7w~>_ z!I^VtSCY&(O?vXnED%E#J5zbAu|A_;F_l+L5S33gN$h=2{{Zwp4jiqTSh^p>S`ls< za7&J8T=0#HGs@&Th5$e3MbtnJ-OV;;on*<+Ha^Yf$IuT2apuV*By2gz7n-#%*c~#aCY2XvH|peUHT@!W z2DoPjL)0jNO5L1YyuXU@v{S@#&8ZU}fkd%KG;QZg674iZT$GSlq=S=YLv&_?m` zRy+pTnVY!cK$Eu-*h{B|d`nhYBg8>p$!{)t&A%>Z4%h^PEetz*FcBk>KBH9SrESz!ig=1R5YB_#4fh*|}Vh`F*IGX{+q zsX1b%o`o#72b9&I#Z!Vy%W8KFW_bjnXr+|1pz^_u81Io_8Gzb!1`okloHv28xjOSw z#>e73WKqrh=dB~iEypp1UKL(?NR^0HMRp}w2n-h5cz{MyU<;qOTB<3%;K{JOj=GXm z?d$k|1O6N0GC6+_Wpnqkc?qDaTO6V{X(m)-<~Et82oNv|x(dW>9rem?;rv&VshIJP zjc_&E7_8T;gtM4DFn?4-*_*GR#u0MghB(YrcCv8fELgC@ z8tTlk*JvU~X-XAR0|`(LGBU|GCj5cAA2;}N(5@37R=pRgGMTc1-DnbtxAf3OB*|(< z4H1yT#maiwQF%%Nv~oKQh^0-T(h1%#tlQLS&JfdKBRkBFpmZ*x{0-qMS=PmPqR)wg zB{mq=p;p(_#h9)&6jRrg);Sa)u@cEEBWp^bo?bwcXYlXA&rNdPE3sBAQs$$(^mg(d zL~zQqVvA?l}U#5GdB0@V&A54+==NC%pEo!vrDjT1OKvk>>_yX3wQ_2Kqk^ z$4aeh_^eX>OWdo7w={vJcwtE;KS=^c64N8#NzP)qZ61i^-Ud8yn{{V+6 zRf@G7ReWpOqh{1ADkZAYsfc-_XskuNYvB$7DX zjRn5<*$qskr1h`v>UZ%2Gac{5%QEC^e-q1Ls^RR(TDg)bV!Iwa#7YkxDU8^NZ8?p^ zSZSE+&kUTOjHZVjUpe9oy!fc#)eMDZo+3P{BZ?04d4^3!Wa5FGK@_Km56HCf2ZB6K z-Ymn~%jRddYU*S27P0bDj+-H!d!dm-nK$Hu6^axy$U`cf2v5d*q@Eb?r8wa?Em^N= z+I*c#CB!cxhc6%eHuBj}!jd`JhO{f(I|9d26)KT39jA%$8+EE zg>x8>5Uvl#*0K0Ro^%sUQp2u8M3geUS%eQF0#%tpkOC3v-&XjgicA9J@IDm8OM4eR ztaLKhm-%&XKb-QzI9THlMKrK7G=a*qasiIVQ$L1&E6-o><|7;9O8)@*s}XWAVuIN7 z8mla2<&eCK#SlftMs+C4Nk;BN4zJ=mcOu8%&G>5DvKw(%oT`u~wFQ1CtQ%3DVnt~b zYn+~F%@_*lQ7I}Cg*AV{t^f!$>x|kS*4e+kh5N;8g0Tks-7I= zNavKSjOD~_41q(QAdo?I>QYvl#8!nIA2s@m(McU?>9?DbvFGr37!Ve3u|pWvbcJ$>jjDym30WmnZ|1s{*rDyeHzX4QHD- zHG`IloE0*0WNO*4(cG$&DQmKr9*VpD~24k)!u+M#Vy}17Fjlsrea(|CUyYsL9YHU9t& zT(^sO?N+%2%F7&+SdL{_XNeV0mm5hN5iFdGs%aFjVRbDO6*81;y-z;QUR9TgGa9iK zvGk$h(T(Il#DsChSbCRXR1t!T3K8Y}QL5^NQjxBr%d#ZJ#ay z%}ciubj}d-8XV^*;lB|0Q^f{K*lg?BqvwAMu+%4taT|@9yqO0GoP~`MB`X)8mkrBC 
z`zak7J}A2tD5_bpZJQrooIY@?5(a5pk{B+uz4++)din372{F0##aEL3i+M|S=7}I` zha)tyGQvERmF7s}FE6KTB;r`{j1WP#g(iwXxD3S1nBnUm;_1_%!ogMXn4TlA0Hx@7 zXDfR%S0yAEy0>mWE4HcAlH0|^ahFrbe3%f0+Z1nZK@cCZ%z1KI__3ZS6Wht$u9(Eqr{qX>&B*7{u{Pk(ZPp4dzLuX%s6CK!!yH*s=5lEwAP1#o?xg zQ(hDNQk0Ry3M`Qs1%7^69oQZcN84PzC`dltEx(k>LHw+7xX`>2yu6g8F6BE)L#Xbm zSeNy21fN0niPb9hjeYWgJKUcrJbq$Uc7=ok!^eL`qZi@&H?Me?!#Iphc!he?`9Dn= zFu@V3La~}j9JB63S3cT=Z?FJYTRs=ZEII0!*0oMni}-aZDToxX8HzYR#w;v`h7J`QKC|GAQK)JYS2JJKTjk^Vmb0`kS=sS;}M@ z-R!*fq_@f=J1(SUQm8$`tW2Q*9fzaRds=JuIUEGI#7~S*r2ML}HQLvQ(-JN68!LZ} z{CW6^;#|$Eb~0Ie3t+`YD&pc+XbypqnL}^|N0xxcwj8(Z%cN1Pv-p-vw`<;>D$ubp z%?c3`gi+=48)EK5b_@s4wt=*(;fXIQ<1!T?hNf1l*@796L0VUgtc4fMlhQo6xactp zqqgpP2QMxcDaEn*gtGXEtXE>t2_$H^w$#eOHKJQXkt$>NRVRz;7UBUlP4>R0Pfa?N z>_P3ppDur>UpM&w0KscV#Q6UJ5?-NJ)bY;_JHvRnpazVRyr`*aboIB__Qkc0vk^5QxXg$k^x+!x*0%ur?(jT-Iu*UJ3I1zWLQohTpL z-?8wezYs0tJY{DJky-lLFXkkxW>!(VDlE%1eAsp>BbGJ;u-css00k8sb_&*2vUR7k zlfqERPA4+s{J~_eoZ!b2@(SGB)B$ox3U$L~L-4bC4-cfanuTLWDfJNjLhMFY9ud`$ zZIpSgr?4dPlSC2KCxCN!yoZZy=5e!R9}E;4?ZPVUUI=4Q%8`L0uP2b`ZMkzLwA{Ld zl-*6_H~#=OpC8h;qa9k0VXjy{WRJA^Cec%+NXLut`L9MfRi7g^Hi4+Ks+L}ChhdcQ zE2Rp9lQMz^!3kHy7P0j2WAb>}@~vjv6)rq#(k+6aR7E$K5m0RHmA1;SvH@;@@i&Wn zNnXA|JSmK!nUckYG1Frj8*wRBkz6x^$idhJ98*#;8U%8>jq1;9A5QIPLfy*LgtQSu z6W6SCuL3cTLz7Dy_U=I>61yE0X6AEC&ZPKT-*w7uHi0DWl2ZN`fWp@DZ7Lb>toH0z zrwk%yXryPEim@eLNZE%X7!n50xkKX&l-05Lj}Z7iIg+J#YD=f$-~rXVP!dYA3MS8sYd(L z4Ho>bp8P@KUk&FdH;W|jgw?!DLF7xdBFg#j)OL-TWU{mKi5fRNlazu!AMt#$Iu{+` zJRgid6|3O$wqEQQ9MCK?R-+=Ncq0-ms3djr%8xN3X#=w}9$Z0IRvmLJCTg7@8RTu+q}hc~0R6vex?s_xFEazLbgMm+9y-X!0m)3M zVZ#aw7;yoWi6Th^jwF6M0aHOcZ%jz*p+ASF&Ungu7Bf@R#a6{hD#o1THS0#7Pa4G% z{zv@&Uk`biNg#o)Q}{pO{H?5IT)==;fXvm^ymfZ znZ{er!yP)cY&FU__%e4>fSg;*^N1mt5#5`ec-(#>T5N?e0$O{?UU_jTMWBYPZs$eo z?Thi)B4rW)M`s}R!%rv&UFXZI`LCT6?=HC4&1u79XkzhSz@)hm$VY0ug@%iZhAEmi zRf+(rQOqG)hCTHcx;Yh^^Xfrmt*3q0?4P1gURzGoG+bTIN69;MC)Ok z@n>y_hShyZJRN}g`<5kqb=hN%I`@_B#CDD#g^0U&%u6RxAqKQ#g+wF_3;lR*kGYs?0$Iaem=OlV0~Vni!q` 
z09NW!1u<2pD7WLy4=;keH{dI)W5qa()?YNV4XQYH#mKd*$g0K}E?86RJb53ciCck? ztc16ubH=}V+}ASs+m@{7CZH#p#O&ro)4~*b&n038l}G^q1O@bfPFuiI_-n{bMF$C2WICVshwmdv>hga!pGs|i24wI)2fZjsv-w+?!{x{>IffZ! zwF5ly$nq(6Ix(vGO%YKmG24OGzXuM>06_Qu0DMp!E1hh)KW2!1rd$UPi8g*je zvdZQ;B|W%Agn|^7Dp}8D2zAJirene0A**i}k^0P#5W?_qX*5zGgZ<#B(B{BN0;=qK zj}QqUuK7GfE?#LTg4A@pMvOPV4;c1gca@7jDa3R zJ0wn3NK>?15i7Wd@J0-SCt2gs&TBa$i5L~m7RPd>5prD^3?0YgM1LJ9ow2T(y# z<{KSCn;LS9eYnRYRhmUG?my)G^byu;;?olqY<4~dt(vzzFQ!M42(LwpIVA{YjLPyc z?gMH^A-15u@S3K2o;MleG^Enlj>Wq4W~Uf{$>ghurw|m9ILulKjKDGERB-@+adj-F z2g4U~a4taQ>fcL`S~re5_8M<70&yWqw35Zhu?kW%vUS+1UJuFmZVLI6giDzV-e=cK zBtg>KGpnyJEa-3t$su;ktL6emv{XrCz27?hqwor59+R4TP$U6y>95LV4~ifE09fH_ zSicFSjjd}bb16^}DpIKk^Q-DIs%Mf?Dz721cgV7mPRX$O8`&Ih;`CJ@#Qy;5TI-Wf zCJ@8;oy2D!Pq0H640Z&ZiBQm^Fiw4m%u?{QRO65T07|UQkl_`2)kL(!sah2(PcZC3 zo#s%)G5-K}3<>3~j=W>PQjdeMcQN?LyveYldv~QV#GtV zs&O@gk7KDmJ$UY@tnm4yT_)UYeLrYCcZe(7lEyaO*$o$`Sf`2tG?wR#O3MHpi^*5b z4H_dBkTrI2K|vo#ZQ`KC#Feb^k>ptxytOQo14Ql-nI;N+&5qv6%eLjF0YmHDG1$mF zHBz;DwjqHnO4FHOtqiXTF+ia<04X73zDLx@xC5m)yxcWlOng-q^++R@!z2&I<1xqP zOEfYS+A@F!*)(>%m$26t5Z?G<*VOn&Ehmp99d=~`nK%6H*Qy#9qHo7>YC#=nK!mIk z@ggK=1QW|=VD}w|sHxf6>wDd!{`o!X^)AT;xFMnClOpLk$>tFIO&vx-{$V_|v)`ff zf=qTA&SDEf_9DfA(Al_5Z7VVD4VbsMgaMdE7zAkGIX)H#F zj*4003i}|hm*tYbDevv1icV`zP&jyb_$kDJV3$M0nk05cBj2!S|u}!)>A2 zn_h)dN#Dkg-={Weku7)p2Swx5YGn_4y*_(8q~e+v#gvOwFK@ zHbo=J$6dcD=1-W1S^x$3O?x?FoSE6Pexna2WjRT@n{eM5^MLx9M*cu&o44wE==cVbo_ODdeKsO1Gap%Ien_DG_Bfq=$v*va z@ijx<0p_|rprXdK^B$5%s0a@paF;QUp!cS)HQPVLtQeylyp&E0p`)|JupY#ppMIpg zTb{E_jO_maK@2`upKi>MpDhc&Bfck-5I877>ExOo`sx1wkE9g5W#UwxHd!WG^r4b8 zA-uREj#(Fx)|JVPO9C~v0fxxxQtneFQtjHfKN1SjR+1{MBu^{}JibaS$AI4_+Bw^R zr)Rj|UprEvbKGhZmSR18ID4*;vfStVl*~;Ys_{0_l#{;pD7)QW){tf?pc#UCf}t&e_^8_0Do_Owcw$( zGi3!DHe<+G5RjGJ`OU>OCu0TZS^G%4=tR@t~syKgC8vG6mX=ZKb-1W-GBu`61-FfBS81>px4CS z7|AV46{x%4yY&8Rsd*9=Ss#;@P)8BK$H#w|A4$+X9pO207~GoDYzzYtQu~2Icw?A~ zr3m_&WMj4R*NImQq=ow4KF$a7Un(nKGTMmn91MvPiOWogs5DV|c~usZ@U 
z0n(1KhL%6WxSM_#PYKqN$JZj0dc)7=UmkIivP%hF@zLborDtv+`ghVVF05HA3oa)e z7CR3lQA-7NeoZq-u)c?yOGgluJkDOk>wlPlqe+z7`Ym+E+wIH2cr=da>ypnbEMsyr zjiNx5k_bZ1SLgY7V_Fx^8o(<^=}!Cx7-kaPMk1=9`W1n_e%`Ac18H|c)S%`=}z4B*1n{X z(W2m~DUweuecD&zQCq(bCqsK}Zq^3AV#DL2irn+lRC^5=Xd2SV8#@R&G9dIj07)ab z^p4$gYnQKDnp;^P56v}vwOF8r!Yt`5QhA2NizlrDItx*Qv&V`l;@j%*J_N7vTk%&{ zEXk2BPm?8v3PLgsl@3{CW&>I;`VOnFOj3sklRxJzeN!C5(+Eq_<);(fXf8==gUfPh zqAJ1PnnbL|L_U&c1P zGS3-(L>1ThkUEyo2V9BPyLI{M(x3Q3wEjS)B(lL?Zc7|eOq>_Y;Edu!&5pZkXR`YZ zc-!!JZwr%36_X&CpDK-da>~)cYQ**yC{{r*-iR*zh!O&zb|YPBgTvV+$fkDBifY%b zsM?Kd4J3rjp}eM)2*UCX>={AsKsx~Tcm>es4j_DKu7=A_VkI)nNe(6d0EAStE7u-M z+pe)Ok4*cG_w2`)B7Oe=Kfi95=d-6D;u_SfTxI&P3>R#?P0bmX{iq znx$*>+sdI8jwOUoBPo(j{mSqf05~K!9)-9d_eP}7hf69cQpw(MxtRR^5mfv@Enugb zMn6$r52rgpxcJIPV1N5Q#QyydGMNe$YTzk@Ui{ef^GR^h7M1xbY>f@?Wb0$&Tj?b{ z%2Bg#D<&`{>oN-qSI!QiFElBz$jSiZNg;GOcRy~7xW-)5c)qO2E7O(Xc=9t>5K0n<~P=PzGW)btZC!jtpvFk!H^J3g2!6g1Sj8M4UOxj zusnBKHT$YtBo#({O&U>-o8=~eR(FZEG@fB6Y=HLB1NPV*EtcIZ)!8PcjC!$hbT2Eb z6k>gp0(%qNe;e1^q}fW-;-Sd7z{!~?fosVerbLiyiIveitbBFEj@x2+4&7se$>6W% zt1s}9(a#c>bhPKoWgg0`2vASB*!zR_WjrHX?)kC(t))npM_d*Cdzm_!HpH!pvosbS zxm2AaAOW%hmL+?4`w{Lst;s``qm8Xpg73`ayVs+QsbB>Vb-Bez_qG2>i5 zE&+p@lu^qa-zB7uIHoH*w_&*S+dP2Ywm#ndP~&qpJW1htIgEZjJ9g_c)QOCxT4i|% zND#i2%X(wcLZLvZA z5`QN>>#*aq->~X_6qenSZaWNOnhDKuBxPA0IIui`(L6vW_tv^y4l5;Y^w}HY&Mjez z#Ba(j22{~sNF9L(YQtMnJNxy!!`>pXPSttq)UOmt1Z^#78J*L<-)i76{l>xly5!>D zR;!$9HZA4>)2?d_>QkGzi}12UTw4(gz)J-!Jh7?gz6=UM8;= zHrLVQzl!Iva?5hQLMp7Z;gTr6ndN{;O1z3aD#d*zkgmuDa>r#XWBFn!y=yM9$n)Hh zQb{6~P@%q^Q15~P#rqaM=k%U~BJ`T5rHnUkU8-u;U_@@#S&O{IJ&b^)BS%^+&8?ku zEN(|g7gs*0rm?_V%uA4(e3efO)mY%J(WvGl^1Zl%31Cq39-XOQpMwF}i%9FO^h++x zLj>zS-?Y~&Es#PjK27FM8HHKZ(^hF=F6~ z80SdjDm2iU7_ifJ22Fb(Z5@-PFlHdH1?(-He5B0ABt@5;k0KT^i5P>f&+MS~e}l8v zxH{)EW;;gb(bk4IEK6ob4vh$CjEr{M*z(y!9THrC4#Jecby?eg8A_RW2dGp@P6kG> zijlm`zC=>9pDYq1j5*l{L6?&b-ly%bKyTqpOZaL}5@qXng2cB?d#{a}Pbm}2QrfdJ zHj0o{IE|O($RhHE`X0Ub7IQvWJvzB+)^a%rp^?1W^o(I^#oY1?;3!^Q%d+!`tlbcP&{yv|y 
zx~kNm&5}6jcM8j4nO$Uy9Y2zi%QWRxYP$;6rCE{KBd`tYfn+87@(Zo#os}}t-Zv*+hni`s2Xx%t ze8(>iD4SH*GvcF#7>OsZlUbp$^tQgAW+;2e>^U&viC#0MCAp6JU{^SDnGD7YG2$DT zNn0_HwQ}5;tE?WZo@~#|Ixi&B>PWeQJsTd!T?K}a)67(8vlB~Z-HDvsk@$*otL+_& zHozgkP&@Kp`A{Cbwk5;(S*bwLK+0Aa0?0z7Y*a52=ZcWq^pbQz**$0S8W>D!b@8_? z6%44}$8D*7Up{O?oA-_*_JmBqyk2Up%J!>fsbwU4jNkj z0QzO|xqlC&AH&2~Y&1}ZUUDRSq>S%EMG{zkXM9(Zs+Dc4TKL?2Sb03#RD{%GceQ6} z956hZ;R~ZE9$YcW5ZLu288_%rsCkm!LnAH%wJ z-sj1?Ro&0$mlYiFLO}EekZ`%(X1yQi~5Zu85@% zr9cvK_$nOxjUA|7+HJ};5_i0bK^(gFQK_veGn2yW|2Yc#U z!`C6mTaLC$c%z#(w{c%oSWK+(y**hjNal?^s}LbZ~MiZZi>Q?g8Mr9t7dj>5i~0lI(*@H$5xX_RTcse!hqfak(xRN^d4 zpT<5iYEwGfo+|caRaCDVqlV!l$U-Y9C(;WYaXfaXZEr-ke}>_b#X8Yl$xR(eVYZQ| zNX&k0V5atbj%^=i*;xqoJuAf@5M`#VcAi31N|kLxEvqcdut?KMG<8BaF_9qw(p@PE zt*$P-dTA^*GB^sd&Sj%L{{XL!!q-2~kNGH**n_y>Sz)*nwu5W1J-V+CNWFtKf%G6p z4}z%dn_gaYA`d+e-_WwigN@X54rm+yk$aa*u|r0!#!IoGD9?0j51t_SVUbD zU8CmZRG#Fk=mqy6<|l*0Ih_5UGMCduMwAz+sNC}z4y1k~YK~lYlhBN0;tBkr_@+70 zv6*X&D%Oh<@l>5;5(1)F3{njHzMb9roV3d49$=!&$^dR-?sk z`u$x#P)3UhMb|kBHuR8%?mGf_j@?aR@h?Uyk3DMgTY?Ehj}570iX@egYDJWlA@igdItG#8Nb$o=UKp9A+_ganyzLwd_YQ^O$dZf+B;>Re)Y&Uaq~yTQ;oM%lJpd zHle!a7oH}T>$-s{4KBo@GR?tUZ4kzI9z>mV#~I+{&O>7bQ!R&}ClJ?r5>D~OSyUB) zU(Ecm$KPwE2SguZqE?P3?k+zOOPlcBC6S^xX|c5hhO+*N7D*LaIVDL8N2yc|s3E{3 zmr8+nW^f~Gk$yXmEp}J2m2&2%Zy)sm9(%?1GM00`G{)jsqQO!dRaQ%OEJ_uGwqzg# z3B}xn17)3g1H|pD84Qect@u@qY>jN3(1&^|u**lBiXyTUpp8+)o>~0BdcVwUNN*jge*CnEdKyY8$*t`V~?7}Bflp}0~a8G zrGv&w2Lyic7<{fK&Pgs;l6a_Gide$P&RP{#^H?^+F;yzZThR_SOLUgoR{BXiAxe#F zRmE3~85BV&Q6rg!jwG~VPbBCr!ukiWX6QqzVNPa=9ZdLs6Z5*C_7>ZRpI<2y)N?qI za<*2(wYf6}HB>_>kt1-L^2Z5P+^)|1huGt=;8WbkoK_w2#++A)c^k@5#t;~^D6F!) 
zo8pY@{-)-tJs`1Oil=rQzlSBLk=hgFEzexp;^oS+`RvvAu`K>bivH65WsR_CxrK;4~yk7Pwzsvrf?%;~HjS z?5M&>oN0ChYQx!!0VBx0>enyXlOClgU3IqA(HZwTMq>6HJ5R_40ml*T+h*f3l&;bK zHw>!MW8<6Y>=_nBU#Tpic@e|~e=&Wun%2+AhPs%_<1YJpuR4R+#(-}=#yb0{T!8Tg zE^4_+60ERL5k`OAii);jc9)TDY^xr?74EFSdb@P)-K+MpHtW#6ZjxEE6o`VVh@!0_ zihn6mFs#w4vV3o7jiwCx5?u6@5`4_IXK5s+Tp6q>OCFKu6E7VGQ^C%IcShFBfzm$| zP&V%-TEClE&x5^!kB6dIT+Zx}ogZ?o1fZR9jE|peX@(cHl26YO=L%|d9f!_lf@ig&MgU zmn;1owiU91!LLPYq8m>aloBOpjHGC+%_|0R#aT8U8^RtJwUJv{%m})$NBm-m6`Z7h zP>5!>6<^C`B~X-L!QGIlA1~lI%q}}Uj`0m_-eS&b&+!)$X4(rBB>IplS4HCBKoOkz zeSq@;A)FH%>DI&AL_jx=cJ>>7R`%?1AP57mjm6at13hy-z9%^7tyTIZn-_$uG||_t z=xDU086?zXq_b6#otX1vBW1J}nJM4MQW**^DoGoo&FAtAg$NHB^WW6E z1h63Y*G=c}{Mp)acuN@evJ>TL0YM?E$s#SRn>=xUH7~4k$nq>vvLY!wia@O+9f7xI?xZSeelHf-!e%3Y z(Sz%8>XtGR#XCt_n|~>FT^teTFnM;xdv^)kb5W{G9UNpBoJ*RKV}wxNx@AGaxGb*> zZ@wo*6fq6SounI88zN(>U(R^8l$jnBF_S!T+Vbp2A&#l@8%343$&wj~J@f+~W!FPY zR4~vdpI)kF)vWxXquZa#Y2vR1cz+e*-wb6Xp1tWYm`gP4K@5h)$mN>O-qfzYlZ}aI zhA2+im?W%9pxmE{@~sWDu+ZAF)~@5=TJY$s114Fw0cf%r=@dNRhDMG=kCT|>D9g=|Id%U458NH5jyn-%?BeWN znmaJ&C5kAy1$d(Ki|Q#xmC!_Xt=YS4M5*ZU4IcwF%-!jd48vITqmQT<+7;}XBb1;Y z@s5M(+yps5QVi>E5~U15aCfs@x%xZIs`s>vRNIqne>}!Wmi(X8kiZ64Vg7 zquHx7N|Hu+1`#wf%U}Q$7|)lmcxdzLSXoXRGla=kIeUI3nh9g_tTZwG4Y?Kyi0tS{ z;Z~EhilefmtH>7LRG3c@z8k<9EG$#aP}>$Gu`*qd+GypAUO?Lc#iS612!=-rR2>B! 
zQDW&!;Y=20&UeGJU&<_#*}&1ZBv_a-c{0r+GkWSon{b;3K^vlw1jnkWR?ag6Yk6|> z`3X$K_LQN7$s=8_WfNNig~efM_?sv!7&3VYYvFLx!ttr7mDxq(C{>MOc8>hb6k()( zN?DXL@2XYtjz(M**Stx7T!unutAsZpl5rz(fAEpP6oDgX4I(K}pf-d(@8UT+wSN$N zPn2Axi&pMqg>uUh%Xk>&`ejhuVNWd*HH{PkkTNcd5Y4eUC~n|tSi5ztRNPkw}nK9J!Jd+A_vFVD* zc#(W%LOhmO(}GzbWwv37W>#YyDYSQ)(2Y9<3W`0Oy4zW7CL=qDu}=&c`8r7?;Zf6+ ztb%CSAuV1*vFXzy^8}*+2;OXEsMfi5-S}!Cj;M`nA~BJ~#3F=ZIU^23i5v<1+tE|5 zN|?M}S?PFx=!b|Ujp@C6cjl5>t2BHCs2r39f~v2S(g38B>Qc(S4h4Wy?z@J&oo)N% zoGMEIX^kRyOlh)pZsqR@cxNk94)H@QzE(Y^^JAwM^BIbwK%24MSO7pOU*rL5n`ubpqO1rS;uq1rs5~b_!P(5+9~*c$y?PchG~k;SuOeBcXq0DG zVoP~!B$rC8{8N$J0>u%BCZmUBY&9FfH$EF%OKKENd3&^3>45KfrGn9rAyBa)E`gq6SvxPsV#-v*nKP5viKV*)KgZ*y(m$12mBZ%DtPz9AVs>`Y z5TQ(-;Av!2Et$z9wnHHZ$EBn|f>U2e%PTUhNT3A_J)37hld=Zb>}y!}HQgEBf7Hs= zStYNq+u!lxzfi!}@N~1!i?or?Qq)*}R#?hWT@jg#PzRVuE5y+tla9RBZaY21hJOpe z;;AutjPw$V)@ewzbFtB-xlbL%9+=-yVRlej5T%cn%%p&G%GDE9uSW%hidxAWSpghn z;Xhf(M;w4GicwY=kObC%+Z}ATQ?dA8!Z<3}?2c!_YbH<2tK?5lCus-}tZcRkGB@)_ z6^K8m9xg!I^6Kmvq(*J_ne{5FEDmT4)6ElkybYYaFNg4+5XgNE#*!R`cyqr>(n%|V zjS|lj26$hStSKNN)S+JEMCI`KOBnmvY(76BESS==<}1ZgRqr8+SS+d-RT?%}S{XdD zp?S)(#-)UYav#FVRHTu-GfphsY_#h)^#HXf++Sl5U-%6PKAYy8Jb9+0r==6br4C+ z0rE)fW1qqpoURWco3Sa2FKX>5C$Q2jjBPL-Wjw(sf8I|QR#F)vBWta<@kMB_c#j{L zr-r`-*6@_=HOORVjjT%6Be6LOrBtkG9C`(Gba;+iH5V^rD&*-<1L&l$i;79^sz)c( zygE0KGp{Xx%KQ}VRz*51$c-4Na+&OY0*-#%vrQ}}gr@C+(%NYzX47fEm$cV@*u9Did!BG*jc{NvlB`2z=>GF?q2Vfb8V} zI@0W+Di>_MW#hVVP|OdKRL!Rbk$pB7qt?rQ!=OBl%E!kFexAZ{eejxdsn8mB!YC6!E)NNMHfM7NnY0 z1GktAaBF8k8yh^i0A*vyc2GtC0MYKfN};64X^s?1Qn{OL~2n>))+- z>?Apeuhw~~(y5S|8EV){`+^v>?uf(u5fJKt`havhAxR=q=TwH1e?uuoqpCVc?;pb5 zkAZF1T5U#OrLpA6<+`J)s-2>8Q^`XU6LJl;ZsSMWT3{lA&N|qy4EXyJ*cM}yhDcUP zB(6NqWsz9zQOE(i9Rf=AWs1gDp$~~MO$dg^hr>@{CxMb#5-DAQ@{@nf3SE0Pin5LR zL-j3g)_R^pA6-&Wm!ph}>Z-*|GC;?f0%VJ5(Gw~LUQ9icrj|DwKZX2Flg2TFBX+cPYup(B043|%S)!6yo&Xjp{Mb?W?4y{&k;X{*ykCnl zJ{zu>$~d>KD&i|wm@JjnPois0Eb)}{CGx#!k+5QtzR<5?IX6^?1(2^byiM#!k(#Uo 
zR`EV@m1!q2ERY%8k;|H_(aE6IrB#i~tCj(Hzwqn${5hzC&-t0OJB_Xy@Wn`^oILbN|A$Zc%Zw;V6o*mJ+*7m(I&Ih?H*Bbm<8P5Yk5ic!2_mP=ntADGYI{uFLm-Hw4I4|AhKDt{hoIGEeVfc&qv`c*c4Cj@HO z9qNW++D(UvC#Xn2hrP(^26IUOQ%v=OSOpEn)QBi-HBS$376DZPbfX=PGmEiBfI^H4hHV5%1 z@i~&+OqINoW2#uSENN{ek{MRrw?Z9a<}4M4Hd!SujLi%opP=aYqPOFIB2Nr>ufy|V zv3TzzDMIBN5*DcQDM*&<07dPhmISJ`aiIYb?g#2UwB846ZZV|u>x9B?BO2=fbRPTFUd&O;)=QD&YvBcH5@Q=n3)b@tJB^2F7fuiOMvrp3 z>I)BZ6+F|%)h6Sx5>`L)6b)=fPhfYye{FBSQ97$!^3k&D;8@0j&}G&q(~nTQ76PPJ zD63YH*%++kG5oGQnSj|ImvOy){{UXQ1{x*DyUUV($^(xyGUAjD5R3{A*5i#K>QF0*XTQYb#;foB3Xi+#nVSj)si4IV=*Yw5zGy?ta(hC;VTT;%OT7T~lWgdiPoxGc|KG>BE@wDs%I%#VgAj zL_K;zhvy+-x^cG-5a>(d*30^!= z5Gf>$0u<{Uj#a8+XxP3g*@9J9s%8r+$gvRIfo-f{YBS{k3J}bFM}1begQ4+9hbMfC zwk~YErV1GNLzc2(+f3^2c8VLz9GhX+StypNOeK$Dgh_bJh zEOH|$*+nYKLv6V!8#~s~h;CvD-ms56@nEpkP_D878M=CsHJN!Pd4YhU?eIT;Ng4bae?} zFJQ|gh{bzz1s{4Ob!M1};L0BnH7_OsT`NSPsoG#bRZ5QM{B#i^_uDwKL+AV+DowQu-mo)r9$W~ts&HT$( z-!Fc8@mhwuxtU6g&x8{V+|b1v&JUm-Q9g)yDs!P zQxF+171y(q$P9#QX)Jj{$!bBz>m^7Hw-;uZFQr5RP2m2o( z`Ri3bl8*t6#eWU({{V%pU4pfem%K-um4x>mGP45^f?g2DIFujp7;sShwlo$`!L%pD z)oE_pdGfG%^>M;*zGa5xDhceW?L(%MfKF}91>zT%RN1<7=5d(ZrHa_;G3jv28c7k4 zWXS>Nw9t`#U6s8j%kB=xNNZy9_9wZV%4!Sw8{%2385$`W#+n&oXDp8DS6duUasq+d zrvCsa6WSQJ5gvgBQeN!GeG)&!4-;C)=HbR;VX~&pTG=S-*O8G!G9-mj0sPMD%eL9s zJ;#2J&g9|nags`wtXRwEuUM^I%L?#>;$xPng|}wZmhVU!ytCP7Fsx(N z&K+j7~cvl7iNKD{!M)}2g`SGvO-fDjv+^7bc>$Cld~Juk#XnY%{j zGsM{nGncng-YN?9lt^ZdITkd3DEmym$qL)#I4K{L_1D9FL62rem;;r}aPd#=i_E&- zOOWBb72@v`Qpz;&B;G8^V*db$GTE@PLsp-fj~dEOgl{TEBY;n#0()}o&2i0B!Sjl6 zQooci!;8sHikd@CtR`VRHq@0++>9k(i9BThgZ$lfbDW>?lKyx_gUDfO8u>-F401$~ zO%!@ii7a!6l(}H)dwVYsd+V+Daq&sw%U(FGVxD6gJwKwikD1%*wIK^KdA?r4`ebwj zb}FjcAdtgan;LE;>A)C>@VpOIxmsGeo?a_iEG9d|9wN%lC&*Niq?L9oN(q(OPc4w! 
z0Xu?wpVz4m9DQa(FHYdaUa#cAUg3s#)JnDZENp>@P{BY9k8~mqVM^E@$;kgpM z!}QYOUMm}LJgH`>AnmwR^NL913A3QOUO?#dg8_la;By`zm2uV*#Ces1A354-3nG*Z zk-0F$uFQxyry>d4>wOIzejEosFZI_$b7ATe=`oB1yhmFo?+1PtUW>u>@xCFl@JV`S z%2jKu5aO0TK~w;a;JjnZke({2H^?5TY+*589-6)j_)MOB`07_~WW0u7s#F3I^vVEa zjh-USAlZ{-><--(J})HO@O3P-7GE=X$(*$0>PrlJ#}gf_mBT9_I@7WZjRoDOhI~1c z$H|Xr?NqaK8seKpEyEmQE?kw>Apkps-=t}I1|)Bzp-QD{qqJ}Uk_p-(eYRc=40Nau z4E`SY6CstaB>7ABB$2A)tSe9#$}we#HAybFmpII1m7| zTIX({cqA4F7+jacIWH4d@gTWbJnTM0l~^Yd^Ox!hE0u3yvK+A^?cc00V*6YY3sDNu z)JZ2L=h--WUs70OXFc!L!LR;QXPQ%4-2h@uD)d#B~@ zzUB5t$sJZ*7eJC+!;k zzX^trnQhcx)zq2qQ-7%PH6#Qv49vy(ZI}V36And>%K$yPY;)^~y9KyvRmR4h= zcs%baDH%xF0r-V-29!u!+zzL)wK5PvU9URFPj*u5l7{AC;`09hhtvxm3OBCcAC8^J zLpyI3Ud9$$_1M8;10=-x73EpgN1G}5L(PhN@305kt_OHl1bBSiOk7;!3vNPsG@`*{ zV^meAgs%)PBji+{QD6ujlPr$hKmeU@e*IHmD+GHKZ&t4I&XVKeFDQ*+k>g+vm%5P8 z2|neH)Qx*g*TsM;+Ocv|N(y_Cqv672hBYjyJkQxS9fG1U+YOZ~+Ui+r_-hr2v-~kx ztxGkllSrahMnaZP8Cz#+tO?+%2F8dW^yf3Nd3%ojJO_fLmeYKldbj0G(!*zRR|Da( zH|LVwd29nx7tzyk(U9pPV7l9K9D5Q#8tJANz|uzqH11{a(%ZW_w5b${Wep^nNh6LZ z;Sa%4U0BT{DUpm}K?}Cs{4URnvRA0(u1#XBc7sVhre{L}i6rvXNmV;httsL*H}lhJ zbE3yJQ{~&URjf#iW~M_A(98I3mQdhw(dAz4(yj9$V>`z5((h{FI+eIEY+ zn|&g`a>`+#zfB@2j}pffwswi+*kiOrU5-IP3ZUqsgX-Tv>bb8MP3>7lrXZ4S zD5Dc&)+V)HNEndL5Tt&4LFxhI{{WF+hy)L9p4uH?x1>pNqH6%WkVcb9J1O-uA%7|% zqYxiYre+7ZByk$)O@DSAG#`Ghn99I{!=U>u-Z`~zFP-2OeCl4GdN-zWw z$;P+tN#b@Jr8!9Bo5E3UIPK-WhSiI62Rj9alw#yP`8u-=u_{0d*@)?!H>a_W%gtsv z6y1rVM64JztQAkKS9$}>E$!G1==MDw9xAMq>-csU>`EbcY%H?+K#oaN5ZrVC4iL83 zl4WDqFJoh!S+L#Cr^zMmX>En?emkyv<6jTiJ_?f;^e=ww6|5<%Snx9uEx1t`$SWHO zEEBc|W4Y|3t==h%5BxsGIAEC9@VpS$w5u}e@}eRwm)G+nLn1oH3inaw9zJ0r+M8d< zqUAA4D+tx4xph$?k-Nst9C9j#MJ!bFJ631kt4C6qJO*B7$YZGZZnZkLs5Mh!NROX> z%T9>WGB^T7A|+8HcL6~pf)G|}GNT1OG#By_9e5*am;jOb;F5T2#TD1Y$(DL@oMaL; zdEI%JXcZK=j$cQT{#oqcjxP$S8g2EH;^wb3H7R(L>a0ZVc1)c*vH^0rPz~jcfnoxu z6;d7Df@}l5=itmXODl6FpUC6vV`8riGDTKsOGXDE4_?J#!^o&zeF)kuBJ<<-TUFVu5C_esc%RpT)|YS<{9R^T&xjyV9TvsO zS}4_Elvxr4GER8uIJ_D`86==1iTunwK+sh1?}QU2jx|=av8?dPG^s3*Ne=vo`EAK= 
zGP=pc{{X}tg6~SQ-X_mQjE4s6jOE@T2!S@a#EsKuj^;f>csn9LCnAJhz_kXwB|7vd-v9|OlcYi1dj%ulF}Yc}~H zP3^hJyxY_OJ0R#TxtAnZUaL{Ba0KwvfD_l{NLYg{6wuwlcrH5_+hJyF64q#MODJiBhKw^QXo2j+ z6)I2$eE@2F6^R}phaHW*6m-)!E8}XkGck#S)^8>`V`zX#+>+s$hz^n<9-VY{c7=RS zTkD{P3fS`)olGu8iqfM2Hz!p<6ayJwTXv1X1FmH2``$+Df*shW<;*dLMC?MC=vvG!_tww)rkFq%uZ=7KV* zJ4+s;Sx5`z6pfsn@^)z0kg9iUJ(G;DjKFUtAq!+f-MRJ=e}o=2nrqoA#olFd zQ6!!)QWD@WL}AD>tMbw|j>*>cS)NzI)@&yG!l zCu0i^LOp32zv2Q0fDDX&!8C}@U#WrRQJ;4*|8;P zE6FP_n9VAdkjM*{^LVQRu%vc9EyF>Fr{byXVth$kC0%gw%>;C;#%ZlrBs)pXiD4q? zj}FfKiw&q@MKz~Pjkq%TncBWGSN7L6);ItJ{*}}i9NePdi}H?M zk?M@cTd`iuGs8N0bIvNyMmHz;m$u!I#)SwAF5Z;|{iPD@;@aORbT39PolC#dTxoAmA6<5Ur|} zt!5$3bh6FAwT7Gcuh#QXDUz$!X{0t<#F8|+9F}wnP$Fw=hC&o}(gEBy$d+!1a8cQ; zlwz92V-<5O638QGWl7OvG9Kg>`0fgCVal!^8z$CI3y%)w#kP*6nrU5Q62Rr^%*r(JLvYxikWp7^b1<#$GSr--4B;v;uO5i;{uBa(AEB&iQ!Yn_?q zb=2r>)z|NLIs)gt0xDAxI#OKQ>6U{_wm?mymOoLm1+$6Ocoogv@4DBBh?8YP1qhl1M8{ zRvCV#FR2p8XPivbca!`VEIIpxMf6r#;`cUfkZ$}M>UE5TG=0}S-@vJHFCwvv}Kyq@zlRl$zxyhG?3U;Va6HBL#_ppmOXI< zcBi?MWQ0N;DK6bn$Rj=5*nY0^vLJvDq z-a9a|Td`iPtJRvzG;x`DkIfe-iR0?y8wKp8hSZf|)^+5sPs9FJI*_-+CcjH2yw+mp zk$D*Rck?1JkR#Vb zoJ9iHc9h-SC-AgAs~#AO37?iZWfldDK0=hJU8JcGmtp|ny*zorrD5!{h8pADM}@Lk zoV3|xuU4h$v(h!7@~CDOU8R;ab{k|_8A?Wb29dzn-it59OFkg+1(D&}R%|R-*rCc~ zDiX-Tc@%()87s+>sFCCczZU>(@C7%AK9RH7$T8Wz9&GS`JW`M>kixpL9@P36@Q^1p z2z;$+8{My@=N1!T`A80UsM0hR^&gLQU4_Zv@%}Pw#X8GxC;V~>wSXxKZlzfh%Z@Oh z2WKD+_Rt(xvV$FtsWnP+RIf%#m8nHq-QT9KEW#qm;&tV~^6Q`yVIY8-o@X#W6+%CZYg!ZYTYs5s3KBWF&f^cjM9HHpZ_eQsvPL&NeJ z=a!~UTD17By-e*lmafw{W-f~5d3NG>h3U^=kk`7}{2wIhjLgu|b-lXWtvK2ldYP;y zE~81|n5f+Pg-G;St3=L{HHk`aAgGz8EwQu5h*8pe{{X{tc6>#e#0r&hb)XnZ%956b z8Scp_o6c5JK}xkI;!cN`Bp(hzu(zLY5z~^^LlKR`c#k}7MFYi;2hEBz3%beYAkksT zBX;D3##w;S)Yfq^_0t)Op0#wlC3U@Wc;XYwHOmUBs6ZURjbpUHfWvwq5!_=4qVh<* zXl<>8^fcnKJ2p5)gg`t`0DnM$zh1!woW<4oRB)8QAqiol5wQ z#p{yV#9E&nF)=nhRvR&_ks{af<5UhqhVvr?Fb7^rjUN3O;TOmzLHx>Qw25G@KkA}n zE|RAeKB2B(n2>&5zd~}TG%2>Q7N28iQoZ#U*1LiB(E6(jlV@v3=Z==BP)PVb* 
znL_t}5d0s2o?9_Tl!oniatn3LEOFKojXPFZ&t?H6Mq}TLR#G|sWVPtxJV9;<;K`}T z+j;U4)vpDIzXvHcqBuB&xgLg{VsSVFlxrbYjG$0`I>EH&FBj@ggH4v6&{c+LbiZ4` z>Q>JTTg~KrTZZt(d2Zx#wxp+8%0nYe6o=(zD93%O1e5eC4{|rQpZbo;@U@JMoP*=? zzARgI*Xpf2aM-T$6c9+~o7Gtv!>++bQiPbnR#B>WS2Kn2eoMq&d^p^ESX;7Nx`-KI zON#X*wHYH@Xl0#%V6r1JjsuuI4dIIx{9k(}*_u)KatxDwb-PfF#fVsxr_?Nu$o6#d zmOe#`v}vy6R(HcdB23J3(_KVge^pgiRG=Fm=8@(#pH6yS>f7n($6n3kvRIg*HLP8Z zD)xbiuT3;?$mHyo989c9O&-Y@<>q(iAI2o{Ms{2#U-*a6c~d(>U6xBz%&8 zyQu7{MXkVxFBO5LB_2OIPcv~y%tn)# zbn^@Ne}5A+S==sn89rtTGSji*8t*Wx6hJth{c==op0Y430U-I2s8{ZNMw1@FQKrD3 z6MO6B<*FBxh+qnwJY4_%Tw7BWrEyr+;82LAww z3{k9rEUgDxDSjZ`%1>W4;<~U{tyZgFNov<|4R)9mT5!aJ*Hh^ukxVK}Ady-#<}(Q< zyk+7HPJYC`7VzZK(ZFKy_G{9iJW+r*C=%S2LvCv_7~6bD2-QSr&&Am0obJsQV!frQ zj=`A`BjO{8<{XPmy7x$t0=9;flgE;g#Gg~Mc&5W<-{aOzhMZCjQr8M}jfd;}37x8W z+^%NlfoKZH4lGnyvJT*+v7*>>W+@l`9Bl-$uOYU7v2^U-jqC)uG|yMZtn^`}^zh?R zTXxJ-m|aOA#S{q`XMO3JxG!_vbyUlEQal{kyVcU2ighBbPQpPX5R-3D1W4c!-Z3mj zP^bamq^{eoxL*xbm%|Tp#5e_9OgT$<%Q$Ru@{=JvMzo(y27@3$9IZP)2^l0#05dL| zNrlc17i@g_@sC|$biSH<3`1VagYb18zPwY5H)!IqmONMDmX7_nmX)X$yd9-vvRCR6 zB>rC&XHtdFm$=_UfC^VzY^=T-dsHmXWtyB;Ey<9%C9kforZ$SZI|$M(Du-kn2g=ay z08*<gEUrK-xSFqAaau@OxG1&UGV&R{^ z2&9q9yr{s5qSA5zS+MV9ERcb&AeN0J?atcy{{XAf8~t=`{U$HYR;SEl zX2Ret#41Y!>6e@tsicBgzKBU1wcSyWUpp0nBh6IQ^c7HRV+mn zcuY0fgpMRh6?PeXxfu&8u-k$NJsAgt9N@e@nJTLsqTR`(mdY{+;&_KQh9)Y_AUxyV zxP+10)V=!4@HfNIVQ*t^_&4jXTaPVjTvwk;DB*_M)L3JTpplrWtgPXf1F@o@x~r8Q z(w%@5MjQA{rbbc#*1-W;!mYvMZY7L9QyGP`6Dg7XBkM%!*8L)7(f?WIQ$RV z@+(=Z?{1tcY&5qmPc?ah#L%aZ3`EGKih&dhC#*ODzaS*&Z?8A)$2_&{r_&hFMr4c2 zi(QljC)7yq>e}e`3VpX&#^m#O81a@MIRhp@R;WUjmdq^@F`7ZkiXrYv&|o*|0w(KX zQEYBnr0P)t{Zm7@wgxMhpU8W9mAkO*c4HV*bWDzf^L zJmC0QHlxi-pn>N*TcPrq*^edsSW}K!VR-FBJ$6E}Q+XslOD`4QnHW^hsqC#D!)PpFOAQUTmaJIvW7oLqnJkFKSI<(c@y%IQQCrT9 z7%O_#HX!Ov#rdN=6aX1X*GxW|wHt>c!rj%8+1VBrlj^yoh(eM*XIm;zvX@;BBhq20 z4KV75W6Mi&T-vN5k8vB@OU!;!J9JXLeC=6QNgqDXTaz(&l4#K*E`wu^jCI>&fQQUI zhR0nFOa(6w;5|`=?M@(XWJz29IWxM zmxvyF1QH){=-FL2r8Uez@WwV9qa?G#1$lCHNRP`|8qPu`iM+CR=DJdK;5L11-;`_> 
z;*8x>Za)d*r21?HTV`c@HyU3)MvyO=ERE^gEGVH=Dxf@c-Pq&e*m4V)%j4)+ri+TF zPMcVHH53B02pUFmHVQdm*cM}@P)Qw5>dj~^0%V5!U(z@wc8v{jj$U{CBJW?Rl%sDO zFBC`~3*2|cDC3wqFi8L`zKsoziswL`x*`_bg0^JplGrZT1>tDA`vkav0e~A)%CdQU9w#M8;-Q}8wq$~4{&Gd- zRzv0SjxgM(nRFP+%n=5PhHbx1^VVwkRw3cHu~PO$9$4?oN;POIkzf^qBg>VG1;(V8 zkyIqcfRvI$AC8Y9n8@YpUat|yV_A$v7zq|3pkr0nshhDKwXja~4wGK7Wnw9bF{chg zQ9NvG(zwLWwgapiR3!E zzZ@C7jhp@$DFx^PNi1{5IZYVlDo+tTh~mn!FaE&Yz->m!JTgcknVnY|R@FM6OY8b^ z5)s7!J;4p^2sPWC-hZy{N4W3MGuuUf1yq;d$QSt5lQB#dm5irkl`OdrsMQectu79qgl48Nbm+%&!&9ACG_uH>cgLSbwy<^M5%{JtP32Bgl3jrC~(0#_b z_W_vu_A`rcVPx1%_?`VUPo<1yDQ!V9K4kmYPX1{|Hxr4c;r!+vEB51qF-0Jp#Udw^ z{Gl6p zt7($jcz#-SxjV%J8g*g;OHPVObFC6d(d<4o?bRI^OT(9WszqKIY{3pKUci3S4>m+( zPjU=IDmc+DeYez2#y!A>9XgMB33y+u;;Fp$Z!v#^@8YCfCx&i_DcO3F!(J)o4LdMt z#36%$vY`3$fTK;Ve}r$Q)iSGk&94xO_H5j(Y)u+giA-rp{3f|Nm$3dAgAawo zhOv&ncXeX19Gv1hXK~Gj_DRqOYQ;$)Z=~5#D6Iw(3^kfi&{*qOv}^h;c`?71fIg!O z<~c3JyQwF+05yM#adPBt&xyjow&(sUKa(7Y8Kh{{q||HNu^va<5;lKMqOVC?!5Cad zc384A%a5r%H>Ab$WU*-?B4EHWtHklDCm(n6U6*?RW*xb<*TOus`Lv*`Mw=a!PEjIc zagH20{?C^>;n2^QVrM41TD04$17==*Tj$3x&BC7h(QP^`6baRyqk^Sy_PC6Y=dfaBzOfuxoq zx{-rzqi)+H`JGZyyB;6H(BLdxhb@=8B~z5ST6t=Da9HAFoUAJ&bAKu(WeUah3Y84R z4}S~_q3%1|f#C<{*3~Vv*;T}sTj#*d&y|yW?fUcQsowC^)~({IeLYM-nCaLdEg_aL z0TM`FSr~6&q#NzutvMl6C5A|`kiBYiSZ%W1(TsVKIhoHB+y)`IAPPMrL~=7eF*!Ux zjxbofN@XXihN?nF`3S*^#fNCYEBWj!3`W?r1P;Tev&}6`7^96>IeG24mRDh_QOrY> z0F8O1sP{SwKz%YBUD{p!wVnw;-4!Td4%~zM4y5R{BU?w9%36;2RE0ao3N?u9lxaOm3o>@&zeqj1_Z=XlaE9Vw4Yb>Co}^z$uGIg+8_>L6m`j`?cwO7i_gLRjTc zi5sJHa?U{X1^_7pfxe^^oa2>}mrOCj!0ML6;(Lvoh_X#6$}D>F$9Y{&AFLiX4!J@k6j6uuailTTM8XEF3Vql+aw zn9Ku`^GLk53{jbNrodw7-MBn_IOvrmgeOFSjjLXOBjq7g;AG9fdN7T-+<%a zL!ir$+(;t&eUIb8V@)?y8NVDl@gq>w{p%mU>Z z9u=IlO31UL1F|IHxd!*P1ytzkT@{h={{V_Ec%A2`J$NWki5@c~-b5+RFXlNr1DV*~ z!|y<7^9QV}nzP#GG(q(i{djXia-Zqtp+I7;S*?tt9)6f?t(cvrtqf~A(ypbQq$V@O zcVW#`A4+!bzLa=&)?+E+8#ZN`6f~1VH=4^SssaQK6l}QBPyrl$Bz8L;G@m7Qr_f)- zvdvyMiUDbBh~)B}-Mo|M5XiiX>5kjg?XHz)vNkdE$`XmmT7~0|TB_{mjM2D6Kn~>z 
z#o3s)iC{kCs&dj}2ywqHIbU-F%QLexo^i+6so<(d!jgM-JrOY?TtimN>#x3Zs^a$;+ z4{h~7g~_~C4VdCKq*DwCV`W7SPQ+n=I^+?Tm$!e=b!|$z;_$LbRv7JDnFRAgYA-Y_ zB=d+E+p!=CHczvTBmwVE9;HUkYab8utEN$XXHH`tJ$oh7{{Ro6h=)BMZyz*tCz3Vu z7D(l5PTDghEP>pwam8Ga!&}iTPyphQrt_yh1HDW;fDvJD!FU*obc5eNf z?$YfCx%F9W_Ej2xGAN*sC!ZXMG)j42l?t-B1$d1RPMB3#>r3e1y)HqLSw2qtr>RXuj}WUQH1c~#hAI4qkiI}JBeY}9yUDH zSp=NH8X)%dX#Mnhv#;U#7L>U=Rm$Y-@4w#TnZl|EhU7pRG zb#c{7W5wF@w9-6|(96stD*1`q{6u~Iwx`>nEKdyVa+AR$k09BaO?r^V&f)4NCk9^N-FP>mce3$)mZlcfI#`* zOlj9Ih-hqIononDY@CX!FEYoEVXo;PVc7TW(s-`Z#=w))jCr;T&Kb|86V#ox-;bq^ zkI4t7Pm+q&8EL$*!qYd@6;O7p#GXn4_ujkx&sE^Qu*@vAzu1G}d(HQ-2*dISo2b0T z11p-Z7?T$ScWhaPnHif4Q7@1TYcxe1Hv*Gn~y!_7k}V9RDWZDfp5 z1XQoZQX;6&C{T*8?yDI)CyDHO3lV~+bH;eE zjq8rax-06b+>0TVu9+C1S&fnP(Z}YqN$O@*i$DfPLQfD|OZ`)ct(F6u8g*~psV#32 zr}~AhfGc|aTGgyyxna3^SEeS@%PaZ9HjN1LTnRv8>@}_N2axKHgz)4qMM3jw%U>gq zg^cl(OGMNskdK&Hk)0V2AP$)ecGy+E8phYBkH4JB$5N}mm}}XuU5E0^Dx*otQas5+ zNUEv?gVqQKrdIq*3^L-XTbCz107W%Sl1jEM#-WH&C~dJ)RE3IC7=&XJ`bi@8=QFv! zokrL2(Kt~*{zKw=>t#g@s(e3k%2!xw$rypmAeC50HqdupT(X3P28iM$V&2_PV*Eo& z#1h-g_>$E#DP;2LRA-U_$STIHWQ-LIQGb>gd+oQrn&%;kl#SurHYwDlhpR4DD#4i} zo_;bjhGIhxDN;1n{{TB{TJ6%@9vdyNgV=KA>!|7y9@5tgs zk^s*lSaA!Osw7ep$i7%@yCZ0KO>bYCM(?59Pm6Q5;io1e!OCWe+>RspI5Le` z$`N-Mn~dZRGt4CXs1a0l9m=phx7=%Mo1=$oRx;TPQnHifjHW9b z?iU(~^go|vsw~~HN=j7TTGj-u9aSBd*3p$k*1cbu19JEGJ-cOzua|3*Sg%ISiqQW6 zxqTwLG@;OUMDA8D4Cij=k8(Xg>!EAYt>se5A)_8TKr6>3n?^|;(Y-vQATk0;1fsUj zr?G9Ru*GvZcJ$*lHH%7J;*NBZw30Z87j)u$k`QwWoZ0q(l{|aPPL@Fx44ztSrfYJ7 z+MIgTG4TXA%Cz9beQk=A4Wx=&m4jJ(FC^#x0H|Axn{MaSPRRjuqhaEpRPm!ron(bB z!xuU-NTx)r$nh#m!`KBbj`pZddt|6C#O^^7|cF8`{98 zAcaq3+B{?n3O5?oLeP&JEX$9!^{hq@HmrpZ?T51<^oCKb0zJa$>BVA#WZG-DD&!}} z)tpHLQlym?6PxpvQOO5zqtG8keZd+fTzqa$JGSGlzejDFX`x5+WtQ}d89^e23&;kL z^Y8KbM{bnVpu((GEDz!n$p_P{CMQ_Z2wk1fH*$rXA{Sp?m4N))>Uxb0s0h1{AIC0! 
zihPHDufZGQ7RBRn7jbwg z4i|veB@MIO^}EEYO7OE@cGCw zWYw(2RVuw`=eNZb%R~faVe;JQo?rz41H|%e@$~PqiBcG-zDldwe>S_#jz#i}lDLVD zaKsX*Sy*v9CHw4xdh&!#J!59%khR-0J*vtXAtpf-i0<5qd2S?-0JUS3ztwXUpR#OKTqR&=xJg z>*{8(h>0H}K;&O)pqU$rh>$k@TBfBP;VmfW+J-8z--dS3`jO;ay{IMBvD!071DKgrmPA z;1xmGa>24HvKV23%JV5FI8X#q<%shLW>rOvwpDlBFTv|pedtbdxy#$v_E^f=&CeZN-lk;P!-hB~rXoYk)thMsc(jTKnAbVn?tkfo0!-d;Za z4ME#?*k?gE+GgA@{-#xVBPezHvfVuz@#N`Aov$+EZ^dHD1+rl&8W%Fg;aiTmDk0XEQD)LOR*Q!^%9&t{&BnvU$cA-aHZ(rIPPzcLlYLKkYYNr8agVK;v~kdVTghQ=h7A}C{9E73TL(DtTv&n4#pT zujLS282~C7*;VcQwTd-y@xaYqY^+~fYR%};rZZ`1C>@A}gou(E6_06{9!d{Lv$TA1 zoX+H%g})F~p;i+L-@5R99hqlOHaQYWJpp4fODutO#v`U7Ix$bQ5Z1|$L+gKlu7=d7 zVI&WK^T}n3OHY`!k-J>SRVz%f#a1?)Ma@i%9D;pZc=mSN76*O$@*8z)SD7l-doV!@ zJP9;GN)~dgOj~9ij(Y4#1bmK{+N8;DPY&X%$8ln%b|`J6A_;9s(JWGfmK z1B`oONFX?i8Alh2rHGR8$=I4P6{J*XQ6v&b%P=a%fhD3+yV@vXsu6TzG@IH3HvN2Q z?788DjNs|f@eWpGXO0yXZ?jAL`;baxG%^#9N=z7em0|T&; zHx=v;N@>%`(@E|igX-a#Yt?WG6UxwWlE{sjnNd(o?b&pA22smI*av1yH;>Pc?OFp` z3=?oV1Jxbj%5}05UC3o}ktG%~M;*vkIO~Z62bp{J9NTIY0Xi}T8eN^1T=a2V%Rf#XNp3yMnAC00X4bzqEvC(RSlMv?1$M5n@%pkhSYFxZ6r% zL}=sUHTg#(7t$ZrnuQv)aXux>{X1W4ILm?>-l&*K3hks~Lh)(hZa@%GKlg8{n8v}U z$6a-ww%nx~SOXl^@4lMpZx71%z_)F|<9j|L$>jMoaW+M3G@FjTxuh!GhjejhA1~5* zCmm?_o6f#M&L*|*5W>*guN1YUF{5eZfB;~4_4$lX2_$zK<^B<)A_k3f3QHVP*UB2W z=**H$Ja1A5^NfL|ZO0t;A=>FQ&7uOP$}bJt%x5c0fq4-!gE0*7+4Cg0lG~mrqACy( z<|$zqaT($?03(nc7xxVl&&Jm4(#Ram9>RN z@_ac$xxE!(83omzsy`M|cnJ)V>=UhkM$XHqkoG)pBKPq;I3Z~d>Xeqa!(Zv2N)5?2dfh2Z}`}n)y*ADWwKPBUjh>hI>+uKw07>F$X6V_D^wb#H#XRTCifJc;DBisM z%QFKJm1IH3(EJH4FHW&j{{RcY<#L!7feOa8t4kG+sI^k6$r{HLgo4sMFEx~IVNT;# z1CF;#=`fq{_TxTqJOcH2E-h|@srz~<-xK6AxAMm^;i}nKW}TK>*~2t)(1s@s9Pmmc zRf(iKh`5pb#vE2P*I>TRHzQBRnR^Ee@k@rUke|=T#}v_4n8cB?#>t~1wA0MWCD|sq z(x5x(DVY;g(ry`1c0NhelmuPtDMayQRIgr^=wB^AIOHDL^8%4 zNX#6(afg2_5*pen@?zarVlLz*%Rx@G^(xk*O2f@jBoNKzs=w4SsRzvF@}Ooc-Nu5b zCzO&f{QqK^X0> zn0T+mabPgR#4ScoIT}jX!~`&c8qtOI5wXQ%O7Qt-)#h2U0US>2ya$7e7d&{6;m|`h z8do9>kAz1VveU+*$zJTBfo4UFlE4THeCwHbr%w747toQSjqxQK^|QH5F*W4n 
zdUc}Hwy9H@OdpjPU3lHF$d5nFtEoo=BJ#!sbE(B1de^DxfwQp5` zfDzmf4vO}9E;}RQFBbTB!dEbLF^3g}$M}xqS7TUYStJQO%_hKS(0E{N0wY+z33hCv^DSbgwR7Foz+Wn*BKG z#XYumor{K$2eB&cNeak-uO~nmN|0bJG2OIs)cGBG5UFhzFlBiJw?I8ljO*hHoa7_L z_(rAxiy5fAOJ6)cy@a;;_BrE ztFzL`A`z5Pv=Ya~Wjo!KjArSZER-$0F2eaMP9YgURSSH(K6l1 z&cBq3@H7y{82YR*A>R3kh%V^2^+kTXkMs+<}QqKs$I zd?J!<5;?b*y}}PxcnoD`@Wx-nI4lk>1AwhdBU-hKAk+G!fGU=fLR8dIByzeiEEyqA z;m-L7iStrWhaWCSEhc5GMD#5^iB?H+6T8N$XfacwNobu^ig|_eZNLfB zg!-;+Orjh~xYLAwQCS}g`6D5EcG16J0*X?5x!n5o=K_N%-LE@W^iY3r^g{cAIpM3Y)Zk0 zCuA;r@mEnAo*t3$v~te&>`s)V%NJP^8U0d1k;Yh%$fXp@{-eb>aT+X6cw`3NB4@^a z_wg4`!#V(O058LT_eha@B5}8H4P|78lnIrJb+vB1GfiH~+mZa)CXpY_MJ6H4r>&TR z6%N4zG?>vWTF7Ma)GSYDg4bQk8@-Q9nYJY4#vf8>2_xET@;WK04nkNb$``42{p$-t z5n?LvPRNZYu>_=gL$47SaLmk|fB30~dj-Gnt$FjBHqS{#tsgM;32@a~eVY8%O zCX!)ddyX=*SdI==fT*)60%wl1qcBMH&I&i79u=yWG+^V2OU}V zo?hgdx+S}+Li0Z{3Q5e8uJ6lTU)Q~WaBmCBi`MOiFKBrza)JibNP$g)WyvZoU2h(^HVpuMLa zT=;Q(EseAcHr{H_mT81Sy|slFyez4VEr7OwDXCIErcqav8B-L!q^t=IL1aq`Ev^#O=TsO~$}wai zbbpwnXei)2@eh#=Yb#MJRcleFm%>_yp*_m+I`T8*@sd=_WME-qCQp*q zYblOMSn*{%TSyQ1#?KZbw`J>m(L$J~}$}(Nu!1^(=hi%ul7n%=`;>JoI&; z&{D)WKOngry4g_2=W%AFefm5W4UkBvW3E_@Y)*y4%E3yhFUmOO=d$Fht9Rns(bx(B zHx1(GFcuPOMys?68WT}uYO8hju^vi;(uH0?&deF2RRDVh35m)N4CFF#j#V+0U15ir zM>nmi)`{JC@;YXA0liv?5HwDiVxh+5@wIW5HWNA>hU2YQn_i!5*nnq@jv!jvcW8YFhqWRG)FJ^VGiZZ|A)*C3A!ZV9Y#VIS5jK~^}9g2n{w z{G90U01d{+t(Ck%E-I{<3Xm$~vXD(-=atJWmTtNhE2GGWX_6ItFF8r>038o^ji|DJ z!)(`E)V#V`WlnY^n#;xJMp`B;vc}M)ff0D3GFUn++v^CV=HvqL=V7M}Nl2Z*d7Az2C&^fha>kO&&7{{LUm}~W`P^V8B{2uAW;~`6N*uP~aio^m*3qvBeDI#Z@NHwy% zMv$){2nZB1f6~aFG5Oz*kD{4}B<0wj^Y(U5=||#BwpzD`d_8(V%Un$>%N(E207v}E zpIZljEPIaScdvf5Wn!s`$<(Dr40W~`sB5H?a%r*A8zuQE*jVS8G~K;Oo9 zh>^8}B$DyhVBp2vjTyVPk9IncpfV9IAlz&@U!s`!r7ZxGLGm02GxDiz;HJ#Mj-Hf` z#qF(mEXQedRn|!yMvJZ`f}H?QvnXI&K+h7Z_%9GcX$4$8J9N*#W0cjX473 zdB^X4)V1stTQxJfHDI$^%GIg`;pEElkinh8ffS4&haMIw z8pg+%SeuLT1(k>Z9wE6mRvsz6Vul_lXhV+1X7iP*D#JS$2o&nX=v6^!=5ZiU5qOBC zW#Alj>U0|FVA|vHdGhxvHeui$|HY;iFlUVAUeRwncqiZ(KFl!_cu>*ym{60%7oQr3;f6R{BUAA2RD*A+gcVY;`6 
z>(`BQIXq-A?Qu>clUDij%6gAWivpDrJy+IUl4oshG&Jcs9! zNsp(OlI5B>=~nbtOnzK~20vw6XEzyI{;@g;_u+xdVFI^*vg=0g_=Af8p~-6!wnDS4ESPMgun>NF=v#q<05S zM@_8eX+a#>_Pr(PFzp4ZGeZK&#>Db7LO_vwfZ|y@F1ZNE`>-;NFyKX_;a5)yPY-O; ze%*6H=CRnhv3F``B&%+jwrg1DS}QOk#2Ch`9CCD0^J`qWJS1;Q2|9ZMh~uBSxue!&a~)#^o9qREXtCTXqyTp_EA* zzc6>W*>%Q69d4funa5;DhI8IcyV#t?8?~c_!}-r0r)eXN&W{&OHkfHiI_^k1X%<0? za^oZ9d)YOCtbLnfZ&UY2n-bw>ziyPTEs1g$lBbbg86K>uWOA4(^u|$Lim!H3y7tlQ zMAx9pQ=7dkc^RaG6<9=)$q+mW zG=^5t1gUW+wz&d?F&@M9ES^TB*0Q;KOj8y`c^0>p%VH|$a*#)*go^3{@*IHl?1Q2; zTEms5&E+%1^|D){twP4>CVuCa4^7Zia=s;t6OD^MkU6ZbHlTtrW>{DFjzxy$+Yke) zLV3stL9Qw^WY81CvCzo)ntv2U7Xw)$wi!_?3s%)$IG~;Q>O<$eZd3p?>n6`0pHK7@ zrlpIHlDTC1$!WU%GgmX%4$wyNapf9vAQl9%LW~Cn06cXZEcjQ*_)<)TDdN7eI?Y`0 zXv-a^n-J~*<)G`ZM;=5ENM=2w#EBaDk4OPwWYlXxOdv{jG4yPuif@m_&vK26uWg%8 z8+p8Vk`$ljNyN8(s45R4MD9-wUo}poHat3Le-LV|YB*c{;?0$aq?0#o1^!y66TSs> zNZ1G5P207sv>4=uOk@_Nw3VT*3#`@a`IVHDu20C$^EngC)1aq~1~frF0_TZDZ5BpsYzqW3O0kG?4GTpyg^A*L z_x8|JWOnxHWCkfNT&HnWu2`upO15K7pef1XaC>?XlB3;EKESEpR5fvVOI{tbRw_k7 zsv^hTxhD}gHiPp4!BTwQL>&+dqOl~X2!l|Jqi(OVQ%G}Vt-rkWqUW9g!^>4hGFTt! 
zIkV&!iUSOrE0GKN-#flZ>fi?KLaB9daL zBbi!a(iS@m+;qS%eWz{pwC5{Hw_0%Irl(Fs4GsSQ3L@QmO;aUSSS{IDDn#+DT-sJq zwF=IAk8N{13#KQ(Q~n;8F_+Ck^W-YmuU6~H6fjDz6;pv|#}ZH!5=ZJRNhfXHm#bND zxrV7|SDZ~e^opd77NBtyLjk$7p@N}bslv2waaxWBCo^}8N z6sZ3I6l@PK%oG;a5PDlEv7^2up=;@~?QF-n>rYh5<{jn6wr@F*sa~!}8Feqm4QCB# ztjbLOfFnr=hE1-^2YGe9Yo@rr65?(6D;+&ybj4a#o=WmaEKL(M3W$sZU@%aCfIE7% z?Y&Dub1758Kf$b8k*iM`j~ywQ?JAyO<#^ltE5d6dW4TZUy?|v$50~T%0ju7 zwZqoKxF3^W0Bm+3bp=vrQiku>*(|&UO9ywYeHPr_R%;fI(bkOw>E@diA$Vg}PFe(= zv#0>DL)Zp70G_am7fSqflOc3OmSZbfrb28qj&$dVB^Pc{LEkb5x3^q`ejw#CnOjDT z?FD>g$rsJDU>M~K8;9DEpcXz~q;Fas{raWO_{P1t;Eoe59Bx+=4q9t4$L9=f)OVIx zoGW>ND?^}HZT=9feemHcmzPAFetya$R6Lgv_Ek792WG0?r%>f%Xy7eU^?og3u5yuB zL|wE;9c;1h><4gm+c)AmkYnsTD=hah5<^-$b>mcx&66Mi=$>2x_dSOv-^W|LEtZ-% zMq4dzovKnr5@YSwVJt==CnIJT9*!V0Ym61^Q>USkp8{QwxRmOR8&0=Q3F^vG9+(FW0Y@$J7({r-9irgEKM3&n+l5k5M{)m*8bxssV2K}lS)$~dYk z0oaZ64)%UtabpI?BS)0xD(WX|oJyNYzO(ft&-#xhlwNj$WINi@Ky*JO2I z0X={^-nGz@W?-{}yH*BBEn?+{Wmk)fpl1y3p!@efZ5CI$+X@hTniswU*U&`=MG*k;c#bIp?p=IAHU*1p{&rPZrj-_t)#Ly~pKkSgB@Y zW|ljVML1G>PXjp5!HnV>_=V| zM3%f&YC$$Y2T>=N#JEk4z?S2^dl1APrliusSpDtbv7ny>vnw7>)oAOR5Wtk-N{K9! 
zDkNRlv$yEHy}R$@rgZLoEk=^uk+&d_M`1zraRY3MN3m~x^oJfQUa>7}jS^OP^(w^d z48gWo51+QlJ@v1im|!xNZer%inBt0@P4wlyotd3E0m44#U#kJzW535mRlwB)PkS9Y z^GtI{cX-_VDbERGt!FRTOl~TzjHDFrM(*xQO7i34NTLO_=bN*z8w<)~*y(z$wMzCa zXyR;7{Q9-T6Dd$0qp?{XUO-9o0*S~SjvGH3dxWt=4?*)-sH3TvziDgC^`b6dkUV=m z_Dd*?oJq!~iHbs{e9te-NIcS8Z0Fyw+~1BZWirc;#m9CrGer6@gvB-TO%Jn}`V-FMe)70FkcFLamDzCB)<4vZh;MkMgWfZ|UXo*haH)NTN*z zC666)CvCN1Y0sCp;tVu@`j!bQ{Z&I1Tx=d)cxyuw$YMf3ca4gv8ZMv@w_!D4z%o3< zY1g{v;u;w(1Y3tvpJeT~g6de81(md!5|1Q~N#sjT8WK^7MPB7c5zmSeTex140DeG$ zznrZcj0xg7@h=rvo&>!uR3a&PIfMQlhM$~8`9?v|W8IGWf8y*^Pg)xB`EbrFthTM4 zt3+BS5Gs&J1s-N&r6lRS^hgyd!Al!M!zhqVju9M%Y-q2_+?LlP3~gX9{6|(uihyJ`bT2M%-dC1lV>2 zhvmrD_-`A3B`!g2K$L1lKhoNoRw^QqcESN5(PmKHgnQ(qGkc9P!+0&KWAZe-O42|x z6US|_k}l+l&YHqG;!C2=sx(*@3fzcr7*^56rcSK0#ahgfW8;>*3&hSI>9ZU?mxxHm zCjc+1Mt&u!wNIzgeQi}&tByWNV#Hr^y90KyF6*i<%wlRiIZc4 zGo(_Mbmha4E>sSQ!_O@EEWd|wx1h~U=R*@ixStf0uSLA`vYe^{@R8i9w$&7X;##DcUhjez_=b_?@TuDSUs zBSRS=v3Lcy*P-$cDf46Xu38jC3zp$I_M*9Vm&^d;U@f9qf+Ru4QtRs{JIX9jeD0EZoC9 zwgP#j8-r*LRv0(x+CP+Zb=lfBU9lc#qbq9MfIOC)_f9K3(%>r2mFU()6{U4Yw^2Ts zl>-m>ambPaPxzyEI^uQF)*zj2JQh5;7aI~hvjT;sjjS&=0e8153}b-+8Av3qy3^u$ zE86iz%IjEIimkVf1&pkF@}r{^RgU{JNhA!a%s~9UMYE&QX*?@`!no+_nt3GJcxzNH z=5W!^^*I{QNiFH+dg3UjFznal8b)vUvLVp{waDvfwxbVGuYOY`$oAKO@$gQj%v}%t5H~=AZ6% z+Vk*$w3@B}+1A&2)hNk9N-7rfs2L?l=Lo_fC7HR+wtjBj%h-OcqubPaSCEGlkxH{P zNlL_*p|?!5b2M;F%)C$(98$soxc08!nh0U14@P@je^1TGSQI$T9T= zxj;T!^Kz}ok;p$80y+Du=nmR9_~dvMrI5#)ci^QczdPK&ID#Nc0NfK%ZJt|2PX{iq z8y?TTokz26X>$kMRA#$dnLh9#QBCe z{IxBi8+&hLoc1^_A2W8OLfcvqP9dxnIL#qL$g$*pnAJdC zKLaBvT`K!~j<%u8Qf5c%io;pWsN(T69|!FSZ^W5#6L^X57Ui?P;=%t}oBU>*l7}`Z4YIzC7CLgDIj_{u^k`)|T9#q&u0V8_sKzWgl zt~WPt6Fx#|t5GE*V<6)GsLMKlO9m0jSA%czD}D5Qc$&0xP-L<>+L1nA8y#U?hUVt( zWM<~8+>)v|vyuyT(AeuJ)ior$P?=1;yR{$KY53Q{0pT*HzlWCGuJb`9Teo16Fukf) zF+>@{@@Hj!LA}42y7mB0c!tEk`iC2WzlO0aHfaWu%1tauBvYySr|*vzentGy`f6C}W-Qz2;Cc`udb6f6M+S%E-z)ioavOJ=rjJN!xvMQ9yS zTD^+#Lm^ZRDOcLYd5NWEZD_gm0w1_D#INE!&x$g62wq3EYb8>!g=APxhnMCaNLLSojB2Dv#for4#y&(wN{-|x8Yg`wi-Zv4Hq)*j 
zl%Y+sbKijYQhp$lJ7%@&m}r)^FV!3qMBaDNszV&HPdlGX&E`S}m^4Eo74(;0u*zj| z%UIOWF2gRyRFR-o)W*Pb!9X93;TwAKj#vXAAcJ6@0REWRDh#~wEQ>y+^MT3cvR zIEzPBYO6Gx=lRU1%qZ=_qlf@bz(2E8Y+_p$xyDLUz>-OlhPVY|w{9?!=mlfI3sLqQ zyF$Ig9>!{uNRn-F_xA(u3TKx**w}gh0G9Y#JSQlzjj1WiPY>}XXSHh0WJVB?5A$T@ zry&=U`5=OM$aG49)>PTK~77S2%YE<)OE7+*9)k|1)_5y4t%ICw9Yv~t}Nt%J_{@dkESGAC- zm5RDkDLl5}dy5d|6fz`m$K)X-6+-%xu>Sz*5w7eyM}P3@aS6kTs$?aTxuy#*utxN_LB31N zzhlw}ZDn4V?GHSVG^%(@*G9-tMUxd8q> zAfFgPKsPxK7r#DVe`OQl46V$wSbHQ(W;lwd;AfrGiHH*-%t#)#aEs|38&Z#iCW_bb zTz*0q$X36U&czjO-fBe0g9Q$nndAfoZAiqZU^KwHc5B|59_}g|=>&EmhP8>MG}5tH zlY_>(=b+LKN4WI-b$P^ObWOD$0DAOSN|~oZS*!&k;oDV{#q}_EXG<3nqTNrXk>0&n zWkVcwkg#2(X~kn92EdWM&wbyAI2#@;@fKWJrt46x+HTdWub(A(*%m03m(*RQG4T#L zsa{=x-&>~e%C*`)F2yaN#hQn$Swz7V(x9QfGBvg=&)UMGh*@Btb|!Slg%DV+%JH~tW{$US;-b=MkIM<14$T=HxTl) zoLHhLwS?6G#Jb@v?Yh#YYo_@Rm0>hQVUyid2fTB>B+HV6s)6 zxHV~usWLYZZp8ssO6dj0V3ZU^`t8vm)@nsYE zPo1$%#91RID)u^}sJAGL#QsV;I{_eGNK^yMiBdW77{}&pli|Q}@br&_yf`<#ZWiO$ zFH{a7&x6RpC2V|iA|utvr5Pk?JoiOOP^^bz(GW4Q*yRxfD=wPX!d?}r;fj^h#dSQ> z9$^h-f>$!V8cHLs$lBRiB@##h9)Ub(vpRJ%jjKU3klcbiqyF5%4 zS8U1NW>woN0yeBiRFin2?%}Uu(qAr6ap{ez#7SxxTeM*jkULJnO2|3#W>ipna_WA} z(;$=2)AD{fpd%|yL7_jR*Chv@j|CToXR(s8c-ym?y;~`pts>1OSykbZSY(xb>z9;y zn)fmYz~bgYyAw7?8GjD2T8rwhOK%x0@IlHs?Nx!LuK{ND37#J%PcT6-u?2xf>!3+W zr-#1|Y2+6d4Z4&6029qmEbtQW!8D5yjiKarr6FY`h6Ez^+MTXPe-C(xXuK_AXld7$ zD;L&jk=dlxKjw4Mk|`NNmgMOgI+CgYB4Sj#PENyL&)GznoLNf0PKnL-jd@2u-t!^Kq9sa+nNb*8h7F7momUJCL{ zDz>6cb5G^KjgZA&G5sq)i|&oZg=hMSx)29$YGfA3}|a(Y)w zmb*>7+f*gV&5fxoM-ekSydFd#tG5<0#iR2mNi^&x_l+LE=Kw%7hLBS$onw!;Zs))LY8a1&bWy<>vJ)`u+?4XMpl z@ztro_aiAR_HIEIgFt|>RxG@Vjh&ErXaH6l`A6{|iYUt@_C`k9%Ls}n*;OXtV2EQy z9Nc=r1G~tqt)Zq5+b4wZh5Nalaom=voJ7({$Q`19b5AT5(`d&VHqU=CK^%ECRPsMB z{{TU(@YBg%hYYMIZhmN7W*+ryhw%!Y9*ZrPyCr;8OifmAGQ3s`M$pc~V6XDrv$tk< z18M>Z3BPjgFFX7xauv?JrsS(>MZ>H{4&Y54RU>6g%GgW}5*iDYTJc9j(Xg>fqnfRjdphh`11zen zcfUaY0FZ*e!?L)ck-w9@U+Cw#C8;B`7>=VNNYO|ha68D%6@+QC=p^@I8gDC|$ymtQ zzk0`*VyiYHt(hS5;tnU4S3X%JDyw$m+yG2dvv55LEpsy`X! 
z?`0yZkF^d$wP;M#mRO@#c^cHw%gCNU5X2YOwmhaik7JfHX(Gl;LRjlJ@hpoltj!#T zR+Wq*PAtdFi4;QM1!J)ZY$~8 z=(yVTd$Cie$|I3B!dNW(glBBai|%&N5#M z_;13VA2^#?vl*_rsV0;bvtG4^nPcRAOp{ufmtYd_DfS9Fed4T*)$r|{l`A#j%EuH{ z=mxa{$u-X{_OY`9p^RoHJb_S8*^gd$do70X9#0?Q8*OJ*uEQl*lC+i~ryL->qin2( zNgw|Jvh}V}ZK64^+5}300XKAFG<4Ce{C_{p*H@*dmf}>m%_8n5iRt%e%&z)Ibm5AkY z8E-kHi`8N|2-6jia&<0ASyMyQ5ILdlmt`5a80Zi-nFCF`SZm(Z*$RD%OvIM~I@YHY`aYoTyS)(iD-~xNI_!tqeT& zE=yt7cUkO;>XO-sV{o9nQGyg5f>+=jd}#WUA4r#*jn6#>nm|QI{;u@M>+Gj;)grLg zZb4*8>U5s5tdItg*^i&)l@cjuGpjQa51KRpJcN05rBf3IN*taqoo+F+MM!y@ z^NQ#dP|p$X*g7!p+jQ9J{6BZ4IkDFuw_yDP8GN~U29Z@b?01jM3$5%C@zTtc z663R#W{AAAR>Hk{%E%bYFrGDufzUimlSmkY-+sg5H`I4*>W(%ePYZZ(NbPghnuq%f z`FlYSx8b}qHcE}^7^+lOgwxi7q&MV|)sOs!Lc2D4eImVi~%LWGmJNtQDyj7aWA zz$#oRC3v1k*+J=7hj7qXrDGqA$IW6}Q#6#8Xx>Fd4E}2%8V|-YTaR!<5(wkIwbd$? zw;+>#H#&pouvS-l;A9%jysUgKJr+J2#nZ6yMzFLET5spg!+dJ-W2Vyg(`bSSZ5AKm z-&-Owc%Jo~^?HYu1VBV;$G?`782u8iM-1g`Pfjd3|-?glvc3d z1G0r;8UY4|%IOxyLd1|qRcV6%08KTi6_jJ^rrK$o4m_W0=}1zYnHn%ydf2E-%A#!8BO9!suqP zTZ^rKF|HD^^_B^6R%N zNb^SEg^^5r2H*TGvZzBufaq_h)^=%d)(^jr_^b{UYC5}gg9-LC8wKA6Yb|iOCyCBJ!rpUB5J7d5jr^0b})%O7$Y(e!5QS0*RfSX%^+rA z7!u1w#>P(rilr=9X0>%PoZv|%ifuHUgGA^K&lFS)00CHQY)3~=p0#(wIa$*mBv$Kr zsJ-UU(;#Sb&NoER8CGBclan+A{@ZP|dw-t==brD_Wl)JTE=0 zrPrC*SCZG$`EI(?#>E0kBSGmTDP5={M(T#scVnL=$=-JKgx1YAX9J<>sne1#!_i^u zSC*ZdhCGFc8zAC-K1)r?Hb+GZuT*7YLd?Xn`zxv2_r+tHT3C1s8qwRDq+dgPf4wx( z`F2kih$}2Ri5f#7*i|5w97q^i*$X&UTa($mam_nKr$S1^p>AcgWeF0nk&vk+o&q?q zZlkP^wA#jxH<*e93wP&6tQOJTc z(IX*#ep_qUtVR%qaRF@}vIL{Py%B{U44<7D#$^#1@c_-&W)OqiLkej<1V%Tk&q z{uNs;wymv@0`Q6^ji0p2%-VEbCPCQfbv0KB;-<@eW?K!Fz{N`)c1MKF&&4`#Vb&dT`7%8E)b3nbV*Io+@ZMLlrJL6p$f^*6S6p|p4#KxnKl^K-^io^`f3K89@4DMPbDPfNsmFSHkidwN8f9W* zD~*^y(N$Lw@nSo!$m~cUd9rpN&cov8g1m7&8K_@mF9nUus<(CrkCq4?#2ufIRyQ#g z8O3X{Br|;%*V&C>;1X#Y`C&qhs8Cb6<*kxC6VgS@vjVc%=f;_a7 z@t4k;IaTYd%!JBIE~P-rDxXks#4`^2AeUnmO4`8qMz7^OP_?Kcy#vSfDiLONjIy~U zl}{iKW3PRVy@?KUEM$q(#&+<(x~1)NaQ)kp!u&e(Tvx?uLIrOG_->GFg4PX^B@rvr 
z7ia!tiNSdp7|N0Dj=dd}pI1~Z;mqbng^8ob`Btf8d&Ln}nIz%K;*n07k)5L>UmFB> z?OS8RxQ`9ZeANrJ8qr)vhvpfedme%C|^K^$=7uy;8-qrQyEhYWu%6`@fluE zBqMj;kcJxK!B9vxJAze#1ocf)!&dt1u@{|ZR(>(S&m?LA4$uVk1tbzzxE=O2-=)`a z*$J~&p~PcaS!_gWEs*|L979GJGL4=X(64L2px9mwguY^*7e-He3c?^Ak^SFpswj-VLy$Fl9x zH35Gwzix_FIUFP_oOZ`Wnc%c0N^-3BOU6sERX>qSCfMs-^iH;VCN`~lfYJ~(Ct)0Z ze<`%4p#XK`*Sb>v8Tz?Ai+H;4h_eZ=caBvf97JxqDs&2W;(PB}9|NU0xGB>pKDJAj zYqYROen(bziTJm6LRLUX{NJMcdv#Zn$M|_J3_{60QBp{j2qufpfxzD+mR2g;_EE5R z$Q}LH+{V+wXQlFEvbHx`^(yfazCxD=iY~kD6Z}VA{&&%2FQP+?1E)XqcrQJ@&`SMh z83iaFHn5zoKI4r><9OK=hD{F2SHFLq^j0pd?A;4#kiGPn=w@l{*q3ePMxccE_t`y% zw)_52diKK~66H8h!num;$cqzNQRT?TP8BpluVO&h17vpV%&vDETIcX;xXew_88f+< zSY=5FDrG7iSBq$cQay=1f!9L78&c-O?V=FiE>M!PWHQeZ!!3GLp%94q1`tA{(m4^b z%nv4SlEnOgJXRS6f*DdcG3jtutrR;86iAFoZr!#td;9Em>`z6<;s_$IV!C)^3oL0E z^U)%e7(~t#A>$}xu0Vm`L+l4zueKbZH7ZL^X@VL4n;>R|(~07$BvT@|Qo20I_Zr^1 zeL5^_`*P{)ud=}M_huznq*&ZUHp<&A3g*%%Wv+OeWdo~_+64(`Dj0*X+ap7^7N~em z8u+G)D-g`q$}9zyMYczf#5n~n5ImB|fM^0nxpp81&D^U-zCrQ!D^Q^=zvTI0-bPi0 zRK}r5ZfsbN+w|{`VXUz^oTR=rsdG66VJ$2A6 zA*5=qYpH^5#Ht$@%!XO%_;=Cj7NM#+55IWZp zR;yS>m+R4bkriIreC18}FAY;2T+ec>b)t^SvWV(LsV4~p_Far%^Nk$Brb0tImK=ye zdOOK`ej3VU%Pf;DA4_FzhMHHZ@&fIy++_2XDMrvUd*PqW#aOHA1{5^anCyFSR z87qh-72y(r9GlsB2=W!Qdo-HaP$lTTf-G9cy@|eZz|O5V)0f~lTtwx{QD$(S3ZyrWh0RTeKCBM`d8=h^2ZEvdF?{2 z*H%(sbnZZJbmJ>`*K&he=;<(c47%9GSgBs}03jpN`v*}p!#Sz!T#gD-^oogWCph+0*tuP8D z4867Gs355X4QQTv=sEtrliag^F_wIc;T9NcM;Mw2*&|1mV5F}UW-JF1KnK!$Y~0|^ z>qO4HJ$UY_E^RVJ*O6qb(8HL#Lobl=tIv*%hDWPxF)<${S6)$NR&_i{ZZaqigO=Y7 ze-*1jYqn_=-KZ$klG}Yc&jGOW+IR-GWB{gt9s2-D1a&KJTxLGydNJ9P8+KyN4R?uc z+n!b+cH+dy#Y-{K*Qztz?^QUfBzRVyDQ!Kg4Qfje*%hW`t7d))SQLb9V?~Te%%H|R z;@!aCTR=Lg%<-VMm)aMo)OAM!?M9sc0M0_twD^O@nB>f^4wiQx97#^hGD94OCWYcu z;&|~`LIDytAlfR_k-QG%`N)^c>LBE!KUQ6v@O53;gLVrcj|Vu4GM zu^R%Cr-z>dn6r#H+7Zc-Rl7)q$IWFzqv$YQRB zIBS}Wv7$#G4sih5C5h$9Pvy}24w7Nj(bBUs_1T2PB)JP2TanyZv4afn6m@1Y zOL9d~BA{mMfGj=OkU>&4-+f^cT=3R5&Qi?n5NX9I-dBxb5Ui5eAf6#)@8!lfvBVww zkW%L_C@V$oLQ1}o=~KESQvQ@ z;P%s@ 
z5Z4fJmPn)ER4i*F0L-D7=>GtSKDIQaoYckp7bS}>BZNjHg2WN4ztb=&yduJmQ+qeBrCRcDnz62%-dd2y_ zAc(}INXxd^(QSn)ei2Ul^rmHVvJJ%kXPO)thZZ|cz7MjT#x=V*XqNUaC``sko+obR zGdMApZIE@!Ql6rqc0W6zGC^NGR@`m5r?*+FyT`_5=f(>Rq>x6!L8bOQq&>H=s{@?1 zmdshHlZvEJ%sl*krHlUn$iv4Ps!BlMVrJ|^9j1`?;z3iHr(%W@DYEt2KcmT{p=J(d ziDcKFoD;jbgvEj;PL)agS|INyjA|~Ljz%@$vn0m9ZLj$XPWHk8buOu(NsqYi>@V@ zgXvz)XjZl23)17#S!%_XLp)1dhj_~<+Jc%4OQ=GsKr1T^w=R*}vtPu%DX)l2tx7r< zBdU|hKdq}*j#WeifK}R7khZp5$TSBK3jtvu36lagBx&j)S5m89(C2Z1&Ocu(BQ*I; z-|+iN;-kxM+s7MnMRa*fc|d`*G8SM;g(ZRFc6Yk{2xDtF(Ox>feejS%oSG?-6stHS zX}t=|%(rbT{{YHBo*kG$;)o`lD?=nOty3K4IEu89m{|(}r{ZOSx4n{sTkd-F<8n16 zmehG?TTKC!)L1x2S|ovW11y7YV261bg8~%*J1%s%6&fl!+n-WD5QXsbob|<2;WBdP zFu4pa$H7)%any4jCZ}rU`qC$rW}q~oEJ?3QMDwLq14PQ}8448g*&yhGyTcgVO6DWS zWoN8po~enW*?RRJSc=z;*zqiY7zbh$=zoQ^*6apGw0TNU!D<@PM$_7|u|&n8RitLz zx*#b52AAZ99|Vno+!w%`v4+0~1I{^3L6e3|EO`!(@$t?mCsV zBwAK$$5tD2U$Y=c(l(Y^k(h*sjDbkv%f4Kc1v?w= zy)m^7Obtr7%CxG>ii;BRtlWylVbV?z!^|-;s0zRj`K8$%_tMHYY6iPn?heHn4mx!a zA40)JL~w9B*o~wFpc?-Ghs&~;-&!HDQjG+T-1g&-9{}?%$F)gxTmaqs`bPoC0k`L~ z=^c5Gsv1iG96Y>c0uVzOT+L6gv=22qIIo-WWSBW1sl3ik38$`zf>f_j-R4l^^G4iO zA^{-s0O+o|cjGIUJVR$I70iT|FHF;UEhd?SvR87eD>x^KB{s@-s)JqnLEw0-SMem~ z>?kLtcHL!}=3Xf6=DeX(zL$uBe)}%nxgMRtdcI1&R~*d*QZ=}5$Rp$$c%zm58sE7f z75V;HBcdP^SO!tWH_^Ts*Yhp3etr~_>x1e$38;&=e3K$A=6V_>C2D)%C6910}H%W5*JWI@H} zhA!`7K;;>v8Usp-Kpopn^#O-BOZgu?0NkUb>V?^x8VQ?i@D|MUE0;qhIN`|1WEEwW zymBlNI!KX-Yr`SbWl}(>3E!`5K|1MN)Uh+*9;Bisr6kc@jwF!CD{zn?n1V`z7Dpbf zl4R|zb(SwBZ!0uykDhRUiUzSORtp?W8^2FdVuW(EPa3Ppk#R`khDA_E7erR16$-yIV*MNfpRg;UOP7?nlx6GG-8KNs;cAyp~>W1U66z9dLsTh{k(M( z!lgMcBKb1PjyZrvpFjqaTI}n-UD=zrRkO5tEpWIR`1PW72R-!*st6Ect{f3qtW+47 zWAP4EgslooEEOR1#17q8m>`xqXFrq>%3HWj{zp>QMoTc`>)5kzCw@sCV2$N@VXG1% zaZUvP04_)1Phd8_$Ck5O^R`v$HDQ^viG}1avOG)3DUd260N!ShfO%{4fVyGU&lcux zVg8E;^;}@91Tff!2;orHN;J~~;dy#ylmZC=gd~IAx>ZodwC1x%4P@JlYs9)4 zxxKtU2tN>FZ`Q=UJ2iznD9RkpoK`6Kdb=NS7WR903%{}HCVvF^n%KV<*uu>-;iHG? 
z7S!Spjt>!Gxu>kYm6=u8mW=PS5z~v9dFf#>i7PYN$axVTEb;+lm7K1(=vE{zr|w3M z$?0>N?M%%q*Lc*ZMcKUZBAHOH7>GOV8y|8Q7HKux-IW2N3=IT$7#8;{-_(`^;Dg8{ zwQ}_!w+%eBwI{9?Q6Cmb5g>6YOuA_u1i?|8e&K@xNa$Qgg|RnqhTJi=YIbhP1~%3) zew*reKn$#9fuEePfQnX7g=7q%4F)Z4)_W4wSV>tbtnXyiu7x5XN)|RN#ZeSjM=DQ# z3ZxU;qi}h=WGQ-EULBsKULJ+x9$o1s`gz@v;DPpw4D*=TrI?f|oK*2=U`;gfZfl(9 zaJ2ZDC*tY<0LhYE=zuJp*2UAyzHRMd@zkzn)+2g^CFc{ zMb{ ztwO#D7#58smMSs@8$@;p(@e_1@gL4^Oh5yvJb0VKm1xIn31Fw2R09NsqNPDuYr$xY&S#M`GnP)`&^m^wQ>nR}L$kf8`>JY(*M zN5019u{eBYJ)_N7rApgXxFWXoE#7sO(EKhg9XHi6djaN<%}94|b$b zHCbS2C_d!#mRpMpOK6}YLy2`xOjSRXAw0IA8{nD~*U7t}tt8YWiiBZdc1h}&xh;t$ zleYzKQLk{Ilm}ZN1o@0!M!$;`WNpf8{S)4^{s2STnkGeJLXtr$n;p!be|!~eSQ-dA zdVij2l*n@n2AcUq{u56SH9_Iu4A!}IDP?ZKHT#J+r}E;mQfej0Zu2!@4;9>u(=NM* zQRX(~%zSk`IULpFvs#+Tkcq@Hv6Mo_R*}eH2x4Ok5!vOr4x9i3WwN+Crj_fj1ePca zN_j}G6Y0m(!d-^R5cZ8&KXx3OkOlhobeas5{vO2RYGrbiYRKxfH}%s;2rC3E+{9vI zBC`ey*+A|>qV&p#wX7U1!@}G9o;;AdJ|d(x$8Dp}h0wWKvad%WoSjnJrb-I2V=S`Q zh6t+6)-7=-hZva>Gr>SD=ArMch1D??aoEf}wG5MAS9J4+q;n(6bX6FTeMO3f3$mr# zVRRpO((OAFCRX-tFQ>3zNIdi&T(U#vsaWGLCabFrmU2lv*^b+Zp1?_rzgshsiWuj7 zb$2!6nxmNF)|4Pn!DML}!Qg$0uXD$tV_Qx@xCfxCp-Zz4GdK6xedF(?EH(0}#+brb zOM*ojmNH^xO`!6LP|W126DGKlNze!nMAD%!&ZWt2OkD`DN=)^lv*nT`B~FWle|97n zBbJnL-Z}yS zK5v!vGKSX3atvsI`VRY^M2=h5RRbfok*FiXbrnjZD~ym2UjG1YX_abp1v&5$Jnokn z440-t7^jmb)Je<$T!>GT$oE+thhf%?t?R{a*sUb+MN%lBsQdn0R$exe0PnrVoN6&T z1bRqf0UIJbns|f6a@U^u`ec!(o<$)dNWB2;sMiw66l{CDG2Z$e-z{UqbP{|dKmI(w zEVU4geNCFNg^-|P8)NFF3P2s5yOMiPKsLF~Ty0@!lVLiZT?Gvsl^=3pv+KuQte$4^ zn9I50mTbFPwSOelYu3JnVls-SW@te5@L?7$c!~S4&{$g=2G_kbPwK|eWEJXJan#}E@6d)SppcQ77l~aC@!}1EA*29f3 z3%$60ZhiGsVi+9W;Qj6P;|bxvI52TwOx_l zT}I@xli9g7TiP0YCsp~b=S{&z09j~`NyaYm%6iqN2HwQ@kral_+HYIUVP_J^PU12t3wPFJ7 z%>=0b02317MBnpw-oYc)()p}c&3KQ)^VB^yHiPnQzF=*!vMtgFL)xgwG5 zNe>T?@eTXf?-I{%8+sgs)^V3?K}{oWF+y4;ZW>UcKEt>8n_A=m4rUe_Ig+;Ho_iR` zEXf-G04X7XV}pSAMOSuxf!rWaJ+~9;IM>Jy)1(N48}Wox*-)^Pe7#4msqf+)$DW<( zF2gm6hk6Qu(~S|g)s;A^g7EJaq$#UfQrzLi!oQfNWtv(7TnHu$JVU4JgLCJ~WS-Tq_LB*xu!F7el4R(|E_N}dZT58M^ 
zAY4Y5K0FXOh_RV`ytM{CD_Iw(CBZz!1TxOB(`GnZ0T5o8ZVg~ zN{7;Tf;fV~hWUc_F?hn1zlY4#Db*olsWK&2US5UUsq-S_!)KIach_=t$#xjihd6op za-S<3U2>mH(xf{`*I%~lWOl6E$7G<%cxHC1EKO>+lSj)H=kpAu*hl{W({~@uXD&j= zt&Ma=Zwg-iC0E4O=`EDTLWwFxwVo7hiPCio$t3~VO0M)&ax2)6d_Re=n7X(a9ps?| zGUP2rE4@i&L_%bd*L-DC#YZW232?*m=qmpJ)t)r7<69JRb?hWJA+aTT-b8;!7!+{w zJFh2~Vpbrr9l+Td)zq5Gl-lPRJY(C-RYO=EKscG3d!2kWS&S9W58~c)JuvWoDYE`4mV~Bj3vVHgr!q8z`B6(|G#o);=LO@- zkQ_h_*|*d#L-7;;09D(C?O%Hp;Ikx&92VwOXrIei+@LCsn|&5l4aAP*0y<^jjKMxA zw{G?~H%ZvrwJW>@wZ`-YC?H&tEfN$299M_vsrnaQ)nE1Oc zk4WR~+Inx2$+ZgRk|(0Mjk!t7dSK-x3_(M#04Y9&ec=1~d%29g>G9E6rTjL>lUh3E zqK11`A%w>2fK0nc-P|x;d1~ICAoVGX@ZL&B$5*{x8_>g68RM7`L{mdvNW|r&1A%UE zaz|c6nFSb?>YD@R;c=cPf?T>wZ|fti71$Jt>|&NgMA&BM9+F;Pn{A-qlpcYsaJ^9k z13YI#%@tF(m&n93(-Uu}su zr98InP+7)dUm=o_R!b6EM_U}&qf}!geb|WE78mZrj+kk7Na+Mma)cADNnYfU`>hd_A03w1~E`n;B*W?5j6*M0Lv0G4k{ z4Y3TPZb1?bpF}m2P+LoB;(Vic@P*LAy{vq*y92L}bv>8y7HcOgm8qqPAn`(3%FSgG zcqE=%63R?-P8CF)SVg7c5E+w}_Mke#ON@M`4qJ0Pve3N>3bz#fMx&lKmSFEg?o)P!<08zM8Y@V^%5qq&I>mbEh;f_+(;#egSF|nFkokzy4dn- z!wP^^LP_U&v6PH&^7&}YTYZd$cdk7x^?udN-{z}!0$WZV- z{Jo4#isvAG=mg4L$E4WTD7lKDMht9QEd>lwle} zqvmi#1ISpw4$|p~K@cNkgVsNaEn+JaF>o$Yv*qTz>{6hTC9u*JFiOOEir^13VZB0S zJT$xNg>$t(534_h>9fHyS6a4g7<}TBHzo+6wzOU|EX8(YMBtkn-qdgo?1+Z-r@WO# zH-a*e4HqYJd3=rAk$crY`r%@ZZw1^UM%`^njhv6ISxE+*a{2@P%;1??A?<*X% z(OsdzMcLXmc`TTSI(OwHD6w!;%0M*g4^zG*zg%7~PX{HZ!rWM1C@aG(NRT)D21x)z zUN%KXV!$362e~KBsZhgCj@msx&K6PEt5MF+8rVnA>!W233YKnql3+YlUI$h4@#$_c zLfmq+Z0#FK7R0czk>8YTS~DY%8Fr*Oro9c!mLDmKvr?rTY<1%-0?iRkGoLw?wk3Zu zQO9e(W#vA`y=^&^9BFz7?KDONWCX-j|f!EJQL#&aYv7i(nZ`m<`~ z8&R#08J;maCz-&4fb;VYEe6u4_$R4V#?`+PW8jX}h_aE&xcs^VnytBIk|l~3S0F)U zLmog5JeC;)YJNmt5%|JPb)4;H%2hb()~jRVig_ZejWiK+i@R-jj^{w~A&+scnO6;2 zq%ar+ThMRmAGMW*R)GfGw9#4Mvz6~HyWz4tWf9~k$6kIBJ0W=H01`Ouok0p$LCNTQ z*{r|tr^#e3!IY92EUlKvM22#wY>}@%;Fm)mn0KTjxFirRK`2k-eD{a^KOR0Pjk{A) ztwk0%_YIawcrf&mLMv%N2ax{&lx!-{3OH-o9HqJId1(dLND8CJ6gCzait_@GdLM7X(TU2zLHbPQ-+}S!3Mx+($$p^6U5+=Ez`7dT2^$hmyP&Z>$)Od`G!(23x 
z%C>o|WnCQlKjB8KIQmZZHP{BR=C59l8YtuCu}>#Gc%(q!v~^^%6{JJkY>6_SL)19_ z{diC^8KJP-Q+~K1s9FdB%y0hiT?4~cG5FsSE*gAWN^Vw?w32@ic4F2eR9(i($C)yq zCzknakly5K+3analxO-|a#!-2StB>1GPJD7ghpB~SX~{B0(k@5M{&@^a&JdGGH}(G zD{8d0Bvxsmxg1W@9k|;_=#%&SI{*w!f+R7ka{_*(tR*~57{XC`G_`m%@1#48^Feu!K zUO5R?UJK>%2eWNRJ1hrqPe54l7{=!#uY$icBW5ArU6}9K zZB`c64(2LpBe!mQ)sG=UD36+v0k1kzSSzXER4jYj5L<}XBj(3Yh|iL-YEYbwXng3A zOQLC}W0cj%F5zQVQ)n-}+ugmeO@LQ1;B9lgTuwG|T#z{%{ zUP#`gU4VnzM4!0miudT^V|(o%(%G!9zcoa0q%yB1BbtNi8X_$>I01w_0X&^AOfy9EgAiAoh$9 z0)`r2e{J+xUP;Yir|nMQ^Etmh_d>}^yH}4Yj4VBx9LuwpX97S}5_jBk0PnFXcieQw zTtyql=qoc!vp}`vkjAMbEiW#7eZz4)Tk`CH0PY6#WmY&#)5Uo|K@D*-%A8SH0#uUZ z`at4z3G7C@_4>WKwi+n0aZdtOReAY1(jP1xFCB5%JJ25c`02DFdq6Bb_Df>}iE-LI z_@aC*gu-}pJ6e^ic5)XamN;cHKRT7nZWohiA)Z%b#Bb1{H`tO4{{V<-C7&B+?BlZb ze~X&ty&IR?>Li9aWNDm1&A^bAW(;;2QFMxU^y$7`t6sPZ3ov7#;3?7fAgJ%heG@KjD8)7cPc>FLtJ_i%WIzXh zooiol(0QrP52v{5zFE|hM8lAgLx8N^{{Y3$*mmB&ch<_d>Z6s+hCgi_i*bTXSV}yu zDBBC6+e6=d!5Sd+hPm}Y6*N;cI0@LJ@EI5+g7t}Kvtrn$u7P6(3OFFSefBI$gdgiP% zlTAw(ZD6Xh$Rqr%C|^=Y!+c2!0_(dH+9Rb-BFN)0`A-m<&jpE^rb0NR3Ca<~ zaM2CK+!%P9j;xB;@g`V5{= zFIXNgKgaSyk033;KKqv-c zjH3KA;W4=OT9hu;$VFmEMy8w1Xrx6XadMGJkLV0{7K?Z5TnQ4}53<^av2JGAn;u-g z>s8FTr&kWijIAC`=?MUd)OkK(mE}fe=gkOMc}Ph0Mi?Ibo_KoSIu_z=3|6u=Y&EKq z)sV3^#*>-?cdt_OPQk)AVstu3;=FyI5_paZSiEh#oHE5Fc;Z+a$YzC@l4KvtPf}}c zz^DPZ>)o|lc+6F{$63n9a>Keu1a;*4cv^E9B@CU(?;#8qvE;qMU^*uU0hpof-ABKy zrgC4ik-Z*6#F?lh%37PsM~lge&6H_Lmx_YHL0?ehfDY@n1Ett`^H@(4SMe4@DlrgI zvtF(0O;|azD3i@tW74mfBq_IYLh;iQPL!>^M!obIn5@%=IHYW7c?B!A0=w;lQ;rIaG+QWbYo;PI1(s5qf}w)^sS}DfYQB5 z!o7~EMtc5F5lPz>kj7$Y*JO{?V@xS|gpa6|xRwPre6$LnkT{+Nj{Ig~lV)nGLFU1J z3oSx5mE(>`;|t6Z7k*o`p;RavBbT#mqOh{sil%n_&5jVwH9D|C63EF6amuhoDz6gK zFaYv(V&-A#JXCaa)bl-aD6+aNWFnVIoQ5hiV=6zz(Y}+L zOt=g6_f$HKS~WYyVBf_RR-_pRYq4ajOH#(BNvv05_0Y#4nGmj=$VUa*=p7<~?s}ue z<{+ctqc4T9RB{=+^cOnN3gKdVGKG#0G9-N~m1!L!J+^^HijB(nWGx(hF%}?cB!TELFa@6q(?aYWZQ-CRpTkZAe1KgPSs< zfDf1fC;=r{Vgw3@p{L3WD}^(i+FdTQEjqaw>CGi-bu!A?IdQVtW>}970z^dEX!#i; 
za2a2>AkCmxJj7UfH85_q=BExJn-OSBVWQz7nh6T1lsiZ_AYaVxN-*S4O0VQ-H-_!W zCx-1rm8VlAN>+A|J(WqPR0*)HD0d1@hnz_~M@?-(70ji&HmgAuwsvHVNB}x4Vl*w> zu_}j=?%x3GKq%H3O)ZIzI&Xgzr&Z1UM=m`{LE!Nhlf;euFBH{sGFF!pC!1IO>YFT$ zBrYTh(V0s)9-*P=(K{!qPY_(C1sE#iEY{A}ioS_+6rWvc1t{`p@M$8Ak;wl58C7Go z^9F(OCF#65kXhulS|`3d*Ns3#jas`%@5_GO`*I|4P{Uoh5(zq1w0ClWaT@u2b-FfH0kA)^S$@6ij|cIai7b{vah|Id zic$y&s8A@0Kt)B|dTtvEdk%z-YB%!LnyzZ~DIk7HPD@Df*e>CB$f+`s9~C2DhCG7} zX?@R6b#eJC@ktd~AfQJPv;fVo<|}A2s4VP%<s8g5AkhFY^AxX7aQgi%+HU)I!|>{?L&n4&kw?GlGv(<`8)cKRQ`?_bM* zf_FFI`Zq0jllUH9S~gX$eM1U>g^3h0NDRJYi0lzz?HMV{UHdMov7R_StnIU*@8x++BQ%}zb!_eel6zG6kV0hxgdIdv#NFe+}j62Bkuf+1z=#_`C?QUj-vqLJpV%10o|>_dK5sIZ2Y18I-*eSCb^ zfqR6^$^G@_nBkzgt!3cA25_q9qnJQVXoIBX?jClsP+42eqwZL=e}> z_B`y^uNf7yx zIgF@K0ap#*zYS7&p43L(L&VtpBh^TQ%3^_z{Nkjy@|YDye#_-|RngJb*QjaJAPC=Y zUv$vtI*hg);&hFM{#*L2u^5b0nS6cdt4lO>B?=%|)JV{X5PdQOzD&O5gJ^Sok`Bw; zRwJW-61gmmCCROcax+CWzb)lDLU`6v$ID(JG|m(V#*WW*IPVY~R(CfpK`bnkBZiGg zjDiT-)YBqI7iAo{pl3eBl39lI1do97IU85&%Mz@HHM<9zs`9m+K_<$dW&UFKE$KTW ze2CK@6sgD;BhY$kr*%e90mJ(ndMdf|XAe1V2I4hhdj@GP-hfFW%dMFfDL25DWzb)e z02}T{#@)H%$-+sHbjsR@B{>~in)#85BdXx~Oo|vZEOpEmhylSmoNQeQvH0k-ek}0D zF2qCXtVei=k|!ZTrC8rYQYD6xymGW6LQ#?Y zwQoxWM!`W+H+BCKPAsX_-;qM32hG_X@rIlnz9j29*7+I$ByAk~7 z5%L{}Xn+HjhzG%$&ks}aoLTEv#B=2+Lz2WvVi_e_t=eGXBI9H*{$g@%`hnR5^6OrC zvc5HYP{EX+1!KeaEm;o6BR$xm;~=$GRVRze!br$E&x) z#Y&dz#IqNQDH_3XUD-hg`Ht$O9WWt=i36-P1|6ccr(yA7!{jY#c5vWDv>qDyrd|>7 z?gRM#Un`rAXmb$cE#8XZlvt&6nUZvhXb9$pIG_#57@#f?uO;6~GSqTDdgiOwN$yn1 zyp|=fym)ysNFA0&U^&TJX;Zm#u!mf9M!MMhv?%8=J{`$DQ;x$`$26IClggQ8nySe= zm`|9`u%Loe1r83!eyj6-8^?HqCyvNUpHDU|q=HdhJi%f~W0p5{9--$U?7%1uq1$Ig z46Qj{*q@L(^OVp*rzCOpnE0O`8=x~%y%TRcUfrBST)$FWjhgeLB(g)~i6&Sy0x*qr z`H0c>8!B|>Y*)u;>}Ty~E-S*aL&hof8LJ5rc`=Oz1)@);9M-BLNMTf1=8V6Xq)Ew} zVnXGH`zX;IPg~%}NgaqVjPfj!MI(d?5m$j;OssgRMR6bu$7C{*={+d58(NOOQ*w$? 
z7-UF^U@LG0OCEsrhC>)6ci98od$Fl4za<`|qMRE^OEZem*M<7sSqcm#}8uh;= zkaNC7z_ z_5j>bvu$)$qy&%4*FtI1hTAR2j+ga_ys8=nCuL|EV8GvGlf$c11!hbv@kpwcYEEED z4=`Y?$h#+gUt+EBN#omH`gP%_jUuaVabR;e+1cX?JjhyTyycPCL>&{d0Me<^?a*N| zl(KfdvLiMV>BWV)0%;tZDz(C~CxLiJAiMtn7k=tMHkqw@o*Y$Uja6ky1x@MOLms#PJ2K4c7AG zp3KKmKPMyrid~38+Fw&KKCMmHzmUn|EK;5bqqmf|7DLL0O~=E8YC{v*j@8tFLhN!I z8rhyAX{OC!D^HT7DE|ODSLInIjiiLfCeZC<@)i#`NUy(~NzLZZ{DGQ(UJs z6)nvILnB0yz{U9=%z<3DKk<2B=#~eo+nJ!%Cx;+Bwz?kFyq9g&&g*}Lq@6A1GO|&y z7cZEeYG^%1MzQ?;a?#{fKvF(?4fpSbJfQGdDxXT(dp1!ewopoH%@) zPZf1m2W%VW>CvrTTxJTznW4FkYq3L8y0pn+M{XQ7>h52Y9Rdcm$bdSU$5W#v`ZHh4 z$33ei)Gi0t` za73}+`g+yi2qOZ!Ic9ze>V+bYkOP|g@;xBE;OlYF!^d9rtW|pw-pWn%b2<9hBLQRF ztK1=U=Iybyna`-If{Vfa8M!tl)b=h?#N)DeR_sqa1&YG1#yJoX%V*30X>zSiS?xT0)QbO9%IONX~#F*Ns~lC?x=60BC|f;na^&E>|W)bK#^ zZP*~*-H$B|icBQq6N9sxIQXmEqZIe}eKNe3C0=a1C3!BGFz>Qe2>bQU8myOV$%=wV zi%A5M%w8ubl4>%n40}=KlKT$B(#Ku)y>x?h@%|%(Ioi< zaU1T^$WFmJI$7v6)OEF=GxkhvTCF{!&5u#&raU2+md-&aOqt7)%y`zRi$qskZ=D0B`Ryxl&~{9h5!(%(Juwq;}h+M zy4B%yso^|do13#NvE?!_&^~Jct(lvZ-Xy*h7z@& zI3FF3t7g=4T&*+6BWX`1)3+pj5`bzs2OaU;v?;c=JnwY3Q*%3Q!pMNA`eCHl>XrDP z#t_M8Fty-l>{w8^Y2hJ78lIIzZbT_B5+CMvF}Z%sND8kPpIW$Ttqel5DUZyNX-L7= zXJoZj3Z?7%Hj;LWR1yOGZhU8B5N5*upvG3QF$yC`!lYPn8w{jO>JE{f?+E1>YJNOR- zwpFN2jM$!ieXO-$=~sgxC8~2Iweqk@O_W0M#Rano1=>9-qfE^Ho&lOgjBFP@H@cK6 zQ>li*!!WLLS7nmD!mr-!sfNw1=Q^whxN2V=+pt>d?K5nHsUnT^tTCG_2YLJ0VplPY}vl zGDCTr>e_+Lq&L%h*!A~Uc4CDt@RC6BPbIBJEHw;$e<{{Ei^(9UiqbH0ib#wA$bXne zr$dPC+kF#O9}wYl7wN}A;l;@x)<}96M z;-Jb#!i`C$j!JA4Ic=#~Ih7-h_^yHUuD?^OBjQXI88g+XMon8*l3AC5g0yt&KoKKQ zhnn-TEU~HH)j(0QS4yQ^_HL6AVd2asP_7FcT4A673MR$;?W-+i-U+0Y{MZ>(M31Rn zK_@jUzz!_vbOF-2&q6(jtK<0h>$7nSVAI ziut?OVwUV_M;{boorxj1kyxuYl&v8~+J_$7R8<97jfUGcjLtj65kjXogQqk;YC}fn zonaQkv}Q%}RiqL-DgZdY5FGJvJ9K5$48`Y<$@jke5{*~rFFTpMb@p47(8g9ZrH0Me zX;q4AjPZyO*02d17=YZ85+qeuVma)$W7=25xp`!O*QO$gA+7Z=2I93h3}!$|k`;qK$k6I@Ay(8C zadzR6Y~^xQ;f|~?^yEMSa`R+afsJKy{QX>!5=iy~D{{>ZHzgEymAVDNAGl=1pLhLJZQb29$#Gg7K97Qq&*l*+O%iRqJ 
zhEps%pG!~Ubf%fVb}Ibq89Z5LR=X8}ggwlx&t}I{ZtKk}Q?0*YK;PrsE#A5oa(!s;e!DJkV4~;Q&#hL~*fg za?vN$%s_6D9d*q-;zQgri`&$FhdreW<2sgdIBXpX@fq@2DC0h3viWrr)`-D@Ag+g} zi7UjDzj8G-Y^`bwTN8V^Cy~WQrO`DQtsi1*A)oLp#Ed7IwyT&hVUndvUxABj~gpNT4%3PSXOt1 zTab)9y6c%xsH75cVM?uuD$$NPgw}0RjP9j# zJj}otM2Z!Hb1mxXdzO7j%@o4KO2V2UWCAqtyLjKuMc zIRT|F+tRuP&Ep(@h3!|PcJ=HwI@(Q+o%(lHUnV~!}I(B#j|{_(8%OO+qMxSLNL zxQ3!CBo#|E%{^(|ytsL<9&9-zX|vdl<(H?m;7Myc%@z`5+mEuLwP}VD*58Ex0Gty_ zFnD(^wtOp(&SuzLh{bkeO1jR%tD$#^Iq*#fVhIjZ0(MHCo_KCb^zt`muRu0mEm`a< zBm~V|FEkO#(maeoa8M0^2#^f{-ZSvq8A^PoheN@Wk#iW6$&ndcMPb}|&>|AxmHJ?$ zo$gOa`8NJ1$l?4Yh5rE3l_8(SId~_EhFzesG!<>!=l=7)0cMdRWKbPhRe(C!U`Ej{ zXe59DjVDv58}YPiotrHl(6kR;UUMaEv4OFJ$E_=v`DsTs+)=qQFA3FNN0`Oya!Vuc z(x=pQ=^T1j8eu78>_au1HR#0yG=dsQVm32PB&v*z;4#xk27x}14`K&jj#3{G$B>#V zU37*R;>Xyt7bsKJ6PT_j*=OG5M%xzPk+#n!1Yb2b9CM1v3{4D{9o<)D6U_*y2HT3N zldi*S?bKMDLs?P1lR>vXc#bpoP~r$?Ud#e-uS0T&hFI|a9J5}Pxo))AO)9n41&U#4 z#v_mbK<+rIp5zf8_6N3BFH`XCY*4XUD6ds5YJV=*DdI_Dlt&RjJ1Qt%=Z`0nL$6kV zJ|RmK^L6|?B+}ixUSdslDyPo1B7|N*u^w7CB6#-T%yfFwYau_HTH%Yu=w{jjFf~r=s(a@GzFU1W0 zL{;G_xRim&JBb(qd+Fxe9j|hyuxU+%!9>wKHDH!#yYOk=c{Cl1X;q9c#o37~y&Y&0 z1a_AhjJaaw7*C0ntyg4_JYw8(w2;jhAf6;4Vptzfxb8a<#W;aZq<>vt7%^?Heu@f} z2<|>CJtU9MpuNiHbI*nH>x@TQ+ya3l2hums*>VQmKnk#T7_eyb-p*(+EzF_?dw{&sl#T%;EDD>sHKp zGO*y0q_Ira5Fg0tB%B0bwwX^8v!UGR9V(2d@EJK`+{NZJgRZ92yI&P|@q-iR| zWa7%s!H=mzo~mvg)PM{*gdIuuoA1MG2woIOErXbI{pg$ce=P@vFqeh`DP)#7>(yqj z8`UG2WRhtc{{Yvy;8HgJYMn^sBz0Kd{vT3N;k<=u*Mej1!ThrkPdCegL>v`8Lvn)5 zyWg=pCueVnVZdMT{&?9vmffzyU}^mDi%IdM=ZdE(EyJzwX%5j9l+{7+%lxP@qba*P@h7fU;)}*+@ccUe=U^6 z_|GA8BVNSzsK*&uCTL|d$g=s5&B)sYS!8bPG@+D|`xHJfsdpyxUae+1ZePl*9)(on zBUZ4opl3Q{XK2(p*d}HQ01OVh&tW_-R?1=PPbOa_V%2#5nE_G=jymM)lcxx0a1?1v z@$a=gVVb2j2;P$ZPOdI0kWXhGCnwDg{CUh0FwVRV$MXV(Wd~&U4yxwRU~aY=+n?Wb z!dNpCCc}Ot$G09ymRh8Ej|J9-PETH~2q~M>N|q~&6=0auO$N}3gwFg3<;o%$@c~Sw zOeRA$YQBE*n5m^G;gcnin88$9;~3x$iLohVR&On2Wj_A<>8?LD9jr(2jCCmzv~tv! 
z>tr!mbjZ7@oCqKJYN9VSL2%x+Mpg%1V1~bnY*YRtYI$R`W*#<6dDbL~8PqHh?D|)L zQo%qS!6flj>UX!ab4fQN$PGN?l(u2ZNnz9iIY#&A;kuy7Pld}|$mFVvGi4(%;!V3X z+9<=SI0*~@GlJ47CT1RBNP!3xlhiePx3V^02Z*U=JJ}S8>ovs?hOp15kcaHLhCPK) zNhKA%&lh&G838)I9Z#Csm+-Z_c>CT3g4Jf1D=j;42qG^Ez%KEfqh}{k#}G$u+g>3+ zEIaeeHU{6~Wv>>=jL1dd+&qy_=GZjl5Z0j_%wwI|RhmgeL1XfX4I`NJ0o zHYhnVLobi=wlUbrq8?Y$SxAJBG?EWgo_oAdy1*5DsMzv8SW~;lekq3;9zXDV5q+es zgS7>0wbz)De>q`hlFZY_2rjX#2^y24Lv40O%~hM&w7j-SgXe2_SRN>G0jvN4V{^xl z@Za{Q{3+t9e~6wN&)_^O;V5w48>^6-4+}wPh|ObKNCzpI3mI)(HW?%(h%T{!c*Fpl z@sHu1PaS{hoE)AlT69E4#ak_cRLSA-z90S~ zwJObdSu9+ts9CC{F$a0LR}T7BV5M0>2il62DrOa0JKORc=gfH~7S(%uqr846^g@im zc&o-1JS{aVx!lZI?+K7qFd&?bf%h4 zJLBdwb>!q65iJ+KQSlx;Vh$QBO)E_dQAon51H3Fpl7JD%@~K87wt)vl zK+NH-oAqw^V2r9pv40S)4JM7D5jQfTHI&4#@<8X2E{q2_kO2KwViJjt%8F5BtY4nA z0+pI(C$GiODz5o$%gi_G2azE5*Gu#7;w%jd(n&`>mAXlr7ak>K5}6(Y8A|dd#FG=n zPiEl61@3tfp;se|DlobT6FZx2ug|bYOywCICO6jRH&fBkO0xdJyN}fhJv6Lym z(%G7Xby1{vwW_Zoyw#arKff5)|N@diK(?m{{VPqj6p4O3P&O^99{-6xNaJaz((2Pyt%n% z?k~cz{ZEvzM>$k~J>p>*B?75q89bXyvRkte-=$(^oANM8+Beew0G?7mxY6h*$ka@bt6|bIJgO1K z2BNJ4Y_T24QR($;?41+&x@Nu(v4X)=t(mO*yO0E>g)n6OE`~Ex#8ukyKYBi z*+zgH>-2-c=Qk}Hlrwb>PRq11G_7cv891+-5RF2{~H;$=}rYdBRO`VyBFWmLncZEounlh87H>yiLUwfY4pMQn4M$2%e+cs$7dl ziMI9e#Zi@}nlT)NBQap!Yj*{>t89JGZrj&Mvi2hWCvLqMY^`LH7@}VBdV+CR+U&l- z-+k|4d+TRpz9RGCl3O)a#7oT!%Vfl|FfPiWlm^nM^oC*iv;Zf4dE^0@pWTuk;vI5~ z$=a#nds5TOK~6>hfYQQ_JjHEnrj=E<&$@x5@7(X8gcNH`_eAv|fnjHVE@LD>g;gO= z*y#HLSf6lxy496`2gOev9f~(YT2x$=i;^m_&&SQ*8AJMim`ACBBzuoxqQuZvXlziG z?oze0HJLSB!=c?|i%b-@`w+!g>qPg{gPa3{h%v*TqFbKl_eyG(g=izL;(jx7f)OK! 
z9KPI#x9i(P0tWsY)vJ0R4Ox=x5OapqKTaZ)IFz>_6-KrPp&0(!J`^qjr+LF$a!CA1gv--uYe$NnMeG z!(QdQlU|C1Ey78m1C)%~R#le{B++XX2tA~j}=x~Y)2#c9$?z2){uyX z&dA>uKV$LVr8hh=l9Mx$e;Hq9j`RsoL2BGcA@cbS5eSbUdzLDrk73fNFgjxWXpOYN zyIXH$75QG2|TY8AIY`oBRg{0S| z3l{7*p`+>pK#q?&9}VIx{{RYOk;zQ4wLC zBE2|XLoDfE(d6i_Er16w>7aQNzWuk;3tHEAoFx9yxs4t2wXRYaYYO$K*7{str=pf2 zMv#)J3#+P)F$_sicg0(O;@?Z>G_BE%GD-a4YsxLav8?39w2d4E^$sjYFMV;@?bvs* zUMX@`7Q;!}8my{3vE~m`o<9UGkL*WVF;)^um2W%wG(rmOM`>qoO7BD1Y_a`^Lr)b^ z&nKy2Nf)jVv2 z73NxqD_;uoGeM~T0O7&r1XW&K%A}0j=i8Ea(;X&Cd~Qa(GtYgiH47^luKZz@43Nlx z>&uo>yjPPIjRKOYT%Nn^s0B+Kk*^(>S(zE#VA}l7%KIDQNb-4_;|y%nsZzRPYF4>S z?m^|N&0*BDEWD*>mBODvWCU%2>TAQ*BFAR0czVoYe-R{hj!r{qU+HEs04v7CfV>hE z4Bdv$up8dL#2NEm81Y^M6f#j&!(t<+cQ024vOMNZtF>t*Dmm@d`RhQ0PTOxySBhzU zUyEvHntW{YVRG$m;H*5uR^%2)c;sE})uQ3EqP$PZI_YmgHwKa^rG~Jd@2F`za*g6a~>4Ng66nm~UM=vp#bb9Lm+{ zSY->8QrZOfo6K=(i$V@I|gIL%x|H@ z4v11NXo~}GA+ znFI~*j3i^KjU$u+6&g7(qo=V}MiN9FACcXW<1umXhGByuFt=d*8LCaVms z<~=C#5>+ww;V1%V*n#;8NkcCCb0@aPSW5{AJWULeTBg`a^{{HiL5f{U#ZW2HF+gOH zvCv{*Zrr*W&n~Q5-8bde4lt~$;ObeKW0;@+0OU{4nwPIMD|R0bWaYn3timUb)47Eu zGD-x79!hyevT=uy1cHoLUBSXEwD8YFhlZ^iR3fxtTK;?tcpp}^u)r1NqtDn7OAeZN z%I-tNSgb?~7Djk5_}D5^vf7$S40fhk;S`QLUoksxMU!VmdZCCx4ms%Mf>^nJc7Op8 zCA^tEQw0Q+R-Py0{{S(7@z~h{DS~^Z0KEBrY@(>g({W`pVY-)w>~+>ld0A$X#fuW- zt_2=vkRCnB3Y9wH5ah(~e8Y{Au8OgSylu@{;l<=#pP!pvZI9&@gAZk z-n5$}cyQvA7h>aL{FtlSv5bMIMvXkS9;RrAbLy5w{{RVZNYOfDwA0!iNi1Uo656kJsz+-X zXgDFt85TB4e60hn{{Wj~(sZX}YpknRv224p_dxd+!<*`vOWSE`~31K1? z{DXN|dseDR7uHvhP0mPJXA8d}mQa2B9dHEv7AMo66a`@M$bli;*rGa<_js-A} zC{HmvCyKicUd!Bjg#(AIT}D8>`s}?cdZ2o+Oxss+ePTF3Bv3TRGV+CB2LuQOKWYV=Ds*qQQznA>5OBd3DquZ8_&%Gt4 z5O{9(CmEZEEVuG8t2H?%lX4I|`A3!j+jCL@-oZS<1EsaGxm;~)cjvPeYUaeQ^F)PZ z5OW+MX5?IkAwdC#)mQ=n2dipOo)$AbTB|xH1ys?Xrxhkzs^d zv5;8Z9!F!#h*>mZPznwlNGf>p17J)X>R^}u0A3ze^5e|%QVQoR&ARK&H_6_`)44sk z9WplShpB1nM(!XF$c@F?QreAFF$jyxVb692-?M6rmHaPKtQh653|AT(a=m70*&tH! 
zf*HA|J?(&vL?8m(ix6{p+Rtw8T8=HGv1`ows{&??gmIu%X&o76Jfq;OqD3UX<{igd z;Fg?zGJ`3frHgp-7HmOt%Zx{IX)ADzM<$S}M-nO$Sqb7*d3sBAkFQ06)Y|L2O?0;b zK0wccuAN3Pg^UtT=Yi-Slz)jIATmiG`lFYE$TT|Y}W3e$Lk+pFg&%&{JBrCS^Rf9Qa z-o}&;>Q7xG!8yri@MSB{LL2A85?`@Bnp=;~u4ES|B9$dbD(@PQrHEagMfS|91HmzE5rU2&1D=_#yy;5 z4R*^elEjh6&S~3**gQzU00$A>fY|DjQk$lOOxShn_D^dKH=FIZw_la37x<0f`~Dfo zWKyMicAl$Sl_v{EaP4L<<`&`Qw!n5NAtks4Cq#ha)(%V!$UHqBF1%|A$VD|lP#DZ$ zLnU(#LyqQFF-WSZ7>Me}EI~Fro<%+?#QI4haEXTUCNOZ3hWmqBNBvAU^k7_XuV<}vi z8qLV@@w_KCm~ihCBIm+>NLQGgh0N4b<(m&uvnEcKGD%iaBeJbV!ufKnivDN|I>c3p zB$%6-?Vv8%OnIfB@YVTj!wm}ds82jPl39c(vOJN<;;NJ@z_LYEKbev4Y0^ zRJiqn#gja;sn;VSR`i0c?y6DOwwRP|<6SDoLvIQE0Zg^>GRrgJ@_}2+j_#%r!8l{t zCF3-DBYGR&(TFissB2|l5OzE9x2NHHwMRSvN888cF_Ji8z#AghHK-)v9}Y}FSr3@4 zzQRc)VeQ?AbKG?1`Jsq^2EjH)$J9xJn=6t)5P1;Bk_cl~B#&0xeSigndzC9X_=$0q z>R_?=C9@sL8XC5$=m&-6K+H#V)O_YLDfvU{){9}D@f+k4-fpB+%hq${3)Vk zglG>+nEby;(LR6j3c!HAKwJhiAR?UJ$lEML&GcbvYcH$82hrW-e(sk7BHx}~tVM|G6ajdmeDxMOo zWkCZ(5?VvVjSk9viS45nE1wV4x{4a2VOmflt0Va-QfK~PRb#WLRFSc_Wf~;(WYl8D zwn)ztMON;r&LZO{Cn(JEhfsepc)(%U@dWnYQ)V+s)7>&XAsEd3rfnimx~#!k!e;5m zD@|Q$;AftPmeGasngT3Btb1~bDV^O%Q6b;sz_N&W{hdp}C>pZn7$yr_n^Cq_; z;7A04>hid1- zI<~g6*t-52@WayE3YNY=Ckb@SB%px)7>*?1a) z=UU-1{DyVthSD{cPpaX2cja!q%N~b`+vV5c0xHqNj3m z+2p;s>}&$tQp=sAmsxCBnB1~qn&oQn@kwShkES@lW)7U36xxp9k+8i>TvTNzIpoY( zaWnbbjkv0u$6?u?q4eD`)O!Qqh*V6@zHIYA5uO5#q6`1HG})JLw(VjxP<2`h3)qWSYf^uG>ns)+iN& zsVZet$&nT~;?!gk$bD#7D5JfYnr{ok;mCs8yDUj5A|YBjiaEy^jZpspDmG~ZYZ~ur z>|1TJ-ouDFsN7@bo4; zT^flk{JeB3V{(}TFDcPw@}&{9YsNSuR|;4lR(Tpy#Zh8Ty3~s@0ChJ%j%tdmd0~F0 z@$KqDrNQ`{1>*G?I`Q6GbRgP8Eb6SG!9P$?09HprLl*TAhkEa@twrGstvWYj!b_02 zf(ej|60$*I{L-dZpm|)O5@?wi zs_)a~F8k=~UpF4Fj>mY0;~1hwtsJ*n!q|R5$l()4CDD|zP&pCqyJEAxxiw#XPd$e( zpSqQ_@?G6^`$Xd~kwI-TS;{cbuUL9uJ2@PQBo-xofB^T0!%>E$b&`Zj6{y4Qb=Frr?A&Z ztD2)j8g2ym%t`dnLMnvR0gTgba(H#36EESovYr{k#b$W$*76TFNo$u8s)`Sb363$GbOF z`SqI}CC?IG6{}XIAS!OgUNowxu`&c2EYkT=Ko?5PqI13^6Fgpf4|2?&9IZq0_4S)X2>Jz3(gaCml}GR5(auNEQ?sCp5CB6%ebsFzGo 
za*Tn#L})XOl^sfBG1(mMUP|~@`dPhlK^gaJaxQM5E#Mk~v(EHnqFtaBbAH)}>R zep}ba&U~7xp%Ph3`DRuCX@N+TDP`9HKyqEWoSy4q`}=rvNyD~{HX4yUFT&}!h_e{_ z?~%LVx@%J)4AyPWZX;uyi@0;*MyguWZgFc)5AC$l8xXl0qtn20_h74{^&~4Ln<(!um$w6Q<*b z-D|OK{{X=guPZ8)5O|i3M=6M5Zb_`}tyyAJnrdKkRYZwGx0YFpDrty3lpQjSIIP7# z<5JZ~rTR(`t*c0~MzQ(6sF6<)H_5!xKq@#PBV-L>M=^`5PVNgKBK;;VILj<;B0Vwy za8NGW%!W9%Bz#M_otI(4uyK#@;au!7U#l6vHEQ=R7&!!;rcdW_E2LgZP@|5_8<6$v zGhIemoDT8U`}OL9l)vkSe`&vuN6*C{OE8suIbR1JGz(dE7fo>(cu_XOvz6EQ$mD23nEXSJDs-F{da?kmDARh3)?h_b+lrS_*%PIBb*gRWCAU4Q zcV&c2c2tv*s`18&VraAoPiW$e2W^CIDmf?~cBHt8@bz(!iE}j?-cPQMq+@6k20dqS*<>knGl-|+p6FOLVZ{hdwM>o~m%hHX$rh8Dd_am<8R3wFY zmyjOO#;1{8j`AOFJL?Rld&8J$V#ZaDJf>J#6HCI!I(67Ebl``xq7$~T{{Zn_e6$1( zPqcpx%T&$VSq}>4zn)}e0hYnvkc1CZVo22n_sDy3KmnJ)(!D|)r-|~5UG2+c+?>3j zD-Y%BCFLg_c`@l#Tz+W-{{VqNMIik0E?`07;pwl36t))z2Db4h_wt`%ZKSktX;QmU zlI57Z`b{dg2$aX9BSXKSkXUj6p4-_aTfVrcBfoj=%UZQeaX^-t-6Q%`^E9Q9hV9MY zYIW`1Pe4`13ihj-t6mDYn)7f;Mw)%MA2L!v48#cu?cb7t>GvM&ZJEQ=mtRj1nps|X zRC#g31iN`iDpAvkj0m;d5IYgRif;z+N6o%MV=6!nakEdvN4g^BbHcQ3&0`mftx{EZ zDH-E4O0aDXkz0{61VY@n=}#Y9EadFuYU0Ja5bEKtI!fV75ij#xI-PM~8`MZ7 z1lS$9Z^2Qvu_P8Ig{xAe{!>n*q;fHlW8}I#*-1mhgXTLi@2-r4WsS&|>9bO-j}5z# zA)3X&l7w$BXG#jQX^Hg~1gRa5v+W=dIeTfzCWBj4LG}Ih#cz-J?N+y9wcB^aiM;Xk z840)mRW-yJG$Rf?#+pDB1(5OJ0VU#c-Zo2SmE6p^J5?xo(!3bgRhD@HTJ@FB!SdOe zf&8Q$>$gGU@!30>EH{TOc&hbGU8&UP5<6l$Rw=raR415b+j$dqQ^>SHd}~LBt;M-LsJ$SCAcCoDN!$AXk zAC%m9_s3Z5EN8?wBe6QHM$#K0t0~a${O)@J_MugK=yx0JZ;0niy!Jdfm%Cob6@2LG zRkDyWMG;|YRRHM940AS$_K%vTj`oI3M7b(+7|>%we!plx19(3fb(h63=5i6x%w2|f{;rH+YrAS9usm`)Se2qv z6hU~9iz}5J!->xm_y)(1d|QRYVRBb!87&+6DW#OO=vK%`B80&no@6|gKwwY>J0vj{ z*L+=v%tIa8d3w!`YRg**9e*~g6rwRR{{SL07!ej&_(#u(C|HvH*p=bT6n+-)R!<>o zA^amFnxDxMPW)x4ot6cRO~m<*>Z%Kn#S@orGpZ@E(`JG^bd5UvE{#f5=|v*ne}Ijc z<7K~)@n#P>PIzlvN-H=?mTNB^$mg^pJ;ZDkqX-bYa&p|1u?1}Y9F|^BQ%L7%f@-X5~aI#*1A!anwAcQTnza8F~?pjH*9l^nPi^BtH~nqAsO5P zqEE<301|l-yEARt!{sHi_H5^-t!K|xRT0*m7Dyo)01{+y$@!nfD6ec?w_uEccUkp5>WG`^BT`*uA#@ty3IY%*EPSB_kVh%n;LKupWHCW3z=+Pac7#eSY= 
z-pDNAYh6K+*pM@5&6nz+G<#jCLGHhs2*W%!+MLi+@FZOXT$y;GE$DD?}tk)NT9rEJ{{R(6w@rLp6-~LunC;qVjnpW)1PBDmI{KYREj(U%wShh_6 zqs=NrjmNpr=25%vw=JH;_0A5;10RtZ;)7QM97_#|@E4HHwLp61D;t4y5AMqZw!IZIy@Xd-?ns%vphsAW`iy;PpF=7ie zNfKz6Luf{_uy) zj(Y>cPzC&WqJ1+?mU!k+)u zU1cy;bq^kdh{Z|}V?JKT66JIUZi~lYq{vvvU8RDvk*5iU86;UjuPm%Nd3s=~5xJ6C z0knp#je zF3TfB-Zo@-iCyhuWDa(FGNp>2DU*`Gtzy*mqOmRbCWt(<4=P1l0!;y7O2&v>6UZtv zaV6QMa+h(`Y}}}GkQpo3lOY6H*bB(uZ5Bq5g@_#l^t6st70^9YeiwL)!&2sBk~{&h z)}~~j^I0LJ;}4O|BLPsOODA?^9`!f z`L|KeY5NrV4IQWd0H_}wT2maRM)!$4TWceWf*N!pyCPIEbB;xU6=GiG?Rhc|U4S8d zyOGq@*pW-?iR0$r7Z$?df@1XK`q>_}k64KPCDl9i=k~MA-$_uDMgo!0@G@`2ab}ipZ zS#ua#U6OT^KOPHlnKHvkl5=w8KILaZ>%k;`eaU&Pp~1q(n|idMKDO6EkX=SLD9fQKsYd7j;BoWvZCAA{innsF3o?LNCfcD~c#L4W- zuz-%|xY4fm5ZAX@J~z^MCDg9q+v~55?d9{T6B~q~XFB&Ab}eQqeLIm5N$xC=1wWBC z4xhTS4^F_@JL;D|Jtwv1*pV#oCJ3NL;ucAHK_G5AH7wD*XRN5zPU9q zRR|87iJvtaoa$}P@9nSS+>VN{@Ko7pF4D_ar5tdVg`;~AnFLWuKXp|nae`2&JLEX% z2HOs%+Zl?ED%gs3Ymn@V9eBYIyt+gm%+|t%1n+?dhjP2*A9!yr*BWfDNvx$x7Zye{ zBdKA?4k$-q87(u8+_px&$pCmtrZ`%MDl8FO4%70vYm{v-odT?KnB+wWRr}~3{k5^S zJuD?h8g#`?HMu06_Vf8q?>($xxaYs8pXEQcd^h255M#4&!6?a6mOl>R6@gI?6fl{h zSCfcDgwG%$2Kf`GBfLFYo*VHNGM$TlC4sEq?CRX?(DLEcygnCN2ks%v3fY_CvOh;?1|-;cG9o;F1yORE^2wLHB!!dF+rw=r^+ z$X2IPYZj}e`Y|I$v&~tiE~~Fhk(QB&W&oX%#ee{B<)2>~Uh!15E>)r88IrBMg_^L% z1=wCb%MH0@L{2+t3M!ovTS1?%a@4C6l*re=95GvvM~Ipz)*&otB#9b8dAmrFhY!a^ zqyf7tdQ@gHIGYzMc&gMi>)eXEQ%e>gPpOc`XGkJaPZ$cRQRXhl1xs($(6Q|@9!c1L zr|11uyBkbpWI&HSe7NxAD-1>wEMeWj+Cyt8rCq}$U%Z#@@+9=R<>$ zUbG5G>~_x=f1Vp1c@9#>PAjtJ=}RoL$o~LG0!PX+s^wlZQKlo3I_zwgjQey-Qj5bs zhj%COElRQ+o<5*y*6@j}P@?{rkN8zqckdu0YZ{LJUdvd9LgV9vbuLAN@c#gXagS!5 zSvHp~%K^6{MA4HWiw1fc7A1eMEIbV)upcDSCzxtHRaU8zFkU^H!2&a?+zs zWdM{ZazRC8UP#EJL07JMABbDUP$pXT9;QKQYU+7V%%Ujb8iiq1U=%LbZO2+-PjlCU z2$?K~N|mODvIz?;mo7k;reOTTGDhXQ73_sZhPVDE=-BsD5lc6cTZnVb6c zb+aRo#QBt!Z_=Sz=UZ~barXJiZ0(GUUD=NIdlExchX=C5FUdo}JTs=wBza;3_n8lC7mwAy7nwbMF59hz+d{ zmiTu60E8#BxU7CdEm-SmTFw{PjzyS_Z|&xmY)5ytp>to7?!YIygyjVzDl_W^exK+Wpx!}F34 
zVmh2(8&iC*DAO4oTvUv+#Re8QZP{f)MQV~t%$)R8Gj=Gg*LMMCkHAC1LTu4HIohB)DjY1!k6bQqS2 zNIN9yOEDz$P8%aeDNa8W)#*Z*_F9$IShQ?V%Nsau{IwA$k=bQ!pN^Og}O$G!4qxaDxxat!XYFjyMP28qT!!`IK zsRR?Tl4}y7DYk8EeuQo45L?)5stcYdhyMUmMTV(Pa9&Agn2D69n0uCTc6LeGy5je-@6Y5?>o=U02k_U1@?Z>V5 z?}mF8mCV5`BxqVArT`Bp_W06{{@$a~3LYe?*DK>D$VnvCY&~)yfN;|3iju(VWUyvD zyX<@WG>*+&wn<}5O!TL)!a|jh%M_)yt;xBs7SKO+*7lu=Q|x;{9k?Faqle-y1a&Lg zG1Sf0izgMh#e4HQNfRF~%oC!R!mw_|O6Z3l3c&teo8_FGe3)opj+rtdn=u}L%h6Q} z+!7f}H!qUIsQEH@Iv8tKD`hTub}h7Z;&>V&wmin|%hROXpp{I+NJoTDN zb55A*ERiL68drIk5!|okM333dfY|xzl-d~M4ZmG`Cz?ipXb@aPo?pMTF0D6+aQP@( z9c^Wr$JAJ`kk^mODiA1MQ!woOd7Y}TB$7`1^(&tG4A+LIj;<>o1)epeSSVPQK_#VN z@x;Dk65z(OF(l|{pJSk=uzQuNzCsZcSqnBI^1SlS%DhrzwCoV`0J%D2y><=LtX=$k z8Pt|6keADj?5XuoDhlLPXkv)4;$+h^xBzqhVNgARbvM)mW{LYxN6~amOpqHE{HD0a z!uW=UKOu|E6}vrUvgtKJWqh)+sa-63J%hI*Js=kD7p8egyj?enzm3tU7Ec+HgxQxT zO5h-|iHKH@Y6|mF#iLhk@8`JZei=z(sFkG2qECvIDb|XkM;J&TEU79#J+m+|w=~m* zP@s-Qx@CHHvo&84Wp2rj%f(LAR+gQl5YAWaq?}Phu$;6>uFAFr3(d#2p<|_%)@m;mm6I?=?>& zjgq7ixVZ|?C6H%B8ds0BhF6sgfy1bcV)bmWM#scc)$x=A!&u1|1B1tAq_ZY7UmcTh%zHEYZhGQ?;Q?o99p4L{xS+CrYjl5j=t|iu#6=Z-FR79G` zG*PAt>GQM`zSJPuEuzglJ)Egt8vY%Jb#c@qsIgAV7|R0cWizXAmNsrKSA!6{Qc9Hv zsQ&;GW9{I4K{h`X0yaA%Mk=)Bxd3AF7=&Pe3F7JW@VCht8}2$)K&h)05{mL6<)?=?nhb|3Yj}MDP>j&&lexV z?bJgiPa`yw0V=1FUPxn=PS}7-&WCDt7%?5az}Kid zGaJXiTZn!~0KurbptL1PF@77Kl%l*dR`OTMGse^0@uHi_GY{{Rw30zSQu5uviAe!nl!SN;)_MW=2EYSMNK zA%+m^B%C$?+XhG95yNvo+yKHXXa1eQRk>&BAj^gNS`dOKt4Ue5(-Oq_j8ACB$^Ka+ z5wtE|b?~98(VUoo0!tPimLipGJ=|@0@7PjJWb+@%wG>ZYNjD)Umz5U|f8uXh z-ntLNK0L4E81ficp?fgZuCm>h!+9~o3PL4kjl#KQ4$iGeTbsXUAT!RVf-P8 zxbxeUyx9?=nIV?ML?@XWatewiU`qaC0qhRe;r0AJY6)dHwCoNAND}7!0q#(3vujM3B z@$D5iM=(eVofQp*%hIbpRy0~7-cqws$H+b1aTPUjpI_Gi?ndc6gMXxQP-)ub{>mq(`^y1{=FCq@bOKJU;<6O`m$%4 z$eVHD?b8os(d(?$K#nGs<(^7~h(21dDuKIeW3fFl#m$_X$y!3R>0#rFC`@81z@3>V zmU7zQ!+b#Q!XwGJmSxWrl;M*wx$4nWiZ-Q=k5U1`TJ=1pMPLIqi+N*?nT(7Ek62i; z^5yD`V#>6mvm|pcl@vgXijL@n@p1XscM>Lp(9QGRKJLA zO^U6R%I0g%@?)cdM;Gcx9Fofv-^-FsmGcMlRgjP5Yl}u(e9XicFA%ODDQgC>WQFGr 
zlSfUUsU2w~6HRTv*#UgM0G4v>6#9t+t4gwDq*lZDj=YwsT3ePDX)Q$x$I~@3d*&#t|F17K86Tc+>^1*BT?uBplHs* zV#I<6A*aWf_Qp{a9hBLm`T_d&Q8`Qxj4<&)mN8Z5#z_<(T9s=_^#XR36)zYhhCI0y z4CxC4$oZQc9UTj}>n&p^^aF2l<&=k;{zx`rSM-*T&`X3Pd_wyTmRI~?;wm{PvY4zy zSL=#ZZ_P@y!llaS?jx4WLy+?|Aq9bDeI8x(PfUDIx9W)aBJ zxvo>vQ1a126T33$h$oj}>b;9D!LKQwYhPJMU$Yf~rU>zZBX18vw0M)l_S40FH?dzS zS1IMmk(xdQ)nod3p(u>)8kCWvjBoP-HWiNa5z)C>BC(Om;hf{-#ae20%eR|!q_QEc z5dgS}QDGiwLN?r0OY8?eFv!#%DU&S%Tf^7!1qmdr6sm32x|dcqX(vQwXj^O$tb1}% z$@pRoYnTe&ESnb5Ey-Ypt}^ATcF_L-I$VF5HvqknL#wLS{{Rep6qVl*W-|{=)qO%V zJ8d`X#aCLJ8vfMkC-Sq*Lw?p+_`3C29UMhwf=gu9h8N)(u-w(pAS7q<<6dmxfh+3x-YQsfXAmIp`}*w{9+0|0&Mv6f%x~A(Xz^3;e6L>KAf;ecVJ!7#>D>kNDja5vCn!tcJHkidC zwx5fY1%Lrd2Kh_4xbwF0H*&V<C0TF~l3V4tf!{6@w2^ zu5;dEVmO`&-U>gd#$eYAlCdmQ{9^uDmrLc-5XRBTBR{y}PyzrWh*7e15jEUiLm30m z3oQ(4Q+;Ddn1y}>H zHPfM#$BMI1r&^phCugFy;frNZNfeJMJly~;siVULKX|Bm_%Z9WeKk8IEOHHtp5P8#jUR3Sp20^Y_J2Qdyp2aes;Hp z{6&I;54!hlRIwV!h2b~aNn2?cR{sF#agCLKGNf@8)4sY};}~kZxVd4uQZY)k3|u7L z?)BEwy{Jk^n-3eP+3p-NFxg;22rWT(B=jt#k$(}pCT5hjAz2ocWep=qGTqwsuopiL=Z>7 z=p{Mium+uG-T_rk)Nq)4ej-VpD!9A>xW(iV#~m53I?J+HDRd3Q080)5hj2Rfd|e;m znBxt#HRP#RkCeQ>m=ZN-RpLsb#Sde$z{mjauDtQCJ?HQ>2ym2QkusO9RLx2`G`~SL zN0A302RB*x0i|zd2isvyoOg(?MII*)PW4I2@~XJ?H=M7s2HAGRv#!Yjd2EdUI$vP+ zvWNm^{-@a+sTx@u8pfRAW#PJc7^ll-Z6q=^$>FCiE)redss2}#N{-Dkw7XtJjq`5f zk5}FnNpaF)^H}>(+O#TI7Hq`47QE4!DmV^+Y#CyK*ST#pfcFc=f5P|-Mk^O)3G%T^ zcIldQuK;;ur7YZxCz~)((ZunAvc$8$We0-1#Aa#NmnI;^)*`$XFG=+?pCu+GeA^T0 z(my%K1VE!@)bk^>X1CBLWbxBq5zo`nG_j*ITJHSVaO=rs;tYkqsIh~seGLmWBf&)S zuzjp#E69WLEAp;3dnj2OXORli{vh#%yq;GxTOY!dGS^E(>EtD)jJecuO5UKLvmPLE zCuD~0ps>C#mnC*N+5tTDv9iNzhnA@WPYg3WXcZ2LDBf5Lt|mdmgRZneYZSMG{dLP8 zASPhIjyUFGe=7|bIPYjUG0;}$)y(nPQ9bUr^7G=i5t;qQy8UVRTC&xK zA}g61_o_i=y{RLLHI_L%rFi5qnK-%N{2}BKf}dC#_twm8?P9hTycWf(wQY;JFr+V; zB1OiHa0hl!N*}j4#?Pm?xARzU4pqg{o}q83s|0b0Yx6~tBe67#$P?=(O=)?z5JtHI z%fMIh^kh#G<8%Y0@~70I(eSM8>u^@q~<^ znWti44qxw9L?T>o6e?4hrL_)PDhah$=VO?PJv-bO(Qn|d8z3n7Bcp&WueDd zqh{U9lFJN#RVb}i7nGK1rDkL0f}jB$Wr-*ePS}iYWGq@~maKLy+4WYE)q>%ErItAK 
z6Or@)!OH$-3aC7?S(dX4i<=#U{u6U2YS6OKytn0(otqXJJiy5jR*B;Z;SwZJRI?t$ z9=gqYGJ^AK2;w+e_uq=8*n;>Cupa#%!UE+Pu0vg`%yP5+3W(ZI_h^*~-?NW-eEKxLTAd(2B1y@R0ef#L?oa;r7w&t<9FAijK z80f6ekzNY3K@@1^9K60@X?bzt3x#j}$lOC~kkyUE-ppB}naJc;r5I#OZ(bKH_9Kr& z$(6QPj(%SvNDbwc3$Jew>CI3?-rjb!afFw$?sZ-E7d>+^=9%X*wD31-VC=fQvr!c$ zmQKnfh^@fM>hyA-ik9DHE8o9%UK)}(e}_EK(-~Bj&6ThV0<+8nN$5Ku#>+IF{lHV8 z0_sZTDKi=Q?b(5w&ABt`S(j)WhGu2*4nyBZuW|UCp~KLN!m-uKtGj(gfIGqbz|sI2NjmfeamZ~?<<`32Qy=^~HLZ}a z**v7c2e&` zhDhqymRVYJ$znX6Q9}^1#oLz{!5;eooj(+3@;T@(+`W^5A(bViO{JBfpRFhsy)K+PhI7R0 zy|p3+oM%J5y!fP6s@Dv(!6Cf(?_?wbD&8BHFK)D!&`esaLSm_5mrnKxPVTCA5G$hI zpbn7rz6db3e-Y*x{{RivmXCudjJ?~U!Hx>{QWRqIw|s{ ziQkv07%LBEY2T0<;vuD)i6GkcHA?k?a$!UgG*JVmW@4dS@e05b>`z5uJW)bSjl8}; z!*$j<3%%!{bfHH^myjYLO~3dAHQ)Hb25qZva<+V=c)DwTXh5hRi-@lzrzA5c&Rmz8oDwXKxL*Rs~$C@sm$K`X`@S(G;|mu@Q=knE6T z02Ms};+s(6vY8C5Ci-Nu_CA(DLHQYrIW_iq2-RZ^r91E4K zRi&ctCJx+x)B(J0Wi5CrKcuPklw*{%aQSYp42b8-QQ5f0+6P)w=CLP$1aBR@bvz)R z8?euPttFr!X#hr^c_6PRWnx1Z%!Ki96h+*fXtVH#hH{bkn-?SD>R4NMR<+A6=^XI1 z(ky;OXx=d(rVx|Q(p{I9v|E(di6!wB$*|PwVvLuo@LB82SOtx|zTm5N?DL-N+S+@a z0nyQ>`fU&4>P`CRZv>jdDQj3Yw_K%E!Fs{UUd3Z$X>C`h9cmRv5JiDJkr`AL-)*20 zb_0Ps*w|;|tTY(v@!+eNzlLM+@yQg@&WRPoW<`>4%gc~55;&3m0q?$&=F*r!h@iYw6@= zU*>RwlX-+OAvgj%uu!Vab78vNbscEEhz@Nw(v5%Lef3(bXFMZDzn^>PvR3Ydy=t;@ znPSIL)Sh5aip;8f#aRZ5FfJEr15hp|9CU#u%;oY`YEqt*s|`K*<~YBZ$=Iv;jzC2S zr@V{?yATdD9TfDVnskXVLYx&*tv=x}G)pawAYkt_oC*a^@eUNJ*(Y9A=aO@eqS9mN zWNb-#^zUTY$=Z;BvIJfpMMNyB0x4nwF=tcDs%gV1v}S{>o9h~SZktC7U|wKf@U*sO z1CaVkJ|D9A39SoKy?bJHuVyKC6e&D@Tx`lz6B3fDBSFZNAOTV{a@)izW1@x?Ro)VifE-^j2%dH(vHo@I045a)2g@eZ<%XWE2T_+Ic!TXp2Iv}PXsQgqgq}-CWCsI zv+Y=u-Z1!{)?!I+V{5}sh_NDuw;tUh*%MB{X_@3e0QGCu6JQ?gW2v#Zo3CD zYE)X*HpJhLx%&@=TVCa8{4%!j6Tlkvnbn%gPaKvaMj|)mU5ts&MI}@=h-Eu0;Yqnl znJT^+vEkY1SCb`}3kBtX6c@tl%#Ko^Y1oktyMU}23rq6;!W@WSW{LdCe0Bmhb#)pg23 zTaOWKLgq_bF~Z&-Z7i~wmS+*bduNN3hNhuIi4uL}w z0A4H0UB@L1%} zByHqfkPvbp4rKI3i>cG>vq3jJem;DBRQ%N*-qhUBr=kA)9 
zvdXbd6u0BL>d!otd6G#8kgHdI0o0Ss5UiwlnJ|>BK^=)>@;;8O$S#H3ToRN~z{ClU?`u)q&!VqEP~Z!)8v*L-fZjj-SKc9Kn4o zHImJ#9=uk$BdM;QQLEjNSk^l(RBJ_}cM9FhH*!>U&!*?K zq(;`h@(k^AJS_-XUrDB*)+$H?sH^tn6v7@3oDkRNg+ZC3?U&hHM zHK14}Knu=PE~P0ViKB=}Se6PGiTwWKrPaec;+p0G8rzKC#`|zW_EawpZkpThZ|seu zMy57gkBTzc+ZdWSr=?|O^Evsz2?%%#9@byTM+^ZG>6nmX1F1j6-yBTY*RL)Xjy>G+c&;hY95GvnOmLOg@mcSx#N z^Ni4#l~GBJJrDxS&9xiSh2HdS{9S*!R%ta`Vv*-z?O}i^IlD=AiJ2%8d zb1V31cL$ZsMD2>UG8L>=fhgH}lZA@Jj}*Owt%Zt1uVE(=b~n`cR#c|n`)o-&&iA*6 zGpCSM7B~RPhh8VaLeJsq{v@|Gd{>7uc=fjuwVMd-$|a0}DmR*p#gSJ)ZB^BfgQnR1 z;lB|z&lufIOtvXqsgD-cI0SOZ9eHPnhU9>d{+41p5=lB6UaNBOi+IYO3d-d0wP<7{ zD$?X_oQ3MU#9WO1z z8Ym~yjokNch0Tm4`8e>lYYSE^)}u~a00gW2xlC;!(LQ13lX3@x9srZY$5>^pc#e)m zY-4fM>%}y)UY|R9FD@t>itU^EfxwL=j4SJG+|o7#x$0_dhKg-4(|b*)b3B*3CsGb! z*o`Og@v>_U8q?DLDdGn>YEF{boH;FGIJlIHgjxqgav-oAoPY;@!`bl|@axBh!O^o9 z@YpA^&T1{3hnN>gsvAZ*W+8_)Le0mDlp5Mhz58Ku^W(TjTNbaRioj{@T6B$?c}F*p zw&W-j77>G>eakGTim`shoP~=vpsA6)PAiWM*M@nP2#|{WkcM(flFA7HE0-!rQw2ir zZ8m>SdP7wjJ9dwF-eWeWF?GJ$UNx|HIe6%Q36L{Bt#4cC6NS(&IvI@ z%)Q7X=B{4x21H`9cd?C1)!O!%s$2Snof17n#Gm*~6|`4F(C_(y2dT_gi9B8Z0H@`* zepQbk>29n|5{Ool$gGf+;p*5dyU|?~ko%32*8P!6OY%{-k<6Ims@_IEOA<^%MHihQ zfF~v@q_=US$%|-`9a^Sf14d(@k57}o!lBhgQ`Y}Yf-VIpD@D7?u0dhSy@y{2Ve^Z8UFwW_(pl4 zr9087^^?}M4pw33PLCiSY_pYSNSOn&kq;z3S`SItKBt9gam)l8^4=$m`tF?UGwX*w zul|h2o&&%kJV9|Wbu0L7V1}{8&}(*Nkdlowtrwt>1dWMzgj+A)Hq z+!-gA0O+zB0w@nHXCgTL@AbLySr0J(gncAZS`fC7Y78i}_bu-%2#W zcWFC+osy$je?}L>Q{LZWxZy1jAH=NS9v1@N9t8P2dIL& z2V`|W6|5zTH1FUho_S$)llhj_fSnu6GALCX83(OfNT53Qb8Ddguj+5;kdK3)%=ym4@Gr zm*lD0*Rj+rY0eD4Y3ry$X|o ztEt~2TiBi;FLHY^@87oN!DFXITQe<779>@i2-ZgO8b0;QuYNqEAwesm&)-@F4604^ zBHtJBNi$XnI$Q7Rl=y=;j>6(jYazSSNcGwUh1m?xVnZ54K^g!?ISqFaj{g9iM@HuV z0E&2qjd-YBx=fj0;dd4}2nfW2C-&cUjnhYZb@gw3gwEsRtCz+lxCH59(Bz?imtTQl zV30uoxUM2rAb`9AjrzLF!OspmGo0~Fi*{m57~Uxln5UXA^IlRn$TLqTm$TfIat^(* z&0}T9QFz$$1C{;uLemlzhHCu}7i6XIY;nU5KK?ziI7GR`MO|y5{!_?7R%VK61E$Wr zz*}T=Pvdr0lzA*BAHo*$R%~T*@MI~;6pIy`R@nnq0HczMsL42eor#o_zACFf1y(GE 
zZxxTqD!rSsO_W|RrzoUYn~rA=~^r>Qp9jUl?S3Y2@RpWO8{@_^MOe ziPDYH=2b<7Iq6a8#{kp(oshf%9lBr|BsiHrlcKmcw8P~)p1uCad-?2_id!R-%0pG{ zQn_9^(4Hpwo--nu8+?xvjHOtRNjlLdt$q$xio{bvmx?ItP6Sofvb9*;D|0R+B(c&+ z3n^ca2?KpG&8tX52B46{@7tD?tBGUd`M79KC=_f2qhLN-t4_XHk?ouP{?#~Y8c zM)dY9GD{rt%F(*T^yosik#wvy0+rUa*Dj;OJG=3XbmPTCh$fS=ZY}xul*hz0>G+DB zJk&8XZqEiq-fEJhhO?=Kq2vbHgUFHt?Z!ac)`=4(Rs1iD%U8}$zHIfYM9&?D^2I3C zO_r14RL&U!Z(LSZZq2TlcyaC=GUDNe#3@BZTC!ZOW8o1oyXl+!8-$Vum()9ZYfepR z=BZk#cNEu7M*KKxZS=CSp(K^;NgRQdx3&wua`8~Zl8vD~HhXeIG#c@Ne}!Ep8ti^s zG+ualZlhBj4VA57=WK6=A;Fx^MISYnPse3;rN+q|io`q&tIc&Q-C!<4^4X#E!B!$p4D#I@%~U_8YnJG-#jCn}c9d5%P8Qof3A|s3Ch@LY8;#Coag?)I+=N#=uQo$1=n;peQUC^04!LjC8`KYW zJK~{$xraFn{{W7@x;aWGlZ?e%v2wvIWp!7d$@PZ{eBR&`Jd1S09r$@05sp8y!Tt* zC4=z36|s?EGFdi)#85LW2CDb%G>6I}E_!HDnDJKp#1}j4R7|FJw}+=%w&%Z?y<$@; zqVPXZpb%XGjl_kSl}8{+LH6pFm56?n_>RYf4obw53i$fQ)rr=Uln8|uIV0HNB)eis z16tPg)?Oj}G4Sp}N6ch=KjC@eq*Aj-C1<~3m5|6I3buVCld~T%@&LB`fDA^R3iYO= zh;8S~ZKI!$AcG*RKqdzsqvb5X(yNih)A3qW$kfQymcXpGXoQxmatQfpIi4Z>q836+ zcWp1M9;oj4eq3+yUl>xXe9LipZ7qpKbbt~EmaiZPqz$nfW4_w#I%!)ciNWKlHhE@;mltC; z`8Y4>K&S!kLmsGWWtO%MHf$Cmz58)6DwX7|G>BZ4MwMHa1u~KW+v`K551f`ogujMx zmBe>r`go{Ucw?yqZP}l45hDOM#TQGl^VjL_<89JETO@#ASp``ymk}3I5<|u@**kza z5y+jL4fHN)af)YMPc9vAqG)j~0k$7yQ4fi5w<^VE_Btx5K~!3eZp_VTRC-d7@b6 mXyhb`VU>MK0q*gJ9CzKqo0=S5 1 + except AssertionError: + logger.error(err) + sys.exit(1) + + +def check_data_dir(path): + """ + check cata_dir + """ + err = "Data path is not exist, please given a right path" \ + "".format(path) + try: + assert os.isdir(path) + except AssertionError: + logger.error(err) + sys.exit(1) + + +def check_function_params(config, key): + """ + check specify config + """ + k_config = config.get(key) + assert k_config is not None, \ + ('{} is required in config'.format(key)) + + assert k_config.get('function'), \ + ('function is required {} config'.format(key)) + params = k_config.get('params') + assert params is not None, \ + ('params is required in {} config'.format(key)) + assert 
isinstance(params, dict), \ + ('the params in {} config should be a dict'.format(key)) diff --git a/ppcls/utils/config.py b/ppcls/utils/config.py new file mode 100644 index 000000000..98812b3c5 --- /dev/null +++ b/ppcls/utils/config.py @@ -0,0 +1,201 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +import os +import yaml +from ppcls.utils import check +from ppcls.utils import logger + +__all__ = ['get_config'] + +CONFIG_SECS = ['TRAIN', 'VALID', 'OPTIMIZER', 'LEARNING_RATE'] + + +class AttrDict(dict): + def __getattr__(self, key): + return self[key] + + def __setattr__(self, key, value): + if key in self.__dict__: + self.__dict__[key] = value + else: + self[key] = value + + +def create_attr_dict(yaml_config): + from ast import literal_eval + for key, value in yaml_config.items(): + if type(value) is dict: + yaml_config[key] = value = AttrDict(value) + if isinstance(value, str): + try: + value = literal_eval(value) + except BaseException: + pass + if isinstance(value, AttrDict): + create_attr_dict(yaml_config[key]) + else: + yaml_config[key] = value + return + + +def parse_config(cfg_file): + """Load a config file into AttrDict""" + with open(cfg_file, 'r') as fopen: + yaml_config = AttrDict(yaml.load(fopen, Loader=yaml.FullLoader)) + create_attr_dict(yaml_config) + return yaml_config + + +def print_dict(d, delimiter=0): + """ + Recursively visualize a dict and + indenting acrrording by the relationship of 
keys. + """ + for k, v in d.items(): + if k in CONFIG_SECS: + logger.info("-" * 60) + + if isinstance(v, dict): + logger.info("{}{} : ".format(delimiter * " ", k)) + print_dict(v, delimiter + 4) + elif isinstance(v, list) and len(v) >= 1 and isinstance(v[0], dict): + logger.info("{}{} : ".format(delimiter * " ", k)) + for value in v: + print_dict(value, delimiter + 4) + else: + logger.info("{}{} : {}".format(delimiter * " ", k, v)) + + if k in CONFIG_SECS: + logger.info("-" * 60) + + +def print_config(config): + """ + visualize configs + + Arguments: + config: configs + """ + + copyright = "PaddleCLS is powered by PaddlePaddle" + ad = "https://github.com/PaddlePaddle/PaddleCLS" + + logger.info("\n" * 2) + logger.info(copyright) + logger.info(ad) + + print_dict(config) + + logger.info("-" * 60) + + +def check_config(config): + """ + Check config + """ + check.check_version() + + mode = config.get('mode', 'train') + check.check_gpu() + + architecture = config.get('architecture') + check.check_architecture(architecture) + + use_mix = config.get('use_mix') + check.check_mix(architecture, use_mix) + + classes_num = config.get('classes_num') + check.check_classes_num(classes_num) + + if mode.lower() == 'train': + check.check_function_params(config, 'LEARNING_RATE') + check.check_function_params(config, 'OPTIMIZER') + + +def override(dl, ks, v): + """ + Recursively replace dict of list + + Args: + dl(dict or list): dict or list to be replaced + ks(list): list of keys + v(str): value to be replaced + """ + + def str2num(v): + try: + return eval(v) + except Exception: + return v + + assert isinstance(dl, (list, dict)), ("{} should be a list or a dict") + assert len(ks) > 0, ('lenght of keys should larger than 0') + if isinstance(dl, list): + k = str2num(ks[0]) + if len(ks) == 1: + assert k < len(dl), ('index({}) out of range({})'.format(k, dl)) + dl[k] = str2num(v) + else: + override(dl[k], ks[1:], v) + else: + if len(ks) == 1: + assert ks[0] in dl, ('{} is not exist in 
{}'.format(ks[0], dl)) + dl[ks[0]] = str2num(v) + else: + override(dl[ks[0]], ks[1:], v) + + +def override_config(config, options=[]): + """ + Recursively override the config + + Args: + config(dict): dict to be replaced + options(list): list of pairs(key0.key1.idx.key2=value) + such as: [ + 'topk=2', + 'VALID.transforms.1.ResizeImage.resize_short=300' + ] + + Returns: + config(dict): replaced config + """ + for opt in options: + assert isinstance(opt, str), \ + ("option({}) should be a str".format(opt)) + assert "=" in opt, ("option({}) should contain " \ + "a = to distinguish between key and value".format(opt)) + pair = opt.split('=') + assert len(pair) == 2, ("there can be only a = in the option") + key, value = pair + keys = key.split('.') + override(config, keys, value) + + return config + + +def get_config(fname, overrides=[], show=True): + """ + Read config from file + """ + assert os.path.exists(fname), \ + ('config file({}) is not exist'.format(fname)) + config = parse_config(fname) + if show: print_config(config) + if len(overrides) > 0: + override_config(config, overrides) + print_config(config) + check_config(config) + return config diff --git a/ppcls/utils/environment.py b/ppcls/utils/environment.py new file mode 100644 index 000000000..e6061ecea --- /dev/null +++ b/ppcls/utils/environment.py @@ -0,0 +1,39 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
+ +import os +import paddle +import paddle.fluid as fluid +import paddle.fluid.framework as pff + +trainers_num = int(os.environ.get('PADDLE_TRAINERS_NUM', 1)) +trainer_id = int(os.environ.get("PADDLE_TRAINER_ID", 0)) + + +def place(): + gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0)) + return fluid.CUDAPlace(gpu_id) + + +def places(): + """ + Returns available running places, the numbers are usually + indicated by 'export CUDA_VISIBLE_DEVICES= ' + Args: + """ + + if trainers_num <= 1: + return pff.cuda_places() + else: + return place() diff --git a/ppcls/utils/misc.py b/ppcls/utils/misc.py new file mode 100644 index 000000000..5cffa1f67 --- /dev/null +++ b/ppcls/utils/misc.py @@ -0,0 +1,47 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
+ +__all__ = ['AverageMeter'] + + +class AverageMeter(object): + """ + Computes and stores the average and current value + """ + + def __init__(self, name='', fmt=':f', avg=False): + self.name = name + self.fmt = fmt + self.avg_flag = avg + self.reset() + + def reset(self): + """ reset """ + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + """ update """ + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + def __str__(self): + fmtstr = '[{name}: {val' + self.fmt + '}]' + if self.avg_flag: + fmtstr += '[{name}(avg): {avg' + self.fmt + '}]' + return fmtstr.format(**self.__dict__) diff --git a/ppcls/utils/model_zoo.py b/ppcls/utils/model_zoo.py new file mode 100644 index 000000000..543b2558e --- /dev/null +++ b/ppcls/utils/model_zoo.py @@ -0,0 +1,179 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import shutil +import requests +import tqdm +import tarfile +import zipfile + +from ppcls.utils.check import check_architecture +from ppcls.utils import logger + +__all__ = ['get'] + +DOWNLOAD_RETRY_LIMIT = 3 + + +class UrlError(Exception): + """ UrlError + """ + + def __init__(self, url='', code=''): + message = "Downloading from {} failed with code {}!".format(url, code) + super(UrlError, self).__init__(message) + + +class ModelNameError(Exception): + """ ModelNameError + """ + + def __init__(self, message='', architecture=''): + similar_names = similar_architectures(architecture) + model_list = ', '.join(similar_names) + message += '\n{} is not exist. \nMaybe you want: [{}]'.format( + architecture, model_list) + super(ModelNameError, self).__init__(message) + + +class RetryError(Exception): + """ RetryError + """ + + def __init__(self, url='', times=''): + message = "Download from {} failed. Retry({}) limit reached".format( + url, times) + super(RetryError, self).__init__(message) + + +def _get_url(architecture): + prefix = "https://paddle-imagenet-models-name.bj.bcebos.com/" + fname = architecture + "_pretrained.tar" + return prefix + fname + + +def _move_and_merge_tree(src, dst): + """ + Move src directory to dst, if dst is already exists, + merge src to dst + """ + if not os.path.exists(dst): + shutil.move(src, dst) + elif os.path.isfile(src): + shutil.move(src, dst) + else: + for fp in os.listdir(src): + src_fp = os.path.join(src, fp) + dst_fp = os.path.join(dst, fp) + if os.path.isdir(src_fp): + if os.path.isdir(dst_fp): + _move_and_merge_tree(src_fp, dst_fp) + else: + shutil.move(src_fp, dst_fp) + elif os.path.isfile(src_fp) and \ + not os.path.isfile(dst_fp): + shutil.move(src_fp, dst_fp) + + +def _download(url, path): + """ + Download from url, save to path. 
+ url (str): download url + path (str): download to given path + """ + if not os.path.exists(path): + os.makedirs(path) + + fname = os.path.split(url)[-1] + fullname = os.path.join(path, fname) + retry_cnt = 0 + + while not os.path.exists(fullname): + if retry_cnt < DOWNLOAD_RETRY_LIMIT: + retry_cnt += 1 + else: + raise RetryError(url, DOWNLOAD_RETRY_LIMIT) + + logger.info("Downloading {} from {}".format(fname, url)) + + req = requests.get(url, stream=True) + if req.status_code != 200: + raise UrlError(url, req.status_code) + + # For protecting download interupted, download to + # tmp_fullname firstly, move tmp_fullname to fullname + # after download finished + tmp_fullname = fullname + "_tmp" + total_size = req.headers.get('content-length') + with open(tmp_fullname, 'wb') as f: + if total_size: + for chunk in tqdm.tqdm( + req.iter_content(chunk_size=1024), + total=(int(total_size) + 1023) // 1024, + unit='KB'): + f.write(chunk) + else: + for chunk in req.iter_content(chunk_size=1024): + if chunk: + f.write(chunk) + shutil.move(tmp_fullname, fullname) + + return fullname + + +def _decompress(fname): + """ + Decompress for zip and tar file + """ + logger.info("Decompressing {}...".format(fname)) + + # For protecting decompressing interupted, + # decompress to fpath_tmp directory firstly, if decompress + # successed, move decompress files to fpath and delete + # fpath_tmp and remove download compress file. 
+ fpath = os.path.split(fname)[0] + fpath_tmp = os.path.join(fpath, 'tmp') + if os.path.isdir(fpath_tmp): + shutil.rmtree(fpath_tmp) + os.makedirs(fpath_tmp) + + if fname.find('tar') >= 0: + with tarfile.open(fname) as tf: + tf.extractall(path=fpath_tmp) + elif fname.find('zip') >= 0: + with zipfile.ZipFile(fname) as zf: + zf.extractall(path=fpath_tmp) + else: + raise TypeError("Unsupport compress file type {}".format(fname)) + + for f in os.listdir(fpath_tmp): + src_dir = os.path.join(fpath_tmp, f) + dst_dir = os.path.join(fpath, f) + _move_and_merge_tree(src_dir, dst_dir) + + shutil.rmtree(fpath_tmp) + os.remove(fname) + + +def get(architecture, path, decompress=True): + check_architecture(architecture) + url = _get_url(architecture) + fname = _download(url, path) + if decompress: _decompress(fname) + logger.info("download {} finished ".format(fname)) diff --git a/ppcls/utils/save_load.py b/ppcls/utils/save_load.py new file mode 100644 index 000000000..ac2593936 --- /dev/null +++ b/ppcls/utils/save_load.py @@ -0,0 +1,124 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tempfile +import shutil + +import paddle +import paddle.fluid as fluid + +from ppcls.utils import logger + +__all__ = ['init_model', 'save_model'] + + +def _mkdir_if_not_exist(path): + """ + mkdir if not exists + """ + if not os.path.exists(os.path.join(path)): + os.makedirs(os.path.join(path)) + + +def _load_state(path): + print("path: ", path) + if os.path.exists(path + '.pdopt'): + # XXX another hack to ignore the optimizer state + tmp = tempfile.mkdtemp() + dst = os.path.join(tmp, os.path.basename(os.path.normpath(path))) + shutil.copy(path + '.pdparams', dst + '.pdparams') + state = fluid.io.load_program_state(dst) + shutil.rmtree(tmp) + else: + print("path: ", path) + state = fluid.io.load_program_state(path) + return state + + +def load_params(exe, prog, path, ignore_params=[]): + """ + Load model from the given path. + Args: + exe (fluid.Executor): The fluid.Executor object. + prog (fluid.Program): load weight to which Program object. + path (string): URL string or loca model path. + ignore_params (list): ignore variable to load when finetuning. + It can be specified by finetune_exclude_pretrained_params + and the usage can refer to docs/advanced_tutorials/TRANSFER_LEARNING.md + """ + if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')): + raise ValueError("Model pretrain path {} does not " + "exists.".format(path)) + + logger.info('Loading parameters from {}...'.format(path)) + + ignore_set = set() + state = _load_state(path) + + # ignore the parameter which mismatch the shape + # between the model and pretrain weight. 
+ all_var_shape = {} + for block in prog.blocks: + for param in block.all_parameters(): + all_var_shape[param.name] = param.shape + ignore_set.update([ + name for name, shape in all_var_shape.items() + if name in state and shape != state[name].shape + ]) + + if ignore_params: + all_var_names = [var.name for var in prog.list_vars()] + ignore_list = filter( + lambda var: any([re.match(name, var) for name in ignore_params]), + all_var_names) + ignore_set.update(list(ignore_list)) + + if len(ignore_set) > 0: + for k in ignore_set: + if k in state: + logger.warning('variable {} not used'.format(k)) + del state[k] + fluid.io.set_program_state(prog, state) + + +def init_model(config, program, exe): + """ + load model from checkpoint or pretrained_model + """ + checkpoints = config.get('checkpoints') + if checkpoints and os.path.exists(checkpoints): + fluid.load(program, checkpoints, exe) + logger.info("Finish initing model from {}".format(checkpoints)) + return + + pretrained_model = config.get('pretrained_model') + if pretrained_model and os.path.exists(pretrained_model): + load_params(exe, program, pretrained_model) + logger.info("Finish initing model from {}".format(pretrained_model)) + + +def save_model(program, model_path, epoch_id, prefix='ppcls'): + """ + save model to the target path + """ + model_path = os.path.join(model_path, str(epoch_id)) + _mkdir_if_not_exist(model_path) + model_prefix = os.path.join(model_path, prefix) + fluid.save(program, model_prefix) + logger.info("Already save model in {}".format(model_path)) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 000000000..13ad95e59 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,4 @@ +opencv-python +pillow +tqdm +PyYAML diff --git a/tools/download.py b/tools/download.py new file mode 100644 index 000000000..ab88fb44f --- /dev/null +++ b/tools/download.py @@ -0,0 +1,41 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. 
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +import sys +import argparse + +sys.path.append("../") +from ppcls import model_zoo + + +def parse_args(): + def str2bool(v): + return v.lower() in ("true", "t", "1") + + parser = argparse.ArgumentParser() + parser.add_argument('-a', '--architecture', type=str, default='ResNet50') + parser.add_argument('-p', '--path', type=str, default='./pretrained/') + parser.add_argument('-d', '--decompress', type=str2bool, default=True) + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + model_zoo.get(args.architecture, args.path, args.decompress) + + +if __name__ == '__main__': + main() diff --git a/tools/eval.py b/tools/eval.py new file mode 100644 index 000000000..9fae3044c --- /dev/null +++ b/tools/eval.py @@ -0,0 +1,84 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys +import argparse + +import paddle +import paddle.fluid as fluid + +import program + +from ppcls.data import Reader +import ppcls.utils.environment as env +from ppcls.utils.config import get_config +from ppcls.utils.save_load import init_model, save_model +from ppcls.utils import logger + +from paddle.fluid.incubate.fleet.collective import fleet +from paddle.fluid.incubate.fleet.base import role_maker + + +def parse_args(): + parser = argparse.ArgumentParser("PaddleClas eval script") + parser.add_argument( + '-c', + '--config', + type=str, + default='configs/eval.yaml', + help='config file path') + parser.add_argument( + '-o', + '--override', + action='append', + default=[], + help='config options to be overridden') + + args = parser.parse_args() + return args + + +def main(args): + role = role_maker.PaddleCloudRoleMaker(is_collective=True) + fleet.init(role) + + config = get_config(args.config, overrides=args.override, show=True) + place = env.place() + + startup_prog = fluid.Program() + valid_prog = fluid.Program() + valid_dataloader, valid_fetchs = program.build( + config, valid_prog, startup_prog, is_train=False) + valid_prog = valid_prog.clone(for_test=True) + + exe = fluid.Executor(place) + exe.run(startup_prog) + + init_model(config, valid_prog, exe) + + valid_reader = Reader(config, 'valid')() + valid_dataloader.set_sample_list_generator(valid_reader, place) + compiled_valid_prog = program.compile(config, valid_prog) + + program.run(valid_dataloader, exe, compiled_valid_prog, valid_fetchs, 0, + 'valid') + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/tools/export_model.py b/tools/export_model.py new file mode 100644 index 000000000..e2db8b184 --- /dev/null +++ b/tools/export_model.py @@ -0,0 +1,77 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import numpy as np + +from ppcls.modeling import architectures +import paddle.fluid as fluid + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("-m", "--model", type=str) + parser.add_argument("-p", "--pretrained_model", type=str) + parser.add_argument("-o", "--output_path", type=str) + + return parser.parse_args() + + +def create_input(): + image = fluid.data( + name='image', shape=[None, 3, 224, 224], dtype='float32') + return image + + +def create_model(args, model, input, class_dim=1000): + if args.model == "GoogLeNet": + out, _, _ = model.net(input=input, class_dim=class_dim) + else: + out = model.net(input=input, class_dim=class_dim) + out = fluid.layers.softmax(out) + return out + + +def main(): + args = parse_args() + + model = architectures.__dict__[args.model]() + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + + startup_prog = fluid.Program() + infer_prog = fluid.Program() + + with fluid.program_guard(infer_prog, startup_prog): + with fluid.unique_name.guard(): + image = create_input() + out = create_model(args, model, image) + + infer_prog = infer_prog.clone(for_test=True) + fluid.load( + program=infer_prog, model_path=args.pretrained_model, executor=exe) + + fluid.io.save_inference_model( + dirname=args.output_path, + feeded_var_names=[image.name], + main_program=infer_prog, + target_vars=out, + executor=exe, + model_filename='model', + 
params_filename='params') + + +if __name__ == "__main__": + main() diff --git a/tools/infer/cpp_infer.py b/tools/infer/cpp_infer.py new file mode 100644 index 000000000..665dfa162 --- /dev/null +++ b/tools/infer/cpp_infer.py @@ -0,0 +1,103 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import utils +import argparse +import numpy as np + +from paddle.fluid.core import PaddleTensor +from paddle.fluid.core import AnalysisConfig +from paddle.fluid.core import create_paddle_predictor + + +def parse_args(): + def str2bool(v): + return v.lower() in ("true", "t", "1") + + parser = argparse.ArgumentParser() + parser.add_argument("-i", "--image_file", type=str) + parser.add_argument("-m", "--model_file", type=str) + parser.add_argument("-p", "--params_file", type=str) + parser.add_argument("-b", "--max_batch_size", type=int, default=1) + parser.add_argument("--use_gpu", type=str2bool, default=True) + parser.add_argument("--ir_optim", type=str2bool, default=True) + parser.add_argument("--use_tensorrt", type=str2bool, default=False) + + return parser.parse_args() + + +def create_predictor(args): + config = AnalysisConfig(args.model_file, args.params_file) + if args.use_gpu: + config.enable_use_gpu(1000, 0) + else: + config.disable_gpu() + + config.switch_ir_optim(args.ir_optim) # default true + if args.use_tensorrt: + config.enable_tensorrt_engine( + precision_mode=AnalysisConfig.Precision.Float32, + 
max_batch_size=args.max_batch_size) + predictor = create_paddle_predictor(config) + + return predictor + + +def create_operators(): + size = 224 + img_mean = [0.485, 0.456, 0.406] + img_std = [0.229, 0.224, 0.225] + img_scale = 1.0 / 255.0 + + decode_op = utils.DecodeImage() + resize_op = utils.ResizeImage(resize_short=256) + crop_op = utils.CropImage(size=(size, size)) + normalize_op = utils.NormalizeImage( + scale=img_scale, mean=img_mean, std=img_std) + totensor_op = utils.ToTensor() + + return [decode_op, resize_op, crop_op, normalize_op, totensor_op] + + +def preprocess(fname, ops): + data = open(fname).read() + for op in ops: + data = op(data) + + return data + + +def postprocess(outputs, topk=5): + output = outputs[0] + prob = output.as_ndarray().flatten() + index = prob.argsort(axis=0)[-topk:][::-1].astype('int32') + return zip(index, prob[index]) + + +def main(): + args = parse_args() + operators = create_operators() + predictor = create_predictor(args) + + data = preprocess(args.image_file, operators) + inputs = [PaddleTensor(data.copy())] + outputs = predictor.run(inputs) + probs = postprocess(outputs) + + for idx, prob in probs: + print("class id: {:d}, probability: {:.4f}".format(idx, prob)) + + +if __name__ == "__main__": + main() diff --git a/tools/infer/infer.py b/tools/infer/infer.py new file mode 100644 index 000000000..9a3b8fff9 --- /dev/null +++ b/tools/infer/infer.py @@ -0,0 +1,119 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import utils +import argparse +import numpy as np + +import paddle.fluid as fluid + +from ppcls.modeling import architectures + + +def parse_args(): + def str2bool(v): + return v.lower() in ("true", "t", "1") + + parser = argparse.ArgumentParser() + parser.add_argument("-i", "--image_file", type=str) + parser.add_argument("-m", "--model", type=str) + parser.add_argument("-p", "--pretrained_model", type=str) + parser.add_argument("--use_gpu", type=str2bool, default=True) + + return parser.parse_args() + + +def create_predictor(args): + def create_input(): + image = fluid.data( + name='image', shape=[None, 3, 224, 224], dtype='float32') + return image + + def create_model(args, model, input, class_dim=1000): + if args.model == "GoogLeNet": + out, _, _ = model.net(input=input, class_dim=class_dim) + else: + out = model.net(input=input, class_dim=class_dim) + out = fluid.layers.softmax(out) + return out + + model = architectures.__dict__[args.model]() + + place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace() + exe = fluid.Executor(place) + + startup_prog = fluid.Program() + infer_prog = fluid.Program() + + with fluid.program_guard(infer_prog, startup_prog): + with fluid.unique_name.guard(): + image = create_input() + out = create_model(args, model, image) + + infer_prog = infer_prog.clone(for_test=True) + fluid.load( + program=infer_prog, model_path=args.pretrained_model, executor=exe) + + return exe, infer_prog, [image.name], [out.name] + + +def create_operators(): + size = 224 + img_mean = [0.485, 0.456, 0.406] + img_std = [0.229, 0.224, 0.225] + img_scale = 1.0 / 255.0 + + decode_op = utils.DecodeImage() + resize_op = utils.ResizeImage(resize_short=256) + crop_op = utils.CropImage(size=(size, size)) + normalize_op = utils.NormalizeImage( + scale=img_scale, mean=img_mean, std=img_std) + totensor_op = utils.ToTensor() + + return [decode_op, resize_op, 
crop_op, normalize_op, totensor_op] + + +def preprocess(fname, ops): + data = open(fname).read() + for op in ops: + data = op(data) + + return data + + +def postprocess(outputs, topk=5): + output = outputs[0] + prob = np.array(output).flatten() + index = prob.argsort(axis=0)[-topk:][::-1].astype('int32') + return zip(index, prob[index]) + + +def main(): + args = parse_args() + operators = create_operators() + exe, program, feed_names, fetch_names = create_predictor(args) + + data = preprocess(args.image_file, operators) + outputs = exe.run(program, + feed={feed_names[0]: data}, + fetch_list=fetch_names, + return_numpy=False) + probs = postprocess(outputs) + + for idx, prob in probs: + print("class id: {:d}, probability: {:.4f}".format(idx, prob)) + + +if __name__ == "__main__": + main() diff --git a/tools/infer/py_infer.py b/tools/infer/py_infer.py new file mode 100644 index 000000000..b566092bc --- /dev/null +++ b/tools/infer/py_infer.py @@ -0,0 +1,101 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import argparse

import numpy as np
import paddle.fluid as fluid

import utils


def parse_args():
    """Parse command-line arguments for inference from a saved model.

    Returns:
        argparse.Namespace with image_file, model_dir, model_file,
        params_file and use_gpu.
    """

    def str2bool(v):
        # argparse's type callback: accept "true"/"t"/"1" (case-insensitive).
        return v.lower() in ("true", "t", "1")

    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--image_file", type=str)
    parser.add_argument("-d", "--model_dir", type=str)
    parser.add_argument("-m", "--model_file", type=str)
    parser.add_argument("-p", "--params_file", type=str)
    parser.add_argument("--use_gpu", type=str2bool, default=True)

    return parser.parse_args()


def create_predictor(args):
    """Load a saved inference model and wrap it in a CompiledProgram.

    Args:
        args: parsed arguments; model_dir/model_file/params_file locate the
            exported inference model.

    Returns:
        (exe, compiled_program, feed_names, fetch_lists) ready for exe.run().
    """
    if args.use_gpu:
        place = fluid.CUDAPlace(0)
    else:
        place = fluid.CPUPlace()

    exe = fluid.Executor(place)
    [program, feed_names, fetch_lists] = fluid.io.load_inference_model(
        args.model_dir,
        exe,
        model_filename=args.model_file,
        params_filename=args.params_file)
    compiled_program = fluid.compiler.CompiledProgram(program)

    return exe, compiled_program, feed_names, fetch_lists


def create_operators():
    """Create the standard ImageNet evaluation preprocessing pipeline.

    Returns:
        list of callables applied in order: decode -> resize(short side 256)
        -> center-crop(224) -> normalize -> to CHW tensor.
    """
    size = 224
    img_mean = [0.485, 0.456, 0.406]
    img_std = [0.229, 0.224, 0.225]
    img_scale = 1.0 / 255.0

    decode_op = utils.DecodeImage()
    resize_op = utils.ResizeImage(resize_short=256)
    crop_op = utils.CropImage(size=(size, size))
    normalize_op = utils.NormalizeImage(
        scale=img_scale, mean=img_mean, std=img_std)
    totensor_op = utils.ToTensor()

    return [decode_op, resize_op, crop_op, normalize_op, totensor_op]


def preprocess(fname, ops):
    """Read an image file and run it through the preprocessing operators.

    Args:
        fname(str): path to the encoded image file.
        ops(list): operators from create_operators().

    Returns:
        preprocessed ndarray of shape (1, 3, 224, 224).
    """
    # BUGFIX: read in BINARY mode — DecodeImage expects the raw encoded
    # bytes, and a text-mode read raises UnicodeDecodeError on image data
    # under Python 3. The context manager also closes the handle promptly.
    with open(fname, 'rb') as f:
        data = f.read()
    for op in ops:
        data = op(data)

    return data


def postprocess(outputs, topk=5):
    """Turn the raw fetch result into the top-k (class_id, prob) pairs.

    Args:
        outputs(list): fetch results; outputs[0] is the softmax output.
        topk(int): number of highest-probability classes to return.

    Returns:
        iterator of (class_id, probability), best first.
    """
    output = outputs[0]
    prob = np.array(output).flatten()
    # argsort ascending, take the last topk indices, reverse to descending.
    index = prob.argsort(axis=0)[-topk:][::-1].astype('int32')
    return zip(index, prob[index])


def main():
    """Run single-image classification and print the top-5 predictions."""
    args = parse_args()
    operators = create_operators()
    exe, program, feed_names, fetch_lists = create_predictor(args)

    data = preprocess(args.image_file, operators)
    outputs = exe.run(program,
                      feed={feed_names[0]: data},
                      fetch_list=fetch_lists,
                      return_numpy=False)
    probs = postprocess(outputs)

    for idx, prob in probs:
        print("class id: {:d}, probability: {:.4f}".format(idx, prob))


if __name__ == "__main__":
    main()
+ +import cv2 +import numpy as np + + +class DecodeImage(object): + def __init__(self, to_rgb=True): + self.to_rgb = to_rgb + + def __call__(self, img): + data = np.frombuffer(img, dtype='uint8') + img = cv2.imdecode(data, 1) + if self.to_rgb: + assert img.shape[2] == 3, 'invalid shape of image[%s]' % ( + img.shape) + img = img[:, :, ::-1] + + return img + + +class ResizeImage(object): + def __init__(self, resize_short=None): + self.resize_short = resize_short + + def __call__(self, img): + img_h, img_w = img.shape[:2] + percent = float(self.resize_short) / min(img_w, img_h) + w = int(round(img_w * percent)) + h = int(round(img_h * percent)) + return cv2.resize(img, (w, h)) + + +class CropImage(object): + def __init__(self, size): + if type(size) is int: + self.size = (size, size) + else: + self.size = size + + def __call__(self, img): + w, h = self.size + img_h, img_w = img.shape[:2] + w_start = (img_w - w) // 2 + h_start = (img_h - h) // 2 + + w_end = w_start + w + h_end = h_start + h + return img[h_start:h_end, w_start:w_end, :] + + +class NormalizeImage(object): + def __init__(self, scale=None, mean=None, std=None): + self.scale = np.float32(scale if scale is not None else 1.0 / 255.0) + mean = mean if mean is not None else [0.485, 0.456, 0.406] + std = std if std is not None else [0.229, 0.224, 0.225] + + shape = (1, 1, 3) + self.mean = np.array(mean).reshape(shape).astype('float32') + self.std = np.array(std).reshape(shape).astype('float32') + + def __call__(self, img): + return (img.astype('float32') * self.scale - self.mean) / self.std + + +class ToTensor(object): + def __init__(self): + pass + + def __call__(self, img): + img = img.transpose((2, 0, 1)) + img = np.expand_dims(img, axis=0) + return img diff --git a/tools/program.py b/tools/program.py new file mode 100644 index 000000000..7d799a204 --- /dev/null +++ b/tools/program.py @@ -0,0 +1,370 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. 
+# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys +import time + +from collections import OrderedDict + +import paddle +import paddle.fluid as fluid + +from ppcls.optimizer import LearningRateBuilder +from ppcls.optimizer import OptimizerBuilder + +from ppcls.modeling import architectures +from ppcls.modeling.loss import CELoss +from ppcls.modeling.loss import MixCELoss +from ppcls.modeling.loss import GoogLeNetLoss +from ppcls.utils.misc import AverageMeter +from ppcls.utils import logger + +from paddle.fluid.incubate.fleet.collective import fleet +from paddle.fluid.incubate.fleet.collective import DistributedStrategy + + +def create_feeds(image_shape, mix=None): + """ + Create feeds as model input + + Args: + image_shape(list[int]): model input shape, such as [3, 224, 224] + mix(bool): whether to use mix(include mixup, cutmix, fmix) + + Returns: + feeds(dict): dict of model input variables + """ + feeds = OrderedDict() + feeds['image'] = fluid.data( + name="feed_image", shape=[None] + image_shape, dtype="float32") + if mix: + feeds['feed_y_a'] = fluid.data( + name="feed_y_a", shape=[None, 1], dtype="int64") + feeds['feed_y_b'] = fluid.data( + name="feed_y_b", shape=[None, 1], dtype="int64") + feeds['feed_lam'] = fluid.data( + name="feed_lam", shape=[None, 1], dtype="float32") + else: + feeds['label'] = fluid.data( + name="feed_label", 
shape=[None, 1], dtype="int64") + + return feeds + + +def create_dataloader(feeds): + """ + Create a dataloader with model input variables + + Args: + feeds(dict): dict of model input variables + + Returns: + dataloader(fluid dataloader): + """ + trainer_num = int(os.environ.get('PADDLE_TRAINERS_NUM', 1)) + capacity = 64 if trainer_num <= 1 else 8 + dataloader = fluid.io.DataLoader.from_generator( + feed_list=feeds, + capacity=capacity, + use_double_buffer=True, + iterable=True) + + return dataloader + + +def create_model(name, image, classes_num): + """ + Create a model + + Args: + name(str): model name, such as ResNet50 + image(variable): model input variable + classes_num(int): num of classes + + Returns: + out(variable): model output variable + """ + model = architectures.__dict__[name]() + out = model.net(input=image, class_dim=classes_num) + return out + + +def create_loss(out, + feeds, + architecture, + classes_num=1000, + epsilon=None, + mix=False): + """ + Create a loss for optimization, such as: + 1. CrossEnotry loss + 2. CrossEnotry loss with label smoothing + 3. CrossEnotry loss with mix(mixup, cutmix, fmix) + 4. CrossEnotry loss with label smoothing and (mixup, cutmix, fmix) + 5. 
GoogLeNet loss + + Args: + out(variable): model output variable + feeds(dict): dict of model input variables + architecture(str): model name, such as ResNet50 + classes_num(int): num of classes + epsilon(float): parameter for label smoothing, 0.0 <= epsilon <= 1.0 + mix(bool): whether to use mix(include mixup, cutmix, fmix) + + Returns: + loss(variable): loss variable + """ + if architecture == "GoogLeNet": + assert len(out) == 3, "GoogLeNet should have 3 outputs" + loss = GoogLeNetLoss(class_dim=classes_num, epsilon=epsilon) + target = feeds['label'] + return loss(out[0], out[1], out[2], target) + + if mix: + loss = MixCELoss(class_dim=classes_num, epsilon=epsilon) + feed_y_a = feeds['feed_y_a'] + feed_y_b = feeds['feed_y_b'] + feed_lam = feeds['feed_lam'] + return loss(out, feed_y_a, feed_y_b, feed_lam) + else: + loss = CELoss(class_dim=classes_num, epsilon=epsilon) + target = feeds['label'] + return loss(out, target) + + +def create_metric(out, feeds, topk=5, classes_num=1000): + """ + Create measures of model accuracy, such as top1 and top5 + + Args: + out(variable): model output variable + feeds(dict): dict of model input variables(included label) + topk(int): usually top5 + classes_num(int): num of classes + + Returns: + fetchs(dict): dict of measures + """ + fetchs = OrderedDict() + label = feeds['label'] + softmax_out = fluid.layers.softmax(out, use_cudnn=False) + top1 = fluid.layers.accuracy(softmax_out, label=label, k=1) + fetchs['top1'] = (top1, AverageMeter('top1', ':2.4f', True)) + k = min(topk, classes_num) + topk = fluid.layers.accuracy(softmax_out, label=label, k=k) + topk_name = 'top{}'.format(k) + fetchs[topk_name] = (topk, AverageMeter(topk_name, ':2.4f', True)) + + return fetchs + + +def create_fetchs(out, + feeds, + architecture, + topk=5, + classes_num=1000, + epsilon=None, + mix=False): + """ + Create fetchs as model outputs(included loss and measures), + will call create_loss and create_metric(if mix). 
+ + Args: + out(variable): model output variable + feeds(dict): dict of model input variables(included label) + architecture(str): model name, such as ResNet50 + topk(int): usually top5 + classes_num(int): num of classes + epsilon(float): parameter for label smoothing, 0.0 <= epsilon <= 1.0 + mix(bool): whether to use mix(include mixup, cutmix, fmix) + + Returns: + fetchs(dict): dict of model outputs(included loss and measures) + """ + fetchs = OrderedDict() + loss = create_loss(out, feeds, architecture, classes_num, epsilon, mix) + fetchs['loss'] = (loss, AverageMeter('loss', ':2.4f', True)) + if not mix: + metric = create_metric(out, feeds, topk, classes_num) + fetchs.update(metric) + + return fetchs + + +def create_optimizer(config): + """ + Create an optimizer using config, usually including + learning rate and regularization. + + Args: + config(dict): such as + { + 'LEARNING_RATE': + {'function': 'Cosine', + 'params': {'lr': 0.1} + }, + 'OPTIMIZER': + {'function': 'Momentum', + 'params':{'momentum': 0.9}, + 'regularizer': + {'function': 'L2', 'factor': 0.0001} + } + } + + Returns: + an optimizer instance + """ + # create learning_rate instance + lr_config = config['LEARNING_RATE'] + lr_config['params'].update({ + 'epochs': config['epochs'], + 'step_each_epoch': + config['total_images'] // config['TRAIN']['batch_size'], + }) + lr = LearningRateBuilder(**lr_config)() + + # create optimizer instance + opt_config = config['OPTIMIZER'] + opt = OptimizerBuilder(**opt_config) + return opt(lr) + + +def dist_optimizer(config, optimizer): + """ + Create a distributed optimizer based on a normal optimizer + + Args: + config(dict): + optimizer(): a normal optimizer + + Returns: + optimizer: a distributed optimizer + """ + exec_strategy = fluid.ExecutionStrategy() + exec_strategy.num_threads = 3 + exec_strategy.num_iteration_per_drop_scope = 10 + + dist_strategy = DistributedStrategy() + dist_strategy.nccl_comm_num = 1 + dist_strategy.fuse_all_reduce_ops = True + 
dist_strategy.exec_strategy = exec_strategy + optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy) + + return optimizer + + +def build(config, main_prog, startup_prog, is_train=True): + """ + Build a program using a model and an optimizer + 1. create feeds + 2. create a dataloader + 3. create a model + 4. create fetchs + 5. create an optimizer + + Args: + config(dict): config + main_prog(): main program + startup_prog(): startup program + is_train(bool): train or valid + + Returns: + dataloader(): a bridge between the model and the data + fetchs(dict): dict of model outputs(included loss and measures) + """ + with fluid.program_guard(main_prog, startup_prog): + with fluid.unique_name.guard(): + use_mix = config.get('use_mix') and is_train + feeds = create_feeds(config.image_shape, mix=use_mix) + dataloader = create_dataloader(feeds.values()) + out = create_model(config.architecture, feeds['image'], + config.classes_num) + fetchs = create_fetchs( + out, + feeds, + config.architecture, + config.topk, + config.classes_num, + epsilon=config.get('ls_epsilon'), + mix=use_mix) + if is_train: + optimizer = create_optimizer(config) + lr = optimizer._global_learning_rate() + fetchs['lr'] = (lr, AverageMeter('lr', ':f', False)) + optimizer = dist_optimizer(config, optimizer) + optimizer.minimize(fetchs['loss'][0]) + + return dataloader, fetchs + + +def compile(config, program, loss_name=None): + """ + Compile the program + + Args: + config(dict): config + program(): the program which is wrapped by + loss_name(str): loss name + + Returns: + compiled_program(): a compiled program + """ + build_strategy = fluid.compiler.BuildStrategy() + #build_strategy.fuse_bn_act_ops = config.get("fuse_bn_act_ops") + #build_strategy.fuse_elewise_add_act_ops = config.get("fuse_elewise_add_act_ops") + exec_strategy = fluid.ExecutionStrategy() + + exec_strategy.num_threads = 1 + exec_strategy.num_iteration_per_drop_scope = 10 + + compiled_program = 
fluid.CompiledProgram(program).with_data_parallel( + loss_name=loss_name, + build_strategy=build_strategy, + exec_strategy=exec_strategy) + + return compiled_program + + +def run(dataloader, exe, program, fetchs, epoch=0, mode='train'): + """ + Feed data to the model and fetch the measures and loss + + Args: + dataloader(fluid dataloader): + exe(): + program(): + fetchs(dict): dict of measures and the loss + epoch(int): epoch of training or validation + model(str): log only + + Returns: + """ + fetch_list = [f[0] for f in fetchs.values()] + metric_list = [f[1] for f in fetchs.values()] + batch_time = AverageMeter('cost', ':6.3f') + tic = time.time() + for idx, batch in enumerate(dataloader()): + metrics = exe.run(program=program, feed=batch, fetch_list=fetch_list) + batch_time.update(time.time() - tic) + tic = time.time() + for i, m in enumerate(metrics): + metric_list[i].update(m[0], len(batch[0])) + fetchs_str = ''.join([str(m) for m in metric_list] + [str(batch_time)]) + logger.info("[epoch:%3d][%s][step:%4d]%s" % + (epoch, mode, idx, fetchs_str)) diff --git a/tools/run.sh b/tools/run.sh new file mode 100755 index 000000000..d718a3873 --- /dev/null +++ b/tools/run.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +export PYTHONPATH=$(dirname "$PWD"):$PWD:$PYTHONPATH + +#python download.py -a ResNet181 -p ./pretrained/ -d 1 + +#python download.py -a ResNet18 -p ./pretrained/ -d 1 + +#python download.py -a ResNet34 -p ./pretrained/ -d 0 + +#python -m paddle.distributed.launch --selected_gpus="0,1,2,3" --log_dir=mylog tools/train.py + +#python -m paddle.distributed.launch --selected_gpus="0,1,2,3" --log_dir=mylog ./eval.py + +python -m paddle.distributed.launch \ + --selected_gpus="0,1,2,3" \ + --log_dir=mylog \ + tools/train.py \ + -c configs/ResNet/ResNet50_vd.yaml \ + -o use_mix=0 \ + -o TRAIN.batch_size=128 \ + -o TRAIN.transforms.3.NormalizeImage.mean.2=0.4 diff --git a/tools/train.py b/tools/train.py new file mode 100644 index 000000000..3a5afd6ce --- /dev/null +++ 
b/tools/train.py @@ -0,0 +1,105 @@ +#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os +import sys + +import paddle +import paddle.fluid as fluid + +import program + +from ppcls.data import Reader +import ppcls.utils.environment as env +from ppcls.utils.config import get_config +from ppcls.utils.save_load import init_model, save_model +from ppcls.utils import logger + +from paddle.fluid.incubate.fleet.collective import fleet +from paddle.fluid.incubate.fleet.base import role_maker + + +def parse_args(): + parser = argparse.ArgumentParser("PaddleClas train script") + parser.add_argument( + '-c', + '--config', + type=str, + default='configs/ResNet/ResNet18_vd.yaml', + help='config file path') + parser.add_argument( + '-o', + '--override', + action='append', + default=[], + help='config options to be overridden') + args = parser.parse_args() + return args + + +def main(args): + role = role_maker.PaddleCloudRoleMaker(is_collective=True) + fleet.init(role) + + config = get_config(args.config, overrides=args.override, show=True) + place = env.place() + + startup_prog = fluid.Program() + train_prog = fluid.Program() + + train_dataloader, train_fetchs = program.build( + config, train_prog, startup_prog, is_train=True) + + if config.validate: + valid_prog = fluid.Program() + 
valid_dataloader, valid_fetchs = program.build( + config, valid_prog, startup_prog, is_train=False) + valid_prog = valid_prog.clone(for_test=True) + + exe = fluid.Executor(place) + exe.run(startup_prog) + + init_model(config, train_prog, exe) + + train_reader = Reader(config, 'train')() + train_dataloader.set_sample_list_generator(train_reader, place) + + if config.validate: + valid_reader = Reader(config, 'valid')() + valid_dataloader.set_sample_list_generator(valid_reader, place) + compiled_valid_prog = program.compile(config, valid_prog) + + compiled_train_prog = fleet.main_program + for epoch_id in range(config.epochs): + program.run(train_dataloader, exe, compiled_train_prog, train_fetchs, + epoch_id, 'train') + + if config.validate and epoch_id % config.valid_interval == 0: + program.run(valid_dataloader, exe, compiled_valid_prog, + valid_fetchs, epoch_id, 'valid') + + if epoch_id % config.save_interval == 0: + model_path = os.path.join(config.model_save_dir, + config.architecture) + save_model(train_prog, model_path, epoch_id) + + +if __name__ == '__main__': + args = parse_args() + main(args)