From b742b7c18b5826caa4ecc1f066dd281ac9f51a9a Mon Sep 17 00:00:00 2001 From: xiaohangzhan Date: Tue, 16 Jun 2020 00:05:18 +0800 Subject: [PATCH] upload code --- .gitignore | 128 +++++ .style.yapf | 4 + README.md | 107 +++++ benchmarks/detection/README.md | 12 + .../detection/configs/Base-RCNN-C4-BN.yaml | 17 + .../detection/configs/coco_R_50_C4_2x.yaml | 13 + .../configs/coco_R_50_C4_2x_moco.yaml | 10 + .../configs/pascal_voc_R_50_C4_24k.yaml | 16 + .../configs/pascal_voc_R_50_C4_24k_moco.yaml | 9 + .../convert-pretrain-to-detectron2.py | 36 ++ benchmarks/detection/run.sh | 6 + benchmarks/detection/train_net.py | 77 +++ benchmarks/dist_test_cls.sh | 35 ++ benchmarks/dist_test_svm.sh | 15 + benchmarks/eval_svm.sh | 38 ++ benchmarks/eval_svm_lowshot.sh | 62 +++ benchmarks/extract_info/voc07.py | 20 + benchmarks/srun_test_cls.sh | 56 +++ benchmarks/srun_test_semi.sh | 60 +++ benchmarks/srun_test_svm.sh | 15 + .../svm_tools/aggregate_low_shot_svm_stats.py | 128 +++++ benchmarks/svm_tools/svm_helper.py | 171 +++++++ benchmarks/svm_tools/test_svm.py | 174 +++++++ benchmarks/svm_tools/test_svm_low_shot.py | 212 +++++++++ benchmarks/svm_tools/train_svm_kfold.py | 162 +++++++ .../svm_tools/train_svm_kfold_parallel.py | 151 ++++++ benchmarks/svm_tools/train_svm_low_shot.py | 144 ++++++ .../svm_tools/train_svm_low_shot_parallel.py | 145 ++++++ configs/base.py | 18 + configs/classification/cifar10/r50.py | 59 +++ configs/classification/imagnet/r50.py | 68 +++ .../imagenet/r50_multihead.py | 89 ++++ .../places205/r50_multihead.py | 89 ++++ configs/selfsup/deepcluster/r50.py | 88 ++++ configs/selfsup/moco/r50_v1.py | 59 +++ configs/selfsup/moco/r50_v2.py | 75 +++ configs/selfsup/npid/r50.py | 64 +++ configs/selfsup/rotation_pred/r50.py | 64 +++ configs/selfsup/simclr/r50_bs256.py | 77 +++ configs/selfsup/simclr/r50_bs512.py | 77 +++ .../imagenet_10percent/r50.py | 69 +++ .../imagenet_1percent/r50.py | 69 +++ docs/CHANGELOG.md | 2 + docs/GETTING_STARTED.md | 192 ++++++++ docs/INSTALL.md | 146 ++++++ docs/MODEL_ZOO.md | 1 + docs/relation.jpg | Bin 0 -> 326204 bytes openselfsup/__init__.py | 3 + openselfsup/apis/__init__.py | 1 + openselfsup/apis/train.py | 275 +++++++++++ openselfsup/datasets/__init__.py | 12 + openselfsup/datasets/base.py | 32 ++ openselfsup/datasets/builder.py | 43 ++ openselfsup/datasets/classification.py | 43 ++ openselfsup/datasets/contrastive.py | 23 + openselfsup/datasets/data_sources/__init__.py | 3 + openselfsup/datasets/data_sources/cifar.py | 55 +++ .../datasets/data_sources/image_list.py | 36 ++ openselfsup/datasets/data_sources/imagenet.py | 43 ++ openselfsup/datasets/data_sources/utils.py | 36 ++ openselfsup/datasets/dataset_wrappers.py | 55 +++ openselfsup/datasets/deepcluster.py | 29 ++ openselfsup/datasets/extraction.py | 19 + openselfsup/datasets/loader/__init__.py | 7 + openselfsup/datasets/loader/build_loader.py | 81 ++++ openselfsup/datasets/loader/sampler.py | 299 ++++++++++++ openselfsup/datasets/npid.py | 20 + openselfsup/datasets/pipelines/__init__.py | 1 + openselfsup/datasets/pipelines/transforms.py | 92 ++++ openselfsup/datasets/registry.py | 5 + openselfsup/datasets/rotation_pred.py | 35 ++ openselfsup/hooks/__init__.py | 7 + openselfsup/hooks/builder.py | 7 + openselfsup/hooks/deepcluster_hook.py | 109 +++++ openselfsup/hooks/extractor.py | 50 ++ openselfsup/hooks/odc_hook.py | 67 +++ openselfsup/hooks/optimizer_hook.py | 16 + openselfsup/hooks/registry.py | 3 + openselfsup/hooks/validate_hook.py | 71 +++ openselfsup/models/__init__.py | 20 + 
openselfsup/models/backbones/__init__.py | 6 + openselfsup/models/backbones/resnet.py | 429 +++++++++++++++++ openselfsup/models/backbones/resnext.py | 222 +++++++++ openselfsup/models/builder.py | 38 ++ openselfsup/models/classification.py | 79 ++++ openselfsup/models/deepcluster.py | 88 ++++ openselfsup/models/heads/__init__.py | 3 + openselfsup/models/heads/cls_head.py | 60 +++ openselfsup/models/heads/contrastive_head.py | 29 ++ openselfsup/models/heads/multi_cls_head.py | 77 +++ openselfsup/models/losses/__init__.py | 19 + .../models/losses/cross_entropy_loss.py | 103 ++++ openselfsup/models/losses/focal_loss.py | 82 ++++ openselfsup/models/losses/ghm_loss.py | 171 +++++++ openselfsup/models/losses/utils.py | 98 ++++ openselfsup/models/memories/__init__.py | 3 + openselfsup/models/memories/odc_memory.py | 217 +++++++++ openselfsup/models/memories/odc_memory_gpu.py | 190 ++++++++ openselfsup/models/memories/simple_memory.py | 42 ++ openselfsup/models/moco.py | 189 ++++++++ openselfsup/models/necks.py | 132 ++++++ openselfsup/models/npid.py | 100 ++++ openselfsup/models/odc.py | 103 ++++ openselfsup/models/registry.py | 8 + openselfsup/models/rotation_pred.py | 63 +++ openselfsup/models/simclr.py | 79 ++++ openselfsup/models/utils/__init__.py | 16 + openselfsup/models/utils/accuracy.py | 31 ++ openselfsup/models/utils/conv_module.py | 163 +++++++ openselfsup/models/utils/conv_ws.py | 46 ++ openselfsup/models/utils/gather_layer.py | 22 + openselfsup/models/utils/multi_pooling.py | 38 ++ openselfsup/models/utils/norm.py | 55 +++ openselfsup/models/utils/scale.py | 15 + openselfsup/models/utils/sobel.py | 23 + openselfsup/third_party/clustering.py | 308 ++++++++++++ openselfsup/utils/__init__.py | 8 + openselfsup/utils/alias_multinomial.py | 66 +++ openselfsup/utils/collect.py | 83 ++++ openselfsup/utils/collect_env.py | 63 +++ openselfsup/utils/config_tools.py | 12 + openselfsup/utils/contextmanagers.py | 126 +++++ openselfsup/utils/flops_counter.py | 444 ++++++++++++++++++ openselfsup/utils/gather.py | 69 +++ openselfsup/utils/logger.py | 66 +++ openselfsup/utils/misc.py | 37 ++ openselfsup/utils/optimizers.py | 95 ++++ openselfsup/utils/profiling.py | 41 ++ openselfsup/utils/registry.py | 79 ++++ openselfsup/version.py | 5 + requirements.txt | 2 + requirements/runtime.txt | 12 + requirements/tests.txt | 11 + setup.py | 191 ++++++++ tools/count_parameters.py | 38 ++ tools/dist_extract.sh | 16 + tools/dist_train.sh | 12 + tools/extract.py | 160 +++++++ tools/extract_backbone_weights.py | 34 ++ tools/kill.sh | 2 + tools/prepare_data/create_voc_data_files.py | 193 ++++++++ .../create_voc_low_shot_challenge_samples.py | 131 ++++++ tools/prepare_data/prepare_voc07_cls.sh | 34 ++ tools/publish_model.py | 34 ++ tools/single_train.sh | 9 + tools/srun_extract.sh | 27 ++ tools/srun_train.sh | 26 + tools/test.py | 123 +++++ tools/train.py | 142 ++++++ tools/upgrade_models.py | 27 ++ 150 files changed, 10692 insertions(+) create mode 100644 .gitignore create mode 100644 .style.yapf create mode 100644 README.md create mode 100644 benchmarks/detection/README.md create mode 100644 benchmarks/detection/configs/Base-RCNN-C4-BN.yaml create mode 100644 benchmarks/detection/configs/coco_R_50_C4_2x.yaml create mode 100644 benchmarks/detection/configs/coco_R_50_C4_2x_moco.yaml create mode 100644 benchmarks/detection/configs/pascal_voc_R_50_C4_24k.yaml create mode 100644 benchmarks/detection/configs/pascal_voc_R_50_C4_24k_moco.yaml create mode 100755 benchmarks/detection/convert-pretrain-to-detectron2.py 
create mode 100644 benchmarks/detection/run.sh create mode 100755 benchmarks/detection/train_net.py create mode 100755 benchmarks/dist_test_cls.sh create mode 100644 benchmarks/dist_test_svm.sh create mode 100644 benchmarks/eval_svm.sh create mode 100644 benchmarks/eval_svm_lowshot.sh create mode 100644 benchmarks/extract_info/voc07.py create mode 100644 benchmarks/srun_test_cls.sh create mode 100644 benchmarks/srun_test_semi.sh create mode 100644 benchmarks/srun_test_svm.sh create mode 100644 benchmarks/svm_tools/aggregate_low_shot_svm_stats.py create mode 100644 benchmarks/svm_tools/svm_helper.py create mode 100644 benchmarks/svm_tools/test_svm.py create mode 100644 benchmarks/svm_tools/test_svm_low_shot.py create mode 100644 benchmarks/svm_tools/train_svm_kfold.py create mode 100644 benchmarks/svm_tools/train_svm_kfold_parallel.py create mode 100644 benchmarks/svm_tools/train_svm_low_shot.py create mode 100644 benchmarks/svm_tools/train_svm_low_shot_parallel.py create mode 100644 configs/base.py create mode 100644 configs/classification/cifar10/r50.py create mode 100644 configs/classification/imagnet/r50.py create mode 100644 configs/linear_classification/imagenet/r50_multihead.py create mode 100644 configs/linear_classification/places205/r50_multihead.py create mode 100644 configs/selfsup/deepcluster/r50.py create mode 100644 configs/selfsup/moco/r50_v1.py create mode 100644 configs/selfsup/moco/r50_v2.py create mode 100644 configs/selfsup/npid/r50.py create mode 100644 configs/selfsup/rotation_pred/r50.py create mode 100644 configs/selfsup/simclr/r50_bs256.py create mode 100644 configs/selfsup/simclr/r50_bs512.py create mode 100644 configs/semisup_classification/imagenet_10percent/r50.py create mode 100644 configs/semisup_classification/imagenet_1percent/r50.py create mode 100644 docs/CHANGELOG.md create mode 100644 docs/GETTING_STARTED.md create mode 100644 docs/INSTALL.md create mode 100644 docs/MODEL_ZOO.md create mode 100644 docs/relation.jpg create mode 100644 openselfsup/__init__.py create mode 100644 openselfsup/apis/__init__.py create mode 100644 openselfsup/apis/train.py create mode 100644 openselfsup/datasets/__init__.py create mode 100644 openselfsup/datasets/base.py create mode 100644 openselfsup/datasets/builder.py create mode 100644 openselfsup/datasets/classification.py create mode 100644 openselfsup/datasets/contrastive.py create mode 100644 openselfsup/datasets/data_sources/__init__.py create mode 100644 openselfsup/datasets/data_sources/cifar.py create mode 100644 openselfsup/datasets/data_sources/image_list.py create mode 100644 openselfsup/datasets/data_sources/imagenet.py create mode 100644 openselfsup/datasets/data_sources/utils.py create mode 100644 openselfsup/datasets/dataset_wrappers.py create mode 100644 openselfsup/datasets/deepcluster.py create mode 100644 openselfsup/datasets/extraction.py create mode 100644 openselfsup/datasets/loader/__init__.py create mode 100644 openselfsup/datasets/loader/build_loader.py create mode 100644 openselfsup/datasets/loader/sampler.py create mode 100644 openselfsup/datasets/npid.py create mode 100644 openselfsup/datasets/pipelines/__init__.py create mode 100644 openselfsup/datasets/pipelines/transforms.py create mode 100644 openselfsup/datasets/registry.py create mode 100644 openselfsup/datasets/rotation_pred.py create mode 100644 openselfsup/hooks/__init__.py create mode 100644 openselfsup/hooks/builder.py create mode 100644 openselfsup/hooks/deepcluster_hook.py create mode 100644 openselfsup/hooks/extractor.py create mode 
100644 openselfsup/hooks/odc_hook.py create mode 100644 openselfsup/hooks/optimizer_hook.py create mode 100644 openselfsup/hooks/registry.py create mode 100644 openselfsup/hooks/validate_hook.py create mode 100644 openselfsup/models/__init__.py create mode 100644 openselfsup/models/backbones/__init__.py create mode 100644 openselfsup/models/backbones/resnet.py create mode 100644 openselfsup/models/backbones/resnext.py create mode 100644 openselfsup/models/builder.py create mode 100644 openselfsup/models/classification.py create mode 100644 openselfsup/models/deepcluster.py create mode 100644 openselfsup/models/heads/__init__.py create mode 100644 openselfsup/models/heads/cls_head.py create mode 100644 openselfsup/models/heads/contrastive_head.py create mode 100644 openselfsup/models/heads/multi_cls_head.py create mode 100644 openselfsup/models/losses/__init__.py create mode 100644 openselfsup/models/losses/cross_entropy_loss.py create mode 100644 openselfsup/models/losses/focal_loss.py create mode 100644 openselfsup/models/losses/ghm_loss.py create mode 100644 openselfsup/models/losses/utils.py create mode 100644 openselfsup/models/memories/__init__.py create mode 100644 openselfsup/models/memories/odc_memory.py create mode 100644 openselfsup/models/memories/odc_memory_gpu.py create mode 100644 openselfsup/models/memories/simple_memory.py create mode 100644 openselfsup/models/moco.py create mode 100644 openselfsup/models/necks.py create mode 100644 openselfsup/models/npid.py create mode 100644 openselfsup/models/odc.py create mode 100644 openselfsup/models/registry.py create mode 100644 openselfsup/models/rotation_pred.py create mode 100644 openselfsup/models/simclr.py create mode 100644 openselfsup/models/utils/__init__.py create mode 100644 openselfsup/models/utils/accuracy.py create mode 100644 openselfsup/models/utils/conv_module.py create mode 100644 openselfsup/models/utils/conv_ws.py create mode 100644 openselfsup/models/utils/gather_layer.py create mode 100644 openselfsup/models/utils/multi_pooling.py create mode 100644 openselfsup/models/utils/norm.py create mode 100644 openselfsup/models/utils/scale.py create mode 100644 openselfsup/models/utils/sobel.py create mode 100644 openselfsup/third_party/clustering.py create mode 100644 openselfsup/utils/__init__.py create mode 100644 openselfsup/utils/alias_multinomial.py create mode 100644 openselfsup/utils/collect.py create mode 100644 openselfsup/utils/collect_env.py create mode 100644 openselfsup/utils/config_tools.py create mode 100644 openselfsup/utils/contextmanagers.py create mode 100644 openselfsup/utils/flops_counter.py create mode 100644 openselfsup/utils/gather.py create mode 100644 openselfsup/utils/logger.py create mode 100644 openselfsup/utils/misc.py create mode 100644 openselfsup/utils/optimizers.py create mode 100644 openselfsup/utils/profiling.py create mode 100644 openselfsup/utils/registry.py create mode 100644 openselfsup/version.py create mode 100644 requirements.txt create mode 100644 requirements/runtime.txt create mode 100644 requirements/tests.txt create mode 100644 setup.py create mode 100644 tools/count_parameters.py create mode 100755 tools/dist_extract.sh create mode 100755 tools/dist_train.sh create mode 100644 tools/extract.py create mode 100644 tools/extract_backbone_weights.py create mode 100644 tools/kill.sh create mode 100644 tools/prepare_data/create_voc_data_files.py create mode 100644 tools/prepare_data/create_voc_low_shot_challenge_samples.py create mode 100644 
tools/prepare_data/prepare_voc07_cls.sh
 create mode 100644 tools/publish_model.py
 create mode 100644 tools/single_train.sh
 create mode 100644 tools/srun_extract.sh
 create mode 100755 tools/srun_train.sh
 create mode 100644 tools/test.py
 create mode 100644 tools/train.py
 create mode 100644 tools/upgrade_models.py

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..aedb855e
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,128 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+
+mmdet/version.py
+data
+.vscode
+.idea
+
+# custom
+*.pkl
+*.pkl.json
+*.log.json
+work_dirs/
+pretrains
+pretrains/
+
+# Pytorch
+*.pth
+
+*.swp
+source.sh
+tensorboard.sh
+.DS_Store
+replace.sh
+benchmarks/detection/datasets
+benchmarks/detection/output

diff --git a/.style.yapf b/.style.yapf
new file mode 100644
index 00000000..286a3f1d
--- /dev/null
+++ b/.style.yapf
@@ -0,0 +1,4 @@
+[style]
+BASED_ON_STYLE = pep8
+BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true
+SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true

diff --git a/README.md b/README.md
new file mode 100644
index 00000000..1a7360a8
--- /dev/null
+++ b/README.md
@@ -0,0 +1,107 @@
+
+# OpenSelfSup
+
+## Introduction
+
+The master branch works with **PyTorch 1.1** or higher.
+
+OpenSelfSup is an open-source unsupervised representation learning toolbox based on PyTorch.
+
+### What does this repo do?
+
+Below are the relations among Unsupervised Learning, Self-Supervised Learning and Representation Learning. This repo focuses on the shaded area, i.e., Unsupervised Representation Learning, of which Self-Supervised Representation Learning is the major branch. Since in many cases we do not strictly distinguish between Self-Supervised Representation Learning and Unsupervised Representation Learning, we still name this repo `OpenSelfSup`.
+
+![relation](docs/relation.jpg)
+
+### Major features
+
+- **All methods in one repository**
+
+|                | Support |
+|----------------|:-------:|
+| [ImageNet](https://link.springer.com/article/10.1007/s11263-015-0816-y?sa_campaign=email/event/articleAuthor/onlineFirst#) | ✓ |
+| [Relative-Loc](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Doersch_Unsupervised_Visual_Representation_ICCV_2015_paper.pdf) | ✓ |
+| [Rotation-Pred](https://arxiv.org/abs/1803.07728) | ✓ |
+| [DeepCluster](https://arxiv.org/abs/1807.05520) | ✓ |
+| [ODC](http://openaccess.thecvf.com/content_CVPR_2020/papers/Zhan_Online_Deep_Clustering_for_Unsupervised_Representation_Learning_CVPR_2020_paper.pdf) | ✓ |
+| [NPID](https://arxiv.org/abs/1805.01978) | ✓ |
+| [MoCo](https://arxiv.org/abs/1911.05722) | ✓ |
+| [MoCo v2](https://arxiv.org/abs/2003.04297) | ✓ |
+| [SimCLR](https://arxiv.org/abs/2002.05709) | ✓ |
+| [PIRL](http://openaccess.thecvf.com/content_CVPR_2020/papers/Misra_Self-Supervised_Learning_of_Pretext-Invariant_Representations_CVPR_2020_paper.pdf) | progress |
+
+- **Flexibility & Extensibility**
+
+  OpenSelfSup follows a code architecture similar to MMDetection's, but is even more flexible, since it integrates various self-supervised tasks, including classification, joint clustering and feature learning, contrastive learning, tasks with a memory bank, etc.
+
+  For existing methods in this repo, you only need to modify config files to adjust hyper-parameters. It is also simple to design your own methods; please refer to [GETTING_STARTED](docs/GETTING_STARTED.md).
+
+- **Efficiency**
+
+  All methods support multi-machine multi-GPU distributed training.
+
+- **Standardized Benchmarks**
+
+  We standardize the benchmarks, including logistic regression, SVM / low-shot SVM on linearly probed features, semi-supervised classification, and object detection. Below are the settings of these benchmarks.
+
+| Benchmarks | Setting | Difference |
+|------------|---------|------------|
+| ImageNet Linear Classification | [goyal2019scaling](http://openaccess.thecvf.com/content_ICCV_2019/papers/Goyal_Scaling_and_Benchmarking_Self-Supervised_Visual_Representation_Learning_ICCV_2019_paper.pdf) | Total 90 epochs, decay at [30, 60]. |
+| Places205 Linear Classification | [goyal2019scaling](http://openaccess.thecvf.com/content_ICCV_2019/papers/Goyal_Scaling_and_Benchmarking_Self-Supervised_Visual_Representation_Learning_ICCV_2019_paper.pdf) | Total 90 epochs, decay at [30, 60]. |
+| PASCAL VOC07 SVM | [goyal2019scaling](http://openaccess.thecvf.com/content_ICCV_2019/papers/Goyal_Scaling_and_Benchmarking_Self-Supervised_Visual_Representation_Learning_ICCV_2019_paper.pdf) | Costs="1.0,10.0,100.0" to save evaluation time. |
+| PASCAL VOC07 Low-shot SVM | [goyal2019scaling](http://openaccess.thecvf.com/content_ICCV_2019/papers/Goyal_Scaling_and_Benchmarking_Self-Supervised_Visual_Representation_Learning_ICCV_2019_paper.pdf) | Costs="1.0,10.0,100.0" to save evaluation time. |
+| PASCAL VOC07+12 Object Detection | [MoCo](http://openaccess.thecvf.com/content_CVPR_2020/papers/He_Momentum_Contrast_for_Unsupervised_Visual_Representation_Learning_CVPR_2020_paper.pdf) | |
+| COCO17 Object Detection | [MoCo](http://openaccess.thecvf.com/content_CVPR_2020/papers/He_Momentum_Contrast_for_Unsupervised_Visual_Representation_Learning_CVPR_2020_paper.pdf) | |
+
+
+## License
+
+This project is released under the [Apache 2.0 license](LICENSE).
+
+## Changelog
+
+v0.1.0 was released on 15/06/2020.
+Please refer to [CHANGELOG.md](docs/CHANGELOG.md) for details and release history.
+
+## Benchmark and model zoo
+
+## Installation
+
+Please refer to [INSTALL.md](docs/INSTALL.md) for installation and dataset preparation.
+
+
+## Get Started
+
+Please see [GETTING_STARTED.md](docs/GETTING_STARTED.md) for the basic usage of OpenSelfSup.
+
+## Contributing
+
+We appreciate all contributions to improve OpenSelfSup. Please refer to [CONTRIBUTING.md](.github/CONTRIBUTING.md) for the contributing guideline.
+
+## Citation
+
+If you use this toolbox or benchmark in your research, please cite this project.
+
+```
+@misc{openselfsup,
+  title = {{OpenSelfSup}: Open MMLab Self-Supervised Learning Toolbox and Benchmark},
+  author = {Xiaohang Zhan and Jiahao Xie and Ziwei Liu and Dahua Lin and Chen Change Loy},
+  howpublished = {\url{https://github.com/open-mmlab/openselfsup}},
+  year = {2020}
+}
+```
+
+## Acknowledgement
+
+1. This repo borrows the architecture design and part of the code from [MMDetection](https://github.com/open-mmlab/mmdetection).
+
+2. The implementation of MoCo and the detection benchmark borrow the code from [moco](https://github.com/facebookresearch/moco).
+
+3. The SVM benchmark borrows the code from [fair_self_supervision_benchmark](https://github.com/facebookresearch/fair_self_supervision_benchmark).
+
+4. `openselfsup/third_party/clustering.py` is borrowed from [deepcluster](https://github.com/facebookresearch/deepcluster/blob/master/clustering.py).
+
+## Contact
+
+This repo is currently maintained by Xiaohang Zhan ([@XiaohangZhan](http://github.com/XiaohangZhan)).

diff --git a/benchmarks/detection/README.md b/benchmarks/detection/README.md
new file mode 100644
index 00000000..caeb7ae3
--- /dev/null
+++ b/benchmarks/detection/README.md
@@ -0,0 +1,12 @@
+
+## Transferring to Detection
+
+We follow the evaluation setting in MoCo when transferring to object detection.
+
+### Instruction
+
+1. Install [detectron2](https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md).
+
+1. Put the dataset under the "benchmarks/detection/datasets" directory,
+   following the [directory structure](https://github.com/facebookresearch/detectron2/tree/master/datasets)
+   required by detectron2.
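[Editor's note] The README above stops at dataset setup. For reference, here is a minimal sketch of the rest of the workflow, stitched together from `convert-pretrain-to-detectron2.py` and `run.sh` in this patch. The checkpoint path and output name are placeholders, and feeding in a backbone-only checkpoint (e.g. one produced by `tools/extract_backbone_weights.py`, so that its `state_dict` holds plain ResNet keys such as `conv1`/`layer1`) is an assumption based on the key-renaming logic in the converter, not something the patch states:

```bash
# Sketch only: the .pth path below is a hypothetical placeholder.
# Run from the repository root.
cd benchmarks/detection

# 1. Convert the extracted backbone weights (a torch checkpoint whose
#    "state_dict" holds ResNet keys) into detectron2's pickle format.
python convert-pretrain-to-detectron2.py \
    ../../work_dirs/selfsup/moco/r50_v2/epoch_200_extracted.pth moco_r50.pkl

# 2. Fine-tune and evaluate on VOC07+12 with 8 GPUs; run.sh forwards the
#    pickle to train_net.py as MODEL.WEIGHTS.
bash run.sh configs/pascal_voc_R_50_C4_24k_moco.yaml moco_r50.pkl
```

The `_moco` configs set `PIXEL_MEAN`/`PIXEL_STD` and `FORMAT: "RGB"` because, unlike the detectron2 ImageNet baselines, these pretrained backbones consume RGB inputs.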
diff --git a/benchmarks/detection/configs/Base-RCNN-C4-BN.yaml b/benchmarks/detection/configs/Base-RCNN-C4-BN.yaml new file mode 100644 index 00000000..5104c6a6 --- /dev/null +++ b/benchmarks/detection/configs/Base-RCNN-C4-BN.yaml @@ -0,0 +1,17 @@ +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + RPN: + PRE_NMS_TOPK_TEST: 6000 + POST_NMS_TOPK_TEST: 1000 + ROI_HEADS: + NAME: "Res5ROIHeadsExtraNorm" + BACKBONE: + FREEZE_AT: 0 + RESNETS: + NORM: "SyncBN" +TEST: + PRECISE_BN: + ENABLED: True +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.02 diff --git a/benchmarks/detection/configs/coco_R_50_C4_2x.yaml b/benchmarks/detection/configs/coco_R_50_C4_2x.yaml new file mode 100644 index 00000000..5b7e4240 --- /dev/null +++ b/benchmarks/detection/configs/coco_R_50_C4_2x.yaml @@ -0,0 +1,13 @@ +_BASE_: "Base-RCNN-C4-BN.yaml" +MODEL: + MASK_ON: True + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) + MIN_SIZE_TEST: 800 +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + STEPS: (120000, 160000) + MAX_ITER: 180000 diff --git a/benchmarks/detection/configs/coco_R_50_C4_2x_moco.yaml b/benchmarks/detection/configs/coco_R_50_C4_2x_moco.yaml new file mode 100644 index 00000000..8e310683 --- /dev/null +++ b/benchmarks/detection/configs/coco_R_50_C4_2x_moco.yaml @@ -0,0 +1,10 @@ +_BASE_: "coco_R_50_C4_2x.yaml" +MODEL: + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + WEIGHTS: "See Instructions" + RESNETS: + STRIDE_IN_1X1: False +INPUT: + MAX_SIZE_TRAIN: 1200 + FORMAT: "RGB" diff --git a/benchmarks/detection/configs/pascal_voc_R_50_C4_24k.yaml b/benchmarks/detection/configs/pascal_voc_R_50_C4_24k.yaml new file mode 100644 index 00000000..a05eb5e2 --- /dev/null +++ b/benchmarks/detection/configs/pascal_voc_R_50_C4_24k.yaml @@ -0,0 +1,16 @@ +_BASE_: "Base-RCNN-C4-BN.yaml" +MODEL: + MASK_ON: False + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + ROI_HEADS: + NUM_CLASSES: 20 +INPUT: + MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) + MIN_SIZE_TEST: 800 +DATASETS: + TRAIN: ('voc_2007_trainval', 'voc_2012_trainval') + TEST: ('voc_2007_test',) +SOLVER: + STEPS: (18000, 22000) + MAX_ITER: 24000 + WARMUP_ITERS: 100 diff --git a/benchmarks/detection/configs/pascal_voc_R_50_C4_24k_moco.yaml b/benchmarks/detection/configs/pascal_voc_R_50_C4_24k_moco.yaml new file mode 100644 index 00000000..eebe6905 --- /dev/null +++ b/benchmarks/detection/configs/pascal_voc_R_50_C4_24k_moco.yaml @@ -0,0 +1,9 @@ +_BASE_: "pascal_voc_R_50_C4_24k.yaml" +MODEL: + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + WEIGHTS: "See Instructions" + RESNETS: + STRIDE_IN_1X1: False +INPUT: + FORMAT: "RGB" diff --git a/benchmarks/detection/convert-pretrain-to-detectron2.py b/benchmarks/detection/convert-pretrain-to-detectron2.py new file mode 100755 index 00000000..e8bf5434 --- /dev/null +++ b/benchmarks/detection/convert-pretrain-to-detectron2.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import pickle as pkl +import sys +import torch + +if __name__ == "__main__": + input = sys.argv[1] + + obj = torch.load(input, map_location="cpu") + obj = obj["state_dict"] + + newmodel = {} + for k, v in obj.items(): + old_k = k + if "layer" not in k: + k = "stem." 
+ k + for t in [1, 2, 3, 4]: + k = k.replace("layer{}".format(t), "res{}".format(t + 1)) + for t in [1, 2, 3]: + k = k.replace("bn{}".format(t), "conv{}.norm".format(t)) + k = k.replace("downsample.0", "shortcut") + k = k.replace("downsample.1", "shortcut.norm") + print(old_k, "->", k) + newmodel[k] = v.numpy() + + res = { + "model": newmodel, + "__author__": "OpenSelfSup", + "matching_heuristics": True + } + + assert sys.argv[2].endswith('.pkl') + with open(sys.argv[2], "wb") as f: + pkl.dump(res, f) diff --git a/benchmarks/detection/run.sh b/benchmarks/detection/run.sh new file mode 100644 index 00000000..2b35e59d --- /dev/null +++ b/benchmarks/detection/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash +DET_CFG=$1 +WEIGHTS=$2 + +python $(dirname "$0")/train_net.py --config-file $DET_CFG \ + --num-gpus 8 MODEL.WEIGHTS $WEIGHTS diff --git a/benchmarks/detection/train_net.py b/benchmarks/detection/train_net.py new file mode 100755 index 00000000..8ae31c9e --- /dev/null +++ b/benchmarks/detection/train_net.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import os + +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch +from detectron2.evaluation import COCOEvaluator, PascalVOCDetectionEvaluator +from detectron2.layers import get_norm +from detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY, Res5ROIHeads + + +@ROI_HEADS_REGISTRY.register() +class Res5ROIHeadsExtraNorm(Res5ROIHeads): + """ + As described in the MOCO paper, there is an extra BN layer + following the res5 stage. + """ + + def _build_res5_block(self, cfg): + seq, out_channels = super()._build_res5_block(cfg) + norm = cfg.MODEL.RESNETS.NORM + norm = get_norm(norm, out_channels) + seq.add_module("norm", norm) + return seq, out_channels + + +class Trainer(DefaultTrainer): + + @classmethod + def build_evaluator(cls, cfg, dataset_name, output_folder=None): + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + if "coco" in dataset_name: + return COCOEvaluator(dataset_name, cfg, True, output_folder) + else: + assert "voc" in dataset_name + return PascalVOCDetectionEvaluator(dataset_name) + + +def setup(args): + cfg = get_cfg() + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + + +def main(args): + cfg = setup(args) + + if args.eval_only: + model = Trainer.build_model(cfg) + DetectionCheckpointer( + model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume) + res = Trainer.test(cfg, model) + return res + + trainer = Trainer(cfg) + trainer.resume_or_load(resume=args.resume) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args, ), + ) diff --git a/benchmarks/dist_test_cls.sh b/benchmarks/dist_test_cls.sh new file mode 100755 index 00000000..dd0a5ab2 --- /dev/null +++ b/benchmarks/dist_test_cls.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +set -e +set -x + +CFG=$1 +EPOCH=$2 +DATASET=$3 # imagenet or places205 +GPUS=${GPUS:-1} +PORT=${PORT:-29500} +PY_ARGS=${@:4} + +WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/ +CHECKPOINT=$WORK_DIR/epoch_${EPOCH}.pth 
+WORK_DIR_EVAL=$WORK_DIR/${DATASET}_at_epoch_${EPOCH}/
+
+# extract backbone
+if [ ! -f "${CHECKPOINT::(-4)}_extracted.pth" ]; then
+    python tools/extract_backbone_weights.py $CHECKPOINT \
+        --save-path ${CHECKPOINT::(-4)}_extracted.pth
+fi
+
+# train
+python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
+    tools/train.py \
+    configs/linear_classification/${DATASET}/r50_multihead.py \
+    --pretrained ${CHECKPOINT::(-4)}_extracted.pth \
+    --work_dir ${WORK_DIR_EVAL} --seed 0 --launcher="pytorch" ${PY_ARGS}
+
+# test
+python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
+    tools/test.py \
+    configs/linear_classification/${DATASET}/r50_multihead.py \
+    ${WORK_DIR_EVAL}/latest.pth \
+    --work_dir ${WORK_DIR_EVAL} --launcher="pytorch"

diff --git a/benchmarks/dist_test_svm.sh b/benchmarks/dist_test_svm.sh
new file mode 100644
index 00000000..82fa67aa
--- /dev/null
+++ b/benchmarks/dist_test_svm.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+set -e
+set -x
+
+CFG=$1
+EPOCH=$2
+FEAT_LIST=$3
+GPUS=$4
+WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
+
+bash tools/dist_extract.sh $CFG $WORK_DIR/epoch_${EPOCH}.pth $GPUS
+
+bash benchmarks/eval_svm.sh $WORK_DIR $FEAT_LIST
+
+bash benchmarks/eval_svm_lowshot.sh $WORK_DIR $FEAT_LIST

diff --git a/benchmarks/eval_svm.sh b/benchmarks/eval_svm.sh
new file mode 100644
index 00000000..8e8ca478
--- /dev/null
+++ b/benchmarks/eval_svm.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+WORK_DIR=$1
+FEAT_LIST=${2:-"feat5"} # "feat1 feat2 feat3 feat4 feat5"
+TRAIN_SVM_FLAG=true
+TEST_SVM_FLAG=true
+DATA="data/VOCdevkit/VOC2007/SVMLabels"
+
+# config svm
+costs="1.0,10.0,100.0"
+
+mkdir -p $WORK_DIR/logs
+for feat in $FEAT_LIST; do
+    echo "For feature: $feat" 2>&1 | tee -a $WORK_DIR/logs/eval_svm.log
+    # train svm
+    if $TRAIN_SVM_FLAG; then
+        rm -rf $WORK_DIR/svm
+        mkdir -p $WORK_DIR/svm/voc07_${feat}
+        echo "training svm ..."
+        python benchmarks/svm_tools/train_svm_kfold_parallel.py \
+            --data_file $WORK_DIR/features/voc07_trainval_${feat}.npy \
+            --targets_data_file $DATA/train_labels.npy \
+            --costs_list $costs \
+            --output_path $WORK_DIR/svm/voc07_${feat}
+    fi
+
+    # test svm
+    if $TEST_SVM_FLAG; then
+        echo "testing svm ..."
+        python benchmarks/svm_tools/test_svm.py \
+            --data_file $WORK_DIR/features/voc07_test_${feat}.npy \
+            --json_targets $DATA/test_targets.json \
+            --targets_data_file $DATA/test_labels.npy \
+            --costs_list $costs \
+            --generate_json 1 \
+            --output_path $WORK_DIR/svm/voc07_${feat} 2>&1 | tee -a $WORK_DIR/logs/eval_svm.log
+    fi
+
+done

diff --git a/benchmarks/eval_svm_lowshot.sh b/benchmarks/eval_svm_lowshot.sh
new file mode 100644
index 00000000..8f16be4d
--- /dev/null
+++ b/benchmarks/eval_svm_lowshot.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+WORK_DIR=$1
+MODE="full"
+FEAT_LIST=${2:-"feat5"} # "feat1 feat2 feat3 feat4 feat5"
+TRAIN_SVM_LOWSHOT_FLAG=true
+TEST_SVM_LOWSHOT_FLAG=true
+AGGREGATE_FLAG=true
+DATA="data/VOCdevkit/VOC2007/SVMLabels"
+
+# config svm
+costs="1.0,10.0,100.0"
+if [ "$MODE" == "fast" ]; then
+    shots="96"
+else
+    shots="1 2 4 8 16 32 64 96"
+fi
+
+mkdir -p $WORK_DIR/logs
+for feat in $FEAT_LIST; do
+    echo "For feature: $feat" 2>&1 | tee -a $WORK_DIR/logs/eval_svm_lowshot.log
+    # train lowshot svm
+    if $TRAIN_SVM_LOWSHOT_FLAG; then
+        rm -rf $WORK_DIR/svm_lowshot
+        mkdir -p $WORK_DIR/svm_lowshot/voc07_${feat}
+        echo "training svm low-shot ..."
+        for s in {1..5}; do
+            for k in $shots; do
+                echo -e "\ts${s} k${k}"
+                python benchmarks/svm_tools/train_svm_low_shot.py \
+                    --data_file $WORK_DIR/features/voc07_trainval_${feat}.npy \
+                    --targets_data_file $DATA/low_shot/labels/train_targets_sample${s}_k${k}.npy \
+                    --costs_list $costs \
+                    --output_path $WORK_DIR/svm_lowshot/voc07_${feat}
+            done
+        done
+    fi
+
+    # test lowshot svm
+    if $TEST_SVM_LOWSHOT_FLAG; then
+        echo "testing svm low-shot ..."
+        python benchmarks/svm_tools/test_svm_low_shot.py \
+            --data_file $WORK_DIR/features/voc07_test_${feat}.npy \
+            --targets_data_file $DATA/test_labels.npy \
+            --json_targets $DATA/test_targets.json \
+            --generate_json 1 \
+            --costs_list $costs \
+            --output_path $WORK_DIR/svm_lowshot/voc07_${feat} \
+            --k_values "${shots// /,}" \
+            --sample_inds "0,1,2,3,4" \
+            --dataset "voc"
+    fi
+
+    # aggregate testing results
+    if $AGGREGATE_FLAG; then
+        echo "aggregating svm low-shot ..."
+        python benchmarks/svm_tools/aggregate_low_shot_svm_stats.py \
+            --output_path $WORK_DIR/svm_lowshot/voc07_${feat} \
+            --k_values "${shots// /,}" \
+            --sample_inds "0,1,2,3,4" 2>&1 | tee -a $WORK_DIR/logs/eval_svm_lowshot.log
+    fi
+
+done

diff --git a/benchmarks/extract_info/voc07.py b/benchmarks/extract_info/voc07.py
new file mode 100644
index 00000000..2680b198
--- /dev/null
+++ b/benchmarks/extract_info/voc07.py
@@ -0,0 +1,20 @@
+data_source_cfg = dict(type='ImageList', memcached=False, mclient_path=None)
+data_root = "data/VOCdevkit/VOC2007/JPEGImages"
+data_all_list = "data/VOCdevkit/VOC2007/Lists/trainvaltest.txt"
+split_at = [5011]
+split_name = ['voc07_trainval', 'voc07_test']
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+
+data = dict(
+    imgs_per_gpu=32,
+    workers_per_gpu=2,
+    extract=dict(
+        type="ExtractDataset",
+        data_source=dict(
+            list_file=data_all_list, root=data_root, **data_source_cfg),
+        pipeline=[
+            dict(type='Resize', size=256),
+            dict(type='Resize', size=(224, 224)),
+            dict(type='ToTensor'),
+            dict(type='Normalize', **img_norm_cfg),
+        ]))

diff --git a/benchmarks/srun_test_cls.sh b/benchmarks/srun_test_cls.sh
new file mode 100644
index 00000000..472a97a0
--- /dev/null
+++ b/benchmarks/srun_test_cls.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+
+PARTITION=$1
+CFG=$2
+EPOCH=$3
+DATASET=$4 # imagenet or places205
+PY_ARGS=${@:5}
+JOB_NAME="openselfsup"
+GPUS=${GPUS:-1}
+GPUS_PER_NODE=${GPUS_PER_NODE:-1}
+CPUS_PER_TASK=${CPUS_PER_TASK:-5}
+SRUN_ARGS=${SRUN_ARGS:-""}
+
+WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
+CHECKPOINT=$WORK_DIR/epoch_${EPOCH}.pth
+WORK_DIR_EVAL=$WORK_DIR/${DATASET}_at_epoch_${EPOCH}/
+
+# extract backbone
+if [ ! -f "${CHECKPOINT::(-4)}_extracted.pth" ]; then
+    srun -p ${PARTITION} \
+        python tools/extract_backbone_weights.py $CHECKPOINT \
+        --save-path ${CHECKPOINT::(-4)}_extracted.pth
+fi
+
+# train
+GLOG_vmodule=MemcachedClient=-1 \
+srun -p ${PARTITION} \
+    --job-name=${JOB_NAME} \
+    --gres=gpu:${GPUS_PER_NODE} \
+    --ntasks=${GPUS} \
+    --ntasks-per-node=${GPUS_PER_NODE} \
+    --cpus-per-task=${CPUS_PER_TASK} \
+    --kill-on-bad-exit=1 \
+    ${SRUN_ARGS} \
+    python -u tools/train.py \
+    configs/linear_classification/${DATASET}/r50_multihead.py \
+    --pretrained ${CHECKPOINT::(-4)}_extracted.pth \
+    --work_dir ${WORK_DIR_EVAL} --seed 0 --launcher="slurm" ${PY_ARGS}
+
+# test
+GLOG_vmodule=MemcachedClient=-1 \
+srun -p ${PARTITION} \
+    --job-name=${JOB_NAME} \
+    --gres=gpu:${GPUS_PER_NODE} \
+    --ntasks=${GPUS} \
+    --ntasks-per-node=${GPUS_PER_NODE} \
+    --cpus-per-task=${CPUS_PER_TASK} \
+    --kill-on-bad-exit=1 \
+    ${SRUN_ARGS} \
+    python -u tools/test.py \
+    configs/linear_classification/${DATASET}/r50_multihead.py \
+    ${WORK_DIR_EVAL}/latest.pth \
+    --work_dir ${WORK_DIR_EVAL} --launcher="slurm"

diff --git a/benchmarks/srun_test_semi.sh b/benchmarks/srun_test_semi.sh
new file mode 100644
index 00000000..713955ab
--- /dev/null
+++ b/benchmarks/srun_test_semi.sh
@@ -0,0 +1,60 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+
+PARTITION=$1
+CFG=$2
+EPOCH=$3
+PERCENT=$4
+PY_ARGS=${@:5}
+JOB_NAME="openselfsup"
+GPUS=${GPUS:-1}
+GPUS_PER_NODE=${GPUS_PER_NODE:-1}
+CPUS_PER_TASK=${CPUS_PER_TASK:-5}
+SRUN_ARGS=${SRUN_ARGS:-""}
+
+WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
+CHECKPOINT=$WORK_DIR/epoch_${EPOCH}.pth
+WORK_DIR_EVAL=$WORK_DIR/imagenet_semi_${PERCENT}percent_at_epoch_${EPOCH}/
+
+if [ ! "$PERCENT" == "1" ] && [ ! "$PERCENT" == "10" ]; then
+    echo "ERROR: PERCENT must be in {1, 10}"
+    exit 1
+fi
+# extract backbone
+if [ ! -f "${CHECKPOINT::(-4)}_extracted.pth" ]; then
+    srun -p ${PARTITION} \
+        python tools/extract_backbone_weights.py $CHECKPOINT \
+        --save-path ${CHECKPOINT::(-4)}_extracted.pth
+fi
+
+# train
+GLOG_vmodule=MemcachedClient=-1 \
+srun -p ${PARTITION} \
+    --job-name=${JOB_NAME} \
+    --gres=gpu:${GPUS_PER_NODE} \
+    --ntasks=${GPUS} \
+    --ntasks-per-node=${GPUS_PER_NODE} \
+    --cpus-per-task=${CPUS_PER_TASK} \
+    --kill-on-bad-exit=1 \
+    ${SRUN_ARGS} \
+    python -u tools/train.py \
+    configs/semisup_classification/imagenet_${PERCENT}percent/r50.py \
+    --pretrained ${CHECKPOINT::(-4)}_extracted.pth \
+    --work_dir ${WORK_DIR_EVAL} --seed 0 --launcher="slurm" ${PY_ARGS}
+
+# test
+GLOG_vmodule=MemcachedClient=-1 \
+srun -p ${PARTITION} \
+    --job-name=${JOB_NAME} \
+    --gres=gpu:${GPUS_PER_NODE} \
+    --ntasks=${GPUS} \
+    --ntasks-per-node=${GPUS_PER_NODE} \
+    --cpus-per-task=${CPUS_PER_TASK} \
+    --kill-on-bad-exit=1 \
+    ${SRUN_ARGS} \
+    python -u tools/test.py \
+    configs/semisup_classification/imagenet_${PERCENT}percent/r50.py \
+    ${WORK_DIR_EVAL}/latest.pth \
+    --work_dir ${WORK_DIR_EVAL} --launcher="slurm"

diff --git a/benchmarks/srun_test_svm.sh b/benchmarks/srun_test_svm.sh
new file mode 100644
index 00000000..a6e817bf
--- /dev/null
+++ b/benchmarks/srun_test_svm.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+set -e
+set -x
+
+PARTITION=$1
+CFG=$2
+EPOCH=$3
+FEAT=$4
+WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
+
+bash tools/srun_extract.sh $PARTITION $CFG $WORK_DIR/epoch_${EPOCH}.pth
+
+srun -p $PARTITION bash benchmarks/eval_svm.sh $WORK_DIR $FEAT
+
+srun -p $PARTITION bash benchmarks/eval_svm_lowshot.sh $WORK_DIR $FEAT

diff --git a/benchmarks/svm_tools/aggregate_low_shot_svm_stats.py b/benchmarks/svm_tools/aggregate_low_shot_svm_stats.py
new file mode 100644
index 00000000..b797e0c2
--- /dev/null
+++ b/benchmarks/svm_tools/aggregate_low_shot_svm_stats.py
@@ -0,0 +1,128 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+#
+################################################################################
+"""
+Aggregate the stats over various independent samples for low-shot svm training.
+Stats computed: mean, max, min, std
+
+Relevant transfer tasks: Low-shot Image Classification VOC07 and Places205 low
+shot samples.
+""" + +from __future__ import division +from __future__ import absolute_import +from __future__ import unicode_literals +from __future__ import print_function + +import argparse +import logging +import numpy as np +import os +import sys + +# create the logger +FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s' +logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout) +logger = logging.getLogger(__name__) + + +def save_stats(output_dir, stat, output): + out_file = os.path.join(output_dir, 'test_ap_{}.npy'.format(stat)) + logger.info('Saving {} to: {} {}'.format(stat, out_file, output.shape)) + np.save(out_file, output) + + +def aggregate_stats(opts): + k_values = [int(val) for val in opts.k_values.split(",")] + sample_inds = [int(val) for val in opts.sample_inds.split(",")] + logger.info( + 'Aggregating stats for k-values: {} and sample_inds: {}'.format( + k_values, sample_inds)) + + output_mean, output_max, output_min, output_std = [], [], [], [] + for k_idx in range(len(k_values)): + k_low = k_values[k_idx] + k_val_output = [] + for inds in range(len(sample_inds)): + sample_idx = sample_inds[inds] + file_name = 'test_ap_sample{}_k{}.npy'.format( + sample_idx + 1, k_low) + filepath = os.path.join(opts.output_path, file_name) + if os.path.exists(filepath): + k_val_output.append(np.load(filepath, encoding='latin1')) + else: + logger.info('file does not exist: {}'.format(filepath)) + # import pdb; pdb.set_trace() + k_val_output = np.concatenate(k_val_output, axis=0) + k_low_max = np.max( + k_val_output, axis=0).reshape(-1, k_val_output.shape[1]) + k_low_min = np.min( + k_val_output, axis=0).reshape(-1, k_val_output.shape[1]) + k_low_mean = np.mean( + k_val_output, axis=0).reshape(-1, k_val_output.shape[1]) + k_low_std = np.std( + k_val_output, axis=0).reshape(-1, k_val_output.shape[1]) + output_mean.append(k_low_mean) + output_min.append(k_low_min) + output_max.append(k_low_max) + output_std.append(k_low_std) + + output_mean = np.concatenate(output_mean, axis=0) + output_min = np.concatenate(output_min, axis=0) + output_max = np.concatenate(output_max, axis=0) + output_std = np.concatenate(output_std, axis=0) + + save_stats(opts.output_path, 'mean', output_mean) + save_stats(opts.output_path, 'min', output_min) + save_stats(opts.output_path, 'max', output_max) + save_stats(opts.output_path, 'std', output_std) + + argmax_cls = np.argmax(output_mean, axis=1) + argmax_mean, argmax_min, argmax_max, argmax_std = [], [], [], [] + for idx in range(len(argmax_cls)): + argmax_mean.append(100.0 * output_mean[idx, argmax_cls[idx]]) + argmax_min.append(100.0 * output_min[idx, argmax_cls[idx]]) + argmax_max.append(100.0 * output_max[idx, argmax_cls[idx]]) + argmax_std.append(100.0 * output_std[idx, argmax_cls[idx]]) + for idx in range(len(argmax_max)): + logger.info('mean/min/max/std: {} / {} / {} / {}'.format( + round(argmax_mean[idx], 2), + round(argmax_min[idx], 2), + round(argmax_max[idx], 2), + round(argmax_std[idx], 2), + )) + logger.info('All done!!') + + +def main(): + parser = argparse.ArgumentParser(description='Low shot SVM model test') + parser.add_argument( + '--output_path', + type=str, + default=None, + help="Numpy file containing test AP result files") + parser.add_argument( + '--k_values', + type=str, + default=None, + help="Low-shot k-values for svm testing. Comma separated") + parser.add_argument( + '--sample_inds', + type=str, + default=None, + help="sample_inds for which to test svm. 
Comma separated") + if len(sys.argv) == 1: + parser.print_help() + sys.exit(1) + + opts = parser.parse_args() + logger.info(opts) + aggregate_stats(opts) + + +if __name__ == '__main__': + main() diff --git a/benchmarks/svm_tools/svm_helper.py b/benchmarks/svm_tools/svm_helper.py new file mode 100644 index 00000000..1792d5ea --- /dev/null +++ b/benchmarks/svm_tools/svm_helper.py @@ -0,0 +1,171 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# +################################################################################ +""" +Helper module for svm training and testing. +""" + +from __future__ import division +from __future__ import absolute_import +from __future__ import unicode_literals +from __future__ import print_function + +import logging +import numpy as np +import os +import sys + +# create the logger +FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s' +logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout) +logger = logging.getLogger(__name__) + + +# Python 2 and python 3 have different floating point precision. The following +# trick helps keep the backwards compatibility. +def py2_py3_compatible_cost(cost): + return str(float("{:.17f}".format(cost))) + + +def get_svm_train_output_files(cls, cost, output_path): + cls_cost = str(cls) + '_cost' + py2_py3_compatible_cost(cost) + out_file = os.path.join(output_path, 'cls' + cls_cost + '.pickle') + ap_matrix_out_file = os.path.join(output_path, + 'AP_cls' + cls_cost + '.npy') + return out_file, ap_matrix_out_file + + +def parse_cost_list(costs): + costs_list = [float(cost) for cost in costs.split(",")] + start_num, end_num = 4, 20 + for num in range(start_num, end_num): + costs_list.append(0.5**num) + return costs_list + + +def normalize_features(features): + feats_norm = np.linalg.norm(features, axis=1) + features = features / (feats_norm + 1e-5)[:, np.newaxis] + return features + + +def load_input_data(data_file, targets_file): + # load the features and the targets + #logger.info('loading features and targets...') + targets = np.load(targets_file, encoding='latin1') + features = np.array(np.load(data_file, + encoding='latin1')).astype(np.float64) + assert features.shape[0] == targets.shape[0], "Mismatched #images" + logger.info('Loaded features: {} and targets: {}'.format( + features.shape, targets.shape)) + return features, targets + + +def calculate_ap(rec, prec): + """ + Computes the AP under the precision recall curve. 
+ """ + rec, prec = rec.reshape(rec.size, 1), prec.reshape(prec.size, 1) + z, o = np.zeros((1, 1)), np.ones((1, 1)) + mrec, mpre = np.vstack((z, rec, o)), np.vstack((z, prec, z)) + for i in range(len(mpre) - 2, -1, -1): + mpre[i] = max(mpre[i], mpre[i + 1]) + + indices = np.where(mrec[1:] != mrec[0:-1])[0] + 1 + ap = 0 + for i in indices: + ap = ap + (mrec[i] - mrec[i - 1]) * mpre[i] + return ap + + +def get_precision_recall(targets, preds): + """ + [P, R, score, ap] = get_precision_recall(targets, preds) + Input : + targets : number of occurrences of this class in the ith image + preds : score for this image + Output : + P, R : precision and recall + score : score which corresponds to the particular precision and recall + ap : average precision + """ + # binarize targets + targets = np.array(targets > 0, dtype=np.float32) + tog = np.hstack((targets[:, np.newaxis].astype(np.float64), + preds[:, np.newaxis].astype(np.float64))) + ind = np.argsort(preds) + ind = ind[::-1] + score = np.array([tog[i, 1] for i in ind]) + sortcounts = np.array([tog[i, 0] for i in ind]) + + tp = sortcounts + fp = sortcounts.copy() + for i in range(sortcounts.shape[0]): + if sortcounts[i] >= 1: + fp[i] = 0. + elif sortcounts[i] < 1: + fp[i] = 1. + P = np.cumsum(tp) / (np.cumsum(tp) + np.cumsum(fp)) + numinst = np.sum(targets) + R = np.cumsum(tp) / numinst + ap = calculate_ap(R, P) + return P, R, score, ap + + +def get_low_shot_output_file(opts, cls, cost, suffix): + # in case of low-shot training, we train for 5 independent samples + # (sample{}) and vary low-shot amount (k{}). The input data should have + # sample{}_k{} information that we extract in suffix below. + # logger.info('Suffix: {}'.format(suffix)) + cls_cost = str(cls) + '_cost' + py2_py3_compatible_cost(cost) + out_file = os.path.join(opts.output_path, + 'cls' + cls_cost + '_' + suffix + '.pickle') + return out_file + + +def get_low_shot_svm_classes(targets, dataset): + # classes for which SVM testing should be done + num_classes, cls_list = None, None + if dataset == 'voc': + num_classes = targets.shape[1] + cls_list = range(num_classes) + elif dataset == 'places': + # each image in places has a target cls [0, .... ,204] + num_classes = len(set(targets[:, 0].tolist())) + cls_list = list(set(targets[:, 0].tolist())) + else: + logger.info('Dataset not recognized. Abort!') + logger.info('Testing SVM for classes: {}'.format(cls_list)) + logger.info('Num classes: {}'.format(num_classes)) + return num_classes, cls_list + + +def get_cls_feats_labels(cls, features, targets, dataset): + out_feats, out_cls_labels = None, None + if dataset == 'voc': + cls_labels = targets[:, cls].astype(dtype=np.int32, copy=True) + # find the indices for positive/negative imgs. Remove the ignore label. + out_data_inds = (targets[:, cls] != -1) + out_feats = features[out_data_inds] + out_cls_labels = cls_labels[out_data_inds] + # label 0 = not present, set it to -1 as svm train target. + # Make the svm train target labels as -1, 1. + out_cls_labels[np.where(out_cls_labels == 0)] = -1 + elif dataset == 'places': + out_feats = features + out_cls_labels = targets.astype(dtype=np.int32, copy=True) + # for the given class, get the relevant positive/negative images and + # make the label 1, -1 + cls_inds = np.where(targets[:, 0] == cls) + non_cls_inds = (targets[:, 0] != cls) + out_cls_labels[non_cls_inds] = -1 + out_cls_labels[cls_inds] = 1 + # finally reshape into the format taken by sklearn svm package. 
+ out_cls_labels = out_cls_labels.reshape(-1) + else: + raise Exception('args.dataset not recognized') + return out_feats, out_cls_labels diff --git a/benchmarks/svm_tools/test_svm.py b/benchmarks/svm_tools/test_svm.py new file mode 100644 index 00000000..854ec175 --- /dev/null +++ b/benchmarks/svm_tools/test_svm.py @@ -0,0 +1,174 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# +################################################################################ +""" +SVM test for image classification. + +Relevant transfer tasks: Image Classification VOC07 and COCO2014. +""" +from __future__ import division +from __future__ import absolute_import +from __future__ import unicode_literals +from __future__ import print_function + +import argparse +import json +import logging +import numpy as np +import os +import pickle +import six +import sys + +import svm_helper + +# create the logger +FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s' +logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout) +logger = logging.getLogger(__name__) + + +def get_chosen_costs(opts, num_classes): + costs_list = svm_helper.parse_cost_list(opts.costs_list) + train_ap_matrix = np.zeros((num_classes, len(costs_list))) + for cls in range(num_classes): + for cost_idx in range(len(costs_list)): + cost = costs_list[cost_idx] + _, ap_out_file = svm_helper.get_svm_train_output_files( + cls, cost, opts.output_path) + train_ap_matrix[cls][cost_idx] = float( + np.load(ap_out_file, encoding='latin1')[0]) + argmax_cls = np.argmax(train_ap_matrix, axis=1) + chosen_cost = [costs_list[idx] for idx in argmax_cls] + logger.info('chosen_cost: {}'.format(chosen_cost)) + np.save( + os.path.join(opts.output_path, 'crossval_ap.npy'), + np.array(train_ap_matrix)) + np.save( + os.path.join(opts.output_path, 'chosen_cost.npy'), + np.array(chosen_cost)) + logger.info('saved crossval_ap AP to file: {}'.format( + os.path.join(opts.output_path, 'crossval_ap.npy'))) + logger.info('saved chosen costs to file: {}'.format( + os.path.join(opts.output_path, 'chosen_cost.npy'))) + return np.array(chosen_cost) + + +def load_json(file_path): + assert os.path.exists(file_path), "{} does not exist".format(file_path) + with open(file_path, 'r') as fp: + data = json.load(fp) + img_ids = list(data.keys()) + cls_names = list(data[img_ids[0]].keys()) + return img_ids, cls_names + + +def test_svm(opts): + assert os.path.exists(opts.data_file), "Data file not found. Abort!" 
+ json_predictions, img_ids, cls_names = {}, [], [] + if opts.generate_json: + img_ids, cls_names = load_json(opts.json_targets) + + features, targets = svm_helper.load_input_data(opts.data_file, + opts.targets_data_file) + # normalize the features: N x 9216 (example shape) + features = svm_helper.normalize_features(features) + num_classes = targets.shape[1] + logger.info('Num classes: {}'.format(num_classes)) + + # get the chosen cost that maximizes the cross-validation AP per class + costs_list = get_chosen_costs(opts, num_classes) + + ap_matrix = np.zeros((num_classes, 1)) + for cls in range(num_classes): + cost = costs_list[cls] + logger.info('Testing model for cls: {} cost: {}'.format(cls, cost)) + model_file = os.path.join( + opts.output_path, + 'cls' + str(cls) + '_cost' + str(cost) + '.pickle') + with open(model_file, 'rb') as fopen: + if six.PY2: + model = pickle.load(fopen) + else: + model = pickle.load(fopen, encoding='latin1') + prediction = model.decision_function(features) + if opts.generate_json: + cls_name = cls_names[cls] + for idx in range(len(prediction)): + img_id = img_ids[idx] + if img_id in json_predictions: + json_predictions[img_id][cls_name] = prediction[idx] + else: + out_lbl = {} + out_lbl[cls_name] = prediction[idx] + json_predictions[img_id] = out_lbl + + cls_labels = targets[:, cls] + # meaning of labels in VOC/COCO original loaded target files: + # label 0 = not present, set it to -1 as svm train target + # label 1 = present. Make the svm train target labels as -1, 1. + evaluate_data_inds = (targets[:, cls] != -1) + eval_preds = prediction[evaluate_data_inds] + eval_cls_labels = cls_labels[evaluate_data_inds] + eval_cls_labels[np.where(eval_cls_labels == 0)] = -1 + P, R, score, ap = svm_helper.get_precision_recall( + eval_cls_labels, eval_preds) + ap_matrix[cls][0] = ap + if opts.generate_json: + output_file = os.path.join(opts.output_path, 'json_preds.json') + with open(output_file, 'w') as fp: + json.dump(json_predictions, fp) + logger.info('Saved json predictions to: {}'.format(output_file)) + logger.info('Mean AP: {}'.format(np.mean(ap_matrix, axis=0))) + np.save(os.path.join(opts.output_path, 'test_ap.npy'), np.array(ap_matrix)) + logger.info('saved test AP to file: {}'.format( + os.path.join(opts.output_path, 'test_ap.npy'))) + + +def main(): + parser = argparse.ArgumentParser(description='SVM model test') + parser.add_argument( + '--data_file', + type=str, + default=None, + help="Numpy file containing image features and labels") + parser.add_argument( + '--json_targets', + type=str, + default=None, + help="Json file containing json targets") + parser.add_argument( + '--targets_data_file', + type=str, + default=None, + help="Numpy file containing image labels") + parser.add_argument( + '--costs_list', + type=str, + default="0.01,0.1", + help="comma separated string containing list of costs") + parser.add_argument( + '--output_path', + type=str, + default=None, + help="path where trained SVM models are saved") + parser.add_argument( + '--generate_json', + type=int, + default=0, + help="Whether to generate json files for output") + if len(sys.argv) == 1: + parser.print_help() + sys.exit(1) + + opts = parser.parse_args() + logger.info(opts) + test_svm(opts) + + +if __name__ == '__main__': + main() diff --git a/benchmarks/svm_tools/test_svm_low_shot.py b/benchmarks/svm_tools/test_svm_low_shot.py new file mode 100644 index 00000000..69475906 --- /dev/null +++ b/benchmarks/svm_tools/test_svm_low_shot.py @@ -0,0 +1,212 @@ +# Copyright (c) Facebook, Inc. 
and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# +################################################################################ +""" +SVM test for low shot image classification. + +Relevant transfer tasks: Low-shot Image Classification VOC07 and Places205 low +shot samples. +""" +from __future__ import division +from __future__ import absolute_import +from __future__ import unicode_literals +from __future__ import print_function + +import argparse +import json +import logging +import numpy as np +import os +import pickle +import six +import sys + +import svm_helper + +# create the logger +FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s' +logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout) +logger = logging.getLogger(__name__) + + +def load_json(file_path): + assert os.path.exists(file_path), "{} does not exist".format(file_path) + with open(file_path, 'r') as fp: + data = json.load(fp) + img_ids = list(data.keys()) + cls_names = list(data[img_ids[0]].keys()) + return img_ids, cls_names + + +def save_json_predictions(opts, cost, sample_idx, k_low, features, cls_list, + cls_names, img_ids): + num_classes = len(cls_list) + json_predictions = {} + for cls in range(num_classes): + suffix = 'sample{}_k{}'.format(sample_idx + 1, k_low) + model_file = svm_helper.get_low_shot_output_file( + opts, cls, cost, suffix) + with open(model_file, 'rb') as fopen: + if six.PY2: + model = pickle.load(fopen) + else: + model = pickle.load(fopen, encoding='latin1') + prediction = model.decision_function(features) + cls_name = cls_names[cls] + for idx in range(len(prediction)): + img_id = img_ids[idx] + if img_id in json_predictions: + json_predictions[img_id][cls_name] = prediction[idx] + else: + out_lbl = {} + out_lbl[cls_name] = prediction[idx] + json_predictions[img_id] = out_lbl + + output_file = os.path.join(opts.output_path, + 'test_{}_json_preds.json'.format(suffix)) + with open(output_file, 'w') as fp: + json.dump(json_predictions, fp) + logger.info('Saved json predictions to: {}'.format(output_file)) + + +def test_svm_low_shot(opts): + k_values = [int(val) for val in opts.k_values.split(",")] + sample_inds = [int(val) for val in opts.sample_inds.split(",")] + logger.info('Testing svm for k-values: {} and sample_inds: {}'.format( + k_values, sample_inds)) + + img_ids, cls_names = [], [] + if opts.generate_json: + img_ids, cls_names = load_json(opts.json_targets) + + assert os.path.exists(opts.data_file), "Data file not found. Abort!" + # we test the svms on the full test set. Given the test features and the + # targets, we test it for various k-values (low-shot), cost values and + # 5 independent samples. + features, targets = svm_helper.load_input_data(opts.data_file, + opts.targets_data_file) + # normalize the features: N x 9216 (example shape) + features = svm_helper.normalize_features(features) + + # parse the cost values for training the SVM on + costs_list = svm_helper.parse_cost_list(opts.costs_list) + logger.info('Testing SVM for costs: {}'.format(costs_list)) + + # classes for which SVM testing should be done + num_classes, cls_list = svm_helper.get_low_shot_svm_classes( + targets, opts.dataset) + + # create the output for per sample, per k-value and per cost. 
+    sample_ap_matrices = []
+    for _ in range(len(sample_inds)):
+        ap_matrix = np.zeros((len(k_values), len(costs_list)))
+        sample_ap_matrices.append(ap_matrix)
+
+    # the test goes like this: For a given sample, for a given k-value and a
+    # given cost value, we evaluate the trained svm model for all classes.
+    # After computing over all classes, we get the mean AP value over all
+    # classes. We hence end up with: output = [sample][k_value][cost]
+    for inds in range(len(sample_inds)):
+        sample_idx = sample_inds[inds]
+        for k_idx in range(len(k_values)):
+            k_low = k_values[k_idx]
+            suffix = 'sample{}_k{}'.format(sample_idx + 1, k_low)
+            for cost_idx in range(len(costs_list)):
+                cost = costs_list[cost_idx]
+                local_cost_ap = np.zeros((num_classes, 1))
+                for cls in cls_list:
+                    logger.info(
+                        'Test sample/k_value/cost/cls: {}/{}/{}/{}'.format(
+                            sample_idx + 1, k_low, cost, cls))
+                    model_file = svm_helper.get_low_shot_output_file(
+                        opts, cls, cost, suffix)
+                    with open(model_file, 'rb') as fopen:
+                        if six.PY2:
+                            model = pickle.load(fopen)
+                        else:
+                            model = pickle.load(fopen, encoding='latin1')
+                    prediction = model.decision_function(features)
+                    eval_preds, eval_cls_labels = svm_helper.get_cls_feats_labels(
+                        cls, prediction, targets, opts.dataset)
+                    P, R, score, ap = svm_helper.get_precision_recall(
+                        eval_cls_labels, eval_preds)
+                    local_cost_ap[cls][0] = ap
+                mean_cost_ap = np.mean(local_cost_ap, axis=0)
+                sample_ap_matrices[inds][k_idx][cost_idx] = mean_cost_ap
+            out_k_sample_file = os.path.join(
+                opts.output_path,
+                'test_ap_sample{}_k{}.npy'.format(sample_idx + 1, k_low))
+            save_data = sample_ap_matrices[inds][k_idx]
+            save_data = save_data.reshape((1, -1))
+            np.save(out_k_sample_file, save_data)
+            logger.info('Saved sample test k_idx AP to file: {} {}'.format(
+                out_k_sample_file, save_data.shape))
+            if opts.generate_json:
+                argmax_cls = np.argmax(save_data, axis=1)
+                chosen_cost = costs_list[argmax_cls[0]]
+                logger.info('chosen cost: {}'.format(chosen_cost))
+                save_json_predictions(opts, chosen_cost, sample_idx, k_low,
+                                      features, cls_list, cls_names, img_ids)
+    logger.info('All done!!')
+
+
+def main():
+    parser = argparse.ArgumentParser(description='Low shot SVM model test')
+    parser.add_argument(
+        '--data_file',
+        type=str,
+        default=None,
+        help="Numpy file containing image features and labels")
+    parser.add_argument(
+        '--targets_data_file',
+        type=str,
+        default=None,
+        help="Numpy file containing image labels")
+    parser.add_argument(
+        '--json_targets',
+        type=str,
+        default=None,
+        help="Json file containing json targets")
+    parser.add_argument(
+        '--generate_json',
+        type=int,
+        default=0,
+        help="Whether to generate json files for output")
+    parser.add_argument(
+        '--costs_list',
+        type=str,
+        default=
+        "0.0000001,0.000001,0.00001,0.0001,0.001,0.01,0.1,1.0,10.0,100.0",
+        help="comma separated string containing list of costs")
+    parser.add_argument(
+        '--output_path',
+        type=str,
+        default=None,
+        help="path where trained SVM models are saved")
+    parser.add_argument(
+        '--k_values',
+        type=str,
+        default="1,2,4,8,16,32,64,96",
+        help="Low-shot k-values for svm testing. Comma separated")
+    parser.add_argument(
+        '--sample_inds',
+        type=str,
+        default="0,1,2,3,4",
+        help="sample_inds for which to test svm. 
Comma separated") + parser.add_argument( + '--dataset', type=str, default="voc", help='voc | places') + if len(sys.argv) == 1: + parser.print_help() + sys.exit(1) + + opts = parser.parse_args() + logger.info(opts) + test_svm_low_shot(opts) + + +if __name__ == '__main__': + main() diff --git a/benchmarks/svm_tools/train_svm_kfold.py b/benchmarks/svm_tools/train_svm_kfold.py new file mode 100644 index 00000000..b3a7f1d2 --- /dev/null +++ b/benchmarks/svm_tools/train_svm_kfold.py @@ -0,0 +1,162 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# +################################################################################ +""" +SVM training using 3-fold cross-validation. + +Relevant transfer tasks: Image Classification VOC07 and COCO2014. +""" + +from __future__ import division +from __future__ import absolute_import +from __future__ import unicode_literals +from __future__ import print_function + +import argparse +import logging +import numpy as np +import os +import pickle +import sys +from tqdm import tqdm +from sklearn.svm import LinearSVC +from sklearn.model_selection import cross_val_score + +import svm_helper + +import time + +# create the logger +FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s' +logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout) +logger = logging.getLogger(__name__) + + +def train_svm(opts): + assert os.path.exists(opts.data_file), "Data file not found. Abort!" + if not os.path.exists(opts.output_path): + os.makedirs(opts.output_path) + + features, targets = svm_helper.load_input_data(opts.data_file, + opts.targets_data_file) + # normalize the features: N x 9216 (example shape) + features = svm_helper.normalize_features(features) + + # parse the cost values for training the SVM on + costs_list = svm_helper.parse_cost_list(opts.costs_list) + #logger.info('Training SVM for costs: {}'.format(costs_list)) + + # classes for which SVM training should be done + if opts.cls_list: + cls_list = [int(cls) for cls in opts.cls_list.split(",")] + else: + num_classes = targets.shape[1] + cls_list = range(num_classes) + #logger.info('Training SVM for classes: {}'.format(cls_list)) + + for cls_idx in tqdm(range(len(cls_list))): + cls = cls_list[cls_idx] + for cost_idx in range(len(costs_list)): + start = time.time() + cost = costs_list[cost_idx] + out_file, ap_out_file = svm_helper.get_svm_train_output_files( + cls, cost, opts.output_path) + if os.path.exists(out_file) and os.path.exists(ap_out_file): + logger.info('SVM model exists: {}'.format(out_file)) + logger.info('AP file exists: {}'.format(ap_out_file)) + else: + #logger.info('Training model with the cost: {}'.format(cost)) + clf = LinearSVC( + C=cost, + class_weight={ + 1: 2, + -1: 1 + }, + intercept_scaling=1.0, + verbose=0, + penalty='l2', + loss='squared_hinge', + tol=0.0001, + dual=True, + max_iter=2000, + ) + cls_labels = targets[:, cls].astype(dtype=np.int32, copy=True) + # meaning of labels in VOC/COCO original loaded target files: + # label 0 = not present, set it to -1 as svm train target + # label 1 = present. Make the svm train target labels as -1, 1. 
+ cls_labels[np.where(cls_labels == 0)] = -1 + #num_positives = len(np.where(cls_labels == 1)[0]) + #num_negatives = len(cls_labels) - num_positives + + #logger.info('cls: {} has +ve: {} -ve: {} ratio: {}'.format( + # cls, num_positives, num_negatives, + # float(num_positives) / num_negatives) + #) + #logger.info('features: {} cls_labels: {}'.format( + # features.shape, cls_labels.shape)) + ap_scores = cross_val_score( + clf, + features, + cls_labels, + cv=3, + scoring='average_precision') + clf.fit(features, cls_labels) + + #logger.info('cls: {} cost: {} AP: {} mean:{}'.format( + # cls, cost, ap_scores, ap_scores.mean())) + #logger.info('Saving cls cost AP to: {}'.format(ap_out_file)) + np.save(ap_out_file, np.array([ap_scores.mean()])) + #logger.info('Saving SVM model to: {}'.format(out_file)) + with open(out_file, 'wb') as fwrite: + pickle.dump(clf, fwrite) + print("time: {:.4g} s".format(time.time() - start)) + + +def main(): + parser = argparse.ArgumentParser(description='SVM model training') + parser.add_argument( + '--data_file', + type=str, + default=None, + help="Numpy file containing image features") + parser.add_argument( + '--targets_data_file', + type=str, + default=None, + help="Numpy file containing image labels") + parser.add_argument( + '--output_path', + type=str, + default=None, + help="path where to save the trained SVM models") + parser.add_argument( + '--costs_list', + type=str, + default="0.01,0.1", + help="comma separated string containing list of costs") + parser.add_argument( + '--random_seed', + type=int, + default=100, + help="random seed for SVM classifier training") + + parser.add_argument( + '--cls_list', + type=str, + default=None, + help="comma separated string list of classes to train") + if len(sys.argv) == 1: + parser.print_help() + sys.exit(1) + + opts = parser.parse_args() + #logger.info(opts) + train_svm(opts) + + +if __name__ == '__main__': + main() diff --git a/benchmarks/svm_tools/train_svm_kfold_parallel.py b/benchmarks/svm_tools/train_svm_kfold_parallel.py new file mode 100644 index 00000000..1ffbcb8b --- /dev/null +++ b/benchmarks/svm_tools/train_svm_kfold_parallel.py @@ -0,0 +1,151 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# +################################################################################ +""" +SVM training using 3-fold cross-validation. + +Relevant transfer tasks: Image Classification VOC07 and COCO2014. 
+""" + +from __future__ import division +from __future__ import absolute_import +from __future__ import unicode_literals +from __future__ import print_function + +import multiprocessing as mp +import tqdm +import argparse +import logging +import numpy as np +import os +import pickle +import sys +from sklearn.svm import LinearSVC +from sklearn.model_selection import cross_val_score + +import svm_helper + +import pdb + + +def task(cls, cost, opts, features, targets): + out_file, ap_out_file = svm_helper.get_svm_train_output_files( + cls, cost, opts.output_path) + if not (os.path.exists(out_file) and os.path.exists(ap_out_file)): + clf = LinearSVC( + C=cost, + class_weight={ + 1: 2, + -1: 1 + }, + intercept_scaling=1.0, + verbose=0, + penalty='l2', + loss='squared_hinge', + tol=0.0001, + dual=True, + max_iter=2000, + ) + cls_labels = targets[:, cls].astype(dtype=np.int32, copy=True) + cls_labels[np.where(cls_labels == 0)] = -1 + ap_scores = cross_val_score( + clf, features, cls_labels, cv=3, scoring='average_precision') + clf.fit(features, cls_labels) + np.save(ap_out_file, np.array([ap_scores.mean()])) + with open(out_file, 'wb') as fwrite: + pickle.dump(clf, fwrite) + return 0 + + +def mp_helper(args): + return task(*args) + + +def train_svm(opts): + assert os.path.exists(opts.data_file), "Data file not found. Abort!" + if not os.path.exists(opts.output_path): + os.makedirs(opts.output_path) + + features, targets = svm_helper.load_input_data(opts.data_file, + opts.targets_data_file) + # normalize the features: N x 9216 (example shape) + features = svm_helper.normalize_features(features) + + # parse the cost values for training the SVM on + costs_list = svm_helper.parse_cost_list(opts.costs_list) + + # classes for which SVM training should be done + if opts.cls_list: + cls_list = [int(cls) for cls in opts.cls_list.split(",")] + else: + num_classes = targets.shape[1] + cls_list = range(num_classes) + + num_task = len(cls_list) * len(costs_list) + args_cls = [] + args_cost = [] + for cls in cls_list: + for cost in costs_list: + args_cls.append(cls) + args_cost.append(cost) + args_opts = [opts] * num_task + args_features = [features] * num_task + args_targets = [targets] * num_task + + pool = mp.Pool(mp.cpu_count()) + for _ in tqdm.tqdm( + pool.imap_unordered( + mp_helper, + zip(args_cls, args_cost, args_opts, args_features, + args_targets)), + total=num_task): + pass + + +def main(): + parser = argparse.ArgumentParser(description='SVM model training') + parser.add_argument( + '--data_file', + type=str, + default=None, + help="Numpy file containing image features") + parser.add_argument( + '--targets_data_file', + type=str, + default=None, + help="Numpy file containing image labels") + parser.add_argument( + '--output_path', + type=str, + default=None, + help="path where to save the trained SVM models") + parser.add_argument( + '--costs_list', + type=str, + default="0.01,0.1", + help="comma separated string containing list of costs") + parser.add_argument( + '--random_seed', + type=int, + default=100, + help="random seed for SVM classifier training") + + parser.add_argument( + '--cls_list', + type=str, + default=None, + help="comma separated string list of classes to train") + if len(sys.argv) == 1: + parser.print_help() + sys.exit(1) + + opts = parser.parse_args() + train_svm(opts) + + +if __name__ == '__main__': + main() diff --git a/benchmarks/svm_tools/train_svm_low_shot.py b/benchmarks/svm_tools/train_svm_low_shot.py new file mode 100644 index 00000000..b5a0fbb2 --- /dev/null +++ 
b/benchmarks/svm_tools/train_svm_low_shot.py @@ -0,0 +1,144 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# +################################################################################ +""" +Low Shot SVM training. + +Relevant transfer tasks: Low-shot Image Classification VOC07 and Places205 low +shot samples. +""" + +from __future__ import division +from __future__ import absolute_import +from __future__ import unicode_literals +from __future__ import print_function + +import argparse +import logging +import numpy as np +import os +import pickle +import sys +from sklearn.svm import LinearSVC +from tqdm import tqdm + +import svm_helper + +import time + +# create the logger +FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s' +logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout) +logger = logging.getLogger(__name__) + + +def train_svm_low_shot(opts): + assert os.path.exists(opts.data_file), "Data file not found. Abort!" + if not os.path.exists(opts.output_path): + os.makedirs(opts.output_path) + + features, targets = svm_helper.load_input_data(opts.data_file, + opts.targets_data_file) + # normalize the features: N x 9216 (example shape) + features = svm_helper.normalize_features(features) + + # parse the cost values for training the SVM on + costs_list = svm_helper.parse_cost_list(opts.costs_list) + #logger.info('Training SVM for costs: {}'.format(costs_list)) + + # classes for which SVM testing should be done + num_classes, cls_list = svm_helper.get_low_shot_svm_classes( + targets, opts.dataset) + + for cls in tqdm(cls_list): + for cost_idx in range(len(costs_list)): + start = time.time() + cost = costs_list[cost_idx] + suffix = '_'.join( + opts.targets_data_file.split('/')[-1].split('.')[0].split('_') + [-2:]) + out_file = svm_helper.get_low_shot_output_file( + opts, cls, cost, suffix) + if os.path.exists(out_file): + logger.info('SVM model exists: {}'.format(out_file)) + else: + #logger.info('SVM model not found: {}'.format(out_file)) + #logger.info('Training model with the cost: {}'.format(cost)) + clf = LinearSVC( + C=cost, + class_weight={ + 1: 2, + -1: 1 + }, + intercept_scaling=1.0, + verbose=0, + penalty='l2', + loss='squared_hinge', + tol=0.0001, + dual=True, + max_iter=2000, + ) + train_feats, train_cls_labels = svm_helper.get_cls_feats_labels( + cls, features, targets, opts.dataset) + #num_positives = len(np.where(train_cls_labels == 1)[0]) + #num_negatives = len(np.where(train_cls_labels == -1)[0]) + + #logger.info('cls: {} has +ve: {} -ve: {} ratio: {}'.format( + # cls, num_positives, num_negatives, + # float(num_positives) / num_negatives) + #) + #logger.info('features: {} cls_labels: {}'.format( + # train_feats.shape, train_cls_labels.shape)) + clf.fit(train_feats, train_cls_labels) + #logger.info('Saving SVM model to: {}'.format(out_file)) + with open(out_file, 'wb') as fwrite: + pickle.dump(clf, fwrite) + #print("time: {:.4g} s".format(time.time() - start)) + #logger.info('All done!') + + +def main(): + parser = argparse.ArgumentParser(description='Low-shot SVM model training') + parser.add_argument( + '--data_file', + type=str, + default=None, + help="Numpy file containing image features") + parser.add_argument( + '--targets_data_file', + type=str, + default=None, + help="Numpy file containing image labels") + parser.add_argument( + '--costs_list', + type=str, + 
default="0.01,0.1", + help="comma separated string containing list of costs") + parser.add_argument( + '--output_path', + type=str, + default=None, + help="path where to save the trained SVM models") + parser.add_argument( + '--random_seed', + type=int, + default=100, + help="random seed for SVM classifier training") + parser.add_argument( + '--dataset', type=str, default="voc", help='voc | places') + if len(sys.argv) == 1: + parser.print_help() + sys.exit(1) + + opts = parser.parse_args() + + #logger.info(opts) + train_svm_low_shot(opts) + + +if __name__ == '__main__': + main() diff --git a/benchmarks/svm_tools/train_svm_low_shot_parallel.py b/benchmarks/svm_tools/train_svm_low_shot_parallel.py new file mode 100644 index 00000000..f3a0843d --- /dev/null +++ b/benchmarks/svm_tools/train_svm_low_shot_parallel.py @@ -0,0 +1,145 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# +################################################################################ +""" +Low Shot SVM training. + +Relevant transfer tasks: Low-shot Image Classification VOC07 and Places205 low +shot samples. +""" + +from __future__ import division +from __future__ import absolute_import +from __future__ import unicode_literals +from __future__ import print_function + +import multiprocessing as mp +import tqdm +import argparse +import logging +import numpy as np +import os +import pickle +import sys +from sklearn.svm import LinearSVC + +import svm_helper + +import pdb + + +def task(cls, cost, opts, features, targets): + suffix = '_'.join( + opts.targets_data_file.split('/')[-1].split('.')[0].split('_')[-2:]) + out_file = svm_helper.get_low_shot_output_file(opts, cls, cost, suffix) + if not os.path.exists(out_file): + clf = LinearSVC( + C=cost, + class_weight={ + 1: 2, + -1: 1 + }, + intercept_scaling=1.0, + verbose=0, + penalty='l2', + loss='squared_hinge', + tol=0.0001, + dual=True, + max_iter=2000, + ) + train_feats, train_cls_labels = svm_helper.get_cls_feats_labels( + cls, features, targets, opts.dataset) + clf.fit(train_feats, train_cls_labels) + #cls_labels = targets[:, cls].astype(dtype=np.int32, copy=True) + #cls_labels[np.where(cls_labels == 0)] = -1 + #clf.fit(features, cls_labels) + with open(out_file, 'wb') as fwrite: + pickle.dump(clf, fwrite) + return 0 + + +def mp_helper(args): + return task(*args) + + +def train_svm_low_shot(opts): + assert os.path.exists(opts.data_file), "Data file not found. Abort!" 
+ if not os.path.exists(opts.output_path): + os.makedirs(opts.output_path) + + features, targets = svm_helper.load_input_data(opts.data_file, + opts.targets_data_file) + # normalize the features: N x 9216 (example shape) + features = svm_helper.normalize_features(features) + + # parse the cost values for training the SVM on + costs_list = svm_helper.parse_cost_list(opts.costs_list) + + # classes for which SVM testing should be done + num_classes, cls_list = svm_helper.get_low_shot_svm_classes( + targets, opts.dataset) + + num_task = len(cls_list) * len(costs_list) + args_cls = [] + args_cost = [] + for cls in cls_list: + for cost in costs_list: + args_cls.append(cls) + args_cost.append(cost) + args_opts = [opts] * num_task + args_features = [features] * num_task + args_targets = [targets] * num_task + + pool = mp.Pool(mp.cpu_count()) + for _ in tqdm.tqdm( + pool.imap_unordered( + mp_helper, + zip(args_cls, args_cost, args_opts, args_features, + args_targets)), + total=num_task): + pass + + +def main(): + parser = argparse.ArgumentParser(description='Low-shot SVM model training') + parser.add_argument( + '--data_file', + type=str, + default=None, + help="Numpy file containing image features") + parser.add_argument( + '--targets_data_file', + type=str, + default=None, + help="Numpy file containing image labels") + parser.add_argument( + '--costs_list', + type=str, + default="0.01,0.1", + help="comma separated string containing list of costs") + parser.add_argument( + '--output_path', + type=str, + default=None, + help="path where to save the trained SVM models") + parser.add_argument( + '--random_seed', + type=int, + default=100, + help="random seed for SVM classifier training") + parser.add_argument( + '--dataset', type=str, default="voc", help='voc | places') + if len(sys.argv) == 1: + parser.print_help() + sys.exit(1) + + opts = parser.parse_args() + train_svm_low_shot(opts) + + +if __name__ == '__main__': + main() diff --git a/configs/base.py b/configs/base.py new file mode 100644 index 00000000..f0695d52 --- /dev/null +++ b/configs/base.py @@ -0,0 +1,18 @@ +train_cfg = {} +test_cfg = {} +optimizer_config = dict() # grad_clip, coalesce, bucket_size_mb +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +dist_params = dict(backend='nccl') +cudnn_benchmark = True +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/classification/cifar10/r50.py b/configs/classification/cifar10/r50.py new file mode 100644 index 00000000..ff2fb8a9 --- /dev/null +++ b/configs/classification/cifar10/r50.py @@ -0,0 +1,59 @@ +_base_ = '../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + backbone=dict( + type='ResNet', + depth=50, + out_indices=[4], # 4: stage-4 + norm_cfg=dict(type='BN')), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, num_classes=10)) +# dataset settings +data_source_cfg = dict(type='Cifar10', root='data/cifar/') +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201]) +train_pipeline = [ + dict(type='RandomCrop', size=32, padding=4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=128, + workers_per_gpu=2, + train=dict( + 
type=dataset_type, + data_source=dict(split='train', **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict(split='test', **data_source_cfg), + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_source=dict(split='test', **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=10, + imgs_per_gpu=128, + workers_per_gpu=8, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005) +# learning policy +lr_config = dict(policy='step', step=[150, 250]) +checkpoint_config = dict(interval=50) +# runtime settings +total_epochs = 350 diff --git a/configs/classification/imagnet/r50.py b/configs/classification/imagnet/r50.py new file mode 100644 index 00000000..6425dbf7 --- /dev/null +++ b/configs/classification/imagnet/r50.py @@ -0,0 +1,68 @@ +_base_ = '../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + backbone=dict( + type='ResNet', + depth=50, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='SyncBN')), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, + num_classes=1000)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/imagenet/meta/train_labeled.txt' +data_train_root = 'data/imagenet/train' +data_test_list = 'data/imagenet/meta/val_labeled.txt' +data_test_root = 'data/imagenet/val' +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=32, # total 256 + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=10, + imgs_per_gpu=32, + workers_per_gpu=2, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001) +# learning policy +lr_config = dict(policy='step', step=[30, 60, 90]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 90 diff --git a/configs/linear_classification/imagenet/r50_multihead.py b/configs/linear_classification/imagenet/r50_multihead.py new file mode 100644 index 00000000..05b8f168 --- /dev/null +++ b/configs/linear_classification/imagenet/r50_multihead.py @@ -0,0 +1,89 @@ +_base_ = '../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + frozen_backbone=True, + with_sobel=False, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[0, 1, 2, 3, 4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN')), + head=dict( + type='MultiClsHead', + pool_type='specified', + in_indices=[0, 1, 2, 3, 4], + with_last_layer_unpool=True, + 
backbone='resnet50', + norm_cfg=dict(type='BN', momentum=0.1, affine=False), + num_classes=1000)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/imagenet/meta/train_labeled.txt' +data_train_root = 'data/imagenet/train' +data_test_list = 'data/imagenet/meta/val_labeled.txt' +data_test_root = 'data/imagenet/val' +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.4, + hue=0.), + dict(type='ToTensor'), + dict(type='Lighting'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=256, # total 256 + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=10, + imgs_per_gpu=128, + workers_per_gpu=4, + eval_param=dict(topk=(1, ))) +] +# optimizer +optimizer = dict( + type='SGD', + lr=0.01, + momentum=0.9, + weight_decay=0.0001, + paramwise_options=dict(norm_decay_mult=0.), + nesterov=True) +# learning policy +lr_config = dict(policy='step', step=[30, 60, 90]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 90 diff --git a/configs/linear_classification/places205/r50_multihead.py b/configs/linear_classification/places205/r50_multihead.py new file mode 100644 index 00000000..135858d0 --- /dev/null +++ b/configs/linear_classification/places205/r50_multihead.py @@ -0,0 +1,89 @@ +_base_ = '../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + frozen_backbone=True, + with_sobel=False, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[0, 1, 2, 3, 4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN')), + head=dict( + type='MultiClsHead', + pool_type='specified', + in_indices=[0, 1, 2, 3, 4], + with_last_layer_unpool=True, + backbone='resnet50', + norm_cfg=dict(type='BN', momentum=0.1, affine=False), + num_classes=205)) +# dataset settings +data_source_cfg = dict( + type='Places205', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/places205/meta/train_labeled.txt' +data_train_root = 'data/places205/train' +data_test_list = 'data/places205/meta/val_labeled.txt' +data_test_root = 'data/places205/val' +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.4, + hue=0.), + dict(type='ToTensor'), + dict(type='Lighting'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data 
= dict( + imgs_per_gpu=256, # total 256 + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=10, + imgs_per_gpu=128, + workers_per_gpu=4, + eval_param=dict(topk=(1, ))) +] +# optimizer +optimizer = dict( + type='SGD', + lr=0.01, + momentum=0.9, + weight_decay=0.0001, + paramwise_options=dict(norm_decay_mult=0.), + nesterov=True) +# learning policy +lr_config = dict(policy='step', step=[30, 60, 90]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 90 diff --git a/configs/selfsup/deepcluster/r50.py b/configs/selfsup/deepcluster/r50.py new file mode 100644 index 00000000..63d5f301 --- /dev/null +++ b/configs/selfsup/deepcluster/r50.py @@ -0,0 +1,88 @@ +_base_ = '../../base.py' +# model settings +num_classes = 10000 +model = dict( + type='DeepCluster', + pretrained=None, + with_sobel=True, + backbone=dict( + type='ResNet', + depth=50, + in_channels=2, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN')), + neck=dict(type='AvgPoolNeck'), + head=dict( + type='ClsHead', + with_avg_pool=False, # already has avgpool in the neck + in_channels=2048, + num_classes=num_classes)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=True, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/imagenet/meta/train.txt' +data_train_root = 'data/imagenet/train' +dataset_type = 'DeepClusterDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict(type='RandomRotation', degrees=2), + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=1.0, + hue=0.5), + dict(type='RandomGrayscale', p=0.2), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +extract_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=64, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='DeepClusterHook', + extractor=dict( + imgs_per_gpu=128, + workers_per_gpu=8, + dataset=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, + root=data_train_root, + **data_source_cfg), + pipeline=extract_pipeline)), + clustering=dict(type='Kmeans', k=num_classes, pca_dim=256), + unif_sampling=True, + reweight=False, + reweight_pow=0.5, + initial=True, # call initially + interval=1) +] +# optimizer +optimizer = dict( + type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00001, + nesterov=False, + paramwise_options={'\Ahead.': dict(momentum=0.)}) +# learning policy +lr_config = dict(policy='step', step=[400]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 480 diff --git a/configs/selfsup/moco/r50_v1.py b/configs/selfsup/moco/r50_v1.py new file mode 100644 index 00000000..84de7c88 --- /dev/null +++ b/configs/selfsup/moco/r50_v1.py @@ -0,0 +1,59 @@ +_base_ = 
'../../base.py' +# model settings +model = dict( + type='MOCO', + pretrained=None, + queue_len=65536, + feat_dim=128, + momentum=0.999, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN')), + neck=dict( + type='LinearNeck', + in_channels=2048, + out_channels=128, + with_avg_pool=True), + head=dict(type='ContrastiveHead', temperature=0.07)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/imagenet/meta/train.txt' +data_train_root = 'data/imagenet/train' +dataset_type = 'ContrastiveDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224, scale=(0.2, 1.)), + dict(type='RandomGrayscale', p=0.2), + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.4, + hue=0.4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=32, # total 32*8=256 + workers_per_gpu=4, + drop_last=True, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=0.03, weight_decay=0.0001, momentum=0.9) +# learning policy +lr_config = dict(policy='step', step=[120, 160]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 200 diff --git a/configs/selfsup/moco/r50_v2.py b/configs/selfsup/moco/r50_v2.py new file mode 100644 index 00000000..8c2bd63f --- /dev/null +++ b/configs/selfsup/moco/r50_v2.py @@ -0,0 +1,75 @@ +_base_ = '../../base.py' +# model settings +model = dict( + type='MOCO', + pretrained=None, + queue_len=65536, + feat_dim=128, + momentum=0.999, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN')), + neck=dict( + type='NonLinearNeckV1', + in_channels=2048, + hid_channels=2048, + out_channels=128, + with_avg_pool=True), + head=dict(type='ContrastiveHead', temperature=0.2)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/imagenet/meta/train.txt' +data_train_root = 'data/imagenet/train' +dataset_type = 'ContrastiveDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224, scale=(0.2, 1.)), + dict( + type='RandomAppliedTrans', + transforms=[ + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.4, + hue=0.4) + ], + p=0.8), + dict(type='RandomGrayscale', p=0.2), + dict( + type='RandomAppliedTrans', + transforms=[ + dict( + type='GaussianBlur', + sigma_min=0.1, + sigma_max=2.0, + kernel_size=23) + ], + p=0.5), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=32, # total 32*8=256 + workers_per_gpu=4, + drop_last=True, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=0.03, weight_decay=0.0001, momentum=0.9) +# learning policy +lr_config = dict(policy='CosineAnealing', min_lr=0.) 
+checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 200 diff --git a/configs/selfsup/npid/r50.py b/configs/selfsup/npid/r50.py new file mode 100644 index 00000000..f1b17088 --- /dev/null +++ b/configs/selfsup/npid/r50.py @@ -0,0 +1,64 @@ +_base_ = '../../base.py' +# model settings +model = dict( + type='NPID', + pretrained=None, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='SyncBN')), + neck=dict( + type='LinearNeck', + in_channels=2048, + out_channels=128, + with_avg_pool=True), + head=dict(type='ContrastiveHead', temperature=0.07), + memory_bank=dict( + type='SimpleMemory', length=1281167, feat_dim=128, momentum=0.5)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/imagenet/meta/train.txt' +data_train_root = 'data/imagenet/train' +dataset_type = 'NPIDDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224, scale=(0.2, 1.)), + dict(type='RandomGrayscale', p=0.2), + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.4, + hue=0.4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=32, # total 32*8 + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.03, weight_decay=0.0001, momentum=0.9, nesterov=False) +# learning policy +lr_config = dict(policy='step', step=[120, 160]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 200 diff --git a/configs/selfsup/rotation_pred/r50.py b/configs/selfsup/rotation_pred/r50.py new file mode 100644 index 00000000..cb34e48c --- /dev/null +++ b/configs/selfsup/rotation_pred/r50.py @@ -0,0 +1,64 @@ +_base_ = '../../base.py' +# model settings +model = dict( + type='RotationPred', + pretrained=None, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='SyncBN')), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, num_classes=4)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/imagenet/meta/train.txt' +data_train_root = 'data/imagenet/train' +data_test_list = 'data/imagenet/meta/val.txt' +data_test_root = 'data/imagenet/val' +dataset_type = 'RotationPredDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=16, # (16*4) x 8 = 512 + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( 
+ type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.2, momentum=0.9, weight_decay=0.0001, nesterov=False) +# learning policy +lr_config = dict( + policy='step', + step=[30, 50], + warmup='linear', + warmup_iters=5, # 5 ep + warmup_ratio=0.1, + warmup_by_epoch=True) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 70 diff --git a/configs/selfsup/simclr/r50_bs256.py b/configs/selfsup/simclr/r50_bs256.py new file mode 100644 index 00000000..cb087730 --- /dev/null +++ b/configs/selfsup/simclr/r50_bs256.py @@ -0,0 +1,77 @@ +_base_ = '../../base.py' +# model settings +model = dict( + type='SimCLR', + pretrained=None, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='SyncBN')), + neck=dict( + type='NonLinearNeckV1', + in_channels=2048, + hid_channels=2048, + out_channels=128, + with_avg_pool=True), + head=dict(type='ContrastiveHead', temperature=0.1)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=True, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/imagenet/meta/train.txt' +data_train_root = 'data/imagenet/train' +dataset_type = 'ContrastiveDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict( + type='RandomAppliedTrans', + transforms=[ + dict( + type='ColorJitter', + brightness=0.8, + contrast=0.8, + saturation=0.8, + hue=0.2) + ], + p=0.8), + dict(type='RandomGrayscale', p=0.2), + dict( + type='RandomAppliedTrans', + transforms=[ + dict( + type='GaussianBlur', + sigma_min=0.1, + sigma_max=2.0, + kernel_size=23) + ], + p=0.5), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=32, # total 32*8 + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline)) +# optimizer +optimizer = dict(type='LARS', lr=0.3, weight_decay=0.000001, momentum=0.9) +# learning policy +lr_config = dict( + policy='CosineAnealing', + min_lr=0., + warmup='linear', + warmup_iters=10, + warmup_ratio=0.01, + warmup_by_epoch=True) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 200 diff --git a/configs/selfsup/simclr/r50_bs512.py b/configs/selfsup/simclr/r50_bs512.py new file mode 100644 index 00000000..110f6f63 --- /dev/null +++ b/configs/selfsup/simclr/r50_bs512.py @@ -0,0 +1,77 @@ +_base_ = '../../base.py' +# model settings +model = dict( + type='SimCLR', + pretrained=None, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='SyncBN')), + neck=dict( + type='NonLinearNeckV1', + in_channels=2048, + hid_channels=2048, + out_channels=128, + with_avg_pool=True), + head=dict(type='ContrastiveHead', temperature=0.1)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/imagenet/meta/train.txt' +data_train_root = 'data/imagenet/train' +dataset_type = 'ContrastiveDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + 
dict(type='RandomHorizontalFlip'), + dict( + type='RandomAppliedTrans', + transforms=[ + dict( + type='ColorJitter', + brightness=0.8, + contrast=0.8, + saturation=0.8, + hue=0.2) + ], + p=0.8), + dict(type='RandomGrayscale', p=0.2), + dict( + type='RandomAppliedTrans', + transforms=[ + dict( + type='GaussianBlur', + sigma_min=0.1, + sigma_max=2.0, + kernel_size=23) + ], + p=0.5), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=64, # total 64*8 + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline)) +# optimizer +optimizer = dict(type='LARS', lr=0.6, weight_decay=0.000001, momentum=0.9) +# learning policy +lr_config = dict( + policy='CosineAnealing', + min_lr=0., + warmup='linear', + warmup_iters=10, + warmup_ratio=0.01, + warmup_by_epoch=True) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 200 diff --git a/configs/semisup_classification/imagenet_10percent/r50.py b/configs/semisup_classification/imagenet_10percent/r50.py new file mode 100644 index 00000000..2313c322 --- /dev/null +++ b/configs/semisup_classification/imagenet_10percent/r50.py @@ -0,0 +1,69 @@ +_base_ = '../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + backbone=dict( + type='ResNet', + depth=50, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='SyncBN')), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, + num_classes=1000)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=True, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/imagenet/meta/train_labeled_10percent.txt' +data_train_root = 'data/imagenet/train' +data_test_list = 'data/imagenet/meta/val_labeled.txt' +data_test_root = 'data/imagenet/val' +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=32, # total 256 + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=2, + imgs_per_gpu=32, + workers_per_gpu=2, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005, + paramwise_options={'\Ahead.': dict(lr_mult=10)}) +# learning policy +lr_config = dict(policy='step', step=[18, 24], gamma=0.2) +checkpoint_config = dict(interval=2) +# runtime settings +total_epochs = 30 diff --git a/configs/semisup_classification/imagenet_1percent/r50.py b/configs/semisup_classification/imagenet_1percent/r50.py new file mode 100644 index 00000000..ab5de513 --- /dev/null +++ b/configs/semisup_classification/imagenet_1percent/r50.py @@ -0,0 +1,69 @@ +_base_ = '../../base.py' +# model settings +model = 
dict(
+    type='Classification',
+    pretrained=None,
+    backbone=dict(
+        type='ResNet',
+        depth=50,
+        out_indices=[4],  # 0: conv-1, x: stage-x
+        norm_cfg=dict(type='SyncBN')),
+    head=dict(
+        type='ClsHead', with_avg_pool=True, in_channels=2048,
+        num_classes=1000))
+# dataset settings
+data_source_cfg = dict(
+    type='ImageNet',
+    memcached=True,
+    mclient_path='/mnt/lustre/share/memcached_client')
+data_train_list = 'data/imagenet/meta/train_labeled_1percent.txt'
+data_train_root = 'data/imagenet/train'
+data_test_list = 'data/imagenet/meta/val_labeled.txt'
+data_test_root = 'data/imagenet/val'
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+    dict(type='RandomResizedCrop', size=224),
+    dict(type='RandomHorizontalFlip'),
+    dict(type='ToTensor'),
+    dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+    dict(type='Resize', size=256),
+    dict(type='CenterCrop', size=224),
+    dict(type='ToTensor'),
+    dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+    imgs_per_gpu=32,  # total 256
+    workers_per_gpu=2,
+    train=dict(
+        type=dataset_type,
+        data_source=dict(
+            list_file=data_train_list, root=data_train_root,
+            **data_source_cfg),
+        pipeline=train_pipeline),
+    val=dict(
+        type=dataset_type,
+        data_source=dict(
+            list_file=data_test_list, root=data_test_root, **data_source_cfg),
+        pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+    dict(
+        type='ValidateHook',
+        dataset=data['val'],
+        initial=True,
+        interval=2,
+        imgs_per_gpu=32,
+        workers_per_gpu=2,
+        eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005,
+                 paramwise_options={'\Ahead.': dict(lr_mult=100)})
+# learning policy
+lr_config = dict(policy='step', step=[12, 16], gamma=0.2)
+checkpoint_config = dict(interval=2)
+# runtime settings
+total_epochs = 20
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
new file mode 100644
index 00000000..76877891
--- /dev/null
+++ b/docs/CHANGELOG.md
@@ -0,0 +1,2 @@
+## Changelog
+
diff --git a/docs/GETTING_STARTED.md b/docs/GETTING_STARTED.md
new file mode 100644
index 00000000..c13a479a
--- /dev/null
+++ b/docs/GETTING_STARTED.md
@@ -0,0 +1,192 @@
+# Getting Started
+
+This page provides basic tutorials about the usage of OpenSelfSup.
+For installation instructions, please see [INSTALL.md](INSTALL.md).
+
+## Train existing methods
+
+**Note**: The default learning rate in config files is for 8 GPUs (except for those under `configs/linear_classification`, which use 1 GPU). If you use a different number of GPUs, the total batch size changes in proportion, so you have to scale the learning rate following `new_lr = old_lr * new_ngpus / old_ngpus` (see the sketch after the argument list below). We recommend using `tools/dist_train.sh` even with 1 GPU, since some methods do not support non-distributed training.
+
+### Train with single/multiple GPUs
+```shell
+# checkpoints and logs are saved in the same sub-directory as the config file under `work_dirs/` by default.
+bash tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [optional arguments]
+```
+
+An example:
+```shell
+bash tools/dist_train.sh configs/selfsup/odc/r50_v1.py 8
+```
+
+Optional arguments are:
+- `--work_dir ${WORK_DIR}`: Override the default working directory.
+- `--resume_from ${CHECKPOINT_FILE}`: Resume from a previous checkpoint file.
+- `--pretrained ${PRETRAIN_WEIGHTS}`: Load pretrained weights for the backbone.
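+
+As a concrete illustration of the scaling rule in the note above, here is a short sketch in plain Python. The numbers are hypothetical; the values in your config file are the source of truth.
+
+```python
+# Linear learning-rate scaling when changing the GPU count
+# (per-GPU batch size kept fixed). Example numbers are hypothetical.
+def scale_lr(old_lr, old_ngpus, new_ngpus):
+    return old_lr * new_ngpus / old_ngpus
+
+# A config tuned for 8 GPUs with lr=0.03, now run on 4 GPUs:
+print(scale_lr(0.03, 8, 4))  # -> 0.015
+```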
+
+Alternatively, if you run OpenSelfSup on a cluster managed with [slurm](https://slurm.schedmd.com/):
+```shell
+SRUN_ARGS="${SRUN_ARGS}" bash tools/srun_train.sh ${PARTITION} ${CONFIG_FILE} ${GPU_NUM} [optional arguments]
+```
+
+An example:
+```shell
+SRUN_ARGS="-w xx.xx.xx.xx" bash tools/srun_train.sh Dummy configs/selfsup/odc/r50_v1.py 8
+```
+
+### Launch multiple jobs on a single machine
+
+If you launch multiple jobs on a single machine, e.g., 2 jobs of 4-GPU training on a machine with 8 GPUs,
+you need to specify different ports (29500 by default) for each job to avoid communication conflicts.
+
+If you use `dist_train.sh` to launch training jobs:
+```shell
+CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 bash tools/dist_train.sh ${CONFIG_FILE} 4
+CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 bash tools/dist_train.sh ${CONFIG_FILE} 4
+```
+
+If you launch training jobs with slurm:
+```shell
+GPUS_PER_NODE=4 bash tools/srun_train.sh ${PARTITION} ${CONFIG_FILE} 4 --port 29500
+GPUS_PER_NODE=4 bash tools/srun_train.sh ${PARTITION} ${CONFIG_FILE} 4 --port 29501
+```
+
+## Benchmarks
+
+We provide several standard benchmarks to evaluate representation learning.
+
+### VOC07 Linear SVM & Low-shot Linear SVM
+
+```shell
+bash benchmarks/dist_test_svm.sh ${CONFIG_FILE} ${EPOCH} ${FEAT_LIST} ${GPU_NUM}
+```
+Arguments:
+- `${FEAT_LIST}` is a string specifying which features, from layer1 to layer5, to evaluate; e.g., to evaluate layer5 only, `FEAT_LIST` is `feat5`; to evaluate all features, `FEAT_LIST` is `feat1 feat2 feat3 feat4 feat5` (separated by spaces).
+- `$GPU_NUM` is the number of GPUs used to extract features.
+
+### ImageNet / Places205 Linear Classification
+
+```shell
+bash benchmarks/dist_test_cls.sh ${CONFIG_FILE} ${EPOCH} ${DATASET} [optional arguments]
+```
+Arguments:
+- `${DATASET}` in `['imagenet', 'places205']`.
+- Optional arguments include `--resume_from ${CHECKPOINT_FILE}`, which resumes training from a previous checkpoint file.
+
+### VOC07+12 / COCO17 Object Detection
+
+1. First, extract backbone weights:
+
+    ```shell
+    python tools/extract_backbone_weights.py ${CHECKPOINT} --save-path ${WEIGHT_FILE}
+    ```
+    Arguments:
+    - `CHECKPOINT`: the checkpoint file of a selfsup method, named as `epoch_*.pth`.
+    - `WEIGHT_FILE`: the output backbone weights file, e.g., `odc_v1.pth`.
+
+2. Next, run detection. For more details on setting up the environment for detection, please refer to [here](benchmarks/detection/README.md).
+```shell
+conda activate detectron2
+cd benchmarks/detection
+python convert-pretrain-to-detectron2.py ${WEIGHT_FILE} ${OUTPUT_FILE} # must use .pkl as the output extension.
+bash run.sh ${DET_CFG} ${OUTPUT_FILE}
+```
+Arguments:
+- `DET_CFG`: the detectron2 config file, usually `configs/pascal_voc_R_50_C4_24k_moco.yaml`.
+- `OUTPUT_FILE`: converted backbone weights file, e.g., `odc_v1.pkl`.
+
+**Note**:
+- This benchmark must use 8 GPUs, as in the default setting from MoCo.
+- Please report the mean of 5 trials in your official paper, according to MoCo.
+- DeepCluster, which uses a Sobel layer, is not supported by detectron2.
+
+### Publish a model
+
+1. Extract the backbone weights as mentioned before. You don't have to extract them again if you've already done it in the benchmark step.
+
+```shell
+python tools/extract_backbone_weights.py ${CHECKPOINT} --save-path ${WEIGHT_FILE}
+```
+
+2. Compute the hash of the weight file and append the hash id to the filename.
+
+```shell
+python tools/publish_model.py ${WEIGHT_FILE}
+```
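+
+For reference, the renaming convention of this step can be reproduced by hand. The sketch below illustrates the idea only and is not the implementation of `tools/publish_model.py`; it assumes a sha256 hash truncated to 8 hex characters and a `.pth` input file.
+
+```python
+import hashlib
+import shutil
+import sys
+
+
+def publish(weight_file):
+    # hash the file content and append the first 8 hex digits to the name
+    with open(weight_file, 'rb') as f:
+        sha = hashlib.sha256(f.read()).hexdigest()
+    out_file = weight_file.replace('.pth', '-{}.pth'.format(sha[:8]))
+    shutil.copy(weight_file, out_file)
+    print('Saved {}'.format(out_file))
+
+
+if __name__ == '__main__':
+    publish(sys.argv[1])
+```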
+
+## How-to
+
+### Use a new dataset
+
+1. Write a data source file under `openselfsup/datasets/data_sources/`. You may refer to the existing ones.
+
+2. Create new config files for your experiments.
+
+### Design your own methods
+
+#### What you need to do
+
+1. Create a dataset file under `openselfsup/datasets/` (or better, reuse an existing one);
+2. Create a model file under `openselfsup/models/`. The model typically contains:
+    i) backbone (required): maps images to deep features from different depths of the network.
+    ii) neck (optional): maps deep features to compact feature vectors.
+    iii) head (optional): defines the loss functions.
+    iv) memory_bank (optional): defines the memory banks.
+3. Create a config file under `configs/` and set up the configs;
+4. Create a hook file under `openselfsup/hooks/` if your method requires additional operations before running, every several iterations, every several epochs, or after running.
+
+You may refer to existing modules under the respective folders.
+
+#### Features that may facilitate your implementation
+
+* Decoupled data source and dataset.
+
+Since a dataset is tied to a specific task while a data source is general, we decouple data source and dataset in OpenSelfSup.
+
+```python
+data = dict(
+    train=dict(type='ContrastiveDataset',
+               data_source=dict(type='ImageNet', list_file='xx', root='xx'),
+               pipeline=train_pipeline),
+    val=dict(...),
+)
+```
+
+* Configure data augmentations in the config file.
+
+The augmentations are the same as in `torchvision.transforms`; `torchvision.transforms.RandomApply` corresponds to `RandomAppliedTrans`. `Lighting` and `GaussianBlur` are additionally implemented.
+
+```python
+train_pipeline = [
+    dict(type='RandomResizedCrop', size=224),
+    dict(type='RandomAppliedTrans',
+         transforms=[
+             dict(type='GaussianBlur', sigma_min=0.1, sigma_max=2.0, kernel_size=23)],
+         p=0.5),
+    dict(type='ToTensor'),
+    dict(type='Normalize', **img_norm_cfg)
+]
+```
+
+* Parameter-wise optimization options.
+
+You may specify optimization parameters, including lr, momentum and weight_decay, for a certain group of parameters in the config file with `paramwise_options`. `paramwise_options` is a dict whose keys are regular expressions and whose values are options. Options include 6 fields: lr, lr_mult, momentum, momentum_mult, weight_decay, weight_decay_mult.
+
+```python
+paramwise_options = {
+    '(bn|gn)(\d+)?.(weight|bias)': dict(weight_decay_mult=0.1),
+    '\Ahead.': dict(lr_mult=10, momentum=0)}
+optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,
+                     weight_decay=0.0001,
+                     paramwise_options=paramwise_options)
+```
+
+* Configure custom hooks in the config file.
+
+The hooks will be called in order. For hook design, please refer to [odc_hook.py](openselfsup/hooks/odc_hook.py) as an example.
+
+```python
+custom_hooks = [
+    dict(type='DeepClusterHook', **kwargs1),
+    dict(type='ODCHook', **kwargs2),
+]
+```
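+
+For orientation, below is a minimal sketch of what such a hook can look like. It assumes mmcv's `Hook` base class and the `HOOKS` registry under `openselfsup/hooks/`; the class name, the `interval` argument and the logged message are hypothetical, so treat it as a template rather than a drop-in hook.
+
+```python
+from mmcv.runner import Hook
+
+from .registry import HOOKS
+
+
+@HOOKS.register_module
+class ToyHook(Hook):
+    """Hypothetical hook that runs an extra step every few epochs."""
+
+    def __init__(self, interval=1):
+        self.interval = interval
+
+    def after_train_epoch(self, runner):
+        # every_n_epochs is provided by the mmcv Hook base class
+        if not self.every_n_epochs(runner, self.interval):
+            return
+        runner.logger.info('ToyHook fired at epoch {}'.format(runner.epoch))
+```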
+
+#### Features that may facilitate your implementation
+
+* Decoupled data source and dataset.
+
+Since a dataset is tied to a specific task while a data source is general, we decouple data source and dataset in OpenSelfSup.
+
+```python
+data = dict(
+    train=dict(type='ContrastiveDataset',
+               data_source=dict(type='ImageNet', list_file='xx', root='xx'),
+               pipeline=train_pipeline),
+    val=dict(...),
+)
+```
+
+* Configure data augmentations in the config file.
+
+The augmentations are the same as those in `torchvision.transforms`. `torchvision.transforms.RandomApply` corresponds to `RandomAppliedTrans`. `Lighting` and `GaussianBlur` are additionally implemented.
+
+```python
+train_pipeline = [
+    dict(type='RandomResizedCrop', size=224),
+    dict(type='RandomAppliedTrans',
+         transforms=[
+             dict(type='GaussianBlur', sigma_min=0.1, sigma_max=2.0, kernel_size=23)],
+         p=0.5),
+    dict(type='ToTensor'),
+    dict(type='Normalize', **img_norm_cfg)
+]
+```
+
+* Parameter-wise optimization options.
+
+You may specify optimization parameters, including lr, momentum, and weight_decay, for a certain group of parameters in the config file with `paramwise_options`. `paramwise_options` is a dict whose keys are regular expressions and whose values are options. The options include 6 fields: lr, lr_mult, momentum, momentum_mult, weight_decay, weight_decay_mult.
+
+```python
+paramwise_options = {
+    '(bn|gn)(\d+)?.(weight|bias)': dict(weight_decay_mult=0.1),
+    '\Ahead.': dict(lr_mult=10, momentum=0)}
+optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,
+                     weight_decay=0.0001,
+                     paramwise_options=paramwise_options)
+```
+
+* Configure custom hooks in the config file.
+
+The hooks will be called in order. For hook design, please refer to [odc_hook.py](openselfsup/hooks/odc_hook.py) as an example.
+
+```python
+custom_hooks = [
+    dict(type='DeepClusterHook', **kwargs1),
+    dict(type='ODCHook', **kwargs2),
+]
+```
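+
+For step 4 above, a hedged skeleton of a custom hook is sketched below, assuming the registry layout of `openselfsup/hooks/` and mmcv's `Hook` base class (cf. [odc_hook.py](openselfsup/hooks/odc_hook.py)). The class name and the trigger logic are illustrative only.
+
+```python
+from mmcv.runner import Hook
+
+from .registry import HOOKS
+
+
+@HOOKS.register_module
+class MyHook(Hook):
+    """Hypothetical hook running an extra operation every `interval` epochs."""
+
+    def __init__(self, interval=1):
+        self.interval = interval
+
+    def before_run(self, runner):
+        # One-off setup before training starts, e.g., building extractors.
+        pass
+
+    def after_train_epoch(self, runner):
+        # `every_n_epochs` is a helper on mmcv's Hook base class.
+        if not self.every_n_epochs(runner, self.interval):
+            return
+        runner.logger.info('MyHook triggered at epoch {}'.format(runner.epoch))
+```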
diff --git a/docs/INSTALL.md b/docs/INSTALL.md
new file mode 100644
index 00000000..f604b733
--- /dev/null
+++ b/docs/INSTALL.md
@@ -0,0 +1,146 @@
+## Installation
+
+### Requirements
+
+- Linux (Windows is not officially supported)
+- Python 3.5+
+- PyTorch 1.1 or higher
+- CUDA 9.0 or higher
+- NCCL 2
+- GCC 4.9 or higher
+- [mmcv](https://github.com/open-mmlab/mmcv)
+
+We have tested the following versions of OS and software:
+
+- OS: Ubuntu 16.04/18.04 and CentOS 7.2
+- CUDA: 9.0/9.2/10.0/10.1
+- NCCL: 2.1.15/2.2.13/2.3.7/2.4.2
+- GCC(G++): 4.9/5.3/5.4/7.3
+
+### Install openselfsup
+
+a. Create a conda virtual environment and activate it.
+
+```shell
+conda create -n open-mmlab python=3.7 -y
+conda activate open-mmlab
+```
+
+b. Install PyTorch and torchvision following the [official instructions](https://pytorch.org/), e.g.,
+
+```shell
+conda install pytorch torchvision -c pytorch
+```
+
+c. Install other third-party libraries.
+
+```shell
+conda install faiss-gpu cudatoolkit=10.0 -c pytorch # optional for DeepCluster and ODC, assuming CUDA=10.0
+```
+
+d. Clone the openselfsup repository.
+
+```shell
+git clone https://github.com/open-mmlab/openselfsup.git
+cd openselfsup
+```
+
+e. Install.
+
+```shell
+pip install -v -e .  # or "python setup.py develop"
+```
+
+Note:
+
+1. The git commit id will be written to the version number in step e, e.g., 0.6.0+2e7045c. The version will also be saved in trained models.
+
+2. Following the above instructions, openselfsup is installed in `dev` mode: any local modifications made to the code take effect without reinstalling (unless you submit some commits and want to update the version number).
+
+3. If you would like to use `opencv-python-headless` instead of `opencv-python`, you can install it before installing MMCV.
+
+4. Some dependencies are optional. Simply running `pip install -v -e .` will only install the minimum runtime requirements. To use optional dependencies like `albumentations` and `imagecorruptions`, either install them manually with `pip install -r requirements/optional.txt` or specify the desired extras when calling `pip` (e.g., `pip install -v -e .[optional]`). Valid keys for the extras field are: `all`, `tests`, `build`, and `optional`.
+
+
+### Prepare datasets
+
+It is recommended to symlink your dataset root (assuming $YOUR_DATA_ROOT) to `$OPENSELFSUP/data`.
+If your folder structure is different, you may need to change the corresponding paths in config files.
+
+#### Prepare PASCAL VOC
+
+Assuming that you usually store datasets in `$YOUR_DATA_ROOT` (e.g., for me, `/home/xhzhan/data/`).
+This script will automatically download PASCAL VOC 2007 into `$YOUR_DATA_ROOT`, prepare the required files, create a folder `data` under `$OPENSELFSUP` and make a symlink `VOCdevkit`.
+
+```shell
+cd $OPENSELFSUP
+bash tools/prepare_data/prepare_voc07_cls.sh $YOUR_DATA_ROOT
+```
+
+#### Prepare ImageNet and Places205
+
+Taking ImageNet as an example, you need to: 1) download ImageNet; 2) create list files under $IMAGENET/meta/ -- `train.txt` contains an image file name in each line, `train_labeled.txt` contains `filename[space]label\n` in each line (a sketch that generates these meta files follows the folder structure below); 3) create a symlink under `$OPENSELFSUP/data/`.
+
+In the end, the folder structure looks like:
+
+```
+OpenSelfSup
+├── openselfsup
+├── benchmarks
+├── configs
+├── data
+│   ├── VOCdevkit
+│   │   ├── VOC2007
+│   │   ├── VOC2012
+│   ├── imagenet
+│   │   ├── meta
+│   │   │   ├── train.txt ("filename\n" in each line)
+│   │   │   ├── train_labeled.txt ("filename[space]label\n" in each line)
+│   │   │   ├── val.txt
+│   │   │   ├── val_labeled.txt
+│   │   ├── train
+│   │   ├── val
+│   ├── places
+│   │   ├── meta
+│   │   │   ├── train.txt
+│   │   │   ├── train_labeled.txt
+│   │   │   ├── val.txt
+│   │   │   ├── val_labeled.txt
+│   │   ├── train
+│   │   ├── val
+```
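+
+The meta-file formats above are easy to get wrong, so here is a minimal sketch that writes `train.txt` and `train_labeled.txt` from a standard class-per-subfolder ImageNet layout. The paths and the label assignment (integer labels by sorted class-folder order) are assumptions; adapt them to your setup.
+
+```python
+import os
+
+# Hypothetical paths; adjust to your setup.
+train_root = 'data/imagenet/train'
+meta_dir = 'data/imagenet/meta'
+os.makedirs(meta_dir, exist_ok=True)
+
+# Assume one subfolder per class, e.g. train/n01440764/xxx.JPEG.
+classes = sorted(os.listdir(train_root))
+with open(os.path.join(meta_dir, 'train.txt'), 'w') as f_unlabeled, \
+     open(os.path.join(meta_dir, 'train_labeled.txt'), 'w') as f_labeled:
+    for label, cls in enumerate(classes):
+        for fn in sorted(os.listdir(os.path.join(train_root, cls))):
+            rel = os.path.join(cls, fn)  # path relative to the `train` root
+            f_unlabeled.write(rel + '\n')                   # "filename\n"
+            f_labeled.write('{} {}\n'.format(rel, label))   # "filename[space]label\n"
+```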
+
+### A from-scratch setup script
+
+Here is a full script for setting up openselfsup with conda and linking the dataset paths.
+
+```shell
+conda create -n open-mmlab python=3.7 -y
+conda activate open-mmlab
+
+conda install -c pytorch pytorch torchvision -y
+git clone https://github.com/open-mmlab/OpenSelfSup.git
+cd OpenSelfSup
+pip install -v -e .
+
+bash tools/prepare_data/prepare_voc07_cls.sh $YOUR_DATA_ROOT
+ln -s $IMAGENET_ROOT data
+ln -s $PLACES_ROOT data
+```
+
+### Using multiple OpenSelfSup versions
+
+If there is more than one openselfsup version on your machine and you want to use them alternately, the recommended way is to create multiple conda environments and use a different environment for each version.
+
+Another way is to insert the following code into the main scripts (`train.py`, `test.py`, or any other script you run):
+```python
+import os.path as osp
+import sys
+sys.path.insert(0, osp.join(osp.dirname(osp.abspath(__file__)), '../'))
+```
+
+Or run the following command in the terminal of the corresponding folder to temporarily use the current one:
+```shell
+export PYTHONPATH=`pwd`:$PYTHONPATH
+```
diff --git a/docs/MODEL_ZOO.md b/docs/MODEL_ZOO.md
new file mode 100644
index 00000000..1cb01e52
--- /dev/null
+++ b/docs/MODEL_ZOO.md
@@ -0,0 +1 @@
+# Model Zoo
diff --git a/docs/relation.jpg b/docs/relation.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..db66ab837ba76ed11c7580c767ee6bb5620d4e81
GIT binary patch
literal 326204
zHh~+c>Blu5){JbZ!m3rQ$Zc6Um6g~JXP7tf6G#p?>nESKsGogb(qTm4ejGIq6@n0$ zM*7#Ee*}DZ5T@ZrhJC)9IVtYV3wD4QbMdnDwMii?sG;Y&IFJe}Ol?vfuEGU*8dqnw zQf%F^N|Ag5t~Zx}3x$*Lu0f6TdHJ960~ESLgv%4C(8B!w*?kOlKW<%+2siwm(EOvL zsf61|1^Mpc!~J2FY~%?1_>-+NcW2eb!jMC{^tnjzGCXcjyEd&iZsPoC-v zWK|x4F|RG8Fmj;Ede8TpZ_||1Sbl*nU$){x4x{)V{sP?_I`IpHd!o+_ST&9#-yrRW zHRU*=ccbiqlh-TC1hj<`y3D+9=YD~ndqMVjjyqf5H(oadPB4LCaUIKdhvta{{-9=_ zL%1^U=1<>gHZkcpaBFQEs*8)<`{s8XXs*uh(d*!2O{&-!ub%)JV^Ztm-@7(RdV1i~ zPF6l_HL*(!kZXG8J8#Zpwphtfr-Z7Wpn5L=aSb7?c(O2=#kfk*Yol(q<)>iFKJ z0(JBk3$h4$JrDo>D6^k9ILCsEzUYBkvbpbV>9+1c44^@rV`;{{^u4l4G;OQTd|`6) zb&X3#z}UOjSytya&wk5+DOTh^{7LW-4lxP3rFS&XiBgHdPv%NS%j&R=q8mS?@Fb1e8L*^E3bpCjFt2TT(SN6tG-bt=$sSq zw-+gMQ4%~?TSR;h33K^}zZ`*ykPI2K9D;tTkls&w?n!EdI1-VB49E6~Db0nA$)$>eZ zUidX^M`I7d$?S))*65VN%5cT!+^7!zLOSSgUw{n=Jv=dAcY1ClPtw*7*|&$<$%VMa zJtG^3^u?Z2U1Ll$q}sB5##>KGywZ})rnEJC_-Y_y*o=N-o@+ZE0ygvd?XI`@ai(9L z1mfy2Qp#rR@tr<4Yxs#<3#gno?)%%7m49p)=L_i zGQ%GA_}R*8vA67L@2&yNWxzbHiUZ*~139z`;nuRs^wGI$JpupDTPb)C$Y~B|cAOXA z-w}?4#{hl|H<6HgGDuxNX}!*vaRR`OI;iWnidA* zwcV`3@}j=9aIY%t!q!l*%=@WA&b>n1!7)xRrL0(+HiNozFZxZD3tF934-p%(TS1L5 zQ8#gVIXUOAf~3MgYM|qM|A_ngXMp%0HXcO-v3Dx5W=DgMJ(d6+7>EX9KJ?jL|707i za{^jv^xSZotAqj2H9|gD2?lTY^Z$OXAOHbq+j8ksiB79DQU`ZUvNwA}L;q+rff?lZ zfWZH=Fu%1a@nmYFVJ;&!g$3dw=|@ z@RY$TuW~N1f;b1H-U3}ECrBjwFI!1zd5fVZ7ln=~i|854o!cgDYN z+-yIenVo_k3lBQN)~ziwMc?bXv{$hu$BBFO-d?0w!pPu zr7R(>s6uQ*T{xAyr>#|}KXAZ2&*bL2b1zn;K6p89X1_|;kb4tD*n{yXyGoe+DAi1a zn7z)!%FH>=hqoRt+6CnB+zrAJ(i_I3klBX5jbw@`VH~rg8QU7C6~wsv4K+IvO_xJ1 zrXq073WLl(7%L2~(3jAP6baOrN`!2tUf#ZcskT7ldtl6lo;fBKr)K(DK!fL7=IMx| zXK+TP?9+$v+K@RKcGVU9-C3VIYW5>OXPL1?Mh&6w?4L@hGiQ8)s)6562|@$=& zy|80!koc8}hh`5~n%+iFbI|WW?waETBj9(~EbmI?neU=L(^{=Q-EJ`n_mkIWSp8lW zJ`?%3;^=TW<{+F!jM6?MQPKAsYx5YF@0x&nzns$9O4!a1aNN)Oxr2kde0&jS-Z(e( zCw1*8e(T8xbF&+B>85@Q3NUtaf#SZ*fjj8q-StUOLnW1ZI<%r%H4fW z;a4uRTi;%6bve`$*XYBjVInM*l5h@4Kf9W)Z!kt;t__ip#tvKl=#{9DIjsY;;j9GX zz>8=RA6qw=(U9wq4_YWrOHoZkM1c3<=xe^=;SH|%k9SixKGUUb-Ynm|nIRO7Gfg{1J_7;RyWp(d=9LF#FBTP=bh6(qDH+(gM6+$ z{ES?_-bB+uH5L-R>C(My6Kf>c`-cxUg1}C;qvp|KAGT&v{5ZOfN-0tNg10oKbaZuAlb6X$I1r-wW!Y^6TZs>y#@ zoMb$A0mMgu6fM>-F?lE#o&CB%J{W7=sI|!0a&(SW;JBN4oB8aAT==b@ty(&}Ra33e zj0qI~G#xjSNM~z;+)iF!m7g*u-pBQBECH(zVX>F|-{d>~>->tZTR8M0LYc__`6Mn+ zhg|Yqr2qm&?DO7O{{?!zF1ovA90njQjGMDvrNonAvB5t4s%>4-qAB8(DRk#QsqJ0?QtR5T?jdkzZidUm8__n znnPV>x?=dvRR}kxos3$@dv^O0+t9nQ77mdapSz;eJAvp_e}jty`jO4gvLD;M0<}ww zm@jj_rYX`D`wrG6bF^a5&n3LSp5Y%W`{gB%%jgUEF^XMN@QKTPc`xYfj|EHw828vQ z&BIj?sDp z#ix&@A(erY>a{%55t_?Nxr(K;XmXmA90x*qlb9nW_FvQ`ck zg($-b&r#Q|8;@z3?$x`c8n&d$GyXcbP_(UvBT10oz>_ zN8_J(o&P|svu&KlkSR$v$u4{y``1Hew;njJ^C^Zz6<_&y$IKyV1$K;iYUOKaFUP(4 zs*+pZWKc6E>##iEg>0GvLKfFv60m9{!St~{*~VbTo1Spot#O`2|8Q zM!)}lK^ga6Pks)&hY?(vE|SeXxX&M)2!2%gN(Gr8Rsq}|Tsn5qqd^2@LghER(m~!A zTN1}d_HT$SE*T*|n7|dnh;5R�DVJz!NvLMD2nHS=@)e#qqo;p3+pm(P*T#j%5Z zvs$^|kT#nICwRGZGj~sk1dm z2-sMx>&z zKm><7XfDcMI#r&es?^juN=V6EskEn{qG4g~qT&npe4pFeX0Cku1p;ruY~~`*=>4f8 z|G(=ZL{;&*y?mpNc^ND5d0jtlu#Y&t?KRjXzsn#1m1NP|=L~%)HVXwzZ@ulS4iE`S#xwQY}kkdKpI9Pl=3j9?zekop+OgNhZbkv=$}puDO579sf20u} z+-#Z-%Dk%pP+LC5kc%b|s>{Y7y;8NF%3zX;7`Id(`ED8buc#wr=-=HwxBku!{d3;n zKPZZCwGI{qKc0vWcnWG&q%OhUClqCE7*h$auNz1g40$GS9BDMtH;L{Hfd*1OS4&0A z{Q@OP&&!|iNw`BoW{7{_7SMd~`nlH}5wy`(xw%uH`j>5r^f=U~;YT)4)ni@sxa@QYk}jxoe2uX22l*udVTA-(K(XL!@@O0!L7#D=%M&wC`R& z|1F_%)g)CwKlV3W-rLH@>5GgEC>NN`IkOEa8u&Ca+khE;3LHCyubz2fpb^nfq}CC_ z5cpQ>qSuemNt;}XZ$3!qAghGj8&5U^{PTZ|gW6RL>$!KmBV{~i11+T8jt~F3Y=}e` zGs|JZbU^~ctsZ%#V}d;T&{SD&hvwA01kv*KBV1 z-c~~PP$#qK+}@oZdjets(xf0lP}8J79vk|!M%(wZEP&EkcIh4Zp`dBCqH$sE@_NXw 
z{Aj|g5<-^rs=xGo2*tz7K4Mj3KH%niwVm;w4D+Pxui^JwMuk1t6`S$9@#Z!z@6-(QaAojP*z zUavw2Zmo)|X*IALrN2uwUhgJ8@F{K}(Qo~jnauTZRhfJXxmb2+E_&smT{WX`ZijE1 z%48YqCr3`fBKI@jez~)&K~1+s`FrO*@@>o1d57LoScKoklbi93yoK9nrrtk_)-UYy`*c+=l9gL9#_ZHjTp61z~nzk8xcTv(sSAw}=26phn&p{|M%$~-C`qFhWe zZjPmu)SkLu#$7>p*|ONRnA6m%4Hu@TnRh_g>O=tGo0smEK~d7DijuZ3j&w8}^bpTu zE-k5ba@86kcYLgxG+==U%}k2brB_zV`;VmA4W`)!3OBl*cfE`xaE;1S0n4TVBtVJE)XZelImC{O8YZa^jx1y9TRpI?l(2RvnA*NWmy2A|p% zm%Mr!W${J-6!>=cD=GW4yz>%|-hcdf$93s`&YrhdERdB5zY$9&2K0)Dar5bW*}Y;N zs#B+g(jNG{)OUI@9W;a)S2c_zXYc7dXEI{&z((K3>TZMprhL4gQl|uYEK+h!V^=Ob z|G-i-1>3W=mh=BEF%VBreUE^5x|?UdW318ZAapitJM7L@j~K~Cu3;)kXs>vSf+EV{ zr1vpjle8FlPzdEwr#=27ihC+UZwVde46gZh%#Cue09r~yN8lW>@0l9gvKa#rF%PJa z$Aw)|`l-+x;gJI;+X}GU7O+|`!d011+h%<>)NtL|EvFz-3mxzQ)l ziS6QZ!wz`2prnhW)VIW`XzhtdGo9s+#8##8to`cPAI7Z6UJNJV9FOnXFHo5qKWlew za|oEGVE2IhUEFWDTwnOvnHHWmS3!Ul1GEIh0-xHU3ohG3`37Rr z!?WGauQ2yAXMo`{(zLUa*Lh}J+^voZCE142pDEZ$;2&6|&J1>~Q_WDm(V{Qye;v630@cqP zw>*!{f`gs(!m(=9s2#q5go$ViE)^(WMhcIrf2TgGl-QHpED2a-`~X|hA+WYi#kQ?}9lhfS!6 zY2#k(Q*PXM5ckA7ro?YVrW%MHHa!#tE)5~e4+q|lX(v8VAZPd)<9?vB3+n4tX>rG_9A$()t!P?N4_7^>3-YLtn(zKf6j8DdJ} zHpda@k)!J!N?qYOhO7jzBQ#?n+ctAxLA;It7 z-~BkJDqr?Vc?i`hccnLhAj*4hA#?qdi+vTX1a@J4Lr(UbVSYhbW#N!L zw`0hkqH08Pr~myp>k=CU_3;a$eXpZ;9}KP{B`BKhjspM6-C8vKiGHogJX$_L*=-q_*i6XP6Vnd|QT zw{74#lZf6S#kFB@Xy*Eb|8=0{cUt9t=O1cM{5bYcF@K2uzpVCu&mRBJ$%+Hsf5A5V z6S4B2Hw!r3od3l{Wc|G#xIOedrdOZiUAWpPkcxNv1@Ph0-yUW*#otC8Rd}2=!jh~_ zb|B5&qj$X}=%h>O;xQvG8r|;#AD&CiYM*a`T&CZcjM0CN>gC&tdtYOeO~Kvaxpp*6 zq~KDe=5rsAx820v{sR+&+X-i+{6KTmK{3`RTyi%jl0YIOd)mxTeCu@};vqkFr(Ean z5R+$HSd*jLK{T7p^2-y6Q6}bEmN938h4(Wl!3^YP|P| zMYN5z6nU$geW>5S%{`pAP(fc$z}{Z%bl{J`A)Nc_HjSJ`v0}tz%Qw586t;BcCVL_e`zW`^bdYs6k+wWMFucN_t@p){_`4Trq>h%p0O^heJ9 zqng;8xjV3|EiGM9@$Z>$TVTHV7T#53@aWx2hKC9KBC{PMI6UjtkSNj2Uwllt`y$F= zB@m`i`C5O|nBjG$$nVhy4fDQ18j}CS@qt-0A!QGWn@XqDOri3Hn8C~={Zu46^LIRzWlE$ruo#3buc7xufTPj<&kAk{Y*u}p4M z;~)jiw&G5W=L{^`povNU#=3V4vcJ?6$!K{>!7gurJ0$0*c38N9a<_aROuoCA>K&%! z_j$;U z;@T+#m~42V^hr(Mpv`NjsadU8(%U_9B{2btz@z;fFJ`CJ0SZGHW6DRTesZ8L@|eCzMjFpYMA~4n-8eC#OjEE@{%<+U zdUQ;u$eCI=JYffF*IzQJV_t(c?#oN7srrOJ^Pa65@<7pL1 z5p};$#Y(h^<%hW&Fg&zOi0x(=pN4`-$Wlz?{!<9Y&5wKAV_a-h{V}(D z-W4AYHfcA4xL(S+%s@t6@O$)gaBjOHH{0nUYSlEg8 zF$VWzV*uHI{DN;-MB>Xl z84lZS-!8Ee=l9T2sF}041( zU`%9)npB7T<(UBK{u*wQcPsL+`R6PS4`Pz+~`U;BWfHFA#3ygTrIqYCX=A zOLd*I;cn?tGeFJUPhcJbmr2hhQVq2rdn%PtK^0q3)V_x8D*U4G z+1^vT?xa!rkS2YYt^w0%Auia(-X&9sm!wV^4echOn1vTpA6 zED?3qLC2fM-;wjB{|dck?P2m0tCmB`hdWNK2eD*@LH;lH-aH=ae*YUMmG;S&NTxzW zh%9B9X_I7`kR{tBA%rCRm?`@{Aqj;@b`#mR$-ZYdc4i1!XDnlwrTbIYIp@00Iaj~? 
zcg}TP-{0f@J?=kj^T318eCGXrzMrq>LQm*p>?n?HQUC`QQMEIMj%$r`w*%+tPwUf% z>s`b?DeuLMO23{j=9Y{QQ;hOUn#jYMZWk7t7L|N0H50%b;IxiwQNdzoDmL?1P(O>g z`ql7|XHW}~CgQ@@`?16?!=1cO`Jokps?@wU1|#{4H}dkTQzAsUk^u_e$xAj1vLboA zD(cOVcyEL6s_(0XzaEPVHhy1lvkSC6P(zUxwKD$~*qCc@z z@Xumcl z{URv^4zsz}!_4^&fgAM3ZwjX_hCzPRRUcwjQ6-m=TZP!LOB%bY4GR@e1S7+?d ziKotCr09qo;PI+ZK;reKyI(ffh6R4E-6`?fiA^Xl6PL_=7!{F2vh9C)%H%l(*1i&q z1eX&#!&cTSvd`Z`svVI&C)fO}K&o-CaJtr;?5`7p6NDP#N^ly*F-J38(bS&ok7gTZ z4o~g6nF?On#gd6prDrgBfqh&P??Ot9J+h>JMaOdA#i%ZTz}Z}+O^vt}A)Sp)M(>O^ zc-RPBRXS8~eeO}~{NtQGbC;)i>5`y*cM?q(t*dh920AyO=Nm=Kw5*rRiU$B;?E z^nKlCDnuy31j+^Bw_ylcX=hxAbRt3<+g?2#bsD^I^3&q6Axw<1q8nlytg94^%B1E8jA!74J%CF7L&>7#D z(rxNNFD^@63`60B54>7`INY*vRs&A%6;4B4lBGpll^N(&b#hqZ%s=!Rmi3)24L|eY>OmIGEz``$i(37r;F(_LVnRBdS7uQ+zj*`c%PR- z-$ze%HLs5wz!)?aqF%P#2blB2P_qB^_WF+#?*D!}2LIXBJD6asgJak%KD{=bn1_bO z=21SW+3@HFgtRl0%jizUB%vIC9IiL`dKoBxDwp15Mhq(ZJnQLM$ zKQrHBb%33W1zJvS-;Z)!Lu^PuWIKvv0puX-Kf`|B3B@hc^thb z5;wN`smBm!`4uS7m2@-fW^5o_Lyz1*FV%~Mo8jK?ctGV(-DRm9@HBVd*Jj!^m*7}RTC7jylr;R=c1eUVK#oyt{VoigwFJ|Sfg zmNmaAH8eDG#zP3`mJnH1BUJC8lu?bwC*sz@0nEALWvYGeOf;t6uHK^8FnSM;KZD7x zVH-eU$a9oT)Xah|JxMD#*r$>Gn$$K2eX~j55pmba%=_&F-p=5n3x(dhd(=l-I@|ax zgpao1awN^Vf28vb#1BS@Y8%cG6n z9UUD@^|5YIm22${<7!7ge&koPx+s`9jr%~8qZ}u!1X)VPKtIa(cvQr}K107oJ%TR) zI`pDMv9IU=_{Zzv$sR^c5jSnipVoCve44?+sR#vI}2fa0d@-wb6>s?}Ee#*2c zmJpyM{^ol2j6>U&{t9qZNWpdRDP6?p%o|nTTg1j@Z+I&oseZ5qW18|e^$cp~%@-?8H>1c?cji=^tK=0D4vGF6+l<@=Scaxp{ z0F8wKo(7M2dHx((hKw$+@?UzmuTZYnU7L-P8IvsH^+?R55+O#HXwKZ%TN}F)EVUJjU$}z@gcp*(A+k2Jrke zEAdR$WxVKjx!sU4bD@(~2ndpaSc){SN6eS>jsi9(reBz0&Z4&9`qp=OoyuasVrnw= zs|4WvFR)$v2W+2LOJ@EBmxuhonxV<(DDFMzqU_b{_7m4EzM?=yP&ONm z&n2pw((g|^P6&pMH(uxdpzMW4#Uo57QEU|HwdR4P=J1x)S&c#+?_;>ZNvgrz;NSkM z+t}Vk7j?C>vPaox#?1pGR1UoJK8TRyenfLN*3xl$;kvYm%!W5_RiDJavLGa#*D-e8 z!t^7BXWobA*)opz3G_cvEIXT*LdEdZG`&5(N4ch1xxxD_5l0T}br}e~PRh_!0O0gv zh_MHZ3OF(Jn;2W0eEZg5{Is3%gX&rCf^jK4Rv(>XwE>y+Abn*M1_6pMW!gAD+$3s+ z^F>DGTi1Si%7QwmN|&}6L_H#XEs3XA)R_r)(M;2tavpH2 zyEqE7@=43PxF`XFgpIH8aw3i!?|cEdG;HTy#W2MsO<0f_e1vuhOVC(_rXx4Dt$dC8 zV<5Sq+P91I{|Bru@K3mAXARC3O9RxkkH|f~#Az~;lyBA)4fLH~Bt6|h> z2bTUzD{z&%i?RKrebUIqLR2cmF0>#i2XB&6HD`;}qq{;Z>Cx39U}oA?O&0A~t4M;* z%bxqR{88&ocCY(UfkOqJ1`D|;rL6X_<9qdWpR4X=z1ql9rcS^hOA2A4HJw5E&uF=#jw53`Z1GKgSqJsJAJtVWpspAJ} z74#$X$4X)L#L`Egtk-uVCL`hddC`(hS&>7EG{B)TjndIhKO}@NyOl|5Rl>A74bjQS zFh;&e9=rx3-c#v?KX|M$UcfPbVCiH<=$Z8(_Gn zqtAEny6gSNyei7sL_NJXJ}1upP$t?fz(;fmZz*x43Z3GDg!UdK$u|->R?$ulaN!L_ zM9(r)L{|14cmcHTGKrPYkVaoOvx>ndmu&Lz=*MWC2&2PiY9_PJedm|k@Gqtr!&#g0 zv$36)n|C}%8KzSQ5NlzxgWAY>MC6tOgNnU|?!^qP;iC|u6nGj1`JmR;O^jTzHagnQ zYzf`yxY7t6>+~eIieOrt4C4l}MT_`zN9j$he*XKkQ#FmG^ns_v;9?KJE&uVVS=~D^ zoE6WrQh~b89o4%j8&nt%qX>>5LO#e6zP#U`cy_{yv8=!D(VR7W-NoWA=ghe0il5`Yw+^1`ETy^3( z>d_<6xRO0I-gCSn$36OX=|Is!nItAJ;Yn2M<95_@mSmQv7g)Gnsw>`~jzY29Euj3s zAHzRmrh&GvoiO`*k4T2Eb3PtQZX4sKCsle)x-g8|KO%4;QX#Jm%8NrN_bN&zI^DXT zKwq6#nvx-yzy*Pa(L*8269%(b%dS;)0C=~OCh4?GnLn~7HT9l)3%74XHan>bdwG-$ zP2t~7@xr45%kTD9D%!HW1}V7JX-XX*!#+RC@*%Ifr)QvcSWN7w&iMGQy?jm?w^jI< zfDkf$F|rodXHl7o+2k4c0?ne;2PFy6!xfM?44?MBU?F~Tv~ z3{_IK0M>;Aw$*3)K8ruewsA1A-sNX&{9x1K)23_}t{z?jjzHx@z&4D1MbY+6L3#46 z%s|V$oykSkUg@Kx1z$4@J9q0>PsD#TH zVAZYn1cXvSvZ`y6vS+@O#Z}#p2`#}#!|x)1a`16(;%NE1PDTtym7^FMFp6l7TUfJp zMtxGXr7WoRO3LQybgK6DwwG7OUpNIfi1Epwbs$^jlJ-~1rVQE?i(pQD9U#^emb3^* zJ(EAT@LBr06Fic5{{)GnUf;mZQ6J?_p}ww^Fuf{X*A}BWAG_K(V0M$dQIgsI`@ohw zx#NdtZXeSxWb7HcrThzVw_}EAFMg$Ae_6(KO?zZ)1n*`fN2z@;0_5{4)ZV5eTD`Awqmlr>-whz<$(xnnjs69JD{B1~frEpA6zJb&j}@p#5lS|y!~I(V zaw{pI-R|e+g=9_jq--xnod3oouG~q50;S#~m;Rs0V~;>|^^phs7lQK)_lw^;Ak+$~ 
zittgHdPG;Xh!E?Y%QJD<{qj^onvs0y#r#(ah9M3y55~@~IX55=;VOADjzf)otrlN> zF}oL|`owVtBv6{EbbQM3u=_DRQ-e#TtAmqj9L}6sP=6dYe_j!&@!np3_Rc+4EBfX@ z?!e5rR#R4DdwPaIJ{JjE@8~y-^||jwRmKe~@}Fjz4OQNm{A7Lf0;DN~W(UD5T{qQT zaK9+el{)~45T4C$CPmI7B!k*!H*y4%LY z-)Sm+0gw8k%5w>_Fq@UA7$MC%N8=*wbjme%^3D!k9tZ^4y3}3qVNGd@A5%%QB!}d+fDu3-%Mz z6@A40<1LbFGS4&hCl`E}Kc8>ZQpg7^l>5k%ffJxu)!PF_?+zmBq#8XA`vR#!CA8|I zf}5g?e`jN_{*1Eq9Q#)V#YyI8`aG;Y2gNy=w**KZKcR>ir-8ca;F8o!VSo?5)(wc5K`e0sGEe}pp>!<5 zGFCxP-nO&#_zMc?)EJJ@@|g={y%5 z`P^YzqzOM+G^=eYo!g-3M5_NLfY13iQxloXIu2o7{`5Lzu<@{Wjfi+)x>e8#5y^iKUl*xiFDRXs9|=am)d&e^+`^-UIVHb@0pEl~5413mdyUt;K=*@}Q+r>9h(M*AhP zR;o-Lz0Wq$#5%O-+&4CgTt3C>?YAP(t?6ZtyaK3?Z$M5T0|Is2QFs-Qd#r$;NekM^+dUYcdzr zEhsKY{DS$Z&Hin1EV!9wQFkCcK277{z_6qN7QYn*$db3DsF(f>rNeFA&Rsd|CZo+C z>HwnoG1qsd83uUsa?#lxx435hIe_*t}dEXijh;qv6pBlLl)`j!vaT z_6yzW=RX2ia5~D?{=7z?Yyw55N_1>FZ~3*L(}}k8{B_&gF0$KDqTOWT8{%?@thcGo zO8t3Er|`J?Bh@q9SStrQ>v7bpN3YH{1(Q}WW@ak}Jz`t;u&@MZ+#|}vx?rgujG2*( z_o)gMWRl$Dq+P&hdckr39wqIm8$Gjzcznr)2TQP9FstXD>$<(dyQ|-)gfRSD3N#=) zeBK6x#}X#oW{^uS?ysMd(L12@5PO7*{Z6D0CG<9D`50W#ey@8pXC$G-l zs!U|8UKMX>4?i2>`LS%Yp00-Js8nfM?MV-Sxw*|1(@+B2)Va7i{$8Gtj(PDjX?wQY z?jU;9Ja23plQjXevLK2h*0@x2pqk%5PwV>SxNl5uhqqThL&q?E!&}B=6=E!zyG|81 zt69Q-qSWTH0BdugR3nJCM-gO&$+GKgK3Cm#kbecfFbxa4xQY8X+$uSizAQ;~L($?s z0GDeBD*5Y5^#29P$$x-o-Ura|S@-&!`K%d6UP)vWP$^yf^q0)OTTH)%BCDGrg*il{ zD#ZVVn2j(@Nfh>Z!mfHkw6#w36Y(-@hOChFe0*ABq}3*_vp{A}F@KS1SyT4d+-XEA z>FQ*2j}>Etn6FpxrnbdlkvaI)#uel$)#Z4B!u`2zM>WgXO?E{EBlwV@%Gv6BO4(y4Gx{)+Tj(JI{ZbK@R-L~_9 z4G0Iq?f8cmR!s5tIKW${dkoC8=8F}?!{Q@SPlOx|6fRSKdc0*(>_{E|ij>TMV&@MpFgx`@ zBH0q}s@=?25QQ4zkJ)%F5J7fF-zUe;6hnj~9>%Am?XQNUTV1uZp#tEGP$g%Mj zYDE#%J6ZT$reBndFUoT4|r^UfnBH*!Ci_&&3_xCW`vhRlty? z)>(SkXAIS3+_@oCQWkC*-gI5{*bDT%Lw2>K{b$LBMP_1Zm{?(z9tzm`CdhwWs+u^fah07BWl? zI)Jp`ZuPV5?}%j)H<|&Fv1r}%%g6U%=H(}?>jf#9#L(JmwdXc9lK zYP6-83ei6ZC!JcfkG$w_c0!l0Wnul1lZ;{1VJM!N!i?GPp;R$(c(Ld1Y&1D% zYhsJ}J^&6nJCJMm-Fj^|^W@a^w+vGH>B|#N6z;G)C9+0_B30}&NQ}y+7=89DXc082 z+(`fj%RAA>%I#CxMhSPCf$X=tZ1c5PwS^YsE(#w)iK30~JWz==?Q?CD?P`MBl+qWI zx0kF$m7ijaFmDJ-bqj{AG5WZIbtB0&2+n`CGwbddZ98Mr?JD>d!UtHv4sQcaCa0EI7PFu>k@7fgiR!U$V?n5`xcd z>9ZT=Tph9OuMS2TiNjce-9rKl{9cMy{QV)V{bV#f*|_0j=OhfMN=vyi;;?)RkYSEw zQnOTP1-cWIPN*BtmVjoj<3=ces3dlmRg^pNX(t1S>M1Hww5qCd)49pc4$zRcv#~K- zeb0R0i-KK-zk}|C956;G_u`BPx~)hDhY?La;*r=d@!%Ddcc7VX1Z?#^$3Zj!(W;k? 
zb<6(B)g!h}ozUH~23i5iB91y^8rxml9lnpP&|qT|pw3NaAN(1h z_;0_1nUu6tMfk?F2=#XzKl+T^Nl>i6Zss>Ka?b#|Iq?$8OH2HaU)HyhTR0-KCLu9> zBNe`)6vm}W(De4FI#MuHYeFt20FS7{Q$@OCd%Tjy%V@WnwhUe&+pCZA3sJD|cytlG zH@vo#D;A&A3x-M}b z&>o)zvPtCs_>D>T{fLS~df&CT&Knn6a?3?9AH#I?Racd5doty99F{7u`oLmw{%`Qp zuKaqqU>FxCyV{TNW~Sr+#LeOS|EAa+cK(fGlLpKGMc11MM4KX>i|Fd2^858h}PO9XZA+$E3Hw&Myi99j!#bau(^n|+SKUizb^%Klw!-r99vm(d8^fT!-=wuo36C0<*t_BQ#7uIwA-L|wwKoV zOCrp;P{GJ-;LB?iFzLDbOA#O;IT%Z@XD5uiLP@Jx3Q~Zf|0!zXYGPSu&&Jl?oomCL z$2$tcIlDeQak*e=d2uIOmv0aD8ss42wr|t6>Y%LfZQR-09&Np!FMo~QBt#q zYZJsB2hBby94z6nV)EQ8pF!qYgy$QCVe=t^X6`z8at%RD=dkCf(abeq-?M@>acx!_ z2Sl6~BX^A%sc+CEH^E7dB2F3d;RZG5MndV(W8Sm#X#lkIORD-S2a><{yTM^eC4oLz z{BcSHcX001h&{pX!mQu{P@yqn<`Q&L>=@WJ2Q#SNG(q%XbeA6h0u_cRF+x43XERHg zlfk+Q*P*;rF*1et2nS)m&I}F` z7dn(Co=W+saj|gV;nIrvEVQCD{64%pI~g-3AlUPO!IpjVR9n zn_n5^sA0Hf6CC-qlEibgzY?Y9jC5x@(2v-mHQCzNk0FHp9yZxI|hyPBUCX!N$4T48QP4 zz34^eQD)AOGR%5n(^_Qkpux1d<>);pnYrE=+q~|O3hyW-G&z*go^(*@A|0L53dIZb zKM#^>DG5#0QpMcw)SrF+&ftqoL5mDp=JWliD?ll@I!B9O;yaw`6zIqTYUf9JIVl;!j$Ir!7G0~gJ= z{$it8@JGYmx9{XamiP;hj&7@_=;?@)pPiJ{;zgf4@#zsOSP%xHLxl!69ET<*ab%%N zh!W}A(LQ05p-WS*1k8ap@VP|=5T1XQWdX38Pdq*w6#x&2;PvTt*e>bEE;Q3igj(h% zACc<1jl?JPsozbMf}e7WvKNTl#P%Gtrmqu?-hRFn45PXOO})P0~eDQoSUbdn%H$aqR1t zpzW)bL4lLcRJUYV$Kw{NKl0)2H+cD0ojQ{4Syg-aJhhRy)pTra+duOflS zW^@?bz_qtj}9Uea{&yYy9=lcxU^4ruDl!RU2!I;R-s2V2uncy;8OjsTBfd_Rq+x|up;r+sQ}*&r)qK9D zA8Idd2?t(0*Us9WK2oY&!#Z4Xlh#4eBeAdGXM=jyUq)+|*Sk4^2pYtBXk}dU#x9Jf_uY6$T5V91T+K#PM%TB!PFa(>Tmz=Kw`9MBo)3 zBX1i?!FF~tEW-`-^1C122V$Rg-m1T-+yW4ZFmw1B%1!@P6V-pbPw{Z7!$A4PnGu^^IFF!Nf(cLx5T{UP%!@X3)r+t-`^+cS30;ij$*7 zxPre9+k3h?r&~r7TDOhxbbZ6$+#3ZOcU^AxiU!1ieEPW=+5Fc|p4BKzdw1(W{OKe- z9iptLg}!goBD|vLW~JN$jAjpsnT3dP41Yg6HA0I){{D^ODU`;YTZ>=pE(Xn8yl}kY z9rFU6ZP0)`KoaWor5UGE-s2!vu`UN2SxILf?w9P2JwfHAn@~#dNfh;JKD%Mw(_8O~ zZOo6icQMH4AUVlMSqc@N{wU4Gc(3UB0UTVIetz|kT%E$vIb6W&7HB*plD^N#)Of#PcBLFVok9kAbo5$22GjK;QgQ0%PHzuHdx11ELd9?`)nn48LEABr5gNcW6BsC%6G00B_UlCFv|@ z;qj)C>#a$_I#_O*Dur2tz9pFip$R81nMc=^&93iDU6ZAA%*AWw`KB5^a22qxe&fhh zc=bIy=N@b_W}@`X0_`)H~3cSs3v5JOx+D^hNWMa3#&NBWmUGqiju)CD>;ZQ+E;#oD4m;6#6llDn>2 zf)$3a>%I`IgDYJPJcu2IQ`q=&k!DTDS{FEPnvOor`X0Z_GJt)fWHq&Wx*$w(ads<% zf%!6nQg&}>=xDLL;5O8`rcH;pwJ$SxDZrvd5Dr;FKy6+;WvtHJJ^$rW`ix`dT=lz% z{_XtK2ivaC(uU{CHBOCx{w&3JqiP3w2BrfOT=l?+Q-p~)!t-$QS`BBo`@tsfGYu#P}5t^VtW>R$UPp^1k;}CI`iOo3^l6POGO^ZLB~W z_>T~4OOEI}n||Ogk>1mYT4`w-sJ06wLE0p;aHfz=$7XuqCzGUwN(0|eqswJjs*@ac z)egM-D_M8mfIRGyF(%uW$qqCE>|>p=+f={Vo7^K)p?tdU-Z2r2XRK7I4DIcGG+zo! z3JP%Y5F6r5+YaJ}6T0^6oj<@3*@bMt&CrCecfHYCo^YdGq|2@cx0mG`n($Jk9s>fY zH?fSUT7cBZg}&YqawhMeVA}&Gw;SE>B7IVP)P6+On{oX<3D)!zi^UzHweO)6UA=zY zBdM>ZpsT&W{`L`Q2lIXnn4B$^Y?uEtZtULwaoiXnz|_C`k16g{rOV_3_?SD=`_#hT zb6ZPanxtA;S?4^@Ei02u!*%*(_iIW3-m9UVPS<~dG@6!x+`8+DRbEj#yr_rZwp(YK*XKBTx?{>9#nUcR;_b)LQ&*eyoufFt zVO+U{gV!ev2l8Y)Kc-c*W~Du44iVODDr-U!xu%R(!X>JQc;|Fwb_}Sv#2q?X&gY~2 z3>=QAW<6W;sOk)XBt4TUmXTIffi&r~ELfu)fPfB0UDumeYt`wN6kFPo1FW2zV4xqN zEj7!|sAqSz`Ku|Vpv%js5eD37qa2Uqqx6h4C2|(x?$v%)jdeZ>5ckf>#{#>Eb%Pae}mo zP9d&qzPAYz?0d(T2&8l-k*inH_eFKdCf}_(`=ya`?ClXWojQi64L?H#7tafe5M52%|8Zg{@ zofd8BjoMA{aAHrVwE<%>$T9Sz*A&Bk;|2zZaS&}y_gB(x3Z`+3t*^T50a|KKYM|Ej zkO8=*H;P{=>`Oy;{SDK+bY+__cwE_AAvhAF+v6}EUGO!9yK0o}W z73405g@5;DQ&N|JH6eM&|I{1yIMi|C0AMdenDkbvu-_AR)(rdT+3|4c7*g#EZqXF! 
ziK@AZP$Xk_xWgMn%k1TEMfFAY35^(2(&p_dmMf>bFni5RiqgTmj}f%6b(7*mup+|i!3QW+h8VI$ znzg&r*PU%A!B=)4MCpgL&L7_+T6ZGi^6n})C|_yN^F@UseSyK;u^!UqasB{ATr%Py z0vM09-Y^nLsira9|M6-U`SS&O&_62*6Vv#OiE>6Ym@z`yUt7(lIvTH^AT+|{&Te+| zw(m;uF6dQw0T*opIRp`a2+>n3;pe=&4K=yKET~vZe*vd&Oehv`BUXc$&bo9S_JC1u zOkJnn*+Pe2n6QrVk|j^awwN-400lhI(h1@~JR#=h1ec2`22`Ogldsu4PonT<(Cjuv zv7)HEC{So8*gdp9q9RtKcYt|uQ^yB{d#C~BfM@k@@SnKnK^jeJjiPSQweb`?Ke7j^ zkYOXgX|oN?M#S))T|R&04$ZJFPm2hCZ@R$yI%U7QNTQ9&`t7hI7F+wU@FU1UC@^3w zC=`PQ8a*s6>(6)cC|oUWo;U?W`%FPq1m0p;Le9^7IPA`oMp-_nC@G) zI`pm^@ZT`6k2P#%u%Ta3V>U4fg;mm*a6_@;HS<oVpGjAsuw-*$2#!T69sN<|>@HC*izmM@2;h zqkdGPE+xgV{PVDoi1~>kU+H)Gt>I2zrK2wF@p)OViujs%fQ>J4mYy`bzOaZVl2XBT z2G0MHPG9j~LtcJ4b?PHCv+jI7;^>W_3TXLO@B@PF-Y{d=OT7O^aKV*+ZxWblhQrX* zkb=(-KV1spnsq?MDR+2ce#TQ3X}x|#noX+zf!$VEPd^XMw7to@5eW3|Tmr_#<3JYP z%G_;jGfmr`*Jba1&~61sedj77k%Z=~cHPwtO5vMQ?#$+0@7(P!`=mir_x2@+=GK#64{t!1 zP@?2aoqx+{&W^fo8t&3r=x5lJL3rXOVrW2PLOY9yjVqctj{9TIY40yGlYO$hSj=_N>$ za5AQAo`z!O3!$H-C{8&|p?3|BjkaKK6?|j5Uf!uQcfpylJxPD8Xb_8s9X6A0%Tg{v z&tu`0GsFr=rDbd%U{~x%Vm;8tf12tooFiX<9fe#gakUstVtgPJ*{c|?p6Pvg+GMRq zL`IVXMr0)Mze!gukeQC&d{gm}fBGBKMj#hPg)4aJ`Wdkkzyk8)RxxPC7Vi#{pz)+G zw3bC{RZ4MuJzmQWqi^t{8>`DPCn%e#2OaUkK}(<7Mm|Zr*!GU|dk0h~iB>KC4j2Q}-oM zE93Lq_exiSJdcv(pCZ)o9TgGj@5e@%SI(YICAAPfH0aKziMVNONNd3RNiXbxfZ-Iz z40>i2n+bbXEdUXx$WEf!KCZk3ZpC~L7<_z7RFL+X4c=yox(cvQK|q$gw7bLJV~BVS zT(JgftkmN$a_=d0ng>4^SA6;jwP_VagyE2p7Y-o`0#16zjeHTo4v0`izmLz+Q-8@$ z^S9lH2~AsZ;!mJ!Vmu|pBV5waxf zpsV%@R?*&%L6x@@!@Q=Q;O*kbsmYmR8YY_XHUz)7G~dFF%dW{U`U3Z}C4z-WEm(@8FXtIEb}42hHE`K| zjVG19#1=LU#BRn@3+bR-yau$O?ZH)QCViU$oRmzTjfS3TkaG0bc?(JU5U$kI^~B(n$&aoc)#aao?7^#j)kZjKKenS*84 z@--u`Zg|mw7j^;OB<}8`b1As0$|^IUr(^pv?+KPQM>lQhzc4+3YnsBCYE^>wTqz_wwJV^Pxrl{7Fm%`fB!4rhch5PnzNd|La z^ur-7*eZatftEvpN)MSFfpXD${7^*B3C1Wg6WO9*JqQ7fj7@=u4PxcNra32p^Q6mq zzX$gxO(<1d`C6i&Y9NM&?LGCItP|Jj3BQ814vJk!(yjv*CGj?5k{ubPJ~&)6XU zDyt%00u17nA{V(qG%MzOe-^sY^{*Do&-DaL9Rtg>yTDWR8bEBfn18%+C1u&Z^i)Zh zfJ2&llb4%-05O@VBV*5Z{~I#Gb@Gy{Py}_7;8Q==hqo|_&a{^02e_hzZ%lwmMR4Z{ z#e4o9BS|wbXS3|dpTL_X-E=-Bk698=iT7c5#hR9Pr{;~K0;G7-atCq_?YaAS&olPMe*PYa3`0$!&Z@7`eyqfG>ZHU;e=ss|}u>4~kY9Cnd8<4T1pMsZp zJDxq5_$p>!(yWn7n|o5jwyM9mDpp*ufZm*1^H|wKfMvA*a^Z5=d}&6tH@fknfcyOi zX~#_W>a%75g)SQvWVr$02))1No|aa$w~IacQaubn5O!;PwCQ zxUBKc(f3ZHhxTmnzs8M&KLBy(bBW^Jdx5xbyHMk&(1ij8j&DrQ{gKZa=Ve#1cE_Uu zy_D`Ma#!@*c&c!}8WQ_<^SdfOAgS`V!?FK~&%qt(5i*VxRfHV@1X>!Q$7PrfPxZFM zM1#|i0P|6EuBfvJAqLdq+$nfMziYxgSE5hK0Bh9R0Wp2#gr09q`3#8raPKih$K^aI zQAi)bP+V9>QH1jHzt~|9KiM%sTC-0d1tRS7K+?c9h*UwK_q7dKMFvu00iS#`15MJ0 z8spCxEZ9D#jyh80kPY1`TvTn^rC%3;KW75*+uw)!lk(qKWaCgH(fV@zGt0^qHh#f@uLMrfe1#l6b6>l!Cv#EinmTf z`?G62+T>(llVXpBrFwsi!Zr0jXZ3oYYP2PpYl?Kfv%3-SgwVQS@Kwg+{CV)!%pNXodL-_b~PyZCM~Sk$2dDE_pVNl zMr#OD3n1QS-oP{r}01eV}`GL1ft@d&2Vo;-3y=p@9K1OY+^elI~R z9p3fG-LT6i)8KtG!>sY%aifn(t`p;R|F4mT?+>yiPU!z4jlTqGe7|{5mX-u8j~g5N zgE(88=RLX1eV}ac`0>--r-`#clIs^&jXqmlf{`R1=YC__VFEDD>I+iM>FwwG4FLVKo4@WxMssm*7|> z1}E?DYOOo}ooo5!wCKb*NAi#LKU-2QRQUpYnh$VS+Ej5Ot9HrcU7JZMOZ(tn*LcK? 
zvtXR^a`hJCl{1e1eqDPT=rQBXD+5xOSzP*;dLMgI!NIwOAxbC|2c%ndM!v~Zbm1~3)FZ`2eOCOn z96^qSDKPlH19ZZQCN}Yn$=v&DD`_I=32+c>`W%0{iS85!08v|mMC8EBdn1Gwy3YW| zrRNiNaE6BfK!%8JF4}wABV|TD9fAy5xUg-0Vn;2;Y#)K_d#@^pja~&Pp^Kle7D+;M zs`oZBeIXnGD!$-q|NlXi(03pN4uThxlgN_8AcL|-$b^lhJnnGL9RNbOETD)QboIp~ zep#>GP;v6J$H|_RZby~@e!Cx_d}=PA^#dP$;?;1JTS1U#^M2`Ta4%ORu5ZMZ_+~m(JU!YDmkV*rWnvp1|Q>PyTC@o{`e)J19du^!A#%#9hy9&N&gl~ z{MnzQ{-nGumbDSazN-jxiqu=f%$hQsxy5q+h>6oyBs`qK`;iwNw4SRJMt{G&2fI?X z;~SF^{UKmEd^x&A6!v|%#wgKAgi#56sKx#*YhpR52xvYG$G{a0KESB+_+2OscZV`* z73f811AypgO$6A=`6dt%Q-g}5?*^{X77z7rQrG{E)iY>^7o`U$T%qWmj@@scU{V%> zi%5}e&qF4i&}D3RE5hF-6{iP5x(P6lyZNzDu{#p4K`SPZnxsdk=-vyHH1&2yb`(J{ zvA`soG46`;?+hRbNN?w9nyD=awPKfMttlpyygim>@rv%8l~Es5wEKXF0LXjnU7GM) z6+HUDS0sPnu_x3AZvor7=u=?SyyIa~BUT!+{{f2l9C3?CkCvDtlyXacEgt0YAPZfh z*=?9%PZ7N&Z^j^u=lcxW47M<(G*s>DU6}**)wwE&B2+xEhGT7@rhgd@;iFeN+(gK9 z0Z-#>US0ueqQWDP(FyDjEHQXeo?mx{tKHyXmfqP%SX>DGFunTp2p+!Acy?v{^TcNR z-S1B{i8w3?B}n=w12&=f+z^IyGN#_WHuX-?z(VXgv>VGTqVh;#)<}JLVkL?W>hIW( zum3O}8g4%ob@a2oj9?&i>HZ<*3Lucw=R# z0HW>wUM^WpZRQFAk-&FXfsENAT6XtbN@aY1nvO}&j|_o32mY=i6l-S4KR}H@m@DGA zpOo9B_0YM$plD9PG%=jmXC)7Uv7+(*@ZIM=cDfx?6;0F=cz$q$7WhEkfX0?|Bl(CkfHFk;-<2U| z9@(}7M!TN7F&&m`U?;tH<$vlXyRN#pK-i5_I_@6JtkRJ?6)-II!bSfI>el%|eT00# zCrd7Yn4XvgH8WT*)=_B0tejGL-lqmH)+DJ@r6z_|M4GARFK+*Qeq=~b z3R5z-#H)+11Um|`wgf7ph8|v*b0rx z3gL3;`$S=6ZM^i zw|@2w|BJi#3~OrLx1i^?jDH4%0ci> zo-20xn2KxA#9@N1a~7|Y)y*$)vbq-*Y;GgqN@PS@f+;>FMNjq|58zMo)tGV;?gS=y_Z4&?-Ry`|0VqvF|rwW;m0O6Z*EV4c80 z+o&19J-dvYe)$_IN0$=*f4GJD7wG%zslXI!<+X9>-6vJdL z*Cr%3ZD797!PX|xz*mSm>oT1@Tj>`o_#7#u5OYcrwV!gM8>UI9O8>~^Ao#d(Cho#6 zH%G_@WoZa%8+ffTcd6`xi`3c4cOcT_j6N-rqk-1(1#Cyit(-*0t#>j)R|I8I9ZcC| zl|_-p$-qeN+A%d11{zTo)O!$daokAp4@-1*R#u?To z{+$Tadw!^t`{CRxpXl>-;w zDo9fgjaG?K6P~yZ9+_wYZ-Tp#Gs?I+Ys|Dwr8NoMT_K2OECPmRqvzyEetVnWP2#ox zbnyGz_xleSz0hwk(3oslAw2jq{Sz*0CeSraR$Aa5?tUY2c!Zi z<_FzDi7zAgv0CppKj@6IRS-MtBUoB13^;If71B(guj4XpjL9;EQa6-&Xm9bm8#XdX zB5o2pD}AhTnA%DK;Wam<_x3~0pYMk-2(1HAgsVjjpzFXxKj@Baow2fz3=FwmQa3tU zwjHh#DF*e^JjM0lf&lm>#$k;p_Wad&``xNzwD0HD8d1mAgvqBX%M{v^(pJ9JvrZ3x z2jNAVGS*}y48LLqc4&-iXrJu*x~Ytn51)WxVw^D9?`87qjPFBV+_Eg7L_5!a)(pMD zd~#}r&qzi}KROPLx%#j+TqogUDA|Ae7Ll4o>p~ns*5~TiD)NK0q%*#JdF~szx57KEjKzoI^|ovxC32U z<%Nv$qoKeDnsVju$U&Lg9e2oznwTD~#(_I=7C-2|24m9UY$hwn2JVER2~>o=+H+kI zV$j+&1ZqanL(h)!OIlTNqHINXifNh~s25&n*b)7L81mo`I>F7q3&pMXhvHH!CDV~7_+z|-@XGR2rPN<1FNh@Q0{^SpjiRmS5bCUbtsUMr;UJbn6IVsI*MU38*+y7a--T)Z5FxMO9XCL&F zZQTD>rY4Brm$ElI1hCa}TnJD2`04S;tJi8>rxp=|G^6>=Uj&nxKamSqfPlh}KZ^IZ z!!rMFqv)s5U!6Emf%iLK&$O~C-XVlU3U;PCv8V=(DNO?n#lJO?u1y1W8$a959|G;4 zA0{m?HF68Fm6@8qe7_|GOk7*r#dVO3W9(B+(_^EZ)d79p+wy4tgssI}FL+d5_FaFL z$MeUdT`A!NVUdx659=8#;?7}3N2(#~!y5YJkFwCWfJ#r_Ya1E{Ihm+5;>(BF*<1!b zw(K#e!{H|#@?qrkR%zFgZ&WNle9t&^amkKhK_Y=79fm5vs-~ikel80!9eJo4Ft0CDRX^)Q!o`tON>N-og>1&@UU3k?PVY(IlZ?kKq1fvb0_f6 zfY!clEG-oMuIj)y8)D=54Bynv66j8=J;|h!HgMCWmm=dzZIUvlk!--^vp(4635G$4 zB2gRiwagwUPXr&)Vo8szc0C^bpevW|p?E6o6xL%ZLDV8D8rUT9Y&-ar|AK(~@4P4d zh9HgU{J18b2AN*WSSL#pagERdiXChaJTds>ozo-dA9MjtpiOiRW;~T`wYTDkGiuAt z5ro_s+_x8J+AswtF@p#P{Sd;GQjZ!Cf4+_Y)k6S8+|$k-wpR)3xphxZaO=>LdJ&P> z1v?7hZM;Z`_52^EeND z?s0aF&n?On01C%*&@PN0)TQ#wBDfG1n#XRlu+eV;qA=u;Eud}_pkXK2LZNnlk667hWhq_O_#_^ALO@g5$8=OhePwN%-JJN ziieL)A3H2nx0=a)mqSA1NP_;A4-BFwSwn)21WSEEKKlZst|8V@<@Y zD+dn3Jt(XjsO1JSUw|0(jl5gH1y?G1GUCm)m6P{1j6)36_Y_~~DJ))Gh*0_NvD1>a zSl-~nDUMc-5AEWm7eA+jOEmvpDE_>|M+G}2pKMZ#;8M(c>w($nu|b$oVCX?;r28(0 zDZrvC!Ey*OCL=?OC6>b4;1^*_UlDj{8_khW3s{GKpmFV@vQdXp*J+$YhrkCNz@jjP z_EpMO)QwjpyVhb^6w_1C)b(q$*o7_9H%;2im9b`UGdOj6Z1tftC7-f4ynB%!c>LFB 
zihuCBeFW&rkkc$htV`-~bCBsXu&uxE2I&bbE`bBvzf}oYQ(Big9q@y$Y#g@~zmjU> zLSq=gMp9ppx?t^*2(l_%W&h)F~sYjLXPlSEQKkEOSx_=I%52kDXo($OsnOwist8n$ua%2ksE6`0EZXK-&j30C| zj@9zKR+Ixo0-L{yqvHWT#|oA-A~U3Jhm|w@JO@Dt0frN5^<__9$X*kDSWt56@}d@C z#4)y!dbLvlbbRrUQnNr(SIclPLnhGD5rX($71C9dsLIoRCMU*GS_oCqJ{&WQLmQ^K zbxuDoun~6qJ@J=+6aiQ}Eo!_qh43Nx51)wJ8DIdSsX+d3R z85~F}IH*bp?O=8j@XwQmo7&TQj*0{*9uLxuLdws!n!0z|7i=;FhY81Pn?H;fuxQMi4g zfb6}ZGyYmok`1OESW(X15C58n+9qMR5WKl(fl2{+mcQ?`>Y!;FO;2Ku!d9ZII98Mt z)x=H1cDrVfTrPuXJ?QY1(vACwC^!qan~{9<2%zj+09f{Qpx|kvJq8L(YCVnXKYdbv z`+m^>kPilZEykf;UIXsIioBlP&4VsFNyc6G^a3FZY)v zB{k$@uj}%jqIpq@l~r6io0tls53622sSe*AUp-Cq+box-ZMk;P5QIdQ2O9h(4{^h5 zBvb{s@LQGw@+tq3at4T15({R^`3;En#Ht;DxEqwKduRs-1`Z8BOk66zr>dxg0`GDu zY~=@ISK%o+EtrsU@rMx`hF zj#!6_b=W42gc{wrAoPxjfI@TKpO%hihukfC7qxj0zR-Z*YbY2^`W3k-Lzuieh6Xcz*RTJr@V1r#MRdu ztSqAzI-EUKSr?&vP*+zlFv1-^4tv1MUFVu<3(mxGUZrc*OW81I_vVQIS_;+}-uRB-wMPSVgEX{{g8G@bsuNSgN zuWI78!1i@UzUvefOeuJ;5q<~z0C@ZtjO!{Cr$kpy;zPzow+#cX<9Kuz0TqF3^F0i4TM~a7119jq=F88Go+l>) zSBpF~9}_j(BIARY$ppj|Pt>J>vaEmz3s8VQr*T$&zdmUD)^(>MmW4E1o`nal5#|Q1 zi+FY-Q>#^tuLI>OVSkyl&#LART?`7#<7so+EM7Y6(4&(Nq(8U0%Om1EVGk4jJAlgk zZ{DxSeS1wAe}cMneENm2zwnbhBK?yiXM$x~d`ZFK$;e;lwI?{7Uw*9c$Cd|}3h|FL ziRK^K6orN4pCKkIhUvfeV93>Ef0S@cNTGkEVF0=ok_iwYyC$ifzZj~e{iF%i4RVg~ z%un9=ZpLT-Ilv@B;8&A~z^zn`oAGC^js$(NK5oBi_At?Z0-bcG z+)oGz^P=aod8xNx>vAQi2Y`+0Fmo`q1vA~`D7!dgtzs^~j>~RUYt;#tX{dC6UCmVX z_*aKN9@P$)tbd*8Z3loHh5&0OMn8)k)Nm_1ALTr%69%RcF9DrryX7^<6mXH7ytYec z0VLJq6WWuSw(1SLZ|wVn&V+B>n!0^?ufC2C;4dP(){lYIB3DX}BgWFD_x?KZPNVWm zKqh+jDFxZEF|*=Xtc#H6nuaGDsQ;-75TPf8xc2?OmI67YnY{gnIZ*R&Is;${|1}5N zr_RBqzhC=vMR)=9l-53E#|?(*#>S{ijtH&*UALoGEJ6`$^-f#>uzsCZYvvGN4>{9I zXeiDt;s(U~-`kOtU>G|QZODTQSBScEvX-@CkVBYJ`KZt3(Sp@gr~1OjM;#>( z1*%tQ?-91C1e?kidJDVia7hZXNlDi~VByxrYYcP)GO-831nmGd;(raD*t;SA=NCP` ze7mjZc`Es;&s+9Y9fiWjh-znRJIdRrKo(%fsqX?ZnG-7dIj>?1I|lhk@;m1~Qw0?W z8I3EqoS?ia?g6|Wz;YR>&*;7LvOZ(o0G5it>*#`y1zg3Az{w6@I#E07bziB5RZszB zQG3b{x=PXALXr+rcuSAvoibV3ojiP#s^3QfQo{$(kSqTsFnj+Vj`kn3znFEMvJb&b za1d*z^g&t6FIr71Z--E2<>WMXKX6cGO~%nHGZQfrYlD);RYn8+l@9jspb`?MFdu9Z z9JZrC!ah@~vhXsKUeWkLCy$#S*ab%_K>}DLT9;eKL{N*bRxo(|70ea`WH&b)EE*g<+<5C1p`HtUu_KsR3xs7w1N? zGKn-B)P!q0+r+lypsgFMWsu^!H<5l?#d~V)5JeVWxfV$Y%pRIgkV-E9So+mDORn>l z$MlbQ(~|B%c}bjZZiqMsN?OkqQDhJmq$17?$5-zQ#l zM9l=?*q)E8agR@5+$Wn2<2zoVqT3?!4!(i)&l2)mOq(homrlut3H4teE{XdD~CDNq={1PlrOGMXVBrlRI!e81>|4T$d! 
zU&=_q$tPUUvqiU~G?}lR3ECQLX=&9h4ASe)ohmLWRu31ucj;9l53@wdr+v48vGCO# zj^PWbSLAzBtPQgh_snei|0w~5Bmq1LiJ=A&=VYiZo^C1v_AU5_sY|Mdd*4JLhr){kJNw* zQIG8jsEG89JlhdYRK~Yz{XmaHS42F6`MCL>vIc4y-7VCYL9!zw^4nC5D#7Iic`^kr2 zWxM+94f6HCkUhUgsPEf?RqbdS>Wklmn4h#sh5fn4O1G8elV)k6L(T9MK&5N{H+`0W zY_U@FbmbT+`cfFx*%le8YV`atC3|KjJ`f#pvjSM>+AWdgR+zn*)l(MjcG9tpy#nbi= zB1ndiPHIemzP$rJeAkQdd|u+Gw(!cPXr+-${OJ)UMn56BDq<#3&mUAkC(I=+Wg^$$ zneKu?C|ND`z>bT+T}|^v=lq!$e`M;>&u?xkm$i)HC~VU{8(>Y2cazx@Yt7=7n_CnY z{y*`g4OZvC{?ecdKJ#yNs3MR1M6cDGz^!M1$wFcwmsIsL<*WWkmi2)f-k40FtC4&@ zOE-7Djs%`wa0aULI5~?`&)H}@ZX6B=cP{*?PHU}%24k5=Z_ATdnybR19R|CWANdujUwW?$#^ChW78-hr>uYXKW z+JmzucxsK{QUbZJVmVOHG3Qe*u#@D;{=Q@+pZpnrmdhjiuJoSBGN7#dS*}Ql`4fyQ z{!jj1_56?M)<1!L?fVPtQx65N(Qs4Ujv!9se0AibHdJ$i1@)c$SV@33;G`b~>GQ2h zd>paY9r2E$-g_v2D#?+mXOw5jPRfCm-y{ZeEROBl%7!u3pnaa&Y)A=R%gv1;w`q9` zZ6687_Lxjw;DeObyGQqWmaIn&QeF|i-{=#C81WCUP*NXZgaV^}j*ty4+#si3Tvj_Jd*wawYxaZBq8^C{s$IY3+n%G<9PZl4xx zZCv2TsyASXGe792+UHZZ<}beND&>$6+tcjI5@J!oXUMvN=ODKLSx@6H45hc=jH-!A zPhK*)UeQ%(eXvouC$G}zG;i2RI{FjzFRG1mjyb_)`&u%cvRgm4%FL$%Drzs2y;)xA z#DS0f0jfmt%mK5LxP+p(ZLG}1yTXt=s$BQZ(F2ty{ucYvmH0=mHcX$3c%~~4X^0nY z_`K8-xZ`KC?7I9R6=6*Qb&SM6NC+CXgi?+g8$rV*a8pt6Z3?slF~NnG4xuQUI4$&X zc0kDsJR#5s8(bGLCUmCmCZJd($5Ys5E9B}t?`8F_@CL5Y_ThktOzb3I`v;`L&kQT} z?E;#WgG>NJbp(&8P39mr;>~YI4_(72EkVky`R;a2ibvk#7Cq5=DNWwxeWdS`au--` z)j^64zN8WJoJYETjZuknFij0xa)w7oCvDQnS2>`wgu8n9>)rLwXsx~?XUTd$)t!*7 zq1$UkBif8-^PFdRQ{%XArKTR8c>U6e0Gf!)&qmZ-0G83x*faQ)dDki4I1HT|F=O2TTLVldnH?o;oa>qs23d2<(Bx@oxo@BzLc z9ShMk0L1yH#;0>|)Dz5VgxglWD7`KRs~bckB;qPcrr_nkxJ_Km4?1oqi6NmN?Q&L> zmQ7?Mm9`cwxNntPKj%grv(v7UA4L?sz_xuBzktUrw*!;CEDNm6bpjg80}Fgb z05~~77Wp2XG_I^d#3^=36AGZ-1TyERX}45xu@X93mljHj26)qd+u0|AVUK;tPLaRbUdQ#oVhMM@< zukZM;7r23jZ(Q;OEZSa6$YtkRa=C6m?|KQcoWda10V9`v6eu262Rbr9l&j`ZtqMu$ zmpc8WL^nHmxiQY$C1+VjW;)slD6m(x&a$@k17W59pI%dV&DSsMLBH`4Gu5~eQVt?-5;*y7!P!X zmjtdg-zH*@dEg?N3G7|HQh?W?%M$4}rH5#qho}W8@;Xxa`T7^D@~3@I)3^Gof-kk zBttT;#fVEc(?T2ox!=m%^44E$@tz%c`q{}5@S;JEh_SDJv&`o?k}r}YE(JZ7BS z=ZgZtjJ#<9Zpw5=WSAKD-3W>?2rsv@Z`6MQIZQ=S=zXWKycYMAHcqBzvCfSSztmPc zrutphJSFcoUUCC84y}QL;X}~lnk#Z-&8s(SK11X!PfV-1^SM0wmgP&9VBN@{_5KdG z=mkB49f6;M1IaSGW+V2+E3FKQLYM9R%_w2hXWmRvKrv7wGmS{oe{6F%-;H4v=skUq zle4*`Bu|ALC@wCJx)(0?TFCudkYkF(xWuOubZ!T);BF3ODg;GjFpwL|SEeh-1-I{p zj|x3KaC<&JByzu*Q~ws00f4sh`~*8*v`5o+tnTwolhsGgr0iq%6V|9 zVl!1k`~WAr8w9EM){)VSBl7LvJN%;kWBR?(ABu7%uo{Irl0t(KJv-I_wVkk)^^1It z_?FQ^{?~)Ndk23dz7zK3bGCqdZm+R)k7oRZ5|I?F=Si3^GfP1=6poyB}uQf$dPczEw#Iav|gUsBAV4HVh*3MdJ?ayxc-irOu9{FxcTQ8v^ zHv_FUnag&y9G8a`zIXGjt;*K2>TO(=k9S?6Wjo+Ob2(0hPI=!5pg@*0JyvI*W~S-9 z|G}jzr;!z_jdUPmYnUAoT5D{@(OqxwM0of;clv5$ho9KWZbAUK_G>^^VpLDId@0Il z(=rZqF>qAzFuqbnwc8st2(RRdpS@TMVa>;3&L8W#MMi zWp$6nJ1ODf>^^IaW5JwYUI?pC%tYmEZ~Uz<)((;npRWtfnl~rEXXNm<&H@I8L}JDh ze$a8OZ6QNQxau`dm!Ntu$R%0+xs?#7M^p6&hUh!*lofZ>1khb1iU;-mz4&cyYB5EQ zu%WNU@T|hm?5u^rEYrusi(n=n!@Alp zVEYn&+xj!tDsl#3H@h9EXER@YR|@8h#40AkSCz!F$IqG&DjPzmAQ_Tc#u&KTzNj{Y_vnICkb z9-yvD8hai2`n?#kIHr3`rAjvM)66O>SyvjjsWLZVAA$_867q4XBYUUMP|KR23(;R4 zK;w@SsH5m+bbn)AhzaqlSN!$H4+FYNF11?u`;nO@ogjB5aVn_2z|CzpU+@*P@xnaPSA{wcZE+z`y_P>z;2|2WlB zVIB$Kme&aC$=!`( zAbPVjZxwxa>j$HHIIO9&IV;qooHwQdx6lh$+1s%WQEuXqjb)-Mm3S?dFX#^zR}}j1 z-OTTSuSnS#)yOo3Ebq{l#X`p%sggu4LQL%#KkdYQ3|ocJxVhsi5M=nX!&#}Au-xf| z4UJ)YdXI^amfEuXTix0@=Mh0lpN)h$=WmTT&z2{b0=qlw{6BR`0)okI#Ao+WbX!O0 zhF}&IMFlV`@CwUrjt~!N8v|r1<0;8P`^Rdkj)m>ol~u(Co(~QeB+hDYc!pQJall9h zlE8PXPe{AIyE`#xF-8PclE81SAtiI%GwnU-=%cmf4faBNH5}MURGo-VS2}9%ZFrg5HLJuJ0`{9YS&cU4SNyFcd(=5@ zs17NzUo)vG9&UB~MEnKL&*RKAd{8Az_ooSAvDMH_T|Ft;0aQek;tQJM$cb?V4SX$x 
z@j~7B&cdwX=UOx_h76F=2d44p#}(rEM5AsMg^y~^0G9l^s(g^8x934PzNo-j__9=1 zD$}`Z+#*$$)VwMlARRpdzp&&d5@OAnlnOhexP9(2PXoK#cZ}?}@cmh5vQK}ar=vUQ z2U?b%0xiSBxklNOw(BM(&mXQT;d~nlUFVL&yeIIfYF8O=aBKDC3Z=J^UAoMLVmCE6 zR_VheCN{j{H6)TwG}py?51ikLaTzS!ssgf@c75EHe;UgEEu+g^JkL3phw@4v%_I9a zC3AE23K`}=k2%h8$c9HdEMHGaa9D z(AH6a2oIipl1aScOv zfZ8s%OV7X%J-7Od#vuwOKQf1weuVGY$5N#bSN``A08p!~fqnERLY{d^^AX2xAg^~8 zeQJxj4s=eXh|MH@=eT8)FR(9x3Gjk8e+?0(=;2)f)Zm~a1VWf0^*bng*dk4ZUs9V= z^o}&m1siNB(Y@Mb83)l*09Na1B*g^G2@!8RAAv{K6{?s$dY9KCwZG)t)jHm{pCR2! zb2QVB=TyE{sD238d-`jN1w@KqW>-H)u@{3{hfVg>;}d@&U@Dl$c6uYD-Cdi zf2LfdR^l(-EK?toigI`M3&Mb=ODV^Vp}~Rb7TBl&Rj!Bta4ylx!nyZB#+T-0+sIGQ zCjz*m4#C?*$JdCr3Pi9@&0E~|BiG=@F>TLCREi^@HY}y=?7#K==2K-JY8_sqtV$IB zC~WfB(2G?211rm`k3Um*Ph&u{;pVmvybOzFNvd+7@+ZWXf@L<{AvM+3Yg^5cDg{wdsL z{xx#`U+reWh&`kP+yHKoM9X3EjhqU`d;KRbkySs10?KAZMInF>&21KP`_|P`0}R!^ z2efrX>!-tS?coWj@r_V6_&JJGxqhNaC2Q>*4J8`4XtZ6;>TW(!^lWCvwc0 z->3Y7H2te8R^h)@v34t7{8hy|8ukB5{ymHM(~_sRPcF?nOKKN(=5)0*pHJCR?cWjF z5vg?ir5P3?a>az@4+o0?xs0h+$)ml2^#Z?_X?gbbqawxlt-RU#cv;X$V{+A82h-Qf z&FhVBAU2-_TwPyz;&F>6s5gL2pe{+yu6SCUvFqLPEzlQ2b>2dpGA`yN=Uy>S(%Wr| z4DD)aOi5uzy{CUqk(_Jywk+*i4QPg3yGgb19S7Z9b;3@*5J`>h=pM~I8xIUcn%WI0 zIquGW?$&0Xt1Ss5-L{1dcDMA|n0yh}EzW6XtuUeY;i}39NLte^9_nFqJTPfYnmI6{ zu7oC$BqNe89m5LU_{N`xsF)6-tpr}y+z*{V?-w6**9{F@=LZOvESu|MeUam$TjOjLfJ&8urG&wF&_ zr80b{ojl?5U(V=z7uem>&s@*j3=sRX*@#T(|r+ut%1N|A$Q01gNCVVDUXqCwWKYeArib0nPRO#)&+ zJP0?V3y-L|ZGJP!zIO-!T0)>_2@KqRMe|bOYk*9Yg^0=+`@9xRs~L}SZSeRg)Vs!c zK(xX*yT_mFRmcR))g&Q+4t7R6Trn&63V9j~J^^(SA9 z&1^tE9XNLQ!l5{f3}PH$EsNr^r}SCI;NJnL%XCF05RX^LYV@q%;XL0yy%Mk$*PG9_ zBbi@%u^padtT12Z;ciEKaMiI!54a@88WaE9>jDcM`?3RID-#05txvZF*CH+#73W(# z{N8<$b}C(fD!JWfYW-9+*^ZI#PNB>=-61WgHqBKfaMD4=eku^rP!N-3MVg$ar%0X& zjzx@1TmozFCX41e8iaW7f(v});y0!oC#^c7syqck~qSEijRo8CRj4yfBG6qz+E> z_O|OXI|`g*$?5xass;1xWHwm4??J5tcV6^T!4r?}^68nLIkPqGj2J4|b6Z55_sNKF zJ(8fwrLXr8MXfvuo~PMw2HJZ&BODG=-#s<=e{Cx0-udd=j9`C|vD2*d0B@u9H_RdV zR)^^^|A|@U2i02xhYztK^~Ga7hcnR`P8*SRjAvAR_r{8X(Y9avC*APqlW8%u%KdhS;s9rYo`do!J?ct-+igxRpiKUtQ&`vB01{cK zgJD}{E_DFzjz}OQD*@27o5lu!XYf>&#I;<80ha=g>-ltwU~kLZErrFc-A=MR5$|LF zO|ve(%Q+vnT`%jS6N`O~7&jGP)h3zDR|eYC+h>F0YdzqUzyZHJj6&ub-p*@l_<*SB zn&wD+*3yOdjN_bK017dz+thU3hIGPESE zanDO#RiT-{`EU_ktF3r?0`5*yubh?E>`HOSa}iOxb(s_=y6@J0(@?tOm(btziKS4y z;Sah9)cVE*`f2jZbVP<)1hb0ZM(MH-Zm&rlL45&EhZ$!aqlG#-}bAoH^fzkCYIg z>aaz(X`Rs$iq^`D4wt~Pd>PrJunzxkZz0M#>IaHM0GN>2pltZD$*$b#t9y5A`Tjc$ z1#6N-lP|P}QDL%lSn(KR3sLvVfK)02d3&Y!@s6a3egV#i>`S~p>tjNW9v|NW7g9re<*=-5Ji;Zdp1m^ z!eK!`I}vshCtQ!n31HMSZ|^FhV)XUD8Bp{GT7dL!nRcE@?Eeq}@dy8$WKr@p;%6Vh zacQK=HE?U(*tN27&hIj+ExyN4Wa1J`6`=eBL{u)4&kroF?&*k9eFL->jZT=MCqiW* zrE8`zydjDcaTkttC&I;LM8g!!&F%D7nD^z~rqVZnXC;?FPvC5jL%{eHG-KI=MBT14 zQ+pJJjO2CqwNuuXi8r|)xgw+g^@d|$BBMAZJeONFzNCkzGrTm%Bz0{f>? zp60I&VBNPeHQuocPx zb@9F(BSUzU6&GH!X08#&0Sq*s)7@V4D70X9pEjp$UqXx;4Q-t+T??&}K=2U9ObHi{ z8*WcTnl7Hwk3K~_{RDO&UI$^8weksS+>xPt&RXbe)8s`RZ%4Wvu%;bw+N3z^FE6K8 zrltO%i-8i)Q0!U2D=ITy5%Dw~@P-ZnssV&t3LWj~zmrMsy%x$31>E3*;NnEhsS&pA zQKHcEA(M{0Q#{KC;{A)zNAa=}Q~!Vu~mF$CXwyBD)pa};~6)j~N(u8ke0ooXp`*ztJR z_`vdu+C`ah6`@^4RoO&a!F>n)j#0NDu;P;yIO~>7a(D$$(wpx>0OoH85`O72UC>Jh zC#>o(wfSCs{tv$P`V0|VK%&Nmzu1sa?^-bFuTk(g==p~9;l~_1H)b$Q6gqS-lqlvJ zb+9UE0vWQswlRtoNmXKa`BFOe7^JQ10x1{%FhB}#^Mj5Rn2zXF-8UYSyU4knXOh(@ z_(YJ(D%;p^QaiWd<*xnO&op3wsyJbH{DeyK?UpieMG?RN<0MoAJMX!ziNA~oEw7B{ z^4JV`df6#3uAdvVCG$oqmSsN!wkli~2m3KKrvs1r<^{ur4~M7jGt!`=?_i3@eJYGBZtc8#hiV@N?vCM&+J6Kk{u{?Z|Bl<@C%{T=d1a=&Joe7h zgZ9yycRq!to@r}*OgA0VRS_~30+6YQ!jd2n0=B`~MOCE1`^+&aIlK8%Q}`h3#L8t8uKd!p`ptFw*wN~2T zJ!SFOzi~wvK##?66q88|(SEJp{yMNYu?BV}&Mu>;REwCEJneGVbqcdBw8+os-9|G! 
zwh;v(6gKEJyxmub0<9;0YEra5d*R@e+QSuZTlaFE7e-w`3jBWiU#+ewqE z;*|1s*-6Pa=3j_!&SAf33Vub;U#ztf?yj(%h>~tF({U;4bx!??d>4CMOjy4x8T8>$ z)mr_TlIRL|88aYIQOi$_??e3tz!xxl(l%`0T1HsLUaOQ?HFPmbnGzdjLH@^Aw z@bs}j0P779CJaN51#sBr#2+G^nzj<7ad)sZo(zgSx2?1S+fVT%+Dq4gPL4R-I7Mrz zY;TW9){JgkWn1Fj)epF{la1L|C5Wg!ICWwI6L0q_d*$hz=uA{0xfC%jG!76r`o#z_ zJlx(ysNFuOVLovtK4zY(LcWMYM?pOjrQ>&r{Y?_NIb zyDIv&R-N78Y1?Z1V~$JBA{Y(|VhR~LhBrWk&rLvUn?*X|ti(q)@diDn-0IcFlfJj6 zJkNs$-mCneYt{S)ig%h^Yk-ErN_HB+#2bkHGco!^r?i#=M^d5W;Z+V^YC!5!Yk(Bc z3=(Ln^S=Wcp`io7#LMHzC!+`RiHPxw(8AlMQC-*i@BXE)8oS#O@8 z+c##Qe%~i1F~%ciuchX1)JFdpqk%Y4b10{N&@sB!sNTC46Zrjk=c0lCVOe45v+Hws zbVC^}!gbl%>mqN`d6LZ8;$c_jPkyr3(r<%5DNrcH?m9&)X2PTV37xjL4-+$UBlr~$ z_XoSCGN0R4*cA=^%5PRgkjyIFGKV9cP;r|&lQ4&;4YqQAwc75RamZB%DA|V^M7%kf zKjj)?1`0c}mN47P>G^|Bcal1cA(9d{C5R#0(gg3phq|scilF%XSXTiCbo{vi0sfYj zjK!_(7e5)xRU%oh+;d8yoPurr3V0y3w-ZiW&E*im5v{WEkN)%aVS&28*@O~u{~7-< zxB+|=MsTZw25=E2QrliEPDn}J@O*<_O?_Mlx_V6rdc|q$A>E;aaD)5(#1BAhSLktN z-|e$HkRSMdonc_b_z>Va<>I2nKZ!AMD&$DJ3zBYo&2I2#hGYR^zZjC0{J9}n+20tF zCD_f~|J7gg|J6ME(dE&v&9jw(Ea?u$ehLYurXoF78Rh)b$YKDD{%~;5 zC?w)9Mj`wDillP15fkeH9YXPNfAs8S^4AUAFlnF3;R}%4tNSKh+02N3IL~`JZy8pn zurSa0gFdo(*^_}L^om?23^Dzn`IKNwlX_C#As4AUkmzS{r@H^#bBdzF#FD<0ik7 zfvn_nV3E{K#2FyZ`^^Vp_L19eZGJB0Yi9E!O8pwq&W z^%N{kWi%XW6;ukAzkH}<>g$&BeQu3uJjW z=zf~`!Wa*njp#b5msG5WcFc7@o_wkdx#P|1Hz90Huc`s^+sdMZQ-f60vni~^?kNCw zW&0MB)M2UPpDss9F$vRHf4u$m%le!_zaY?xvUD}}4ZhcRJaO!k$#ZxoMJ5q7+dP5C}w6 zh=>qD=_M*6B?1D{AyE;L8bO-0NRv+JNK5ENdY9gNOQ?aAeO~wUJ??Mjn>pvl`GHw^ zAubl{DffNdS1CZ1n!dRTj&(#?Xa~o$jWDcAGk!mS0oyMO6#!Kt|WP7g@w`{W(^^XU;TnJxv z232T$;&h0fy|wH4dh9M{qZk=MVIj3l*`ra%uEG7sPrQ&UzJo^{demcNDtgkmCFn1z zlse|+h6kErc|){g(=`vnjT0X@m&UAn%mL>uH0dw(+8@7~!0ORwjNRy~jrM}p-;bmi zwkl-3bk}v@K~;}tqI2%eCdFZH8J!Z*b4X)w8P@2>1fq%;f}4VAiipm`2^L@8KhW@t zn^`RbD#6)jVO5>9qh*sIlDe|28L4MlX<+u!hy1ho z?>@`!7&Y1J7Fp7ZEp{6F?pmzrvt1>%o;Krg>1<;=7K=h|*3i-rEBcjT@%>U5NfO<- zq_&9Tv(i#{s_uf?^>9MfJXe-elWQeR+|=b)4_if6EYcsSCE;0>IZ!o-%F|fHZj4+b z{1=TW#Ll8TJQ@#}?+EdK0|zOd#g0g^k^F8G?EuMHa-Dv@jgeN4J!gMv zn`F8C=TEniGuFB9oO>l}l?^%R7F$Ou#B3Tj(!a}^F$@`nRzw+ z&Z0cFPAUXMFf2orS`e_SEt7o%uTke&^$iT)e|2;q2QuAN_n|P%9`kHeIL>Lx6>*YQgE98lzaFJX_(Y?1vnka0z57ao|2MUlUh+2uZQ})A(bxTu< zL$e&L@O$j%6zXc}%AuZ?2 z`G+G=sLn0%Uzh|ZLsX3b_RhZaZZGvqA~hVzIPP`tO%E%~FlK$Y?q0#yX9nw+o;_(h zlgL_pRvi?w%*c%mfII<_E{@~L_q?p&Z&Al9<-U+yH19S}+sYy?o;R22&pUBVLgb13 z+dGr!b#pu&DF5N#iLf~Lv&fHty$r<(&^n#k2@6y8M`!T?e)>M4DxDZ>0JP4dE-G5qej_I5%tnafg^`QM8|T#F8SMtG&)`)`T72>uw#B# z@$zD6kLXAXlR(={o?ciJow<&(vLD&fE3ZtLc^-*8bLokNpJJGzFPa6xw6g1#HyhXd zZ6pG3`%;NTmC%T`oZ4VBP)S|#@X}x{&dEj`J6`X@9NPFwES;Z@GwUD4XPn>M7HWAc z(Utt!Bq>Ag!hsx%;tw+-;08ejVAAV%3^%E-uWBk*@HTvMbs#;UDEazVO;aUqN)aio z&LhpIQX|vtfie0UzUPufalEJdhy0t+^y|$^O)>{?@=-4}S|0(ILFl2Gj9TT|tsMf# zT951nFecq~M6IMc;0fANzN}L~q@eW#=gVdX^XZ3NgBDO0PrI)=sFiF%^Kn_x94`dH za+<%XcEg`XE=Wjn7vX-})e7H)kZQfq#F!1g2aL1?E*pp*%>?DnAAkys} z|5BS_1-hW7`GoH;a@JdFOG|oN&f}u6Ksqn!;{!qn-eOu(0Cs8l+l>`Wd0n!QN7gZ) zRm6^cHSKDTm*-RkoMVZVT-GQY6jyW=I}uz}Luq814Ct)06F-;-Ek|HB1aYLj9b?GH z>lEc_R8H4HjwQJnDUQz^qI?!ecuh4_K%$vSPT zH+1h`1^Q^ZWdspD)6|FsG)3?e=+$gzybjcVBW_eNkK)J;Ma|aB?WmFE^z!BpL&{wjPAtdJBY9wRn$SJq5<)!MajZ1e9&42RKad@R5HU<5qDgYSk@uaG2 zY3aB=2WpxB(Ru2p7ZW9c`qhh_>PO^hYSimqDz&E#Fj?S#PD)NeHCJY6v=?H2TY#P71>tJmmbM`)f6Wf9|Te zf9dZMIKF%;8~#%Q$M)a5OIgRp|Jj%ue#_cO_b(<=8S71DqGd88e{O_Z{(ozPlang3 z)1~!Hl^luDYkd5Aq^rhQQ=i0ewCdWjcOhKv)3eKoS_T0ds`#D#Gcyk1gzZ$Ldws)efokV2JW7YN+RO zc*}U^(wJdq#T8*H0tv+P+6ABKeUrxUG$L#hx*gA>oyNcW%olQ{8qKa2 z5>heN*e{IV0wJU=j|)@}w%KdBs(}C351-LW3(F}Ds^{pr0jvHm$Ktx1EGUHflWk2&!F~S2?SsqKfV9q<3plCHYRSoo*7~|JdH^JaWfg 
zP^tD_Vo7Q8C6~Na70y5+VyYvQXg^i@9><4><|^Mw`Cu=k{1ETKePbu!My^r=(i7Nz zvNR!^1I@HR{4*Hu$0HS!h?cn$-;O)4zL}T%dnbL}FPvzfd97c5J*rIq8~oFO=8V)y zLy}zWFvfo)?@8}x>t8+wZ*(8`EIgjP0uS~`NOsLymNUvtJR7;ZsUM@SWH|ez4Rr#s zh92H%iZ)f8nhT0|aCi3^31tTL1Ki}7gUtdggxusA6z7uJ$sWH0tqg^mY1~XHbED|n z_YKI*R8ij-Ht%>00%G9sFj_7W-Wa|AJSiG}B@;#+(_IL^S7Z@)8cVFBw1Hq|oXef?A zt3dGY#%8F96t`Cd4hX-aBRwdyl4F z898nHp`*=>{iw2L=e;jOutN;U1waH-!oE6^itL3jNmYszpCxj|mT#$++kvwh;5*O9 zZa-iTC9?rKt=vk0YF0k5F)y{d8{*F$(q!&P!2FVs^v&y{xCQ zHil-V1Jw~T8-A~I^b`|Bzvh?Niy{`XOLE?0%1nb>#u3}g`B>H7kAZS^PlWWC9f0jt zAotUY_Aimy6f&^1Uy;aU(du%!x)Jp&%J>&{cBer5E=4u>>$+1dgs64!&k-akvaZ)H z0P6@QW|`nhquP46BWdkLHO-rA>TK0}>=#`ay8?*P6#c@wuQ_vR`XGC2(%v81~=(GX%`<0HM1%D^nFu}bJc6R3?vmY_$ei#}d7xg&lE%NC6 z*KINJy*E_}hpHX#N1ojfwOGVY(x6e>Z>K@F=+r2cC3VwZ=>p($yxssVr#}JeRQW4L z`ZtE{q5q4|7=Lf2k8MSh0KOOH)7T)@?`7wL2r_wau8G3=D;@3L`U-#`n68{*c&A#84>{CAO1}Jj5|WPM2(Swv3cDg z?)psy>%mTpk4okyqZqQ=Sw@V+nop=PT8mZlnF}81FZnjDC6YyqZ}zqjQMGgTpT6z4 zO|h@hZh^e|%>D*7nFT7atZj!)Di;!0YRg%3{ii3GHLNA;cQ2=a?2Er9C|aLzzLB{G ziY}0N%XNr(fiW0F5|XO-Hg13yV&8+=v&NMsC})RzMPkZ2LoGZ{e9M<2pL;CLu37)G z!3!wo86<&~=1UPKMG=y=CfRHFZ<9W(d>m2E$!OwjvgWMH&U%qo+rgOcH>S=-iV=*$z?>eMj@V`-n>HgVy=<&ZSo1^Ou% zNgTDtF>U`I!}Ss~Bwc+1yvuVh5}j^5f=M77RMwQ1KIgky8{(^<7QOJ|+eJ_oJl}Oy zo3OY0kRakieKVm$dhPv7Nlvx`WBA?4)1@Vj(){J1aE6+XyqGu!iZB&(d*Tex_Gj)M zlw<^AAdN-?>2k+1bK`eo)9E^&Zr+#r0w@L_(&^92Q8*a^msG!cw66!g3FddeGwilk z$8;$dSI4B+|9tN?B;kEonAL|*T(C;ryL@jdP-a%2*_)+E!i){<0tgI~*DZq2G(+kt z`y}E$NF!|bqL6vo1f%jzZo1|-2H!*63^~>4I1k9G4*25~heV1r0rVbumJ|sS*C34f zHYUDsdShM^)U4~&AItmDRrt^iP$!1Md8^525Qrqms>T;!U(aoI1d0?VXT~M=7k@!V zH8DK)`6f@}@1YMtv;kfY8*dkO`KcK=Zi+5R8?mc-R${v~Rs?iTY5Z-z(P;hmm4%!6 z*V0Hqxv&Qb6SSlDlWR`9e$+g$C0WyLOu1|LPIS1G z*Vvnsd1Kdj^xsS5Y^*({ZOdKpKaRQej+-CN**iE3rEpBlpedpgbTaI3V{)24Wl|I? zhUm;AZDlD%c?s@{A}2+Ya%&dFcgA1+U+j*B zand`*IX0ayvikXC4~zf+>e>}l5S&?N;xH$?r1NzAleNayW~7@I>_+`_bLxN$Wn|1~ z^!u2mh5N}{4t8sNp z7I53$I&|XL&J)t4{%XRB1zJb;<^ftA0n#ns&p|?G1TF#AxkahR8e;|&G`d%5Fc>Cu z+jSLAgwjqz{Gc`E;T6R&<~3hOq-BBEz9{XG4~U=6WCvS-Aec`=>z8mRz3%pcb=oF| zR*~(40jU=WubcuhL9TpCIvNH_KxtIZ)N zb8lW%3Cr@*#8v2hD1eF~i~<6}XMTfUc+RC>@3?eQANCS?*~^e*ndv1#bZ+w7aMiHd zkL;KXI{I1EXQ|BN@~ZcHQS-^t266Peap5CtpSDeJWSTb0k5GpmqTHoBGiRL}AsVzR zUXr~?aksmsJl=WBvip&ZljxlX=7~b_=x)ER8A1KoBV|c8INtvCZArVa9Q*8)E3Ldc zdZ0FrW}!Eip`4V!#!Bh;Q>%^FvjxIOD&clS)Iioe>O2v{s^2WlH2nfoXGip|0KhII z>?ble!r9i_mUAsG3w$!E((&xET!2X+lzuJ^`x;`23BnUXMjIyWPj&`hnj>0ECrHkj z@102q1xUO&RM!a7RhN?m5(wzs||%`_T79=4&C zzlX><_gTpgkv&Lh($)-`^b|kj9K5}^K5#IeLmzrNvQqOzTiK93aHzpA0F}UUuiytV zsCaNqeR|@(@+D9KYPAlpLlnpoGO`&CPJVO)GM7`S6LQk3b6BK9=>9|gkrJvaSsdAl zFVU&UYRIfN0D2+C%1D)zusjwYO~SVY_TUYPov)vy1Nr2j(^ zZN6(sjiH?WVW|i?M)Fn#EEP{^K6vny1+Y|fS=JG5;b@Ax&?5h&4H=RYP=r2LZ2jJvMw& z4?`cnsQqQkY>eh+to%jRggmR?`zu{OCIs1y?_?7j35tsgm41|vn8eNg19WOLZ4QdiDY5}&kZASG?pAjNwsbsv9}jaASn;G0rBO0-1@PF{dHf&YW1y{%=5+q6_ea2rz~smM@Y=ghccFiRpX?I^-k`0 zEuKIsP4elt_OO3%3jMdaHjlucQ@OOHe@oxmQ%?B8A#ZC7m;fC=lo5N zgjG+#Zp7%Jwi#c_NQDfr7(V>1{MVQM$xPcsaCYDPE2DpjLPiNt|Apu#Qp4MN6HBsJTHTHI?vo6<}vs%tq-YvFwZ;u?lyJ0R>j}z+kx|nQ& z@W@(wcL)%WtRt5%tg8cRfJVetYlE^#pODpor~Z4b9DJ8J&EyJbTN^aEJ8+`YE1w4OAV4GK z^@iOq2f2@3Pk7vn9)mIfl&A@G*A2QeoaoP1M)vPsdYZD_32Z4Uy768yVa|WO%~5?_ z^SS=IiAdeU%{H-=;@AOL1u*yQ-B?DB)KjuB?eHrlG zM*iFh@NBB17{IXY`ThrrMQHnRl^jy5u~=#@_EM}wQxf_CwI5E)`vaVSbBEm*eDU#= zF?-<%5IAs;`-@N%q#OE}iZDHg=D+yF!Zqc-+k@Ij0nR+K(pSJ4vIa;gaiX4b0S%OA zUdT39idIoI1QwghZ><9y4KF!@@g_?n`^Jw_Gri>LQRXWJIk|;=xBSAcZKmm5=T7LG zI|O)qhDL!0S0#}^NNLRt`)rEEq&6X-O*m@PwoilaEC9U zr4@e3-2qq5oYwMX)JraoDb3i@4*;7w&9o!;8s&73^6H_K?#87DzuN2vI02U#^RVTR zClEh|g+7|-vbI;gkNG156h}>h%J!KX(5wJTG&M>}2L9ef56+fzza_`Wq{)IuR%LVU 
zxtej8sF*jWBjP}Z0DW6D@r#>=1|cNuitU#h`9~UAO7)%AS2aLRaE*rjZK0^zV19np zJ&|(qHo4P(vMs<_`slQ{&F8%h(b6gGC_sf)1sp@#et!Y|H}C)EKoZzA{0|N!Nq=!5 zX*3UIA6jR07IWJl#^0qK@%o(uA{_VXFAgMsB^XcndT9MjNh!7-Vql>Y5~TP}M+I9C zd8g2r1N2!7)GRE(Rx4RnBdHVFbYo!^(esK=)(W_X(1)f(TgsNgM=qXW67TZc$k?Q& zugzBo=)y*I7syv9TK4uae>Fffb6*D5`yWCT0#alZ3KZ^(IqJN@c}p}3HRm1E)Hi10 z6&kuyDAK(B0N$A>;{QxuGy~g(9od_HG~i{NO1a#dI%fYlZFqwY%v<409S&XLg9_2g zdG;-Lk)+3Rji5jp58;fOY*gtvjRE*%q0ipOxLuJ`nV5~^;!{pBVf9ZUcsxc_mphj_ z**9Ye#ey$A60<8`b>7wzo= zPibFxsCq~jpl#E=pyOvySEWOppJ14?e}f_DvLN_P8o0Tc{@hXWg<6G7PFIhra{ln!_a5Y143ZhOyMqKzo7Yb98vC!e_%F7UXs&#jaU~ z3-rg?dje(v0{hGzk_FJ4)MGe`3GH7<=33=_xm;j-tDJMzh7W{)X*DV1e4K|mWpoxywa^xRu1-{0dN zhPd)wnKfrd%mEIU5>ZVc(oGtZ)x?o&vB5D>=v>N3@q^~xm%D~%_#DA4Z8u!b@_7sb zfJ@B<{IfCmq+c}x%j;I#p6#Vh#Fem8;?|xI$j=q zc6fVs(nZeFSJytp%+%%CIJhClX5^2iTHo7*cf57*6L%03xXP_c_X1vkS!*H2?!rhY z_er!Lp8eXG4y(~lG53aj4%W=wbmmUKtE$+ghKIa2_%#IPasGpl$y`uTssA<8TXp4w zwed$*wE89IipzX+WF!l2DVz#rKY*$MXVucpj*dZCp${zI|4~T^U>A6eSC`{Rzb^e} zx<|}~H$ctofQETSwX+&B+*xOCY?;>sHbCXc2uEGaxBrCfSmot)F=2|%=R3b%Ke$7M zw{K9m{3iPV$9&8b2(55U?E+ZamKy6V>}8AZrCB|FM%(^yS$Fpx-WmX@u)*E5E=*z6 z8=6S_<*0{cgXJEDM@ERQk z<6U@>^=2uhpz7^Oj?PCz7n;b~h~f0yIDt#4%=tO$!`jx^hfd~~!&iDDm@|oC)(6u2 zy14wxUMah4V>h`<%Zoy%BSp`=47;iu;rE0|Fo61$8i13O3iLYlD;>Lxd#MeMM|;)| za;-$-MfA>Q^r~`zP9=uM%s{wB zE|4A*=^v6F&%{eL#^@QYJ{5RwSRKQ-ZxI9xty3m&g1lXr79qKRB9{U#W;)G85~{2CFA_T#}HvpyOuZ3gzbJf4>O5@5uUm za9SrY&;jufrE=3o{qCh7Shvpo|Fp~2Z-W=Xs~2fPON{ji0`H~#uPt_TT_>A}cZN5S zXGrXn!vqzOR=1g!YuMD*v1^3r=<%!M-o?QfaQ4diM06dD#a}-Nw=;yjhGfw=_2CYo z?VY@kh@ruw48G$ki>lsLWC+E9{HT-w3bKRr91+Sd{{WaW^B=x?l4AN{Z_&Nc6F&^y z^%f8cnQ5XOE%}wszvxlbTDCb&Iovsv4k(j8JA)b(ye2%iKF+H(Hw)4P2RF^6SfXE) zSlCYQu3e|0iLmmCeJJqNz^!6gntmc@6P^F zpmXgL*=$9bx372Cm5Qm4DXkB7jvR2eJ+jwJnW}e~K^Nu8cixGbzy933A7zdmVppTa zgB8XFiQH&il21-kyc&rzJ!Y~l+jV8Z3fv47^&dBVf8oWw-+j?{_9hF5sVUyqbqfjq zR7e&=w%~ZEG2nA%lT8_Qzj9g<6Q3R~fYZ6o$aaJmHIESdOt>56&=M z;t5w{so*v{nrxRL{9HGj%5)B&lu3!F7Sni8wbC;cC;{tk+dQHejZFWx@-5v2?22sO z&ZBv}0+B^*A(`)?UVirhWE1s?YQ3#DW1nTdJR6a^KCqWayPl&@tpZU+1h8a55($yO zRz^@=EvxK;dkH2O4LrHil-ty-{3;!bT_O5S?xBTA4n%ql;@KyX^!su)kbq$`Pzc%W zh0;U))jpjDusWDcI7IbWcG}tSpf~InF%*$Vs;a5X`8yzQq^A%K^a1@SQUa)O4fG>mocQOTHaJ2TrWv3*m8xQ{Kuo~sOuH{Q@HxBzs)x+vFm)w5tiQ6P*Mq2rJ;@}ijt6S5%amB_utYhs z5D7Z%UVWM->1BjE2~OZAtc3T3=1U~|ynt@1e0N>;WG1r8Y2pT?CVRGXcXx-W#AMi=E zL5lRR6=@2bR2u7LP~``5J_}yADER`cR|Ue6PlUZX;*zXE?lItM!Ea)tl-KVl-8^SBMQLI&<|#9EB`WC{K{{@fn^UkeOmPG z0aBE!C(XCIk}vP_LC0C-E_`2XZDnG82dJchQS0&l!f${64>;x8hj}J~iVHH0mN%CQ z+B`1oK*n@`rCVV>@T*NYl}E_&8(M4nV>ZBEyghE5Y^RHI)TmWC3(qL!) z?wv5|9(CeT`xU-28jv5pVyb&ePcorRFe&>Dz&ZQNRd&c{8qKkU;~-FhOF_nU2$5Fb z1Igm3%>=;ic3**6{TXaPrFuFEUvC7YjOmRzrQhu-P4cPzysLW%nupCq0S;vT@L?ms zcDU%1lWkh4%+iB8Eocj<`xGGTFM>K_qbV$tYx+$z$zHHf@+CWG?%SUEp*(D6YQNH1 zoZ;GN-2;U>gjh=GoxgrpA-%$*1-P9i+-D%n&OpWU3^ZI? z1LxXlg%r9;HTI4URka1aA8a3C=*xt`&Z(bJ6C!C`*ns!sc+r!e)7pmh(Rd4SG_#vp zr%T!-lUiCTowP8<;?{07X)eYsFpjLFq;?X=(ON>XRWsQb7eA@`cChC^$G z@rBLv^0WJS;UX1ZQFkeR3Jp+B`6m~%#H+yAm&lyZ#};n`YWrMWv}i7cv%5ATka+EgIlg`EeXgA z2r>n!zj4b;0d9HA-&+Fd&olv(LV|q*gsIXy2{|%x_~Ys@&ewry@QJ@xlQy}iEL5L6 zQqVbQ=-3&JPh3H;$js^?QxK)jQ=}9v7T6rweGu4 zf+ZJoO41l}Y)<8%(;%>Orj>qrC+1o<0!S@|bxPB?k*9a9pht4{>&G~*?^iic!%n{1 zqoJlUNCXXVN9ANFiBI(s>Y1m`O&L>fUtBtILhwf!I9bN8)~CekFn=ahEj-hEJgtjR z0+nb2!I(?z;ljnJcjGRQYk-o}6xOIL(ht_p#((n6GHG?XxXPm^`Fb(=I(;6U{>lX9 ztF8rM{KBzqV`qzQzrD2E1&Bd=@$M+qmUtrmJe=sC!-G2_lW;~WpzihJO?DHyp&5hpM{s5vF4xM`xByDg4nwq~ zI#%j+!Js~e#JyegJaFD1{g%zZM2Q#fw%|;kqRjQEJ5b74RFb9sZ_*axrsZJZPug#K zanr6M_{qgV96ymiNFgCAJ)Y9YW`Bgb>n zK2U_;{-?j)IqJpIm}8vH?) 
zCHSUF$ggx?t#l!*{dmb4%%a!#JXdl}hFx?GWS(F!BowELQ<|4O z`)0!=G$??wLJh=E#wy6Ih7v&!NuN+N_9RVb#VVz2$y?T#M_fsUobw+}IyqF03f@PW z{4(aMztsNQ0PoNiWDP-&P6E(GbvBJ1J#yJcj4H_Q42HcZn5;nNb&+^Bxn&6l0401RSscys;{vflyUuKS+D zx3Ob?W}W9>D(`t?(&D=^9fi^mrs-gPO+pX_a#7hUN5`JU%_dc{ z2~SmCvVQ0GT&FjA8RJZ~aRWo^qM!F3BYclYgZ3-09Q096?T1!m(UW}OF9443XY&dJ zFlc5ZzLfqb!u#yBR;_&WwmlT$Nh+JGzznnR+QK=C0f7!^B-1ACs_D>qPCL&}=lBof zvbqnb8&&(g*RGCyBWMNl<>%|E9ln}(UC^glaEFtU5l}mCA{mVU`oq$yzG)240Qm zqNE+;+^mDemh_~6gOlUTz?Lh5JmB9Q^wf1;TK=4Z{=A!w|x?L z=(exIR>ym54LLMXateOf^S1r=|L)JaPjuQ|H+kBe*rJ##+_IGd3pJEGS^l5HmGp%}Hoepn)! zSg_0&y}!yEE?SU}HBQQ_pPJH(%h0sy|Fk&Ao#VwO7Aegtt`!&laZ78F<|O(CVK`e- zrWRljot%8^YC#-;M5wI5(;)>>_4XtUucJUz5H9i3nnqphV{UL&7AnJ@#K!DNhb!LN zTZsZ7hzXzA#>5*KQf-sxLUvGt7q_>rf3VDu42hV6FyhX_2)#`)DE~v|&tRc$ zKBTo2K?wgh!ZOfBP8Nl4uD;B8IHU;CLhnF&k8MVx+Vn;fxA4jGzpR%# z{PD;~hr{+1Gw)$)f}CjC1R!4&BW{pHEdfuFDPC&K%Q5xdr#K;eRUfa?9X8321aUf{ zm4qLb2{jrNgKMx?dI@~~M5*`12)9q(k4yC9(UVJ{1^8nM7Xd>N#4}NNr;eD00qMVS zCqghdgQ#{$;JQrRMTog;g@v<{EZ{764(Jd5Y-KF|i74lvhOHmTmu% zd)(10f0X_w7P173-zk=ve>daZ>-=f+Y1Ucx&d*(1sjd#mTSVANixLq2`horGy&t&^ zhra7WRiCbS%tuU(DVXXTelQFH)& z$Ym*mbDyV>Lg5 z7;<5TdwOo?9_PMUlyPf|J5qx3(ci7WEyG)%5viYg@Y^FGE)-krxp=Mp>XEYYvPYqf zJ`!!-kWS76R)XIYYg8TiN`{TGo%2X|eHfb_r;<4uXmbGf6Ri^DNs3S}QotavwJOSE ztajSE`dcGAr7#fudfB5iuUeo5?ppC2Y2jYof6$2D`ArMG_BSo`_MiIG!2i4bDV=^a z#f0RCAPm;HdaQ6Fh9FLs+%)-7{uK0jdaGPsla6@LT;z2j_b-12Cr+`c3vd5|zd)0x zz&=jHgPtaIckar@ma_;iayxC^g=W9C9J(2jR5cFzmLOI>PtwBzjNf*m^2XdHUj`9@ObZ}e~4!hE-Sk`()IVEmzsdLqV@PXu1S3pr8NE}H=_!qPr zIPoVq;lH=tfUNrpZSasvCmCzqX%eK}iF;qwXpOzQqc$gwy`H152Lyi}I4!p;uqti3(f9p`tRjZ~{*+r9#n}MMlu}9h6jOS9lNs zLFAo!{43o%fYkcN2LWOR40LY+N%G#oX+E0`yrUH|(e$)WChjH4)Tm>K-Fdt;eliUp zf?fi<6ZA%=OW$=z)x%lm$yQ+!{+e%nRU4UV+U{jsmfzO}@x@R|8D!d)TCJ^V&}GA5 zx#r9!w-p*<9dgeK8xIu0@L=Hj0?`=U%%^2{6v7GKu@6+MH+NX<$2u-6FFJx7K%>ss zB=N`>4t7$BBU(vu50eW4sz?fOas690=I*p<}B}uLvJ! zO}4+;h6f$yBxB&>$nTIsy9n|3j0|9rx@x~E9c;gFy<&8&N za!Z(Q9IK4SCfxfw-uV2#=8azgYfSS?)tZ6&ykI%4vj)~L)SaXcgl!BaT}98(mrdj2 z@$Yfh`;~>=CJczUJXDV};$74;Mv1xKW5VB#nYqlH< zyAwXo1S7YB8-Ht>JjHg*zyAWeOG*%I zC@tsKcQ-M9~?NY4Y|&Rz`ZJf_CSJ=11_PbMAW^a!NYQ_5DT zfG%pouP1W6xvn_s%t_`n0oAel9mv&o(pjO{w>fD3Nh88%i9Qk-4?_c#hdPV=p3J!Z ziaVDO|~gpeN4tvie`Nwm(aJ{CA!m z`n8`*`KJ|J37^qlVytAhRx$R4M&hq2cX7{i(3;ht`xoeUT>>asByC`~dQqd0 zg3AOeGl{^51R%(6xRfVOyXEm$0zJ%LsKBxUY~xeGOjYSFuHvot(9NxwS|NKs(5zAs zsp`+p0G~X&*xmd#>*5aMsY7Y1?$E}GARH^{5TRF(ls~ie;+o{CkwA1o!-8h)rZ?xclvm!X|tNU5sVj zs=wox8V(FMhPuBPh>YrjDJ)z`$?Ui?D8}ZN!}E_03)o;URkAaMnb5SxetNYoqXama zj;0I_7U9hk3!95;G0&F;KQnRzV)BnNJ?v;GPcs_1LBYgHXHw1hbID=2P97KpU zZRjhKl_=(wB)OTd&W47V+kLV%88<7}{lIsUE9$l8c6g(Lhp=NH;6gs~5)}exv#$i9 zN_e-awD^~Gv7SYY<^p715ffJ(hvvg51%a~!TK<$Ka5UDL36>y~H12&`TR)4;5Vkz* z!LbNE?06l!l)y60?B4i`!E>#`fTGGlbri;O89gr`(=0Yy_SV6`^sK1czH&aT)W{W* zWX!vwr1fn=r+3~s&^Ud-AoW(6fE;f`c*tj;4?wC5g4Qr0aQGR-2PgJ*-=NU?r2L|m zVUE9YWf3X>P!Q=asUq#;Eo3em_-kWY_Dnkm9r>0MAgp|;_c7%-uY|#6D>?VuTSR0wC5LI{i0rhunmpAL4Otqih zOB+)T+ANHVVnruP$cllxQ6ApCi=DNm=4$<3kc?5Q;j!#U{gP}QgH+S(6i*L<&ml~+ z4raxKv>D6y%DkhJK_kVitRq*A3Y`QziiD}Xqu4P!5*XbB379xid65X$+$_FBYstKs zVmxs>xgGsgkI(+z&V!9N>@xf2#M4tz0kktKqIZarC4FAem&{wP_HQ!^_8IL7qqia@;2=->X-iX^uTWlcG^lnrblRi zqUchpxB246)QsKYO22^zD80D``vFK87$$2_6JDMol)kOC!E?yevpjyr7$X^ZtH?yl z=VK=abO6A=pUNOnfE6CP49@-+1L1w0i zby3ubzmR95nsU^O?fqlI=NDkbczO;l&`Kln!F7;?!z*wEz0HCEe1-2y!-PrQgN%T? 
[GIT binary patch literal data omitted — base85-encoded binary payload, not reproducible as text]
zZ7%byN!ZSeK&|f83qSl+`jZasIm7*EgM8RZY@(A-9IDzs4Hx56cp^jl$RM7tojy*Aq^5SHIDYT;JI!|KmSt7)R}-u2sK)rv(tRcztlQ@$R-{=O8sEwj zm^j(_%p$v7y6L2p@}$eqSKIp`zI6sxJ?Fa5(2o}XS5t~sz}?)O#LIU;YxLmNCkipS z6q!`Gt;ag|kY-UXiswlOd2?xHMs7S`>u!oAJI3i3yX-1=&yr@38Kc!TB^BchF}YID0=VhbWZL-&Q2k(QjwsEO~lhX z{?bA=f%|Q;LqZVh$Gm2Xh5Aiiyfg9T5a1{yxdRJF9gW8{h`rsqeGdsSn;&NsLhvk( zmwc5Lg?~)&QYW}am8H4`@ zX!HO6@9F-I|BpARKMOCSD5-EhaEXDe2+(Whn=*0##-4tFnP6(TGHBjr|E51wp~eEq z@pc+MqsROh=F;jPu0S(C#WQpnpYo-0AoL9tzcLn5>Ni2!!FX{DDKkqSgoCoo>zAGY z${RxGZ8XLqc)jW75?kG0H=2i-exU1n+DcJj`bs26eECVMZDYLHo5_u@r??xRr&co z{e?XK+`Ca{1zHw`E=*HHOyGspaZ-5DIQT|eY5#&Klev#D=@;+u;4`vZb~amr$JUn+ z$ui9MxKBR}U3|+x3s9!;W)Y`aDt-7p&@43p_}(0E!_-m}W{dCrBXi8Vi6nZ=S13t3 zcVjw_HiF{Uvn2pxy6K`9m{RUhx5qxXhbqN3^{qM_|DNp3JkUGMzV%}H80Q{7I$5HN z`^8m=-19KVZwPR(@|l*Tu@>R~;SszU9l6nsH}TLoKYp9CHv_Vx6@}h>Qfc8Dw;OPh zkFJQEdDXy*tfE%I+%E(WT}ZNi1dEPFcBEu;eM?O(UrJMVfww@3*KnjyhFBVD1*||2 zHLY0|-%IYm*1$XL1)d7j{@E?^oS2AP{xD`M^)-A;6P_k!ka$UE?tNA1-RkqOFX%4^ z-Ht{}JsV96Kt*!{>ptKM?xy)9nui<^^MTpWjQX-+$)_hL6{euj~SASB>t)&~A} z21TCwMl-0H{{7nK5=x}~{h%wuBAcmbhq$AI*AuBWc8RNsNzE4GxdY&ZhgTzK`0ErG zH??tEi8Mir{_i7x9;aD@1RL!?h&{$ag01Pn$*rblX}~e&i>q<^8Wfk95VInZlg%$g z@WxN&{osS3kUwG5+zR&ur|nN?54{V!M9VqJEoGB;Gdhg%rV2z9!b5jxMh0UVcW6S$ zAu)+6k}rY(nixI7JMRx~s$l1l-(G)vXUZ4DSZh5gZ!qY`&41c_oj-ctXozy*6XI64 zG$;jw!(J??#;}E%x5^)abRoQ%);oWS&q5 zkNO@okg?Sy;+_Fr+MDz zCIVYej@YOlcEebS-Jf`Jov%+NIHp&Xts(jS%|a)H~@)zCj5Z?TOu&pRk8<+IgPK8R3AL8dSx)y^Nf6-HE>?A6}eXdgccM zB-9N2WIVNAUz|394>1`-O0Q+=N4PC)Z*1ku3=m>Y^+HbF`8ql%~#(a2O#+9KZm692Z_<-ik8DySD~uX+FmS`Oor z1{&dNb|HHBn`Sagi;TZVUHN)X&lJvdyEfcmK2D!2CnL(Ldt$#2)g}!tF(?xd4LqJr zzDmlv5pk1%iW3XcSX(jMEb<^UUCdLQl}hCeaC5q*_W9y0GgBW3n2bKuFBCz=&lLo1 zoJ^Weo+HSq{6vnM99 ze$jtnz4L)=K+@T!vCNVEJ#wT-9+GVzx)kfsg){PXsH&~y6!|Qs2U1nYEq6l`X|0q1 zU%(yB?SV#H!g0w)(k6EZ#d#dt@QHP38*ydR@(*Ez!Q0P;#8o|_SA7RX6sgW{T! 
zUg}E|u${RqT^<#-PM2Kse>Zv!2PtEWb4;YuP3{D#bY6~pRQBW>qjMKB4mo*H*O;*p zN^rha`O3`852twB=G{}5U?bOi8<+Y~;HcoQL8pD&(PFKNh@5kzn4+%IK*TqdnQL_x zyS&}`{+qYgNh^VMP#2R;|}k7S<>X+p+r(k59UP`KyW-mg`Ad$-YjL4mCxrJ6&b zxdLD6VH%&+!820z0+e#fs5^(3`pFN9kz8u6%?oUt4{o_)898xU@3W?EN%Qvtzf7si z66`;lvp;h^@4m?N69frHV`_`0{G~-l&V}0NL;gWID56ZYo}rq0(D&Ml!P z-kb?KNXn1);q5+n^;arjuE^DPXg^ubr z*=&YGvrcCl{Tq`?a#w#_A)7d_nuykj&)=_D;UwY+J?-W^$T=&TDv3835#UNJmoaUc zgZWImjXS>)EPOf2dMK=ItR~u_b>qoJSa~ePpZWS|P1)7D81ks@848^ZvZAY`E zpHi*y>>)p^|C{`i5z>8Ud+R0)n6{dHlgeNVAJ77tN#VIwJ_&l}xCYFm-PmnWnE@AF zr?bk-Cru=BK8&1_JS`FlA^$U6IoU^a#wSd^^n+pSyBeSBBv$!g&4=Rq&>OdA&g^bU zTI|F~1xyJGzdDUE+E)ul6;Nf-2fwt48Iv$w9bX)r{?yW_W#UC5j z-846_f&j*?6oN+9z>f!}E^J3CHXFDt>YiV2jkx(05!|A+!*WcsxrED4*5gFk2|;7Y z3TUlP&f6k{o!>-qnO#??bMUE-3(dc)3>0o3`U?LUcl(n|>KCOO;80KfJ`VxvCQ+LpUuSf{ z!@gnpJ)vx^yzVgkyTGFBD2c6=)t$W=`*_@k<0>QOa?rGKf4O?$zT>x!J}kyLJ|VX^ zwq!&q-5#|0TJ<%tH^_IT>j~k@%Na=ar{z;&H<+%#0r2<*sK)x*P~WynAz-Ik71r&H zVrzZ9lRdcYVhRu@E)TR%oq8;E-_Y?>S;IqqLBVDEWwR?Z)>Lw^hgC{o)@_n%k*zy( zQ)bZK1y_kXqBgUd-_3gjDnPn;zk+nuIvnkp%LR?3$qpMuG<)Co@ba4QeT*nByXFKx z-76^-{k}4$%0JvPiz8_W%@=s^t(BoN)Tl@7^k_58J=V5hOewF{EWhMuSw1hNI`C8{ z`~;x9$RT5Z5|U~cPIz0egLuk2=Z>PtlxYMj=OB$qo4NUqAxi%68yZ=vG8`IQ@ ztqtf%NfU_b;1^OVB(nIhrx;gB%yen(xLAt+x_oXamcTg$uo2$HT+Cm{qIR?% z=Vy}sV8@r=m|J632*g`iw|jawH%^aV6i7W!tQbRJNxQkUhWed~UqBz{Enw6F*cV1r z(59u&0CDxGML0JJ)z%6f)3aP`xhyJgH1Z)huCz4jWEcCb_-b`A2t9>KljKT3(b#l@ zwPRfHJsXn?(v$aX*&kuo7at(<&dJCN-%nw@aHr-&^m=SyIh3Rb?=H7D!moz6(*K@R zWNdA9ozS#2lyi-o?(|Qt)87#c=@AF&B<{i1paXs*Y1*g_3u!%03g|izdU}eq-Ob0f zE|i$}qcITId`rXXarKiJA)XWbbjys1hO-dlV+@e~hIyJt9ko=2c$18ZE;(y)rleyc z6CSP4h9?C`x98{}w{z{NZ$R4SS@9BD3y*+y<#JSrDm}c`Q@oaO={B+3nxQz2_YF^o zXv$f+Daq4mub&-sDo5!vkt%SYxH?8L7%t!6WV`KHZ8uxEq90$hEm^YSXzZ$2!jwt7 z+saYbL_UscGrmvq;LF=}Pbh2q;8?9{*)ZalENu5fML;;u_QTuCZ%@S^kI^5o|Em0oEhi&%GPgImyQn}rkr2(r1Ij&2<+ zC<0se@jVQ17h68O+2$3X_-XL5uB4jen}IIopBu~&h)pmPg}0P;y}A=lGt(yY$JVy41}9!*@>a*7Ez$R}GjZQ?>-eK8 zQq3zrErBs!Sn1X<`JS*t?}K6%(ZXegX>$BcipCh}L- zsLu?e^^-bf%AF11gNt^v!?R|l1|cHf81gr>h|aPvMr zouS(O?bS2u3C-M64?FLc0w?!p{WSrN$b9znNtJ6KmNErje@t%7VXaC5p6c&8JFp}G z6m^vccb^1fZn^OkqoA_&^vy!JT>*8hUM=)p%;KA#U)A?4n_H%O6yy)If^c<_rdeUh$UwLo&WviH}+x#f%6I$o<-w}yJ zGzCXgtw7qBS$K(CE*8&iXvzd13NL^(>!Kdxo9uxRlGoEs6OP&`GnpAf``54_muB|` zI)O)RE_d#|y2KC**@aDs-CG(YokeCZ&Iv?SX5Aqul{P=oi`n$if_C;s5GrOkNpbI2lYnyM`%B(w7=Ugf4A!eM_GK?D7*?4}^m+ro7VrA#nU&dIF}KYLhxEO& zb)etal(ZhHrOFLD3~eorX>4AW%E2r$aT{o;opE{UC#Cj9Jp2T6#Y-6A5{YPVMxd)I z;`W+Kiz6GB;?CD7>I{A5{;6!;+n8*fV$Rj^(3|gNX{TEWR zf{%*YmnS<4nZDUt2~t!n%+z?@MvG%$5$9aFY@_RT?

kQpNeNyX*5E)Vl(=<(rq-#vjd5I}6p1$(rH*6}1 zrwZyPKDp2Xcws~)k|jZsw+pv7GU%&$Kh1yT*66CDq&i z<;+hf(J|020ej);R0zN8$918t?0eCQneI|Bk)ktI|i?UPV)nb{V9OXFYfp^rjyY%7K?BrjY(gS+~jR znGy7`{f`&n%;f_Kh43Hb9}|3EBQY#h)3aXb3ooS}NjR)BSP! ziEY;wayjyC#e2W~Y^s+b{T=f_tujN`KXT#BO+GYJ4Qd)nV}+4Vk^bBnL%EHoI*ckC zPBxC%&97sK0#k=5_b-iyou2@qCf&_Lf%~$WG1Fcs4i$ysZ06+dSU|I{A)yKU?vDDs zYzAz<8H}5)cNI$lw>xsVQ461s0^%sL{uvmvp7_1KKyS&r!+pdvxLP+u5jFMBYsqZ_ z;>rD?JZMhvrRdtLJ`33BqnaE#eB}G90ae!(_QqAAt%bU(#OW(1SXFms^10NVK)FC; z86;n(_2&xcNiTu17h^QRFFi>tH^`^>>gmiyQM<8-vzgOLC!6D13@6MIzEp}Y(;q#p zSmj(w%b=*aQFxO&U?*`<7l1V+Y;XDr3V*yW>T)6S{m*CI)q)Qsi+gjvYLW}cI`b5V zF(fMqV9HBfUuy^}T}`c}TsAkQ%H>}>emN!3X@BmY5rP&1=R=Z%Z6p!ch_CT(5t`VD zMzudqO8Bn5p&F=reU01bw&a{-P`%pK7d;B9n~u|mQ;B$X( zf>Avv^?jX%pF?<~SyM0H4HB+CuZ^%?f@zVv?6j4t50L5DmPcej=qwsD(egV|ziM)!bT@dtp& zDuA01AFSSda}<9CmmWn+#Z`X%4a~mJ&X%6|!1{-Oc8_TTY3Dnf!IUF^1p7xyhy16g zVj^yCnt2ex`@+9 zB+Ux_hki}707`m5**)X{w!f4Ic#@Uy@dOkMj=u?=iX-Nd=E&CO1bJLkI6(WGwTPK6 zm;aEBZq)V7yyxarTKsih--0%Hp&CB)U#60fkHEpR-Xq6hr!u+@xZ=xS{z}!$-4gzN zA2_al-x2(FIAO3@DO5mVQD!{{afHW`41bt94H-~fDJ&+U=*Q7hXj9`r42$ofWsX1 zlaIy+A78IfZkHZng8p)oR56R-lz&S5d1k3soNEEL6B_6fzYj3920zin?7~aZ{r#Oq z4~Ng%ssG#^DuOmFec%S`H7ms}R{y2m>g`9a}sVy8S^ z?9l_xFp%U~U}v;`ak3zX?PlkhNP)W_GfE_EF+NvUJ#D8qGyO$)`56p_na(>{!^!$U z-d~^<9P_rzd)R0UG~z@qKJIek*izM)@U5z});^`Q#|Ziw3EwAnCXQDgD6GVB9XEzh zGXO;`_!~%MTSXDjQ?%1qHbr5>=qBgmwjcG3803tx3JEU_1}#!9G9;}QEev)f#pL2G zIWh2?q&|X&%qUGD(9<=he98Z$6mvjF!3<0=xee=krj0HxqyX>@I4QicL+RbZ={G6Cp!Jo!mo0mJ=xNgh&Z{-C9p~Mb8w5A z3wr0M#U&?`B{RE@>vi}y{6{){jV2EqjruYtZYFEL`muYh*XX$V*PBc)P;HlC2u^VDF6#?SHhO)d5p>=uW=j`^Id^0I9ER$DLU6vTqDyQG;YNCE>h zxq7^J@(MOE>elq*^7ZGBCNu7aj>{%9*vfMg1@CLCpCWMr`2*1S2!BUmBe2utNfryE z?-Been$>egC4oqfko3DhV@Dl+)TrLd{yK2JA`guNWXkYG)F5J;A`l4b_e6jtUxI_! z_J1Pc?4VxEi-0fgX(L#)Rfk+l$CEB!K-}w!q(o#>g(fqx3}azL&vEjIjAa5N)9hg?_?}x>niFbq= zh{|s8}bO({spw3XZ9SfN$H0NRiRc z`(Ab|3SQ#l<>HMtwPs3gyIzw6R#O`}ZsyGvK928ccv>f#129P9i1v6+&~|$kKs1l8 zu}8ew+6?(!w3BJtP;l$l#T~G=W{}enWU@*z{hu(%hkdG`q1jt6UNYI)*<_U9HZKRYX8BeAYps4wx~O4rry@WYa!Xr zOwPV|%g0ADI^Fb%mq~qXt-_PrGIQa*d}=dcOzV6_pmX;PeH`Wz2g8UBehbi%eL2$I zbK8;0JQBkeEk5p+aP!dkHt)lMDr34OS-l85bk*vn0e+R(LkcJjtZvVA?aD5Axw}@M zi7fn5)3B3r@n`hN*+heoW6#p=VOqAk=IQadi6JLT0I;W(Ejk(H7p(o|YYqK<;h9{P9R88rpB}(F zLy2HuFE{t(=fwNTZQa4A`E60( zp6Rz`ckAD7NmK_HxaKSdeY8|}`0DSZ7MJcd@RJpSY#ILVa1bmxtnZB0Tu|Dz8{gs{ z=`oKqjf?qs&iF%&X;b9}?1p!(Om^x(ykCJ}YjU#jy?oI8{k3Bujq@{G0o3(j62fmE5mgonV*GiKhb{n8oo8T#Rv&;B|vE``(*JD z(-E1GZJN;9GoC8?mAT)Kz(vBA9JcNiE_rg=#RzVxNfIPze}jpj7Sd2P{uzrX2G7Bg zzyM2jqP3Ei{rbe2zSU``t7;6%TMo&iXXSWh*{FA|e{EBEeQ@*HOTeM1vvC$5lo=KA zxy;XAoBo%@YC+BHKIU%r4^b8fk%x{r*tQ*u6d=vudpdG?Lxd&uQx{DQ8lv^JJE!Fp zloPePBx-!(6M7##c1nfEfk{JXG=Ju58(btrrzyF>oDFf((SGav$&Qjvlz)EJVfqei z{6|F;Qjaz|uSNp>T}E+2Zi15L1yjr65be-M!+;v(mF6X#Zl>J#Tu713fb=cZKYsBC zq>d4Sy4m@XAvH{Qh*_NL2?t8oANvKqi6*<-cM&XzF{Br*d{MSC!HzQ9gW2VGUmbg{ zAmI`|XWY$&emj2&ef}_a5#GUxnWV8iUTR_MauOogG!FR%_|<={@qk}%cK_y~=<{&7 zy)g^-0P02BPErAcK6&9)C8DV`*AoOvup>H(Yi zsWtpOb5?q!kwCgc@SA1!7klTvFQRi$VCa4as2T&!c{b38WIO%7P;&iV&I{qQxo?C#z<9;4~bE#g56P49EXW>~9 zLj-$&boJ^csH7QB$Iz507YUvrFcPX1mX)`rZhAg!x>8pvvRAZJ9FTDoi(G?rDt^>M zpu=)iU)f)%TB@6A`ZC{~D8LNDZ_>JzXVP5Cw;=W&tvN=jHf$G)cUNjk~2TH%(rkQSp$ zx}lX#a#cP;Jse_9A>+}MQ&2#CF4+5yltvE0xdsx7JwLg{PFkld-9{%kcwdsbl&<>- zbf~5;wHZjdLue_*6)PQGt*5A8L7Aa1J<@FLwy3;`rFWWDfH&qKvia68f6NNbvh}aw zetp|=jQ;tK@uP1j18DVX4<{>f7{#K&8HePYvm7jUC4bAq^BS^)xYFRt>8uxyYtmGx5 z|3Xf5P$OZ#-xSd}YJF(eD>}`2QN51u0g*Sk2<*~3npB%1rj@}?7)c7Y5x3YcgYaSU zILZjuJXXFuI}IQ7k6jKaG_G*+TIaO1x@=@9iXf100Op{(4@uf+6AGj>#`kADTZNk4 zx7=zI-ae%UTz_yz=Bn4cS^M2E>@#g)lfc12s`KO$Cxs8)W+jyIEr~IZ{GFrN-ASBJ 
zUfHj1OtBz{Y3PR!F9g{{lJ6E`>Vz`=Zq>n;e{UMzC!bR;mxgw6w6mO&tZZu&z4Tpg z(j1-%#hbwSodWZT8bf>Jo?Xt!+6$)F<5N>t4)fLkG!?KZAok7rdv0Cs<{+VH(XBg! znv8NT`f6t24L55Yt0A%k2Lub~hwzQ69-9{qE&nwjXfM~P)BW)))3(Wfm1jVmHvu>3 z|0jIoA^Ibyyib>kZRTu@KG{1MDZU$KNpZdF?EW@@iT#|M^{Z#jnKXaodE^wdCS$XW zP_;^YMoskgewxcVQxiK`=PP`^;_l0E(@zZagS&!^Q5B+cstHKQU9kN zf;jAH1=o;j$g|qftWG<(E>We`E=k`vy>p#qr0-p;{W2z7_r+?_WKkprD~fj8PJB%3 zh1KpI1n^Dr-|`rmTYqwRkm)*0HmLiP*S~6K+HmpyPad_#FBdOCXp!y}gIJQ>@uqT; zFHx0{UV+=Zyky!MyK2Jn7$Mm0pu!*p4|h5H1jWGqF4!~$kPf0C#R{ZQf!5^tI3 zErdqwzO7iaZ_dPS;DOn-j&OY*5aP@lI%Xmd~T?<&V<<__*dFu;O=twUKY&V|N zKNE{6cC@R?$zY)mVuJ0D`e>3sX~AHL7fnYK>U*Fk{g{!%k8ueS^1%V#Aq}eL z;vea(4rfh-!HoY20Fb1UHs9yxd$d z4w{CM=+7hsQIBxE9M^BSdac#W$kZ}t#4Y@$(8?7E;nla3ZL66ODqkL23%&3ZxOW57Ly-q7~g1D+QXx=ZN>Z{TOkd1*bZhr#_I4Y!uox>0I{mfg zX##`ACy!R6ojbs8tF<4y^2)nEff)@BnUB(;$6=@XXd%z zb7r3VyZ1TI9}F{)z-D(pyYKQUmgWjpR}J=Fb}?rAtK2&gCpfql&RioI&hYIH+oyge zX1O*&qYEMlaa3nGEs32vi)lclF!snMg4VU}sDDcX7jvn4jY9n%OL_e)T>NY}m%!W4 zAu*phN?3wXvCtaXZBSFBTTcQk#FNcVM@3Q6>j}2Tg@&>-dGsAUS)QB-WL71M!a}(=0WON;Cfa$o-e9xjOpHau0Rlaz_!+tg0lJt%$ zx>eq{Y@5|DPHBm(@lkE^iN=379ysG)EX>1=FfYGTS)0?dZ^zte&NLofO@7OjF(4MK zCBRFrU-ZosI|a?#4R2-rswoH;zzKJ5KhbxZXqJ1s`P61`u0}ydBfb2w{%DP)-1WYj zsipcoTIzQg3xVd;e>aAl=FjAR8)p13?DQW1Z!;kNY$+V>+$GEcm%%R?L@4K;Zln%P z2mmkwA@!@mB%ru5pr7E)ho!_=>U|1MyWGN#?G4*! zg9Ld&r3`WFiS;Ot7IK-(q@qkx`StbWCkAiXz5&%)@Zlt2?~&650O(MVSx2%5`$=GQ z_h5J$wUU$ZrhKf|-qWxB>7i-n2kV=NaFuD48~HEoa~Qyer@o^I7tN8Ij@!uFwqiI! z^2#ooTP_ElwR+L@=}`Hk*<6(LtUvRZ|5SBI63`Y) z!E;W8ToWx--C@Ej4gz*;$$mx#*6v}I&8nx-KY}idMOko_=3E+KIk@Z0L&f;DGZZyQDH#!-FghVza9ds!r<->3H^asQ(~zXXIuw{ zqyvgWKFUrRGwP@6#nP7+Y^;7Z;T&iDQi^iPl7lZ5g3ytXiNNNgiHu^<oG|$ zv0)Ov_{~7uwgho!)8a8+f&&c@J`h<6@fKds(`v{h~s$D(-iO z@eG+PkIQ$`km$4(&gx;EUR;q1V9@KOU-M|n-2zW#$1d?4H#wax7DZsJ`2!h?fQ>F7 zRZzGavWJqw?9eSXaElf&VDcIvfbqKwN_=}hi=f(2bx)Wg+l<~U#Ju(<${@*a0zsc= z>p=`4aPCSSxq_hP0$?T35*Wr6wqaBz4d+K?mfazq0Cf~=sG};KQa|TjeWm=kil}HD zH4lTeA%g8tXM?9D~${P z-uR3@tl=YrI%_z%*8Am%pu6`deu*MsNdes;=-LSNG;;bAIGo-`K@i|~?LG=2hvc)k zY*?cCjKz}MWOV~B*2}ITsQtGT?do^;eKYC_3VL%%*Ud`Zd2b8qcv2o{WRexp5fZQpcgAq0xhvtz%yKyjG#QbDDQOj$K!+xv_Tay4tA| z$1o6pQ9#e`u1t~Hi+cE$+CU$4@l-RaGu5W9`%A8m;ClX=hceggHEo4^k-Yr%nzVVz z*^e0y)L@^J$y_Mw4-*)0>>MXP?Kd@Sk?*|odgkimDdj+xl*@u0#Sb-v7h5OryWoY( znkZ~+5z!lK6KH{_`;g{+@*M|PK-JBlW5H*Ft-~gsf2|pRvLU)+JUvYCBQ}t{5Mxge z6BvFOLAV4%VPnPD&sDFMC%inT`9t_`JTbKDt}^*zVXCk<=mq@3tVlYp%8hJWysJ5E zcj;_47mqd8zY=P7Pz}}{ZoP*7!bUv%4O3VjfroW;rzjrZ@oQ@Pbj;waU+xvHA11uI z#&)Y3I*4M#3}(+21==55re87puYVreQPRlX_&LJU<`zl`g3}c`$Hy4Gk*q(3J)3*J zu0_bj@L7bP&!>2)%Y6o%f&NTw!ZQU3V`RI7;bI+f zfp@1MmriVdJh@V%032#Jwm$j4Eqqt@y;c0fM7WcoSZzc0#!l71B~BjaJf+Sb^ayLX zJt*V$0zLx%mktiuiDB!v$1d>ZYG}?>kGf8dr~lfmJ2BzOf@P$A#%Z4*pK48eP)0p* zU%{4<5;fiL9Hlo z*`0Ojq)d^!Y_+-9-lXARJaTDRq2-u<3L5ebkRkjYuqosEc(+&_t^Eu41#~Kn0=em4dZmq z2aw};Q;mNFCSsOrVKqIO`L`C2;d-B|YnLkohf6(wSY7{%O(z4UYQUY_bnYqnE~Us{ zHhDI=JBOJNPI+7F>X)P4i?Br(S*pHjZeljL;d4Dj(^A~zS}pX;gIIp?JuPV9U&w8v zQy!9ZPTSv(zkeKVP-Jc6xw%$rb}Dgk(W8X8Qjc7IK|MF_kDA^v8uM7w{W#(udV6x+ zY*(uP!Y7-~h4a?J#swjjK%zPlin}X3lijTv7&Cda)Tw!nq?cg0J+W z1%v2&NuRtdLa(CRQ4vTkga$Y+q&1b?f#7^P_LCwTi~BA5g<^4;-PM?L{M)MQnXZnk zeZ3;D){ZZ8K&a*JhaTV*8VoACHHCB=lGh$zH*mylM3+}wBJB06kI&oCd6OI!JQzu~ z2ZW_${iLjcB#(`Q%qubgw zUt3~prpX^%pJ(PRsHRFk$vb>ZTh8m$vz^iyQggR1@?Sk!{F@OAZ(5RT?tcgMErroQw1kr`V+g| zgBtnnggO52X?+=2AHIG+@FI@wGcOY=y{3(E$(A3CUXm~F&*imdbI=iKeWGzmTWNxUQ!mw~t;Q zZxiOOL_UJf&Gl-Y$4|w0)$vI09%S=Yjem=gCR^hQW1)~F?=XXy$bI2N0|NkS&Tad+(MZ4rMjukg^*q$77&_P*$C|!sp{9U zNqu&uJE63`m(w$^&nPAk(T(^U0J1~ZFvJaNcQlA0=G-9JRA%?QrFQ&M+f#B}$H1>n 
zq+={5a?mqat&j1e#W?gXY^11z6$xx&JG|+M+4{$&>uk7<8zXg}#0yM*|B*LM8+XV~ zWs)F?Eh4@m8SLtDsl9+Yk zb2qG-;kEJ>iF7^a{=KGAg%br<{&8|#6@_Z8KtJS?rFg2$XNzkN4*l(pk{m!r0lgkW zSjvHifOWnW-$~`WCO-C4lV_~6>xsjs88?0jQs3~0Bae-OPz)YLWj4fp8SeOP3iyh0 zB|mJ%a*nn8mUnX2B{p4NpE>t>Jw1(tI+sQcS|;w2+1Ux#iLtBbIMA)1`UBzj^b)O& zc8{k>Z4a2PKA5Qd&DAQ|CM>D3^?i+XkJCZyY#-wy24{*syfO@;5W|~bCJV{|uun{n z{JJs|1S*O5|3Gw^z=IK9hwRS?il=s1no-kbK!nzQ4!Snvhas`Jp^h%!Vcc9tUReUs z9J$cf1gt5#2X>@WJheX#``obgm+^e@%~#}>J3f<|;-)}DQTTsrg8oNRZiYpZIR>pM zXiYl!-BGGKSsHI!jbdH!&{@c-`;2*1i)n!K>^!_*d+o_PO*^F;F5Pyirr(lf3zoA> zR)&rtOx9Lk3dE9hP9~LKHhH;rO7D&f?cPtI2F$riFv5GsRP4e-Ly^m#_JgomOY7-* z+b;3m&u6o~Tn#Kl{8ZKLZHyy&6ZXN!2L^YmQxW-;T!3vPNp0Wk?W)#hrRL)dIHt62 zmS4S{K{U%R=o)SrLghRxNWNb^rk|Nh z_FZfd#d>cwku>03O?;W1Y!FvfuD3LIg9VNmwpD!9`CRagDdZVZf#%l+nLUEwGKq9T z&3s!j#3u+l;hp0uf$k1gLdL^EM_=YulCmCO_-bv!@)?NY?`@9%EDQaw`WO8xuZLw| zaON^xyALSI60HrUF8iS&w;ErebrPHB9$U@92Q>4|ewYO9Zqws3jfp>TW!yMD*D`CnA5E062_ zVJJ&xz}QF~x`PGrMU2!?d!e-wu?S|e7b}(`+;zV)ySQ$6pwX+$Ve7@<*)OB)-a!k! zgW*Y5c4@GQNO>KfGsd*#Df;iyUjofvhf{iVr$izo-#u1O zkQ{um6%{|g5b#mw%ep^A5Jf( zi>w_65Sn(|xlYy37kwZ3t_ms{VABKjqyuoB^145QB(X{_f<&ga(PZ{<0vtnmgK_d? z?x3ElS85fXa!9lMy5il!`E|B5U*`L$R*Gm+cIW9yj9{JY;jJIY=_y9)iwX$Ri1Y9- z016e(i*%GZ>3T}Ob7k;Y;(|d<3#0HI_k$jcICQxNx#v2oM@f8&q^8>5Vf+NfblK~C zBPCjsr(ccxVOI4qWRp-uP#lyVVz!3tK_1@P0auPaEDA^$ME77UG0T~-Jv$T&W=H-H zq$qxtoK6H4l#4lFvbNlD80rha!ruWHnqZ*3UWAXQcC&)mN#6-Qfcj5{B}Wziv{)|N z0}K-1|J7j`0A@&_*HQx_2HVeX=ho;I<6<>pte%oa^g?fL+#2_z@Eg^R?q~be zR#nBRh(wDs3*5c&?(S(l8WSzZ$#6`TG4UddoNv{RIsjOJBhv$l!X3u;CER_NvmYrj z2np#v0Wnqg%8)BR0`gDqNNu2q=e~B0BL-SUlT4*Q1a%yqLi!s;MXXYf?2vp%@>C{} z#+}!|Yg1eHcPUpbr-m>2eLMvvCB zW9s}Xs0-7&11weF9jw~9+K`;v}xpI*OzY%+QA^9+xK+#BCGYMe~#F~o2n zP(hi6U&S@!ZkOWwvUK7_^H|EHj(@l&5Ux7Hh(-VHb<$f9bx2z9)A+y+wc+TaoF-y; z|4MzXdv@@d#qHy%i*|3FPe_)iZ18qJ{Y^dvI0_97xeUajW!FL(*L*isuXilYMA)4; zmweG5GVIWs6fWY=x70!5n%^B2$d#!?O+KQ}{sPmfZ#=ajG>To-my<5oE%DGVOw*+G zis4uvA-a$}t*7BnakoF=*_VM>*8^f`p*U_l-^bb6<#+Wpo|mY7DmAz3gJA2f=`w*>9M~%b7|NpCk9nY zOzq$?D><*?b>2?nGCLhzm+u!JL4YE9j%?suXD#|-n&5*hwg(-TM;U8##<$Li7psB z+y`4W%;lC~;%om7qGNZVA7(Jh*h)9dSN)iQBZglRMqr zqoQ8AkNL^Y=jml^s0w9=n2a@&TFu3fP@cGtvP z`7WQdmyw0-vX~u!q7=(-92+$j9gV~pfy<`nuT5G5;Ta_n>L|(>hOc{oJTo5MYIu6bdo5OqfSf(9hRAogJR1KI*? 
zr0iMCX+)c*SWVTq=%^=F^-Mv+&F2}VvK4rya-_&<$_V%RYLe79KwawX@nj%jG4Yyo zOV(nl-_nZfYh3w1vidj&3i$Mi+7|VvN@HrvfOF7C6EX#iz*|$u_0$lkn^zPx_LkU$ zHt03I^v}3ZZ|uQFiYnZolm~E9j+y;R2M=*65twSm^lCB?W#8YE>Hv|;7HSzCVSBm$EC$OL z-TR=yi>W`DM%f41W~&G509t3y5Q|i^U0YUU7EVyK z&bV7I^$a3bd;wzL#&}St*5WkYhHQ0p^XpOl)c5va|%IuGYt`|cb5lUy8je{yvCoOu>q4Kh2y!+`15D)T=#6?sZ)2wDRxX3sRG zTo}8dtOq3M8P{XZ4J4C<_b%}1HtFolglC@q;4=!=w2OG95 zuhjj_b?j==L7vOQ`a>{}K7HFF+bw3h473Nm1a_i)G5k0sqnT~lD1f1UGI!=cP*<5J zXa~Je$tNMa7;9svxv3=_2m5F9dVH8DWk!DO{2a3-UVz-ZH}y?;D;5L={&;6f0s;UG zM$l3RfUJZmnzyBPwaR{sbo7u_c)j)q@_0#`n;IqZmPPoVsR%_KJpd9EQ6LNJ2CY8h zI+-Rl?`pY|$J%jBS=3IQpEkn*zmF6jZ8yK0OK0W;fSf^}alwz#JDJg-SNF)wRg zKS=MP$%`uHPmjB3KO&^5|63q@$^xYlU;&fqsJ%#BJT#GfwbcDF5xpILLIHciJZrFj zu{yZ=JoAY#YiAYBmo&r}9AFbMleDQ_mL9^inzF=`M)z-w3f-u$p16_al;;wy>ug>m z*Eq=aO}Z(i5o>)OLJ`2B$(;6tE5xt)gUiYBSvDInTXl}+UKMqNbeQq;EZ3N}X|kn0 z(!M@-NA(R&7KIzvQvf{A)3U99b1V%6yv*Kj&!M$Ai4x$vgS z2Fr2uGpM^U@qFf1`wA+WdI8@XQ5!@RB1UMBd+n}#9JlKK;OowASidsy?JV!?HJ3;a z`8O|=Ua*Y_M*wk|i`ZeN^LaSYnVMv z$_%n%acwM$5{8EEzcsrH@`0Nmr9IJn4bO;m?4EBB;td~QH9c+&!BI2LHgMjiybC0%mB_vI{KR^&~I4%SeHDP zOp@r9>kobxq%^Eo8{#tO8WMchyNDU}QY}9G6Tx~Py*bxLCrm^1hQ2c?`zC$EFU z%ABmQ1lVsLXnL}3>p1Z|Ly+WfDM^X~sV7@_|3tUF zR7CbpUR%2lcL%R^A|kP@?!B59h#}Z`BT=oI+@l|+qg%Suo+C~6ax8CHXy%viFk*|y zrzN@@>XCe8g`w>2b$`o4xSNPfhkUT*# zJul{L4xLYW8j2d%5;V;{gcjThy97bXd*CrL+Bg+^0E>W~5XH$8y^f#zG=XS;uvkuOBJ5zio)uas7o?Cr}I*; za;U1RbV^<914igI{ ze&;w()2`At^wfmo4rAsV`7}}1c~un8H{PZx9MPR9KcDkRp-gAu)?T;}tnN5@VT&i) zb4KJ%uKNU-7WIZ66m2ULS^sOAEio1xU!98=Csv|7$9|fORmc{Z-+i4SCx%y}WBGi2 z1Cr{~kG=;)8GEP5YM>g+xv9*@7N;<1V}GlW->I%S>XOI{|IS_!$43X8zgU(&1a1Xt z98RFHmm~5!T>=l;gK|-_($wU{9Y$rWF#}P%?WuyTAWk>d9!rsAe_1=eomUrT z&?csLrgUxDVWsgVFzG;cEuO)l6BX)5)$Q{v+#7vAIMrhsl71W;9C@{FYMs=}*+W!Y z4s17$dD=;g2jlAe17QntHc34nRk`0HIW~ZO|AhC&Af5iYd9j!?UD&2=fes`%Gakt7 z0`aAaYb!9}AT}-hCA2%AHtV;dg4spuUJj2h&r^r^8%t~CAt!0Xk_^yB|9^h&hDw>` zrf@^6;z<{$dr3*OQe`Zh4}(OKj%&C9W2rCdY&c&mcc)ZY*>3bA<4CiF@6&jJy-iie ziipo~Pq=g&5}7ZkKYK!h)&qn7CCd-zAS&Wz??qS;6PGn;yDg(?uOF{e^t?2AQh0Hm9ndMjpPSLo_#c_;UZ)q%R z@6gzWg5w<8DICxD#1+b}E>#%xep33-wRl#?cb)5py(`B{3Bd;uwh_QlsN@~B6ICq{ z<1hBLb~y5gZPwoNhjShGg>uzXX_?oOmSh=IVD~I%=gjAH{!P@YEndK<=iTYkx|}Au z5TK;9d4E((SmB?|yq^?~SiD;+u=Oi)XYq0Ye zF6t@bAQsL&SZJ`)a{iZ*Rbj%w2mazLxwBe>A20bPc52oE@}>ld$4$7ZHEd-F6urBB zf{KY*-zKrPQcAUZh%ZUm`mT-(O$gr}IW3{gN(9);)ymK7OegD%Badhr1Lk}9bsBWN z*7IWQ&V;N7t7rX2(t*X{?lj_sYv6u$HX@^IyG1Cd+x1D4SEhTunSTvbE)z(bpK{cJ zaDrgzOe-Cnm)Ii$nWlqsC&6O- z1fL2Fn}_sVWDrz_D0@D7!?Hmulm`}PW2FmcwiS<~?aDn%G+8!hNWn^3)G3Ybx7D{k zzQwzC>2&`io=czGW0}K!65!h;S?UzH(V^Z${4g9cjx7Ah9a*4X=_ly8eIV!WHV9lF z^ou-$3?bEp>lhQapkBko3uaMpe%$DkDQ^4C5hBbpkl~i^ ztktdGX20zzk-yHLjbRmV>#PWsD@L}@4h(oGziCC`DLqKWM>sWd|%z--9ESMMc?(MylocqT0Ef@0FN?(uS~luFHcL??f%CoaW4S3rm%M#(2$~dV(ke zj>p1xniYV?l*JY$Tls$PJj!5f+ru$croFi#NL1Hx{_VB|FNA=;1rEDCWGwWUCJX$L zIiN|q1W6d+6<$268VYsgsOibeDUpN8>1l92Lz};Urs;r-*|H}|265j@HpdNjEHEq{ zu{T~VsI|&WtjXzf{-u;MU3q*PW)zl05AI_5l_h$jTJMo`E+2j|>d}hPIVZgkjhmBF zgU&8r)_@bSpSZC|O8ce(mW?h*7E4HI6~J+Y6%pMH^jx`yZ`H+pnOA#LbNt-{t@)Ug`@uv$^~YG{}3`33N0a95>4JO4Hvj;%8z=;dYb2duy#`#HRL~8`hAb#{+14i ztfLli_SBeNHjougg7R6C*#4 zuKJAoh{=Bl_i~cSKTtSr_uAUPQG{InZ|$9eo=CgI*2>IEJ<6W@>K_OM?n(h%_SZ{- zF&!8I+rbcQ8ShF4Xh)sNPE;lItfu)s9HUupVoCQq9$4A`0Gsg)TK-fDs{a-6GYqPy z6kbPlC*{!vp1O$%&@a66y>q&Rq4vVRTeXsfKTyh%E*8Lm#t3)LnCHtPozU5=VUlcL z{T}s8eWHl5{J^irJkDyrK#iiAV*8?`;&shsT@ z?5nn|6WeZ+T6q%tMF)cT{2!gN|L1qJEF*m0@^jA0NU~Xi^QBp93fQ>cn_MG3t|wU}Z^di$sH3mQ%r&=Q;PzKH3Lr=u z7_O<61vx?ip%|HO7KtC&RA3Vd67KdIC0cb^27DNtuvqAPtT&-M1dIOXo(S3nB^XgQ 
zkXX}w=e89knytD+`4-J-uS~b(ZDu|xWm%#9_9an5K3~-@cmcY8OW4m*rJY_BGOE8bOznth5g~)1-i>bWVsignZ_hJ+d#51?4 zB3fHHF!)QaJu8}e&rBPJFxl{)hxEMXdc6MT@~m81N>bg}($e&1fEiy#k&j?OPQIt~ z_^lO{)kKDGfdL|t#k^u~XkbHZ+Y+F5kpi|kB@w{{%JF=m$ZJc&>YU|d10p|&W;SAO zX`A4c!b&#lqXRB-qXIp}?(RP1iJI?}V5F^s1S|^RQ4!5{Y5iEifKDsJd++V-{W<#* zS_5L_Rg*ldP2g40~iU!>&sK-;Zp4GR}&9y$u z-*kGdf5D0-{vElD0NQz`7&qf&B)B1x$tUvR!g#+${+h3~gRgh;;+TVPRExEn#O7W+ z-nJS}514RMvMa^{K~n8K^FbKc_`ZA~rk~_LZ@TE6nK-9DX)_ndvRbJ7bfWcSOCz9x zzIRiu4sPH!RT>*ugIFwzJE`ypq|IWkV3rGotI%L!XTPk1Scz%L=o;4ltIiT3YS8M} zQ%`tHwtZUQQO58spD6{Jqda$i_fU7%gM@w`&&Pk|nA%52drZ=!uh9vFtDGxBLy1P% z9K&`b6PbT`+u_}Wf*|+p4HQwXL9@rU#f@v~MbiI)IQ)Uo!WD>_ zg-Cm1nlAWvHbptCy_tl>n?F{di&o7k_21V_56xFWg(2*42^bNnQu3O$LGG?pLwTow zJWk2Ol5IssytD6PP`~Xriy_hpL9fl;8$srBt0W-s$Pwz8`p-5#n4tr!#zJRwKsu9+RuB+Y&(CUU{ zO@8v#abz7?uR`0YKBLu7iR!KsO7}&*e^F)Xcml$Sw}(-Ma|MRG#r6!t1$9O~v-};7 zkaGb(!avPg&o@q-1tr+J#h(_^7iTra9r%t_7kLH=blk2hFV6!9W-y8^vAf*9xfglmW@ z7j%Nvx0LGeUcEQsI!WjK=Awu~yNXzFPTB7wP?iAumCQi&d{34phPrwBg>uQnS?^@H zg0En!#4-J~8UdQLC>kx=2^Rpb&nBb*gA>o~5JC+;vj-I^#QLILsnK6U_+E_Z*zyYw z$YTcSW{%X2Xf=LH7KdZ=m07AMrsNG?S9L?PYFjZ7cL1qCf_8E zuS`W44m6!_SfLGci@N=!e$ol6oYV_Ira@*u7`J@E2(d&9lGH-)s^`5e;$Ux~Mtt!w zQ?Ivu^Qn~qfhygH0k!3y*Mn@rUK@WO0^Yl;n15Bdf$|(Y>Tf9A-G4;@Xu8z-8cj<| z4{wUZg_Ie|-o04!Y{}!WGnH{MeM#f#)FT`?a#}##6hf?U#pe;WCoQdl#7xs}X8cgg zp6go?Po2-c=eg}^DDX12;bC=DLOqrOxvO<*Zfual0cginO2IPP-kTVN0c zk-~KnK7vEWmbgzgM6FJ3g%($)mk;}Sua9wUY@L}~tl@I4CA}#X3Aie!CIYNBY9nC_ zmcXrQYMczZ%=8rv4c;dzlZ%HQBTSmIZ;vs*-R$na3y}Z8u^PW@he+b!tWX5f{BZLK zLK3C=z+|#nn`1EaWV-n9Tv5n0;qcy)^+maqlOy+dX2v52>F>q-W&)4XcQB+1Wd94| z9JRGJnWEATOM=!&Y$@YKSAK7i-XlgaHATd&9wsbuIazqmMfmsSAFv}j1sZ%n0iK!4Y(j9x zIm~kI&2P&o?(+qtS|>N^f6{u$NU0O~&Dio(Z_5+xgpc_8Og!J*Dtg;FP;jTh)t{2b zkVps%m7XGz+kJ|ykM)sY-}Xk|s6A|2=I~x9y#PT+VmxK14Y9wsL*85cV7F#F8MJDU z&}(0=$NjohI)iKKZg`=7;F$;Ed{+!?nWaMKbRfvfi-JVZfTzDhNxW1zieT?9{=vtr zzg3dg%=aZ*iv5@B%$G{M{9{9&yU8Wpen*%t7*RT~$5Ydoq4%x(k$B7AtAZfEvd|N+ z^=Ey9uUsG9_$qdj`m*>fTUYWlih9CGqSKfIgw5O-SLQMsd)x7Rt8CUY3$3G`-j6jM zvvy1&M(30zV_&k+aw8%Li@=x+*9)@PB#g^0I1$=))wS`f-H6j9wlMEHG>1)RF{QP` zX2k9M#@5i9-sACP`Omg24=WK5FZ+=A|Mvuoo&pbKa-ZuGkjdllC7i|Yl=+g>LyKUe zXhKQzSX9eU-M&GZ@T>wUbu0C)nDe7q!{cirsi9u~n#$1rMG(NUB8D*^jC*9mk+?f8 z$}37S%bi8!b0d{?DqD376CMTL)q@4rmTlX|-^^#En%$X+ziQV-lL`eFt2=Ba1e{lO zF&!c3m|U?J({5bDgcgar*FJ5bO>UN@_bu@sGrb}4?fwgG`bYZ*;1f*;-Cs5)b2<~0 zLeNZb!7J{q+$YE0Z|+&TS%%cSsnPQcqQh17Sb4anEi_Ix#KHN&nm?tK`t=e+SK}?3 zm9dFN0k+aWAa-=y@=7rKp~!+h=%iT-{cu0J3Q|dNfakZDWu<~z1lvw+ce9OiH}&kE z<76Ety`58}awdJ_2RdyoLi?jn(-|JD#jR(-pO>Y?lKn|0-)%YZvK{@~#<6g5!pE$E z9oSiK8LxUJ(U;-x%@^frubQ0AmOc)l=7Un!bI@q(#1JL*o>4Cmg|88NWx#ouADr>K zOsOE>rgTov{hZdPBOuCqzJ3kv`l{_@vP1W#%}I^M-a6E9IgvX?|1BsQ2N+<9?Grv&K@8hm7q?j_2sXn0K=e{aSItN&A4z(bv_Mp} zW{2?wk<@n*dNscB~A z_pDiktV{2_5%Cjq1F2!Cdy$hB|DJ{ZM>hI@@cqI4Vbu$1|xKW0>XlnwKZ2T)kqga&`EphI#pNVta~5`q7XSeO~L=a#4;V`{!E0 zlOF|2iS&7YAa5^y0`-SU0IBMGNkcg|^@|<+{3TvFnruP3NrZJ!c~TSwy>E=H)L^W9 zzo{B)e6H^FXnz84s}%OO)^Dx5zrx*vBubyf9gGtTr@9*hUfqox+dQIgsZDvy>-=l0 z!qlZrxJS_E=PhsrK6HUrgAARjY~haLscSJbC00(!+inJAmHU7w)!HWF+X z#w!e4zCr?iRC9%&J;Vas+I;OBA15eK*87npC1dr0)Nz~`)7R^Tb^hcAD@(qZ*6 zWKP?00D6k|rk;k=jBUdWR}z)(dm>A0c2j%C)^ATW#9f%-3x0v?4Z7i-c`}gZNR(s= zH^vYZNlI{SVp@Lx+7Cb~96(Gb`d1SjworW5f-B5$yJlP0(xn4+k zoa=f;AG4=8X7+h6oPF1JNt89;LXIJ4mmSFJ`L=8{2GLx&$-gdMBXj(D*uT!EQd` z-lhApJ;t`_|O-lYM<%4lopZZlPxPxkbCZhrM%gaH+IE}7x6|%f9S;E^Ep#6~T zYE(%#Bsa#vm?kLR=5?i_jp_-%>=OQrfbjws(E$5F`Ri3R?EQA`cnnzlmtQcH#6aPr z>-#iE=_CfS9pwY{;t#}?7(yW~lFD5_{;taNV&T~NarKS#LiT5le;^jMl8;rL7s39V ztD!O}zNZcxB?zBXr!NFX>nrr1ue-kc7|507aqp6ydzg@rfmol^nQB3l+GToL1j_6i 
z9D_4jVs(SnHBrCjO5*cpyJE~#Ka76+U@WHKvsIt)WvuGlv8r^ntJ4gO581-Xsh2=F z;Hr*v6pn4a`fkaUi=9nL1zEM9ef`dUmt~~W6S?uLeH*!;-WUlwU+aYYOI_TQw$g?ol+;mnym!Zl7W7A5fa7iQvvWVV*>{gQElHmBId zXV5$jJjrNDwQAKMgM}qOBI#`kT_v_-`~8w*1BMIvjWqT*Jq2eMwI-igV!q7HLF{+^ zI4J>e=2U~$6L5_z(@VS9ZShyR`PurhisS<@N!Ow4=Mg9WKq{%AVKWx>mTkm)8id;0 z>v*YE*5Me8tMRdUqHl+5f%UN9=SG2}C)r&YoQdk2n)hzjex62z2A0A;!I=>lzDoMCh-Gr318e-mx!XW zZNjV;aLp;GOQJ3CFQ?o)vXL@M)$imtG<1YrU9sFLVZ&es?!o3m^(L}-g{aqyWOENq|-pK#}A#15U(a0;){&x zal;7*wCk?FGoq%9C(ds(^z+W1coo`alcD{Bsp^6E)DNl?z*M7XYrP|(eCiVTvNkz@ zDDGd4`k1jU^Wb`wTbBKGqXCYdsn?FWD!i8x)-vq1gva#a4)}9~JC1tL&dIgrGxNqm z*p$`_Gnt|v`_OIPVKAH-hHr)_yuuiVQ0skE{v*q6bA%XZIR1|7iOqs_cCtzzDZfju z+{K2F^9ge7%X^HSrT|$NwX(bQE)RaIxvdE=9NE1va<@)rd{$Wf!v2|P+F494lqd#k z$DD*4QwnzAMk`H?_F?7Bn`Znt0|!qj2qK$O)3q!!#FYp6T&Q4fzarY zHy=^9BQuwOw9q7%-^zxoM^Wzbz^n257G4f?0=U~x_tD0b1hC>oNOr}gcqJ&P-)-M5 z6mP96U1U|Adq7ed-;voJ9?1Az|IviO;Bs7Fu}L3GzThAU4f|~NSEKv(IQcXQM!etC z$Z^6O8T!t3HgQmd$+5Av`D3h@ooYiShilplJp_5X7F4l0|7))y;TU0#R&g>fu=S9A zjy&%7a!UPvk=xUO*;xa3#g)FAL;THKm#^s0**mEV$)y2U3#I7J?W>C91<;rWExYCS z0D60K3mhrgUy+CBnpb;Kb>3u-T|(HH7o4$(XlyxFesISwSA((hcK}-K)^P{-ObfBZ z<50OXm-OgX&plJw4Vwzc1y!E#4|=6!^>lvYX8dGBpttlbp{pL93x@LVY?Ta-Kl!+P zk@E<&B+=M!ilO@SBI$Uw%?|^|vn*_wB~X%Bh_0Hb^ohZASQ=)z0l8#Elj2nV((Y>UDqZwV!z3a8edF{B?hAG*VTkC8zeivCN}qJn5f% zKTAH*X6^N&m;Eb;{s}7_j--WOU7e$-@?)M0DODvun5eU0eEsBH>#?AURK0#3{TcPP zA~q3e-@Vl?AWV(nQZ%Ng4rg(9CJF^KAccN7UK+r+_8Q++NZBY}kk>LUrW0a?zz^}p zRHkG+X;rU_85NDHVv8tfjFy!joZ#z5AMn3gRZ%|S)z<0FMhP%e5vjU1he;8GHVfzRGw0*Qmc3H+RfN9o|0c5!t_E(wNLQ&} z8kJBLaIMk(VZNZ}g4#A$YO=%Bs!p|iK81Z0?-oh-tb4AEj|8~e1RN#yz=}Un{s9s7 zZeeDjXVi#gZY{}t-qXR~rO)x9z6eeSBD>{Gaw2jQT5&o(CHd`?*p(daK_e^uR4tGN z>CGUd~A08cuA%d$)l)CPL`1`s}%-&3RQm+~hZ#_GDTyXMb0z5VL;-UW!!%ZskzXcUjWl1`JEaJy}?GrjFp-m&tP zc$0~8nFATy^;6QB%2O`lPqx2CS~cen-?-cMt@YjHz6O6+qXsaZ(Xl^SROlwB&aKoS zSN;LuOo1H&Q*}x^-5tigiWOvn&}rgtFcK{sV>g;sJu#0M(Ur@bs`y&TkRzqno?}#B z)hq6SgV>q;gc6Z(WFcEJRbjGK0L~25p%^=zEa2I1dDzt=U{(Nf3L{gf^+E)a@W{lW4r%kh`eP> z0Zl+u9lCpX7JUN`F7NKZs$lq4pQ3J8f|PenV#7r>P5h?@U%u6`)_4jU;~^WfR4p9# z5YHC--V+A|HkawF!|I5RRo;`4OLbNV2@%I zVrA%Xx|2o!GUtQGjK9=#-8-Rf!!u2ts5-s&A!Dp*(~~Tl&wJXwk4pG`e8r z_=&>ZN6xXY3U3wo;J(R5?wOw><|x_+6XsyG4l~)tQqG^7kW-pZHVqeW8f!@gb zE|369?;xP?lOSCMLhllKM|u|sMLLoIhCs@19=|#J%sStmJ+r@AXP-4|*8Ig$y9r*>N)oa)S(A11928C?K_vi=n&$+Gz||fyFGR4ddY1v5t&ig#px%yY^)n z{<(3jJxWb&sxMxD76qdKu4=m`;6iN4*~5e>Oho8979FNAkzc`?;gH)cUnl9x_|e>E z==qBW!e6?76>XaQLo!^4{7H)=&^l&Ge;1j@RhzLUYxA{!C*}6IO1msXK`dS@UivZQ zms?RD>U->>H~LfEN<8-@_kA&^Q(uZ&0P~DG8_*mOtuHl{bK4Qv^sb9GIWFl*TX^LG zc8@ehPggma!STphYlZ~;AOxX(W>Z8ism$T=8R6A)ug^J8EF&vzzrHM`d`B?+Zmj(9 zn}xy2_q^}%&+@Rj->SP_|FcA^sQL(FEemG}aZB3?^nORWRk+hJK6cKT-CydzNB*C*>W;_o)r4=*7UWEOH=4cJO5szU^FW$z1LIt+jq5YfI_rEr(K;}^ zT0-;&^I?Im*#t8B8Dd6H6tTit#|!`Tx%Es#hHUQ{ZAUwyfs$(}HTp8Gd@{Lya?N~8 zpt6YA<-p}!X>ubP1>Sd4wr;$>M&Gmxr)d6ozm{RR9LN1yZ(k8z2ohf!M*`!MFFMQU z*?`WZphgR|SI)e*D?je>J-7BK)qbA5?;ph-xJo5V5rs9YP>~M&z}RFj?q16XI*LLU z_#l4oQ6>3j)4e&PJmb`LdS9A@CxqiyUzEo~20bXfi&#@dP>IlKDJt%uiLUfbtMvdY z1!%WZQ+Zut$hpS%=W97fSKaD*g)hF^*S@+ znq3Rh8N69vo_;pcmOKCHhynd|JtfhDVK65CjiV({dL$6Ggbuf5$F6rUPJ4q{l?M*_ z1-ZWN-*FG;wa<5p^^M9X9q<_O7hP4wnf$}ekcVJLlwLrzu@z3&#s>P~isK4eFNQOU zyBd+xkFbFW{iub@FA`ZzyZLgUc3TJS(h3dRwwtCcelXZxSLvAbLZXSl;8Z-9%KXwP(7PoF9zut!6a(b_4G; zj$Ej+NbX3K1!DI-6uh61^#gEHf?+fOz?BV?3rRK@vSyQ+M+pFTO$sDu<-oO09tYri zM*HI8ebNBYqE{ELL;!*D=O;f)ybsngAtC(nH$u{ipGRTB&FCdc_Y)7-pqCE(M{FDm zH^Z5VU8pbft!amA1rb%je=-2bicl0CwiP#bS7)qk!UFoYV;%cKFU!zVr#-uH z|5b-QHQ^Yvduf%(^>CF(4_y$Ig)JMUBz6AS7^Sh_%vPa)y$h(buMNJe^o|~>b6p61 zXu0GO0>zZT*fgkDWI>@bBgEi{^_|5U&wtFHe>bex?UY*VVboN=pe%m9 
z9r(*O*?e-QNMbul*9ua;w4`6IwfH3#?c~6hG!j8`?oT_LL?l-rFrN@iX!{RK*rc-b z&a5j(bmOxHlyR0a4<>^b&(G>CCosZ?hA&5n zM2XPdn8B0?qFk}A_&96`!GAfXA6cR6 z#gt@S(jV00{*W%c*;1-#MQo@a{>I2$+h}@4bE?*%nM#PQG~r5E>-USL zgZookst|U|&c*7z-FEC({LK-!v5qK-Z_9&smBquOoVHRl9w)7vh}vtP#@xSZ@$BlE zxat~;>n6bbU<7HFj()S{#W3h8`2y?!x~V3GD0HupO@GdXi(m3`M{}3w_Luy|Sy)9l%R-N#un%mX=rBOL9 zpWDd<(u~_py*4-u1im%`dkbYiV^F}Zt_j|Jid>>4m~e*F&(S#;_idsIe@W}&6sOwr z{bRqYPf>MeZ=i3Hr>Yxjnrp)GefP5#mN*SoHO#j#`E)0-3Y>k zZc?z;Ma0x(6hK;REh`9icoHH8%>tJImERs_8yPOdp3>GZ)@tXK+<7|te48Ue9879R z&gQ>$x4IN+K9{KBN;&iLZ-_8UtkCb@IfxCecT~hdT|c2+9-?8O5;>hB)V~5$+R}@Y z)cr48SS;D@pRDlgXU9c{UjO+a(wx7X2SU*T+IP1R?yxv6OJ3y8Cia>v`fRtT$bc7F z$3nPJT<{le;gsX>Fx?)R185eRfZ@ZAAqb&wtQAqC-}ZqdfNx^+b#(T1P+={f z6?f$u-IMeGz83!wBk-j=np_WD?y3|yHy$9ONJ@yapYyiL#0N_z8lyxqc=T4ip0v{N zLS}N%@?dnBRx7dUHT?!DKS3-j4IB`#iJb@plt zN{9f`CE6>5K|p3aK?3H|Q^mKnwi;tz{%?+8=yv zO1EpyL7-kj5@geX0YsK+1s1jY2>agYg#lF^Q9gg+Lyy~t1~pL*9CMrR)hrMT7j2= z%&p)&$$)??;xoQ>N+k45aKP98aGE?$Tyo>;bGC7rVC(22G58-VInbe~Y*urztb`9h zX+*ybm)JWEjLs~&R7mqDT=EmE4OJ5kH4PWlU;|RmK^v&nT^3;Oi0fZ6iIF2|5+9Lx zGuzW@U6Gf&Cbv!M8O}15+blK~Z(TcAfg;X-#gDaQUBec34AD6YYGJi<1;2iBP22Qz z_GCS_Rol`2X?@51U2tE^SBN${)#M0F6h4Wpfwc|I@U=~z6vp1ps6l_BqYRF?S?dmd z|IQRY*kLNYlqY=9_BBx#ZnKHRnwb!!3Ol)3qABMB=|flAnLo(?e8hJjZEebbP9fRJ z?mc+$H4=qfAe3MyRHsKM3~P~@id%&;B3u!t(Cu-y)}mpk(W|?d+O&sm#77?&_exfT z->iAOP);+ge?l~38hLm#o%2^kr%bs1T^?(ze-jsZ_ur|*|2eQ0A-w?Fpzj*BfyRg^ zh$N#$t|#TFe-DKedV95@a1^T=E`TDNH0FGV**VFh~4f59@v*B$d) zTFUQ-AO??GZ(K3i86+AYTCX)?0-95xTY(cG2S!9_oEz&V7cN83Y0JG(5`0o!5&A4cSKvj3hKvc( z4|VDf&wj-uUEiDq^C zIikTqWo2Ww&%0<+US_G_R`omZ!;VTmY2I5RGFifq0{WfvJ{k=Zm zGjO4u>T?YbJF9-0)mr8faOn6p=qOK5fjF3_2pd&$W;ZM z9%baaVwuFx36w`)`Cl=;WFofjk6L?!9~q`70R4>G-;2AVxqi=V$Ra^E7OltJ^TI95 z>lWS1AI|m4J*G*#n8-0-CvDkvribn~HEe(@mLe7C3$8a4K32*U;T4HDOF9m?^deb6$|g{<~o}2Qc5eL)~AwazoiB8F~k`g!efF3ACGx< zV*v8Xd{v(|Gq2m+&)0vgp}7JrKat$Q_dCERkI<|ti)S+z&uR|NTA7HwVph95x@T2a zdocb)BF^WRt+m+oIcudl>fgv#C|2KNM!h`^kf_`4Nz^a*r?w`QqMG}G3B}zJ5eJx^ zsL*TD68PE*5(_~Zdwy@p@=fzuKbN6)XZf0vmp+Le$;%FxY~2A}z%Rp<%lg@2NV)Egv|C6 z$6ZIFk=3`i&9aTxp7Z*LExBEql_K?fIh}XVP=VZ9-(t98dGxqGe4ua`9Wk4uy%AeV$@E&p^?U}0YBaXPv-@|s~griL1vC-EQD zFUGGtCQm!;_x%z@46y&a1A*pdP#i$!hl1|EhSLF+El-fEkEQOygDD+C*t<|R$^;;h zNfOLa1m3asLD#$=E{eV${$h<;3kG&lB7R-NfurzaQSLP~6yzlT^d%5Dh$pR&!P$qR zbnm5NDcuA*VjfUgva|FS!sZ zY6wICf?A*Y=QjiEHD92h2EcX79racOUPcGZv>&CCRi+N)LG(xc4M~Z|!pPByz~&_A zCs2A)`42(9cR)+OmXue}4fN`bK@h6-q9R+3fC*2_gFmFvMv(ZGW9KL>>9CV~uu*1& z8bCYH$Ff4q=pA}*@LvJwM<1wF9opYw%Uec(?ZZBwRUPmFpV<$N?ZZsh2Qgx@vfI8-EEZN%&U4M7gEAuwGm6g||ypTvs??F#HC z3ZuQD5w_~sLi|wZUa9>D`89=xpn*JE;vFQssFi2J^7W-GEs_fPUP{h|G^5OgG=?kHff30)#rAU@Y_&`1ZWN0XFIR{9 z5>B-J^|y}`1t}6J5Ap7~l1B?{`-^egI;oh+L z*hqbMrk%G~sitzxrtAjrjN9As#5uzON2P{kSJcDPZrSLlG2OTlozVX+ZQYdSA-4gbEZ4ViH zaX!edI*iHm6xA;%b)Z`wNuQvGLGo{;h~ZyPCOEt(5t_GfvK+YWBej$i@$}5D?Ppj& zw8dnO4*w3^3a>3o1g(~mpUk=$_Q$0j?55>+H~X5TIj`jDM{m=vUPw&tyzVy>|I`juwx&BKQ^TcO8qy) zBOwfVmLfbLffGkXk)(YQ5eEz=MTV|^BUK;N7oh5PWn#*wv_)zDhL}OC*GU=3E=3}{ z>LU3dvKxPdB?*HIZ<=?}lJTwzA$fc4iY>krnNmAe z=s4taPpa^v&I)Y?*kdTz3q2^DCk3s7Aai*?LIbNd{5hO&mn_j^mJX92<6s3IwxcLiG&!K`R^h{>dqJz`_=I6Tr#2ONB8)p5LC8B3?xk^`{k)jtsRg zj2NGpwK!AVeO-~(tMv$1i7x}@QkZ;8pG6ut6XEnD#rCI8r3yQ{&sF730s?0O+Z8Mvdi1;r|I5_b|jt|5GY1uVQlZ(3DGQTV0-eCB6MYWzo>hegqnJ(eEArK>h?nJ;H=Dkh+y` zYLE=gmSuF&(y`gZUuO6@Sd)cH(HN{F&3M+m5v662qQ+BG=ym=ak7p^J3tSCo9iAyg zZ&|`b!?E#;;ZNO<+vMBF&jn2V1pZ@ll)DZqLMOY+NV1d*C8_Dck}+LN7RLyQV=2JQ zA$Q|)qSZ`WQzWKVo8L91w=E>uUlQn_sX>9d^s!-jEMmJh8gclki2B3{)e0jhn2;@1vq!v)|Pwbe`fVsUm;eRvA&dM)gkh!zF?>g)Pdz#74-&*m+%V!W-G&%Xv<(q 
zpU}Q5j1IbU8+$~_%tpcgmobR{D_F_@+3#fed-Pw2L{I~uyHa&@k{DLX(q$@3% zDEzJrfT^jjhrgPY!Tt>mRB9$v5-*T$V!x+Q)s2_Pjoq)-{W)_ub9XaU-^Xvoi@N^# zStsfNkUG4pnJj~MXfdQM3OaO7qQs_!x>V_h9Vw8nuv`v8CjdSk6VL^I5g1ftGt>M- zsYwm-&zgtlDj`FKTU2*eZS%yoCHAlIjY0h!@jgK60yV@!kaFD3FNXIBGGGTNXDY^i z!#Mpi@HfL3?^@f3$d#lh_gU~f4QgEwhrMU{kZ`3F5)^|=Uly-zGkLpkk?_%%cOZ*v z)I*gvDxRjorelcTUuf|K7Y@}jL=abN^(JQnjCczFiGZ%{uzbxi9Dt}!P#&E0uo^g$ zEBrZdM(esC(^emquIORzGNlryyaMe1j5-z+a0^I}LISEo@LZ;4LseNkkhYNvDlxOm zFtn$!k=bKf{WTx^f^GnKAq4r}L1KnF6q&m2?c%SIER%!b2#KxHD=R(QwO%XS*$x9mKB*E&OO+w>}urw)wVf#=D`iE!^DA+FG}lnWs(fp z@F~l6FvQEjXggK~olaP0^8Lw_fyBFgY`EHWoJFBK4zgzD)lT@e7JLm@Lv(iyTCNQ3 zlmLX45RaMPo^|QZb5T~3+aG#7$7yNT>;Ghb)pP#+)WI%MCs2Nzav9NT6ljjkHLNGH z11jgWnJN-ef-Z__|FQSXInQ`P>o%+mjnx+bl1I16Ns`~1EUc;#=Uvn+Ru7frEbyzS z+7FBgML;OKebo?(sqwwF#m(&c;EKk6gLp3;Rs-b2RM_?<-w-%6LDHBIfL%9?&>vAL zn!K_cbzPwaGJ0#?KDDR6(c%5RaUdOtSUw;_RsqVxO8J1_F$OF2J_cRPe|q)05IC>| z(GEk5KM1!D@E>sb@2(Yml^<_FL`-@gaCXVs5s7b^6UCD)q>4|Q%04Eoq5Zxd>`~}l zB9Uda?LOv^MpyC3Ux@0x( zSpyu3x|To!p+yQo_z8i>!>1Q3_reiQi@^&LSa;OZlr4OX=YrGJ1*)t$(=f&kyQh~p z_00H#gI1qj!z2uO5JaZBj7m z6G-iV*zCTgzZtpODw=p_5+c}iVoJBVx9jVv*aE8m(Lh@Q`jc^~pM{&rok_P=ZIOAa zP7n60Zw+p!&-T1OW5Aco(;_@|$OjCrkpwDS-z0rAx8M5hNq0%pww^m>iv{}nRq6KI zgaS?a62&M%Dgpfr`Y(+k*VKn}VV+ zTGtaXPn)3t!K_+Tfcwt}QmcZ2H2!kjpMJoJOkUkDiO!oRSxH0&(#_H85*?zASp{~q z-P8Z)G1NIjiMLlRy&mr$%jouF`unBVQ!7Vt8ep^q(RxWkH`i#{g99{7g0M$--|M(5 ztf%4I+=44@SgH_t*2ZO=W<1lc6O>)4fG>_A=^kjUbS)Aj6=Q+d_aj?PwT=aD*SeLI z(*{-z#YtEBIpeSQbiBCr*URfd88ECKVrK6;&LgVH7zavki*gu}!B8+WS6^oD+(KXT z^C^fRbvKW0>_LK30*y3kV-n2^#YD{x#&z!;tiV{h*ln!{IYu8|9>w}vSoHEhV->)j zD@8kOP#+(F2ep<80JBC>zntkNI>=vF&0hvS`Sa_*sy`u6Eh-dMWhvSN7UfF)Ut!VtRn5=Isf}t-V;Ry^0oUPn!yTi&%w2HO&`ZUfz7F&HH9zlBeg~xQ7%geKzzCAiw@q>!Q>GJvnqU2?bA@*4OY?#g zzVzh(|L&U&t8 zPn8S`te1M_H$SmL%+0wWXCV-;Q|ON{VE0sQ=ZIrmMuz>mQSd3CVDrfA>r2UWM~(76 zhz3SwvvGJCoihs|4T078L>Ywh5xR-jw$ZHe#h+rpvBu(NKOv90v1~0T-dQlsViug1wI{~Kx8a*-FoDbxAtS36V?ulHb!eR*FWK!JEjo)=>*&H6 zyi7AwOzTbA|2Rz?L>8hknSvLaaD_XT*jZ4qef*_REdT3p%27Phh_OC9>{WoARPgJw z>XrZ)q_qPPAGnv15zf#P8xG*2x_EpE0e-|3z65kJDu5B}8|Ovr0t|cKSVspJSg_Yh z;Tw0y5S5j==M08<5^n^*;{5fb{+P!9yY_cl#BKcL%MTp>;`Pb&5Gctnc4fcV5h5C| z!e`EUAMz#pG2|jGLUj>)M*@sgs8JYnR~8wzquES9Xc!d~B`#4$Xt!<=e!G=vy_{1e z%Ma7716p)D=5!nmfp^2v#qQ=9d2j|^tIo6;+e>u&;I?bp>+kUm?v{Lmd-uCWr_S_L zCjzUJa10523~JF5HekuwgqniR23VC3L!SYY(AQmg93{I=ip7<(f61meN`VetNI3)D zE7yJHe+Ri?U{&mZ*jUR&VxR5CxPSJ&h)oEu=J7IPzpbC@mY-|#xdLx3ku1#yzof*y z?!-YBBY3vIrhBU*E5zhybLWlWs7g;7qpOkCq|}qR*D@l(5Y*~$;Hfb!l|Q5bjh9&U zu8hH<2(}JE8JwuTKa;Ci=9?c2*KgHRvx&rdl5U_uvvR3d5CWWzz7``e>`>CHV5mOy zxyQ^=JG9E9#e>aH&9dC^cYw{+Jr&o);32o78)3^z+7dNdc{6mIo14$9Eo^}2uD}4#E=Y^Q4x!*LvfiFNT_G&C zK~mKFCKQG@OEyCSl*L((P?eP!^tsuMYhIE(9qAscC?N|DvII#Qz>PzI(adt_5#9_b zGluGo2pia$Li~BIv#1n97Ud$=d^IZ1yB<@>HPD}OP$u+_G-i=3ACJzv1tA@SO_@m& z1!S1T8OhF*u6+9nS3#z?uDsqoW@~N3myWmicxJ@J+ZA5incT`1)CB49b6nVZB-FuJ zzkRB*35|^8Rs~d|>efdbPjG@6=)6IHx1LK0O9_z}d z{p%}YQ-?Mf4{9J5TKl#NeTpDz9Ah|q=)}1&Im^Ki8F?-1=X;R*I1F^U&ORzlrc3nt|G7hC8L5cc-=W%N70?#ISP>OC2 zDMnc!rxncj%c*P9`^kaAR7+scs$5~XNV6kYC=d7$LICm1BYZ)e;D(FYk#~^D$q0Y< zymavUqLya5QlgoUlv1t~4^5-sS7-1aG=v~#O0Q*$k`uejXVm*3fR|G){ zMq;;nh>Pfu+sZmOoUwi{evsnFcYCbBKu-RQM|!_clAw8)*uYM z$qNfKRx9zUXClFb{}(Xk1L3EFbw}(N5~Vf}Rf}D)bAU#QjC3c7BXT#VxFRR9dByv) zKXhrz%6uNOuDP!e3()4!f8IJ;AOCTt3^nr*>WRvPh;E5ay69%1xtgyIevuk`mDhbA zwd}^ej}>2Z_w%;+eHukN9|Lv{&IW(CeB}3bc>OquE=VEVKB?9KpOjsK~_2t6Pc;)Temfhit)|B>w5WbgXLa& ze+8OovN?WSty(>Y=TRVa~A)YTh-T#MrYhE7>LsRRu_dhVoP5CHzk@6 zV$L!ucZ2xceT3Sy_~d;rXAJ)aaYKj?02F#m?V@D?}SlDzurEHExmp(JvEzH5OG7N1C-+DBD`y|*P%CNn 
z#%ZgL|L^)>Jg=K0a{NNac6jkfkIy!2NSQUU42{F@Q#6j)FRxkNl4O^xvRIg$CLd_L zvU@c%*X>>s|AF<(I>+$;p(n=wy%B>tO(?jT-Z9G@UWyCmK*>L?lcDNqO|s#ioR13& z|L{ERfm@YN8V+0D7ODcvDT0_c{13cO>d4F`WP41^!Iz;WV+os4!-Lr_Y6TOqJ?Tc& zH=n*QUpVxD4DK+)ga6@E|4pdt|I@Eg|2_7vw_frbEbjvKP^NSKG`xRAKibHJXoo-I zIAufKbmY$n2P6MN>>FdC?><>p)nA}5J+GPngT8+nimmHTgV5xP7%bi+$%5E1L}Hk# z@)IRgF3sDtHH>Dnss81Rc)C0Me)`(&?BBB^e|i0w?j@&*YVaN-yP@T%opPKY#n=+0 ztxPKFjhmHg4|QGSpCxAf!c%ON1Ii^AfJG+(bWCYKlCut(MK%I24XYwKwz)g&>Y`ML zB0=`4^J@_AmbdE#mL-7^L@mnTKVYLzNC8oD0m3=c`MCxadwD-?QxUc^$b*qHysb@A zJ#AR%cg6a8|C0>8XRsM?@=-bv6&~&LH#9BV78$*)P{J2-GuEgBaTR}s>or_TeKM0y zx6qsqkz&Y&c(GI6I=VepEpRD__hk0!j=~dH|Hlre)xxIiF4$(?`ipfFHt;Jyn*<8v z(A$)8H7(WA!zhwI7S*xfQ2feWMExs0zd@NsovgraIkoeXgW!(9(m6ySL(h~KGhOF3 zMWIOm`8@uH^v`)+_bH`gSlZ)825%eXAn4ZoGlLdQEegRFH0Y7xB zAQRzDE;K?TAlsQDK5FlkRQy}r3MD@?{L`xQQUm9`;T6$pNFZ2Mni`Pda}ZpAPa+O+ z{;+J-A%abtEeG~;nA?&E&Bmmy&`5FZG3T|QCZDu&|M*d@o+swIKXwMDspSkcQXT>I zvonZRfPgwIMrqL#N($j~&{M)6et8{L*H2oEs0}*$KB>|PD);xZX1)9PG?)8YUn9o} z9R_$tqTy6ws1(%|Qs<5-U#iuL?(k}*do6J6_o=r^mKpc zmp5Iu@*hx@v*bDp&I4^s;kYWPk3CX zN#FEFefjm38>@!Xj;&ImO8RCeqS=U6Yd~xJ1~W(iP7KCHPQD5u8Q=GAHTsVv+?RLb z$DP-_`5uU#ezY_kt=_%7t~VvOH10+0P#W^^1mZEv^ptgz z%{CDF)p#ZSgSNrYRWN8JWaJE9l%0yuN<_ft7KF%E@}Tpg1s{S7jeGmuDd$+6b*P=u z+@0+fJE!Xkky}zy=5(B$>F$GwQIY}NV~oHmQ|O^&tj+MpQZqkSD zP9*UZcoILAfW#0{RvX`}0Pk;RMU#u0lymz*L2=Keg5c4a;nsX}azuxtdhi=oqe#Zl z%i>8He=h4<_@6*V2w>SUCWiukf{{n_3$6e zK+gnp`J}&;!x}HoRVAyL_H4|0M>;rY0Q)MzYJ7Y(k($!|fKqmX&&3w%bG=gXIVYAj zoaS!0FYFcbH$)(trF}87S&jhGMOr6vewE@`KQ_JkVYr~575Bga^K#pF$9tasuUg(y z#xCkwE#393wiKg#O?e+Ezf{84n#D63D6};9(mCJBC(&y5#x;G~s;5_`u;!_%iPvrMYxfcYxOSqSt-(G{~{4D;LE_yq)vRi1W7Cc;gL>PQF zONQKpIvkn_z*}R#=lbVv`MxyrRVfm!8|URbeX)nTm@`RB%r`YeOxi>Osj4fST$brZ_i zBw0YPopoz=Dyxa-%Jk5Yi&pE=^Hb=U$^$0Zz8I5Z4)WEl&u?GEBY;G z^JVDy-9Y9~I?MQ^pV#{SgjpUj9X|AI2i#}vhUv#+8BmAYrd$`w1g3sa(D&d(OyN^N z+oRkJ1Pt&5q)1|sWQ5D&iH&>^d3gwkwRbm&HS^*oBc3d4fTONqn@?aGr?3M-v=Rf9FjJf4h*61b0T(tsXw$@Q%A-$`^bQn+{ zsbPdVhGx3)abpN~lh8Edmj$Xjyy1DGH#Y5xx=!74XT7-S(4-wD1P%IVReCry`^T&S zQ;M#D#RRH(6!{(9{f50<`7@!)`r-7vZFVBddmYuk?nc}uk8bm5j$3Z=V|6^4(x$)w zYhS(q|7i;g?mv3nFSJp~BwZpcr8`S{0`_2{nSG<*Thzm&q6`1B(pN&(FDNEI%KYJSQ!;Jbefu=9 zTSNH~J=807)uRqpi4$vjMkFb9gaWXgrD z5v-`h32i{gekz0ZZi#@3Z_fRMSfBusor?qR9C@ zBfj|a8~kHYM%pY~`>~8wIlq5A=6p8WA+l|6rW>Q*YAv|3=!ufh?srFu=0f6zmAlZtt`DW3_#+6@6kOfG|j1n_UEZLQov%fJz9|$G9kmwD>GnK;D_?!78e{SPU46j1$6|C(I~z zUqeNzAZN!ggH;FHyfmf~l^n?_-sdXZ@C~uI>YpjSNVXFj<=GR?@{hm>A7W}v&)>?Z|4D=LhqnvJnoUx;nWhT%a}Kv3o>=Q zrA0As0D1F^j+51kE)JUies6u-6~VoQwtg)@6JQu)(f0Hqo~Y6iH`7PlL-&=-E~RRA zfx`3)P^tW5r4owfKs<%b=CI5uW16$3vd)itpB`73RJCI+8C=la%^$vFv9RsQo zIUCzKBS-)9CLY%+4EdvZ1h}81{&52ZC+NO@B>J((^m-?6%GNBZ?W2H#nA;%!W^&&p z#CGb}P9aT-EwBvb!grDzRnc?SasVfsx!0bN^~nx3uZy3H5}A2fbjI4=U{u&7<#{|! 
za<4S3;SwvgJNYkiBGfSkI)lJm4#9qQj6b;dW}s<8A?NOAO>*zNtb)Hxu$+MbO^&oM zfArx{IN(#Ul6cEgObFIX%*t)JJCF~>ZpqTm=Eer!v0S?CqtNy3#=q`*B1e&iurTMX~XV2)x;@qKvWSnpU&)cKkja`j6 zx)8}A-rfACqrImRE{*Dmr6h>I_1C=p+===yJX*TAFZxlBIq=tpP+dQpgO2r>UBsv0 zBDxT>T-`$F0@X_w=Lm441LuO0Zyn`rH5R$OwU58F-tSBLVDM{Tf-DN4EmZ*>3vvX7 z&xJRCpf(*){GNNHIXa8EHGEr+tvr7R0s&Gg3ts=mptp>q3Zfy#@iM(+PSGJxw0)z4kL ze7N{2+)N=amaFl7e5XA;-;517B42tGXh86_^HCu#vTs@(>F3>rT~9_Vino$w%c89c zqa;E)l6t6RrD?4M-?FxHkPZRIuJvr-tyx&6H_m5NQ=j@sI@bW}-YxoF?bh^=&QH$` z*>y#3KWh922osN8CMhDr8rL1M4!?)q)pEv9WylY_b!VNo9-s=hFnuv+o*;7d4n0I@ zLX%@33e3~30g<7Oa5qbNtYpdvcRF2U*7bo?5J39tMP+a){W_OIM^ZL=4DBVF>;9LJ zR9MznR`%Wk^Qvv+99L)h0^3Nlh|v-k?w)2~J zXNfG{K6GrwytLkN*#rp7DiJ~y?3(w_4#_3?&}6cx2Y7eFLq@I(!fqA?w#E)4?>MRnmbCFYz)0e%`4vHRz>Bd!hw+C>xf{)J!hul(Zw;JwtxPjHh- z1%x+%u@_kWla+!MAgi#?naF#?*gt%QJ=7*B7hVIk`J+@N9ltrOv<@y#JP4P z#nBlHFEx?1t?j)vb@biElT41@S&pI4G>d{z!$sjD*et_C^{1PR3vCufs8iT(2RGeD zeB=A2cXSUHio}}3c()OP2p1?&Nr?zqJ;)!;n&mOIN8wAWdbkFt2~TSG=b&u@C!W`Z z*!@y4=_Fw!_Rc)@^5kcapIeA?ByZ~L0~gzk$96oHB2c1%UC$03rh{Zp&VVrY5YZbQ z2)JK$QALt9UEvt^cuGt7p`rhmEt{^7dDgqG{WW)rsn3Z< z-BIi5roKXz$$$_930x@9$fIbDyB84DurbC+<67|yW#`YXYn7aYhQpt0XFCzU_n_;>^6Xb%h`%e}gN>s?n^>2GA_Te1 zu&@x77=Vk7iL%bkKIjsSR^4ma8t{L1^_f_jSh5o}P7SORrwbqQeEX8gtVyfvQgyO6 zdq!63I(zlgmqem;f8ROpXNEixPJ{4Or-tuI%bFtnuwlgu<6eV$+`r^Y-RAAXog7Si zMXn|DSQ`MRFxdkbg~5|3MNnT6YC*9ET8w#Z$ZL3GT%}d7{H8Ik_%UsrYv}2!tf-J9 zJS2#@)exL-j?@wWhqx_a;;Ucf5+Vl97^@xr&eyNNbm9~*wzS`zE`S!QD>h5xcazMtCL+d06R zv0TU$RpGZyEo6;Ui+iAuM38)pb2v5 z4ivjzmN6avB>~2~N)_F5^5qB2_Mzf4qIgXAPbB_kMNohRA{uB=OkNgPky9*gA{XTSikiB z`dTv$nNZLZ{8<-p;u_<>8%8;|5ZgGxyZyfF9^Xj>eM%w5ot7GcBqqj?#HqKs^XX_~B zkMiG8B5Mu*1A6->;O_r_4ZxBAH)}%o_r!ngvHTw#a{rac8uhU!j)hbpqev8djy~kx zKC;6v6F_igB;udqP$6!@2frib@8su0E&=9w8Trs4E8LWq|HQx*i|l~zzb``X_*Vd1 zSUc3#rC=yC8k3Bu>Wayyd~epmD#j}|Wg^0!t~`u;zu$0k>)`e|!RGs$qC)=nEx}%E zz1SpO0v`da29^Yb3!1lmjhW+@hu!>vh|7a^gCxDTSPRCNQq%;5kKb~$BDtoCei=67 zf&Yu}5tDG{hhg4^;FVhE9INL0NQ3b;qncWw!|&>mw{4d-yd=<{ga{IaK;QZFN{u{FImIWy2>}FTxfbI(=h7Y zLN}4E6Zxn6HY$q}NniC^7o1tOB^G>P7wh{X9Q9kI8#h@%GLL=W@%D8@*lsRiXvxjS z04Jt|$kR$D5$WCrbyC z`Q>8W#O^0-i=GSimE`y%Y(fJa+53sx>y!`TXzp|^aK@J+A_nU+^tFG_v00|U!DZJj z#-rW91PT&P<)?2}gQa}+Wc=1BoNhiL5&0`l@CwgLG|ls?kA}ccHC2LDA zjxN$}Om$mwz27zFT;i<{6m0Sj^|RSmG&>Oh0AeNuEiFdFPYr8{`rW}i+!Pe449Cy6 z(uAdIOHFTVyehpGP9vxrSWWb%^t^W5^#t?xm+!V061v-e9X${@Y}v8qr;@MAJ9Sez z%(UZ_d8!bd^F4wM@fv&@StUebLwD3J(l#sj-Pt;7PZ}8Q%~@2o`SCzKr6GX6$8X@S z(EDIsX_kT5{11_IzpWo46EqxhNb1--jVnW}chXKABX&v^O9iH-SX;t+dVjqP*)pKc zAa(pXuKFpx2b<{U4PzQ6Zm-zco}Md2k-s23SYj4yZS&o*EEwM~ zMY$&BFAr~80%>`&$-U&uOyR3tw_o0fi9*4lIcTJ_>`0P^>rM$QkW| z{r)A#qz+#*GS@9hqVqaWO_#gcH0S`r;XeaPP@AyOZw~7z%dj0+< z*m3F5`JfFh*+<&=CnoOMzGITk76sOOYj-p-D<(wlmcNz!k6V1l?7jpbPJnRzgz#RYQNzA2UF}f`gi@4*!ujf_{VJD$==ttR=aJvX!3i? 
z?LyWyM@+5=HHE}cM^g@Gch zpmoiR2N+63RYe$&{kZ+``r-GTRXJAXNAh`q`%f>wT_!FVopmiB-dW~gBulr$xlZ5A9>Su(n3`!CI-)hkxl#`_G_s95@$v>6*OZE%#3kTSLW*HybDL zY*hK=TOty`92t6Lal@&@EOL(Q8`saU3H&Yc@8mtBEAzvz{}atHy61I!o=2|OmEBc4 znosli7ZuO@`W1K~$y4BNGCR&1g&&SA6KeE7=H1Tqxkj_QfOJ_U{C*4T7!34ap<_mBzbjtn@q3^rnx0oNwZ=EN4 z#g60dy%)b?zkWTGUA1gp`}u7g9l6Oy4L+LMlH3*tRYl7gj+pNR9+0!A{^RyX;creZ zshIzG$ByYy)u(5ydH3$or@VZVCpVv**LqmwE$;Ax^=jzJq;mOPz}wcfAB8P^bXNW0 z`L3<;j8S?EA7$HW{mRw4+4gkaKMUEliF1scmQuse`o!cbt@nC zcg|Y2_K`mK-)V2ZE?jva%+~BFTf(PL9SRed9;x$sAgWVV)3E-FDE>99BCrXZQJV7m zdj?4u){}RRDsj^Z16C04yQg#Z@ORKKv#Y^VIXf zEQYUH+9&>5|B1i2kMWP{NAaWg1tRWbMb)<}^<}PoTUL3AQ_aJ{_i4Do4x5QbYp(uQ zsk$BmE#|v=ZddXeoqtlpg&*U21_>66TvkD<^Vgnu~2Xa#2z$X2-m1Wx6~2 z6&sFOypQ*+k^B+(@P6Zd9vk})^ADAahgZ3`Juv(B>Ap=-%r4_CGbb+UQ&HEcQu=sn zFYpG~Pv?8*bL=ty`1oPo{tuInem5_xvhP^bC*7G7XYnpDd8!(l*)+=l*%OSX%q4$a zKL=V%1iYlS`J3BEwXKi(+kPF}y)rg?TW7k}tLtK$-8L?1atMgNb$h~1*^?4)<8y&~ zAL=rG?Ec3Y``fPK{A01DAGe2mJb#Qftcfc}$PFiES}#`^O<#k&9Y%WkVjhNbY2@ag ZQGLXvALd~=UOXE2#Kk?i;l%v^CIBZawCMl< literal 0 HcmV?d00001 diff --git a/openselfsup/__init__.py b/openselfsup/__init__.py new file mode 100644 index 00000000..1c4f7e8f --- /dev/null +++ b/openselfsup/__init__.py @@ -0,0 +1,3 @@ +from .version import __version__, short_version + +__all__ = ['__version__', 'short_version'] diff --git a/openselfsup/apis/__init__.py b/openselfsup/apis/__init__.py new file mode 100644 index 00000000..1d734787 --- /dev/null +++ b/openselfsup/apis/__init__.py @@ -0,0 +1 @@ +from .train import get_root_logger, set_random_seed, train_model diff --git a/openselfsup/apis/train.py b/openselfsup/apis/train.py new file mode 100644 index 00000000..eee1f0ca --- /dev/null +++ b/openselfsup/apis/train.py @@ -0,0 +1,275 @@ +import random +import re +from collections import OrderedDict + +import numpy as np +import torch +import torch.distributed as dist +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel +from mmcv.runner import DistSamplerSeedHook, Runner, obj_from_dict + +from openselfsup.datasets import build_dataloader +from openselfsup.hooks import build_hook, DistOptimizerHook +from openselfsup.utils import get_root_logger, optimizers, print_log + + +def set_random_seed(seed, deterministic=False): + """Set random seed. + + Args: + seed (int): Seed to be used. + deterministic (bool): Whether to set the deterministic option for + CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` + to True and `torch.backends.cudnn.benchmark` to False. + Default: False. 
+    """
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    torch.cuda.manual_seed_all(seed)
+    if deterministic:
+        torch.backends.cudnn.deterministic = True
+        torch.backends.cudnn.benchmark = False
+
+
+def parse_losses(losses):
+    log_vars = OrderedDict()
+    for loss_name, loss_value in losses.items():
+        if isinstance(loss_value, torch.Tensor):
+            log_vars[loss_name] = loss_value.mean()
+        elif isinstance(loss_value, list):
+            log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
+        else:
+            raise TypeError(
+                '{} is not a tensor or list of tensors'.format(loss_name))
+
+    loss = sum(_value for _key, _value in log_vars.items() if 'loss' in _key)
+
+    log_vars['loss'] = loss
+    for loss_name, loss_value in log_vars.items():
+        # average the loss across processes in distributed training
+        if dist.is_available() and dist.is_initialized():
+            loss_value = loss_value.data.clone()
+            dist.all_reduce(loss_value.div_(dist.get_world_size()))
+        log_vars[loss_name] = loss_value.item()
+
+    return loss, log_vars
+
+
+def batch_processor(model, data, train_mode):
+    """Process a data batch.
+
+    This method is required as an argument of Runner, which defines how to
+    process a data batch and obtain proper outputs. The first 3 arguments of
+    batch_processor are fixed.
+
+    Args:
+        model (nn.Module): A PyTorch model.
+        data (dict): The data batch in a dict.
+        train_mode (bool): Training mode or not. It may be unused by some
+            models.
+
+    Returns:
+        dict: A dict containing losses and log vars.
+    """
+    assert model.training, "Must be in training mode."
+    losses = model(**data)
+    loss, log_vars = parse_losses(losses)
+
+    outputs = dict(
+        loss=loss, log_vars=log_vars, num_samples=len(data['img'].data))
+
+    return outputs
+
+
+def train_model(model,
+                dataset,
+                cfg,
+                distributed=False,
+                timestamp=None,
+                meta=None):
+    logger = get_root_logger(cfg.log_level)
+
+    # start training
+    if distributed:
+        _dist_train(
+            model, dataset, cfg, logger=logger, timestamp=timestamp, meta=meta)
+    else:
+        _non_dist_train(
+            model, dataset, cfg, logger=logger, timestamp=timestamp, meta=meta)
+
+
+def build_optimizer(model, optimizer_cfg):
+    """Build optimizer from configs.
+
+    Args:
+        model (:obj:`nn.Module`): The model with parameters to be optimized.
+        optimizer_cfg (dict): The config dict of the optimizer.
+            Positional fields are:
+                - type: class name of the optimizer.
+                - lr: base learning rate.
+            Optional fields are:
+                - any arguments of the corresponding optimizer type, e.g.,
+                  weight_decay, momentum, etc.
+                - paramwise_options: a dict with regular expression as keys
+                  to match parameter names and a dict containing options as
+                  values. Options include 6 fields: lr, lr_mult, momentum,
+                  momentum_mult, weight_decay, weight_decay_mult.
+
+    Returns:
+        torch.optim.Optimizer: The initialized optimizer.
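+
+    Note:
+        Patterns in ``paramwise_options`` are tried in insertion order; if
+        several patterns match the same parameter name, options set by
+        later entries overwrite those set by earlier ones.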
+
+    Example:
+        >>> model = torch.nn.modules.Conv1d(1, 1, 1)
+        >>> paramwise_options = {
+        >>>     '(bn|gn)(\d+)?.(weight|bias)': dict(weight_decay_mult=0.1),
+        >>>     '\Ahead.': dict(lr_mult=10, momentum=0)}
+        >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,
+        >>>                      weight_decay=0.0001,
+        >>>                      paramwise_options=paramwise_options)
+        >>> optimizer = build_optimizer(model, optimizer_cfg)
+    """
+    if hasattr(model, 'module'):
+        model = model.module
+
+    optimizer_cfg = optimizer_cfg.copy()
+    paramwise_options = optimizer_cfg.pop('paramwise_options', None)
+    # if no paramwise option is specified, just use the global setting
+    if paramwise_options is None:
+        return obj_from_dict(optimizer_cfg, optimizers,
+                             dict(params=model.parameters()))
+    else:
+        assert isinstance(paramwise_options, dict)
+        params = []
+        for name, param in model.named_parameters():
+            param_group = {'params': [param]}
+            if not param.requires_grad:
+                params.append(param_group)
+                continue
+
+            for regexp, options in paramwise_options.items():
+                if re.search(regexp, name):
+                    for key, value in options.items():
+                        if key.endswith('_mult'):  # is a multiplier
+                            key = key[:-5]
+                            assert key in optimizer_cfg, \
+                                "{} not in optimizer_cfg".format(key)
+                            value = optimizer_cfg[key] * value
+                        param_group[key] = value
+                        if not dist.is_initialized() or dist.get_rank() == 0:
+                            print_log('paramwise_options -- {}: {}={}'.format(
+                                name, key, value))
+
+            # otherwise use the global settings
+            params.append(param_group)
+
+        optimizer_cls = getattr(optimizers, optimizer_cfg.pop('type'))
+        return optimizer_cls(params, **optimizer_cfg)
+
+
+def _dist_train(model, dataset, cfg, logger=None, timestamp=None, meta=None):
+    # prepare data loaders
+    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
+    data_loaders = [
+        build_dataloader(
+            ds,
+            cfg.data.imgs_per_gpu,
+            cfg.data.workers_per_gpu,
+            dist=True,
+            shuffle=True,
+            replace=getattr(cfg.data, 'sampling_replace', False),
+            seed=cfg.seed,
+            drop_last=getattr(cfg.data, 'drop_last', False)) for ds in dataset
+    ]
+    # put model on gpus
+    model = MMDistributedDataParallel(
+        model.cuda(),
+        device_ids=[torch.cuda.current_device()],
+        broadcast_buffers=False)
+
+    # build runner
+    optimizer = build_optimizer(model, cfg.optimizer)
+    runner = Runner(
+        model,
+        batch_processor,
+        optimizer,
+        cfg.work_dir,
+        logger=logger,
+        meta=meta)
+    # an ugly workaround to make the .log and .log.json filenames the same
+    runner.timestamp = timestamp
+
+    optimizer_config = DistOptimizerHook(**cfg.optimizer_config)
+
+    # register hooks
+    runner.register_training_hooks(cfg.lr_config, optimizer_config,
+                                   cfg.checkpoint_config, cfg.log_config)
+    runner.register_hook(DistSamplerSeedHook())
+    # register custom hooks
+    for hook in cfg.get('custom_hooks', ()):
+        if hook.type == 'DeepClusterHook':
+            common_params = dict(dist_mode=True, data_loaders=data_loaders)
+        else:
+            common_params = dict(dist_mode=True)
+        runner.register_hook(build_hook(hook, common_params))
+
+    if cfg.resume_from:
+        runner.resume(cfg.resume_from)
+    elif cfg.load_from:
+        runner.load_checkpoint(cfg.load_from)
+    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
+
+
+def _non_dist_train(model,
+                    dataset,
+                    cfg,
+                    validate=False,
+                    logger=None,
+                    timestamp=None,
+                    meta=None):
+
+    # prepare data loaders
+    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
+    data_loaders = [
+        build_dataloader(
+            ds,
+            cfg.data.imgs_per_gpu,
+            cfg.data.workers_per_gpu,
+            cfg.gpus,
+            dist=False,
+            shuffle=True,
+            replace=getattr(cfg.data, 'sampling_replace', False),
+            seed=cfg.seed,
+            drop_last=getattr(cfg.data, 'drop_last', False)) for ds in dataset
+    ]
+    # put model on gpus
+    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
+
+    # build runner
+    optimizer = build_optimizer(model, cfg.optimizer)
+    runner = Runner(
+        model,
+        batch_processor,
+        optimizer,
+        cfg.work_dir,
+        logger=logger,
+        meta=meta)
+    # an ugly workaround to make the .log and .log.json filenames the same
+    runner.timestamp = timestamp
+    optimizer_config = cfg.optimizer_config
+    runner.register_training_hooks(cfg.lr_config, optimizer_config,
+                                   cfg.checkpoint_config, cfg.log_config)
+
+    # register custom hooks
+    for hook in cfg.get('custom_hooks', ()):
+        if hook.type == 'DeepClusterHook':
+            common_params = dict(dist_mode=False, data_loaders=data_loaders)
+        else:
+            common_params = dict(dist_mode=False)
+        runner.register_hook(build_hook(hook, common_params))
+
+    if cfg.resume_from:
+        runner.resume(cfg.resume_from)
+    elif cfg.load_from:
+        runner.load_checkpoint(cfg.load_from)
+    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
diff --git a/openselfsup/datasets/__init__.py b/openselfsup/datasets/__init__.py
new file mode 100644
index 00000000..11e81bbe
--- /dev/null
+++ b/openselfsup/datasets/__init__.py
@@ -0,0 +1,12 @@
+from .builder import build_dataset
+from .data_sources import *
+from .pipelines import *
+from .classification import ClassificationDataset
+from .deepcluster import DeepClusterDataset
+from .extraction import ExtractDataset
+from .npid import NPIDDataset
+from .rotation_pred import RotationPredDataset
+from .contrastive import ContrastiveDataset
+from .dataset_wrappers import ConcatDataset, RepeatDataset
+from .loader import DistributedGroupSampler, GroupSampler, build_dataloader
+from .registry import DATASETS
diff --git a/openselfsup/datasets/base.py b/openselfsup/datasets/base.py
new file mode 100644
index 00000000..02dc75e9
--- /dev/null
+++ b/openselfsup/datasets/base.py
@@ -0,0 +1,32 @@
+from abc import ABCMeta, abstractmethod
+
+import torch
+from torch.utils.data import Dataset
+
+from openselfsup.utils import print_log, build_from_cfg
+
+from torchvision.transforms import Compose
+
+from .registry import DATASETS, PIPELINES
+from .builder import build_datasource
+
+
+class BaseDataset(Dataset, metaclass=ABCMeta):
+    """Base Dataset
+    """
+
+    def __init__(self, data_source, pipeline):
+        self.data_source = build_datasource(data_source)
+        pipeline = [build_from_cfg(p, PIPELINES) for p in pipeline]
+        self.pipeline = Compose(pipeline)
+
+    def __len__(self):
+        return self.data_source.get_length()
+
+    @abstractmethod
+    def __getitem__(self, idx):
+        pass
+
+    @abstractmethod
+    def evaluate(self, scores, keyword, logger=None, **kwargs):
+        pass
diff --git a/openselfsup/datasets/builder.py b/openselfsup/datasets/builder.py
new file mode 100644
index 00000000..a7a40325
--- /dev/null
+++ b/openselfsup/datasets/builder.py
@@ -0,0 +1,43 @@
+import copy
+
+from openselfsup.utils import build_from_cfg
+from .dataset_wrappers import ConcatDataset, RepeatDataset
+from .registry import DATASETS, DATASOURCES
+
+
+def _concat_dataset(cfg, default_args=None):
+    ann_files = cfg['ann_file']
+    img_prefixes = cfg.get('img_prefix', None)
+    seg_prefixes = cfg.get('seg_prefix', None)
+    proposal_files = cfg.get('proposal_file', None)
+
+    datasets = []
+    num_dset = len(ann_files)
+    for i in range(num_dset):
+        data_cfg = copy.deepcopy(cfg)
+        data_cfg['ann_file'] = ann_files[i]
+        if isinstance(img_prefixes, (list, tuple)):
+            data_cfg['img_prefix'] = img_prefixes[i]
+        if isinstance(seg_prefixes, (list, tuple)):
+            data_cfg['seg_prefix'] = seg_prefixes[i]
+        if isinstance(proposal_files, (list, tuple)):
+            data_cfg['proposal_file'] = proposal_files[i]
+        datasets.append(build_dataset(data_cfg, default_args))
+
+    return ConcatDataset(datasets)
+
+
+def build_dataset(cfg, default_args=None):
+    if isinstance(cfg, (list, tuple)):
+        dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
+    elif cfg['type'] == 'RepeatDataset':
+        dataset = RepeatDataset(
+            build_dataset(cfg['dataset'], default_args), cfg['times'])
+    else:
+        dataset = build_from_cfg(cfg, DATASETS, default_args)
+
+    return dataset
+
+
+def build_datasource(cfg):
+    return build_from_cfg(cfg, DATASOURCES)
diff --git a/openselfsup/datasets/classification.py b/openselfsup/datasets/classification.py
new file mode 100644
index 00000000..584cd952
--- /dev/null
+++ b/openselfsup/datasets/classification.py
@@ -0,0 +1,43 @@
+import torch
+
+from openselfsup.utils import print_log
+
+from .registry import DATASETS
+from .base import BaseDataset
+
+
+@DATASETS.register_module
+class ClassificationDataset(BaseDataset):
+    """Dataset for classification
+    """
+
+    def __init__(self, data_source, pipeline):
+        super(ClassificationDataset, self).__init__(data_source, pipeline)
+
+    def __getitem__(self, idx):
+        img, target = self.data_source.get_sample(idx)
+        img = self.pipeline(img)
+        return dict(img=img, gt_label=target)
+
+    def evaluate(self, scores, keyword, logger=None, topk=(1, 5)):
+        """scores (Tensor): prediction scores with shape (N, C).
+        """
+        eval_res = {}
+
+        target = torch.LongTensor(self.data_source.labels)
+        assert scores.size(0) == target.size(0), \
+            "Inconsistent length for results and labels, {} vs {}".format(
+                scores.size(0), target.size(0))
+        num = scores.size(0)
+        _, pred = scores.topk(max(topk), dim=1, largest=True, sorted=True)
+        pred = pred.t()
+        correct = pred.eq(target.view(1, -1).expand_as(pred))  # KxN
+        for k in topk:
+            correct_k = correct[:k].view(-1).float().sum(0).item()
+            acc = correct_k * 100.0 / num
+            eval_res["{}_acc@{}".format(keyword, k)] = acc
+            if logger is not None and logger != 'silent':
+                print_log(
+                    "{}_acc@{}: {:.03f}".format(keyword, k, acc),
+                    logger=logger)
+        return eval_res
diff --git a/openselfsup/datasets/contrastive.py b/openselfsup/datasets/contrastive.py
new file mode 100644
index 00000000..bf42fef0
--- /dev/null
+++ b/openselfsup/datasets/contrastive.py
@@ -0,0 +1,23 @@
+import torch
+
+from .registry import DATASETS
+from .base import BaseDataset
+
+
+@DATASETS.register_module
+class ContrastiveDataset(BaseDataset):
+    """Dataset for contrastive learning
+    """
+
+    def __init__(self, data_source, pipeline):
+        super(ContrastiveDataset, self).__init__(data_source, pipeline)
+
+    def __getitem__(self, idx):
+        img, _ = self.data_source.get_sample(idx)
+        img1 = self.pipeline(img)
+        img2 = self.pipeline(img)
+        img_cat = torch.cat((img1.unsqueeze(0), img2.unsqueeze(0)), dim=0)
+        return dict(img=img_cat)
+
+    def evaluate(self, scores, keyword, logger=None):
+        raise NotImplementedError
diff --git a/openselfsup/datasets/data_sources/__init__.py b/openselfsup/datasets/data_sources/__init__.py
new file mode 100644
index 00000000..25a66682
--- /dev/null
+++ b/openselfsup/datasets/data_sources/__init__.py
@@ -0,0 +1,3 @@
+from .cifar import Cifar10, Cifar100
+from .image_list import ImageList
+from .imagenet import ImageNet
diff --git a/openselfsup/datasets/data_sources/cifar.py b/openselfsup/datasets/data_sources/cifar.py
new file mode 100644
index 00000000..d04c49e5
--- /dev/null
diff --git a/openselfsup/datasets/data_sources/cifar.py b/openselfsup/datasets/data_sources/cifar.py
new file mode 100644
index 00000000..d04c49e5
--- /dev/null
+++ b/openselfsup/datasets/data_sources/cifar.py
@@ -0,0 +1,55 @@
+from PIL import Image
+
+from torchvision.datasets import CIFAR10, CIFAR100
+
+from ..registry import DATASOURCES
+
+
+@DATASOURCES.register_module
+class Cifar10(object):
+
+    CLASSES = [
+        'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog',
+        'horse', 'ship', 'truck'
+    ]
+
+    def __init__(self, root, split):
+        assert split in ['train', 'test']
+        try:
+            self.cifar = CIFAR10(
+                root=root, train=split == 'train', download=False)
+        except Exception:
+            raise Exception(
+                "Please download CIFAR10 manually; downloading it from "
+                "multiple processes in parallel may corrupt the dataset.")
+        self.labels = self.cifar.targets
+
+    def get_length(self):
+        return len(self.cifar)
+
+    def get_sample(self, idx):
+        img = Image.fromarray(self.cifar.data[idx])  # img: HWC, RGB
+        target = self.labels[idx]
+        return img, target
+
+
+@DATASOURCES.register_module
+class Cifar100(object):
+
+    CLASSES = None
+
+    def __init__(self, root, split):
+        assert split in ['train', 'test']
+        try:
+            self.cifar = CIFAR100(
+                root=root, train=split == 'train', download=False)
+        except Exception:
+            raise Exception(
+                "Please download CIFAR100 manually; downloading it from "
+                "multiple processes in parallel may corrupt the dataset.")
+        self.labels = self.cifar.targets
+
+    def get_length(self):
+        return len(self.cifar)
+
+    def get_sample(self, idx):
+        img = Image.fromarray(self.cifar.data[idx])  # img: HWC, RGB
+        target = self.labels[idx]
+        return img, target
diff --git a/openselfsup/datasets/data_sources/image_list.py b/openselfsup/datasets/data_sources/image_list.py
new file mode 100644
index 00000000..1a626c19
--- /dev/null
+++ b/openselfsup/datasets/data_sources/image_list.py
@@ -0,0 +1,36 @@
+import os
+from PIL import Image
+
+from ..registry import DATASOURCES
+from .utils import McLoader
+
+
+@DATASOURCES.register_module
+class ImageList(object):
+
+    def __init__(self, root, list_file, memcached, mclient_path):
+        with open(list_file, 'r') as f:
+            lines = f.readlines()
+        self.fns = [os.path.join(root, l.strip()) for l in lines]
+        self.memcached = memcached
+        self.mclient_path = mclient_path
+        self.initialized = False
+
+    def _init_memcached(self):
+        if not self.initialized:
+            assert self.mclient_path is not None
+            self.mc_loader = McLoader(self.mclient_path)
+            self.initialized = True
+
+    def get_length(self):
+        return len(self.fns)
+
+    def get_sample(self, idx):
+        if self.memcached:
+            self._init_memcached()
+        if self.memcached:
+            img = self.mc_loader(self.fns[idx])
+        else:
+            img = Image.open(self.fns[idx])
+        img = img.convert('RGB')
+        return img
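Both ImageList above and ImageNet in the next file consume a plain-text list file with one relative path per line; ImageNet additionally accepts an integer label column. A hypothetical list file and usage (the paths are made up for illustration):

    # data/train.txt:
    #   n01440764/n01440764_10026.JPEG 0
    #   n01440764/n01440764_10027.JPEG 0
    from openselfsup.datasets.data_sources import ImageNet
    source = ImageNet(root='data/imagenet/train', list_file='data/train.txt',
                      memcached=False, mclient_path=None)
    img, label = source.get_sample(0)  # PIL RGB image, int label (or None)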
diff --git a/openselfsup/datasets/data_sources/imagenet.py b/openselfsup/datasets/data_sources/imagenet.py
new file mode 100644
index 00000000..6c58ea20
--- /dev/null
+++ b/openselfsup/datasets/data_sources/imagenet.py
@@ -0,0 +1,43 @@
+import os
+from PIL import Image
+
+from ..registry import DATASOURCES
+from .utils import McLoader
+
+
+@DATASOURCES.register_module
+class ImageNet(object):
+
+    def __init__(self, root, list_file, memcached, mclient_path):
+        with open(list_file, 'r') as f:
+            lines = f.readlines()
+        self.has_labels = len(lines[0].split()) == 2
+        if self.has_labels:
+            self.fns, self.labels = zip(*[l.strip().split() for l in lines])
+            self.labels = [int(l) for l in self.labels]
+        else:
+            self.fns = [l.strip() for l in lines]
+        self.fns = [os.path.join(root, fn) for fn in self.fns]
+        self.memcached = memcached
+        self.mclient_path = mclient_path
+        self.initialized = False
+
+    def _init_memcached(self):
+        if not self.initialized:
+            assert self.mclient_path is not None
+            self.mc_loader = McLoader(self.mclient_path)
+            self.initialized = True
+
+    def get_length(self):
+        return len(self.fns)
+
+    def get_sample(self, idx):
+        if self.memcached:
+            self._init_memcached()
+        if self.memcached:
+            img = self.mc_loader(self.fns[idx])
+        else:
+            img = Image.open(self.fns[idx])
+        img = img.convert('RGB')
+        target = self.labels[idx] if self.has_labels else None
+        return img, target
diff --git a/openselfsup/datasets/data_sources/utils.py b/openselfsup/datasets/data_sources/utils.py
new file mode 100644
index 00000000..f5f5f246
--- /dev/null
+++ b/openselfsup/datasets/data_sources/utils.py
@@ -0,0 +1,36 @@
+import io
+from PIL import Image
+try:
+    import mc
+except ImportError:
+    # memcached support is optional; McLoader is only used when enabled.
+    pass
+
+
+def pil_loader(img_str):
+    buff = io.BytesIO(img_str)
+    return Image.open(buff)
+
+
+class McLoader(object):
+
+    def __init__(self, mclient_path):
+        assert mclient_path is not None, \
+            "Please specify 'data_mclient_path' in the config."
+        self.mclient_path = mclient_path
+        server_list_config_file = "{}/server_list.conf".format(
+            self.mclient_path)
+        client_config_file = "{}/client.conf".format(self.mclient_path)
+        self.mclient = mc.MemcachedClient.GetInstance(server_list_config_file,
+                                                      client_config_file)
+
+    def __call__(self, fn):
+        try:
+            img_value = mc.pyvector()
+            self.mclient.Get(fn, img_value)
+            img_value_str = mc.ConvertBuffer(img_value)
+            img = pil_loader(img_value_str)
+        except Exception:
+            print('Read image failed ({})'.format(fn))
+            return None
+        else:
+            return img
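As a quick sanity check, pil_loader above can be exercised without memcached by feeding it raw bytes read from disk (the path is hypothetical):

    from openselfsup.datasets.data_sources.utils import pil_loader
    with open('data/example.jpg', 'rb') as f:
        img = pil_loader(f.read()).convert('RGB')  # same object McLoader yields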
+ """ + + def __init__(self, dataset, times): + self.dataset = dataset + self.times = times + self.CLASSES = dataset.CLASSES + if hasattr(self.dataset, 'flag'): + self.flag = np.tile(self.dataset.flag, times) + + self._ori_len = len(self.dataset) + + def __getitem__(self, idx): + return self.dataset[idx % self._ori_len] + + def __len__(self): + return self.times * self._ori_len diff --git a/openselfsup/datasets/deepcluster.py b/openselfsup/datasets/deepcluster.py new file mode 100644 index 00000000..b4928e35 --- /dev/null +++ b/openselfsup/datasets/deepcluster.py @@ -0,0 +1,29 @@ +from .registry import DATASETS +from .base import BaseDataset + + +@DATASETS.register_module +class DeepClusterDataset(BaseDataset): + """Dataset for DC and ODC. + """ + + def __init__(self, data_source, pipeline): + super(DeepClusterDataset, self).__init__(data_source, pipeline) + # init clustering labels + self.labels = [-1 for _ in range(self.data_source.get_length())] + + def __getitem__(self, idx): + img, _ = self.data_source.get_sample(idx) + label = self.labels[idx] + img = self.pipeline(img) + return dict(img=img, pseudo_label=label, idx=idx) + + def assign_labels(self, labels): + assert len(self.labels) == len(labels), \ + "Inconsistent lenght of asigned labels, \ + {} vs {}".format(len(self.labels), len(labels)) + self.labels = labels[:] + + def evaluate(self, scores, keyword, logger=None): + + raise NotImplemented diff --git a/openselfsup/datasets/extraction.py b/openselfsup/datasets/extraction.py new file mode 100644 index 00000000..6d926df5 --- /dev/null +++ b/openselfsup/datasets/extraction.py @@ -0,0 +1,19 @@ +from .registry import DATASETS +from .base import BaseDataset + + +@DATASETS.register_module +class ExtractDataset(BaseDataset): + """Dataset for feature extraction + """ + + def __init__(self, data_source, pipeline): + super(ExtractDataset, self).__init__(data_source, pipeline) + + def __getitem__(self, idx): + img = self.data_source.get_sample(idx) + img = self.pipeline(img) + return dict(img=img) + + def evaluate(self, scores, keyword, logger=None): + raise NotImplemented diff --git a/openselfsup/datasets/loader/__init__.py b/openselfsup/datasets/loader/__init__.py new file mode 100644 index 00000000..7d11a011 --- /dev/null +++ b/openselfsup/datasets/loader/__init__.py @@ -0,0 +1,7 @@ +from .build_loader import build_dataloader +from .sampler import DistributedGroupSampler, GroupSampler, DistributedGivenIterationSampler + +__all__ = [ + 'GroupSampler', 'DistributedGroupSampler', 'build_dataloader', + 'DistributedGivenIterationSampler' +] diff --git a/openselfsup/datasets/loader/build_loader.py b/openselfsup/datasets/loader/build_loader.py new file mode 100644 index 00000000..fc6c8118 --- /dev/null +++ b/openselfsup/datasets/loader/build_loader.py @@ -0,0 +1,81 @@ +import platform +import random +from functools import partial + +import numpy as np +from mmcv.parallel import collate +from mmcv.runner import get_dist_info +from torch.utils.data import DataLoader + +#from .sampler import DistributedGroupSampler, DistributedSampler, GroupSampler +from .sampler import DistributedSampler, DistributedGivenIterationSampler +from torch.utils.data import RandomSampler + +if platform.system() != 'Windows': + # https://github.com/pytorch/pytorch/issues/973 + import resource + rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) + resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1])) + + +def build_dataloader(dataset, + imgs_per_gpu, + workers_per_gpu, + num_gpus=1, + dist=True, + shuffle=True, + 
diff --git a/openselfsup/datasets/loader/build_loader.py b/openselfsup/datasets/loader/build_loader.py
new file mode 100644
index 00000000..fc6c8118
--- /dev/null
+++ b/openselfsup/datasets/loader/build_loader.py
@@ -0,0 +1,81 @@
+import platform
+import random
+from functools import partial
+
+import numpy as np
+from mmcv.parallel import collate
+from mmcv.runner import get_dist_info
+from torch.utils.data import DataLoader
+
+#from .sampler import DistributedGroupSampler, DistributedSampler, GroupSampler
+from .sampler import DistributedSampler, DistributedGivenIterationSampler
+from torch.utils.data import RandomSampler
+
+if platform.system() != 'Windows':
+    # https://github.com/pytorch/pytorch/issues/973
+    import resource
+    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
+    resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
+
+
+def build_dataloader(dataset,
+                     imgs_per_gpu,
+                     workers_per_gpu,
+                     num_gpus=1,
+                     dist=True,
+                     shuffle=True,
+                     replace=False,
+                     seed=None,
+                     **kwargs):
+    """Build PyTorch DataLoader.
+
+    In distributed training, each GPU/process has a dataloader.
+    In non-distributed training, there is only one dataloader for all GPUs.
+
+    Args:
+        dataset (Dataset): A PyTorch dataset.
+        imgs_per_gpu (int): Number of images on each GPU, i.e., batch size of
+            each GPU.
+        workers_per_gpu (int): How many subprocesses to use for data loading
+            for each GPU.
+        num_gpus (int): Number of GPUs. Only used in non-distributed training.
+        dist (bool): Distributed training/test or not. Default: True.
+        shuffle (bool): Whether to shuffle the data at every epoch.
+            Default: True.
+        replace (bool): Whether to sample with replacement in random shuffle.
+            Only effective when shuffle is True.
+        seed (int | None): Base seed for the dataloader workers.
+        kwargs: Any keyword argument used to initialize the DataLoader.
+
+    Returns:
+        DataLoader: A PyTorch dataloader.
+    """
+    if dist:
+        rank, world_size = get_dist_info()
+        sampler = DistributedSampler(
+            dataset, world_size, rank, shuffle=shuffle, replace=replace)
+        batch_size = imgs_per_gpu
+        num_workers = workers_per_gpu
+    else:
+        if replace:
+            raise NotImplementedError
+        sampler = RandomSampler(
+            dataset) if shuffle else None  # TODO: set replace
+        batch_size = num_gpus * imgs_per_gpu
+        num_workers = num_gpus * workers_per_gpu
+
+    data_loader = DataLoader(
+        dataset,
+        batch_size=batch_size,
+        sampler=sampler,
+        num_workers=num_workers,
+        collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu),
+        pin_memory=False,
+        worker_init_fn=partial(worker_init_fn, seed=seed)
+        if seed is not None else None,
+        **kwargs)
+
+    return data_loader
+
+
+def worker_init_fn(worker_id, seed):
+    # The DataLoader passes the worker id; derive a distinct but
+    # deterministic seed per worker instead of ignoring the base seed.
+    np.random.seed(seed + worker_id)
+    random.seed(seed + worker_id)
diff --git a/openselfsup/datasets/loader/sampler.py b/openselfsup/datasets/loader/sampler.py
new file mode 100644
index 00000000..2653e2f8
--- /dev/null
+++ b/openselfsup/datasets/loader/sampler.py
@@ -0,0 +1,299 @@
+from __future__ import division
+import math
+
+import numpy as np
+import torch
+from mmcv.runner import get_dist_info
+from torch.utils.data import DistributedSampler as _DistributedSampler
+from torch.utils.data import Sampler
+
+
+class DistributedSampler(_DistributedSampler):
+
+    def __init__(self,
+                 dataset,
+                 num_replicas=None,
+                 rank=None,
+                 shuffle=True,
+                 replace=False):
+        super().__init__(dataset, num_replicas=num_replicas, rank=rank)
+        self.shuffle = shuffle
+        self.replace = replace
+        self.unif_sampling_flag = False
+
+    def __iter__(self):
+        # deterministically shuffle based on epoch
+        if not self.unif_sampling_flag:
+            self.generate_new_list()
+        else:
+            self.unif_sampling_flag = False
+        return iter(self.indices[self.rank * self.num_samples:(self.rank + 1) *
+                                 self.num_samples])
+
+    def generate_new_list(self):
+        if self.shuffle:
+            g = torch.Generator()
+            g.manual_seed(self.epoch)
+            if self.replace:
+                indices = torch.randint(
+                    low=0,
+                    high=len(self.dataset),
+                    size=(len(self.dataset), ),
+                    generator=g).tolist()
+            else:
+                indices = torch.randperm(
+                    len(self.dataset), generator=g).tolist()
+        else:
+            indices = torch.arange(len(self.dataset)).tolist()
+
+        # add extra samples to make it evenly divisible
+        indices += indices[:(self.total_size - len(indices))]
+        assert len(indices) == self.total_size
+        self.indices = indices
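+    # NOTE: set_uniform_indices below serves clustering-based methods
+    # (DeepCluster/ODC), where pseudo-label class sizes are highly
+    # imbalanced: it re-samples roughly N / num_classes indices per class
+    # (with replacement for small classes) so that every cluster
+    # contributes about equally to the epoch.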
+    def set_uniform_indices(self, labels, num_classes):
+        self.unif_sampling_flag = True
+        assert self.shuffle, \
+            "Using uniform sampling, the indices must be shuffled."
+        np.random.seed(self.epoch)
+        assert (len(labels) == len(self.dataset))
+        N = len(labels)
+        size_per_label = int(N / num_classes) + 1
+        indices = []
+        images_lists = [[] for i in range(num_classes)]
+        for i, l in enumerate(labels):
+            images_lists[l].append(i)
+        for i, l in enumerate(images_lists):
+            if len(l) == 0:
+                continue
+            indices.extend(
+                np.random.choice(
+                    l, size_per_label, replace=(len(l) <= size_per_label)))
+        indices = np.array(indices)
+        np.random.shuffle(indices)
+        indices = indices[:N].astype(np.int64).tolist()
+
+        # add extra samples to make it evenly divisible
+        assert len(indices) <= self.total_size, \
+            "{} vs {}".format(len(indices), self.total_size)
+        indices += indices[:(self.total_size - len(indices))]
+        assert len(indices) == self.total_size, \
+            "{} vs {}".format(len(indices), self.total_size)
+        self.indices = indices
+
+
+class GroupSampler(Sampler):
+
+    def __init__(self, dataset, samples_per_gpu=1):
+        assert hasattr(dataset, 'flag')
+        self.dataset = dataset
+        self.samples_per_gpu = samples_per_gpu
+        self.flag = dataset.flag.astype(np.int64)
+        self.group_sizes = np.bincount(self.flag)
+        self.num_samples = 0
+        for i, size in enumerate(self.group_sizes):
+            self.num_samples += int(np.ceil(
+                size / self.samples_per_gpu)) * self.samples_per_gpu
+
+    def __iter__(self):
+        indices = []
+        for i, size in enumerate(self.group_sizes):
+            if size == 0:
+                continue
+            indice = np.where(self.flag == i)[0]
+            assert len(indice) == size
+            np.random.shuffle(indice)
+            num_extra = int(np.ceil(size / self.samples_per_gpu)
+                            ) * self.samples_per_gpu - len(indice)
+            indice = np.concatenate(
+                [indice, np.random.choice(indice, num_extra)])
+            indices.append(indice)
+        indices = np.concatenate(indices)
+        indices = [
+            indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
+            for i in np.random.permutation(
+                range(len(indices) // self.samples_per_gpu))
+        ]
+        indices = np.concatenate(indices)
+        indices = indices.astype(np.int64).tolist()
+        assert len(indices) == self.num_samples
+        return iter(indices)
+
+    def __len__(self):
+        return self.num_samples
+
+
+class DistributedGroupSampler(Sampler):
+    """Sampler that restricts data loading to a subset of the dataset.
+
+    It is especially useful in conjunction with
+    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
+    process can pass a DistributedSampler instance as a DataLoader sampler,
+    and load a subset of the original dataset that is exclusive to it.
+
+    .. note::
+        Dataset is assumed to be of constant size.
+
+    Args:
+        dataset: Dataset used for sampling.
+        num_replicas (optional): Number of processes participating in
+            distributed training.
+        rank (optional): Rank of the current process within num_replicas.
+ """ + + def __init__(self, + dataset, + samples_per_gpu=1, + num_replicas=None, + rank=None): + _rank, _num_replicas = get_dist_info() + if num_replicas is None: + num_replicas = _num_replicas + if rank is None: + rank = _rank + self.dataset = dataset + self.samples_per_gpu = samples_per_gpu + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + + assert hasattr(self.dataset, 'flag') + self.flag = self.dataset.flag + self.group_sizes = np.bincount(self.flag) + + self.num_samples = 0 + for i, j in enumerate(self.group_sizes): + self.num_samples += int( + math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu / + self.num_replicas)) * self.samples_per_gpu + self.total_size = self.num_samples * self.num_replicas + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + + indices = [] + for i, size in enumerate(self.group_sizes): + if size > 0: + indice = np.where(self.flag == i)[0] + assert len(indice) == size + indice = indice[list(torch.randperm(int(size), + generator=g))].tolist() + extra = int( + math.ceil( + size * 1.0 / self.samples_per_gpu / self.num_replicas) + ) * self.samples_per_gpu * self.num_replicas - len(indice) + # pad indice + tmp = indice.copy() + for _ in range(extra // size): + indice.extend(tmp) + indice.extend(tmp[:extra % size]) + indices.extend(indice) + + assert len(indices) == self.total_size + + indices = [ + indices[j] for i in list( + torch.randperm( + len(indices) // self.samples_per_gpu, generator=g)) + for j in range(i * self.samples_per_gpu, (i + 1) * + self.samples_per_gpu) + ] + + # subsample + offset = self.num_samples * self.rank + indices = indices[offset:offset + self.num_samples] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + def set_epoch(self, epoch): + self.epoch = epoch + + +class DistributedGivenIterationSampler(Sampler): + + def __init__(self, + dataset, + total_iter, + batch_size, + num_replicas=None, + rank=None, + last_iter=-1): + rank, world_size = get_dist_info() + assert rank < world_size + self.dataset = dataset + self.total_iter = total_iter + self.batch_size = batch_size + self.world_size = world_size + self.rank = rank + self.last_iter = last_iter + + self.total_size = self.total_iter * self.batch_size + + self.indices = self.gen_new_list() + + def __iter__(self): + return iter(self.indices[(self.last_iter + 1) * self.batch_size:]) + + def set_uniform_indices(self, labels, num_classes): + np.random.seed(0) + assert (len(labels) == len(self.dataset)) + N = len(labels) + size_per_label = int(N / num_classes) + 1 + indices = [] + images_lists = [[] for i in range(num_classes)] + for i, l in enumerate(labels): + images_lists[l].append(i) + for i, l in enumerate(images_lists): + if len(l) == 0: + continue + indices.extend( + np.random.choice( + l, size_per_label, replace=(len(l) <= size_per_label))) + indices = np.array(indices) + np.random.shuffle(indices) + indices = indices[:N].astype(np.int) + # repeat + all_size = self.total_size * self.world_size + indices = indices[:all_size] + num_repeat = (all_size - 1) // indices.shape[0] + 1 + indices = np.tile(indices, num_repeat) + indices = indices[:all_size] + np.random.shuffle(indices) + # slice + beg = self.total_size * self.rank + indices = indices[beg:beg + self.total_size] + assert len(indices) == self.total_size + # set + self.indices = indices + + def gen_new_list(self): + + # each process shuffle all list with same seed, and 
+        np.random.seed(0)
+
+        all_size = self.total_size * self.world_size
+        indices = np.arange(len(self.dataset))
+        indices = indices[:all_size]
+        num_repeat = (all_size - 1) // indices.shape[0] + 1
+        indices = np.tile(indices, num_repeat)
+        indices = indices[:all_size]
+
+        np.random.shuffle(indices)
+        beg = self.total_size * self.rank
+        indices = indices[beg:beg + self.total_size]
+
+        assert len(indices) == self.total_size
+
+        return indices
+
+    def __len__(self):
+        # note here we do not take last iter into consideration, since __len__
+        # should only be used for displaying, the correct remaining size is
+        # handled by dataloader
+        #return self.total_size - (self.last_iter+1)*self.batch_size
+        return self.total_size
+
+    def set_epoch(self, epoch):
+        pass
diff --git a/openselfsup/datasets/npid.py b/openselfsup/datasets/npid.py
new file mode 100644
index 00000000..4e0205eb
--- /dev/null
+++ b/openselfsup/datasets/npid.py
@@ -0,0 +1,20 @@
+from .registry import DATASETS
+from .base import BaseDataset
+
+
+@DATASETS.register_module
+class NPIDDataset(BaseDataset):
+    """Dataset for NPID.
+    """
+
+    def __init__(self, data_source, pipeline):
+        super(NPIDDataset, self).__init__(data_source, pipeline)
+
+    def __getitem__(self, idx):
+        img, _ = self.data_source.get_sample(idx)
+        img = self.pipeline(img)
+        return dict(img=img, idx=idx)
+
+    def evaluate(self, scores, keyword, logger=None):
+        raise NotImplementedError
diff --git a/openselfsup/datasets/pipelines/__init__.py b/openselfsup/datasets/pipelines/__init__.py
new file mode 100644
index 00000000..7986cdd6
--- /dev/null
+++ b/openselfsup/datasets/pipelines/__init__.py
@@ -0,0 +1 @@
+from .transforms import *
diff --git a/openselfsup/datasets/pipelines/transforms.py b/openselfsup/datasets/pipelines/transforms.py
new file mode 100644
index 00000000..c5715688
--- /dev/null
+++ b/openselfsup/datasets/pipelines/transforms.py
@@ -0,0 +1,92 @@
+import cv2
+import inspect
+import numpy as np
+from PIL import Image
+
+import torch
+from torchvision import transforms as _transforms
+
+from openselfsup.utils import build_from_cfg
+
+from ..registry import PIPELINES
+
+# register all existing transforms in torchvision
+for m in inspect.getmembers(_transforms, inspect.isclass):
+    PIPELINES.register_module(m[1])
+
+
+@PIPELINES.register_module
+class RandomAppliedTrans(object):
+    '''Randomly applied transformations.
+
+    Args:
+        transforms (List[Dict]): List of transformations in dictionaries.
+        p (float): Probability of applying the whole sequence.
+    '''
+
+    def __init__(self, transforms, p=0.5):
+        t = [build_from_cfg(t, PIPELINES) for t in transforms]
+        self.trans = _transforms.RandomApply(t, p=p)
+
+    def __call__(self, img):
+        return self.trans(img)
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        return repr_str
+
+
+# custom transforms
+@PIPELINES.register_module
+class Lighting(object):
+    """Lighting noise (AlexNet-style, PCA-based noise)."""
+
+    _IMAGENET_PCA = {
+        'eigval':
+        torch.Tensor([0.2175, 0.0188, 0.0045]),
+        'eigvec':
+        torch.Tensor([
+            [-0.5675, 0.7192, 0.4009],
+            [-0.5808, -0.0045, -0.8140],
+            [-0.5836, -0.6948, 0.4203],
+        ])
+    }
+
+    def __init__(self):
+        self.alphastd = 0.1
+        self.eigval = self._IMAGENET_PCA['eigval']
+        self.eigvec = self._IMAGENET_PCA['eigvec']
+
+    def __call__(self, img):
+        assert isinstance(img, torch.Tensor), \
+            "Expect torch.Tensor, got {}".format(type(img))
+        if self.alphastd == 0:
+            return img
+
+        alpha = img.new().resize_(3).normal_(0, self.alphastd)
+        rgb = self.eigvec.type_as(img).clone()\
+            .mul(alpha.view(1, 3).expand(3, 3))\
+            .mul(self.eigval.view(1, 3).expand(3, 3))\
+            .sum(1).squeeze()
+
+        return img.add(rgb.view(3, 1, 1).expand_as(img))
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        return repr_str
+
+
+@PIPELINES.register_module
+class GaussianBlur(object):
+
+    def __init__(self, sigma_min, sigma_max, kernel_size):
+        self.sigma_min = sigma_min
+        self.sigma_max = sigma_max
+        self.kernel_size = kernel_size
+
+    def __call__(self, img):
+        sigma = np.random.uniform(self.sigma_min, self.sigma_max)
+        img = cv2.GaussianBlur(
+            np.array(img), (self.kernel_size, self.kernel_size), sigma)
+        return Image.fromarray(img.astype(np.uint8))
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        return repr_str
diff --git a/openselfsup/datasets/registry.py b/openselfsup/datasets/registry.py
new file mode 100644
index 00000000..48642783
--- /dev/null
+++ b/openselfsup/datasets/registry.py
@@ -0,0 +1,5 @@
+from openselfsup.utils import Registry
+
+DATASOURCES = Registry('datasource')
+DATASETS = Registry('dataset')
+PIPELINES = Registry('pipeline')
diff --git a/openselfsup/datasets/rotation_pred.py b/openselfsup/datasets/rotation_pred.py
new file mode 100644
index 00000000..0f90a34e
--- /dev/null
+++ b/openselfsup/datasets/rotation_pred.py
@@ -0,0 +1,35 @@
+import torch
+
+from .registry import DATASETS
+from .base import BaseDataset
+
+
+def rotate(img):
+    '''Rotate a CHW image tensor by 0, 90, 180 and 270 degrees.
+
+    Args:
+        img (Tensor): CHW image.
+    '''
+    return [
+        img,
+        torch.flip(img.transpose(1, 2), [1]),
+        torch.flip(img, [1, 2]),
+        torch.flip(img, [1]).transpose(1, 2)
+    ]
+
+
+@DATASETS.register_module
+class RotationPredDataset(BaseDataset):
+    """Dataset for rotation prediction.
+    """
+
+    def __init__(self, data_source, pipeline):
+        super(RotationPredDataset, self).__init__(data_source, pipeline)
+
+    def __getitem__(self, idx):
+        img, _ = self.data_source.get_sample(idx)
+        img = self.pipeline(img)
+        img = torch.stack(rotate(img), dim=0)
+        rotation_labels = torch.LongTensor([0, 1, 2, 3])
+        return dict(img=img, rot_label=rotation_labels)
+
+    def evaluate(self, scores, keyword, logger=None):
+        raise NotImplementedError
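As a quick self-check (not part of the patch), the four flip/transpose combinations in rotate() agree with torch.rot90 for k = 0..3, i.e., they are exactly the 0/90/180/270-degree rotations that the labels [0, 1, 2, 3] refer to:

    import torch
    from openselfsup.datasets.rotation_pred import rotate

    x = torch.arange(2 * 3 * 4, dtype=torch.float32).reshape(2, 3, 4)  # CHW
    for k, view in enumerate(rotate(x)):
        assert torch.equal(view, torch.rot90(x, k, dims=(1, 2)))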
diff --git a/openselfsup/hooks/__init__.py b/openselfsup/hooks/__init__.py
new file mode 100644
index 00000000..cdcd6cde
--- /dev/null
+++ b/openselfsup/hooks/__init__.py
@@ -0,0 +1,7 @@
+from .builder import build_hook
+from .deepcluster_hook import DeepClusterHook
+from .odc_hook import ODCHook
+from .optimizer_hook import DistOptimizerHook
+from .extractor import Extractor
+from .validate_hook import ValidateHook
+from .registry import HOOKS
diff --git a/openselfsup/hooks/builder.py b/openselfsup/hooks/builder.py
new file mode 100644
index 00000000..b56591bf
--- /dev/null
+++ b/openselfsup/hooks/builder.py
@@ -0,0 +1,7 @@
+from openselfsup.utils import build_from_cfg
+
+from .registry import HOOKS
+
+
+def build_hook(cfg, default_args=None):
+    return build_from_cfg(cfg, HOOKS, default_args)
diff --git a/openselfsup/hooks/deepcluster_hook.py b/openselfsup/hooks/deepcluster_hook.py
new file mode 100644
index 00000000..7ed56a0e
--- /dev/null
+++ b/openselfsup/hooks/deepcluster_hook.py
@@ -0,0 +1,109 @@
+import numpy as np
+
+from mmcv.runner import Hook
+
+import torch
+import torch.distributed as dist
+
+from openselfsup.third_party import clustering as _clustering
+from openselfsup.utils import print_log
+from .registry import HOOKS
+from .extractor import Extractor
+
+
+@HOOKS.register_module
+class DeepClusterHook(Hook):
+
+    def __init__(
+            self,
+            extractor,
+            clustering,
+            unif_sampling,
+            reweight,
+            reweight_pow,
+            init_memory=False,  # for ODC
+            initial=True,
+            interval=1,
+            dist_mode=True,
+            data_loaders=None):
+        self.extractor = Extractor(dist_mode=dist_mode, **extractor)
+        self.clustering_type = clustering.pop('type')
+        self.clustering_cfg = clustering
+        self.unif_sampling = unif_sampling
+        self.reweight = reweight
+        self.reweight_pow = reweight_pow
+        self.init_memory = init_memory
+        self.initial = initial
+        self.interval = interval
+        self.dist_mode = dist_mode
+        self.data_loaders = data_loaders
+
+    def before_run(self, runner):
+        if self.initial:
+            self.deepcluster(runner)
+
+    def after_train_epoch(self, runner):
+        if not self.every_n_epochs(runner, self.interval):
+            return
+        self.deepcluster(runner)
+
+    def deepcluster(self, runner):
+        # step 1: get features
+        runner.model.eval()
+        features = self.extractor(runner)
+        runner.model.train()
+
+        # step 2: get labels
+        if not self.dist_mode or (self.dist_mode and runner.rank == 0):
+            clustering_algo = _clustering.__dict__[self.clustering_type](
+                **self.clustering_cfg)
+            # Features are normalized during clustering
+            clustering_algo.cluster(features, verbose=True)
+            assert isinstance(clustering_algo.labels, np.ndarray)
+            new_labels = clustering_algo.labels.astype(np.int64)
+            np.save(
+                "{}/cluster_epoch_{}.npy".format(runner.work_dir,
+                                                 runner.epoch), new_labels)
+            self.evaluate(runner, new_labels)
+        else:
+            new_labels = np.zeros((len(self.data_loaders[0].dataset), ),
+                                  dtype=np.int64)
+
+        if self.dist_mode:
+            new_labels_tensor = torch.from_numpy(new_labels).cuda()
+            dist.broadcast(new_labels_tensor, 0)
+            new_labels = new_labels_tensor.cpu().numpy()
+        new_labels_list = list(new_labels)
+
+        # step 3: assign new labels
+        self.data_loaders[0].dataset.assign_labels(new_labels_list)
+
+        # step 4 (a): set uniform sampler
+        if self.unif_sampling:
+            self.data_loaders[0].sampler.set_uniform_indices(
+                new_labels_list, self.clustering_cfg.k)
+
+        # step 4 (b): set loss reweight
+        if self.reweight:
+            runner.model.module.set_reweight(new_labels, self.reweight_pow)
+
+        # step 5: randomize classifier
+        runner.model.module.head.init_weights(init_linear='normal')
+        if self.dist_mode:
+            for p in runner.model.module.head.state_dict().values():
+                dist.broadcast(p, 0)
+
+        # step 6: init memory for ODC
+        if self.init_memory:
+            runner.model.module.memory_bank.init_memory(features, new_labels)
+
+    def evaluate(self, runner, new_labels):
+        hist = np.bincount(new_labels, minlength=self.clustering_cfg.k)
+        empty_cls = (hist == 0).sum()
+        minimal_cls_size, maximal_cls_size = hist.min(), hist.max()
+        if runner.rank == 0:
+            print_log(
+                "empty_num: {}\tmin_cluster: {}\tmax_cluster:{}".format(
+                    empty_cls.item(), minimal_cls_size.item(),
+                    maximal_cls_size.item()),
+                logger='root')
diff --git a/openselfsup/hooks/extractor.py b/openselfsup/hooks/extractor.py
new file mode 100644
index 00000000..6c001da5
--- /dev/null
+++ b/openselfsup/hooks/extractor.py
@@ -0,0 +1,50 @@
+import torch.nn as nn
+from torch.utils.data import Dataset
+
+from openselfsup.utils import nondist_forward_collect, dist_forward_collect
+
+
+class Extractor(object):
+
+    def __init__(self,
+                 dataset,
+                 imgs_per_gpu,
+                 workers_per_gpu,
+                 dist_mode=False):
+        from openselfsup import datasets
+        if isinstance(dataset, Dataset):
+            self.dataset = dataset
+        elif isinstance(dataset, dict):
+            self.dataset = datasets.build_dataset(dataset)
+        else:
+            raise TypeError(
+                'dataset must be a Dataset object or a dict, not {}'.format(
+                    type(dataset)))
+        self.data_loader = datasets.build_dataloader(
+            self.dataset,
+            imgs_per_gpu,
+            workers_per_gpu,
+            dist=dist_mode,
+            shuffle=False)
+        self.dist_mode = dist_mode
+        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
+
+    def _forward_func(self, runner, **x):
+        backbone_feat = runner.model(mode='extract', **x)
+        last_layer_feat = runner.model.module.neck([backbone_feat[-1]])[0]
+        last_layer_feat = last_layer_feat.view(last_layer_feat.size(0), -1)
+        return dict(feature=last_layer_feat.cpu())
+
+    def __call__(self, runner):
+        func = lambda **x: self._forward_func(runner, **x)
+        if self.dist_mode:
+            feats = dist_forward_collect(
+                func,
+                self.data_loader,
+                runner.rank,
+                len(self.dataset),
+                ret_rank=-1)['feature']  # NxD
+        else:
+            feats = nondist_forward_collect(func, self.data_loader,
+                                            len(self.dataset))['feature']
+        return feats
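A hedged usage sketch of Extractor; the dataset values are illustrative and `runner` is assumed to be the mmcv runner wrapping a model that supports mode='extract' (ImageList is used because ExtractDataset expects get_sample to return a bare image):

    extractor = Extractor(
        dataset=dict(
            type='ExtractDataset',
            data_source=dict(type='ImageList', root='data/imagenet/train',
                             list_file='data/train.txt', memcached=False,
                             mclient_path=None),
            pipeline=[dict(type='Resize', size=(224, 224)),
                      dict(type='ToTensor')]),
        imgs_per_gpu=32, workers_per_gpu=4, dist_mode=False)
    features = extractor(runner)  # N x D feature matrix on CPU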
diff --git a/openselfsup/hooks/odc_hook.py b/openselfsup/hooks/odc_hook.py
new file mode 100644
index 00000000..b059bb96
--- /dev/null
+++ b/openselfsup/hooks/odc_hook.py
@@ -0,0 +1,67 @@
+import numpy as np
+
+from mmcv.runner import Hook
+
+from openselfsup.utils import print_log
+from .registry import HOOKS
+
+
+@HOOKS.register_module
+class ODCHook(Hook):
+
+    def __init__(self,
+                 centroids_update_interval,
+                 deal_with_small_clusters_interval,
+                 evaluate_interval,
+                 reweight,
+                 reweight_pow,
+                 dist_mode=True):
+        assert dist_mode, "non-dist mode is not implemented"
+        self.centroids_update_interval = centroids_update_interval
+        self.deal_with_small_clusters_interval = \
+            deal_with_small_clusters_interval
+        self.evaluate_interval = evaluate_interval
+        self.reweight = reweight
+        self.reweight_pow = reweight_pow
+
+    def after_train_iter(self, runner):
+        # centroids update
+        if self.every_n_iters(runner, self.centroids_update_interval):
+            runner.model.module.memory_bank.update_centroids_memory()
+
+        # deal with small clusters
+        if self.every_n_iters(runner, self.deal_with_small_clusters_interval):
+            runner.model.module.memory_bank.deal_with_small_clusters()
+
+        # reweight
+        runner.model.module.set_reweight()
+
+        # evaluate
+        if self.every_n_iters(runner, self.evaluate_interval):
+            new_labels = runner.model.module.memory_bank.label_bank
+            if new_labels.is_cuda:
+                new_labels = new_labels.cpu()
+            self.evaluate(runner, new_labels.numpy())
+
+    def after_train_epoch(self, runner):
+        # save cluster assignments every 10 epochs
+        if self.every_n_epochs(runner, 10) and runner.rank == 0:
+            new_labels = runner.model.module.memory_bank.label_bank
+            if new_labels.is_cuda:
+                new_labels = new_labels.cpu()
+            np.save(
+                "{}/cluster_epoch_{}.npy".format(runner.work_dir,
+                                                 runner.epoch),
+                new_labels.numpy())
+
+    def evaluate(self, runner, new_labels):
+        hist = np.bincount(
+            new_labels, minlength=runner.model.module.memory_bank.num_classes)
+        empty_cls = (hist == 0).sum()
+        minimal_cls_size, maximal_cls_size = hist.min(), hist.max()
+        if runner.rank == 0:
+            print_log(
+                "empty_num: {}\tmin_cluster: {}\tmax_cluster:{}".format(
+                    empty_cls.item(), minimal_cls_size.item(),
+                    maximal_cls_size.item()),
+                logger='root')
diff --git a/openselfsup/hooks/optimizer_hook.py b/openselfsup/hooks/optimizer_hook.py
new file mode 100644
index 00000000..e8c1b7c9
--- /dev/null
+++ b/openselfsup/hooks/optimizer_hook.py
@@ -0,0 +1,16 @@
+from mmcv.runner import OptimizerHook
+
+
+class DistOptimizerHook(OptimizerHook):
+
+    def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1):
+        self.grad_clip = grad_clip
+        self.coalesce = coalesce
+        self.bucket_size_mb = bucket_size_mb
+
+    def after_train_iter(self, runner):
+        runner.optimizer.zero_grad()
+        runner.outputs['loss'].backward()
+        if self.grad_clip is not None:
+            self.clip_grads(runner.model.parameters())
+        runner.optimizer.step()
diff --git a/openselfsup/hooks/registry.py b/openselfsup/hooks/registry.py
new file mode 100644
index 00000000..1f196dc8
--- /dev/null
+++ b/openselfsup/hooks/registry.py
@@ -0,0 +1,3 @@
+from openselfsup.utils import Registry
+
+HOOKS = Registry('hook')
diff --git a/openselfsup/hooks/validate_hook.py b/openselfsup/hooks/validate_hook.py
new file mode 100644
index 00000000..45efd9e2
--- /dev/null
+++ b/openselfsup/hooks/validate_hook.py
@@ -0,0 +1,71 @@
+from mmcv.runner import Hook
+
+import torch
+from torch.utils.data import Dataset
+
+from openselfsup.utils import nondist_forward_collect, dist_forward_collect
+from .registry import HOOKS
+
+
+@HOOKS.register_module
+class ValidateHook(Hook):
+
+    def __init__(self,
+                 dataset,
+                 dist_mode=True,
+                 initial=True,
+                 interval=1,
+                 **eval_kwargs):
+        from openselfsup import datasets
+        if isinstance(dataset, Dataset):
+            self.dataset = dataset
+        elif isinstance(dataset, dict):
+            self.dataset = datasets.build_dataset(dataset)
+        else:
+            raise TypeError(
+                'dataset must be a Dataset object or a dict, not {}'.format(
+                    type(dataset)))
+        self.data_loader = datasets.build_dataloader(
+            self.dataset,
+            eval_kwargs['imgs_per_gpu'],
+            eval_kwargs['workers_per_gpu'],
+            dist=dist_mode,
+            shuffle=False)
+        self.dist_mode = dist_mode
+        self.initial = initial
+        self.interval = interval
+        self.eval_kwargs = eval_kwargs
+
+    def before_run(self, runner):
+        if self.initial:
+            self._run_validate(runner)
+
+    def after_train_epoch(self, runner):
+        if not self.every_n_epochs(runner, self.interval):
+            return
+        self._run_validate(runner)
+
+    def _run_validate(self, runner):
+        runner.model.eval()
+        func = lambda **x: runner.model(mode='test', **x)
+        if self.dist_mode:
+            results = dist_forward_collect(
+                func, self.data_loader, runner.rank,
+                len(self.dataset))  # dict{key: np.ndarray}
+        else:
+            results = nondist_forward_collect(func, self.data_loader,
+                                              len(self.dataset))
+        if runner.rank == 0:
+            for name, val in results.items():
+                self._evaluate(runner, torch.from_numpy(val), name)
+        runner.model.train()
+
+    def _evaluate(self, runner, results, keyword):
+        eval_res = self.dataset.evaluate(
+            results,
+            keyword=keyword,
+            logger=runner.logger,
+            **self.eval_kwargs['eval_param'])
+        for name, val in eval_res.items():
+            runner.log_buffer.output[name] = val
+        runner.log_buffer.ready = True
diff --git a/openselfsup/models/__init__.py b/openselfsup/models/__init__.py
new file mode 100644
index 00000000..80813ea3
--- /dev/null
+++ b/openselfsup/models/__init__.py
@@ -0,0 +1,20 @@
+from .backbones import *  # noqa: F401,F403
+from .builder import (build_backbone, build_model, build_head, build_loss)
+from .heads import *
+from .classification import Classification
+from .deepcluster import DeepCluster
+from .odc import ODC
+from .losses import *  # noqa: F401,F403
+from .necks import *
+from .npid import NPID
+from .memories import *
+from .moco import MOCO
+from .registry import (BACKBONES, MODELS, NECKS, MEMORIES, HEADS, LOSSES)
+from .rotation_pred import RotationPred
+from .simclr import SimCLR
+
+#__all__ = [
+#    'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES',
+#    'DETECTORS', 'CLASSIFIERS', 'build_backbone', 'build_neck', 'build_roi_extractor',
+#    'build_shared_head', 'build_head', 'build_loss', 'build_detector', 'build_detector'
+#]
diff --git a/openselfsup/models/backbones/__init__.py b/openselfsup/models/backbones/__init__.py
new file mode 100644
index 00000000..d718d076
--- /dev/null
+++ b/openselfsup/models/backbones/__init__.py
@@ -0,0 +1,6 @@
+#from .hrnet import HRNet
+from .resnet import ResNet, make_res_layer
+#from .resnext import ResNeXt
+#from .ssd_vgg import SSDVGG
+
+#__all__ = ['ResNet', 'make_res_layer', 'ResNeXt', 'SSDVGG', 'HRNet']
diff --git a/openselfsup/models/backbones/resnet.py b/openselfsup/models/backbones/resnet.py
new file mode 100644
index 00000000..db2c5fe0
--- /dev/null
+++ b/openselfsup/models/backbones/resnet.py
@@ -0,0 +1,429 @@
+import torch.nn as nn
+import torch.utils.checkpoint as cp
+from mmcv.cnn import constant_init, kaiming_init
+from mmcv.runner import load_checkpoint
+from torch.nn.modules.batchnorm import _BatchNorm
+
+from openselfsup.utils import get_root_logger
+from ..registry import BACKBONES
+from ..utils import build_conv_layer, build_norm_layer
+
+
+class BasicBlock(nn.Module):
+    expansion = 1
+
+    def __init__(self,
+                 inplanes,
+                 planes,
+                 stride=1,
+                 dilation=1,
+                 downsample=None,
+                 style='pytorch',
+                 with_cp=False,
+                 conv_cfg=None,
+                 norm_cfg=dict(type='BN')):
+        super(BasicBlock, self).__init__()
+
+        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
+        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
+
+        self.conv1 = build_conv_layer(
+            conv_cfg,
+            inplanes,
+            planes,
+            3,
+            stride=stride,
+            padding=dilation,
+            dilation=dilation,
+            bias=False)
+        self.add_module(self.norm1_name, norm1)
+        self.conv2 = build_conv_layer(
+            conv_cfg, planes, planes, 3, padding=1, bias=False)
+        self.add_module(self.norm2_name, norm2)
+
+        self.relu = nn.ReLU(inplace=True)
+        self.downsample = downsample
+        self.stride = stride
+        self.dilation = dilation
+        assert not with_cp
+
+    @property
+    def norm1(self):
+        return getattr(self, self.norm1_name)
+
+    @property
+    def norm2(self):
+        return getattr(self, self.norm2_name)
+
+    def forward(self, x):
+        identity = x
+
+        out = self.conv1(x)
+        out = self.norm1(out)
+        out = self.relu(out)
+
+        out = self.conv2(out)
+        out = self.norm2(out)
+
+        if self.downsample is not None:
+            identity = self.downsample(x)
+
+        out += identity
+        out = self.relu(out)
+
+        return out
+
+
+class Bottleneck(nn.Module):
+    expansion = 4
+
+    def __init__(self,
+                 inplanes,
+                 planes,
+                 stride=1,
+                 dilation=1,
+                 downsample=None,
+                 style='pytorch',
+                 with_cp=False,
+                 conv_cfg=None,
+                 norm_cfg=dict(type='BN')):
+        """Bottleneck block for ResNet.
+        If style is "pytorch", the stride-two layer is the 3x3 conv layer;
+        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
+        """
+        super(Bottleneck, self).__init__()
+        assert style in ['pytorch', 'caffe']
+
+        self.inplanes = inplanes
+        self.planes = planes
+        self.stride = stride
+        self.dilation = dilation
+        self.style = style
+        self.with_cp = with_cp
+        self.conv_cfg = conv_cfg
+        self.norm_cfg = norm_cfg
+
+        if self.style == 'pytorch':
+            self.conv1_stride = 1
+            self.conv2_stride = stride
+        else:
+            self.conv1_stride = stride
+            self.conv2_stride = 1
+
+        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
+        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
+        self.norm3_name, norm3 = build_norm_layer(
+            norm_cfg, planes * self.expansion, postfix=3)
+
+        self.conv1 = build_conv_layer(
+            conv_cfg,
+            inplanes,
+            planes,
+            kernel_size=1,
+            stride=self.conv1_stride,
+            bias=False)
+        self.add_module(self.norm1_name, norm1)
+        self.conv2 = build_conv_layer(
+            conv_cfg,
+            planes,
+            planes,
+            kernel_size=3,
+            stride=self.conv2_stride,
+            padding=dilation,
+            dilation=dilation,
+            bias=False)
+        self.add_module(self.norm2_name, norm2)
+        self.conv3 = build_conv_layer(
+            conv_cfg,
+            planes,
+            planes * self.expansion,
+            kernel_size=1,
+            bias=False)
+        self.add_module(self.norm3_name, norm3)
+
+        self.relu = nn.ReLU(inplace=True)
+        self.downsample = downsample
+
+    @property
+    def norm1(self):
+        return getattr(self, self.norm1_name)
+
+    @property
+    def norm2(self):
+        return getattr(self, self.norm2_name)
+
+    @property
+    def norm3(self):
+        return getattr(self, self.norm3_name)
+
+    def forward(self, x):
+
+        def _inner_forward(x):
+            identity = x
+
+            out = self.conv1(x)
+            out = self.norm1(out)
+            out = self.relu(out)
+
+            out = self.conv2(out)
+            out = self.norm2(out)
+            out = self.relu(out)
+
+            out = self.conv3(out)
+            out = self.norm3(out)
+
+            if self.downsample is not None:
+                identity = self.downsample(x)
+
+            out += identity
+
+            return out
+
+        if self.with_cp and x.requires_grad:
+            out = cp.checkpoint(_inner_forward, x)
+        else:
+            out = _inner_forward(x)
+
+        out = self.relu(out)
+
+        return out
+
+
+def make_res_layer(block,
+                   inplanes,
+                   planes,
+                   blocks,
+                   stride=1,
+                   dilation=1,
+                   style='pytorch',
+                   with_cp=False,
+                   conv_cfg=None,
+                   norm_cfg=dict(type='BN')):
+    downsample = None
+    if stride != 1 or inplanes != planes * block.expansion:
+        downsample = nn.Sequential(
+            build_conv_layer(
+                conv_cfg,
+                inplanes,
+                planes * block.expansion,
+                kernel_size=1,
+                stride=stride,
+                bias=False),
+            build_norm_layer(norm_cfg, planes * block.expansion)[1],
+        )
+
+    layers = []
+    layers.append(
+        block(
+            inplanes=inplanes,
+            planes=planes,
+            stride=stride,
+            dilation=dilation,
+            downsample=downsample,
+            style=style,
+            with_cp=with_cp,
+            conv_cfg=conv_cfg,
+            norm_cfg=norm_cfg))
+    inplanes = planes * block.expansion
+    for i in range(1, blocks):
+        layers.append(
+            block(
+                inplanes=inplanes,
+                planes=planes,
+                stride=1,
+                dilation=dilation,
+                style=style,
+                with_cp=with_cp,
+                conv_cfg=conv_cfg,
+                norm_cfg=norm_cfg))
+
+    return nn.Sequential(*layers)
+
+
+@BACKBONES.register_module
+class ResNet(nn.Module):
+    """ResNet backbone.
+
+    Args:
+        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
+        in_channels (int): Number of input image channels. Normally 3.
+        num_stages (int): Resnet stages, normally 4.
+        strides (Sequence[int]): Strides of the first block of each stage.
+        dilations (Sequence[int]): Dilation of each stage.
+        out_indices (Sequence[int]): Output from which stages.
+        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
+            layer is the 3x3 conv layer, otherwise the stride-two layer is
+            the first 1x1 conv layer.
+        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
+            -1 means not freezing any parameters.
+        norm_cfg (dict): Dictionary to construct and config norm layer.
+        norm_eval (bool): Whether to set norm layers to eval mode, namely,
+            freeze running stats (mean and var). Note: Effect on Batch Norm
+            and its variants only.
+        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+            memory while slowing down the training speed.
+        zero_init_residual (bool): Whether to use zero init for last norm layer
+            in resblocks to let them behave as identity.
+
+    Example:
+        >>> from openselfsup.models import ResNet
+        >>> import torch
+        >>> self = ResNet(depth=18, out_indices=(1, 2, 3, 4))
+        >>> self.eval()
+        >>> inputs = torch.rand(1, 3, 32, 32)
+        >>> level_outputs = self.forward(inputs)
+        >>> for level_out in level_outputs:
+        ...     print(tuple(level_out.shape))
+        (1, 64, 8, 8)
+        (1, 128, 4, 4)
+        (1, 256, 2, 2)
+        (1, 512, 1, 1)
+    """
+
+    arch_settings = {
+        18: (BasicBlock, (2, 2, 2, 2)),
+        34: (BasicBlock, (3, 4, 6, 3)),
+        50: (Bottleneck, (3, 4, 6, 3)),
+        101: (Bottleneck, (3, 4, 23, 3)),
+        152: (Bottleneck, (3, 8, 36, 3))
+    }
+
+    def __init__(self,
+                 depth,
+                 in_channels=3,
+                 num_stages=4,
+                 strides=(1, 2, 2, 2),
+                 dilations=(1, 1, 1, 1),
+                 out_indices=(0, 1, 2, 3, 4),
+                 style='pytorch',
+                 frozen_stages=-1,
+                 conv_cfg=None,
+                 norm_cfg=dict(type='BN', requires_grad=True),
+                 norm_eval=False,
+                 with_cp=False,
+                 zero_init_residual=False):
+        super(ResNet, self).__init__()
+        if depth not in self.arch_settings:
+            raise KeyError('invalid depth {} for resnet'.format(depth))
+        self.depth = depth
+        self.num_stages = num_stages
+        assert num_stages >= 1 and num_stages <= 4
+        self.strides = strides
+        self.dilations = dilations
+        assert len(strides) == len(dilations) == num_stages
+        self.out_indices = out_indices
+        assert max(out_indices) < num_stages + 1
+        self.style = style
+        self.frozen_stages = frozen_stages
+        self.conv_cfg = conv_cfg
+        self.norm_cfg = norm_cfg
+        self.with_cp = with_cp
+        self.norm_eval = norm_eval
+        self.zero_init_residual = zero_init_residual
+        self.block, stage_blocks = self.arch_settings[depth]
+        self.stage_blocks = stage_blocks[:num_stages]
+        self.inplanes = 64
+
+        self._make_stem_layer(in_channels)
+
+        self.res_layers = []
+        for i, num_blocks in enumerate(self.stage_blocks):
+            stride = strides[i]
+            dilation = dilations[i]
+            planes = 64 * 2**i
+            res_layer = make_res_layer(
+                self.block,
+                self.inplanes,
+                planes,
+                num_blocks,
+                stride=stride,
+                dilation=dilation,
+                style=self.style,
+                with_cp=with_cp,
+                conv_cfg=conv_cfg,
+                norm_cfg=norm_cfg)
+            self.inplanes = planes * self.block.expansion
+            layer_name = 'layer{}'.format(i + 1)
+            self.add_module(layer_name, res_layer)
+            self.res_layers.append(layer_name)
+
+        self._freeze_stages()
+
+        self.feat_dim = self.block.expansion * 64 * 2**(
+            len(self.stage_blocks) - 1)
+
+    @property
+    def norm1(self):
+        return getattr(self, self.norm1_name)
+
+    def _make_stem_layer(self, in_channels):
+        self.conv1 = build_conv_layer(
+            self.conv_cfg,
+            in_channels,
+            64,
+            kernel_size=7,
+            stride=2,
+            padding=3,
+            bias=False)
+        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
+        self.add_module(self.norm1_name, norm1)
+        self.relu = nn.ReLU(inplace=True)
+        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+
+    def _freeze_stages(self):
+        if self.frozen_stages >= 0:
+            self.norm1.eval()
+            for m in [self.conv1, self.norm1]:
+                for param in m.parameters():
+                    param.requires_grad = False
+
+        for i in range(1, self.frozen_stages + 1):
+            m = getattr(self, 'layer{}'.format(i))
+            m.eval()
+            for param in m.parameters():
+                param.requires_grad = False
+
+    def init_weights(self, pretrained=None):
+        if isinstance(pretrained, str):
+            logger = get_root_logger()
+            load_checkpoint(self, pretrained, strict=False, logger=logger)
+        elif pretrained is None:
+            for m in self.modules():
+                if isinstance(m, nn.Conv2d):
+                    kaiming_init(m, mode='fan_in', nonlinearity='relu')
+                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
+                    constant_init(m, 1)
+
+            if self.zero_init_residual:
+                for m in self.modules():
+                    if isinstance(m, Bottleneck):
+                        constant_init(m.norm3, 0)
+                    elif isinstance(m, BasicBlock):
+                        constant_init(m.norm2, 0)
+        else:
+            raise TypeError('pretrained must be a str or None')
+
+    def forward(self, x):
+        outs = []
+        x = self.conv1(x)
+        x = self.norm1(x)
+        x = self.relu(x)  # r50: 64x128x128
+        if 0 in self.out_indices:
+            outs.append(x)
+        x = self.maxpool(x)  # r50: 64x56x56
+        for i, layer_name in enumerate(self.res_layers):
+            res_layer = getattr(self, layer_name)
+            x = res_layer(x)
+            if i + 1 in self.out_indices:
+                outs.append(x)
+        # r50: 1-256x56x56; 2-512x28x28; 3-1024x14x14; 4-2048x7x7
+        return tuple(outs)
+
+    def train(self, mode=True):
+        super(ResNet, self).train(mode)
+        self._freeze_stages()
+        if mode and self.norm_eval:
+            for m in self.modules():
+                # trick: eval has an effect on BatchNorm only
+                if isinstance(m, _BatchNorm):
+                    m.eval()
diff --git a/openselfsup/models/backbones/resnext.py b/openselfsup/models/backbones/resnext.py
new file mode 100644
index 00000000..326e8827
--- /dev/null
+++ b/openselfsup/models/backbones/resnext.py
@@ -0,0 +1,222 @@
+import math
+
+import torch.nn as nn
+
+from ..registry import BACKBONES
+from ..utils import build_conv_layer, build_norm_layer
+from .resnet import Bottleneck as _Bottleneck
+from .resnet import ResNet
+
+
+class Bottleneck(_Bottleneck):
+
+    def __init__(self, inplanes, planes, groups=1, base_width=4, **kwargs):
+        """Bottleneck block for ResNeXt.
+
+        If style is "pytorch", the stride-two layer is the 3x3 conv layer;
+        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
+ """ + super(Bottleneck, self).__init__(inplanes, planes, **kwargs) + + if groups == 1: + width = self.planes + else: + width = math.floor(self.planes * (base_width / 64)) * groups + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, width, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, width, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.inplanes, + width, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + fallback_on_stride = False + self.with_modulated_dcn = False + if self.with_dcn: + fallback_on_stride = self.dcn.pop('fallback_on_stride', False) + if not self.with_dcn or fallback_on_stride: + self.conv2 = build_conv_layer( + self.conv_cfg, + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + else: + assert self.conv_cfg is None, 'conv_cfg must be None for DCN' + self.conv2 = build_conv_layer( + self.dcn, + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + self.conv_cfg, + width, + self.planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + +def make_res_layer(block, + inplanes, + planes, + blocks, + stride=1, + dilation=1, + groups=1, + base_width=4, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + gcb=None): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1], + ) + + layers = [] + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + dilation=dilation, + downsample=downsample, + groups=groups, + base_width=base_width, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb)) + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=1, + dilation=dilation, + groups=groups, + base_width=base_width, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb)) + + return nn.Sequential(*layers) + + +@BACKBONES.register_module +class ResNeXt(ResNet): + """ResNeXt backbone. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + in_channels (int): Number of input image channels. Normally 3. + num_stages (int): Resnet stages, normally 4. + groups (int): Group of resnext. + base_width (int): Base width of resnext. + strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + norm_cfg (dict): dictionary to construct and config norm layer. 
+        norm_eval (bool): Whether to set norm layers to eval mode, namely,
+            freeze running stats (mean and var). Note: Effect on Batch Norm
+            and its variants only.
+        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+            memory while slowing down the training speed.
+        zero_init_residual (bool): Whether to use zero init for last norm layer
+            in resblocks to let them behave as identity.
+
+    Example:
+        >>> from openselfsup.models import ResNeXt
+        >>> import torch
+        >>> self = ResNeXt(depth=50, out_indices=(1, 2, 3, 4))
+        >>> self.eval()
+        >>> inputs = torch.rand(1, 3, 32, 32)
+        >>> level_outputs = self.forward(inputs)
+        >>> for level_out in level_outputs:
+        ...     print(tuple(level_out.shape))
+        (1, 256, 8, 8)
+        (1, 512, 4, 4)
+        (1, 1024, 2, 2)
+        (1, 2048, 1, 1)
+    """
+
+    arch_settings = {
+        50: (Bottleneck, (3, 4, 6, 3)),
+        101: (Bottleneck, (3, 4, 23, 3)),
+        152: (Bottleneck, (3, 8, 36, 3))
+    }
+
+    def __init__(self, groups=1, base_width=4, **kwargs):
+        super(ResNeXt, self).__init__(**kwargs)
+        self.groups = groups
+        self.base_width = base_width
+
+        self.inplanes = 64
+        self.res_layers = []
+        for i, num_blocks in enumerate(self.stage_blocks):
+            stride = self.strides[i]
+            dilation = self.dilations[i]
+            dcn = self.dcn if self.stage_with_dcn[i] else None
+            gcb = self.gcb if self.stage_with_gcb[i] else None
+            planes = 64 * 2**i
+            res_layer = make_res_layer(
+                self.block,
+                self.inplanes,
+                planes,
+                num_blocks,
+                stride=stride,
+                dilation=dilation,
+                groups=self.groups,
+                base_width=self.base_width,
+                style=self.style,
+                with_cp=self.with_cp,
+                conv_cfg=self.conv_cfg,
+                norm_cfg=self.norm_cfg,
+                dcn=dcn,
+                gcb=gcb)
+            self.inplanes = planes * self.block.expansion
+            layer_name = 'layer{}'.format(i + 1)
+            self.add_module(layer_name, res_layer)
+            self.res_layers.append(layer_name)
+
+        self._freeze_stages()
diff --git a/openselfsup/models/builder.py b/openselfsup/models/builder.py
new file mode 100644
index 00000000..4d2524f5
--- /dev/null
+++ b/openselfsup/models/builder.py
@@ -0,0 +1,38 @@
+from torch import nn
+
+from openselfsup.utils import build_from_cfg
+from .registry import (BACKBONES, MODELS, NECKS, HEADS, MEMORIES, LOSSES)
+
+
+def build(cfg, registry, default_args=None):
+    if isinstance(cfg, list):
+        modules = [
+            build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
+        ]
+        return nn.Sequential(*modules)
+    else:
+        return build_from_cfg(cfg, registry, default_args)
+
+
+def build_backbone(cfg):
+    return build(cfg, BACKBONES)
+
+
+def build_neck(cfg):
+    return build(cfg, NECKS)
+
+
+def build_memory(cfg):
+    return build(cfg, MEMORIES)
+
+
+def build_head(cfg):
+    return build(cfg, HEADS)
+
+
+def build_loss(cfg):
+    return build(cfg, LOSSES)
+
+
+def build_model(cfg):
+    return build(cfg, MODELS)
diff --git a/openselfsup/models/classification.py b/openselfsup/models/classification.py
new file mode 100644
index 00000000..81a1a25f
--- /dev/null
+++ b/openselfsup/models/classification.py
@@ -0,0 +1,79 @@
+import numpy as np
+
+import torch.nn as nn
+
+from openselfsup.utils import print_log
+
+from . import builder
+from .registry import MODELS
+from .utils import Sobel
+
+
+@MODELS.register_module
+class Classification(nn.Module):
+
+    def __init__(self,
+                 backbone,
+                 frozen_backbone=False,
+                 with_sobel=False,
+                 head=None,
+                 pretrained=None):
+        super(Classification, self).__init__()
+        self.with_sobel = with_sobel
+        if with_sobel:
+            self.sobel_layer = Sobel()
+        self.backbone = builder.build_backbone(backbone)
+        if frozen_backbone:
+            self.backbone.eval()
+            for param in self.backbone.parameters():
+                param.requires_grad = False
+        if head is not None:
+            self.head = builder.build_head(head)
+        self.init_weights(pretrained=pretrained)
+
+    def init_weights(self, pretrained=None):
+        if pretrained is not None:
+            print_log('load model from: {}'.format(pretrained), logger='root')
+        self.backbone.init_weights(pretrained=pretrained)
+        self.head.init_weights()
+
+    def forward_backbone(self, img):
+        """Forward backbone.
+
+        Returns:
+            x (tuple): backbone outputs
+        """
+        if self.with_sobel:
+            img = self.sobel_layer(img)
+        x = self.backbone(img)
+        return x
+
+    def forward_train(self, img, gt_label, **kwargs):
+        x = self.forward_backbone(img)
+        outs = self.head(x)
+        loss_inputs = (outs, gt_label)
+        losses = self.head.loss(*loss_inputs)
+        return losses
+
+    def forward_test(self, img, **kwargs):
+        x = self.forward_backbone(img)  # tuple
+        outs = self.head(x)
+        keys = ['head{}'.format(i) for i in range(len(outs))]
+        out_tensors = [out.cpu() for out in outs]  # NxC
+        return dict(zip(keys, out_tensors))
+
+    def aug_test(self, imgs):
+        # Multi-view test-time augmentation is not supported yet; the code
+        # below only sketches the intended behavior.
+        raise NotImplementedError
+        outs = np.mean([self.head(x) for x in self.forward_backbone(imgs)],
+                       axis=0)
+        return outs
+
+    def forward(self, img, mode='train', **kwargs):
+        if mode == 'train':
+            return self.forward_train(img, **kwargs)
+        elif mode == 'test':
+            return self.forward_test(img, **kwargs)
+        elif mode == 'extract':
+            return self.forward_backbone(img)
+        else:
+            raise Exception("No such mode: {}".format(mode))
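A usage sketch for the mode switch above; the config values are illustrative rather than taken from the repo's configs:

    import torch
    from openselfsup.models import build_model
    model = build_model(dict(
        type='Classification',
        backbone=dict(type='ResNet', depth=50, out_indices=(4, )),
        head=dict(type='ClsHead', with_avg_pool=True, in_channels=2048,
                  num_classes=1000)))
    img = torch.rand(2, 3, 224, 224)
    feats = model(img, mode='extract')  # tuple of backbone feature maps
    losses = model(img, mode='train', gt_label=torch.tensor([3, 7]))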
import builder +from .registry import MODELS +from .utils import Sobel + + +@MODELS.register_module +class DeepCluster(nn.Module): + + def __init__(self, + backbone, + with_sobel=False, + neck=None, + head=None, + pretrained=None): + super(DeepCluster, self).__init__() + self.with_sobel = with_sobel + if with_sobel: + self.sobel_layer = Sobel() + self.backbone = builder.build_backbone(backbone) + self.neck = builder.build_neck(neck) + if head is not None: + self.head = builder.build_head(head) + self.init_weights(pretrained=pretrained) + + # reweight + self.num_classes = head.num_classes + self.loss_weight = torch.ones((self.num_classes, ), + dtype=torch.float32).cuda() + self.loss_weight /= self.loss_weight.sum() + + def init_weights(self, pretrained=None): + if pretrained is not None: + print_log('load model from: {}'.format(pretrained), logger='root') + self.backbone.init_weights(pretrained=pretrained) + self.neck.init_weights(init_linear='kaiming') + self.head.init_weights(init_linear='normal') + + def forward_backbone(self, img): + """Forward backbone + + Returns: + x (tuple): backbone outputs + """ + if self.with_sobel: + img = self.sobel_layer(img) + x = self.backbone(img) + return x + + def forward_train(self, img, pseudo_label, **kwargs): + x = self.forward_backbone(img) + assert len(x) == 1 + feature = self.neck(x) + outs = self.head(feature) + loss_inputs = (outs, pseudo_label) + losses = self.head.loss(*loss_inputs) + return losses + + def forward_test(self, img, **kwargs): + x = self.forward_backbone(img) # tuple + outs = self.head(x) + keys = ['head{}'.format(i) for i in range(len(outs))] + out_tensors = [out.cpu() for out in outs] # NxC + return dict(zip(keys, out_tensors)) + + def forward(self, img, mode='train', **kwargs): + if mode == 'train': + return self.forward_train(img, **kwargs) + elif mode == 'test': + return self.forward_test(img, **kwargs) + elif mode == 'extract': + return self.forward_backbone(img) + else: + raise Exception("No such mode: {}".format(mode)) + + def set_reweight(self, labels, reweight_pow=0.5): + hist = np.bincount( + labels, minlength=self.num_classes).astype(np.float32) + inv_hist = (1. / (hist + 1e-10))**reweight_pow + weight = inv_hist / inv_hist.sum() + self.loss_weight.copy_(torch.from_numpy(weight)) + self.head.criterion = nn.CrossEntropyLoss(weight=self.loss_weight) diff --git a/openselfsup/models/heads/__init__.py b/openselfsup/models/heads/__init__.py new file mode 100644 index 00000000..c6bd865c --- /dev/null +++ b/openselfsup/models/heads/__init__.py @@ -0,0 +1,3 @@ +from .contrastive_head import ContrastiveHead +from .cls_head import ClsHead +from .multi_cls_head import MultiClsHead diff --git a/openselfsup/models/heads/cls_head.py b/openselfsup/models/heads/cls_head.py new file mode 100644 index 00000000..b225718d --- /dev/null +++ b/openselfsup/models/heads/cls_head.py @@ -0,0 +1,60 @@ +import torch.nn as nn +from mmcv.cnn import kaiming_init, normal_init + +from ..utils import accuracy +from ..registry import HEADS + + +@HEADS.register_module +class ClsHead(nn.Module): + """Simplest classifier head, with only one fc layer. 
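The `set_reweight` method above counteracts imbalanced pseudo-label clusters by weighting each class with the inverse of its (smoothed) frequency, raised to `reweight_pow` to damp the correction. A quick numeric check of the formula:

```python
import numpy as np

# Numeric check of the inverse-frequency reweighting in set_reweight above.
labels = np.array([0, 0, 0, 0, 0, 0, 1, 1, 2])   # cluster sizes 6, 2, 1
hist = np.bincount(labels, minlength=3).astype(np.float32)
inv_hist = (1. / (hist + 1e-10)) ** 0.5           # reweight_pow = 0.5
weight = inv_hist / inv_hist.sum()
print(weight)  # approximately [0.193, 0.334, 0.473]: rare clusters get larger weight
```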
+ """ + + def __init__(self, + with_avg_pool=False, + in_channels=2048, + num_classes=1000): + super(ClsHead, self).__init__() + self.with_avg_pool = with_avg_pool + self.in_channels = in_channels + self.num_classes = num_classes + + self.criterion = nn.CrossEntropyLoss() + + if self.with_avg_pool: + self.avg_pool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc_cls = nn.Linear(in_channels, num_classes) + + def init_weights(self, init_linear='normal'): + assert init_linear in ['normal', 'kaiming'], \ + "Undefined init_linear: {}".format(init_linear) + for m in self.modules(): + if isinstance(m, nn.Linear): + if init_linear == 'normal': + normal_init(m, std=0.01) + else: + kaiming_init(m, mode='fan_in', nonlinearity='relu') + elif isinstance(m, + (nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + assert isinstance(x, (tuple, list)) and len(x) == 1 + x = x[0] + if self.with_avg_pool: + assert x.dim() == 4, \ + "Tensor must has 4 dims, got: {}".format(x.dim()) + x = self.avg_pool(x) + x = x.view(x.size(0), -1) + cls_score = self.fc_cls(x) + return [cls_score] + + def loss(self, cls_score, labels): + losses = dict() + assert isinstance(cls_score, (tuple, list)) and len(cls_score) == 1 + losses['loss'] = self.criterion(cls_score[0], labels) + losses['acc'] = accuracy(cls_score[0], labels) + return losses diff --git a/openselfsup/models/heads/contrastive_head.py b/openselfsup/models/heads/contrastive_head.py new file mode 100644 index 00000000..b0455293 --- /dev/null +++ b/openselfsup/models/heads/contrastive_head.py @@ -0,0 +1,29 @@ +import torch +import torch.nn as nn + +from ..registry import HEADS + + +@HEADS.register_module +class ContrastiveHead(nn.Module): + '''Head for contrastive learning. + ''' + + def __init__(self, temperature=0.1): + super(ContrastiveHead, self).__init__() + self.criterion = nn.CrossEntropyLoss() + self.temperature = temperature + + def forward(self, pos, neg): + ''' + Args: + pos (Tensor): Nx1 positive similarity + neg (Tensor): Nxk negative similarity + ''' + N = pos.size(0) + logits = torch.cat((pos, neg), dim=1) + logits /= self.temperature + labels = torch.zeros((N, ), dtype=torch.long).cuda() + losses = dict() + losses['loss'] = self.criterion(logits, labels) + return losses diff --git a/openselfsup/models/heads/multi_cls_head.py b/openselfsup/models/heads/multi_cls_head.py new file mode 100644 index 00000000..babe5649 --- /dev/null +++ b/openselfsup/models/heads/multi_cls_head.py @@ -0,0 +1,77 @@ +import torch.nn as nn + +from ..utils import accuracy +from ..registry import HEADS +from ..utils import build_norm_layer, MultiPooling + + +@HEADS.register_module +class MultiClsHead(nn.Module): + """Multiple classifier heads. 
+ """ + FEAT_CHANNELS = {'resnet50': [64, 256, 512, 1024, 2048]} + FEAT_LAST_UNPOOL = {'resnet50': 2048 * 7 * 7} + + def __init__(self, + pool_type='adaptive', + in_indices=(0, ), + with_last_layer_unpool=False, + backbone='resnet50', + norm_cfg=dict(type='BN'), + num_classes=1000): + super(MultiClsHead, self).__init__() + assert norm_cfg['type'] in ['BN', 'SyncBN', 'GN', 'null'] + + self.with_last_layer_unpool = with_last_layer_unpool + self.with_norm = norm_cfg['type'] != 'null' + + self.criterion = nn.CrossEntropyLoss() + + self.multi_pooling = MultiPooling(pool_type, in_indices, backbone) + + if self.with_norm: + self.norms = nn.ModuleList([ + build_norm_layer(norm_cfg, self.FEAT_CHANNELS[backbone][l])[1] + for l in in_indices + ]) + + self.fcs = nn.ModuleList([ + nn.Linear(self.multi_pooling.POOL_DIMS[backbone][l], num_classes) + for l in in_indices + ]) + if with_last_layer_unpool: + self.fcs.append( + nn.Linear(self.FEAT_LAST_UNPOOL[backbone], num_classes)) + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.constant_(m.bias, 0) + elif isinstance(m, + (nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + assert isinstance(x, (list, tuple)) + if self.with_last_layer_unpool: + last_x = x[-1] + x = self.multi_pooling(x) + if self.with_norm: + x = [n(xx) for n, xx in zip(self.norms, x)] + if self.with_last_layer_unpool: + x.append(last_x) + x = [xx.view(xx.size(0), -1) for xx in x] + x = [fc(xx) for fc, xx in zip(self.fcs, x)] + return x + + def loss(self, cls_score, labels): + losses = dict() + for i, s in enumerate(cls_score): + # keys must contain "loss" + losses['loss.{}'.format(i + 1)] = self.criterion(s, labels) + losses['acc.{}'.format(i + 1)] = accuracy(s, labels) + return losses diff --git a/openselfsup/models/losses/__init__.py b/openselfsup/models/losses/__init__.py new file mode 100644 index 00000000..bfa523d6 --- /dev/null +++ b/openselfsup/models/losses/__init__.py @@ -0,0 +1,19 @@ +#from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss +#from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy, +# cross_entropy, mask_cross_entropy) +#from .focal_loss import FocalLoss, sigmoid_focal_loss +#from .ghm_loss import GHMC, GHMR +#from .iou_loss import (BoundedIoULoss, GIoULoss, IoULoss, bounded_iou_loss, +# iou_loss) +#from .mse_loss import MSELoss, mse_loss +#from .smooth_l1_loss import SmoothL1Loss, smooth_l1_loss +#from .utils import reduce_loss, weight_reduce_loss, weighted_loss + +#__all__ = [ +# 'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy', +# 'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss', +# 'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss', +# 'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss', +# 'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'GHMC', 'GHMR', 'reduce_loss', +# 'weight_reduce_loss', 'weighted_loss' +#] diff --git a/openselfsup/models/losses/cross_entropy_loss.py b/openselfsup/models/losses/cross_entropy_loss.py new file mode 100644 index 00000000..dd9d4776 --- /dev/null +++ b/openselfsup/models/losses/cross_entropy_loss.py @@ -0,0 +1,103 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..registry import LOSSES +from .utils import weight_reduce_loss + + +def cross_entropy(pred, label, weight=None, reduction='mean', 
avg_factor=None): + # element-wise losses + loss = F.cross_entropy(pred, label, reduction='none') + + # apply weights and do the reduction + if weight is not None: + weight = weight.float() + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def _expand_binary_labels(labels, label_weights, label_channels): + bin_labels = labels.new_full((labels.size(0), label_channels), 0) + inds = torch.nonzero(labels >= 1).squeeze() + if inds.numel() > 0: + bin_labels[inds, labels[inds] - 1] = 1 + if label_weights is None: + bin_label_weights = None + else: + bin_label_weights = label_weights.view(-1, 1).expand( + label_weights.size(0), label_channels) + return bin_labels, bin_label_weights + + +def binary_cross_entropy(pred, + label, + weight=None, + reduction='mean', + avg_factor=None): + if pred.dim() != label.dim(): + label, weight = _expand_binary_labels(label, weight, pred.size(-1)) + + # weighted element-wise losses + if weight is not None: + weight = weight.float() + loss = F.binary_cross_entropy_with_logits( + pred, label.float(), weight, reduction='none') + # do the reduction for the weighted loss + loss = weight_reduce_loss(loss, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None): + # TODO: handle these two reserved arguments + assert reduction == 'mean' and avg_factor is None + num_rois = pred.size()[0] + inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device) + pred_slice = pred[inds, label].squeeze(1) + return F.binary_cross_entropy_with_logits( + pred_slice, target, reduction='mean')[None] + + +@LOSSES.register_module +class CrossEntropyLoss(nn.Module): + + def __init__(self, + use_sigmoid=False, + use_mask=False, + reduction='mean', + loss_weight=1.0): + super(CrossEntropyLoss, self).__init__() + assert (use_sigmoid is False) or (use_mask is False) + self.use_sigmoid = use_sigmoid + self.use_mask = use_mask + self.reduction = reduction + self.loss_weight = loss_weight + + if self.use_sigmoid: + self.cls_criterion = binary_cross_entropy + elif self.use_mask: + self.cls_criterion = mask_cross_entropy + else: + self.cls_criterion = cross_entropy + + def forward(self, + cls_score, + label, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss_cls = self.loss_weight * self.cls_criterion( + cls_score, + label, + weight, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss_cls diff --git a/openselfsup/models/losses/focal_loss.py b/openselfsup/models/losses/focal_loss.py new file mode 100644 index 00000000..2cbf2edd --- /dev/null +++ b/openselfsup/models/losses/focal_loss.py @@ -0,0 +1,82 @@ +import torch.nn as nn +import torch.nn.functional as F + +from openselfsup.ops import sigmoid_focal_loss as _sigmoid_focal_loss +from ..registry import LOSSES +from .utils import weight_reduce_loss + + +# This method is only for debugging +def py_sigmoid_focal_loss(pred, + target, + weight=None, + gamma=2.0, + alpha=0.25, + reduction='mean', + avg_factor=None): + pred_sigmoid = pred.sigmoid() + target = target.type_as(pred) + pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) + focal_weight = (alpha * target + (1 - alpha) * + (1 - target)) * pt.pow(gamma) + loss = F.binary_cross_entropy_with_logits( + pred, target, reduction='none') * 
focal_weight + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +def sigmoid_focal_loss(pred, + target, + weight=None, + gamma=2.0, + alpha=0.25, + reduction='mean', + avg_factor=None): + # Function.apply does not accept keyword arguments, so the decorator + # "weighted_loss" is not applicable + loss = _sigmoid_focal_loss(pred, target, gamma, alpha) + # TODO: find a proper way to handle the shape of weight + if weight is not None: + weight = weight.view(-1, 1) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +@LOSSES.register_module +class FocalLoss(nn.Module): + + def __init__(self, + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=1.0): + super(FocalLoss, self).__init__() + assert use_sigmoid is True, 'Only sigmoid focal loss supported now.' + self.use_sigmoid = use_sigmoid + self.gamma = gamma + self.alpha = alpha + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if self.use_sigmoid: + loss_cls = self.loss_weight * sigmoid_focal_loss( + pred, + target, + weight, + gamma=self.gamma, + alpha=self.alpha, + reduction=reduction, + avg_factor=avg_factor) + else: + raise NotImplementedError + return loss_cls diff --git a/openselfsup/models/losses/ghm_loss.py b/openselfsup/models/losses/ghm_loss.py new file mode 100644 index 00000000..e62b9904 --- /dev/null +++ b/openselfsup/models/losses/ghm_loss.py @@ -0,0 +1,171 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..registry import LOSSES + + +def _expand_binary_labels(labels, label_weights, label_channels): + bin_labels = labels.new_full((labels.size(0), label_channels), 0) + inds = torch.nonzero(labels >= 1).squeeze() + if inds.numel() > 0: + bin_labels[inds, labels[inds] - 1] = 1 + bin_label_weights = label_weights.view(-1, 1).expand( + label_weights.size(0), label_channels) + return bin_labels, bin_label_weights + + +# TODO: code refactoring to make it consistent with other losses +@LOSSES.register_module +class GHMC(nn.Module): + """GHM Classification Loss. + + Details of the theorem can be viewed in the paper + "Gradient Harmonized Single-stage Detector". + https://arxiv.org/abs/1811.05181 + + Args: + bins (int): Number of the unit regions for distribution calculation. + momentum (float): The parameter for moving average. + use_sigmoid (bool): Can only be true for BCE based loss now. + loss_weight (float): The weight of the total GHM-C loss. + """ + + def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0): + super(GHMC, self).__init__() + self.bins = bins + self.momentum = momentum + edges = torch.arange(bins + 1).float() / bins + self.register_buffer('edges', edges) + self.edges[-1] += 1e-6 + if momentum > 0: + acc_sum = torch.zeros(bins) + self.register_buffer('acc_sum', acc_sum) + self.use_sigmoid = use_sigmoid + if not self.use_sigmoid: + raise NotImplementedError + self.loss_weight = loss_weight + + def forward(self, pred, target, label_weight, *args, **kwargs): + """Calculate the GHM-C loss. + + Args: + pred (float tensor of size [batch_num, class_num]): + The direct prediction of classification fc layer. + target (float tensor of size [batch_num, class_num]): + Binary class target for each sample. 
+ label_weight (float tensor of size [batch_num, class_num]): + the value is 1 if the sample is valid and 0 if ignored. + Returns: + The gradient harmonized loss. + """ + # the target should be binary class label + if pred.dim() != target.dim(): + target, label_weight = _expand_binary_labels( + target, label_weight, pred.size(-1)) + target, label_weight = target.float(), label_weight.float() + edges = self.edges + mmt = self.momentum + weights = torch.zeros_like(pred) + + # gradient length + g = torch.abs(pred.sigmoid().detach() - target) + + valid = label_weight > 0 + tot = max(valid.float().sum().item(), 1.0) + n = 0 # n valid bins + for i in range(self.bins): + inds = (g >= edges[i]) & (g < edges[i + 1]) & valid + num_in_bin = inds.sum().item() + if num_in_bin > 0: + if mmt > 0: + self.acc_sum[i] = mmt * self.acc_sum[i] \ + + (1 - mmt) * num_in_bin + weights[inds] = tot / self.acc_sum[i] + else: + weights[inds] = tot / num_in_bin + n += 1 + if n > 0: + weights = weights / n + + loss = F.binary_cross_entropy_with_logits( + pred, target, weights, reduction='sum') / tot + return loss * self.loss_weight + + +# TODO: code refactoring to make it consistent with other losses +@LOSSES.register_module +class GHMR(nn.Module): + """GHM Regression Loss. + + Details of the theorem can be viewed in the paper + "Gradient Harmonized Single-stage Detector" + https://arxiv.org/abs/1811.05181 + + Args: + mu (float): The parameter for the Authentic Smooth L1 loss. + bins (int): Number of the unit regions for distribution calculation. + momentum (float): The parameter for moving average. + loss_weight (float): The weight of the total GHM-R loss. + """ + + def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0): + super(GHMR, self).__init__() + self.mu = mu + self.bins = bins + edges = torch.arange(bins + 1).float() / bins + self.register_buffer('edges', edges) + self.edges[-1] = 1e3 + self.momentum = momentum + if momentum > 0: + acc_sum = torch.zeros(bins) + self.register_buffer('acc_sum', acc_sum) + self.loss_weight = loss_weight + + # TODO: support reduction parameter + def forward(self, pred, target, label_weight, avg_factor=None): + """Calculate the GHM-R loss. + + Args: + pred (float tensor of size [batch_num, 4 (* class_num)]): + The prediction of box regression layer. Channel number can be 4 + or 4 * class_num depending on whether it is class-agnostic. + target (float tensor of size [batch_num, 4 (* class_num)]): + The target regression values with the same size of pred. + label_weight (float tensor of size [batch_num, 4 (* class_num)]): + The weight of each sample, 0 if ignored. + Returns: + The gradient harmonized loss. 
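The binning loop in `GHMC.forward` above implements gradient harmonizing: each example's "gradient length" g = |sigmoid(pred) - target| is histogrammed into `bins` unit regions, and examples in sparsely populated bins are up-weighted by tot / num_in_bin, flattening the gradient-density distribution. A compact sketch of just that weighting (momentum branch and validity mask omitted):

```python
import torch

# Sketch of the GHM-C weighting loop above (no momentum, all samples valid).
bins = 10
pred = torch.randn(6, 3)
target = torch.randint(0, 2, (6, 3)).float()

edges = torch.arange(bins + 1).float() / bins
edges[-1] += 1e-6                                 # so g == 1 falls in the last bin
g = (pred.sigmoid() - target).abs()               # "gradient length"

tot = float(g.numel())
weights = torch.zeros_like(g)
n = 0                                             # number of non-empty bins
for i in range(bins):
    inds = (g >= edges[i]) & (g < edges[i + 1])
    num_in_bin = inds.sum().item()
    if num_in_bin > 0:
        weights[inds] = tot / num_in_bin          # rare gradient lengths get upweighted
        n += 1
weights /= max(n, 1)                              # normalize by non-empty bin count
```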
+ """ + mu = self.mu + edges = self.edges + mmt = self.momentum + + # ASL1 loss + diff = pred - target + loss = torch.sqrt(diff * diff + mu * mu) - mu + + # gradient length + g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach() + weights = torch.zeros_like(g) + + valid = label_weight > 0 + tot = max(label_weight.float().sum().item(), 1.0) + n = 0 # n: valid bins + for i in range(self.bins): + inds = (g >= edges[i]) & (g < edges[i + 1]) & valid + num_in_bin = inds.sum().item() + if num_in_bin > 0: + n += 1 + if mmt > 0: + self.acc_sum[i] = mmt * self.acc_sum[i] \ + + (1 - mmt) * num_in_bin + weights[inds] = tot / self.acc_sum[i] + else: + weights[inds] = tot / num_in_bin + if n > 0: + weights /= n + + loss = loss * weights + loss = loss.sum() / tot + return loss * self.loss_weight diff --git a/openselfsup/models/losses/utils.py b/openselfsup/models/losses/utils.py new file mode 100644 index 00000000..3361c6ca --- /dev/null +++ b/openselfsup/models/losses/utils.py @@ -0,0 +1,98 @@ +import functools + +import torch.nn.functional as F + + +def reduce_loss(loss, reduction): + """Reduce loss as specified. + + Args: + loss (Tensor): Elementwise loss tensor. + reduction (str): Options are "none", "mean" and "sum". + + Return: + Tensor: Reduced loss tensor. + """ + reduction_enum = F._Reduction.get_enum(reduction) + # none: 0, elementwise_mean:1, sum: 2 + if reduction_enum == 0: + return loss + elif reduction_enum == 1: + return loss.mean() + elif reduction_enum == 2: + return loss.sum() + + +def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): + """Apply element-wise weight and reduce loss. + + Args: + loss (Tensor): Element-wise loss. + weight (Tensor): Element-wise weights. + reduction (str): Same as built-in losses of PyTorch. + avg_factor (float): Avarage factor when computing the mean of losses. + + Returns: + Tensor: Processed loss values. + """ + # if weight is specified, apply element-wise weight + if weight is not None: + loss = loss * weight + + # if avg_factor is not specified, just reduce the loss + if avg_factor is None: + loss = reduce_loss(loss, reduction) + else: + # if reduction is mean, then average the loss by avg_factor + if reduction == 'mean': + loss = loss.sum() / avg_factor + # if reduction is 'none', then do nothing, otherwise raise an error + elif reduction != 'none': + raise ValueError('avg_factor can not be used with reduction="sum"') + return loss + + +def weighted_loss(loss_func): + """Create a weighted version of a given loss function. + + To use this decorator, the loss function must have the signature like + `loss_func(pred, target, **kwargs)`. The function only needs to compute + element-wise loss without any reduction. This decorator will add weight + and reduction arguments to the function. The decorated function will have + the signature like `loss_func(pred, target, weight=None, reduction='mean', + avg_factor=None, **kwargs)`. + + :Example: + + >>> import torch + >>> @weighted_loss + >>> def l1_loss(pred, target): + >>> return (pred - target).abs() + + >>> pred = torch.Tensor([0, 2, 3]) + >>> target = torch.Tensor([1, 1, 1]) + >>> weight = torch.Tensor([1, 0, 1]) + + >>> l1_loss(pred, target) + tensor(1.3333) + >>> l1_loss(pred, target, weight) + tensor(1.) 
+ >>> l1_loss(pred, target, reduction='none') + tensor([1., 1., 2.]) + >>> l1_loss(pred, target, weight, avg_factor=2) + tensor(1.5000) + """ + + @functools.wraps(loss_func) + def wrapper(pred, + target, + weight=None, + reduction='mean', + avg_factor=None, + **kwargs): + # get element-wise loss + loss = loss_func(pred, target, **kwargs) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + return wrapper diff --git a/openselfsup/models/memories/__init__.py b/openselfsup/models/memories/__init__.py new file mode 100644 index 00000000..21a0c929 --- /dev/null +++ b/openselfsup/models/memories/__init__.py @@ -0,0 +1,3 @@ +from .odc_memory import ODCMemory +from .odc_memory_gpu import ODCMemoryGPU +from .simple_memory import SimpleMemory diff --git a/openselfsup/models/memories/odc_memory.py b/openselfsup/models/memories/odc_memory.py new file mode 100644 index 00000000..d5e6b624 --- /dev/null +++ b/openselfsup/models/memories/odc_memory.py @@ -0,0 +1,217 @@ +import numpy as np +from sklearn.cluster import KMeans + +import torch +import torch.nn as nn +import torch.distributed as dist +from mmcv.runner import get_dist_info + +from ..registry import MEMORIES + + +@MEMORIES.register_module +class ODCMemory(nn.Module): + + def __init__(self, length, feat_dim, momentum, num_classes, min_cluster, + **kwargs): + super(ODCMemory, self).__init__() + self.rank, self.num_replicas = get_dist_info() + if self.rank == 0: + self.feature_bank = torch.zeros((length, feat_dim), + dtype=torch.float32) + self.label_bank = torch.zeros((length, ), dtype=torch.long) + self.centroids = torch.zeros((num_classes, feat_dim), + dtype=torch.float32).cuda() + self.kmeans = KMeans(n_clusters=2, random_state=0, max_iter=20) + self.feat_dim = feat_dim + self.initialized = False + self.momentum = momentum + self.num_classes = num_classes + self.min_cluster = min_cluster + self.debug = kwargs.get('debug', False) + + def init_memory(self, feature, label): + self.initialized = True + self.label_bank.copy_(torch.from_numpy(label).long()) + # make sure no empty clusters + assert (np.bincount(label, minlength=self.num_classes) != 0).all() + if self.rank == 0: + feature /= (np.linalg.norm(feature, axis=1).reshape(-1, 1) + 1e-10) + self.feature_bank.copy_(torch.from_numpy(feature)) + centroids = self._compute_centroids() + self.centroids.copy_(centroids) + dist.broadcast(self.centroids, 0) + + def _compute_centroids_ind(self, cinds): + '''compute a few centroids''' + assert self.rank == 0 + num = len(cinds) + centroids = torch.zeros((num, self.feat_dim), dtype=torch.float32) + for i, c in enumerate(cinds): + ind = np.where(self.label_bank.numpy() == c)[0] + centroids[i, :] = self.feature_bank[ind, :].mean(dim=0) + return centroids + + def _compute_centroids(self): + '''compute all non-empty centroids''' + assert self.rank == 0 + l = self.label_bank.numpy() + argl = np.argsort(l) + sortl = l[argl] + diff_pos = np.where(sortl[1:] - sortl[:-1] != 0)[0] + 1 + start = np.insert(diff_pos, 0, 0) + end = np.insert(diff_pos, len(diff_pos), len(l)) + class_start = sortl[start] + # keep empty class centroids unchanged + centroids = self.centroids.cpu().clone() + for i, st, ed in zip(class_start, start, end): + centroids[i, :] = self.feature_bank[argl[st:ed], :].mean(dim=0) + return centroids + + def _gather(self, ind, feature): # gather ind and feature + #if not hasattr(self, 'ind_gathered'): + # self.ind_gathered = [torch.ones_like(ind).cuda() + # for _ in range(self.num_replicas)] + #if not hasattr(self, 
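`_compute_centroids` above avoids a per-class scan of the whole bank: it sorts the labels once and cuts the sorted array wherever the value changes, so each contiguous segment is exactly one non-empty cluster. A small demonstration of that index bookkeeping:

```python
import numpy as np

# How _compute_centroids above finds the sample range of each non-empty class:
# sort the labels, then cut where the sorted sequence changes value.
l = np.array([2, 0, 1, 0, 2, 2])
argl = np.argsort(l)                  # sample indices grouped by class: [1, 3, 2, 0, 4, 5]
sortl = l[argl]                       # [0, 0, 1, 2, 2, 2]
diff_pos = np.where(sortl[1:] != sortl[:-1])[0] + 1
start = np.insert(diff_pos, 0, 0)                 # [0, 2, 3] segment starts
end = np.insert(diff_pos, len(diff_pos), len(l))  # [2, 3, 6] segment ends
class_start = sortl[start]                        # class id of each segment: [0, 1, 2]
for c, st, ed in zip(class_start, start, end):
    print(c, argl[st:ed])   # class -> indices of its samples, mean of which is the centroid
```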
'feature_gathered'): + # self.feature_gathered = [torch.ones_like(feature).cuda() + # for _ in range(self.num_replicas)] + ind_gathered = [ + torch.ones_like(ind).cuda() for _ in range(self.num_replicas) + ] + feature_gathered = [ + torch.ones_like(feature).cuda() for _ in range(self.num_replicas) + ] + dist.all_gather(ind_gathered, ind) + dist.all_gather(feature_gathered, feature) + ind_gathered = torch.cat(ind_gathered, dim=0) + feature_gathered = torch.cat(feature_gathered, dim=0) + return ind_gathered, feature_gathered + + def update_samples_memory(self, ind, feature): # ind, feature: cuda tensor + assert self.initialized + feature_norm = feature / (feature.norm(dim=1).view(-1, 1) + 1e-10 + ) # normalize + ind, feature_norm = self._gather( + ind, feature_norm) # ind: (N*w), feature: (N*w)xk, cuda tensor + ind = ind.cpu() + if self.rank == 0: + feature_old = self.feature_bank[ind, ...].cuda() + feature_new = (1 - self.momentum) * feature_old + \ + self.momentum * feature_norm + feature_norm = feature_new / ( + feature_new.norm(dim=1).view(-1, 1) + 1e-10) + self.feature_bank[ind, ...] = feature_norm.cpu() + dist.barrier() + dist.broadcast(feature_norm, 0) + # compute new labels + similarity_to_centroids = torch.mm(self.centroids, + feature_norm.permute(1, 0)) # CxN + newlabel = similarity_to_centroids.argmax(dim=0) # cuda tensor + newlabel_cpu = newlabel.cpu() + change_ratio = (newlabel_cpu != + self.label_bank[ind]).sum().float().cuda() \ + / float(newlabel_cpu.shape[0]) + self.label_bank[ind] = newlabel_cpu.clone() # copy to cpu + return change_ratio + + def deal_with_small_clusters(self): + # check empty class + hist = np.bincount(self.label_bank.numpy(), minlength=self.num_classes) + small_clusters = np.where(hist < self.min_cluster)[0].tolist() + if self.debug and self.rank == 0: + print("mincluster: {}, num of small class: {}".format( + hist.min(), len(small_clusters))) + if len(small_clusters) == 0: + return + # re-assign samples in small clusters to make them empty + for s in small_clusters: + ind = np.where(self.label_bank.numpy() == s)[0] + if len(ind) > 0: + inclusion = torch.from_numpy( + np.setdiff1d( + np.arange(self.num_classes), + np.array(small_clusters), + assume_unique=True)).cuda() + if self.rank == 0: + target_ind = torch.mm( + self.centroids[inclusion, :], + self.feature_bank[ind, :].cuda().permute( + 1, 0)).argmax(dim=0) + target = inclusion[target_ind] + else: + target = torch.zeros((ind.shape[0], ), + dtype=torch.int64).cuda() + dist.all_reduce(target) + self.label_bank[ind] = torch.from_numpy(target.cpu().numpy()) + # deal with empty cluster + self._redirect_empty_clusters(small_clusters) + + def update_centroids_memory(self, cinds=None): + if self.rank == 0: + if self.debug: + print("updating centroids ...") + if cinds is None: + center = self._compute_centroids() + self.centroids.copy_(center) + else: + center = self._compute_centroids_ind(cinds) + self.centroids[ + torch.LongTensor(cinds).cuda(), :] = center.cuda() + dist.broadcast(self.centroids, 0) + + def _partition_max_cluster(self, max_cluster): + assert self.rank == 0 + max_cluster_inds = np.where(self.label_bank == max_cluster)[0] + + assert len(max_cluster_inds) >= 2 + max_cluster_features = self.feature_bank[max_cluster_inds, :] + if np.any(np.isnan(max_cluster_features.numpy())): + raise Exception("Has nan in features.") + kmeans_ret = self.kmeans.fit(max_cluster_features) + sub_cluster1_ind = max_cluster_inds[kmeans_ret.labels_ == 0] + sub_cluster2_ind = max_cluster_inds[kmeans_ret.labels_ == 1] + 
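Stripped of the distributed gathering, `update_samples_memory` above does three things: an exponential moving average between stored and fresh features, a re-projection onto the unit sphere, and a nearest-centroid relabeling. A single-process sketch of those steps:

```python
import torch

# Single-process sketch of the memory update in update_samples_memory above.
momentum = 0.5
feature_bank = torch.nn.functional.normalize(torch.randn(100, 16))
centroids = torch.nn.functional.normalize(torch.randn(10, 16))
label_bank = torch.randint(0, 10, (100,))

ind = torch.tensor([3, 7, 42])
feature = torch.nn.functional.normalize(torch.randn(3, 16))

# 1) momentum blend with the stored features, 2) re-project to unit norm
blended = (1 - momentum) * feature_bank[ind] + momentum * feature
updated = torch.nn.functional.normalize(blended)
feature_bank[ind] = updated

# 3) relabel each sample by its most similar centroid (cosine = dot product here)
newlabel = (centroids @ updated.t()).argmax(dim=0)
change_ratio = (newlabel != label_bank[ind]).float().mean()
label_bank[ind] = newlabel
```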
if not (len(sub_cluster1_ind) > 0 and len(sub_cluster2_ind) > 0): + print( + "Warning: kmeans partition fails, resort to random partition.") + sub_cluster1_ind = np.random.choice( + max_cluster_inds, len(max_cluster_inds) // 2, replace=False) + sub_cluster2_ind = np.setdiff1d( + max_cluster_inds, sub_cluster1_ind, assume_unique=True) + return sub_cluster1_ind, sub_cluster2_ind + + def _redirect_empty_clusters(self, empty_clusters): + for e in empty_clusters: + assert (self.label_bank != e).all().item(), \ + "Cluster #{} is not an empty cluster.".format(e) + max_cluster = np.bincount( + self.label_bank, minlength=self.num_classes).argmax().item() + # gather partitioning indices + if self.rank == 0: + sub_cluster1_ind, sub_cluster2_ind = self._partition_max_cluster( + max_cluster) + size1 = torch.LongTensor([len(sub_cluster1_ind)]).cuda() + size2 = torch.LongTensor([len(sub_cluster2_ind)]).cuda() + sub_cluster1_ind_tensor = torch.from_numpy( + sub_cluster1_ind).long().cuda() + sub_cluster2_ind_tensor = torch.from_numpy( + sub_cluster2_ind).long().cuda() + else: + size1 = torch.LongTensor([0]).cuda() + size2 = torch.LongTensor([0]).cuda() + dist.all_reduce(size1) + dist.all_reduce(size2) + if self.rank != 0: + sub_cluster1_ind_tensor = torch.zeros( + (size1, ), dtype=torch.int64).cuda() + sub_cluster2_ind_tensor = torch.zeros( + (size2, ), dtype=torch.int64).cuda() + dist.broadcast(sub_cluster1_ind_tensor, 0) + dist.broadcast(sub_cluster2_ind_tensor, 0) + if self.rank != 0: + sub_cluster1_ind = sub_cluster1_ind_tensor.cpu().numpy() + sub_cluster2_ind = sub_cluster2_ind_tensor.cpu().numpy() + + # reassign samples in partition #2 to the empty class + self.label_bank[sub_cluster2_ind] = e + # update centroids of max_cluster and e + self.update_centroids_memory([max_cluster, e]) diff --git a/openselfsup/models/memories/odc_memory_gpu.py b/openselfsup/models/memories/odc_memory_gpu.py new file mode 100644 index 00000000..be078566 --- /dev/null +++ b/openselfsup/models/memories/odc_memory_gpu.py @@ -0,0 +1,190 @@ +import numpy as np +from sklearn.cluster import KMeans + +import torch +import torch.nn as nn +import torch.distributed as dist +from mmcv.runner import get_dist_info + +from ..registry import MEMORIES + + +@MEMORIES.register_module +class ODCMemoryGPU(nn.Module): + '''Memory bank for Online Deep Clustering. Feature bank stored in GPU. 
+ ''' + + def __init__(self, length, feat_dim, momentum, num_classes, min_cluster, + **kwargs): + super(ODCMemoryGPU, self).__init__() + self.rank, self.num_replicas = get_dist_info() + self.feature_bank = torch.zeros((length, feat_dim), + dtype=torch.float32).cuda() + self.label_bank = torch.zeros((length, ), dtype=torch.long).cuda() + self.centroids = torch.zeros((num_classes, feat_dim), + dtype=torch.float32).cuda() + self.kmeans = KMeans(n_clusters=2, random_state=0, max_iter=20) + self.feat_dim = feat_dim + self.initialized = False + self.momentum = momentum + self.num_classes = num_classes + self.min_cluster = min_cluster + self.debug = kwargs.get('debug', False) + + @torch.no_grad() + def init_memory(self, feature, label): + self.initialized = True + self.label_bank.copy_(torch.from_numpy(label).long().cuda()) + # make sure no empty clusters + assert (np.bincount(label, minlength=self.num_classes) != 0).all() + feature /= (np.linalg.norm(feature, axis=1).reshape(-1, 1) + 1e-10) + self.feature_bank.copy_(torch.from_numpy(feature)) + self._compute_centroids() + + @torch.no_grad() + def _compute_centroids_ind(self, cinds): + '''compute a few centroids''' + for i, c in enumerate(cinds): + ind = torch.where(self.label_bank == c)[0] + self.centroids[c, :] = self.feature_bank[ind, :].mean(dim=0) # index by class id c, not loop position i + + def _compute_centroids(self): + '''compute all non-empty centroids''' + if self.debug: + print("enter: _compute_centroids") + l = self.label_bank.cpu().numpy() + argl = np.argsort(l) + sortl = l[argl] + diff_pos = np.where(sortl[1:] - sortl[:-1] != 0)[0] + 1 + start = np.insert(diff_pos, 0, 0) + end = np.insert(diff_pos, len(diff_pos), len(l)) + class_start = sortl[start] + # keep empty class centroids unchanged + for i, st, ed in zip(class_start, start, end): + self.centroids[i, :] = self.feature_bank[argl[st:ed], :].mean( + dim=0) + + def _gather(self, ind, feature): # gather ind and feature + if self.debug: + print("enter: _gather") + assert ind.size(0) > 0 + ind_gathered = [ + torch.ones_like(ind).cuda() for _ in range(self.num_replicas) + ] + feature_gathered = [ + torch.ones_like(feature).cuda() for _ in range(self.num_replicas) + ] + dist.all_gather(ind_gathered, ind) + dist.all_gather(feature_gathered, feature) + ind_gathered = torch.cat(ind_gathered, dim=0) + feature_gathered = torch.cat(feature_gathered, dim=0) + return ind_gathered, feature_gathered + + def update_samples_memory(self, ind, feature): # ind, feature: cuda tensor + if self.debug: + print("enter: update_samples_memory") + assert self.initialized + feature_norm = feature / (feature.norm(dim=1).view(-1, 1) + 1e-10 + ) # normalize + ind, feature_norm = self._gather( + ind, feature_norm) # ind: (N*w), feature: (N*w)xk, cuda tensor + # momentum update + feature_old = self.feature_bank[ind, ...] + feature_new = (1 - self.momentum) * feature_old + \ + self.momentum * feature_norm + feature_norm = feature_new / ( + feature_new.norm(dim=1).view(-1, 1) + 1e-10) + self.feature_bank[ind, ...]
= feature_norm + # compute new labels + similarity_to_centroids = torch.mm(self.centroids, + feature_norm.permute(1, 0)) # CxN + newlabel = similarity_to_centroids.argmax(dim=0) # cuda tensor + change_ratio = (newlabel != + self.label_bank[ind]).sum().float() \ + / float(newlabel.shape[0]) + self.label_bank[ind] = newlabel.clone() # copy to cpu + return change_ratio + + @torch.no_grad() + def deal_with_small_clusters(self): + if self.debug: + print("enter: deal_with_small_clusters") + # check empty class + hist = torch.bincount(self.label_bank, minlength=self.num_classes) + small_clusters = torch.where(hist < self.min_cluster)[0] + if self.debug and self.rank == 0: + print("mincluster: {}, num of small class: {}".format( + hist.min(), len(small_clusters))) + if len(small_clusters) == 0: + return + # re-assign samples in small clusters to make them empty + for s in small_clusters: + ind = torch.where(self.label_bank == s)[0] + if len(ind) > 0: + inclusion = torch.from_numpy( + np.setdiff1d( + np.arange(self.num_classes), + small_clusters.cpu().numpy(), + assume_unique=True)).cuda() + target_ind = torch.mm(self.centroids[inclusion, :], + self.feature_bank[ind, :].permute( + 1, 0)).argmax(dim=0) + target = inclusion[target_ind] + self.label_bank[ind] = target + # deal with empty cluster + self._redirect_empty_clusters(small_clusters) + + def update_centroids_memory(self, cinds=None): + if cinds is None: + self._compute_centroids() + else: + self._compute_centroids_ind(cinds) + + def _partition_max_cluster(self, max_cluster): + if self.debug: + print("enter: _partition_max_cluster") + assert self.rank == 0 # avoid randomness among ranks + max_cluster_inds = torch.where(self.label_bank == max_cluster)[0] + size = len(max_cluster_inds) + + assert size >= 2 # image indices in the max cluster + max_cluster_features = self.feature_bank[max_cluster_inds, :] + if torch.any(torch.isnan(max_cluster_features)): + raise Exception("Has nan in features.") + kmeans_ret = self.kmeans.fit(max_cluster_features.cpu().numpy()) + kmeans_labels = torch.from_numpy(kmeans_ret.labels_).cuda() + sub_cluster1_ind = max_cluster_inds[kmeans_labels == 0] + sub_cluster2_ind = max_cluster_inds[kmeans_labels == 1] + if not (len(sub_cluster1_ind) > 0 and len(sub_cluster2_ind) > 0): + print( + "Warning: kmeans partition fails, resort to random partition.") + rnd_idx = torch.randperm(size) + sub_cluster1_ind = max_cluster_inds[rnd_idx[:size // 2]] + sub_cluster2_ind = max_cluster_inds[rnd_idx[size // 2:]] + return sub_cluster1_ind, sub_cluster2_ind + + def _redirect_empty_clusters(self, empty_clusters): + if self.debug: + print("enter: _redirect_empty_clusters") + for e in empty_clusters: + assert (self.label_bank != e).all().item(), \ + "Cluster #{} is not an empty cluster.".format(e) + max_cluster = torch.bincount( + self.label_bank, minlength=self.num_classes).argmax().item() + # gather partitioning indices + if self.rank == 0: + sub_cluster1_ind, sub_cluster2_ind = self._partition_max_cluster( + max_cluster) + size2 = torch.LongTensor([len(sub_cluster2_ind)]).cuda() + else: + size2 = torch.LongTensor([0]).cuda() + dist.all_reduce(size2) + if self.rank != 0: + sub_cluster2_ind = torch.zeros((size2, ), + dtype=torch.int64).cuda() + dist.broadcast(sub_cluster2_ind, 0) + + # reassign samples in partition #2 to the empty class + self.label_bank[sub_cluster2_ind] = e + # update centroids of max_cluster and e + self.update_centroids_memory([max_cluster, e]) diff --git a/openselfsup/models/memories/simple_memory.py 
b/openselfsup/models/memories/simple_memory.py new file mode 100644 index 00000000..5ee7775d --- /dev/null +++ b/openselfsup/models/memories/simple_memory.py @@ -0,0 +1,42 @@ +import torch +import torch.nn as nn +import torch.distributed as dist +from mmcv.runner import get_dist_info +from openselfsup.utils import AliasMethod + +from ..registry import MEMORIES + + +@MEMORIES.register_module +class SimpleMemory(nn.Module): + + def __init__(self, length, feat_dim, momentum, **kwargs): + super(SimpleMemory, self).__init__() + self.rank, self.num_replicas = get_dist_info() + self.feature_bank = torch.randn(length, feat_dim).cuda() + self.feature_bank = nn.functional.normalize(self.feature_bank) + self.momentum = momentum + self.multinomial = AliasMethod(torch.ones(length)) + self.multinomial.cuda() + + def update(self, ind, feature): + feature_norm = nn.functional.normalize(feature) + ind, feature_norm = self._gather(ind, feature_norm) + feature_old = self.feature_bank[ind, ...] + feature_new = (1 - self.momentum) * feature_old + \ + self.momentum * feature_norm + feature_new_norm = nn.functional.normalize(feature_new) + self.feature_bank[ind, ...] = feature_new_norm + + def _gather(self, ind, feature): # gather ind and feature + ind_gathered = [ + torch.ones_like(ind).cuda() for _ in range(self.num_replicas) + ] + feature_gathered = [ + torch.ones_like(feature).cuda() for _ in range(self.num_replicas) + ] + dist.all_gather(ind_gathered, ind) + dist.all_gather(feature_gathered, feature) + ind_gathered = torch.cat(ind_gathered, dim=0) + feature_gathered = torch.cat(feature_gathered, dim=0) + return ind_gathered, feature_gathered diff --git a/openselfsup/models/moco.py b/openselfsup/models/moco.py new file mode 100644 index 00000000..c4ff19a7 --- /dev/null +++ b/openselfsup/models/moco.py @@ -0,0 +1,189 @@ +import torch +import torch.nn as nn + +from openselfsup.utils import print_log + +from . import builder +from .registry import MODELS + + +@MODELS.register_module +class MOCO(nn.Module): + '''MOCO. + Part of the code is borrowed from: + "https://github.com/facebookresearch/moco/blob/master/moco/builder.py". 
+ ''' + + def __init__(self, + backbone, + neck=None, + head=None, + pretrained=None, + queue_len=65536, + feat_dim=128, + momentum=0.999, + **kwargs): + super(MOCO, self).__init__() + self.encoder_q = nn.Sequential( + builder.build_backbone(backbone), builder.build_neck(neck)) + self.encoder_k = nn.Sequential( + builder.build_backbone(backbone), builder.build_neck(neck)) + self.backbone = self.encoder_q[0] + for param in self.encoder_k.parameters(): + param.requires_grad = False + self.head = builder.build_head(head) + self.init_weights(pretrained=pretrained) + + self.queue_len = queue_len + self.momentum = momentum + + # create the queue + self.register_buffer("queue", torch.randn(feat_dim, queue_len)) + self.queue = nn.functional.normalize(self.queue, dim=0) + self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long)) + + def init_weights(self, pretrained=None): + if pretrained is not None: + print_log('load model from: {}'.format(pretrained), logger='root') + self.encoder_q[0].init_weights(pretrained=pretrained) + self.encoder_q[1].init_weights(init_linear='kaiming') + for param_q, param_k in zip(self.encoder_q.parameters(), + self.encoder_k.parameters()): + param_k.data.copy_(param_q.data) + + @torch.no_grad() + def _momentum_update_key_encoder(self): + """ + Momentum update of the key encoder + """ + for param_q, param_k in zip(self.encoder_q.parameters(), + self.encoder_k.parameters()): + param_k.data = param_k.data * self.momentum + \ + param_q.data * (1. - self.momentum) + + @torch.no_grad() + def _dequeue_and_enqueue(self, keys): + # gather keys before updating queue + keys = concat_all_gather(keys) + + batch_size = keys.shape[0] + + ptr = int(self.queue_ptr) + assert self.queue_len % batch_size == 0 # for simplicity + + # replace the keys at ptr (dequeue and enqueue) + self.queue[:, ptr:ptr + batch_size] = keys.transpose(0, 1) + ptr = (ptr + batch_size) % self.queue_len # move pointer + + self.queue_ptr[0] = ptr + + @torch.no_grad() + def _batch_shuffle_ddp(self, x): + """ + Batch shuffle, for making use of BatchNorm. + *** Only support DistributedDataParallel (DDP) model. *** + """ + # gather from all gpus + batch_size_this = x.shape[0] + x_gather = concat_all_gather(x) + batch_size_all = x_gather.shape[0] + + num_gpus = batch_size_all // batch_size_this + + # random shuffle index + idx_shuffle = torch.randperm(batch_size_all).cuda() + + # broadcast to all gpus + torch.distributed.broadcast(idx_shuffle, src=0) + + # index for restoring + idx_unshuffle = torch.argsort(idx_shuffle) + + # shuffled index for this gpu + gpu_idx = torch.distributed.get_rank() + idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx] + + return x_gather[idx_this], idx_unshuffle + + @torch.no_grad() + def _batch_unshuffle_ddp(self, x, idx_unshuffle): + """ + Undo batch shuffle. + *** Only support DistributedDataParallel (DDP) model. 
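`_momentum_update_key_encoder` above is an EMA in parameter space: with momentum m close to 1 the key encoder changes slowly, which is what keeps the queued keys consistent with each other. A compact sketch of the initialization and one update step, using toy encoders:

```python
import torch
import torch.nn as nn

# Sketch of the key-encoder EMA in _momentum_update_key_encoder above.
m = 0.999
encoder_q = nn.Linear(8, 4)
encoder_k = nn.Linear(8, 4)

# initialize k from q and freeze it, as in init_weights above
for pq, pk in zip(encoder_q.parameters(), encoder_k.parameters()):
    pk.data.copy_(pq.data)
    pk.requires_grad = False

# one EMA step: theta_k <- m * theta_k + (1 - m) * theta_q
with torch.no_grad():
    for pq, pk in zip(encoder_q.parameters(), encoder_k.parameters()):
        pk.data.mul_(m).add_(pq.data, alpha=1 - m)
```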
*** + """ + # gather from all gpus + batch_size_this = x.shape[0] + x_gather = concat_all_gather(x) + batch_size_all = x_gather.shape[0] + + num_gpus = batch_size_all // batch_size_this + + # restored index for this gpu + gpu_idx = torch.distributed.get_rank() + idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx] + + return x_gather[idx_this] + + def forward_train(self, img, **kwargs): + assert img.dim() == 5, \ + "Input must have 5 dims, got: {}".format(img.dim()) + im_q = img[:, 0, ...].contiguous() + im_k = img[:, 1, ...].contiguous() + # compute query features + q = self.encoder_q(im_q)[0] # queries: NxC + q = nn.functional.normalize(q, dim=1) + + # compute key features + with torch.no_grad(): # no gradient to keys + self._momentum_update_key_encoder() # update the key encoder + + # shuffle for making use of BN + im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k) + + k = self.encoder_k(im_k)[0] # keys: NxC + k = nn.functional.normalize(k, dim=1) + + # undo shuffle + k = self._batch_unshuffle_ddp(k, idx_unshuffle) + + # compute logits + # Einstein sum is more intuitive + # positive logits: Nx1 + l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1) + # negative logits: NxK + l_neg = torch.einsum('nc,ck->nk', [q, self.queue.clone().detach()]) + + losses = self.head(l_pos, l_neg) + self._dequeue_and_enqueue(k) + + return losses + + def forward_test(self, img, **kwargs): + pass + + def forward(self, img, mode='train', **kwargs): + if mode == 'train': + return self.forward_train(img, **kwargs) + elif mode == 'test': + return self.forward_test(img, **kwargs) + elif mode == 'extract': + return self.encoder_q[0](img) + else: + raise Exception("No such mode: {}".format(mode)) + + +# utils +@torch.no_grad() +def concat_all_gather(tensor): + """ + Performs all_gather operation on the provided tensors. + *** Warning ***: torch.distributed.all_gather has no gradient. 
+ """ + tensors_gather = [ + torch.ones_like(tensor) + for _ in range(torch.distributed.get_world_size()) + ] + torch.distributed.all_gather(tensors_gather, tensor, async_op=False) + + output = torch.cat(tensors_gather, dim=0) + return output diff --git a/openselfsup/models/necks.py b/openselfsup/models/necks.py new file mode 100644 index 00000000..7d509cad --- /dev/null +++ b/openselfsup/models/necks.py @@ -0,0 +1,132 @@ +import torch.nn as nn +from mmcv.cnn import kaiming_init, normal_init + +from .registry import NECKS + + +@NECKS.register_module +class LinearNeck(nn.Module): + + def __init__(self, in_channels, out_channels, with_avg_pool=True): + super(LinearNeck, self).__init__() + self.with_avg_pool = with_avg_pool + if with_avg_pool: + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(in_channels, out_channels) + + def init_weights(self, init_linear='normal'): + assert init_linear in ['normal', 'kaiming'], \ + "Undefined init_linear: {}".format(init_linear) + for m in self.modules(): + if isinstance(m, nn.Linear): + if init_linear == 'normal': + normal_init(m, std=0.01) + else: + kaiming_init(m, mode='fan_in', nonlinearity='relu') + elif isinstance(m, + (nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + assert len(x) == 1 + if self.with_avg_pool: + x = self.avgpool(x[0]) + return [self.fc(x.view(x.size(0), -1))] + + +@NECKS.register_module +class NonLinearNeckV0(nn.Module): + + def __init__(self, + in_channels, + hid_channels, + out_channels, + with_avg_pool=True): + super(NonLinearNeckV0, self).__init__() + self.with_avg_pool = with_avg_pool + if with_avg_pool: + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.mlp = nn.Sequential( + nn.Linear(in_channels, hid_channels), + nn.BatchNorm1d(hid_channels, momentum=0.001, affine=False), + nn.ReLU(inplace=True), nn.Dropout(), + nn.Linear(hid_channels, out_channels), nn.ReLU(inplace=True)) + + def init_weights(self, init_linear='normal'): + assert init_linear in ['normal', 'kaiming'], \ + "Undefined init_linear: {}".format(init_linear) + for m in self.modules(): + if isinstance(m, nn.Linear): + if init_linear == 'normal': + normal_init(m, std=0.01) + else: + kaiming_init(m, mode='fan_in', nonlinearity='relu') + elif isinstance(m, + (nn.BatchNorm1d, nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + assert len(x) == 1 + if self.with_avg_pool: + x = self.avgpool(x[0]) + return [self.mlp(x.view(x.size(0), -1))] + + +@NECKS.register_module +class NonLinearNeckV1(nn.Module): + + def __init__(self, + in_channels, + hid_channels, + out_channels, + with_avg_pool=True): + super(NonLinearNeckV1, self).__init__() + self.with_avg_pool = with_avg_pool + if with_avg_pool: + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.mlp = nn.Sequential( + nn.Linear(in_channels, hid_channels), nn.ReLU(inplace=True), + nn.Linear(hid_channels, out_channels)) + + def init_weights(self, init_linear='normal'): + assert init_linear in ['normal', 'kaiming'], \ + "Undefined init_linear: {}".format(init_linear) + for m in self.modules(): + if isinstance(m, nn.Linear): + if init_linear == 'normal': + normal_init(m, std=0.01) + else: + kaiming_init(m, mode='fan_in', nonlinearity='relu') + elif isinstance(m, + (nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)): + if m.weight 
is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + assert len(x) == 1 + if self.with_avg_pool: + x = self.avgpool(x[0]) + return [self.mlp(x.view(x.size(0), -1))] + + +@NECKS.register_module +class AvgPoolNeck(nn.Module): + + def __init__(self): + super(AvgPoolNeck, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d((1, 1)) + + def init_weights(self, **kwargs): + pass + + def forward(self, x): + assert len(x) == 1 + return [self.avg_pool(x[0])] diff --git a/openselfsup/models/npid.py b/openselfsup/models/npid.py new file mode 100644 index 00000000..34e59ab5 --- /dev/null +++ b/openselfsup/models/npid.py @@ -0,0 +1,100 @@ +import torch +import torch.nn as nn + +from openselfsup.utils import print_log + +from . import builder +from .registry import MODELS + + +@MODELS.register_module +class NPID(nn.Module): + '''Model of "Unsupervised Feature Learning via Non-parametric + Instance Discrimination". + Arguments: + neg_num (int): number of negative samples for each image + ensure_neg (bool): if False, there is a small probability + that negative samples contain positive ones. + ''' + + def __init__(self, + backbone, + neck=None, + head=None, + memory_bank=None, + neg_num=65536, + ensure_neg=False, + pretrained=None): + super(NPID, self).__init__() + self.backbone = builder.build_backbone(backbone) + self.neck = builder.build_neck(neck) + self.head = builder.build_head(head) + self.memory_bank = builder.build_memory(memory_bank) + self.init_weights(pretrained=pretrained) + + self.neg_num = neg_num + self.ensure_neg = ensure_neg + + def init_weights(self, pretrained=None): + if pretrained is not None: + print_log('load model from: {}'.format(pretrained), logger='root') + self.backbone.init_weights(pretrained=pretrained) + self.neck.init_weights(init_linear='kaiming') + + def forward_backbone(self, img): + """Forward backbone + + Returns: + x (tuple): backbone outputs + """ + x = self.backbone(img) + return x + + def forward_train(self, img, idx, **kwargs): + x = self.forward_backbone(img) + idx = idx.cuda() + feature = self.neck(x)[0] + feature = nn.functional.normalize(feature) # BxC + bs, feat_dim = feature.shape[:2] + neg_idx = self.memory_bank.multinomial.draw(bs * self.neg_num) + if self.ensure_neg: + neg_idx = neg_idx.view(bs, -1) + while True: + wrong = (neg_idx == idx.view(-1, 1)) + if wrong.sum().item() > 0: + neg_idx[wrong] = self.memory_bank.multinomial.draw( + wrong.sum().item()) + else: + break + neg_idx = neg_idx.flatten() + + pos_feat = torch.index_select(self.memory_bank.feature_bank, 0, + idx) # BXC + neg_feat = torch.index_select(self.memory_bank.feature_bank, 0, + neg_idx).view(bs, self.neg_num, + feat_dim) # BxKxC + + pos_logits = torch.einsum('nc,nc->n', + [pos_feat, feature]).unsqueeze(-1) + neg_logits = torch.bmm(neg_feat, feature.unsqueeze(2)).squeeze(2) + + losses = self.head(pos_logits, neg_logits) + + # update memory bank + with torch.no_grad(): + self.memory_bank.update(idx, feature.detach()) + + return losses + + def forward_test(self, img, **kwargs): + pass + + def forward(self, img, mode='train', **kwargs): + if mode == 'train': + return self.forward_train(img, **kwargs) + elif mode == 'test': + return self.forward_test(img, **kwargs) + elif mode == 'extract': + return self.forward_backbone(img) + else: + raise Exception("No such mode: {}".format(mode)) diff --git a/openselfsup/models/odc.py b/openselfsup/models/odc.py new file mode 100644 index 00000000..f9cd2fce --- /dev/null +++ 
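NPID's `forward_train` above scores each embedding against its own memory-bank entry (the positive) and `neg_num` randomly drawn entries (the negatives). A small sketch of that scoring, with `torch.randint` standing in for the `AliasMethod` sampler that `SimpleMemory` provides:

```python
import torch

# Sketch of NPID's positive/negative scoring above; torch.randint stands in
# for the AliasMethod multinomial sampler.
bank_len, feat_dim, bs, neg_num = 1000, 32, 4, 8
feature_bank = torch.nn.functional.normalize(torch.randn(bank_len, feat_dim))
feature = torch.nn.functional.normalize(torch.randn(bs, feat_dim))
idx = torch.tensor([0, 1, 2, 3])                 # indices of the batch images

neg_idx = torch.randint(0, bank_len, (bs * neg_num,))
pos_feat = feature_bank[idx]                                  # BxC
neg_feat = feature_bank[neg_idx].view(bs, neg_num, feat_dim)  # BxKxC

pos_logits = torch.einsum('nc,nc->n', [pos_feat, feature]).unsqueeze(-1)  # Bx1
neg_logits = torch.bmm(neg_feat, feature.unsqueeze(2)).squeeze(2)         # BxK
assert pos_logits.shape == (bs, 1) and neg_logits.shape == (bs, neg_num)
```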
b/openselfsup/models/odc.py @@ -0,0 +1,103 @@ +import numpy as np +import torch +import torch.nn as nn + +from openselfsup.utils import print_log +from . import builder +from .registry import MODELS +from .utils import Sobel + + +@MODELS.register_module +class ODC(nn.Module): + + def __init__(self, + backbone, + with_sobel=False, + neck=None, + head=None, + memory_bank=None, + pretrained=None): + super(ODC, self).__init__() + self.with_sobel = with_sobel + if with_sobel: + self.sobel_layer = Sobel() + self.backbone = builder.build_backbone(backbone) + self.neck = builder.build_neck(neck) + if head is not None: + self.head = builder.build_head(head) + if memory_bank is not None: + self.memory_bank = builder.build_memory(memory_bank) + self.init_weights(pretrained=pretrained) + + # set reweight tensors + self.num_classes = head.num_classes + self.loss_weight = torch.ones((self.num_classes, ), + dtype=torch.float32).cuda() + self.loss_weight /= self.loss_weight.sum() + + def init_weights(self, pretrained=None): + if pretrained is not None: + print_log('load model from: {}'.format(pretrained), logger='root') + self.backbone.init_weights(pretrained=pretrained) + self.neck.init_weights(init_linear='kaiming') + self.head.init_weights(init_linear='normal') + + def forward_backbone(self, img): + """Forward backbone + + Returns: + x (tuple): backbone outputs + """ + if self.with_sobel: + img = self.sobel_layer(img) + x = self.backbone(img) + return x + + def forward_train(self, img, idx, **kwargs): + # forward & backward + x = self.forward_backbone(img) + feature = self.neck(x) + outs = self.head(feature) + if self.memory_bank.label_bank.is_cuda: + loss_inputs = (outs, self.memory_bank.label_bank[idx]) + else: + loss_inputs = (outs, self.memory_bank.label_bank[idx.cpu()].cuda()) + losses = self.head.loss(*loss_inputs) + + # update samples memory + change_ratio = self.memory_bank.update_samples_memory( + idx, feature[0].detach()) + losses['change_ratio'] = change_ratio + + return losses + + def forward_test(self, img, **kwargs): + x = self.forward_backbone(img) # tuple + outs = self.head(x) + keys = ['head{}'.format(i) for i in range(len(outs))] + out_tensors = [out.cpu() for out in outs] # NxC + return dict(zip(keys, out_tensors)) + + def forward(self, img, mode='train', **kwargs): + if mode == 'train': + return self.forward_train(img, **kwargs) + elif mode == 'test': + return self.forward_test(img, **kwargs) + elif mode == 'extract': + return self.forward_backbone(img) + else: + raise Exception("No such mode: {}".format(mode)) + + def set_reweight(self, labels=None, reweight_pow=0.5): + if labels is None: + if self.memory_bank.label_bank.is_cuda: + labels = self.memory_bank.label_bank.cpu().numpy() + else: + labels = self.memory_bank.label_bank.numpy() + hist = np.bincount( + labels, minlength=self.num_classes).astype(np.float32) + inv_hist = (1. 
/ (hist + 1e-5))**reweight_pow + weight = inv_hist / inv_hist.sum() + self.loss_weight.copy_(torch.from_numpy(weight)) + self.head.criterion = nn.CrossEntropyLoss(weight=self.loss_weight) diff --git a/openselfsup/models/registry.py b/openselfsup/models/registry.py new file mode 100644 index 00000000..8e1611ee --- /dev/null +++ b/openselfsup/models/registry.py @@ -0,0 +1,8 @@ +from openselfsup.utils import Registry + +MODELS = Registry('model') +BACKBONES = Registry('backbone') +NECKS = Registry('neck') +HEADS = Registry('head') +MEMORIES = Registry('memory') +LOSSES = Registry('loss') diff --git a/openselfsup/models/rotation_pred.py b/openselfsup/models/rotation_pred.py new file mode 100644 index 00000000..87c8a3b4 --- /dev/null +++ b/openselfsup/models/rotation_pred.py @@ -0,0 +1,63 @@ +import torch +import torch.nn as nn + +from openselfsup.utils import print_log + +from . import builder +from .registry import MODELS + + +@MODELS.register_module +class RotationPred(nn.Module): + + def __init__(self, backbone, head=None, pretrained=None): + super(RotationPred, self).__init__() + self.backbone = builder.build_backbone(backbone) + if head is not None: + self.head = builder.build_head(head) + self.init_weights(pretrained=pretrained) + + def init_weights(self, pretrained=None): + if pretrained is not None: + print_log('load model from: {}'.format(pretrained), logger='root') + self.backbone.init_weights(pretrained=pretrained) + self.head.init_weights(init_linear='kaiming') + + def forward_backbone(self, img): + """Forward backbone + + Returns: + x (tuple): backbone outputs + """ + x = self.backbone(img) + return x + + def forward_train(self, img, rot_label, **kwargs): + x = self.forward_backbone(img) + outs = self.head(x) + loss_inputs = (outs, rot_label) + losses = self.head.loss(*loss_inputs) + return losses + + def forward_test(self, img, **kwargs): + x = self.forward_backbone(img) # tuple + outs = self.head(x) + keys = ['head{}'.format(i) for i in range(len(outs))] + out_tensors = [out.cpu() for out in outs] # NxC + return dict(zip(keys, out_tensors)) + + def forward(self, img, rot_label=None, mode='train', **kwargs): + if mode != "extract" and img.dim() == 5: + assert rot_label.dim() == 2 + img = img.view( + img.size(0) * img.size(1), img.size(2), img.size(3), + img.size(4)) + rot_label = torch.flatten(rot_label) + if mode == 'train': + return self.forward_train(img, rot_label, **kwargs) + elif mode == 'test': + return self.forward_test(img, **kwargs) + elif mode == 'extract': + return self.forward_backbone(img) + else: + raise Exception("No such mode: {}".format(mode)) diff --git a/openselfsup/models/simclr.py b/openselfsup/models/simclr.py new file mode 100644 index 00000000..16ece396 --- /dev/null +++ b/openselfsup/models/simclr.py @@ -0,0 +1,79 @@ +import torch +import torch.nn as nn + +from openselfsup.utils import print_log + +from . 
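`RotationPred.forward` above folds the four rotated views of every image into the batch dimension and flattens the label grid to match, turning the pretext task into ordinary 4-way classification. A quick shape check of that collapse:

```python
import torch

# Shape check for the 5-D collapse in RotationPred.forward above:
# (N images) x (4 rotations) -> a flat batch of 4N classification samples.
N = 2
img = torch.randn(N, 4, 3, 32, 32)                 # N x 4 rotated views
rot_label = torch.arange(4).repeat(N, 1)           # N x 4, one label per view

img = img.view(img.size(0) * img.size(1), img.size(2), img.size(3), img.size(4))
rot_label = torch.flatten(rot_label)

assert img.shape == (4 * N, 3, 32, 32)
assert rot_label.tolist() == [0, 1, 2, 3, 0, 1, 2, 3]
```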
import builder
+from .registry import MODELS
+from .utils import GatherLayer
+
+
+@MODELS.register_module
+class SimCLR(nn.Module):
+
+    def __init__(self, backbone, neck=None, head=None, pretrained=None):
+        super(SimCLR, self).__init__()
+        self.backbone = builder.build_backbone(backbone)
+        self.neck = builder.build_neck(neck)
+        self.head = builder.build_head(head)
+        self.init_weights(pretrained=pretrained)
+
+    @staticmethod
+    def _create_buffer(N):
+        mask = 1 - torch.eye(N * 2, dtype=torch.uint8).cuda()
+        pos_ind = (torch.arange(N * 2).cuda(),
+                   2 * torch.arange(N, dtype=torch.long).unsqueeze(1).repeat(
+                       1, 2).view(-1, 1).squeeze().cuda())
+        neg_mask = torch.ones((N * 2, N * 2 - 1), dtype=torch.uint8).cuda()
+        neg_mask[pos_ind] = 0
+        return mask, pos_ind, neg_mask
+
+    def init_weights(self, pretrained=None):
+        if pretrained is not None:
+            print_log('load model from: {}'.format(pretrained), logger='root')
+        self.backbone.init_weights(pretrained=pretrained)
+        self.neck.init_weights(init_linear='kaiming')
+
+    def forward_backbone(self, img):
+        """Forward pass through the backbone.
+
+        Returns:
+            x (tuple): backbone outputs
+        """
+        x = self.backbone(img)
+        return x
+
+    def forward_train(self, img, **kwargs):
+        assert img.dim() == 5, \
+            "Input must have 5 dims, got: {}".format(img.dim())
+        img = img.reshape(
+            img.size(0) * 2, img.size(2), img.size(3), img.size(4))
+        x = self.forward_backbone(img)  # 2n
+        z = self.neck(x)[0]  # (2n)xd
+        z = z / (torch.norm(z, p=2, dim=1, keepdim=True) + 1e-10)
+        z = torch.cat(GatherLayer.apply(z), dim=0)  # (2N)xd
+        assert z.size(0) % 2 == 0
+        N = z.size(0) // 2
+        s = torch.matmul(z, z.permute(1, 0))  # (2N)x(2N)
+        mask, pos_ind, neg_mask = self._create_buffer(N)
+        # remove diagonal, (2N)x(2N-1)
+        s = torch.masked_select(s, mask).reshape(s.size(0), -1)
+        positive = s[pos_ind].unsqueeze(1)  # (2N)x1
+        # select negative, (2N)x(2N-2)
+        negative = torch.masked_select(s, neg_mask).reshape(s.size(0), -1)
+        losses = self.head(positive, negative)
+        return losses
+
+    def forward_test(self, img, **kwargs):
+        pass
+
+    def forward(self, img, mode='train', **kwargs):
+        if mode == 'train':
+            return self.forward_train(img, **kwargs)
+        elif mode == 'test':
+            return self.forward_test(img, **kwargs)
+        elif mode == 'extract':
+            return self.forward_backbone(img)
+        else:
+            raise Exception("No such mode: {}".format(mode))
diff --git a/openselfsup/models/utils/__init__.py b/openselfsup/models/utils/__init__.py
new file mode 100644
index 00000000..a5d1f553
--- /dev/null
+++ b/openselfsup/models/utils/__init__.py
@@ -0,0 +1,16 @@
+from .accuracy import Accuracy, accuracy
+from .conv_module import ConvModule, build_conv_layer
+from .conv_ws import ConvWS2d, conv_ws_2d
+from .gather_layer import GatherLayer
+from .multi_pooling import MultiPooling
+from .norm import build_norm_layer
+from .scale import Scale
+#from .weight_init import (bias_init_with_prob, kaiming_init, normal_init,
+#                          uniform_init, xavier_init)
+from .sobel import Sobel
+
+#__all__ = [
+#    'conv_ws_2d', 'ConvWS2d', 'build_conv_layer', 'ConvModule',
+#    'build_norm_layer', 'xavier_init', 'normal_init', 'uniform_init',
+#    'kaiming_init', 'bias_init_with_prob', 'Scale', 'Sobel'
+#]
diff --git a/openselfsup/models/utils/accuracy.py b/openselfsup/models/utils/accuracy.py
new file mode 100644
index 00000000..20d0ad8c
--- /dev/null
+++ b/openselfsup/models/utils/accuracy.py
@@ -0,0 +1,31 @@
+import torch.nn as nn
+
+
+def accuracy(pred, target, topk=1):
+    assert isinstance(topk, (int, tuple))
+    if isinstance(topk, int):
+        topk = (topk, )
+ return_single = True + else: + return_single = False + + maxk = max(topk) + _, pred_label = pred.topk(maxk, dim=1) + pred_label = pred_label.t() + correct = pred_label.eq(target.view(1, -1).expand_as(pred_label)) + + res = [] + for k in topk: + correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) + res.append(correct_k.mul_(100.0 / pred.size(0))) + return res[0] if return_single else res + + +class Accuracy(nn.Module): + + def __init__(self, topk=(1, )): + super().__init__() + self.topk = topk + + def forward(self, pred, target): + return accuracy(pred, target, self.topk) diff --git a/openselfsup/models/utils/conv_module.py b/openselfsup/models/utils/conv_module.py new file mode 100644 index 00000000..2ea56d87 --- /dev/null +++ b/openselfsup/models/utils/conv_module.py @@ -0,0 +1,163 @@ +import warnings + +import torch.nn as nn +from mmcv.cnn import constant_init, kaiming_init + +from .conv_ws import ConvWS2d +from .norm import build_norm_layer + +conv_cfg = { + 'Conv': nn.Conv2d, + 'ConvWS': ConvWS2d, +} + + +def build_conv_layer(cfg, *args, **kwargs): + """ Build convolution layer + + Args: + cfg (None or dict): cfg should contain: + type (str): identify conv layer type. + layer args: args needed to instantiate a conv layer. + + Returns: + layer (nn.Module): created conv layer + """ + if cfg is None: + cfg_ = dict(type='Conv') + else: + assert isinstance(cfg, dict) and 'type' in cfg + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if layer_type not in conv_cfg: + raise KeyError('Unrecognized norm type {}'.format(layer_type)) + else: + conv_layer = conv_cfg[layer_type] + + layer = conv_layer(*args, **kwargs, **cfg_) + + return layer + + +class ConvModule(nn.Module): + """A conv block that contains conv/norm/activation layers. + + Args: + in_channels (int): Same as nn.Conv2d. + out_channels (int): Same as nn.Conv2d. + kernel_size (int or tuple[int]): Same as nn.Conv2d. + stride (int or tuple[int]): Same as nn.Conv2d. + padding (int or tuple[int]): Same as nn.Conv2d. + dilation (int or tuple[int]): Same as nn.Conv2d. + groups (int): Same as nn.Conv2d. + bias (bool or str): If specified as `auto`, it will be decided by the + norm_cfg. Bias will be set as True if norm_cfg is None, otherwise + False. + conv_cfg (dict): Config dict for convolution layer. + norm_cfg (dict): Config dict for normalization layer. + activation (str or None): Activation type, "ReLU" by default. + inplace (bool): Whether to use inplace mode for activation. + order (tuple[str]): The order of conv/norm/activation layers. It is a + sequence of "conv", "norm" and "act". Examples are + ("conv", "norm", "act") and ("act", "conv", "norm"). + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias='auto', + conv_cfg=None, + norm_cfg=None, + activation='relu', + inplace=True, + order=('conv', 'norm', 'act')): + super(ConvModule, self).__init__() + assert conv_cfg is None or isinstance(conv_cfg, dict) + assert norm_cfg is None or isinstance(norm_cfg, dict) + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.activation = activation + self.inplace = inplace + self.order = order + assert isinstance(self.order, tuple) and len(self.order) == 3 + assert set(order) == set(['conv', 'norm', 'act']) + + self.with_norm = norm_cfg is not None + self.with_activation = activation is not None + # if the conv layer is before a norm layer, bias is unnecessary. 
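+        # For illustration (hypothetical values, not from any config in this
+        # repo): with norm_cfg=dict(type='BN') the conv bias would be
+        # redundant, since the following BatchNorm has its own learnable
+        # shift:
+        #
+        #   conv = ConvModule(3, 64, 3, padding=1, norm_cfg=dict(type='BN'))
+        #   assert conv.conv.bias is None  # bias='auto' resolved to False
+        #
+        # whereas with norm_cfg=None, bias='auto' resolves to True.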
+ if bias == 'auto': + bias = False if self.with_norm else True + self.with_bias = bias + + if self.with_norm and self.with_bias: + warnings.warn('ConvModule has norm and bias at the same time') + + # build convolution layer + self.conv = build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias) + # export the attributes of self.conv to a higher level for convenience + self.in_channels = self.conv.in_channels + self.out_channels = self.conv.out_channels + self.kernel_size = self.conv.kernel_size + self.stride = self.conv.stride + self.padding = self.conv.padding + self.dilation = self.conv.dilation + self.transposed = self.conv.transposed + self.output_padding = self.conv.output_padding + self.groups = self.conv.groups + + # build normalization layers + if self.with_norm: + # norm layer is after conv layer + if order.index('norm') > order.index('conv'): + norm_channels = out_channels + else: + norm_channels = in_channels + self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels) + self.add_module(self.norm_name, norm) + + # build activation layer + if self.with_activation: + # TODO: introduce `act_cfg` and supports more activation layers + if self.activation not in ['relu']: + raise ValueError('{} is currently not supported.'.format( + self.activation)) + if self.activation == 'relu': + self.activate = nn.ReLU(inplace=inplace) + + # Use msra init by default + self.init_weights() + + @property + def norm(self): + return getattr(self, self.norm_name) + + def init_weights(self): + nonlinearity = 'relu' if self.activation is None else self.activation + kaiming_init(self.conv, mode='fan_in', nonlinearity=nonlinearity) + if self.with_norm: + constant_init(self.norm, 1, bias=0) + + def forward(self, x, activate=True, norm=True): + for layer in self.order: + if layer == 'conv': + x = self.conv(x) + elif layer == 'norm' and norm and self.with_norm: + x = self.norm(x) + elif layer == 'act' and activate and self.with_activation: + x = self.activate(x) + return x diff --git a/openselfsup/models/utils/conv_ws.py b/openselfsup/models/utils/conv_ws.py new file mode 100644 index 00000000..5ccd735f --- /dev/null +++ b/openselfsup/models/utils/conv_ws.py @@ -0,0 +1,46 @@ +import torch.nn as nn +import torch.nn.functional as F + + +def conv_ws_2d(input, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + eps=1e-5): + c_in = weight.size(0) + weight_flat = weight.view(c_in, -1) + mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1) + std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1) + weight = (weight - mean) / (std + eps) + return F.conv2d(input, weight, bias, stride, padding, dilation, groups) + + +class ConvWS2d(nn.Conv2d): + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True, + eps=1e-5): + super(ConvWS2d, self).__init__( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias) + self.eps = eps + + def forward(self, x): + return conv_ws_2d(x, self.weight, self.bias, self.stride, self.padding, + self.dilation, self.groups, self.eps) diff --git a/openselfsup/models/utils/gather_layer.py b/openselfsup/models/utils/gather_layer.py new file mode 100644 index 00000000..8b73708e --- /dev/null +++ b/openselfsup/models/utils/gather_layer.py @@ -0,0 +1,22 @@ +import torch +import torch.distributed as dist 
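+# Usage sketch (this mirrors how SimCLR in this repo calls it): every rank
+# contributes its local batch and receives the concatenation across all
+# ranks, with gradients flowing back to the local shard:
+#
+#   z = torch.cat(GatherLayer.apply(z), dim=0)  # (n * world_size) x d
+#
+# Unlike a plain dist.all_gather, the custom backward below makes the op
+# differentiable with respect to the local input.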
+
+
+class GatherLayer(torch.autograd.Function):
+    '''Gather tensors from all processes, supporting backward propagation.
+    '''
+
+    @staticmethod
+    def forward(ctx, input):
+        ctx.save_for_backward(input)
+        output = [torch.zeros_like(input)
+                  for _ in range(dist.get_world_size())]
+        dist.all_gather(output, input)
+        return tuple(output)
+
+    @staticmethod
+    def backward(ctx, *grads):
+        input, = ctx.saved_tensors
+        grad_out = torch.zeros_like(input)
+        grad_out[:] = grads[dist.get_rank()]
+        return grad_out
diff --git a/openselfsup/models/utils/multi_pooling.py b/openselfsup/models/utils/multi_pooling.py
new file mode 100644
index 00000000..1440be03
--- /dev/null
+++ b/openselfsup/models/utils/multi_pooling.py
@@ -0,0 +1,38 @@
+import torch.nn as nn
+
+
+class MultiPooling(nn.Module):
+    """Pooling layers for features from multiple depths.
+    """
+    POOL_PARAMS = {
+        'resnet50': [
+            dict(kernel_size=10, stride=10, padding=4),
+            dict(kernel_size=16, stride=8, padding=0),
+            dict(kernel_size=13, stride=5, padding=0),
+            dict(kernel_size=8, stride=3, padding=0),
+            dict(kernel_size=6, stride=1, padding=0)
+        ]
+    }
+    POOL_SIZES = {'resnet50': [12, 6, 4, 3, 2]}
+    POOL_DIMS = {'resnet50': [9216, 9216, 8192, 9216, 8192]}
+
+    def __init__(self,
+                 pool_type='adaptive',
+                 in_indices=(0, ),
+                 backbone='resnet50'):
+        super(MultiPooling, self).__init__()
+        assert pool_type in ['adaptive', 'specified']
+        if pool_type == 'adaptive':
+            self.pools = nn.ModuleList([
+                nn.AdaptiveAvgPool2d(self.POOL_SIZES[backbone][l])
+                for l in in_indices
+            ])
+        else:
+            self.pools = nn.ModuleList([
+                nn.AvgPool2d(**self.POOL_PARAMS[backbone][l])
+                for l in in_indices
+            ])
+
+    def forward(self, x):
+        assert isinstance(x, (list, tuple))
+        return [p(xx) for p, xx in zip(self.pools, x)]
diff --git a/openselfsup/models/utils/norm.py b/openselfsup/models/utils/norm.py
new file mode 100644
index 00000000..d5687cbd
--- /dev/null
+++ b/openselfsup/models/utils/norm.py
@@ -0,0 +1,55 @@
+import torch.nn as nn
+
+norm_cfg = {
+    # format: layer_type: (abbreviation, module)
+    'BN': ('bn', nn.BatchNorm2d),
+    'SyncBN': ('bn', nn.SyncBatchNorm),
+    'GN': ('gn', nn.GroupNorm),
+    # and potentially 'SN'
+}
+
+
+def build_norm_layer(cfg, num_features, postfix=''):
+    """Build normalization layer.
+
+    Args:
+        cfg (dict): cfg should contain:
+            type (str): identify norm layer type.
+            layer args: args needed to instantiate a norm layer.
+            requires_grad (bool): [optional] whether to stop gradient updates
+        num_features (int): number of channels from input.
+        postfix (int, str): appended into norm abbreviation to
+            create named layer.
+ + Returns: + name (str): abbreviation + postfix + layer (nn.Module): created norm layer + """ + assert isinstance(cfg, dict) and 'type' in cfg + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if layer_type not in norm_cfg: + raise KeyError('Unrecognized norm type {}'.format(layer_type)) + else: + abbr, norm_layer = norm_cfg[layer_type] + if norm_layer is None: + raise NotImplementedError + + assert isinstance(postfix, (int, str)) + name = abbr + str(postfix) + + requires_grad = cfg_.pop('requires_grad', True) + cfg_.setdefault('eps', 1e-5) + if layer_type != 'GN': + layer = norm_layer(num_features, **cfg_) + if layer_type == 'SyncBN': + layer._specify_ddp_gpu_num(1) + else: + assert 'num_groups' in cfg_ + layer = norm_layer(num_channels=num_features, **cfg_) + + for param in layer.parameters(): + param.requires_grad = requires_grad + + return name, layer diff --git a/openselfsup/models/utils/scale.py b/openselfsup/models/utils/scale.py new file mode 100644 index 00000000..2461af8a --- /dev/null +++ b/openselfsup/models/utils/scale.py @@ -0,0 +1,15 @@ +import torch +import torch.nn as nn + + +class Scale(nn.Module): + """ + A learnable scale parameter + """ + + def __init__(self, scale=1.0): + super(Scale, self).__init__() + self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float)) + + def forward(self, x): + return x * self.scale diff --git a/openselfsup/models/utils/sobel.py b/openselfsup/models/utils/sobel.py new file mode 100644 index 00000000..73ef30e7 --- /dev/null +++ b/openselfsup/models/utils/sobel.py @@ -0,0 +1,23 @@ +import torch +import torch.nn as nn + + +class Sobel(nn.Module): + + def __init__(self): + super(Sobel, self).__init__() + grayscale = nn.Conv2d(3, 1, kernel_size=1, stride=1, padding=0) + grayscale.weight.data.fill_(1.0 / 3.0) + grayscale.bias.data.zero_() + sobel_filter = nn.Conv2d(1, 2, kernel_size=3, stride=1, padding=1) + sobel_filter.weight.data[0, 0].copy_( + torch.FloatTensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]])) + sobel_filter.weight.data[1, 0].copy_( + torch.FloatTensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])) + sobel_filter.bias.data.zero_() + self.sobel = nn.Sequential(grayscale, sobel_filter) + for p in self.sobel.parameters(): + p.requires_grad = False + + def forward(self, x): + return self.sobel(x) diff --git a/openselfsup/third_party/clustering.py b/openselfsup/third_party/clustering.py new file mode 100644 index 00000000..d84459b0 --- /dev/null +++ b/openselfsup/third_party/clustering.py @@ -0,0 +1,308 @@ +# This file is modified from +# https://github.com/facebookresearch/deepcluster/blob/master/clustering.py + +import time +import numpy as np +import faiss +import torch + +__all__ = ['Kmeans', 'PIC'] + + +def preprocess_features(npdata, pca): + """Preprocess an array of features. 
+ Args: + npdata (np.array N * ndim): features to preprocess + pca (int): dim of output + Returns: + np.array of dim N * pca: data PCA-reduced, whitened and L2-normalized + """ + _, ndim = npdata.shape + #npdata = npdata.astype('float32') + assert npdata.dtype == np.float32 + + if np.any(np.isnan(npdata)): + raise Exception("nan occurs") + if pca != -1: + print("\nPCA from dim {} to dim {}".format(ndim, pca)) + mat = faiss.PCAMatrix(ndim, pca, eigen_power=-0.5) + mat.train(npdata) + assert mat.is_trained + npdata = mat.apply_py(npdata) + if np.any(np.isnan(npdata)): + percent = np.isnan(npdata).sum().item() / float(np.size(npdata)) * 100 + if percent > 0.1: + raise Exception( + "More than 0.1% nan occurs after pca, percent: {}%".format( + percent)) + else: + npdata[np.isnan(npdata)] = 0. + # L2 normalization + row_sums = np.linalg.norm(npdata, axis=1) + + npdata = npdata / (row_sums[:, np.newaxis] + 1e-10) + + return npdata + + +def make_graph(xb, nnn): + """Builds a graph of nearest neighbors. + Args: + xb (np.array): data + nnn (int): number of nearest neighbors + Returns: + list: for each data the list of ids to its nnn nearest neighbors + list: for each data the list of distances to its nnn NN + """ + N, dim = xb.shape + + # we need only a StandardGpuResources per GPU + res = faiss.StandardGpuResources() + + # L2 + flat_config = faiss.GpuIndexFlatConfig() + flat_config.device = int(torch.cuda.device_count()) - 1 + index = faiss.GpuIndexFlatL2(res, dim, flat_config) + index.add(xb) + D, I = index.search(xb, nnn + 1) + return I, D + + +def run_kmeans(x, nmb_clusters, verbose=False): + """Runs kmeans on 1 GPU. + Args: + x: data + nmb_clusters (int): number of clusters + Returns: + list: ids of data in each cluster + """ + n_data, d = x.shape + + # faiss implementation of k-means + clus = faiss.Clustering(d, nmb_clusters) + + # Change faiss seed at each k-means so that the randomly picked + # initialization centroids do not correspond to the same feature ids + # from an epoch to another. + clus.seed = np.random.randint(1234) + + clus.niter = 20 + clus.max_points_per_centroid = 10000000 + res = faiss.StandardGpuResources() + flat_config = faiss.GpuIndexFlatConfig() + flat_config.useFloat16 = False + flat_config.device = 0 + index = faiss.GpuIndexFlatL2(res, d, flat_config) + + # perform the training + clus.train(x, index) + _, I = index.search(x, 1) + losses = faiss.vector_to_array(clus.obj) + if verbose: + print('k-means loss evolution: {0}'.format(losses)) + + return [int(n[0]) for n in I], losses[-1] + + +def arrange_clustering(images_lists): + pseudolabels = [] + image_indexes = [] + for cluster, images in enumerate(images_lists): + image_indexes.extend(images) + pseudolabels.extend([cluster] * len(images)) + indexes = np.argsort(image_indexes) + return np.asarray(pseudolabels)[indexes] + + +class Kmeans: + + def __init__(self, k, pca_dim=256): + self.k = k + self.pca_dim = pca_dim + + def cluster(self, feat, verbose=False): + """Performs k-means clustering. + Args: + x_data (np.array N * dim): data to cluster + """ + end = time.time() + + # PCA-reducing, whitening and L2-normalization + xb = preprocess_features(feat, self.pca_dim) + + # cluster the data + I, loss = run_kmeans(xb, self.k, verbose) + self.labels = np.array(I) + if verbose: + print('k-means time: {0:.0f} s'.format(time.time() - end)) + + return loss + + +def make_adjacencyW(I, D, sigma): + """Create adjacency matrix with a Gaussian kernel. 
+    Args:
+        I (numpy array): for each vertex the ids to its nnn linked vertices
+            + first column of identity.
+        D (numpy array): for each data the l2 distances to its nnn linked
+            vertices + first column of zeros.
+        sigma (float): Bandwidth of the Gaussian kernel.
+
+    Returns:
+        csr_matrix: affinity matrix of the graph.
+    """
+    # scipy is required here; it is not imported at the top of this file,
+    # so import it locally where the sparse matrix is actually built.
+    from scipy.sparse import csr_matrix
+
+    V, k = I.shape
+    k = k - 1
+    indices = np.reshape(np.delete(I, 0, 1), (1, -1))
+    indptr = np.multiply(k, np.arange(V + 1))
+
+    def exp_ker(d):
+        return np.exp(-d / sigma**2)
+
+    exp_ker = np.vectorize(exp_ker)
+    res_D = exp_ker(D)
+    data = np.reshape(np.delete(res_D, 0, 1), (1, -1))
+    adj_matrix = csr_matrix((data[0], indices[0], indptr), shape=(V, V))
+    return adj_matrix
+
+
+def run_pic(I, D, sigma, alpha):
+    """Run PIC algorithm"""
+    a = make_adjacencyW(I, D, sigma)
+    graph = a + a.transpose()
+    nim = graph.shape[0]
+
+    W = graph
+
+    v0 = np.ones(nim) / nim
+
+    # power iterations
+    v = v0.astype('float32')
+
+    for i in range(200):
+        vnext = np.zeros(nim, dtype='float32')
+
+        vnext = vnext + W.transpose().dot(v)
+
+        vnext = alpha * vnext + (1 - alpha) / nim
+        # L1 normalize
+        vnext /= vnext.sum()
+        v = vnext
+
+    clust = find_maxima_cluster(W, v)
+
+    return [int(i) for i in clust]
+
+
+def find_maxima_cluster(W, v):
+    n, m = W.shape
+    assert (n == m)
+    assign = np.zeros(n)
+    # for each node
+    pointers = list(range(n))
+    for i in range(n):
+        best_vi = 0
+        l0 = W.indptr[i]
+        l1 = W.indptr[i + 1]
+        for l in range(l0, l1):
+            j = W.indices[l]
+            vi = W.data[l] * (v[j] - v[i])
+            if vi > best_vi:
+                best_vi = vi
+                pointers[i] = j
+    n_clus = 0
+    cluster_ids = -1 * np.ones(n)
+    for i in range(n):
+        if pointers[i] == i:
+            cluster_ids[i] = n_clus
+            n_clus = n_clus + 1
+    for i in range(n):
+        # follow pointers from i until a local optimum is reached
+        current_node = i
+        while pointers[current_node] != current_node:
+            current_node = pointers[current_node]
+
+        assign[i] = cluster_ids[current_node]
+        assert (assign[i] >= 0)
+    return assign
+
+
+class PIC():
+    """Class to perform Power Iteration Clustering on a graph of nearest
+    neighbors.
+
+    Args:
+        args: for consistency with k-means init
+        sigma (float): bandwidth of the Gaussian kernel (default 0.2)
+        nnn (int): number of nearest neighbors (default 5)
+        alpha (float): parameter in PIC (default 0.001)
+        distribute_singletons (bool): If True, reassign each singleton to
+            the cluster of its closest non-singleton nearest neighbor (up to
+            nnn nearest neighbors).
+
+    Attributes:
+        images_lists (list of list): for each cluster, the list of image
+            indexes belonging to this cluster
+    """
+
+    def __init__(self,
+                 args=None,
+                 sigma=0.2,
+                 nnn=5,
+                 alpha=0.001,
+                 distribute_singletons=True,
+                 pca_dim=256):
+        self.sigma = sigma
+        self.alpha = alpha
+        self.nnn = nnn
+        self.distribute_singletons = distribute_singletons
+        self.pca_dim = pca_dim
+
+    def cluster(self, data, verbose=False):
+        end = time.time()
+
+        # preprocess the data
+        xb = preprocess_features(data, self.pca_dim)
+
+        # construct nnn graph
+        I, D = make_graph(xb, self.nnn)
+
+        # run PIC
+        clust = run_pic(I, D, self.sigma, self.alpha)
+        images_lists = {}
+        for h in set(clust):
+            images_lists[h] = []
+        # do not shadow the `data` argument here; it is needed below to size
+        # the label array
+        for idx, c in enumerate(clust):
+            images_lists[c].append(idx)
+
+        # allocate singletons to clusters of their closest NN not singleton
+        if self.distribute_singletons:
+            clust_NN = {}
+            for i in images_lists:
+                # if singleton
+                if len(images_lists[i]) == 1:
+                    s = images_lists[i][0]
+                    # for NN
+                    for n in I[s, 1:]:
+                        # if NN is not a singleton
+                        if not len(images_lists[clust[n]]) == 1:
+                            clust_NN[s] = n
+                            break
+            for s in clust_NN:
+                del images_lists[clust[s]]
+                clust[s] = clust[clust_NN[s]]
+                images_lists[clust[s]].append(s)
+
+        self.images_lists = []
+        self.labels = -1 * np.ones((data.shape[0], ), dtype=int)
+        for i, c in enumerate(images_lists):
+            self.images_lists.append(images_lists[c])
+            self.labels[images_lists[c]] = i
+        assert np.all(self.labels != -1)
+
+        if verbose:
+            print('pic time: {0:.0f} s'.format(time.time() - end))
+        return 0
diff --git a/openselfsup/utils/__init__.py b/openselfsup/utils/__init__.py
new file mode 100644
index 00000000..94a634fe
--- /dev/null
+++ b/openselfsup/utils/__init__.py
@@ -0,0 +1,8 @@
+from .alias_multinomial import AliasMethod
+from .collect import nondist_forward_collect, dist_forward_collect
+from .collect_env import collect_env
+from .config_tools import traverse_replace
+from .flops_counter import get_model_complexity_info
+from .logger import get_root_logger, print_log
+from .registry import Registry, build_from_cfg
+from . import optimizers
diff --git a/openselfsup/utils/alias_multinomial.py b/openselfsup/utils/alias_multinomial.py
new file mode 100644
index 00000000..bad70bc5
--- /dev/null
+++ b/openselfsup/utils/alias_multinomial.py
@@ -0,0 +1,66 @@
+import torch
+import numpy as np
+
+
+class AliasMethod(object):
+    '''
+    From: https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
+    '''
+
+    def __init__(self, probs):
+
+        if probs.sum() > 1:
+            probs.div_(probs.sum())
+        K = len(probs)
+        self.prob = torch.zeros(K)
+        self.alias = torch.LongTensor([0] * K)
+
+        # Sort the data into the outcomes with probabilities
+        # that are larger and smaller than 1/K.
+        smaller = []
+        larger = []
+        for kk, prob in enumerate(probs):
+            self.prob[kk] = K * prob
+            if self.prob[kk] < 1.0:
+                smaller.append(kk)
+            else:
+                larger.append(kk)
+
+        # Loop through and create little binary mixtures that
+        # appropriately allocate the larger outcomes over the
+        # overall uniform mixture.
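+        # Worked example (hypothetical probs, for intuition):
+        # probs = [0.5, 0.3, 0.2] gives K * probs = [1.5, 0.9, 0.6],
+        # smaller = [1, 2], larger = [0]. The loop below pairs them up:
+        #   pair (2, 0): alias[2] = 0, prob[0] = 1.5 - 1.0 + 0.6 = 1.1
+        #   pair (1, 0): alias[1] = 0, prob[0] = 1.1 - 1.0 + 0.9 = 1.0
+        # Final tables: prob = [1.0, 0.9, 0.6], alias = [0, 0, 0]. Drawing a
+        # uniform column k and flipping a coin with P(heads) = prob[k] then
+        # recovers P(0) = (1.0 + 0.1 + 0.4) / 3 = 0.5, P(1) = 0.9 / 3 = 0.3,
+        # P(2) = 0.6 / 3 = 0.2, i.e. the original distribution.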
+ while len(smaller) > 0 and len(larger) > 0: + small = smaller.pop() + large = larger.pop() + + self.alias[small] = large + self.prob[large] = (self.prob[large] - 1.0) + self.prob[small] + + if self.prob[large] < 1.0: + smaller.append(large) + else: + larger.append(large) + + for last_one in smaller + larger: + self.prob[last_one] = 1 + + def cuda(self): + self.prob = self.prob.cuda() + self.alias = self.alias.cuda() + + def draw(self, N): + ''' + Draw N samples from multinomial + ''' + K = self.alias.size(0) + + kk = torch.zeros( + N, dtype=torch.long, device=self.prob.device).random_(0, K) + prob = self.prob.index_select(0, kk) + alias = self.alias.index_select(0, kk) + # b is whether a random number is greater than q + b = torch.bernoulli(prob) + oq = kk.mul(b.long()) + oj = alias.mul((1 - b).long()) + + return oq + oj diff --git a/openselfsup/utils/collect.py b/openselfsup/utils/collect.py new file mode 100644 index 00000000..b69b1d81 --- /dev/null +++ b/openselfsup/utils/collect.py @@ -0,0 +1,83 @@ +import numpy as np + +import mmcv +import torch + +from .gather import gather_tensors_batch + + +def nondist_forward_collect(func, data_loader, length): + '''Forward and collect network outputs. + + This function performs forward propagation and collects outputs. + It can be used to collect results, features, losses, etc. + + Args: + func (function): The function to process data. The output must be + a dictionary of CPU tensors. + length (int): Expected length of output arrays. + + Returns: + results_all (dict(np.ndarray)): The concatenated outputs. + ''' + results = [] + prog_bar = mmcv.ProgressBar(len(data_loader)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = func(**data) + results.append(result) + prog_bar.update() + + results_all = {} + for k in results[0].keys(): + results_all[k] = np.concatenate( + [batch[k].numpy() for batch in results], axis=0) + assert results_all[k].shape[0] == length + return results_all + + +def dist_forward_collect(func, data_loader, rank, length, ret_rank=-1): + '''Forward and collect network outputs in a distributed manner. + + This function performs forward propagation and collects outputs. + It can be used to collect results, features, losses, etc. + + Args: + func (function): The function to process data. The output must be + a dictionary of CPU tensors. + rank (int): This process id. + length (int): Expected length of output arrays. + ret_rank (int): The process that returns. + Other processes will return None. + + Returns: + results_all (dict(np.ndarray)): The concatenated outputs. 
+ ''' + results = [] + if rank == 0: + prog_bar = mmcv.ProgressBar(len(data_loader)) + for idx, data in enumerate(data_loader): + with torch.no_grad(): + result = func(**data) # dict{key: tensor} + results.append(result) + + if rank == 0: + prog_bar.update() + + results_all = {} + for k in results[0].keys(): + results_cat = np.concatenate([batch[k].numpy() for batch in results], + axis=0) + if ret_rank == -1: + results_gathered = gather_tensors_batch(results_cat, part_size=20) + results_strip = np.concatenate(results_gathered, axis=0)[:length] + else: + results_gathered = gather_tensors_batch( + results_cat, part_size=20, ret_rank=ret_rank) + if rank == ret_rank: + results_strip = np.concatenate( + results_gathered, axis=0)[:length] + else: + results_strip = None + results_all[k] = results_strip + return results_all diff --git a/openselfsup/utils/collect_env.py b/openselfsup/utils/collect_env.py new file mode 100644 index 00000000..9998ac6a --- /dev/null +++ b/openselfsup/utils/collect_env.py @@ -0,0 +1,63 @@ +import os.path as osp +import subprocess +import sys +from collections import defaultdict + +import cv2 +import mmcv +import torch +import torchvision + +import openselfsup + + +def collect_env(): + env_info = {} + env_info['sys.platform'] = sys.platform + env_info['Python'] = sys.version.replace('\n', '') + + cuda_available = torch.cuda.is_available() + env_info['CUDA available'] = cuda_available + + if cuda_available: + from torch.utils.cpp_extension import CUDA_HOME + env_info['CUDA_HOME'] = CUDA_HOME + + if CUDA_HOME is not None and osp.isdir(CUDA_HOME): + try: + nvcc = osp.join(CUDA_HOME, 'bin/nvcc') + nvcc = subprocess.check_output( + '"{}" -V | tail -n1'.format(nvcc), shell=True) + nvcc = nvcc.decode('utf-8').strip() + except subprocess.SubprocessError: + nvcc = 'Not Available' + env_info['NVCC'] = nvcc + + devices = defaultdict(list) + for k in range(torch.cuda.device_count()): + devices[torch.cuda.get_device_name(k)].append(str(k)) + for name, devids in devices.items(): + env_info['GPU ' + ','.join(devids)] = name + + gcc = subprocess.check_output('gcc --version | head -n1', shell=True) + gcc = gcc.decode('utf-8').strip() + env_info['GCC'] = gcc + + env_info['PyTorch'] = torch.__version__ + env_info['PyTorch compiling details'] = torch.__config__.show() + + env_info['TorchVision'] = torchvision.__version__ + + env_info['OpenCV'] = cv2.__version__ + + env_info['MMCV'] = mmcv.__version__ + env_info['OpenSelfSup'] = openselfsup.__version__ + #from openselfsup.ops import get_compiler_version, get_compiling_cuda_version + #env_info['OpenSelfSup Compiler'] = get_compiler_version() + #env_info['OpenSelfSup CUDA Compiler'] = get_compiling_cuda_version() + return env_info + + +if __name__ == "__main__": + for name, val in collect_env().items(): + print('{}: {}'.format(name, val)) diff --git a/openselfsup/utils/config_tools.py b/openselfsup/utils/config_tools.py new file mode 100644 index 00000000..93c0b273 --- /dev/null +++ b/openselfsup/utils/config_tools.py @@ -0,0 +1,12 @@ +from mmcv import Config + +def traverse_replace(d, key, value): + if isinstance(d, (dict, Config)): + for k, v in d.items(): + if k == key: + d[k] = value + else: + traverse_replace(v, key, value) + elif isinstance(d, (list, tuple, set)): + for v in d: + traverse_replace(v, key, value) diff --git a/openselfsup/utils/contextmanagers.py b/openselfsup/utils/contextmanagers.py new file mode 100644 index 00000000..0363f014 --- /dev/null +++ b/openselfsup/utils/contextmanagers.py @@ -0,0 +1,126 @@ +# coding: utf-8 
+import asyncio +import contextlib +import logging +import os +import time +from typing import List + +import torch + +logger = logging.getLogger(__name__) + +DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False)) + + +@contextlib.asynccontextmanager +async def completed(trace_name='', + name='', + sleep_interval=0.05, + streams: List[torch.cuda.Stream] = None): + """ + Async context manager that waits for work to complete on + given CUDA streams. + + """ + if not torch.cuda.is_available(): + yield + return + + stream_before_context_switch = torch.cuda.current_stream() + if not streams: + streams = [stream_before_context_switch] + else: + streams = [s if s else stream_before_context_switch for s in streams] + + end_events = [ + torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams + ] + + if DEBUG_COMPLETED_TIME: + start = torch.cuda.Event(enable_timing=True) + stream_before_context_switch.record_event(start) + + cpu_start = time.monotonic() + logger.debug('%s %s starting, streams: %s', trace_name, name, streams) + grad_enabled_before = torch.is_grad_enabled() + try: + yield + finally: + current_stream = torch.cuda.current_stream() + assert current_stream == stream_before_context_switch + + if DEBUG_COMPLETED_TIME: + cpu_end = time.monotonic() + for i, stream in enumerate(streams): + event = end_events[i] + stream.record_event(event) + + grad_enabled_after = torch.is_grad_enabled() + + # observed change of torch.is_grad_enabled() during concurrent run of + # async_test_bboxes code + assert (grad_enabled_before == grad_enabled_after + ), 'Unexpected is_grad_enabled() value change' + + are_done = [e.query() for e in end_events] + logger.debug('%s %s completed: %s streams: %s', trace_name, name, + are_done, streams) + with torch.cuda.stream(stream_before_context_switch): + while not all(are_done): + await asyncio.sleep(sleep_interval) + are_done = [e.query() for e in end_events] + logger.debug( + '%s %s completed: %s streams: %s', + trace_name, + name, + are_done, + streams, + ) + + current_stream = torch.cuda.current_stream() + assert current_stream == stream_before_context_switch + + if DEBUG_COMPLETED_TIME: + cpu_time = (cpu_end - cpu_start) * 1000 + stream_times_ms = '' + for i, stream in enumerate(streams): + elapsed_time = start.elapsed_time(end_events[i]) + stream_times_ms += ' {} {:.2f} ms'.format(stream, elapsed_time) + logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time, + stream_times_ms) + + +@contextlib.asynccontextmanager +async def concurrent(streamqueue: asyncio.Queue, + trace_name='concurrent', + name='stream'): + """Run code concurrently in different streams. + + :param streamqueue: asyncio.Queue instance. + + Queue tasks define the pool of streams used for concurrent execution. 
+ + """ + if not torch.cuda.is_available(): + yield + return + + initial_stream = torch.cuda.current_stream() + + with torch.cuda.stream(initial_stream): + stream = await streamqueue.get() + assert isinstance(stream, torch.cuda.Stream) + + try: + with torch.cuda.stream(stream): + logger.debug('%s %s is starting, stream: %s', trace_name, name, + stream) + yield + current = torch.cuda.current_stream() + assert current == stream + logger.debug('%s %s has finished, stream: %s', trace_name, + name, stream) + finally: + streamqueue.task_done() + streamqueue.put_nowait(stream) diff --git a/openselfsup/utils/flops_counter.py b/openselfsup/utils/flops_counter.py new file mode 100644 index 00000000..df2163fd --- /dev/null +++ b/openselfsup/utils/flops_counter.py @@ -0,0 +1,444 @@ +# Modified from flops-counter.pytorch by Vladislav Sovrasov +# original repo: https://github.com/sovrasov/flops-counter.pytorch + +# MIT License + +# Copyright (c) 2018 Vladislav Sovrasov + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +import sys + +import numpy as np +import torch +import torch.nn as nn +from torch.nn.modules.batchnorm import _BatchNorm +from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin +from torch.nn.modules.pooling import (_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, + _AvgPoolNd, _MaxPoolNd) + + +def get_model_complexity_info(model, + input_res, + print_per_layer_stat=True, + as_strings=True, + input_constructor=None, + ost=sys.stdout): + assert type(input_res) is tuple + assert len(input_res) >= 2 + flops_model = add_flops_counting_methods(model) + flops_model.eval().start_flops_count() + if input_constructor: + input = input_constructor(input_res) + _ = flops_model(**input) + else: + batch = torch.ones(()).new_empty( + (1, *input_res), + dtype=next(flops_model.parameters()).dtype, + device=next(flops_model.parameters()).device) + flops_model(batch) + + if print_per_layer_stat: + print_model_with_flops(flops_model, ost=ost) + flops_count = flops_model.compute_average_flops_cost() + params_count = get_model_parameters_number(flops_model) + flops_model.stop_flops_count() + + if as_strings: + return flops_to_string(flops_count), params_to_string(params_count) + + return flops_count, params_count + + +def flops_to_string(flops, units='GMac', precision=2): + if units is None: + if flops // 10**9 > 0: + return str(round(flops / 10.**9, precision)) + ' GMac' + elif flops // 10**6 > 0: + return str(round(flops / 10.**6, precision)) + ' MMac' + elif flops // 10**3 > 0: + return str(round(flops / 10.**3, precision)) + ' KMac' + else: + return str(flops) + ' Mac' + else: + if units == 'GMac': + return str(round(flops / 10.**9, precision)) + ' ' + units + elif units == 'MMac': + return str(round(flops / 10.**6, precision)) + ' ' + units + elif units == 'KMac': + return str(round(flops / 10.**3, precision)) + ' ' + units + else: + return str(flops) + ' Mac' + + +def params_to_string(params_num): + """converting number to string + + :param float params_num: number + :returns str: number + + >>> params_to_string(1e9) + '1000.0 M' + >>> params_to_string(2e5) + '200.0 k' + >>> params_to_string(3e-9) + '3e-09' + """ + if params_num // 10**6 > 0: + return str(round(params_num / 10**6, 2)) + ' M' + elif params_num // 10**3: + return str(round(params_num / 10**3, 2)) + ' k' + else: + return str(params_num) + + +def print_model_with_flops(model, units='GMac', precision=3, ost=sys.stdout): + total_flops = model.compute_average_flops_cost() + + def accumulate_flops(self): + if is_supported_instance(self): + return self.__flops__ / model.__batch_counter__ + else: + sum = 0 + for m in self.children(): + sum += m.accumulate_flops() + return sum + + def flops_repr(self): + accumulated_flops_cost = self.accumulate_flops() + return ', '.join([ + flops_to_string( + accumulated_flops_cost, units=units, precision=precision), + '{:.3%} MACs'.format(accumulated_flops_cost / total_flops), + self.original_extra_repr() + ]) + + def add_extra_repr(m): + m.accumulate_flops = accumulate_flops.__get__(m) + flops_extra_repr = flops_repr.__get__(m) + if m.extra_repr != flops_extra_repr: + m.original_extra_repr = m.extra_repr + m.extra_repr = flops_extra_repr + assert m.extra_repr != m.original_extra_repr + + def del_extra_repr(m): + if hasattr(m, 'original_extra_repr'): + m.extra_repr = m.original_extra_repr + del m.original_extra_repr + if hasattr(m, 'accumulate_flops'): + del m.accumulate_flops + + model.apply(add_extra_repr) + print(model, file=ost) + model.apply(del_extra_repr) + + +def get_model_parameters_number(model): + 
params_num = sum(p.numel() for p in model.parameters() if p.requires_grad) + return params_num + + +def add_flops_counting_methods(net_main_module): + # adding additional methods to the existing module object, + # this is done this way so that each function has access to self object + net_main_module.start_flops_count = start_flops_count.__get__( + net_main_module) + net_main_module.stop_flops_count = stop_flops_count.__get__( + net_main_module) + net_main_module.reset_flops_count = reset_flops_count.__get__( + net_main_module) + net_main_module.compute_average_flops_cost = \ + compute_average_flops_cost.__get__(net_main_module) + + net_main_module.reset_flops_count() + + # Adding variables necessary for masked flops computation + net_main_module.apply(add_flops_mask_variable_or_reset) + + return net_main_module + + +def compute_average_flops_cost(self): + """ + A method that will be available after add_flops_counting_methods() is + called on a desired net object. + Returns current mean flops consumption per image. + """ + + batches_count = self.__batch_counter__ + flops_sum = 0 + for module in self.modules(): + if is_supported_instance(module): + flops_sum += module.__flops__ + + return flops_sum / batches_count + + +def start_flops_count(self): + """ + A method that will be available after add_flops_counting_methods() is + called on a desired net object. + Activates the computation of mean flops consumption per image. + Call it before you run the network. + """ + add_batch_counter_hook_function(self) + self.apply(add_flops_counter_hook_function) + + +def stop_flops_count(self): + """ + A method that will be available after add_flops_counting_methods() is + called on a desired net object. + Stops computing the mean flops consumption per image. + Call whenever you want to pause the computation. + """ + remove_batch_counter_hook_function(self) + self.apply(remove_flops_counter_hook_function) + + +def reset_flops_count(self): + """ + A method that will be available after add_flops_counting_methods() is + called on a desired net object. + Resets statistics computed so far. 
+ """ + add_batch_counter_variables_or_reset(self) + self.apply(add_flops_counter_variable_or_reset) + + +def add_flops_mask(module, mask): + + def add_flops_mask_func(module): + if isinstance(module, torch.nn.Conv2d): + module.__mask__ = mask + + module.apply(add_flops_mask_func) + + +def remove_flops_mask(module): + module.apply(add_flops_mask_variable_or_reset) + + +def is_supported_instance(module): + for mod in hook_mapping: + if issubclass(type(module), mod): + return True + return False + + +def empty_flops_counter_hook(module, input, output): + module.__flops__ += 0 + + +def upsample_flops_counter_hook(module, input, output): + output_size = output[0] + batch_size = output_size.shape[0] + output_elements_count = batch_size + for val in output_size.shape[1:]: + output_elements_count *= val + module.__flops__ += int(output_elements_count) + + +def relu_flops_counter_hook(module, input, output): + active_elements_count = output.numel() + module.__flops__ += int(active_elements_count) + + +def linear_flops_counter_hook(module, input, output): + input = input[0] + batch_size = input.shape[0] + module.__flops__ += int(batch_size * input.shape[1] * output.shape[1]) + + +def pool_flops_counter_hook(module, input, output): + input = input[0] + module.__flops__ += int(np.prod(input.shape)) + + +def bn_flops_counter_hook(module, input, output): + input = input[0] + + batch_flops = np.prod(input.shape) + if module.affine: + batch_flops *= 2 + module.__flops__ += int(batch_flops) + + +def gn_flops_counter_hook(module, input, output): + elems = np.prod(input[0].shape) + # there is no precise FLOPs estimation of computing mean and variance, + # and we just set it 2 * elems: half muladds for computing + # means and half for computing vars + batch_flops = 3 * elems + if module.affine: + batch_flops += elems + module.__flops__ += int(batch_flops) + + +def deconv_flops_counter_hook(conv_module, input, output): + # Can have multiple inputs, getting the first one + input = input[0] + + batch_size = input.shape[0] + input_height, input_width = input.shape[2:] + + kernel_height, kernel_width = conv_module.kernel_size + in_channels = conv_module.in_channels + out_channels = conv_module.out_channels + groups = conv_module.groups + + filters_per_channel = out_channels // groups + conv_per_position_flops = ( + kernel_height * kernel_width * in_channels * filters_per_channel) + + active_elements_count = batch_size * input_height * input_width + overall_conv_flops = conv_per_position_flops * active_elements_count + bias_flops = 0 + if conv_module.bias is not None: + output_height, output_width = output.shape[2:] + bias_flops = out_channels * batch_size * output_height * output_height + overall_flops = overall_conv_flops + bias_flops + + conv_module.__flops__ += int(overall_flops) + + +def conv_flops_counter_hook(conv_module, input, output): + # Can have multiple inputs, getting the first one + input = input[0] + + batch_size = input.shape[0] + output_dims = list(output.shape[2:]) + + kernel_dims = list(conv_module.kernel_size) + in_channels = conv_module.in_channels + out_channels = conv_module.out_channels + groups = conv_module.groups + + filters_per_channel = out_channels // groups + conv_per_position_flops = np.prod( + kernel_dims) * in_channels * filters_per_channel + + active_elements_count = batch_size * np.prod(output_dims) + + if conv_module.__mask__ is not None: + # (b, 1, h, w) + output_height, output_width = output.shape[2:] + flops_mask = conv_module.__mask__.expand(batch_size, 1, output_height, + 
output_width) + active_elements_count = flops_mask.sum() + + overall_conv_flops = conv_per_position_flops * active_elements_count + + bias_flops = 0 + + if conv_module.bias is not None: + + bias_flops = out_channels * active_elements_count + + overall_flops = overall_conv_flops + bias_flops + + conv_module.__flops__ += int(overall_flops) + + +hook_mapping = { + # conv + _ConvNd: conv_flops_counter_hook, + # deconv + _ConvTransposeMixin: deconv_flops_counter_hook, + # fc + nn.Linear: linear_flops_counter_hook, + # pooling + _AvgPoolNd: pool_flops_counter_hook, + _MaxPoolNd: pool_flops_counter_hook, + _AdaptiveAvgPoolNd: pool_flops_counter_hook, + _AdaptiveMaxPoolNd: pool_flops_counter_hook, + # activation + nn.ReLU: relu_flops_counter_hook, + nn.PReLU: relu_flops_counter_hook, + nn.ELU: relu_flops_counter_hook, + nn.LeakyReLU: relu_flops_counter_hook, + nn.ReLU6: relu_flops_counter_hook, + # normalization + _BatchNorm: bn_flops_counter_hook, + nn.GroupNorm: gn_flops_counter_hook, + # upsample + nn.Upsample: upsample_flops_counter_hook, +} + + +def batch_counter_hook(module, input, output): + batch_size = 1 + if len(input) > 0: + # Can have multiple inputs, getting the first one + input = input[0] + batch_size = len(input) + else: + print('Warning! No positional inputs found for a module, ' + 'assuming batch size is 1.') + module.__batch_counter__ += batch_size + + +def add_batch_counter_variables_or_reset(module): + module.__batch_counter__ = 0 + + +def add_batch_counter_hook_function(module): + if hasattr(module, '__batch_counter_handle__'): + return + + handle = module.register_forward_hook(batch_counter_hook) + module.__batch_counter_handle__ = handle + + +def remove_batch_counter_hook_function(module): + if hasattr(module, '__batch_counter_handle__'): + module.__batch_counter_handle__.remove() + del module.__batch_counter_handle__ + + +def add_flops_counter_variable_or_reset(module): + if is_supported_instance(module): + module.__flops__ = 0 + + +def add_flops_counter_hook_function(module): + if is_supported_instance(module): + if hasattr(module, '__flops_handle__'): + return + + for mod_type, counter_hook in hook_mapping.items(): + if issubclass(type(module), mod_type): + handle = module.register_forward_hook(counter_hook) + break + + module.__flops_handle__ = handle + + +def remove_flops_counter_hook_function(module): + if is_supported_instance(module): + if hasattr(module, '__flops_handle__'): + module.__flops_handle__.remove() + del module.__flops_handle__ + + +# --- Masked flops counting +# Also being run in the initialization +def add_flops_mask_variable_or_reset(module): + if is_supported_instance(module): + module.__mask__ = None diff --git a/openselfsup/utils/gather.py b/openselfsup/utils/gather.py new file mode 100644 index 00000000..8109f0b9 --- /dev/null +++ b/openselfsup/utils/gather.py @@ -0,0 +1,69 @@ +import numpy as np + +import torch +import torch.distributed as dist + + +def gather_tensors(input_array): + world_size = dist.get_world_size() + ## gather shapes first + myshape = input_array.shape + mycount = input_array.size + shape_tensor = torch.Tensor(np.array(myshape)).cuda() + all_shape = [ + torch.Tensor(np.array(myshape)).cuda() for i in range(world_size) + ] + dist.all_gather(all_shape, shape_tensor) + ## compute largest shapes + all_shape = [x.cpu().numpy() for x in all_shape] + all_count = [int(x.prod()) for x in all_shape] + all_shape = [list(map(int, x)) for x in all_shape] + max_count = max(all_count) + ## padding tensors and gather them + output_tensors = [ 
+ torch.Tensor(max_count).cuda() for i in range(world_size) + ] + padded_input_array = np.zeros(max_count) + padded_input_array[:mycount] = input_array.reshape(-1) + input_tensor = torch.Tensor(padded_input_array).cuda() + dist.all_gather(output_tensors, input_tensor) + ## unpadding gathered tensors + padded_output = [x.cpu().numpy() for x in output_tensors] + output = [ + x[:all_count[i]].reshape(all_shape[i]) + for i, x in enumerate(padded_output) + ] + return output + + +def gather_tensors_batch(input_array, part_size=100, ret_rank=-1): + # batch-wize gathering to avoid CUDA out of memory + rank = dist.get_rank() + all_features = [] + part_num = input_array.shape[0] // part_size + 1 if input_array.shape[ + 0] % part_size != 0 else input_array.shape[0] // part_size + for i in range(part_num): + part_feat = input_array[i * + part_size:min((i + 1) * + part_size, input_array.shape[0]), + ...] + assert part_feat.shape[ + 0] > 0, "rank: {}, length of part features should > 0".format(rank) + #print("rank: {}, gather part: {}/{}, length: {}".format(rank, i, part_num, len(part_feat))) + gather_part_feat = gather_tensors(part_feat) + all_features.append(gather_part_feat) + if ret_rank == -1: + all_features = [ + np.concatenate([all_features[i][j] for i in range(part_num)], + axis=0) for j in range(len(all_features[0])) + ] + return all_features + else: + if rank == ret_rank: + all_features = [ + np.concatenate([all_features[i][j] for i in range(part_num)], + axis=0) for j in range(len(all_features[0])) + ] + return all_features + else: + return None diff --git a/openselfsup/utils/logger.py b/openselfsup/utils/logger.py new file mode 100644 index 00000000..73f9891c --- /dev/null +++ b/openselfsup/utils/logger.py @@ -0,0 +1,66 @@ +import logging + +from mmcv.runner import get_dist_info + + +def get_root_logger(log_file=None, log_level=logging.INFO): + """Get the root logger. + + The logger will be initialized if it has not been initialized. By default a + StreamHandler will be added. If `log_file` is specified, a FileHandler will + also be added. The name of the root logger is the top-level package name, + e.g., "openselfsup". + + Args: + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the root logger. + log_level (int): The root logger level. Note that only the process of + rank 0 is affected, while other processes will set the level to + "Error" and be silent most of the time. + + Returns: + logging.Logger: The root logger. + """ + logger = logging.getLogger(__name__.split('.')[0]) # i.e., openselfsup + # if the logger has been initialized, just return it + if logger.hasHandlers(): + return logger + + format_str = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + logging.basicConfig(format=format_str, level=log_level) + rank, _ = get_dist_info() + if rank != 0: + logger.setLevel('ERROR') + elif log_file is not None: + file_handler = logging.FileHandler(log_file, 'w') + file_handler.setFormatter(logging.Formatter(format_str)) + file_handler.setLevel(log_level) + logger.addHandler(file_handler) + + return logger + + +def print_log(msg, logger=None, level=logging.INFO): + """Print a log message. + + Args: + msg (str): The message to be logged. + logger (logging.Logger | str | None): The logger to be used. Some + special loggers are: + - "root": the root logger obtained with `get_root_logger()`. + - "silent": no message will be printed. + - None: The `print()` method will be used to print log messages. + level (int): Logging level. 
Only available when `logger` is a Logger + object or "root". + """ + if logger is None: + print(msg) + elif logger == 'root': + _logger = get_root_logger() + _logger.log(level, msg) + elif isinstance(logger, logging.Logger): + logger.log(level, msg) + elif logger != 'silent': + raise TypeError( + 'logger should be either a logging.Logger object, "root", ' + '"silent" or None, but got {}'.format(logger)) diff --git a/openselfsup/utils/misc.py b/openselfsup/utils/misc.py new file mode 100644 index 00000000..262f168e --- /dev/null +++ b/openselfsup/utils/misc.py @@ -0,0 +1,37 @@ +from functools import partial + +import mmcv +import numpy as np +from six.moves import map, zip + + +def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True): + num_imgs = tensor.size(0) + mean = np.array(mean, dtype=np.float32) + std = np.array(std, dtype=np.float32) + imgs = [] + for img_id in range(num_imgs): + img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0) + img = mmcv.imdenormalize( + img, mean, std, to_bgr=to_rgb).astype(np.uint8) + imgs.append(np.ascontiguousarray(img)) + return imgs + + +def multi_apply(func, *args, **kwargs): + pfunc = partial(func, **kwargs) if kwargs else func + map_results = map(pfunc, *args) + return tuple(map(list, zip(*map_results))) + + +def unmap(data, count, inds, fill=0): + """ Unmap a subset of item (data) back to the original set of items (of + size count) """ + if data.dim() == 1: + ret = data.new_full((count, ), fill) + ret[inds] = data + else: + new_size = (count, ) + data.size()[1:] + ret = data.new_full(new_size, fill) + ret[inds, :] = data + return ret diff --git a/openselfsup/utils/optimizers.py b/openselfsup/utils/optimizers.py new file mode 100644 index 00000000..8e756ea5 --- /dev/null +++ b/openselfsup/utils/optimizers.py @@ -0,0 +1,95 @@ +""" Layer-wise adaptive rate scaling for SGD in PyTorch! """ +import torch +from torch.optim.optimizer import Optimizer, required +from torch.optim import * + + +class LARS(Optimizer): + r"""Implements layer-wise adaptive rate scaling for SGD. + + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float): base learning rate (\gamma_0) + momentum (float, optional): momentum factor (default: 0) ("m") + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + ("\beta") + eta (float, optional): LARS coefficient + max_epoch: maximum training epoch to determine polynomial LR decay. + + Based on Algorithm 1 of the following paper by You, Gitman, and Ginsburg. + Large Batch Training of Convolutional Networks: + https://arxiv.org/abs/1708.03888 + + Example: + >>> optimizer = LARS(model.parameters(), lr=0.1, eta=1e-3) + >>> optimizer.zero_grad() + >>> loss_fn(model(input), target).backward() + >>> optimizer.step() + """ + + def __init__(self, + params, + lr=required, + momentum=.9, + weight_decay=.0005, + eta=0.001): + if lr is not required and lr < 0.0: + raise ValueError("Invalid learning rate: {}".format(lr)) + if momentum < 0.0: + raise ValueError("Invalid momentum value: {}".format(momentum)) + if weight_decay < 0.0: + raise ValueError( + "Invalid weight_decay value: {}".format(weight_decay)) + if eta < 0.0: + raise ValueError("Invalid LARS coefficient value: {}".format(eta)) + + defaults = dict( + lr=lr, momentum=momentum, weight_decay=weight_decay, eta=eta) + super(LARS, self).__init__(params, defaults) + + def step(self, closure=None): + """Performs a single optimization step. 
+ + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + epoch: current epoch to calculate polynomial LR decay schedule. + if None, uses self.epoch and increments it. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + weight_decay = group['weight_decay'] + momentum = group['momentum'] + eta = group['eta'] + lr = group['lr'] + + for p in group['params']: + if p.grad is None: + continue + + param_state = self.state[p] + d_p = p.grad.data + + weight_norm = torch.norm(p.data) + grad_norm = torch.norm(d_p) + + # Compute local learning rate for this layer + local_lr = eta * weight_norm / \ + (grad_norm + weight_decay * weight_norm) + + # Update the momentum term + actual_lr = local_lr * lr + + if 'momentum_buffer' not in param_state: + buf = param_state['momentum_buffer'] = \ + torch.zeros_like(p.data) + else: + buf = param_state['momentum_buffer'] + buf.mul_(momentum).add_(actual_lr, d_p + weight_decay * p.data) + p.data.add_(-buf) + + return loss diff --git a/openselfsup/utils/profiling.py b/openselfsup/utils/profiling.py new file mode 100644 index 00000000..58b1c87d --- /dev/null +++ b/openselfsup/utils/profiling.py @@ -0,0 +1,41 @@ +import contextlib +import sys +import time + +import torch + +if sys.version_info >= (3, 7): + + @contextlib.contextmanager + def profile_time(trace_name, + name, + enabled=True, + stream=None, + end_stream=None): + """Print time spent by CPU and GPU. + + Useful as a temporary context manager to find sweet spots of + code suitable for async implementation. + + """ + if (not enabled) or not torch.cuda.is_available(): + yield + return + stream = stream if stream else torch.cuda.current_stream() + end_stream = end_stream if end_stream else stream + start = torch.cuda.Event(enable_timing=True) + end = torch.cuda.Event(enable_timing=True) + stream.record_event(start) + try: + cpu_start = time.monotonic() + yield + finally: + cpu_end = time.monotonic() + end_stream.record_event(end) + end.synchronize() + cpu_time = (cpu_end - cpu_start) * 1000 + gpu_time = start.elapsed_time(end) + msg = "{} {} cpu_time {:.2f} ms ".format(trace_name, name, + cpu_time) + msg += "gpu_time {:.2f} ms stream {}".format(gpu_time, stream) + print(msg, end_stream) diff --git a/openselfsup/utils/registry.py b/openselfsup/utils/registry.py new file mode 100644 index 00000000..4ad9f876 --- /dev/null +++ b/openselfsup/utils/registry.py @@ -0,0 +1,79 @@ +import inspect +from functools import partial + +import mmcv + + +class Registry(object): + + def __init__(self, name): + self._name = name + self._module_dict = dict() + + def __repr__(self): + format_str = self.__class__.__name__ + '(name={}, items={})'.format( + self._name, list(self._module_dict.keys())) + return format_str + + @property + def name(self): + return self._name + + @property + def module_dict(self): + return self._module_dict + + def get(self, key): + return self._module_dict.get(key, None) + + def _register_module(self, module_class, force=False): + """Register a module. + + Args: + module (:obj:`nn.Module`): Module to be registered. 
+ """ + if not inspect.isclass(module_class): + raise TypeError('module must be a class, but got {}'.format( + type(module_class))) + module_name = module_class.__name__ + if not force and module_name in self._module_dict: + raise KeyError('{} is already registered in {}'.format( + module_name, self.name)) + self._module_dict[module_name] = module_class + + def register_module(self, cls=None, force=False): + if cls is None: + return partial(self.register_module, force=force) + self._register_module(cls, force=force) + return cls + + +def build_from_cfg(cfg, registry, default_args=None): + """Build a module from config dict. + + Args: + cfg (dict): Config dict. It should at least contain the key "type". + registry (:obj:`Registry`): The registry to search the type from. + default_args (dict, optional): Default initialization arguments. + + Returns: + obj: The constructed object. + """ + assert isinstance(cfg, dict) and 'type' in cfg + assert isinstance(default_args, dict) or default_args is None + args = cfg.copy() + obj_type = args.pop('type') + if mmcv.is_str(obj_type): + obj_cls = registry.get(obj_type) + if obj_cls is None: + raise KeyError('{} is not in the {} registry'.format( + obj_type, registry.name)) + elif inspect.isclass(obj_type): + obj_cls = obj_type + else: + raise TypeError('type must be a str or valid type, but got {}'.format( + type(obj_type))) + if default_args is not None: + for name, value in default_args.items(): + args.setdefault(name, value) + return obj_cls(**args) diff --git a/openselfsup/version.py b/openselfsup/version.py new file mode 100644 index 00000000..5ddd8800 --- /dev/null +++ b/openselfsup/version.py @@ -0,0 +1,5 @@ +# GENERATED VERSION FILE +# TIME: Tue Jun 16 00:02:37 2020 + +__version__ = '0.1.0+HEAD' +short_version = '0.1.0' diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..ec4ca05e --- /dev/null +++ b/requirements.txt @@ -0,0 +1,2 @@ +-r requirements/runtime.txt +-r requirements/tests.txt diff --git a/requirements/runtime.txt b/requirements/runtime.txt new file mode 100644 index 00000000..54028248 --- /dev/null +++ b/requirements/runtime.txt @@ -0,0 +1,12 @@ +matplotlib +mmcv>=0.3.1 +numpy +# need older pillow until torchvision is fixed +Pillow<=6.2.2 +six +terminaltables +sklearn +faiss-gpu==1.6.1 +tensorboard +future +tqdm diff --git a/requirements/tests.txt b/requirements/tests.txt new file mode 100644 index 00000000..d45e5409 --- /dev/null +++ b/requirements/tests.txt @@ -0,0 +1,11 @@ +asynctest +codecov +flake8 +isort +pytest +pytest-cov +pytest-runner +xdoctest >= 0.10.0 +yapf +# Note: used for kwarray.group_items, this may be ported to mmcv in the future. 
diff --git a/openselfsup/version.py b/openselfsup/version.py
new file mode 100644
index 00000000..5ddd8800
--- /dev/null
+++ b/openselfsup/version.py
@@ -0,0 +1,5 @@
+# GENERATED VERSION FILE
+# TIME: Tue Jun 16 00:02:37 2020
+
+__version__ = '0.1.0+HEAD'
+short_version = '0.1.0'
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 00000000..ec4ca05e
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,2 @@
+-r requirements/runtime.txt
+-r requirements/tests.txt
diff --git a/requirements/runtime.txt b/requirements/runtime.txt
new file mode 100644
index 00000000..54028248
--- /dev/null
+++ b/requirements/runtime.txt
@@ -0,0 +1,12 @@
+matplotlib
+mmcv>=0.3.1
+numpy
+# need an older pillow until torchvision is fixed
+Pillow<=6.2.2
+six
+terminaltables
+scikit-learn
+faiss-gpu==1.6.1
+tensorboard
+future
+tqdm
diff --git a/requirements/tests.txt b/requirements/tests.txt
new file mode 100644
index 00000000..d45e5409
--- /dev/null
+++ b/requirements/tests.txt
@@ -0,0 +1,11 @@
+asynctest
+codecov
+flake8
+isort
+pytest
+pytest-cov
+pytest-runner
+xdoctest >= 0.10.0
+yapf
+# Note: used for kwarray.group_items; this may be ported to mmcv in the future.
+kwarray
diff --git a/setup.py b/setup.py
new file mode 100644
index 00000000..ea3d3a53
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,191 @@
+#!/usr/bin/env python
+import os
+import subprocess
+import time
+from setuptools import find_packages, setup
+
+
+def readme():
+    with open('README.md', encoding='utf-8') as f:
+        content = f.read()
+    return content
+
+
+MAJOR = 0
+MINOR = 1
+PATCH = 0
+SUFFIX = ''
+if PATCH != '':
+    SHORT_VERSION = '{}.{}.{}{}'.format(MAJOR, MINOR, PATCH, SUFFIX)
+else:
+    SHORT_VERSION = '{}.{}{}'.format(MAJOR, MINOR, SUFFIX)
+
+version_file = 'openselfsup/version.py'
+
+
+def get_git_hash():
+
+    def _minimal_ext_cmd(cmd):
+        # construct a minimal environment
+        env = {}
+        for k in ['SYSTEMROOT', 'PATH', 'HOME']:
+            v = os.environ.get(k)
+            if v is not None:
+                env[k] = v
+        # LANGUAGE is used on win32
+        env['LANGUAGE'] = 'C'
+        env['LANG'] = 'C'
+        env['LC_ALL'] = 'C'
+        out = subprocess.Popen(
+            cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
+        return out
+
+    try:
+        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
+        sha = out.strip().decode('ascii')
+    except OSError:
+        sha = 'unknown'
+
+    return sha
+
+
+def get_hash():
+    if os.path.exists('.git'):
+        sha = get_git_hash()[:7]
+    elif os.path.exists(version_file):
+        try:
+            from openselfsup.version import __version__
+            sha = __version__.split('+')[-1]
+        except ImportError:
+            raise ImportError('Unable to get git version')
+    else:
+        sha = 'unknown'
+
+    return sha
+
+
+def write_version_py():
+    content = """# GENERATED VERSION FILE
+# TIME: {}
+
+__version__ = '{}'
+short_version = '{}'
+"""
+    sha = get_hash()
+    VERSION = SHORT_VERSION + '+' + sha
+
+    with open(version_file, 'w') as f:
+        f.write(content.format(time.asctime(), VERSION, SHORT_VERSION))
+
+
+def get_version():
+    with open(version_file, 'r') as f:
+        exec(compile(f.read(), version_file, 'exec'))
+    return locals()['__version__']
+
+
+def parse_requirements(fname='requirements.txt', with_version=True):
+    """Parse the package dependencies listed in a requirements file,
+    optionally stripping specific versioning information.
+
+    Args:
+        fname (str): path to requirements file
+        with_version (bool, default=True): if True, include version specs
+
+    Returns:
+        List[str]: list of requirements items
+
+    CommandLine:
+        python -c "import setup; print(setup.parse_requirements())"
+    """
+    import sys
+    from os.path import exists
+    import re
+    require_fpath = fname
+
+    def parse_line(line):
+        """Parse information from a line in a requirements text file."""
+        if line.startswith('-r '):
+            # Allow specifying requirements in other files
+            target = line.split(' ')[1]
+            for info in parse_require_file(target):
+                yield info
+        else:
+            info = {'line': line}
+            if line.startswith('-e '):
+                info['package'] = line.split('#egg=')[1]
+            else:
+                # Remove versioning from the package
+                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
+                parts = re.split(pat, line, maxsplit=1)
+                parts = [p.strip() for p in parts]
+
+                info['package'] = parts[0]
+                if len(parts) > 1:
+                    op, rest = parts[1:]
+                    if ';' in rest:
+                        # Handle platform-specific dependencies
+                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
+                        version, platform_deps = map(str.strip,
+                                                     rest.split(';'))
+                        info['platform_deps'] = platform_deps
+                    else:
+                        version = rest  # NOQA
+                    info['version'] = (op, version)
+            yield info
+
+    def parse_require_file(fpath):
+        with open(fpath, 'r') as f:
+            for line in f.readlines():
+                line = line.strip()
+                if line and not line.startswith('#'):
+                    for info in parse_line(line):
+                        yield info
+
+    def gen_packages_items():
+        if exists(require_fpath):
+            for info in parse_require_file(require_fpath):
+                parts = [info['package']]
+                if with_version and 'version' in info:
+                    parts.extend(info['version'])
+                if not sys.version.startswith('3.4'):
+                    # platform-specific deps are broken on Python 3.4
+                    platform_deps = info.get('platform_deps')
+                    if platform_deps is not None:
+                        parts.append(';' + platform_deps)
+                item = ''.join(parts)
+                yield item
+
+    packages = list(gen_packages_items())
+    return packages
+
+
+if __name__ == '__main__':
+    write_version_py()
+    setup(
+        name='openselfsup',
+        version=get_version(),
+        description='Self-Supervision Toolbox and Benchmark',
+        long_description=readme(),
+        author='Xiaohang Zhan',
+        author_email='xiaohangzhan@outlook.com',
+        keywords='unsupervised learning, self-supervised learning',
+        url='https://github.com/open-mmlab/openselfsup',
+        packages=find_packages(exclude=('configs', 'tools', 'demo')),
+        classifiers=[
+            'Development Status :: 4 - Beta',
+            'License :: OSI Approved :: Apache Software License',
+            'Operating System :: OS Independent',
+            'Programming Language :: Python :: 3',
+            'Programming Language :: Python :: 3.5',
+            'Programming Language :: Python :: 3.6',
+            'Programming Language :: Python :: 3.7',
+        ],
+        license='Apache License 2.0',
+        # parse_requirements returns an empty list if the file does not exist
+        setup_requires=parse_requirements('requirements/build.txt'),
+        tests_require=parse_requirements('requirements/tests.txt'),
+        install_requires=parse_requirements('requirements/runtime.txt'),
+        zip_safe=False)
diff --git a/tools/count_parameters.py b/tools/count_parameters.py
new file mode 100644
index 00000000..5681a82d
--- /dev/null
+++ b/tools/count_parameters.py
@@ -0,0 +1,38 @@
+import argparse
+
+from mmcv import Config
+
+from openselfsup.models import build_model
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='Count the parameters of a model')
+    parser.add_argument('config', help='train config file path')
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = parse_args()
+
+    cfg = Config.fromfile(args.config)
+
+    model =
build_model(cfg.model) + + num_params = sum(p.numel() for p in model.parameters()) / 1024. / 1024. + num_grad_params = sum(p.numel() for p in model.parameters() \ + if p.requires_grad) / 1024. / 1024. + num_backbone_params = sum( + p.numel() for p in model.backbone.parameters()) / 1024. / 1024. + num_backbone_grad_params = sum(p.numel() for p in model.backbone.parameters() \ + if p.requires_grad) / 1024. / 1024. + print( + "Number of backbone parameters: {:.5g} M".format(num_backbone_params)) + print("Number of backbone parameters requiring grad: {:.5g} M".format( + num_backbone_grad_params)) + print("Number of total parameters: {:.5g} M".format(num_params)) + print("Number of total parameters requiring grad: {:.5g} M".format( + num_grad_params)) + + +if __name__ == '__main__': + main() diff --git a/tools/dist_extract.sh b/tools/dist_extract.sh new file mode 100755 index 00000000..0a30540c --- /dev/null +++ b/tools/dist_extract.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +PYTHON=${PYTHON:-"python"} +CFG=$1 +CHECKPOINT=$2 +GPUS=${3:-8} +PORT=${PORT:-29500} + +WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/ +if [ "$CHECKPOINT" == "" ]; then + $PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ + tools/extract.py $CFG --layer-ind "0,1,2,3,4" --work_dir $WORK_DIR --launcher pytorch +else + $PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ + tools/extract.py $CFG --layer-ind "0,1,2,3,4" --checkpoint $CHECKPOINT \ + --work_dir $WORK_DIR --launcher pytorch +fi diff --git a/tools/dist_train.sh b/tools/dist_train.sh new file mode 100755 index 00000000..a54b7dda --- /dev/null +++ b/tools/dist_train.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +PYTHON=${PYTHON:-"python"} + +CFG=$1 +GPUS=$2 +PORT=${PORT:-29500} +PY_ARGS=${@:3} + +WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/ + +$PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ + tools/train.py $CFG --work_dir $WORK_DIR --seed 0 --launcher pytorch ${PY_ARGS} diff --git a/tools/extract.py b/tools/extract.py new file mode 100644 index 00000000..b6881046 --- /dev/null +++ b/tools/extract.py @@ -0,0 +1,160 @@ +import argparse +import importlib +import numpy as np +import os +import os.path as osp +import time + +import mmcv +import torch +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel +from mmcv.runner import get_dist_info, init_dist, load_checkpoint + +from openselfsup.utils import dist_forward_collect, nondist_forward_collect +from openselfsup.datasets import build_dataloader, build_dataset +from openselfsup.models import build_model +from openselfsup.models.utils import MultiPooling +from openselfsup.utils import get_root_logger + + +class ExtractProcess(object): + + def __init__(self, + pool_type='specified', + backbone='resnet50', + layer_indices=(0, 1, 2, 3, 4)): + self.multi_pooling = MultiPooling( + pool_type, in_indices=layer_indices, backbone=backbone) + + def _forward_func(self, model, **x): + backbone_feats = model(mode='extract', **x) + pooling_feats = self.multi_pooling(backbone_feats) + flat_feats = [xx.view(xx.size(0), -1) for xx in pooling_feats] + feat_dict = {'feat{}'.format(i + 1): feat.cpu() \ + for i, feat in enumerate(flat_feats)} + return feat_dict + + def extract(self, model, data_loader, distributed=False): + model.eval() + func = lambda **x: self._forward_func(model, **x) + if distributed: + rank, world_size = get_dist_info() + results = dist_forward_collect(func, data_loader, rank, + 
len(data_loader.dataset))
+        else:
+            results = nondist_forward_collect(func, data_loader,
+                                              len(data_loader.dataset))
+        return results
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='OpenSelfSup extract features of a model')
+    parser.add_argument('config', help='test config file path')
+    parser.add_argument('--checkpoint', default=None, help='checkpoint file')
+    parser.add_argument(
+        '--dataset-config',
+        default='benchmarks/extract_info/voc07.py',
+        help='extract dataset config file path')
+    parser.add_argument(
+        '--layer-ind',
+        type=str,
+        help='layer indices, separated by comma, e.g., "0,1,2,3,4"')
+    parser.add_argument(
+        '--work_dir',
+        type=str,
+        default=None,
+        help='the dir to save logs and models')
+    parser.add_argument(
+        '--launcher',
+        choices=['none', 'pytorch', 'slurm', 'mpi'],
+        default='none',
+        help='job launcher')
+    parser.add_argument('--local_rank', type=int, default=0)
+    args = parser.parse_args()
+    if 'LOCAL_RANK' not in os.environ:
+        os.environ['LOCAL_RANK'] = str(args.local_rank)
+    return args
+
+
+def main():
+    args = parse_args()
+    cfg = mmcv.Config.fromfile(args.config)
+    # set cudnn_benchmark
+    if cfg.get('cudnn_benchmark', False):
+        torch.backends.cudnn.benchmark = True
+    # update configs according to CLI args
+    if args.work_dir is not None:
+        cfg.work_dir = args.work_dir
+    layer_ind = [int(idx) for idx in args.layer_ind.split(',')]
+    cfg.model.backbone.out_indices = layer_ind
+
+    if args.checkpoint is None:
+        assert cfg.model.pretrained is not None, \
+            "A pretrained model must be set in the config " \
+            "when no checkpoint is given."
+
+    # disable memcached if the package is not available
+    if importlib.util.find_spec('mc') is None:
+        for field in ['train', 'val', 'test']:
+            if hasattr(cfg.data, field):
+                getattr(cfg.data, field).data_source.memcached = False
+
+    # init the distributed env first, since the logger depends on the dist
+    # info.
+    if args.launcher == 'none':
+        distributed = False
+    else:
+        distributed = True
+        init_dist(args.launcher, **cfg.dist_params)
+
+    # logger
+    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
+    log_file = osp.join(cfg.work_dir, 'extract_{}.log'.format(timestamp))
+    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
+
+    # build the dataloader
+    dataset_cfg = mmcv.Config.fromfile(args.dataset_config)
+    dataset = build_dataset(dataset_cfg.data.extract)
+    data_loader = build_dataloader(
+        dataset,
+        imgs_per_gpu=dataset_cfg.data.imgs_per_gpu,
+        workers_per_gpu=dataset_cfg.data.workers_per_gpu,
+        dist=distributed,
+        shuffle=False)
+
+    # build the model and load checkpoint
+    model = build_model(cfg.model)
+    if args.checkpoint is not None:
+        load_checkpoint(model, args.checkpoint, map_location='cpu')
+    if not distributed:
+        model = MMDataParallel(model, device_ids=[0])
+    else:
+        model = MMDistributedDataParallel(
+            model.cuda(),
+            device_ids=[torch.cuda.current_device()],
+            broadcast_buffers=False)
+
+    # build the extraction processor
+    extractor = ExtractProcess(
+        pool_type='specified', backbone='resnet50', layer_indices=layer_ind)
+
+    # run the extraction and save the per-split feature files on rank 0
+    outputs = extractor.extract(model, data_loader, distributed=distributed)
+    rank, _ = get_dist_info()
+    if rank == 0:
+        mmcv.mkdir_or_exist("{}/features/".format(args.work_dir))
+        for key, val in outputs.items():
+            split_num = len(dataset_cfg.split_name)
+            split_at = dataset_cfg.split_at
+            for ss in range(split_num):
+                output_file = "{}/features/{}_{}.npy".format(
+                    args.work_dir, dataset_cfg.split_name[ss], key)
+                if ss == 0:
+                    np.save(output_file, val[:split_at[0]])
+                elif ss == split_num - 1:
+                    np.save(output_file, val[split_at[-1]:])
+                else:
+                    np.save(output_file, val[split_at[ss - 1]:split_at[ss]])
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/extract_backbone_weights.py b/tools/extract_backbone_weights.py
new file mode 100644
index 00000000..733a47a2
--- /dev/null
+++ b/tools/extract_backbone_weights.py
@@ -0,0 +1,34 @@
+import argparse
+
+import torch
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='This script extracts backbone weights from a checkpoint')
+    parser.add_argument('checkpoint', help='checkpoint file')
+    parser.add_argument(
+        '--save-path', type=str, default=None, help='destination file name')
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = parse_args()
+    if args.save_path is None:
+        # assumes the checkpoint file name ends with ".pth"
+        args.save_path = args.checkpoint[:-4] + "_extracted.pth"
+    ck = torch.load(args.checkpoint, map_location=torch.device('cpu'))
+    output_dict = dict(state_dict=dict(), author="OpenSelfSup")
+    has_backbone = False
+    for key, value in ck['state_dict'].items():
+        if key.startswith('backbone'):
+            # strip the "backbone." prefix (9 characters)
+            output_dict['state_dict'][key[9:]] = value
+            has_backbone = True
+    if not has_backbone:
+        raise Exception("Cannot find a backbone module in the checkpoint.")
+    torch.save(output_dict, args.save_path)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/kill.sh b/tools/kill.sh
new file mode 100644
index 00000000..14cac347
--- /dev/null
+++ b/tools/kill.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+kill $(ps aux | grep "train.py" | grep -v grep | awk '{print $2}')
diff --git a/tools/prepare_data/create_voc_data_files.py b/tools/prepare_data/create_voc_data_files.py
new file mode 100644
index 00000000..3249cff0
--- /dev/null
+++ b/tools/prepare_data/create_voc_data_files.py
@@ -0,0 +1,193 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+#
+################################################################################
+"""
+This script extracts the VOC2007 and VOC2012 dataset files [data, labels]
+from the given annotations so that they can be used for training. The files
+can be prepared for the various data splits.
+"""
+
+from __future__ import unicode_literals
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import argparse
+import logging
+import numpy as np
+import os
+import sys
+from glob import glob
+
+# initiate the logger
+FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'
+logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
+logger = logging.getLogger(__name__)
+
+
+def validate_files(input_files):
+    """
+    Valid files are named <class_name>_<split>.txt. We want to remove
+    all the other files from the input.
+    """
+    output_files = []
+    for item in input_files:
+        if len(item.split('/')[-1].split('_')) == 2:
+            output_files.append(item)
+    return output_files
+
+
+def get_data_files(split, args):
+    data_dir = os.path.join(args.data_source_dir, 'ImageSets/Main')
+    assert os.path.exists(data_dir), "Data: {} doesn't exist".format(data_dir)
+    test_data_files = glob(os.path.join(data_dir, '*_test.txt'))
+    test_data_files = validate_files(test_data_files)
+    if args.separate_partitions > 0:
+        train_data_files = glob(os.path.join(data_dir, '*_train.txt'))
+        val_data_files = glob(os.path.join(data_dir, '*_val.txt'))
+        train_data_files = validate_files(train_data_files)
+        val_data_files = validate_files(val_data_files)
+        assert len(train_data_files) == len(val_data_files)
+        if split == 'train':
+            data_files = train_data_files
+        elif split == 'test':
+            data_files = test_data_files
+        else:
+            data_files = val_data_files
+    else:
+        train_data_files = glob(os.path.join(data_dir, '*_trainval.txt'))
+        if len(test_data_files) == 0:
+            # For the VOC2012 dataset, we have trainval, val and train data.
+            train_data_files = glob(os.path.join(data_dir, '*_train.txt'))
+            test_data_files = glob(os.path.join(data_dir, '*_val.txt'))
+        test_data_files = validate_files(test_data_files)
+        train_data_files = validate_files(train_data_files)
+        data_files = train_data_files if (split == 'train') \
+            else test_data_files
+    assert len(train_data_files) == len(test_data_files), "Missing classes"
+    return data_files
+
+
+def get_images_labels_info(split, args):
+    assert os.path.exists(args.data_source_dir), "Data source NOT found. Abort"
+
+    data_files = get_data_files(split, args)
+    # we construct a map from image name to a vector of -1, 0, 1;
+    # sorting data_files also gives us the class names in sorted order
+    img_labels_map = {}
+    for cls_num, data_path in enumerate(sorted(data_files)):
+        # for this class, each image carries a label 1, -1 or 0, meaning
+        # present, not present or ignore respectively, as in the VOC data.
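+        # A hypothetical "000005 -1" line in aeroplane_trainval.txt is
+        # remapped below as: VOC -1 (not present) -> train target 0,
+        # VOC 0 (ignore) -> train target -1, and VOC 1 (present) stays 1.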
+        with open(data_path, 'r') as fopen:
+            for line in fopen:
+                try:
+                    img_name, orig_label = line.strip().split()
+                    if img_name not in img_labels_map:
+                        img_labels_map[img_name] = -np.ones(
+                            len(data_files), dtype=np.int32)
+                    orig_label = int(orig_label)
+                    # in VOC data, -1 (not present): set it to 0 as the
+                    # train target
+                    if orig_label == -1:
+                        orig_label = 0
+                    # in VOC data, 0 (ignore): set it to -1 as the train
+                    # target
+                    elif orig_label == 0:
+                        orig_label = -1
+                    img_labels_map[img_name][cls_num] = orig_label
+                except Exception:
+                    logger.info('Error processing: {} data_path: {}'.format(
+                        line, data_path))
+
+    img_paths, img_labels = [], []
+    for item in sorted(img_labels_map.keys()):
+        img_paths.append(
+            os.path.join(args.data_source_dir, 'JPEGImages', item + '.jpg'))
+        img_labels.append(img_labels_map[item])
+
+    output_dict = {}
+    if args.generate_json:
+        cls_names = []
+        for item in sorted(data_files):
+            name = item.split('/')[-1].split('.')[0].split('_')[0]
+            cls_names.append(name)
+
+        img_ids, json_img_labels = [], []
+        for item in sorted(img_labels_map.keys()):
+            img_ids.append(item)
+            json_img_labels.append(img_labels_map[item])
+
+        for img_idx in range(len(img_ids)):
+            img_id = img_ids[img_idx]
+            out_lbl = {}
+            for cls_idx in range(len(cls_names)):
+                name = cls_names[cls_idx]
+                out_lbl[name] = int(json_img_labels[img_idx][cls_idx])
+            output_dict[img_id] = out_lbl
+    return img_paths, img_labels, output_dict
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Create VOC data files")
+    parser.add_argument(
+        '--data_source_dir',
+        type=str,
+        default=None,
+        help="Path to data directory containing ImageSets and JPEGImages")
+    parser.add_argument(
+        '--output_dir',
+        type=str,
+        default=None,
+        help="Output directory where images/label information will be written")
+    parser.add_argument(
+        '--separate_partitions',
+        type=int,
+        default=0,
+        help="Whether to create files separately for partitions train/test/val"
+    )
+    parser.add_argument(
+        '--generate_json',
+        type=int,
+        default=0,
+        help="Whether to generate json files for partitions train/test/val")
+    args = parser.parse_args()
+
+    # given the data directory for the partitions train, val, and test, we
+    # will write numpy files for each partition.
+    partitions = ['train', 'test']
+    if args.separate_partitions > 0:
+        partitions.append('val')
+
+    for partition in partitions:
+        logger.info(
+            '========Preparing {} data files========'.format(partition))
+        imgs_info, lbls_info, output_dict = get_images_labels_info(
+            partition, args)
+        img_info_out_path = os.path.join(args.output_dir,
+                                         partition + '_images.npy')
+        label_info_out_path = os.path.join(args.output_dir,
+                                           partition + '_labels.npy')
+        logger.info(
+            '=================SAVING DATA files=======================')
+        logger.info('partition: {} saving img_paths to: {}'.format(
+            partition, img_info_out_path))
+        logger.info('partition: {} saving labels to: {}'.format(
+            partition, label_info_out_path))
+        logger.info('partition: {} imgs: {}'.format(partition,
+                                                    np.array(imgs_info).shape))
+        np.save(img_info_out_path, np.array(imgs_info))
+        np.save(label_info_out_path, np.array(lbls_info))
+        if args.generate_json:
+            json_out_path = os.path.join(args.output_dir,
+                                         partition + '_targets.json')
+            import json
+            with open(json_out_path, 'w') as fp:
+                json.dump(output_dict, fp)
+            logger.info('Saved Json to: {}'.format(json_out_path))
+    logger.info('DONE!')
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/prepare_data/create_voc_low_shot_challenge_samples.py b/tools/prepare_data/create_voc_low_shot_challenge_samples.py
new file mode 100644
index 00000000..34da1a2b
--- /dev/null
+++ b/tools/prepare_data/create_voc_low_shot_challenge_samples.py
@@ -0,0 +1,131 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+#
+################################################################################
+"""
+This script creates the low-shot data samples for the VOC SVM training.
+""" +from __future__ import unicode_literals +from __future__ import print_function +from __future__ import division +from __future__ import absolute_import + +import argparse +import json +import logging +import numpy as np +import os +import random +import sys + +# create the logger +FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s' +logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout) +logger = logging.getLogger(__name__) + + +def load_json(file_path, ground_truth=True): + import json + assert os.path.exists(file_path), "{} does not exist".format(file_path) + with open(file_path, 'r') as fp: + data = json.load(fp) + img_ids = sorted(list(data.keys())) + cls_names = sorted(list(data[img_ids[0]].keys())) + if ground_truth: + output = np.empty((len(img_ids), len(cls_names)), dtype=np.int32) + else: + output = np.empty((len(img_ids), len(cls_names)), dtype=np.float64) + for idx in range(len(img_ids)): + for cls_idx in range(len(cls_names)): + output[idx][cls_idx] = data[img_ids[idx]][cls_names[cls_idx]] + return output, img_ids, cls_names + + +def save_json(input_data, img_ids, cls_names, output_file): + output_dict = {} + for img_idx in range(len(img_ids)): + img_id = img_ids[img_idx] + out_lbl = {} + for cls_idx in range(len(cls_names)): + name = cls_names[cls_idx] + out_lbl[name] = int(input_data[img_idx][cls_idx]) + output_dict[img_id] = out_lbl + logger.info('Saving file: {}'.format(output_file)) + with open(output_file, 'w') as fp: + json.dump(output_dict, fp) + + +def sample_symbol(input_targets, output_target, symbol, num): + logger.info('Sampling symbol: {} for num: {}'.format(symbol, num)) + num_classes = input_targets.shape[1] + for idx in range(num_classes): + symbol_data = np.where(input_targets[:, idx] == symbol)[0] + sampled = random.sample(list(symbol_data), num) + for index in sampled: + output_target[index, idx] = symbol + return output_target + + +def generate_independent_sample(opts, targets, img_ids, cls_names): + k_values = [int(val) for val in opts.k_values.split(",")] + # the way sample works is: for each independent sample, and a given k value + # we create a matrix of the same shape as given targets file. We initialize + # this matrix with -1 (ignore label). We then sample k positive and + # (num_classes-1) * k negatives. 
+    num_classes = targets.shape[1]
+    for idx in range(opts.num_samples):
+        for k in k_values:
+            logger.info('Sampling: {} time for k-value: {}'.format(idx + 1, k))
+            output = np.ones(targets.shape, dtype=np.int32) * -1
+            output = sample_symbol(targets, output, 1, k)
+            output = sample_symbol(targets, output, 0, (num_classes - 1) * k)
+            prefix = opts.targets_data_file.split('/')[-1].split('.')[0]
+            output_file = os.path.join(
+                opts.output_path,
+                '{}_sample{}_k{}.json'.format(prefix, idx + 1, k))
+            save_json(output, img_ids, cls_names, output_file)
+            npy_output_file = os.path.join(
+                opts.output_path,
+                '{}_sample{}_k{}.npy'.format(prefix, idx + 1, k))
+            logger.info('Saving npy file: {}'.format(npy_output_file))
+            np.save(npy_output_file, output)
+    logger.info('Done!')
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description='Sample low-shot data for VOC')
+    parser.add_argument(
+        '--targets_data_file',
+        type=str,
+        default=None,
+        help="Json file containing image labels")
+    parser.add_argument(
+        '--output_path',
+        type=str,
+        default=None,
+        help="path where low-shot samples should be saved")
+    parser.add_argument(
+        '--k_values',
+        type=str,
+        default="1,2,4,8,16,32,64,96",
+        help="Low-shot k-values for SVM testing.")
+    parser.add_argument(
+        '--num_samples',
+        type=int,
+        default=5,
+        help="Number of independent samples.")
+    if len(sys.argv) == 1:
+        parser.print_help()
+        sys.exit(1)
+    opts = parser.parse_args()
+    targets, img_ids, cls_names = load_json(opts.targets_data_file)
+    generate_independent_sample(opts, targets, img_ids, cls_names)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/prepare_data/prepare_voc07_cls.sh b/tools/prepare_data/prepare_voc07_cls.sh
new file mode 100644
index 00000000..799239d2
--- /dev/null
+++ b/tools/prepare_data/prepare_voc07_cls.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+DATA="$1"
+if [ "$DATA" == "" ]; then
+    echo "Usage: bash tools/prepare_data/prepare_voc07_cls.sh YOUR_DATA_ROOT"
+    exit 1
+fi
+
+VOC="$DATA/VOCdevkit/VOC2007/"
+
+wget http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar -P $DATA
+wget http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar -P $DATA
+tar -xf $DATA/VOCtrainval_06-Nov-2007.tar -C $DATA
+tar -xf $DATA/VOCtest_06-Nov-2007.tar -C $DATA
+
+mkdir -p $VOC/SVMLabels/low_shot/labels/
+
+python $(dirname "$0")/create_voc_data_files.py \
+    --data_source_dir $VOC \
+    --output_dir $VOC/SVMLabels/ \
+    --generate_json 1
+
+python $(dirname "$0")/create_voc_low_shot_challenge_samples.py \
+    --targets_data_file $VOC/SVMLabels/train_targets.json \
+    --output_path $VOC/SVMLabels/low_shot/labels/ \
+    --k_values "1,2,4,8,16,32,64,96" \
+    --num_samples 5
+
+mkdir -p $VOC/Lists
+
+awk 'NF{print $0 ".jpg"}' $VOC/ImageSets/Main/trainval.txt $VOC/ImageSets/Main/test.txt > $VOC/Lists/trainvaltest.txt
+
+mkdir -p data/
+ln -s $DATA/VOCdevkit data/
diff --git a/tools/publish_model.py b/tools/publish_model.py
new file mode 100644
index 00000000..4dd35332
--- /dev/null
+++ b/tools/publish_model.py
@@ -0,0 +1,34 @@
+import argparse
+import subprocess
+
+import torch
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='Process a checkpoint to be published')
+    parser.add_argument('in_file', help='input checkpoint filename')
+    args = parser.parse_args()
+    return args
+
+
+def process_checkpoint(in_file, out_file):
+    checkpoint = torch.load(in_file, map_location='cpu')
+    # remove the optimizer state for a smaller file size
+    if 'optimizer' in checkpoint:
+        del checkpoint['optimizer']
+    # if it is necessary to remove some sensitive data in checkpoint['meta'],
+    # add the code here.
+    tmp_file = in_file + '.tmp.pth'
+    torch.save(checkpoint, tmp_file)
+    # hash the processed checkpoint, not the original input file
+    sha = subprocess.check_output(['sha256sum', tmp_file]).decode()
+    # str.rstrip('.pth') strips characters rather than the suffix, so remove
+    # the extension explicitly before appending the hash
+    if out_file.endswith('.pth'):
+        out_file = out_file[:-4]
+    final_file = out_file + f'-{sha[:8]}.pth'
+    subprocess.Popen(['mv', tmp_file, final_file])
+
+
+def main():
+    args = parse_args()
+    process_checkpoint(args.in_file, args.in_file)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/single_train.sh b/tools/single_train.sh
new file mode 100644
index 00000000..84e0c3c4
--- /dev/null
+++ b/tools/single_train.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+PYTHON=${PYTHON:-"python"}
+
+CFG=$1
+PY_ARGS=${@:2}
+
+WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
+
+$PYTHON -u tools/train.py $CFG --work_dir $WORK_DIR ${PY_ARGS}
diff --git a/tools/srun_extract.sh b/tools/srun_extract.sh
new file mode 100644
index 00000000..44e2e029
--- /dev/null
+++ b/tools/srun_extract.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+set -x
+
+PARTITION=$1
+CFG=$2
+CHECKPOINT=$3
+GPUS=${4:-8}
+PY_ARGS=${@:5}
+JOB_NAME="openselfsup"
+GPUS_PER_NODE=${GPUS_PER_NODE:-8}
+CPUS_PER_TASK=${CPUS_PER_TASK:-5}
+SRUN_ARGS=${SRUN_ARGS:-""}
+
+WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
+
+srun -p ${PARTITION} \
+    --job-name=${JOB_NAME} \
+    --gres=gpu:${GPUS_PER_NODE} \
+    --ntasks=${GPUS} \
+    --ntasks-per-node=${GPUS_PER_NODE} \
+    --cpus-per-task=${CPUS_PER_TASK} \
+    --kill-on-bad-exit=1 \
+    ${SRUN_ARGS} \
+    python -u tools/extract.py $CFG \
+        --layer-ind "0,1,2,3,4" --checkpoint $CHECKPOINT \
+        --work_dir $WORK_DIR --launcher="slurm" ${PY_ARGS}
diff --git a/tools/srun_train.sh b/tools/srun_train.sh
new file mode 100755
index 00000000..acaa3ac4
--- /dev/null
+++ b/tools/srun_train.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+set -x
+
+PARTITION=$1
+CFG=$2
+GPUS=${3:-8}
+PY_ARGS=${@:4}
+JOB_NAME="openselfsup"
+GPUS_PER_NODE=${GPUS_PER_NODE:-8}
+CPUS_PER_TASK=${CPUS_PER_TASK:-5}
+SRUN_ARGS=${SRUN_ARGS:-""}
+
+WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
+
+GLOG_vmodule=MemcachedClient=-1 \
+srun -p ${PARTITION} \
+    --job-name=${JOB_NAME} \
+    --gres=gpu:${GPUS_PER_NODE} \
+    --ntasks=${GPUS} \
+    --ntasks-per-node=${GPUS_PER_NODE} \
+    --cpus-per-task=${CPUS_PER_TASK} \
+    --kill-on-bad-exit=1 \
+    ${SRUN_ARGS} \
+    python -u tools/train.py ${CFG} \
+        --work_dir ${WORK_DIR} --seed 0 --launcher="slurm" ${PY_ARGS}
diff --git a/tools/test.py b/tools/test.py
new file mode 100644
index 00000000..22cef51b
--- /dev/null
+++ b/tools/test.py
@@ -0,0 +1,123 @@
+import argparse
+import importlib
+import os
+import os.path as osp
+import time
+
+import mmcv
+import torch
+from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
+from mmcv.runner import get_dist_info, init_dist, load_checkpoint
+
+from openselfsup.datasets import build_dataloader, build_dataset
+from openselfsup.models import build_model
+from openselfsup.utils import (get_root_logger, dist_forward_collect,
+                               nondist_forward_collect)
+
+
+def single_gpu_test(model, data_loader):
+    model.eval()
+    func = lambda **x: model(mode='test', **x)
+    results = nondist_forward_collect(func, data_loader,
+                                      len(data_loader.dataset))
+    return results
+
+
+def multi_gpu_test(model, data_loader):
+    model.eval()
+    func = lambda **x: model(mode='test', **x)
+    rank, world_size = get_dist_info()
+    results = dist_forward_collect(func, data_loader, rank,
+                                   len(data_loader.dataset))
+    return results
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='OpenSelfSup test (and eval) a model')
+    parser.add_argument('config', help='test config file path')
+    parser.add_argument('checkpoint', help='checkpoint file')
+    parser.add_argument(
+        '--work_dir',
+        type=str,
+        default=None,
+        help='the dir to save logs and models')
+    parser.add_argument(
+        '--launcher',
+        choices=['none', 'pytorch', 'slurm', 'mpi'],
+        default='none',
+        help='job launcher')
+    parser.add_argument('--local_rank', type=int, default=0)
+    parser.add_argument('--port', type=int, default=29500,
+                        help='port only works when launcher=="slurm"')
+    args = parser.parse_args()
+    if 'LOCAL_RANK' not in os.environ:
+        os.environ['LOCAL_RANK'] = str(args.local_rank)
+
+    return args
+
+
+def main():
+    args = parse_args()
+
+    cfg = mmcv.Config.fromfile(args.config)
+    # set cudnn_benchmark
+    if cfg.get('cudnn_benchmark', False):
+        torch.backends.cudnn.benchmark = True
+    # update configs according to CLI args
+    if args.work_dir is not None:
+        cfg.work_dir = args.work_dir
+
+    # ensure the loaded checkpoint is used rather than pretrained weights
+    cfg.model.pretrained = None
+
+    # disable memcached if the package is not available
+    if importlib.util.find_spec('mc') is None:
+        for field in ['train', 'val', 'test']:
+            if hasattr(cfg.data, field):
+                getattr(cfg.data, field).data_source.memcached = False
+
+    # init the distributed env first, since the logger depends on the dist
+    # info.
+    if args.launcher == 'none':
+        distributed = False
+    else:
+        distributed = True
+        if args.launcher == 'slurm':
+            cfg.dist_params['port'] = args.port
+        init_dist(args.launcher, **cfg.dist_params)
+
+    # logger
+    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
+    log_file = osp.join(cfg.work_dir, 'test_{}.log'.format(timestamp))
+    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
+
+    # build the dataloader
+    dataset = build_dataset(cfg.data.val)
+    data_loader = build_dataloader(
+        dataset,
+        imgs_per_gpu=cfg.data.imgs_per_gpu,
+        workers_per_gpu=cfg.data.workers_per_gpu,
+        dist=distributed,
+        shuffle=False)
+
+    # build the model and load checkpoint
+    model = build_model(cfg.model)
+    load_checkpoint(model, args.checkpoint, map_location='cpu')
+
+    if not distributed:
+        model = MMDataParallel(model, device_ids=[0])
+        outputs = single_gpu_test(model, data_loader)
+    else:
+        model = MMDistributedDataParallel(
+            model.cuda(),
+            device_ids=[torch.cuda.current_device()],
+            broadcast_buffers=False)
+        outputs = multi_gpu_test(model, data_loader)  # dict{key: np.ndarray}
+
+    rank, _ = get_dist_info()
+    if rank == 0:
+        for name, val in outputs.items():
+            dataset.evaluate(
+                torch.from_numpy(val), name, logger, topk=(1, 5))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/train.py b/tools/train.py
new file mode 100644
index 00000000..1ff05932
--- /dev/null
+++ b/tools/train.py
@@ -0,0 +1,142 @@
+from __future__ import division
+import argparse
+import importlib
+import os
+import os.path as osp
+import time
+
+import mmcv
+import torch
+from mmcv import Config
+from mmcv.runner import init_dist
+
+from openselfsup import __version__
+from openselfsup.apis import set_random_seed, train_model
+from openselfsup.datasets import build_dataset
+from openselfsup.models import build_model
+from openselfsup.utils import collect_env, get_root_logger, traverse_replace
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Train a model')
+    parser.add_argument('config', help='train config file path')
+    parser.add_argument(
+        '--work_dir',
+        type=str,
+        default=None,
+        help='the dir to save logs and models')
+    parser.add_argument(
+        '--resume_from',
help='the checkpoint file to resume from') + parser.add_argument( + '--pretrained', default=None, help='pretrained model file') + parser.add_argument( + '--gpus', + type=int, + default=1, + help='number of gpus to use ' + '(only applicable to non-distributed training)') + parser.add_argument('--seed', type=int, default=None, help='random seed') + parser.add_argument( + '--deterministic', + action='store_true', + help='whether to set deterministic options for CUDNN backend.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + parser.add_argument('--port', type=int, default=29500, + help='port only works when launcher=="slurm"') + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + # update configs according to CLI args + if args.work_dir is not None: + cfg.work_dir = args.work_dir + if args.resume_from is not None: + cfg.resume_from = args.resume_from + cfg.gpus = args.gpus + + # check memcached package exists + if importlib.util.find_spec('mc') is None: + traverse_replace(cfg, 'memcached', False) + + # init distributed env first, since logger depends on the dist info. + if args.launcher == 'none': + distributed = False + assert cfg.model.type not in \ + ['DeepCluster', 'MOCO', 'SimCLR', 'ODC', 'NPID'], \ + "{} does not support non-dist training.".format(cfg.model.type) + else: + distributed = True + if args.launcher == 'slurm': + cfg.dist_params['port'] = args.port + init_dist(args.launcher, **cfg.dist_params) + + # create work_dir + mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) + # init the logger before other steps + timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) + log_file = osp.join(cfg.work_dir, 'train_{}.log'.format(timestamp)) + logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) + + # init the meta dict to record some important information such as + # environment info and seed, which will be logged + meta = dict() + # log env info + env_info_dict = collect_env() + env_info = '\n'.join([('{}: {}'.format(k, v)) + for k, v in env_info_dict.items()]) + dash_line = '-' * 60 + '\n' + logger.info('Environment info:\n' + dash_line + env_info + '\n' + + dash_line) + meta['env_info'] = env_info + + # log some basic info + logger.info('Distributed training: {}'.format(distributed)) + logger.info('Config:\n{}'.format(cfg.text)) + + # set random seeds + if args.seed is not None: + logger.info('Set random seed to {}, deterministic: {}'.format( + args.seed, args.deterministic)) + set_random_seed(args.seed, deterministic=args.deterministic) + cfg.seed = args.seed + meta['seed'] = args.seed + + if args.pretrained is not None: + assert isinstance(args.pretrained, str) + cfg.model.pretrained = args.pretrained + model = build_model(cfg.model) + + datasets = [build_dataset(cfg.data.train)] + assert len(cfg.workflow) == 1, "Validation is called by hook." 
+    if cfg.checkpoint_config is not None:
+        # save the openselfsup version and config file content in the
+        # checkpoints as meta data
+        cfg.checkpoint_config.meta = dict(
+            openselfsup_version=__version__, config=cfg.text)
+    train_model(
+        model,
+        datasets,
+        cfg,
+        distributed=distributed,
+        timestamp=timestamp,
+        meta=meta)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/upgrade_models.py b/tools/upgrade_models.py
new file mode 100644
index 00000000..de4c2c5e
--- /dev/null
+++ b/tools/upgrade_models.py
@@ -0,0 +1,27 @@
+import argparse
+
+import torch
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('checkpoint', help='checkpoint file')
+    parser.add_argument(
+        '--save-path', type=str, required=True, help='destination file name')
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = parse_args()
+    ck = torch.load(args.checkpoint, map_location=torch.device('cpu'))
+    output_dict = dict(state_dict=dict(), author='OpenSelfSup')
+    # copy everything except the head weights
+    for key, value in ck.items():
+        if key.startswith('head'):
+            continue
+        output_dict['state_dict'][key] = value
+    torch.save(output_dict, args.save_path)
+
+
+if __name__ == '__main__':
+    main()