From 853f0c6bcab6d5d34c5843bd6bf580f5c6be2314 Mon Sep 17 00:00:00 2001
From: Ezra-Yu <18586273+Ezra-Yu@users.noreply.github.com>
Date: Tue, 22 Aug 2023 11:29:42 +0800
Subject: [PATCH 01/20] [DOC] Update dataset download source from OpenDataLab
 to OpenXLab (#1765)

* update opendatalab to openXlab

* update dataset-index

---------

Co-authored-by: fangyixiao18
---
 dataset-index.yml                         |  4 ++--
 docs/en/user_guides/dataset_prepare.md    | 14 +++++++-------
 docs/zh_CN/user_guides/dataset_prepare.md | 14 +++++++-------
 3 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/dataset-index.yml b/dataset-index.yml
index ecf7f5b5..40ca6206 100644
--- a/dataset-index.yml
+++ b/dataset-index.yml
@@ -1,11 +1,11 @@
 imagenet1k:
-  dataset: ImageNet-1K
+  dataset: OpenDataLab/ImageNet-1K
   download_root: data
   data_root: data/imagenet
   script: tools/dataset_converters/odl_imagenet1k_preprocess.sh
 
 cub:
-  dataset: CUB-200-2011
+  dataset: OpenDataLab/CUB-200-2011
   download_root: data
   data_root: data/CUB_200_2011
   script: tools/dataset_converters/odl_cub_preprocess.sh
diff --git a/docs/en/user_guides/dataset_prepare.md b/docs/en/user_guides/dataset_prepare.md
index 7421be22..17ec229b 100644
--- a/docs/en/user_guides/dataset_prepare.md
+++ b/docs/en/user_guides/dataset_prepare.md
@@ -144,15 +144,15 @@ ImageNet has multiple versions, but the most commonly used one is [ILSVRC 2012](
 
 ````{group-tab} Download by MIM
 
-MIM supports downloading from [OpenDataLab](https://opendatalab.com/) and preprocessing ImageNet dataset with one command line.
+MIM supports downloading from [OpenXLab](https://openxlab.org.cn/datasets) and preprocessing the ImageNet dataset with a single command.
 
-_You need to register an account at [OpenDataLab official website](https://opendatalab.com/) and login by CLI._
+_You need to register an account at the [OpenXLab official website](https://openxlab.org.cn/datasets) and log in via the CLI._
 
 ```Bash
-# install OpenDataLab CLI tools
-pip install -U opendatalab
-# log in OpenDataLab, register if you don't have an account.
-odl login
+# install the OpenXLab CLI tools
+pip install -U openxlab
+# log in to OpenXLab
+openxlab login
 # download and preprocess by MIM, better to execute in $MMPreTrain directory.
 mim download mmpretrain --dataset imagenet1k
 ```
@@ -278,7 +278,7 @@ test_dataloader = val_dataloader
 | [`SUN397`](mmpretrain.datasets.SUN397)(data_root[, split, pipeline, ...]) | ["train", "test"] | [SUN397](https://vision.princeton.edu/projects/2010/SUN/) Dataset. |
 | [`VOC`](mmpretrain.datasets.VOC)(data_root[, image_set_path, pipeline, ...]) | ["train", "val", "tranval", "test"] | [Pascal VOC](http://host.robots.ox.ac.uk/pascal/VOC/) Dataset. |
 
-Some dataset homepage links may be unavailable, and you can download datasets through [OpenDataLab](https://opendatalab.com/), such as [Stanford Cars](https://opendatalab.com/Stanford_Cars/download).
+Some dataset homepage links may be unavailable; in that case you can download the datasets through [OpenXLab](https://openxlab.org.cn/datasets), for example [Stanford Cars](https://openxlab.org.cn/datasets/OpenDataLab/Stanford_Cars).
 ## Supported Multi-modality Datasets
diff --git a/docs/zh_CN/user_guides/dataset_prepare.md b/docs/zh_CN/user_guides/dataset_prepare.md
index 59a0d0af..aa1e1fde 100644
--- a/docs/zh_CN/user_guides/dataset_prepare.md
+++ b/docs/zh_CN/user_guides/dataset_prepare.md
@@ -142,15 +142,15 @@ ImageNet 有多个版本,但最常用的一个是 [ILSVRC 2012](http://www.ima
 
 ````{group-tab} MIM 下载
 
-MIM支持使用一条命令行从 [OpenDataLab](https://opendatalab.com/) 下载并预处理 ImageNet 数据集。
+MIM支持使用一条命令行从 [OpenXLab](https://openxlab.org.cn/datasets?lang=zh-CN) 下载并预处理 ImageNet 数据集。
 
-_需要在 [OpenDataLab 官网](https://opendatalab.com/) 注册账号并命令行登录_。
+_需要在 [OpenXLab 官网](https://openxlab.org.cn/datasets?lang=zh-CN) 注册账号并命令行登录_。
 
 ```Bash
-# 安装opendatalab库
-pip install -U opendatalab
-# 登录到 OpenDataLab, 如果还没有注册,请到官网注册一个
-odl login
+# 安装 OpenXLab CLI 工具
+pip install -U openxlab
+# 登录 OpenXLab
+openxlab login
 # 使用 MIM 下载数据集, 最好在 $MMPreTrain 目录执行
 mim download mmpretrain --dataset imagenet1k
 ```
@@ -276,7 +276,7 @@ test_dataloader = val_dataloader
 | [`SUN397`](mmpretrain.datasets.SUN397)(data_root[, split, pipeline, ...]) | ["train", "test"] | [SUN397](https://vision.princeton.edu/projects/2010/SUN/) 数据集 |
 | [`VOC`](mmpretrain.datasets.VOC)(data_root[, image_set_path, pipeline, ...]) | ["train", "val", "tranval", "test"] | [Pascal VOC](http://host.robots.ox.ac.uk/pascal/VOC/) 数据集 |
 
-有些数据集主页链接可能已经失效,您可以通过[OpenDataLab](https://opendatalab.com/)下载数据集,例如 [Stanford Cars](https://opendatalab.com/Stanford_Cars/download)数据集。
+有些数据集主页链接可能已经失效,您可以通过[OpenXLab](https://openxlab.org.cn/datasets?lang=zh-CN)下载数据集,例如 [Stanford Cars](https://openxlab.org.cn/datasets/OpenDataLab/Stanford_Cars)数据集。
 
 ## OpenMMLab 2.0 标准数据集

From e1675e893e4720629ef995c620bc2c63f1d52b65 Mon Sep 17 00:00:00 2001
From: "zhengjie.xu"
Date: Wed, 30 Aug 2023 06:47:21 -0500
Subject: [PATCH 02/20] [Docs] Update QRcode (#1778)

* Add miaomiao_qrcode.jpg

* Update qrcode
---
 README_zh-CN.md               |   4 ++--
 resources/miaomiao_qrcode.jpg | Bin 0 -> 225737 bytes
 2 files changed, 2 insertions(+), 2 deletions(-)
 create mode 100644 resources/miaomiao_qrcode.jpg

diff --git a/README_zh-CN.md b/README_zh-CN.md
index 6820dd64..801d3183 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -333,10 +333,10 @@ MMPreTrain 是一款由不同学校和公司共同贡献的开源项目。我们
 
 ## 欢迎加入 OpenMMLab 社区
 
-扫描下方的二维码可关注 OpenMMLab 团队的 [知乎官方账号](https://www.zhihu.com/people/openmmlab),加入 OpenMMLab 团队的 [官方交流 QQ 群](https://jq.qq.com/?_wv=1027&k=aCvMxdr3) 或联络 OpenMMLab 官方微信小助手
+扫描下方的二维码可关注 OpenMMLab 团队的 [知乎官方账号](https://www.zhihu.com/people/openmmlab),扫描下方微信二维码添加喵喵好友,进入 MMPretrain 微信交流社群。【加好友申请格式:研究方向+地区+学校/公司+姓名】
 
 <div align="center">
-
+
 </div>
 
 我们会在 OpenMMLab 社区为大家
diff --git a/resources/miaomiao_qrcode.jpg b/resources/miaomiao_qrcode.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d34cbae6fd131d668b0f16bfe918993610257131
GIT binary patch
literal 225737
zaV&z%@Nfpa#Tf}5eml)Wlqj(ey_)6Z^B8Owvn~V#f~vZq*TAQZVNg2sf57GTHE^je z(LfuEswLsfbCaUeZil)fgbE{dj5wxsqx_HVvgNsJadax>8SM zitFu69`;*b5~%|#zKQjmgp5F(>0EIy;!sZEC8HF{H!F#!3zvyQA~`Lo4(Dw z9cYzRFE(iA>d*efwI8p>2ps%XG{M^YdkAkVGtW06QTKnUGM#oqFtJ{zU0- z+qisW6pAt4*6%mB4yXmG|EVJKtUUDFvsS#y7% zO`k{pc)!h}2>TbwdH(sypsx8^YY(ROM_(U$M_6d-8;o6hVBt_q_W;OI4Lv{A%A)Lq zx0cq{zV^)C@x_?H;Yc`4QsDvNG)l%OFX@^lNNo3uBnW+dQ0X{1A^yvkWylGZnk1SZ=_mxUUDq zS|^>V5BoRSN@vmA=EhXG1Zs(O{yAW0^>`|>gVviydDSXGMG zBMKwvVa_>|3cG!~^V)skNoQ`XC8>W=%xb18mfkjoUe_+7Yp*DocGS?|>k-00n-E4t znU4|egFs>d8Qa>?t%?Qu6Ycvgu1^p&HW#lpe11|}`BaW&HKas%uCPEcW(jPZwXM_YgWCmh!rE#z^zbGFgA54lKKrqhahkGSJ{ z3_Y`n`qL*@FC-ZXfvMS-UtEME9wjwRFT}?7qZ_;aiQLY6^tE*j>e^-1ZrO3&`WoaP zu`jP3KxDLl@_SYg6|EWx10^W#18>yda~E1n|3v(rrO(n-(u98Nkc~t;u7cS`lKK&6 zEKeCjfmdpu+CI209|edCE36F`gg}<4YXk#@Z#AzesHv!AiMh%09SkEy?*Q-AVJQN9 z{)|FLUcX2ynkyy!)({KZGaT5|KFM+&`!9H5e*_4Pz~v;;<{IiuI8dcRV!+Un z*+S*k0g zr9DK&l~(o{OshgW+rP|ZrHMFnBZ$$Jn|*etI{g~TYMD6#9C zbthX<41vw!r-8+wqSEg^WD2ul=P7WVR{kd+ts67nx%A!|;(qwd{kfRz&w{RiCwKln zuMnE}?MnSLtw|s3p9QYvy;(-QPssB0FV_OcN&QQ6{W^D#J_CLOLa86%qVh;q^RP=t z@^4bEnl7Ks;LKbWkC?T5D24zxrr^2C^O06-RmNDm=OlK{75Den0IAoIDG22gx@er2 z01ET76{U@snVEqTSOPU- z-v_l^gEfzNxideg|JuG=m)yey?}UobYpM?J?NL+lkg%ojw6S!Y;YxFc?G?*LMAQ$} zGX%NeAh(87{mobrRhY`zJ)L@=hlVi>H>l!49%`gdfZavnGZv;Haf+olE(O6>qr?lq zeEej>n%zKOeE?_(zZSccuP>;}l&tRg7=*n^#^xX!k_dc3gp~S_W_mu3B$jxmW`M5* zQqy1~p-NZ9ig~ZD!c(D90y&++d8PAOheIRx8yT=s{N*zTiz)dK_8b@wjo*)d+N*5U z71Tz8x-T85SjauTIc`HPrbF^v_zI;{0e-otp;JkK47M4u`X6GbkVxB1u_XSFpkUmG zq~~%l%ktJg$<@DLu4Pg|lf*;ApTl^^;4ZL%nI6<84O~agkPF zNXgGx@cU#0yqs0b7Dj!^Nop?SoYo;*J+=GN`|n8kns~8aJMWh|mH0q$c;Vq`iz-p- z@GJ6BEBuRY5CVutzKeVCg+Yt9=^fJ(h#(Mfz1_ZQcDpEc-XfWpme2B(X0I{C;*H&a zm88+7d`UXrNDYjzFR8hGupi?h>7FD&gvFg1l$5uCe8k7+!uVZ^)(d830-oVq2Sh})1*=YpXTm(HR`UVn;_foCXjHsxz^LWyS8MZ{Fg5m z6qX$IUOxEdPG>rK_~Nh`TfgrkR~|BTroAh(XkZFRc^s-FVh{IDRNr+tzay`4drBB- zq3tq5S=gK|&Ek=O!_(Eud#$EqKfiXK!Z@{k)K%GM_mVQv_+FRo>~)sc&t3pwQKc7KjMCXM z_=~6ePw%?uPjnL7A7+JF%-6Opq;LIG(o_KUZ9qxWoUv-c->d%6;|ZIwt=oi7B+Sn1 zDPAEz`+MFS7NzWVw~EVn#dD1UOR$Z+Rt{GGars7r)H}8b(cOVsO#?Vx;7x=knH%FZ zJzz2yVHwTR4$jZa&V8${mR<>UEEvvA>piYo$9rn=H^FrwwtW3u1vq+lSCu>g8$NuO zCz9ibeEQWgo=(j(dO4D=8_eWUTy+HUcz;f<3e-OJDtBf@wSetIh9KzD@vD1ml*dl7&jmxJ?>=jTQ$VYPYQWGy%$HRMdxYkGK+cLTFwCK7S!=rL?k8Eh#i*XAG%CTATbf zuKG17B0nf=CJwdDGz>S>w;4{M>$1?=WW?^c=Eev)Jf4<){k!K6{Yj77iQmoAZ5H(Q z0sN$?xpC_1N72>+)8Dp{s}SIKvv9)4P4My?@VohD>L-w`oegAb1CpjMSC+TkEc4DJ zu75lOF66UI^XbiR4LvaU^yzPq54BK#RImP(_l_lLy2rd`0?N~nEWUsHPhNIWqV2fW z1LBnr)=9Yuz1HumLDlt@i2@}QdERn^3Bq;?6t{cKT3U(c_U3y(a`gz4GshmM`u-?R zQX$_8XR(&(Th-D7;l>@F6nuq@VX8%jYt^9;g_$yf&k#k|1_!V3Ielejd4?~|<61o#) z`|vZ%$_uYV5PA59!j#-Q0L$c>nn#G}*Cpw4$Zzq*%WB@d@w9{i{3j=EvAINVtY$n? 
zY}5efBp@-8Z-P@j9~u8$5D0`p441ICOcFGC1nC>?K`4GxAYLz4CP!6&pxaPDI?SVT zAF5G~-KIJmt^F)on*p~40=rTP_@ZNfWXNsfaiUt=dURuL61qIB-SO=GHNxk}k=D_y zj+5c0F}1H118LM_#fODJZ6HM>M<7F4cj3-hG1 z%JE?K$%Hm7>9!ty$y#AQ3=v;1P|H{G5@x|T2%fu+1qpU}nPB3%a$cd}xDlE!K=85BsswFbKpMoyW^@c!QDcK??FXEgsN&iYSO@9Lw%mBL7K zwVp=C#w~-r$fh3TsI6sM-jS?7n_GZ`X4+S)0L6}!cMYL`Jj9Mk)h|`T1jVtdnx`DC zVp}AMHur4JP?&6pV~yNyxCY*Q+9J{Mw=UvUS+Yhf$hD}=f{xlwUQeZd4Hx?O)%oj2 zm3~G@NsmpGw6uf6n%EMi@|CCo?9leHa7W4=fgEt68=ij zNbJd6A>Vo6KUV!Er!&Z>Xs|houAw(+xlFyW-T~$myg3nbjy2k)O6{VEN7r{dKmT|S z{V+NwTc(=7L=)EK{hIW#u*ZTZA<~PIH1S!PwsNnM3#w7MlTjxwMqocIlNjuaZO z$kITBC3dh>qkL7o^tq($^Llpm)5V=01L9}=Y$y-o+CxbEFo_nNZYF8L%M1u)BOZ8K zM@?Y}LRh=ZK*0I1r-q`c)BujcOj+N)Ib+|Z)d!_aK-E83a`SnvsqCs0QR}bflX$UI zyVdCO$VPykExbCBxyytsyoClDqB8;+a@)y@Q42`OhDntpqe+IKKxcQ9tODxqxC_Fdc4nc%GmzTdH?S0rN z+8>AMt?sR4ib%#LN3$jh#M3g2d}d3Oc`bIZv4gYk7Vkp5HQagJ!W^Kr9yJ)Qd3!~6VT$K%X|Ys&QnEJ#HI{-0 z2B58F>}ouv*i%3@K(4eZv&tI%L$o(8MZLN=0T`*FD(k#YL_q|X6^nRDc~Mbn!M^=$ z@`PC%tWW(Z`M&~$Q^Xry0^+=I?c1nfU-P^m^INgjX|#`nhnhvVGFh5Y?;-v)^_+_J z{AQ3afo*NEa=Nz0n~STs^qXb(I`5>jCL9Ds&HpOCXCbm4puv6qn`i#9qs*IvItv4uV7) zj4_Rk`o>}>BKg8MyZc3xB7m+WvVD6KhYJN6T#px#oFujPA=v@NikR-z+&7fx6l|7<}4~-$2 z<9$A7iB*^z|Msx=6r!s@l8L?;1alW*1hqaXTfCt%5>vgLixgZgN+BC%yJ^UL8-D zlvVb4sD9JnstvKKCTCogv9gMH62x{u4nBph)4XtQ1Bli2IE1H&m}=DF{VfZx z?3gA`JLErH$Ur*D1V$`#owjnFn7WbgswwfeLEO%dnCmx@kNitY)+h%j`?WJOz8gB< zcmbwdTiBVhZvtI5ZLt>k+pJxR+Jlg;zxar1%)~2ghApv@a}zQK!=xJBsxSwCLrqzW z!{@ZML2h5CxRI2!NoB(fRv#ch_ZLjNfBTV-D?%*AUR}96ABp@KV z9{rwpB^fzGBgH7qAI*mrfOVs`@ag$Qg_*hB=v;r!xUyspZuI=*Z}6JxhjimU15G)o zaK07@;A=KWJqp;lu4kBkU!6SPjemWsffZYmGt=)35!zaAgczOUG2LnOW2Rg+`mdTcFZ2Z5?8~xkG1-}@ zXiMn=Ck{It72pe}rEShub~H5TKSJma>5_SThV*ejYjOb(>3QGU`;^HQobgIq+*@&5 zYJrl2rUb6eppB}dZD0h=Be`NcD4TyB5;8-+=o5LyG6fekNZQ@pIQ`#WT5uP`^2jb* z<3sh=wFKPlrXDjYN1uqAaym?J4)%P2#9~5N#Z3&ye`Qdxg#xQ&rN%>tZ)~~F)Nl}?j=jn^LOZ^R@PNUp6`tx?jGl)MO^5|>t;_nm( z;H5$xthd4-*f_sSNKHY+?3gD**|H%{<`sNqd1*1tuB?~uXOj?7nZL1BX-;%y=qvi? 
zVU*3#MBztdT03{*ImHz2d}vg^aooCkDHB?N8MLe58kFkh9tcw-Z&n9&7WtL1Wl=X>% z2;`+{xSOxsMh@Y2nh(cibjc?4t1wd7EF{K`=k&`=MCbnR=E^-ELP|F^_c?o1MiH_4 zIwl{_NKdUDMm5e`Ax2pcRz;-!ium`Hb(D8kf zMq4x-=L+O^8}r(%-u;gtX-wzWPv;dp?wYvuaWDi-bLj1mlf+V+SxjdmALdgs5N}zR zD2Wn}DvMK(ed(kN>qUB+&8o-&PEo~tfwkHpW+>!qkouXOcK3}8n_j%EiEi$%sC{i9 z3$9E2K{7b|oz#lsT7v%%EH{kV#KP7VOBYsEC;4vPEKj~9=Zr$gZ!I1zBXdkvCLMO) zgW>0g%`?>4kxz-lR#{w*;%BMZv#vYzFcIZq5#_A|ZjI%{_;)wtrRK7Y_8QYH>B~+~ z_rLhvEof7H>G2P%EOdqIbp;`YMW3;3?o;r31-Y)>6%6jl{gt?H0grg&nJ8X>xg$k1 zm+4dhORfx=<0v^Mu2mD|lw@W`)T=zKeLJLUK_xAFJBUI(S}4)e+}E-_7XodTmq{-| zqcSLYZ2z#=c>BGsqAW;0l+L4k;~65PZiscx#p5bn{2lauog92(Q6zigkz=gn;yN68 z38-PjG_F{%6uQ(ZS|wDJEL=5yYvHmtASRCt>c# z8nb!dEbUL!C*)$Sz=wk~{Y{Wux`ta|^Ck2;t7j}Jrg&G~6tRf6JQdw5Q;vWQ~d z-owm=?F8X{!MGCM91U)#@<+oqEs=RNEKV?S;uR$><9Tu z&oEdD#BqmH3)K&57M>S-kD@mNy}e2KQ`ER+COJAB^`rhD%HA`si7sp(q$*92-Vu_3 z5CYOWNKfc3q4(Yqkd7iK2%#oGC<00kp@a0MNUzenB27VxpkUV>eBS@N``b^uUm<7a zoRi7S+}C}Tq+AE~n~yl%l?VUp5QGgG-;)E>g}03^C4IZii2+*U|MY@iEpE@% z(|x(UDD{`9fbZT&-C6%>OQW2D5yXtpR*=jKZ^|LVw@uv>Kl)Lr#w0dWY$~^ufdYcW zuXca__%N&5$Us@W^nNUs-*qy2w`XabkkBvk!)t%r<4oWXk7w^jvK~n%R5~{d;XmtN zu)5Q0vwyPRx)|D_{T=-lcs49=g-^Ul)KpwLu)h{# z8*LV}SPfu8T#*MzKGIi(G@0h+L9!p%`Ly+i3)(urhZrww-mJh^YR7>5bm_a`{Kf)C% zZagR^y??U`t6S18!Nj<3is1gJwDP=f0l z&pyvH?$Cem=2+KQgTKnyD6bY)EA89WkTkKwEBf@r%9vvz*Kw{qbIWNVzD%BPJ2us% zfuA1wSyG{c)NuJWp(R<-_Fl(M6H)l<-B|esq6ifk<+0Ky;zZO3PXr@X3a68}e+DrcFUMjemb|q4IFvYP-XAfw2`#J$g6MC zfCE8b_k7PseA08`P6QoO<;PD>+kt4APAX9TK`_vx$elUz;6ACVu za5y{S*m%F;rKf9`L@Q)p-T%lHE>8F8Chigef|4!oU0-gqQ((nf$Pm%-{uTgMn5UF* zdG;Ov+e%4gnt96SS=JmMAYJR;*cJ+Kf6T~5v^1^Kxy?N>rOuBVE$n!*q6^{ zuuBP`D@x*q^sUU`%fsT}XLJu}FWirjHqAV$Krq^L=e)rq{Md{E`+ zd!uiloj;Hf6zn?9dKNT%Z&t>}*(65ntt7Hn(87>lWdlExl-mO0wf{hz&i_E1Z(R?M zy`P4LrB#;+;dK5RO2)SXmbY@bVr8|PDa4&yq5zkiH(y{IC@2^ZhE^ihYv)3k-Ch=w z#$#Uo+P`!|>-b+cw9y}JIltdOtbSqk+i52MQ+Z77_&)&sl;?_(UCqI*+1vkjrkdwH zchRo99t%>p(O^pBi+4r0U+05Y?$Am1N&9=N&Fom~=f_U>w{T5xos5;tl&pS!a+Y}( zY&Mpu#oJSd)wW`@wLXaYk!9N`LqvO~b4;q4AyA2v-{LpOr3AaHBhNs;((mKYRJl$z ziAA9iSa<*=4M+M74d3VdnS3{Wap8Bg=A~7AQOfyp$R?FTje=Qq(ha-CkZ_f!iy=L? 
z&ZoL%FLSgUxHY~!u6|H){t#gC01~%I@Y4Q|Uwj(BJq|YaRq~Cd7lKF4+ANy`AeU^N zT^iq-*yYU#t_b?EJxZJxLy7^A8Y+d7)M}NuLn_{b+aU?jW3Neg%OwM>U^_Jk`wxU}DP20Fwhbd+qB=GNi+ z$%B%(brbN|SwC*4n&W1X>(|bzqEi1xeo08Cc!c?>0J-E%(vh|9h!7(Q1IXPZvBC*p z781|2$X|>+xd?E(omi0`A85?>;>(H-6w_L>h031pue;2s`7;Q-e`MRAjgE9@BIfi} zKO2vOH@ks6Qp@!sLS9iwTCMwA?X35pOd2<~t2Y_PbHaP8Ds)x^7fmF)^?9VPNrjV- zYQyu|?tJ|BSot={VpX+=rBqek)Zr8+im~qv+o$uK4NqtyL}GJJ1}08Yk9KCD?vIBn zs!+V|Oo$KVWQIj22$X9W%3eGuU*R#lo6-M>t3+c}FZZmO@o^&cw>SS`EVj*38Rgc~ z1HAWV;NqGtzHHg%ETgLY$g>4N&_(fK(L{KXv?im<%Y_O&sg^kf6=&&Hr;w93FO!em z$LUn2RIE1FR6~xh9oMBOsTcOGRR1hg%}J;|9hpxmst(m8`Z7-iE&;%Uo1!|xvL0_b9B7!Euq{v}MHZC6V;oAEx%<}Il^ zDIFr`$n%TO(&5;UzISO=X(dCc^2#lynkI5y9nUkeq1J=VZ$oH-wW+d6CB>9+4__Bs z6d=agq3&2J)4MqTh7^%plhuvvC{6|ubkn(OE6fX0|$0NyA zi5lk2=hRnpXN9$tM!6D4`P>}G27>>4ipBh+%2@o&{?K0C>Kx1_ZHBT?o$b(L1qsrd zfz<8w{o0qlB=x>2M;G(w)n*ka`x!|6g9(@>r@dB>2O4j#56`?_q;LU-N*dWvj|{?W z-n`SYO`=>KT50)X@6bl5@w!>vlvMU;Y2K8xrBCp+HsLI7jVmw7UG_`T{lyaCepH(M zc@3w-&Q&eFe1*s^Aa(F4X23|8olW;NO?#~PIsxF~Ahn21m{F=IQk3#d4Go3vdXS!; z1&W4Ib7U7c$l2s~_fB1_+ts?r{74Ic-;&h4f$~2ep=Pn+w6!1jI4kfovQpPO(bRm} z->L|eQQh)p0&G8J`L?kB`x~GL$CsCt*SciCw>BS#z?`eRB-xkm z4&38!{zUcA@DatycN=ZQ$ne!p55~UqAF0NS^OTou zzKu_PpDn;JDeEsd-=M91*cYl=)i`|XiI2*AjikJ-o-q*v{pOOs7w;gp!};s=-%2e2 z386IXT&z17F$;z?42}3#cD{QYNUCFFpg4XxfAK-GDp<> z{QyRVnVOI=RQq&sBMme3Ucgb;bp0E&FxRy1p32Z@NJ!NA+<|DU{<6@U=^xVYmFtvd z0`$hs`dLAo-`!-s^^HvPUU^mm$HQAq7EmnJg!CL?V6lmPgC@uB71k>ibqqZUF964% zKrJrDmwdV1v{Q%g=t&-6*(o2-L~4AkAb_cv84+dtCiqA_9l>Gf-69!z0t~B_-lI98 z9(fCK|H}eL&6cjxV!kEUCab?!MK-@S_s^8H$_e4sG}EkKcSFojOomgzQp}$;G8=Bi z@t2y@zZWtSUNT|^C??FBD0%~cVj^*$-bN{aVdPZ^tTtA({_a|Y&|;HU^`m-2KFnT% z+*TUpX2zI3t4HXa#9~z)>P*(l@6Z7GEUhRFZpu(wfz6*CY2P)>)O;^R4QV7lRG-qk zpE~Q%6Qa%HH>Sp0j-#Z7id`1csK+X6)oJ&Vog<41eE=Sb?k9?}2$wXu(ztMlgd3w| zS3Am9)k0Js7=62k?u&OVv(P?w76H|bRXdp#vYCU-$d3|X5vPZmQ=z|E%VHuAk|~2r z8K}G*v$9Rvq~^x3)k1xK3opqWBRqA@^L(~}YMbnES8;CXC9SELgcFA~Z@A3j^tO2S2*G>1sYkI(#e)_W?4RuZ7d*EJebrlO1ZSJKH$|D(?4@m_{zd zSTM2zugJFitaB66PhxZ~Uy46c-S49S{1o&Hfj`gAhr~p4|KqZMfblbZIhzNGf2JOw z+UpH^^Ko}U!-64wL7_=m+lig!KMH_cwlhXe+ z!Yj%=D-tx-w6mPVJo6>Q#IxkGd3U3NZ#vZfe0DMEU%I5dRhi=d!ZI(_XViH9RV#@2 z0-Rkjh}zBQUfjro3VVf7CA;qHnb82x|FTyypK}iic4_L(?95sh0c#rr{TcUL%h3`V ztLkhH#`0Ls?uJoLdiQ?sur$Z|ak0vhy7{dXkiQJZJ%AB$Wvc?F z+#L$vd#ew<3u|GfU=4*h^0HOzgtP2`+~E$@*L`1TS=lGt8^e+jrSWb?WVBlU=KziKY;aSrhHu`G&QL(AXu*+#*LmzP~E(>2r{~+t&8+JuU9ycl5j2H$X_Yg3TJ>jjk7UzxU35 zK*0ZxkIwdb8GrC5l=|4bArMw06XUz*vn{s1Ms&@B&_zPpaw9KhltQRUtu!_+?6uMk zra72?$86BG91t+hXf~AG%~ByWoPi$$MiHABi!Snt@@w@50yCJe?+~=WSzwMF?X7lj z<^$mb2d0hz>Oz#t&KW-{L=LM*G|N^+aDJq?Cd6*c?kWPyLmUqA_RHs&G?fkKHnS}f zN?hSXAKM9BLT=aGedZ*kVD_G_>Ea&L(n=V;hiA7~qI}0vEFh*Tf%^{rGY`z}M0r2o zB5JSE;dp?@Kcnck@gGl+0bFs){}P3J!`JWB!s@iYjzWJbxaB9#F3&Ml_nI~x1Q=H+?| z7GDe70Q>S;=OZA;rpoW8XL8_Df2 ze~DVWJOnCk#V~_R<)wa^DQnoldsu^t<@q}fs=nR~Kd7$4>0E2dcx8ZM;@eT~u+`?U zqSoW{;$FdUg9{I!2l~!x9^z<15>8h5?z(D|<1~*Y;^6I*=v%Y)9Ww2mF%?FGF%R!w z=F0tZKM(5w?q&A^G!`~>lw(TI-*LRP%Cx-)#LRuq{C{LUEZvuUkyX^;CG8dinKq<}YdVps(lzbCJ)Mn@-u%&s@yD=6cp+;1ig%p-zFcY4XU|wUdse(=4KffCwRT;2D=pDkBx^3WAM$U9U zkL}RvWi@pm-^XRN$#nd(HKx*uS8wkB1lECQ$qea%y6dCXB`_Q5w=q>e6)I8B;aN(6{MgJ@fl&t0D9qb2d~S5Z?>ey)C` zMlg%ZRLc6}e|&+~UhV3`gEq3@>q3SZ16HL}T<*3x@|ol9#Jj&vxbLA9qP9V0QlCee z#7hJO3HV;MiH&p#LmYv*cUw*foVWvr!_rbxG=xcL^HUbtz`msO0pqmW+082ADaPW^ zHs@z7e$mgL`k16$^N|mrh!$tQn(lKxRD*1q%Y;y#MP}lEMAgoCmhuj3#Uf9ieP#|c_0_Io ze!)qHa-IDe7DSXaIn&H1E|N%|Ijh^9wr)G@?) 
zIa_8f4I$xU=BhyL2#76!^(9{Rd^yI#4R$=t)4WXO=`m-3^0o!!;%hN;!iHYtOCf!botWxtEQcLkS zl|t&9&|fD9QoXoenOe-6=|s?=KOvEDrp6{6Y{tlaTDz?Qhf^aFX>p)(tlI52u6Bf-YJuieHr5TuQs)++PBu+`6*3fiZVY@A0Ov$j*k59rLy=e zjw7)Y)4Z0w>lT!<Lb<&7oaods|#MT9FycrwXhs;eQ*eLe#6K;3QB0w_8ToYKsvy{(EgGfJh`uTr$x zHuQNfLH>m0cM>Xus)V4>XIN;`4o(7+AOl33mCLxP7;c5*3(KoZ@fC4Wsjs?pq#B`u zRF&H|v2g~^vntSPdYscz!2x=5V{ETf*7$aL$u)$orF-1U2Pm$lv_f8bDK$W|8MD%N z2QS8F{1|tX(QtA@#^T;kDyi`;R;H}aI*RkANZ?mX=tu=WZWD>yIjG8Ga1wDd&@?kO zMWWOoW)rh~R?ew89S_`Opi#f_w~8@))n&kR)i<`m%xC*goOvrb0HcfV-o7JVr&>s z^7=L-J+Tyx$0?xbBYTpUr1w_zL zVm%jv27L*l6L0iv(+6s@&O!9sBUxwu3PVc!;@nxE)A^_RTOmWx} z`LeI0r6^I9Z=&eLKtzYwfsWTfD?A-!)*1u@c$7Do6@iw;67a1B5^{^nTJlX_ zl8*8-ZluYuCw&|{e^Gc4U7qwEna5qCc2ikwV*;#To&i;x5 zo?sEuV4snlBZ()z5Y`X+cl-{l2s*{6*5xnnaq zTR~&D{+nxA3KI4_D#-{!{wO^6eZH4D(pmnotDEvn_R%Xzt*e~e`YkZ99BcNox&+Y` z@&=?F39D?YpOpzSpOiRKLqW#`G{}@w)D*tLyzHJ^B|M=#BqA-bI4K=8_T?k#s1p4! z(;XwS1Q*DEr~(myDkT3y6{=0uUn>H*3fbA5e(_+l!Lozjsdo6Od)lRWv3$Oq+%m9G zXd!m?IrHmRVUAPGj16T1rp)W!tGQJ{#c37n+t~6&!Ka_lquZV{%kJ&P130>s!HRP0 zeE`ttrY6Hq{|}&%bqQ#EgF2JQ2RVPS&-lGhSk6nKc{yRkG^+-EOyu5$k)cDg=v!gf zvbc4GbnnYRtGpWwr_7{)^JYq~G3jZS!3%Par9(845yo0(Os9Hgph22mAa6=R$@@^2 zkr`0LMAeCiTrT=f_U84G&l+5TaB@us_Tc!Hjdf5RpQ-xJ;D;W7*MORl^p&S|4ll=2 zT1dD0bTJ2Mnm$@s;Sv zJX1EeU!c|!*+j)_^Mf}?^OfiP;T6`S?}0Uwu=9A9rm^iG@MZ=J+7)h`q6VNUZkaQk zml)2BDU}YF6*S*V==X&d?{i9+;fkOYB%No%cbM+qHCwWlqagOt03v}yu^C9s*5Hx5 zYi^EeY(;i{`YpkewoTn2A$!Dj8rvqm^4|Es#PLZY3rZW3^~FEq_g>7igH}d?xr`wh zE1sLSIhi!uz8w7@ol!=v_X9um=|mo`ZdLusQ}|2t@~Q3J7>ftqy$v7whQDcdExIl} z68!C~(tQ~xuToFaS8x0WoJ#oNu@o<;?0qX_2z5{LV}H+$kAWfRqV)5T&Tf{X4%&7? zdbwY?OQ~`wcQ{GTN1m*Mf0Uo40fR58^)z=q>R#0onMMLi)^oqb-ySwg|EGP{ccuF( zU=#_t{cfSn<~%#V{_yG2B`NyHZ|V`n%{6YVay=eX{xRS!D0jv9S_N2h_@fE z3xC62j6rSVgjm_Z7>IE7i^qYuNj#6S~GdIGF>wdcTL{9+DR;DBOW*`71H5<>`!KQ&P3%8$zmJ>kkfv5`+Wpw4$n6ojuEtu_b?SZ_q2r{+5!pnw1r2LKXYKq@A_mu0!`F#6+^@z#toE_DFT5kQBiwUXD{we1 z=-9h%-#Qm@dfff5X|^ptIs?|EyK8M2DF)jc`C*>O1a@3U+OqxyqJJ@Eaeka?Vq>ON{_B?3xhVq8hN&pbhI`wU9Z|#{ z9`jgblBro5ZUXVYQIF>?=I@Ab#81pFAg@4H;Ed5URZn`3~Yu3lkJ-~z;^hQ z@qR0dE1@J$|Hz}RIDaPNRByO{+i!l>e>_;t)YIvM2St}3Sk-rYv;Cqs z%Xv#WLHi4e{>7}rSslv}uY0Kbo{e-luEt{fi3obg#{Eg~N-X7T|N8{4Q@AQZk-FyAN1oGmy zGMIpPL`N9@c6qrs|sUJUuII!kGq~ zS3xKk&K*QkBv4=TlwN*S!a>|O+{yt{p(K6T)S z5xzPq;5!fDN_P*x-?y8I3O<&^WpuGJ@WZn8?0oZlM=5Lb5sP7ZgiW%V@)?7|?5c4o z8fyT@;d3XkjsWFcX&6(wSSEEU3|xHGtt+ z`fu5zP5^SbR?e*WC@7*mCo(}@UfXKrsYAHiO*uS7Pi+zsi3JTSOMxUsdVSt5c|7ge*P! 
zpzJLYPDSUi&r|>D=@K{JaWt{sdAe>$%%eZ;`?S5#nE$!q-x#>OWgYs|rylrA6_L>Atwcff9L1ujcN6&du-mOOvRO#WcB zVw zg8n%t%0&0944^|?30#)gGgPq&x=mZPr}GAC>8evO5UfSVzo2$ULgqBU&veLZEyU^_ z_Q}nRAj8>_rrR-MW6`adA{gRpF6o`;!p3P%^ zi+4;#zk@Mp=RUWYe)Dr=<5(tQhHKk+mIf6}3Vxd&T2&%7*EWqFiGLbzO669%y8;(#zrUtC`|NBB z-(6?9wvGa=BKKTy>+{Ui=X)VaF#qcjhy3G5$M>9rh|zTn~YJp8R)UdymQAPRD>u7hc^EW&34 z9zI1qsGj66=!ZXEYB?iFZ;F#^W;QBWq#Cy+hy!wt`@R_ti^$>%TY1Tf; z^#h{&D;aGPOARb6UnRC7rfK0u^kV`}?*;7TSRi5C(eZ|aPbtq;%P2{>veo;1JAAMk zi#tl4SoKfCD4Kc&;ZU^VuxqqsMh%mlFQVsMO)13d8Z4-^M@uOgMx3vg`s`yC?nt^} z7pFl`$G(Do&m0V<5BXf$mqMP})q4#%e6?6oimoYnf`lDOb!hTNntqJ&+=w*=um|r` zIg4obSE8@Y%N9IiAZ}kEJ6#wC4z7J-W(3kRdZH8f9AaBW`ovOXNHcMhshM)7h@CIB za9g--)rz@1>T818&hXQbd2g(g)PjL?c<#zBgKlZBEDk;|sJ)z!L{x)Twr&Q=STS2hSN|pQe_G04|89M+ zqz+1;3dy6k!=y+`sE|<-rHieZY#%cyAhwGX-IL8`nX;N45psUjg)o%fwfm7gv;`k6q+ma zb;0ZEcwp?sb5D*gr#1e65}h+#7nrR~@LL9iMCp~^aAIv$yx}|5Mm5P{ChsaVv_z1I z{080q_SNu{c1x*X-Nv!BbgY7cIJqVLuT}*nF8=|)xE3=cQmymbNg>l9uFir5E(GVf2dDFaR>>u59ai`Y_KJz#btcgbFA=k>mm4_EYWF zd{8iqL#W)cT-##Qj-4&8hT@M+NE!nlEV|INq=|xmV6ADADW!Pnd0S_Os={}RpVpi0 zGo%Fz-eH@ML#XYtp6jFfGwRW%6d}1*QNdypSzQl_t{(R^7Y2FZ{$60E} z&CYh_sCD@2IWhGN#sYoU_xi#(%Fb|3MaIL435waX)TDXh6y`R*>+mx~`onJRVbUt` z?C00PciPsa8f*Myck}9n5pM3mm)rlm+Pnd=LO=$<=`qxHIgp)~mcmwgBuH*${b#Li zS{O8laEZv0KnV{BL>!rAUP4b>E&nCw&2+XbEX<^X6c76+2=ESUiT)FbZVVbqs1O8v zYe9G_AO-IlVC8YQ>Eet*&CG;LQuE-uuyASEb>HRAl?BTX9T90oM3KtGS>mpi^2hvw zAR95(M%JaVzKa6|0RBwSo^;8pSo2U{a+{10qhVTRXk-1DTnehOq;D`3@J6@WyAKVn z`7>!w{d7?hUyu5Ac^wmK;U-q&_YrbG^3|zVf2NXorpxZyZfvZqBz^bzv!O*JE&dAZ z4$j^YJdW?u;3c1zQL%A&YHP=N&3dlXu|R)rLG!bWjObb*NYsKtqJJ$tXRL2^!Ja8w z`d;C8$8D%rE^Eu@aA&Ou^0Le#f4z>|(7!}$cu-sko2z`{9u98BO&z)g)J8SrS=39;uzF^ye zi-L`H9OQf+VQ)nf?;rJmDz^kANXg|{%@s%(LgSxN6=gju#&gCmT3e5`I~Co8k6KxA zW)IHvBXUR3uRi;pFGvR(Xz7?(kPf0Gr*HJW

$XrM!NDsiVutG-G717Ci70+D5De zzR{^j0=Pv2>nrJQ%()LD0sQ4%VuQgHOJ{ymNkMtK{OaCqd%Dch zd2=m(sk3PjEoP&hmr&tctQ>976H!BS9!B78d!dj$TQb@DJD)1=4W=Ti?mJ>>7v!g= zWAY#O2!Dy%mx$&mhk)U;Q|D6IJYUHOO>9`XDANo^8KnA-PTMiRZUiW?Rj~jt>@_RN z91kpwf8@93n`x*tD<~!QBCQ5tnF@=2H@a+UG%*tLqzsZW3+upG+zN}D$tt1c* zV5`Go`4KO}oy`Lo!540gwz1{|S>tj zAfW;yP*z$6RI_=jnnEyfiK5Xxc>p%{R#=Tp5~wMyDvy%`UZ;Tm>kMF%^Y3I=JGb01 z?xnz>N(_H6NGe#1-Ymo^a9e2S1-FKs0SdN|mCcj^LC!&%7=cl{=#}esd0p{!@Q0-z zzHV+|D?ACHs#^Tlk;_dPmg~1+9<_2HZF@S2dK-v`GkqyOUjJe4j{)#l{VjHq?5o_b zjpbQfKBHoq4|GFHQ&5BaN+$vbx)Tk$)dI8l#*7EloTBEwTAC^U7E+0Eb^b6j$)uke z!n-!aJwe&Lvkw_G_wqC3GNDr=MhB5*vvcl6mo!#7^ zuQuII?NA-wMZMTMuaBMwLc9nsk2EjkGtVqmXsf18v-okVW(X_;NfF?Iam+w`ys@v7 zD0dT<&NsFCozQhGtt4)$-eS_t*o_(O+))G7#EDTHH4vxs_oQ|{DcyRe#AS+{WcoC1 zal7{wgJg|=CJkh-Ls*zu&f~LPz`8ef!F0AX!6&eJAvYs!e5!GbSW8CY3etTy=JKb4 zqSrE@Lct--w5J_meo4%>+P55!eAANyr)z(xK*29xiRfc46-pSG2^a|MEWNtdMnh?q z<{-WJdkWu=GSE%Wv_v6MQV6)a&H_Ivxxg6-esb(Dk+j9{Ap*c*ASa}57QO)(LCY9( zwHuy9heR&X0sb9=e5E~167!>6ug6XJHqcBST5eg67XukDmRB#w-W#f_TGvOfIL;cu zmUrYCN>n-4n|hMCer#sco%1uJP*S}?-I9a!I?VG4JVD=@pop4eIKX;4XIwHxn)GLw zENCd7RcuOeuLr5aBPL9?j*HkTf5fT+i)!Y}e&B9-22EQu z`_S^XyML~N9)aa!tFfI&YctArf{XVm6n* zTv5sZp)+h!BZWhP4I}~L^4QpxdRMMZW~y4^~9o{)4})tIz{qbBUs#WG6JXt|5|0 znVFYG@VF+5pT3k7_&~0v`dc(4WVOI?pgb@?ZYOnK0*`f!@Q9Nu zO=B+(7Q$W)3%!GnMZy4+<$vd|(UFMFRawsf(*It_wR&|?=dLk}@bClhrb0>f_5*V* z6A?qfKoQ|rRy1Luwx`G@62_aY?yV(Ib) zjI4POnsl&Fa6q~$b*tSqc4MOeidQAzePWjhSmKC#_8AUtwQHc5B(9XMmzd?XNxT-%} zgn-D!wn;U0j-|-|+!hw7*ESk{hY_?Ti z_j@O%6osfF=WHKRsKn{U+{>`+riAz|sX{Q~Yy*~AUvz%`SOa)asU?qT+=^Mh#l|JW>kZYi%jB=1#N6R~-Mg2ue*})}^Flxu!w3cN7%saQ z0O|5_X02pRD!TMP9u6BB(aR0@$yiFY9{KSn%h}I`KdU|C`M1wDLF_`3)?o?0*;m%* z3_v3_3BOy-xrUU6-_!l)k}djlKePTn6PB2$a|g>W=G`K*Ds3|DV*wQg;etQbH*EeA z{W8!4Vg;W%eAoy#>{~2f;@0`HUr$PBi$7T3zfnW@@e6GCQNpL6>3-vlt+q(Tk5vBk zWH)Sq&slsqsi!iIoVWhO1^(WtQP$Zu{d3x6Ve{z^SMK{n6+d2;{pjm=u)X`Z;}iL< zMQ)*mKMX(K{Ev4P+ogAvJ#qPuSLF)rvFE!U77sVii~5LXBTo?Lfx_pL0T#qYAFtZf z18IS@fOpE=hr1#-YnRRg>F>GNExa_nC$&1+S831lA-zWE$1i^1#dSNf-TFp3{?|J} z6G(}5Zt!ph?D^+`_}Y;U+mV*hh6P0rnhNWx);EijrV+d8;!&k8SXnO$M>^Y?ZZj>= z<21`M({!%HB@4JIDmy+A!X0UWh_WZxxjrR@qzi+#^2Q}cg*4gyhKp8NvoRU~4JvRW zR}V#;NuTJ!G}m9ZxMP`;NV2o+4G;lqMQnfi`M6h^n7|?cdETgzH1FqmQP*Tikdm(j zOTVSPysFbSKH7f7k|7aZ@g=9#)9h1$!0O?hWxrgRqokBd*Qy=(z4QG-{-b<+2sYZ< z3X+Wp)ZZ=K%_^a3-JbTN9vLex&v?GL@g%5%kbw$vA9t<8K8n495vA6y+NyuK!-tAS zX$J2Z@uJSz<%FkxiLJUVjN@!~lq`n%PfVpv2efDq?uBs=v$}jHH0eAf$3aAWijicU zD_e5iJtqM_g8E#jJvvT8$8kL*c4lG>uO!$!s8omSD)yXjHn+>WQa$0cFKoz~Gkbn| z^>b!m6o1Cq9mRAJB$xA+*DA%zucY>&71?~ch+P5>1h7CS<&CfHidRY}(x-V~f)o3^ z-^9vPZ{9A2F}~s*%#PiOCLBamlhhD%e3`;rOd1IsF0(@-xM(bdfb=zJ>45$x{Q^SV zvHlKwxs~^`bzU0*#LYB`_fye9{uht~&Lds7swdRRcIFY~s&7eMu-_>bloBi@yF+qsYPm z?51lNCT>dt>wz^TC?TiGM#mo5)i1Q4ne@lGR~wF6xn02_Ds)4esuS*2xmy0D{Unrr zp+LcX<3yLFIq=!I>%{eXOMCw&De})(?kJI3t3W<;JRlINAcsVS5 zrW}MUYS}moG6edePl5<8JT5GP(3^QCr=5EM zsnU_n_Dx|j5Z{@G7^fDqas(Oz_pK_Td{OLK&pj>X6yU30UQ?Orw3a-5OdZ$Xm<$Es ziAmP{Ur}57%qi%tNkg7Akx3{bE&xXz53EJQPI1Ab_lc|u756%L@(U{;id|82GfUo# zx|Uu*u>cp;GEjVx_6PB*&m*vV@N?H=W!E!PEe^v4!7}HQNGnhtuMbg>Ulq4S)8uub zvwOFWI;+coR)n+};!7?$VurN|k0vRp69APgH=SMn5GD~nvB23CSC=Ay>%oVPiFGjg zwP=XxPoOJ;dfCDMK-8tONU%A|4NP_gE&|>KOR_C2%BN&YK973HDt^kSi{JHsf+pRZ z%5y%vrpS^kgS(m1-s}RScz1|Z=VPemf0z4^mn|ST#!D&Qx7sI@ze^b8IcI;c> z6GZY!roW`)7XkYH{+X1-f@qbYsCgdOea#tun*qw$+J zS)zfaOzf(C-mLvf<-K7FyOs5`u5sZ};pHH9`GFuCTI-68ffEA*$R}0@PJh&4ugG2g z2-k{%WJcu+Eq&lEv;7Yx_jh)i3B#|A)YCaXi0`NVSbSXlBIw8Os}4Y{ppQaZ-?!4s zRKfpEnSK2OcuM@2GMfpc%$^qgmn7JZ`%4r7{n5?T+rsMbKq7psGyF^RB#Z7vRUj;f zD0QE*U-;q1YRs19Tl-9-i;#V>dxequH$3b!udXbWz5M_1hr0n;{RnrV@tmF%YTL8Z 
z`1*VDZVWdubD7}cW^nz7_A7&yp;kHvHJV>XetGUBAd6VvkHqIgV7-xX-Szd__&*!S z;r`-y4moi7n7mEX?~3f*)QGRpXDw$k2NmfFE>Lzy>=Pk=`>_=qGJ9HO&s=t|8W{G| zdgU(>`)=33scq^s4+&&3o4^nq{LeoV((7VXAHlNhb1N8YJi{!>xMIaEjVe5r!BlbR0MgrW{;R+mFewaiD@IW zTz6w&+thV~UJTX^Y4Gm(6B(=UwDU3r1yt*hWsO7T;Eup7+=>kEO{D%(i-7}^K+Bpy zTBqPxtDD(VN(n(@hXf@pK7YTMK|Ns|8kE@BXYCu$O$upmB{MMBh_<%(;yu!;oGQ?o zVMSN1ct^|H*Jz)$x2!II4MZza|`1~-{EA~-l zRNabQZZqSl*NzAoN~~*=Lsp41;|VBn5YXg?Rz0{emEhOh6Gre2);KRg;TzI)?m z$YaW(^*~M)4&@u$+uNV+pZ9$C`FXtewb$qJpD#F$9{XmXM6{$htFy<%lg3ma01|&k zX5KO?{iV|ex?hc3%T`R6xki^IP1|;MR^&LP)}=lx;An|_=+1GFR@K`$?)g41wt?kd z2n?tHifwyfVJeRmG|H69!4PeUwc#NBKEJ*;)0cwS2$2qfN#rMOSKiRK z{!-V#b~{cTd>+ojgouJsVdZ)k^n+?N2=o9mgs$R%mOu%yRX+^TY5og~*4~}D#YODQ zbv0?JyWD^soC^qCk6d7yZ&Hdbcq#7$y>g=e7-XE#GI)r%E%+7yb1U%)?6m6g<(P#P z$|F%HZM|q6qSaqNHa~oTnikqHZQaC`LvpvBCrqVG^5m%B`M$lC6(CR@>IEevfFhnr zGmAAnk5wDpPGZnunH`asT<+Ta=?6pXin7RMm>I4o(5VzzO!J0RNzx;V+ZpQH{5c( z)U8VYB?_z(k`o+sH8N1j4A2l40zhLwXl+UvnP{Lsdh{X602Vw0*pUJD)QX4%`QoIk z3!D5W3Y-bXA>4jSXGS(!W;#Y(m%(Y)Rwbz3|BJD=3X7xby0tsOf&>fhPH=a32~IaI z!QDMrfZ*;HoFEOs-Q5We!3pjj9P(G5=Y9A8eb==Q_5mDKS9PydJ-gPu#vFs*NzB7! zSmrMqrOnTWmH{pWdM%tT67R@cs#-2xg;F7c^>5v{6l&+E=F~*v)fE!uyez^s88xc{ zj|ZD{w!J$MWTsBv=cPbBEh35<@-MrQIKmA01HR;9j}w!=HeR_eC=|+3kx`kEE+@5} z)c`{n%~EUhG0n8u7}GyZ01lok@O5$Q8mD5sD?4mkSLMW(L$H)Fvg;8))fK)PnWr;$XbNZ$Vh7-3?14h z(92dt%1v{+&CF`5&Q5j3sRBBW?0Z3Z0%_zZr|+vPF=7(zzhyYFbtwnq=|(xq#aEkc zy)kY(BN}aBfC*Z$hVs(o$~Ubcg<9&bE*nc};|Nc_`iT-7>BzD`jl$_4Ai)H#9*0dF z@-Pa4>JWg}8ohA_KF%qZ zMiO8R1|jTL8;5BXum!dq2bW)Ie~)huZa&0EI#ZpTgQ$(PA5pyMRI^eDoa$BtQ|jJB1>bJpQl=2jWzsHUXn&uW|0 z9byK-Q(;qHE>*i{A76P06?5YnD|X@zvd9cmz!6?ws)H3;pTB1PvUVi5ZKb1vIv*QV z@FyP3RK!#EpgaB0-iyd!Xv_$dOB#||2K@}2C%D#`SR)7|VW->gErbW1_W$$ijE28K}jLs|M@_`Rkg zt$E%GEY8}oIH~kYX%bO`1zQKBO3AXDX8_O*a#6I1o?8&naK~s<8_~Chpb(YOHPEX! 
zzDS|2IQDzfWq4o=my z#1*A2XM)aVgDiKT$1wPBqWh&3DAiWPRVd`$Z&y6Q80-QOXIh+ z@$A1PS5eOTF0LL&s%`Y$BwyzF*p1fr+0StmpC0SC{BPe3Os zbzA*AhJlJ+C!iCTv0Q!wc*+&&Kr$?A(JYGyI?%kxCJLJ6mne%Vd~iKNC{X?bYPW1@ zLtv>?al0cHa;_``0?Yn@5`TN*KYxl{Id%1z=WPFLSze^>AAbQRsmn5W8#|x4a^Wgm z)oy?qIcG=0a!-<-*3(^7Fac zv;K6u9+*_V-LV&|-p$*Js(C!BdJul!v**qCOf2IMacn(%Qzb%)2GL^2E!Wm)W(x%> z-puU;L5r>-kLv2GCn6VyZz#R}MoaI0^Vf`JTV~^07WF(IG)0Hs@wf3Il(>d!{$*10 zaU%GAT0I|)A`2tziR4o7v`%rlUMtpL>QkRAt!gvSEt<+V9tX#$cT&}~!DLW>9V7}- z8sRJ)el;NRLCGQEzCA3Zwa$iJD|yf274Wd;=-lv>Fm~<%bsw|5UxXC$ z%`{-vGx24RZ4D@neWq5XY5W`Lh5;>Rm7LgC?CFEI-|6%M;=`5^Nm6qBh@PM|AxQmb zX5Ux3qQOK)o26qI0{7LYIFvl6qCO_05T2z#*tMKj-ebnF*dbde6BN(M?HOx<>khiF zb0HxuoEA+iNkh8T^;@Ujiz*w@pOAgq)$6|nnk{9CohDi)xc|IFJC7u#KFG0`{#L}% zo7E~`Q>Gn3GOF7gKu~hr+pnHR>2WDd#VGBt3$sNckyWqmTSBIIaBgs~!z!j#`4iR0 zV!N^b;u`1iQ^_~R%ld4fsazM-etfcdvg|0gSkrQO&zx|xO4SW59T`gIp%`X36HKU+ z$rx~XUrhEo@kdrwe5uvgE8ZH7FVP^t4B^zPvF5LyNxz=0Z z$~KAu#V00gV-i|n%)e#c_^Eivg-AzDu8fbdVXD}3bXiyTs9AZt~9Xl~>3ELzm$NT&L?Ck?g) zCA0~g(uW7PbrKbzpyqQP81 zNwX|4l2xQVf*%9p*K6?Z##g-w|4xvyVp7a}3PD&-9GLiol7~vw3P)_W=JH-R{^o>g z5j>OZS~?apmH-T74aNvJGuQ=& z@AGHULzTz&m$=^#F_oxdhs5M_gKEC~KoY{gy{A+i?GlxUnxAG{&ZBY52P1h0qI|5u1JV@z76OLDz{mPKJ z9o4@bPy*7Y#r4CwHp?)e;MH<|XEdqzrhgC8kAW6>-B^=(5&zXJj^m5zg%8$xTW6SN z63= zzNzT`+*-|L!BlZl(f`zPE%$N-q3aC^M^TYcOpvn$-fT2QWP&Ydo9{5+Jl&`?P|i#` zZucmIJp?~7mSJlhDwj}To`Faaza1RZ$Kn3*-9N0OefF|$_^kpOV|BWKQjw$?O7njY z&XrJv={aL$qzXAb-XL>CzHBC{SAJ9I0#KQhcmu0A<8Rt#az%b~$3xo~i~66afVnT( zJO*dtduEd#@uX?J4k=x1=oH;RV>p_keH@3Bgz`@1hwwW}Yz5&^FNrxXY=b}^hgK-n zy{bx=W5JIKMUt)>_&9w_cT4|h89C3}l&-|Pf3>!@$MM3IGuzojtM||mjYKoDPnV6e zyMs&R)URiN zf5S&uW36WUn@-1_(r;GA5k&|oT30977Sf{g#!lQ9c7C7ov*~G9 z&JC?f7ai4_bP7IQ{7y1si3DRA)x?i&cCNOmN=YCrO)1E@Fmt#Mk;L}^?lb+&(D78v z4mUyya2K}CnH6|Mqo@>Oe2o`pW6IV)R`zf&Olm`)6{AX&{62vYuZPfIf&JY%Z9VI^ zgrJ*KuOf2anJP?qmFcOTT{!?!aBWPSTuSn=YEB2tL*!kpsA5LGX`5;$pV~{%ogN=e z?mn)n=R5wU>F*bpojx66GXBmOGZr|nZYCH4j8^-c`TNr_lVGE$6e+N=E<RJJl#br+LgA_PcFwwCJ*bKcVpbUdXYGoNQ%Q- zXd1(XYzB@7K_DOy*Pyzjw$8m2Cm?44q;peKVP+p?E2FJC zPQvv=R6#(uF$4qz)pexc(64aK!*$BvT;6_mZ6`=xVgG8vXW(D%t74$#(t~~h4Nmf+pHnH?)z#HxIAG5l$4yOW#TWOj^$gvBFugVO zj>LVSf8F}cmcrI;BFC6jjHjzKG{H6ev zA1oJ}dh@VJ)=qa8Vk@)F@Mqpkc(Qn3Wr4bcTAdr-5~K#Oq!c)Y^A%;Z2Ing*z+c=I z?4n&!GXt5ELOCD$o>0TjlSHJ)SQ-@;)2BOXjnVh(o^z~S0#V8u&hyGO5oGIak;Ep+wFHMDZp&C?l8FeUqNhXT&IWfN5vd@4&a40LQ=aXBcCn#9#{b)Y^GBlx17)<>f=mixcOXVlQl3l?({1{hs!lpwvu0f`A^{Vufk(&viAMuGB5m z9Y;{}zZaLJvV04if~+q)*j}+x5|sD;VCZlh|nMOBY|k+FrdRU>1Fmc!G;z2`{(DC zm3>Rrt*em`R7r9(`)D^jxk@D&uMI~lJGG#!YAB;F^IAei9qZD2`{KQo77d$Zqq)fn zxhiIHU>7NOcG9! 
z!kLUoYm~;OCLpRyeKsdaWLgU;lf&%(8V9(}$i1`26(8Fvhk}q66SBiqPga8fyQ!}tbcncC}*e&qg<2vZ`zzn$5LYM{1 zZdwU@Z!de~KZTc?^|zIn24o2Q+>jO}jP+2Wlr`?4pHZflAh8%GsNKG4HGV_%dq3G= z6>8b9rYK(#R|hvuNhm{=P!+EqWR{MCcd6%Xut{7O!my|?BU>$#ezvUPlf7~Qi~8wW zzS4e<9_dR)5~pJw<0#28@G9ZWMjNtiR-w_b5xT+%Y-ot~-T+648b7!%)wy}|UA2$> zd~?y zZjPQwd|Vz${9NG8Ia}+{C`F?{2zk;iQ}ZCl;A@D4;LK)L)HlyVV}-AQcfrSo$n`Z` z$!uyQnE2!wJf-mz5uyfOkQ8qFX)VmJy`}>Cs%e`c?6#RR+!%GCopy4=pLe^o(B1ak zNS2{oa(%PVVVGV@qk!l&X>E+)OH>K?Q=Gk*56#w=hj|&+0R)$A(=zG=<~L(dGDII3 zVZKJ$*>3T-L~6Z^>T_dpy-YhL$vV{TuO+K&5#5YcC|_(}?&$m~4h$;j(Y_h9wD8l> zttS`CZfFt3E~fApxS!q$EoU^(61o87s3`moNPXb2cXT30GnDK%YTV&gsc;s5_x{Wu z(3uXyA5d+HD`Q8igu_1nd9*|MFkmBgu(c47x?ufK11Nuv_@BtNk1%a|I1Oq^+K@>r zHGlhm@n4>v+XITfe`WXFM)i@(V>V?0z@wrf%et+Oocx#bG*z!FE0M1qDkSFVA4KLg z%ep5YCKuj`OgFS~Ol7ye1%!SJ{VJcj4$pl7srgOp<;On%Mg!7)|8lxNAnrdPAFNJI z*Gf%ngy~8hqC_`u)yz@>=TdFTQMd^N{Le6YHaphO#fA);C;az1omLAYbB{_L8kI|~ zq15BW$qpBVwQY6}>9UhJAZHNj20|R_|6CD{MH5xa<|W9kV7s~rrfbW=l1SQ2Gx(ON zHznA$EfzX9RXEE}kRu67j{YOW7}h}46r$p&Gl$@i13PvmUTQywy5%}4`VP{*GfC-j zFv`&peYS#{M^oAF|NP#oGlnK4MxKH}V^rYW)WAYW^_le|l~HFz3p3IowfrVaMU-Vo z28&JlH3dcNeJPR6csN8wZXx#%=w4qPqg9#Tb{3X#Hw&X0-qPQEhhe=i&um^#Pzq$CVd3=K{|rbpTv7OU>?NJdHPnX!xi06j zr%DZ-r6Sq&k3r0r9IaZ0=(^a=RlD}o14a1>bW8ZuNX<97r4ig8z1dlnW&@Uvm834^ zwGZ>N)SddPWU}*3wM{Qfc!PFBn)lRwTRyq|hpD7- zmU<^FPNj&>R&5LoV*j4K1~~iF-?KA>8_1DxY!;Y96#G-2o>v9Z*5q}3_mLw_G4=b= z!g&+gGfVrDtGS5sHaC98$O*dHi$XJ#LfsYcK&qQ8R+_pt|(Z^g}xcqz5 zh~6op4A%>KTxb_GAB$x4rcwy}R(Nz^q89!wzi>T>I{t!)yNUlFyLkVLTDJE8x(Gb*9Pukv!iZF= z9`#^%)Y7z3OC7Pfo&817tN$Wsw>zYqBh-Y$KOn?sr)koN&Ef}*g%Ey@1)~K(4^nlY zR6+aFS$24CD_kQqfiDI`s&!w#RNqA}iU7T$Yhk}10AE3sKOp^K__3GW&QIF3P%Lpv zaJ}!hWsrroVl$KX82!mr#Hms#^pxvh@_FF@9P*9bF-d;s^Vo}q%aEKfh*@jVTU-F1 z=O5MNDb&pB#+q);3R6zW`E6gkvqCt7TT^>De^IhItNP3^N`jyT@4(j3FHKgC9dBox zv!k-V+7W=%NM+!XqOuQssr<^^q5lbM-4~klt5*g;pDkAyy~8)egCjNULQ5DKFbb|^d<2{n#ZNELFQNzHyY#w7lTQ}FrDySXQ^Q!1 zjLH{V^mGg>UD!eo*K~l)6!M{pgp;+Km}^?WkvhH53TLOL3Iq~(IN1E9T^KbETbH+! 
zER6(70jHRg2Wgd|_a^907Ck&+d72s=~pPC0R5G4LIPdms<4q%_^qvv zOD)?YaBJNT#-Fz77}aUdUHY7(!>*=Z_%EAI$&Mz?AD=ah6J$$_gFi+k5->_uk8Gzn z`;m{T$s4?}rpwkg)~N0-ltUM0ic=5cHA!CD1HT*8B*HouM44mdA63q*?QAcPAWlMV z4_xbeERL%~pPdn$kqpgV9;wMqeCG%d7`D;>VuP-89H4Q=teE+XD*HNt**^F64YH5J zePcn3T0%>~FLXPIs=iE?l??^cIgPy+%ROxU?9ZSfcQZ}n!J*fsRQLQn>XNn6b`(sC zZcLm_JfGM?tg!eOr|%0GKKK4UXb~!FGIqc#i^)%U#cyyYAVD6Zz_tpGbQK=ca;j$y zaoOxgnE>K3Rj?r#;w35|7(F{due`GuZ4Ew*q*XwrQC9G}TxbDb72QK7| zQxGBw)J{T0L{nkTe-s_1z%vUQN)%E~kVHXBYY6;QMl-$j21gSP^Cn)O)2^j$W%i~X%x$otu$g#>Yq_zY=t;a~iG48iYt2m6QBwO?hO+{>W% zRS#y}fJf^?o)%x)0cO;8nK^W%2y~=Y?XI3~;+bf~S5kekU=WNN^M6MTZkFB4ijR=~ z%#@7D`3eIAf*}e~;i8pQQw~qM|R92 z5|}IfoUo+1Za(y$8r6>5BzZMGGi5xzOtG876}{K-6mlsTO)0>glFWda6+K0dGK!JR zGLAgB=p&03kpc^iXHrNXCV$3x9|2_yi_b+Qpny_HxoBxe4 z>!rvnD=6?_<+jzg-7n|=mXV#l9kw|3PMCdwu-yZ_5U6dK3D8g)h^BdSAOaSQA)RkJ50AQ*4~u!x2<^;UrbqTBJB@_=kY&?1F~DU#Ltq znf-PFb8|TZN(*bRMjn&V0aY>;ygUsC*AB+6mv+&lIrOf9Bdi&~@Jq(eCQU3MD4deP zYy!fbnqeXM^DY;}SW567P=T^m_hS~SLIfFVTC>@Y%_b*1_Nu~0*aM@!gMj$@ z0}1;eVdd^fS~h;Z1CDH=T1D|Dz1#To`yuNGrx)8U+uvmGEXRe93q3C)ae#$!ETCYq zkH4|=8J`v&ul&0PufO7vedO*iKOi+<7+a-0Ub@dL^vyKnaE-0C`M(~TeZ$46J|!_^ zSg)#6cYSbSLBDcIbp30}J>2>eo^A?jEgZWd#Z`)0Wb{n|&ReQ>b!vfrLT9I53Xgk+xPcBXczuwff=@q)y z=1E- zVShT0R4o3*)hAp6r5?ie7uUciWMf(UbWIs$-wu?dV_x*+0dNiJ|H3tlw!O7Y;q2vU z+kv9p`txJHgRiql0rJ8GQBWTgf{0qt{gO^JmdqGs50P>;P3xP0za4u9Mmm7rz;dp3 znF~_SWrJ_D$U>l0%FIWz@iFEIfQHzZ>|hm@ISsXu1U6UC;y@gr6VZ)#DCB^GGB6)$ z$+{W(Njw73I`IPO6$Y0RNPqLP2GRwpx7mh6fp)|_9e}<34OX(Tb_PCy6)X#}C?YFL zz*K^M&E)A5F#Quyr~Y>O5yC&9X%5W{Q%BZM$r1WB&}BvTnn8vr;ttEdP-^x_2Fope zSXu3oF<-+;nVum`bf;;4xLGEJC1O@qBR?dhq9z6Gk|C(xi%bbGP1MX!SQwVTKvq=w ztvoxw1He*Q5kGK^{+wC(o`o zHLZXc>9T8|aAT(~#g@(KL>3LD-z&w~Z4dFB|CoTqM6zMm7|(bzTid}gIXO44HWuR| z6XI!P`f)^TFeyHBkX?qp21(~Ze({iDA6l{@Yei3)^?d0G=8VF{E6}awBML-tJCq|U zMPHks@49c18m*@@#NadcrmXu9I4XKynio;QM+S8dA8kFq%x~#m0tsxTcf9)CQILM3 zDj}h%EyvDdM+NjQYZGg0D>lpFx4BR`mkreUlFR6;s!n6MajH;px?w1jtz=W9F$DL# zPo%Y=jEfQ|2D_-wy%G76OlGW4j>9)<;;N**jLo-M{P%n23mJUNi2)h7{;e?CD$rb< ztRWfM@Va%G!$*whc}%@myV~C;oos^hvu-0-#sMkgF~oic^7m_NF{d*#(~Yg?tUkvU zuw&t(OkW<3+W{)yFn0(eGgKKF?NgbA%&RGNu#dt?vL?B@nK^JvcC1YGv$xquO=J0z ztEvMcD6k467KBadutpi_RFQ^P0>w)}z|<+&EJXxJq0%t#<|BDD?7YBiKpeVVdVbk; zmNXvB7Mu|7OpQFT+1Y&5x%cdtPaRlx^Y&|#YpB4jVYdI2AD>SZBbC@mV!pL?t1i6a zj@$gAOgp%Jc{w-hjE|}H6NKP@>BTvsFNTnYkQEDThemoY=V%@gobJl_b*Ku@$FkWF z#zrg=|0)!fIf6afF?m__DfeHl43;%|+Jb5>zqKDSS&hH+zq)lXrQBH$YrbVG^?;C7 zsqXZ-%;sMNEt)^edjSvu`e6etW&Q5OaC^2GYuhWkGaw862UNH3u?egV#TG_rmlk^7 zh)m64h0ruLTKS2M}&5lsX`Fv}rDTE1!}N5lx0rN8bA6Ti*=JQv3l8mwZnT zjG+Zp4H>KeIbhagi{-9mX9*tKfnS|;(fDsiQ{p2!IN98_y?eFqNyC=b^yQTqu?efj zvT{?2HqvW`mcTK4h9`*~po7bDlSIx6Mf({=Qyv zNJ3arbxcokHm_7JfRq<{wA*V))9GMX)#)J5*NnsDz*LWvR+4+>u`_;giBA|uJ1&Bg zbT+)$!kCL2jc1q+ChqX#@X;|(kzF-08?+J-z3O1{JNmi5{I-tH{;AU0VY{(ApTs(5 zC3I~ta_u>@(YHCvo`#Ms0WzEp&USEg>U1ID5JcDW5~w4+yx1C3f&6s95T0~ML?hF| z%xFs!z06_=@D!csQXc%~;5i~+`r(ODR0HwCuiXpYt#f#kSX$1U!T~#NYbpI5gNK)Q z`(UEB(nloI_ezvZJzjV1^CbGV_%)#%d%yNbLtYdScSNCBkqo4va;ltO^#T1%6>56m12wRv4kY%YI6CY{T3Z$u^~ zf4I7|)^r~gF5O+85!}N9UtG9<{{|B_Fz5G1`c|?Km1brm zF~<;uS~KY*Gp|OI>)qJ}tK;~Vv88l>qWa@6^d-PN-ArW|+dcY99Ws}{La4^|1kTI$ zoaM0@fn}H)XXBo;zpN+O_D#d$V*JbmXo9HKGi_wpTpL~WJB!pzJ=mVANCU!$_q=ub ztb*AF8JBP{(4#6J^naiw-g6=C5rwvB`9%8%`>0<$30}|DzkQh$`E_dL^*P=pqhJ#( z?iAZ}>vC5Q{;e(~?X9L=5LMa16Tuf;{gRkZGh)EX7BO|>mOF4wgfQ!SsaJoW7BR|tTShmY5 zO`7M2O>2?LvSeFzJA!rC*3TGETS}uSAHZtnV|RJ(WHx?9+;^_4zcJ*?fB8$18Ie%l z_#eYRhvOpPP`Mb=Hb>xp6DfT@Ge9G$uAnb?SlY7oFSOjmwWK6bW4h0++D7vtYPKx4 z_9#|GuQ41D!D~F4p5R0zS*jgn){pZt=-J#GYHK}4M)O$`4H`AuSr5MGOzngwppsCm 
zEO<31J_BWT@r)ptND<@8o3>5_=(l<3pl~t)tkx+TpWJ$7V)mPWmU+BJkIN%#wBaa? z8=jqtHSPS`7c~X1Eq`267WYQ;#=})RFmYJwFP8N`pjQ7KGB4K+IfYFb(aqJmZBfSJ z)=@hpY2iom9uZ;b6AzuOKcJL&h`mn(&L5B{oCF~Q%r0rr2b(1x$}|+d0Q*g@OXcT` z)zpWzJ>T&DfSpi4Tw(AiJ;wLBHx1{M{~q^c++M*yk+O2pzqZ7{t_SAE8Qhq8LF`xb z$WMIJ$+8EdNJZ_1PL)z>M56`$_0_=B$%@zE z<+ET0S1ra*z;j?T=-jHGnWoPW??Py6NE&dVoMF4rxqVUOtZlmU0=(cIn0!M2fPPoT z{Q)%^k^?p=1-Q3AtC`j}T~0hK@6EJI{Zq4kRBC_yaMts)KQyt zUP5;3?tHd)VR z6PDO+J!?EmNG}?gCl7Xco7Tkc&hFtI^LYdVA+A5YBO_#qqXQ7fV?`)QKHhsB&|wOu zW~>Y+SFTpjn0!S)Wr<$(35hn{9b=S<@QNiTov9c!WBu_s?034r1gnrO7A@$Z8#unR z^8+Y9(O^1H=$n2GwZ4%E<;dhgVa+Pgt04zFu~qx7x+pY1>el9*n}d6S`#6^5@Mo4F zAhvaSbtk^rbpC|z4=A*&fxuJW8&3P@hIq^bhr`X=HyVFH*!}l^K!Y3eHYg4>+;u)_ ztIb;Vor~WB;q;hHDCv-JR>DVhwRsbfS8g%~&Z_xWOKdKy#DEQjn)Ty3rs=d!Q-Srb zZ7;~qcRjc0(glth#g|Uq=T!;o2$?GmPVo(e+YC}W3jPwm!~o>K7dM#F86nnqbSILk zo{rq&Y$KW1FxNJ{eS5p~P^w)MW5J#_W|J1LbrYC_*|)TDg|_z)psz#hznTYZhKwDjtN z(C(U2(OE}o6AKv^gZcJV^NYdR_OtK9bMni$Uk_krSfSWy(`ncTG2Dky8L6ohaGz;y z?fgxmue`;cU&eES&g`G6+LT;5zL=QterMQWJo;>8e)rQ*H7UmA6;g;{Xw$mn%0`~U z(-KvEXwT~2y{lGpV{)w5lyQH=YiFA6tGkER`};R{Xr3>Ms2gpe!WQ=verenDL_ePx zy(cdBFX_9`Wd_hFD*<7Pzxe*;!hI(XK>B8KHx2U{~18?_n$#!xu12563mMDw zE%&aVfCqPW*au12SJjK|Jt{TK2yn&rwb#`)2A`m;=QDA_a{TS*Q?s@Q;hH97 z4OmBwmvcl>W(%c0nVpzR7p94C5DN(To8juur$0j+$9iiR_kqR3$&2W(5>n+HVV@v2 z>Hq7I$MEqET*{0a`!*3x1;=Tk&HHp76Gbf>i1?h0S?n%NVfm|z&1ICgNI^6#dt z+zrX^`0n$2ivT7_VrG~2HmVdDqj4FwqQwWTkz0p3Ap2%H*grA1@-g3TEPF3`CPxRGU z?Nt1AyH%eSk@acE1vJ}TnvdwAZ!F!{Q}DgJah&V8f@R@=pr=MODrt$RMNL`}7@6)a=n z?c5^Wr}#*Mj07WXgV9dr@4BO}J|&c@m*Lv~a&$wk<(mj(+zX-~z3RQU#Yg+#om=-x zaY6BWywM-f1fDtX2aFcF6!wt|H^+CKCqyZy8s<+ROFg-Oq(Eo#<#evF2J33(circE zi*AnHD|ajU16vsH$q0A%`MbSkmGAIbmeE+!g`S&+IVqD1?|rH<@7o-cId9)#->TZ- zi3eVBNETSdzuvrBx!(kt@BYAy+Cd$&%D22YCT4Owx%rNltp^GTGRNuabIlyDrt7FV zE%4~Zq4Y$4Gop>m6a|yI1xL_h;C|JmLLB>b2aizH!;IPzv8r47>tanmSakk!+w#Kn z6VmUDcjAvrEQ<@xrn`kJ@bMFsuUJhHC3!qR1l=c6qcuM5x*EJh>GJTha@yTrP_$$` z9`7(HqP1lGC7yoO_d@TfRBu^`T=$K#W@%QBOPbu8M`#%c7soQyf4O*X`zg+g4?7&) z>z3yJ;#X6>AD&UNhU*7^*DklWi=eOMZuHMhqs;^tKX>!tqMa;%@qJRhLKOo{gSw^p8&7HJUnpnn?!|{Xc zFuH}$_P*#I8bk82h}AsFL7%kFBExbk)Qe{Gyj!WV$4#VWdM6hsdvddqjCsYw!8kBo zPA6002)Fs=g9sr@ZzsHWZR^FM_vpV-A7eBwNjKKQwe+_d6xx+>z;n2i%4K?38)uG- zBMViyzm^?O`207l{3~Cv2t%IFzsz3c*j!gM-SXWIiZ!(l)h_>#X>7?Fd16~{9n5Qf zLb>w2(w~e$)O~k1PRxursQKzDbau)fQk#@J_S*O8$BC&o^H&9gYMi$>PSL{GLa&?h z2JQ42WQ-}Ub^d@Bw0MyfZ=*BEdm}@iS?1Sw#(7p(#oeQ01>E$whT8_l&Hz=IK=ZoK zY1)@kp}_9}ZwW`(b-NpV<>kgux87ETo3;etAm%Q9HBtk*wxac(aI30xqTT2n=l8f|M|854zaDnSa5mYh z6G_4}TtIT1OovTp>5unsrU==gF)F_dc5o=9KYy$~i8ZoU3kwV9(tLr#G+eLsTb$Vy znSZm3>OR{$g<#;?5-XLd!xP&sASO3pgu?c4L82`%X;{J1{tW=&| z)(NZz?7O93heQ}53k7v^$_}HzJ2OPh4CA1cOY6kPY?dwA97NENABcMR=y-#_)+|ZF z;_bvg4J#Z?!8xJJML~TZ` z)o>}d+Syrotlj{Td-=rXwD(mZF>v~h`)iThBE9H$qNit zEznHtM4fF-!2XR(>lH{1ZhR^={#L`}H6NER+se*xDN^)D$z0-nXHIT1b{gd!^%H&F_x;^9k;8yZn!O~aJ9VHi7HeuHh%GJ)XR zl_{~=0BtQb48P!6!SUGS+s+^O_390qgDR|TNd#*Jr-u}=a?Mk*9 zx`0)9jv;Xp98UYOP#aZQo9$+c`xJbN6f{U%F>*2W^HmXEMP9P^rJid+o6IOpR%E&k zOvALWoYKrYMXCiGZS+#8TUTOvRQ_kODo*qS(J?L1h*oYKF9-xX0I?xH6WmDHs=>2E z6H7eQm-;oZ8qWKzhF`bw1^(s=ZUyJWhsa~pr0HGeng!nkzWJ?)JyBq>2J1OBOC1Oi5pgI>iNtE|NJ$`Iz;-^-$oc(%Xu zA!X`toLipYD|Wm6;cT}0A~HZe%JBd0_1)LiBnq{pm20jtB6-UZ@Ccs!+i z5}qsXwo!@C-sBUAK4@t&C>$Y6YhQ!za**qa z&}IGkZQdlIW)OYo$MW5}UnX4xQ5d%mKG_#NH>2-c-e?!n+8jEo$_EwxpgK=v8B zDcgYlID6u`Vf-8#WaOCbo9+aP?qC*}5X$1PuG2o*=9%F2N~?IRV$|j8CDG?^e-a7# zU@LOPjG&V?7w=I8_@@q2_b2MytIeV8L>d!k9 zijeoqPL6V?{Owm`mm1i_&K+ZB1sCCA(@Hq(C>hHwTu(FTCLu?m*^HZ?^{zbTwsm@`h-N zU-k_x@FTyP?2By$-tI7?{Iu7x+Oqbj^c~oZLF6UjE+-AJHHqF>#CCpkAQDPM^Zg1J 
zx3?l%5nnRxZ=SlRI)G4=(JIHJmHCy7vKy&eev84-mC{hN){!os_QS3)$iKjjC@_5)xUU<8t_VRJ^6p zU<_FG7rav0H8_PygS&L&xJ&<2rB^4KIZnmKm%{A!L~|((hPECKqaC}q_nYSD1f3NW zF}N?)y64U2pdA%7*Zy_%kQL9T?k#luyrWyTaq`tSA_kfcMORr}ksY=wP|FervPbXuyGFHb*YmkSE)uzGhet%o`De487-t3AFe8Ej- zqcjD9Hqr13)Gi6PQJE`S-cTimYjN^e$sN$Ng25M^&jh$y%7@r6EBK^- zCTQP=bDPjOeJ8^>vt#{s-6II&q}VxDly4^6xewRVU^zCayLVCSmr!aaekF-?C+A?h z?+^rgLNoVW&1H!zPRhpV*Wz0_ z_uEgW2gEsYD#${|RMNI|(*b=wE4sJpwANc80-bD+ z9w`Lir9L}~B%36(er!`W7=~ml0&8;4?@aoQR%=e$nThqU;Lj?)z;p*lDxI@TXo^qS4oFS9z}lj?9buAcq@<(IfqWiJ`2yjJg@ocK`t z}n6bWSKEy*gp>TFp+%X^)nhRQU?6=5`l(xFy*FmRn#k znLpC}LTCHl(__%~H?}wFkEAFywJdF8WolV9B0fe7^n?UD z;zRPbwHV?0UIi6~FuE9)@+pnW!x>N!;Z_GK$Enh9Tsg(R{O+G~U~f-e%C%mAl`qV# zUsZB8!e$b^Jx`G=&t{5`e}Mh{scLqlX!~F&GSzl~`0V~eS;`^aJC-EgL&ESw~fAIGj z>fxfp-kZWoS6<5eRp)tv!Z%{c<11AzUy#xhYN28mxVoe^tWs z8-D`FpZWfJ;&HSK5slv0;&SCTE{h~nE)WRdL~QBmxa;uxCW3IrJqLW9W+iV+-$H~39DLhM(?1ga-}=j?PBKMr(4{@R)4BU6{(W6F<8Z>Cpqhb0{u0SB^9iIxM zu*-AXXp!~Ux)Fg9goQx1R`>OXoTfNGJw$roQ74JV?Ck~JAjcXD(wUiGmM64gBCv2W zPlJR9KG_ck+acilWIP{ihlA~q@O`o#54JI;nNQ;uDt2^5a>SVYVdjP z(h7^Kkmmjx3z7B*po^fg;ECs{0_moD0js(0U;|6Ta>L2aLf335Iahu@L+_Kn+3P}A zOC=Y05W81PoyEYfT{_aQOPrHTq3-(r!B18rUF(BK`N=5t+mf*cm{Gh!W!bYnX$T_T zhT6eROq@JB2>~lBaArx1leGQv1{sDFsi5rustD=Ec8Z70EhMoeUqm*0O5`iH#$kcO z0YxD}B@Kgx3)tVIj{{UaQIpw|-Pi@x3yMD!#I8UFX zJ$UipYG8`<-b{Q5&iO`*TN-mjAluclqt+}t(#9nK3(~?qD5BR}+9N;UjK~1>d?dOK z@wkeBZEfx)dOBXrU}RY*Q4`B(+}R(~v3ChabGr;Ly+Rc}@lC z7^u*9e}m%cUDIFC%UFC~h(r2_v19I-y%s^IQc|5pAy0YQiq2bXReytTy{4h+e!FQI z^X!D9fOGDz=i|lzGIA%Jo_ZmRSPUl1L%(y4?Hnu=C016s{yL+z0h{R^dCu0{eLfE zY8AM5%X-?vXCo?BlSTKg(~FVBy_NZTXBiRW@t z%Krd~j{p)WK8Zq={o6iI_)7C*(x1QlHpF|Qo?9=YWlA;xi8nNmezBj+nTN-A=Cu?B zgRZ(QM&)}YKQ!p`QjGG*n2HvcuOWZvQ^}c23UGer^G|`bLaude6dJL4T9l?~l?2zV z?qIO8R4$}#M=AkSbuxNJ4}mnLl^(Q@!tImos2V7o6;phbHBh{gPavD9J~-{0ty>A? 
zo;;{OKyEbH7Z~3T^);j**XcA5z@ImTcBG+Q!Z$g)6HlW3g|DB1x>NW60EXC4baTsP z^lYgMv?6RyiWc#XodIutyt33lsotki%ZO>~(cR}nVp+*PTZ+fAbKke13ZQslncvFZ zXbSwiFDtMZ?G9|8b}Xn%pfCsg7X!N!%fB{GstooIkpBP}rNS^V;d-GZjs_C3gWRul z?pm84jh$ZYfC9M(S&$&+UQvj2kWIWbObgVH@wym;*2czkb4{G*R z8OijX(NVUmNoc|Z3_4dRsp4YNsLHgrneOh_h#`hz4YHAM-yH@q7esS}7WROUUQuQB!yGmq1plU=+nThFOf4nW+&4ub{0M)V-64b%i zJDo@>LPxa1=YYc#pP!&Ae*XaQ+X?QDd2GIol_UYT@B#Epq8ZCX4Q;H+Crv>C)GIa1 z3e*`aYEKngjvv~KDIJw;E%mwIb1YUqmn{Yp*B9mZ`W$BoGdnZD2={~Z(a$J(-IV+- zMjpg4ipNS%d$ADRb4=6OloxpDQPvsbmVWv_B(OPKB{cA_YwySk@9x~^ZGr}NSvoXRl$3D@*^gPq{{R7(OYS}KHT)MrX1D8aIq5*B@B00O_eVUoI3o|4z^Uy!ddv#1&K=Wz z$KdR#ALoiXXL$Z|jt-x2{^R`g65ogYUY;wDwEqBqq)!W{zq>zsq6h5wTly1!@cO^c zpdO!zLH-Qi@*s5I_C@yiRWvdCzKaR_^lzaCeSaWdE7zzm*98yUE5BB{Ipw-_x&tlV z5v8u|C*6+cwtB9+HR#B5k9!-R;})`G?cx%}>BUnsWC$4p$O8nr$^k_+iH8gxg*l zyQ(3%XerbA^=QOD-^8F4 z*|h9EFAA1RjMyf&mV>_k00$Y2T##oFFqSdmXiT*02NGIQxyHRJnN+c%#|{{SmK9CSV7 z(4{|quRpBa9Pu}lD)`NSB5ZS=VAAXvaOsW1q8qgSc3;361ylY32~ zpsbW5=sf&rJR*P|+tH8hAfMs%;x?uFa~F(~mJ4heIwws$pMIj2Ky0Vls5b~)Si0ek zdg`7O-eB(sJajj%<{8Gbzatmyj&j9xY2^F*ID2jU?74#V3^nz<9%ClbgFcWnl2u_= zTkgd@yjRv3;p4>%L-c_^l5QzjXNZ^#CjpM%__?NqXEyl|_))$9sJiP&G`RZHREwRY z)e8+9s!iyFeloeoI5D{95M&bu!~w8>8vV-W!{?7|*|TQNn-@~Ws!87<+z9}Fo(|Em z^w&Dug=yu7$K!tvX6+4*-rZ!cltjNmnI0L_q}{S^*!#S=i^J6#CsFKCiGt!27c0^H z4~`^GZcOz+um*hG2Xe00Bpohe8XJ&_4y_zU#-M*t(mTSc(%_P5WNRDMy%+`R3Byr) zNj}G5FG4OOb~FbmL5CD+)od_8=$U4#DqY|e-u;|iMgxfX={8JZeK+bJCuT0LI7C?Y zSn-52Nn6lku-W|&%Zoq~8~*^c#|yZ+q1Z$P;%m{!3G^tAL0y`fa^qgoRqW}(ZwIEg zpQ%({a4qvQ!;{reS5WJ+tr-kDazB&SSc!AI=)Yw_2*~MO0O5{_WA)4Li(;G#>--Kn z?c$d~^yy0n^v)T1y29x6Bp^5{M}ZI(VSCN|WRi93*RNi^diCqqyD@9yjxpT!!l2dM zVf)d3xTI>M1@xxidKH<`KxRXlu)9&yP+puauPFg>5Yd6sA|QujU{1#b2xF_|b|^(* zP!~p-9^L-{`KI@A(btK4C0h1QbPFtM+(JKj5HfcJs$n{(25kxd)}{ibQUvNF*fKqU;BrcuY$`*<5`1FW#wAoF&Ja64ehsl$o~Bj58?CXEHJ zQ>|aOkb=3g!!iotTT;^G*yZPkM|RDHXh)LcN0HIs-u~hsqX{3yDpaXbrAn15RH;(7 zAZgpmNbJBc9}C^eU#&9Rzu*B(_QR#o#@f$d{{YZnk<8n4JbCTxy=}uhTtm7c^Pz)# z7n*-PaAQx2`{hXT^?R&NQq$lop2(L9Qn3KNkeRU?gJ>YyB#uKXTRSLEge!$(+WS}r zAtsRbJsQf%0XE=Jrti%K=QxHPLSVf`XrD=+@8^unn*s4Mh_F#~L^?8VbR7Qxe>hAM z>H0Akj!yJPdhE+hB!}~LeE$ISah@6qM2o+AP^+q57vL9j$98mPZv{6Cro(!PRv2>n zc%GSVr0yuhKRo1W0-g%N_wSJKf5gwzf7|~3GdMn9_ORnSzE|`3JYF2%&{Yp}r}=ju zjrw^}*Nu92!+#;%bMfDp8RItaJumcU{{ZXN`nl7C*t14rKgHRcp~a-r7PcD6p`qdq?Hkvy0EbQ6kiO@!!36R1=giGN+2jj!#>)ZH^9y z-?@7@25qzWSMimErz*n8`q@$pZTR0z0323W7m-v&RnKmb6eYlQ+HQ>PicrLD51-** zR96&Y$sPqArnmukKpy3>H?`+FLXAvFu$Ap-=?GbqOvF@pCm5Uo$K;>a_{;I80s%eE zt5R*y7F<9;vSfOha0kEa=R`TQ0>wZBTuwor{q9dO&Djvs!l=8MGm(By`4p_yAt12P$c%J+my2^L7 z(SlhEA+h~Qv#(AzaD0xtzL3!_>2bO9&q)d8eMGzcKc9$*%?itY!xH}hgp{h)KbHgG zeV6b)7r6|d=6CwxWg!Rget5rC4S@kJS#i-(j#@V$dnTHyD_4F4I#=iRbBB*eLnXD- z0eLx@QtBnlxDz-Y-FSCv?KWx|niSmYyK&oq%or76$8lFJNbx1E z^pP%%(&Ng*NFNl1ZYbbMHUPw8L{)rc?cW6i>-p+@k{jL@Ve+I|ij||x$l7oMXVLw? 
z-f$)f~I(_M}S5Hn8>TbGlsp&mODAW3&fBb%6#-B;=xsgI{{Y@UoELF~CbJIW7q)yNC_BNbet0>h#Eyzij>4m{6ncWOAxup$%VzhR zgfHHH(y)yJbx#ADMHhSl?hLjmB5mz6OLy8}w;UcWXBx@a4XVJ4aT4J^yuel%a}!1r zoc{o#pbrM%YrVRMF4CE^1)Y0LOW12gur{vo1%t3+J$G6Z;@(zw?r7?23wRTvaZoVO z)A6k`II~{2EIkE;lozI!xmYhVd9dnU8yCZ_#s=$jMsP!5K5=fZ#{l+toymxHT%>t4 zgzF7@(n{dtSE5V``p_Q!M&UAAV1JOW@W+ep!VI@bf7LLyLJIgLnj)g==VGJ6T$!(t4UX!pd&2lvU(WX8}cvt=sW{HAdi z1qOp=H2Xsp7s`nTS?4__Zp)ys0Bfd$x86+kw-}Un05D3>U_Lz~4>4QRvlJ$<1ixJH4Mt{%=oxbF1z zT%{}}>(#Pe-d}%j670Q^{(N>qg~hdnG-@A;c;ruTKut;4;I1Dso-y7r!WRZyC!IqZ zX9I$NS9~U#2l$Pg*j~m0|mnZ z4bJ`RlpuA;^?L-+Xe}WEt;+Taz~jpR@xfI_&Zl(`6fl>n9+i9-fZH^U44Qa_XL?Pbwu@I{uUj6g|1Zs6^4Yhw-O|>7;6z zTr|+v3{2qmhb({GxNh;Z0RI3ACpENXv!StSo>BFl=#zgw{@8-1Q(S;ufo&h8r($!V zCx!4>716`N)XY;JN3IW@`WW$H0CF-kAWy|yf{Bhdp3$A-Lepb1c8ZmNd^QEPdVTwy zAnmq(wS{zIjK_0;{{VH`{S}~SlYw=g`Pi+S-s zTKy3J058Nb-rkC{`~HHyQ~KwwCBIR$`mg2ebbp`t&s=m`COkR5UHez>hbPZU`#;V4 z=l$pK;=M5lc6s}|zA;*R>G~Hkbii4sYIzq_gTr=Pva3cX#sHz}Q0j{z=2q`ooJ7xLez(y^t<+HMJj| zAq>;LrDHW6k@P5{@&GUzSpHMdwz1Z9pH!Z1lm~VEppz2UWupgfKF%1JlJvtod5W z93HTdmrPsBl>Y#Y+`#cPr1c*g2u@DXa$jWUKO5!J>A~{-Kaz(usekX@jSBME?LYnL3H?@Oqm>Lk$OJMR8Dc zznN#6{4Plzv;9gX3JOP=?7M^YaW{UPM2wg{MfrfJ+Y|gmgp@it*)O%CiMuiEx-XP# z75r}-bXuj&X_zxhn{;rM8yqP3?N`}Ha zM_o-{5%)Xao$LdV8KYJK>M2dY3uw1b3FEBZ-WeaiHFvZ79pEw*aTKJ&K?^f@IvY|{r zw%H}h1=>41p%tB}e4OK|h?&UeG*4QwT18Ge5>_aYvE|4s?}V}_zxsq|d^i2J__xB= zlQrwC*~jQ?v|zQfE>qvXuZGbQf{v~>ou(dDCz^8hJb&iWomJ5IiwX;-Z%)f~F)lSGJ(pklECO zAeB(c(7-fT%*0zH_Fm!3z0JR$kg%C~B;+T5FDM903{NgxfLvxY(|+#UPFch>cU6o307zsKCvAI@$x`UU0O_)W znVE*{=Np_Mzn*7;K{Bj0oNSaVCZ*}+SC~l%Wp|ayA5EcXo~ttKhjbWBc(OF?1N8YF z-%OItq5ErbsTF!{W6XAArHEvlM_|WWwl(wKeKtLwJMiX!0Ro4DHOaz9;;i(gChNs8)ea&}=AOYMlc0 z%Z<|W7FNfw%O|cc9J$&T33%2TLVZA}*tIIPJDQ|$pSj0(<%-G_s1A3l^#+GU!#NnK z$RktP$F%H(;ZLnic!zCr|VRV6CRNz;7 z35Kyp0C9!D0|TEb)B#+uEv>5KYF`>{wR{ES8SM)U>?b=9ONEhVsuBC`@K-`VaC?47 zN-P7c++I&L@}D7!xhAs{Vyc!idMw>=LJNd$=+^fAD0xnsXNx?}gWAE`!gnXQ2dM1Z z>8xf-#e4p1sCTC#3$m1X$fxww@AfI$%d(m|SS#=qA)YJY5TOXDMe&76e4WGjP@VmE z+#rdlw|p_iZ6IC~QxP@0ku>ZJbe4EobdC!W!rKq87=?`vH=g;&Y#eCaI&FD~N}9rl z0ya*F_QA~0!66eA(m1=^I2t^W(Cla{t0?9`yO{}7^xcO0M!r(KG1ZH$zV0-(j{Hf- zZ|lZ5?QMATW!EL#$}Xbes}`>^&ga!LLxx-?Tvx{U&u`aYK3)agzgDC@gCcYu`L_Tc zxm0AwR8to@{{SjOQ#}OC*rcd|-49-fTz5EiG$Q14`PM}*QXW6ghl3}iAfwq#_iN{V ze4lLn>-c>MNzJY-n`INEz*+pcT^PBMpY*5q!R`fShv0UEp-|`{N*Q(oM<98^LhU!D zmW54+U#9Q+&2sk}!+o>R9DF`jALPFsd;t%@eY$dgbpGG;bU$5xXZrmk_xJ37N1|Zg zjx&evdN1ShXXxjviT?oDlxdyY_EQL2vYBHMX}R67pqrzgcB{~h$CMBDa6Y$3Ba#%M zeE=o*@0GwbREASs6K_%L5%e^*ME-sEj{+kjWU_j>8mb^Qki-r*l$S?s!9i_KnE&-XA=Zzt|KUg zqR$MBCew!p5c%=H@Mc7b5+q2GB1DN2BuJ4QeM@VJ*7@&9ID3LSgwRr{7r95N<{`1c zvU6O7m`wo_eH3lESvRj#+)NUJ4Z}i{jl*B29e|eKAt??qYS+gi3+Z|YXqwIS2P;9q z3DwQ%h6Gb6^Q4tCl=4MTRORp_9pV!9{d;%=Mi3*!@lUIS&J=&ffv2pFqYSs^{j+;v z9GL)3o&vPNF|Qm@zoO7+t?ejgc@HNibb6jP&31J*?r6kq#ncO37UeK$0REr})9-(m z{&DS^p{JM9hFUr%-6TIhd`{JTulQcvAY^9$0GCthwz?3f{7EszL|{8PN^#;9I!v3s z_LD89bUEs-sqrlH0zCY86))oEz(rI(alKnD^ zd1fr=f(hEV4H=OWmXNWn#?~{xWH^}IpY~%h zfY&OBj&CMzuxI=K0C)(=kslWI-O%&B+b9JQizBGSc`aJDW4I;PN4Qc!<(?SvacYay z=pb?U=q=AH(*FS2!?o*~BfS`0tbUpt!Kvgjuz!k&Y@LWyzaazqI+m3d$1Le6+zYbp z+R+gL%j$(Ehc7b+$)U5ji^BDVG|jPeeP4!3R@YeO0lFDfcQ;XD%x=R8y4kugr;j`t zF{NjBTRX~la5H?SaAV@fl+H6?meMV-$` z10G|<00!U~RPWN~W4FKuYiSw3q~W6_dT}`S+A5~@eDA6ieVYO!sPhQ(HvCVr<@PSL zpC3gnDeG>kO*eSn#8SO96UK_QCx^d2sP&3{3&vug*~?caa_8C1W8Fp({&4K96(+4M zb>WTF#^2Lo$6e*vh&#+XM-@4I0}M2LHc&Acz3?8wL^rGqfq-`4_HIjED%BrdzqHZW zj!onviwvVGbDh18OPzTfD0jOXImIE^e*TwuRLD1B-+nms96mQPX=ka9Mt&WixDu`T zG4$v_K_1zxbc~Ndbl)VW$b!4^;|@H2njJ5e&a-3Fv^&6CoIi`Ol+3SFK&bksZU}k_Z;P-F{{S*sNWKL5Vb|;_?$6CRdqW>o*?opiz|*PAn{+E? 
z_+wkwt$1kaOq)&l7D}{rItd&-*Aw%voW7H$!<+uU?#ni~u))Y3+8ny1Y-V!&Aup_} z$H3I~34^u<0v&RSw_r&NSv~zC~`h{VE$r$z~k*!A&7yvw& z0iVd#Qleg^<{!5I03T?0UR@Qu{{V-ItH3BH!ob_61m(j%F$=(-Rw=nq#;mx)w%kLv#b@ZZ}9q)W937v-2UpUOLaKj6UINBjjv=7vv5B3Bij7^IDGN}@Ob zZ&LKa&teR?>J+oj>?Gsxf5+#x5DwF9g4wufdfWL=UZ_3uTg*I){#Pw#}W*^V6(34a!mgKfO!vDEES1^(4@_u*htO^2y{pKzlNA$SwY+d zpE{p7{IS||x3q3%3siya_Q-hZi6j=`X~&hz3YDnQQtmn#!=dcr?ZRozrbV;`ZJCe* zoFusIFeDwbN(_P;#Vi&qh+X5Bjgu3`$sE|xyr?{YC?(me7dn~=ri>ZuX*gubg zXiks6+5Dc0!QkDWPf@99^dC|9U!Bk0zn+|bVE#W(X$oboWUp#52brV|Q#QrB5-sig zL-_kWv1ZM>p%G^R)ELAa8^aYejhCQCzm@jKHN{}bCjhpGZ$RMspi%^{*zm4ld*?9> zoOdAuNsXgO32TvU(~Le(K%xxB10|v3{RR>t`9ul0?;nPDFvbJksaLTE(=TV6ZZ19v z|5ZKjldPel$Mv6@zgCc^RV8M5>N{2`324$Ud@}cF7MA**`cT`Fk0@x108@!#4 zAZ_4-lS(sP2fnocwlI7r#(XxeX3gz`ibcGd(P0H+lVl782dWqQQfu$VRiUUld+rk@ zc)n2Nq9ctXQMTspi$ahIm$^@ID&WiM6aZP*cMN|f{lAkt z-o$pU;9o(cb77*i@&N0OQ!-<4QQ-?Wz7vk{kIhwd4PmYcf^PnpfQQF?9g_+;00&jM zT+Iu5i9ZEwT`UP!mAxZJn$m7WgJ}In*Dw@oPj)_|3G>!TOe5QH*fZtu_WuAQ&Q^`a z(G)TWggO+)b-)^z>k&ud2TUrd-@_qgzo|d=9+%^Z>og<(0Bym~+SuIsj-vVia{Tc4 zUqqer5TXxbCeC=c5Qk3EhtpR=Dm~L0hxv<`>eggyr+A?i$evr+aq3X6bhWu=xEjUz zzAVB-KsW};1!s()YMW_-w*k!0^fKtgO`J~Nnt0zBn1v2XhV*!Vn&4^< zJ8-?dc|(IC8@1%b`~I}Q-gA+A)-XSOwfIqd;_=?yLs>eSQE>X4=DM*?dm4gv1kG1Q~>U*Fg++*sDtMe zkHws*fMPTX+L0`wdk6D|4tT>cDbOn|+#n`2mc3DPuD4AJ(M00-<+Q3)};BxV`WhU^$NG z7=MJCMC1T5b5hbS1ZZtV*#QG~@}F-}^VRicKaRCiuB}5Y*Qco}v%XJ#d0t714&V|1 zM_!SwaU?*MO(F!0I)0DpLUP1JoGA_kX$hP54yUYjVwGr6aCMHMGm;hCqF&|@3fmSF z)HD{xzP-_^c9kw}bcKYXSN&-ipH9aOm^mau+}?g=zX=Y_S>J0Lo(-{omaUFn0dYVT0&F?gCgAO(T7=_WUseh)H zNlyqaZoJSju=x>$_cD+SFq#iK@CqQA6qdr!1e}E7-n#V{aNG10-h{@CS&Pr-hZI>X z8EW>{`8x+}Mbe@p@grSgbqM={N zv0PeCe?k+$J5VGc6$Ci9 zsEJKr%ix%v4}su@Q$u2SSLi!3_9$Dh6?|nmFE|EXdB;Ycl~ld*7qED`v4$5)5tuPd zTSB9wVV1Pq0%Npj4tCF_9u0Bikxcha6M289rk+CIF5Vtco5=T z4o?ZE!0&~DjLqP1!`<%^RvHS$;FbRXXK3EtexTd22`KN0>_B)^f z5v%bb>BE);VZ~ILBFzJph5d9|aPYi=mb9 zWk8v(9`cK|`LY1@aMBOISJB74r1X8``~e!3>Lt> zQH5exlaLZ+^Ph3EE6ebH-x1(WCnEr$QMWFV-k;~N)NTjrH6QMfa-1wkZZg)QP^wz2b{h*S3_@&xV|X3A`QM8RxqW6v|{cit?c?1NP4QSvT9 z(sv1U1Yu-Tli*#}P0XU`SL71-x~Xn|lb%23A4sS6{5OX`ALY`S!SX#nlk(|TgXQ{u z(O3HFr6t9YlkC1&L7r-7gn&EpswsNwRun&TcfGAktcejbMY0J^pzSbg?*w-R$xy#o zm*M@-^OuEGfmI<(fIIprgqcV187ivBxn@;k^QHI%i8U^8n&gw4$9%30zO7b}PjEYv z-o;|iz&B88->+Z7K*tzJ4Its+7fXZD>9LOb>|-<(7y(vjOjTyL#|AZI@E zcsogjeEPuEAg$yrTBc1^vS^b5w`tJ4%tg}z)CEVq_LA-euS?A{bZLrXXf6#ken0sA zyk^rKjY-H`x~D5${xNZonZl32asL3(zjMctK=kZt!9eaOkx&msx0pO&rT6>mDy&2g zG|bv(gVJJjp`VXVa;KpcISd9%|{1!n=Lv9XSC+gvA%=+j%$|e3tRA zVC9~yw4je{YTO$S9~e4T&MI6p^?wmW2ls4WLix+!>K*hzPj&v!_0>G@&gj}d-}CVV zlaisZzSGGOkYiJEi_np(1ye>34XA`jX(C7je<6P@=Ms=F9Mf z0qj$fP$`Fgy-DWiF;`0;+PVOK0tKHfO+WBQxk5kSsBaRXLe{Gl>)M<_v8INGnH5?z z=aiP`e=|q%pFSDH+>O+J%6dNnx(3^6@*$W>=HRv6vu<%Kk;@b+K$xJjS(&hO`hOqq z)x>Gz_B+x&(b4=N4g>;!rd_24>XJ1JC4mHcSP0k+^0k;3Y;4*1;#yihVi2qZz4|z! 
z>)~;=v@Vtxj0*kA(9wvahrpYcKgm0hh)@^6YQK>r{`#PqVvRV9rt~v~+H9abvr5g5 zqG0qefEZ+J$U!c{y!sleSneC(g>ZrdZZ=>x5S(?b^afHRQAK%>=Wx$d*s6Z!1MYQQ zL!J%3Hq0=L=;$klIF(&8AkJ3|vmN;v77Tl6SF3Hb{{W2hR99@Oq;ti|K{EkshZ3g$ z0A2+B8sy0t29`Mjk<>q5;$coX!GzC1UhL$FUUrz<5B(jM7&=9JoNBuj9eO>M)H{=ubpP0%01 zmaZ_(MH{k`{4G)LI|PxdM{S(E5M2{2%L_pN05nddPJ&7#(#pK#sm6Ro83l0~4cOwt z^g42^OxyWfD%}eU3tHLDS&Q@}wln8KmMP_D`EI%w8tSs#mr~?8e5XnK+GqKJ()CYg+Qp=4si=0 z`uct-pVM^}^0qXN>F5rTe=a8glDUmnS~WN6O$tExJ_qn|J7lAEs%!j6PzoSbT>u+Z z;KvOHsu$fGUghW4o*h}A-zl&Qb6F1}=x=<0ByBOhYi&WM0Dp1FApo6e=8g?> zXFG^CaEq#C;Uh%p4lZhK^{}ZGo_jq|{{YXkPT?sv-*0V2^$O z`xuQ*@v$J}M^@?7dWfPYwk-I)W2NFg};5cq=-R>Dd1&3qEMk>Vxb^Z`21vCLw@E!4{7ly2zgQYV)Ri~mW^3JJS z<$q`V)_kM-ui)xs^jAH0h~cH>zc-BhkAGbuO;6oDIQM%g`fMZt`&94mVF-1Ujk(}> zO5)r%o4$q{d{Tk;U9tJ=`Q$EVvkU-zMNt*>IC%Jz;Xv0>Sn&NP_BB45#ztKzQAS$2 zw|t+G;&X-35Eg&rB$(ycTQ~sOU`;bb#CsakQZUoN;e&~5S2|m1Uh{P9R!(t0ngZCE zE$jgSi2ndmQsLhQ?i*m0pi@%y0n{J0G-)b3eiB z*RNkLT)A@P%a<-(xlJH%I=?c)S9N`N{m$;no$E;fH|X#Kvx>$1%-8U7u-0#NmpPiNZ4XwW%vnm6}_DNr})C3kr~EAfZb8acE0(zOdmD{YbroJ zU;-&L<3~Jh!#Mld@BR}qci>uNpn@ympZ@@}iWgf0C&9op*NKaP=FKPaU=GRgKA7$+ zo(%V@$`n+Wz8oAQV0G^wTv><&fRt)bI$|-kWb|={m?6hqm&yah@6K`kHlOMI{{V&y zCZvCR)b`_cgJ?M-i`P+~hC6jen@(>49~QRHCg9UkntaJMM?FMz_bTo2xXBexvtEJshL z@&5k+LbvVhpXkoJq`d+NE7>&bA+kOod?)#J@I#N8u6{}Pm7d;MI-IH$k$7qYrY)Cv zkrD#5x*84oMv)^Z1nQe>tP}Z1?7y7TPyYaJb*iI~ey)aW^qm+}M_U}ygYIq+rs?9u zTM42pzc)yDN&sBr^KqNXDz}#Mvkagm$5380zE~*}aL#L+@xvIlCzH#Ur151B3$HvM z19_b$&8%4C%IJ|!?!&Y=8<&EY`bp@jQu-M53o5yNm8x>CVE+5Vhw3wkL~U#v=go?S zpb2+Rik}x&HCjkh>BwC4WgTE@hB*OyKnZC!B{$3(5+~EG)B>|qF)DxI1C1u?_;wH} z{Q$#s^wz2`P124b@L_vX0i`NkK(M&|x9JlIxQ=$+YXyjb&LDy5WJ5ljh!ke={{XtM zmVg5337xKr1IRcE0=)`3aNVWAcI@?)@T=+j1oLq~O&wVn1QUfCWyBf~l)cTxVP2kk z`=W@Df=;R~KUOAmNkIo-MkvM(3kpt3Km}$lO_=d9Tj)XgOl+}Y$j-NDFo9E8ZUd&)V>XV7@ zKyplYLzA5m6%^XyAN6F-O|BwbdO6(dgZ3c)ga=Zn%@=ipd@pZ+IXD)S!N_GPi);tM zm<+-+-O8yca-{SAHh%aq7B8y-HX5=gOUw{xRQ?JRvtjyyztVUp9i%u($UJG)b38es zSFT<3Sdi4K0y#@P4Z@RLRsR6l>zaefk>K=Bknxc4daiL;R96~$%2?9$`#y7^T-faeDxu$X#<<#f>zLi2e* z;DfOjsxpd^GpH^I2W%p$f~5f*HggGtoW@fvL3QE3~_m# zPR!kl0nvhL5qRi&(%KSvn`+@ew_^pfrlYsqK+Gcm!J|+S%gkLn>tM`Tq}H9cQ;a7x z^{^TuN4_HS^6NYv<<+Pcn_mI%M6kdh~T@o(*C%_t3)yCzzg1Xpuluwgd%fb)=qvG z(?60J%q3|A5stGeKe9~j>8q*UTs)XAv|QGU4$~kTTLni!;*^TeCp-P^{8#qR>k4xK z_f!l+s^^VpfG^zDEO7Q6Oh8eoQFP_;XD;P6)f<5|TR#W@TTl0TxOh+JenlBCMpt|NHWV9mw}K>>!&oQCSk1y^f&%z)n1RNIy<^)6kI_Zk`M44_=GJn74DH17ubmhJqqfy>AY z{JBIR1Fcujp#u5Z2O5;vHiQY|8c38T0QOrgnTken!2bZn{J$;`6mwj1D02X5KQsQb zzqsN-p6$ZeDKomL<^<6Rv*7!;(~nM~g#;$UpVl@(wgp;*MmH05$PfWe+;B{BB*t#I zdtB|;tTZurE%)_#QBM!=k%LGS-JgK82X>fcZSAg;E`=(kvh{|zfhhc6lKMCK;>Y(tlwcFtqqjB>ZF z?>FP|Z|fX~bk#Lc-)V6LWru*as)9~gaH)>2TQD~Ed161Nx0UpE$)6$n`ag5^mXzq? 
zA1d=`QYlWjB)Ej2*!G1%E9w+SL5h~HQz&#u6(?H~W>sLi`>|uGA4e?|CDFvNL36Yz z32c1e0PV5HdCw6utlcMdh*M&!K5KxrnH&YD86(PW+AGB_F#YZFM884a9DA8G}hRZT76`;x)x`}DDB^F9bjrc$bu~$VA zhUoAZP>ReLY@xHpI3u)_aSg7JgoNC2FLAiKx957Eo#mRGs%1^siA3sKvLA@!NkUMU z<%>Sy09>hQB|KiY4Td9Snl&{nn9vzR%%${pyd>YwTy4kfld?muB`P)o{H?whm+2B) zC|w>K$!sZ2p)i;svUgDfv$Aiw+nG1t8s|xaOsr{lnNwSUmou~;KFO3Ym5|!(3Q&}S zN4stCIS+95&ZPZc=ReW9zwS=s$+2aBaZXsMM?d6{&gmt%!H9za(MvI4y> z5R=@FHSf_DwigbN&jMFHmJm`8&I5bdHw52u1`P%*lyRn0LBCxJ7x~bu6x*K40{;N5 zY&6t%71%>;dB-KXv=ug_j(=$&Tcxj$DVWYjs%a2)NmAX;@EtrLg!*i{qBrio2O;hr z+0?(Q?CIbA8mwFXi`epQh~l)0!P9 z0F{ZhCNv*tk~r3Y6op)pk^r%{BWsNJHV2uZ&+ zaGmY9Gl=pdgf_6y3cEo-jqXUlEZXBi`GO=1JcPf>NZrDoaV?u}X+m_Zt#CZ-qw) z0HH}*7E+Y0JBuBT!*SD}D~veU2|@=z3GV2hv~=;eb{yjX3!7hVqxo{fjilK{0IK%l zw)XD7cP+6`C=Z}6*0{C(wNlyzYKvl-z?`r~oX857API`Mts3|Fq zPozl$sOx{fJAyzH zx}BIQRJ0WcwD6QIsEps{m}HJfX&!*;d7KR+)D-TWU(5sC+DCVeK1sC`^Tn9I*gQON zG?P$9{{So*hUb|G8}jk+`|p2}T|)fuW-gzIJUo0p{eDR`8>Y49r1&MaY&mQR-rIHl zH~A#gZ_j3H{{V@P4-db_^gSJ(#d4Wj&6(PB?2>Yf_NH}!Z+fB)GRFH$!E literal 0 HcmV?d00001 From 634852ad61d000b598edaeb1e9e700f734616405 Mon Sep 17 00:00:00 2001 From: John Date: Thu, 31 Aug 2023 18:15:47 +0800 Subject: [PATCH 03/20] [CodeCamp2023-338] New Version of config Adapting Swin Transformer Algorithm --- .../configs/_base_/datasets/cub_bs8_384.py | 59 ++++++++++++ .../_base_/datasets/imagenet_bs64_swin_256.py | 89 +++++++++++++++++++ .../datasets/imagenet_bs64_swin_384 copy.py | 55 ++++++++++++ .../models/swin_transformer/base_224.py | 29 ++++++ .../models/swin_transformer/base_384.py | 21 +++++ .../models/swin_transformer/large_224.py | 17 ++++ .../models/swin_transformer/large_384.py | 21 +++++ .../models/swin_transformer/small_224.py | 29 ++++++ .../models/swin_transformer/tiny_224.py | 29 ++++++ .../models/swin_transformer_v2/base_256.py | 29 ++++++ .../models/swin_transformer_v2/base_384.py | 19 ++++ .../models/swin_transformer_v2/large_256.py | 20 +++++ .../models/swin_transformer_v2/large_384.py | 20 +++++ .../models/swin_transformer_v2/small_256.py | 30 +++++++ .../models/swin_transformer_v2/tiny_256.py | 29 ++++++ .../configs/_base_/schedules/cub_bs64.py | 39 ++++++++ .../swin-base_16xb64_in1k-384px.py | 12 +++ .../swin_transformer/swin-base_16xb64_in1k.py | 12 +++ .../swin-large_16xb64_in1k-384px.py | 12 +++ .../swin-large_16xb64_in1k.py | 12 +++ .../swin-large_8xb8_cub-384px.py | 48 ++++++++++ .../swin-small_16xb64_in1k.py | 12 +++ .../swin_transformer/swin-tiny_16xb64_in1k.py | 12 +++ .../swinv2-base-w12_8xb128_in21k-192px.py | 22 +++++ .../swinv2-base-w16_16xb64_in1k-256px.py | 11 +++ ...v2-base-w16_in21k-pre_16xb64_in1k-256px.py | 18 ++++ ...v2-base-w24_in21k-pre_16xb64_in1k-384px.py | 19 ++++ .../swinv2-base-w8_16xb64_in1k-256px.py | 9 ++ .../swinv2-large-w12_8xb128_in21k-192px.py | 22 +++++ ...2-large-w16_in21k-pre_16xb64_in1k-256px.py | 18 ++++ ...2-large-w24_in21k-pre_16xb64_in1k-384px.py | 20 +++++ .../swinv2-small-w16_16xb64_in1k-256px.py | 11 +++ .../swinv2-small-w8_16xb64_in1k-256px.py | 9 ++ .../swinv2-tiny-w16_16xb64_in1k-256px.py | 11 +++ .../swinv2-tiny-w8_16xb64_in1k-256px.py | 9 ++ 35 files changed, 834 insertions(+) create mode 100644 mmpretrain/configs/_base_/datasets/cub_bs8_384.py create mode 100644 mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_256.py create mode 100644 mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_384 copy.py create mode 100644 mmpretrain/configs/_base_/models/swin_transformer/base_224.py create mode 100644 mmpretrain/configs/_base_/models/swin_transformer/base_384.py create mode 
100644 mmpretrain/configs/_base_/models/swin_transformer/large_224.py create mode 100644 mmpretrain/configs/_base_/models/swin_transformer/large_384.py create mode 100644 mmpretrain/configs/_base_/models/swin_transformer/small_224.py create mode 100644 mmpretrain/configs/_base_/models/swin_transformer/tiny_224.py create mode 100644 mmpretrain/configs/_base_/models/swin_transformer_v2/base_256.py create mode 100644 mmpretrain/configs/_base_/models/swin_transformer_v2/base_384.py create mode 100644 mmpretrain/configs/_base_/models/swin_transformer_v2/large_256.py create mode 100644 mmpretrain/configs/_base_/models/swin_transformer_v2/large_384.py create mode 100644 mmpretrain/configs/_base_/models/swin_transformer_v2/small_256.py create mode 100644 mmpretrain/configs/_base_/models/swin_transformer_v2/tiny_256.py create mode 100644 mmpretrain/configs/_base_/schedules/cub_bs64.py create mode 100644 mmpretrain/configs/swin_transformer/swin-base_16xb64_in1k-384px.py create mode 100644 mmpretrain/configs/swin_transformer/swin-base_16xb64_in1k.py create mode 100644 mmpretrain/configs/swin_transformer/swin-large_16xb64_in1k-384px.py create mode 100644 mmpretrain/configs/swin_transformer/swin-large_16xb64_in1k.py create mode 100644 mmpretrain/configs/swin_transformer/swin-large_8xb8_cub-384px.py create mode 100644 mmpretrain/configs/swin_transformer/swin-small_16xb64_in1k.py create mode 100644 mmpretrain/configs/swin_transformer/swin-tiny_16xb64_in1k.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2-base-w12_8xb128_in21k-192px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2-large-w12_8xb128_in21k-192px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py diff --git a/mmpretrain/configs/_base_/datasets/cub_bs8_384.py b/mmpretrain/configs/_base_/datasets/cub_bs8_384.py new file mode 100644 index 00000000..b193bf83 --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/cub_bs8_384.py @@ -0,0 +1,59 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
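+# Base dataset settings for CUB-200-2011 at a 384x384 input size (Resize to
+# 510, then crop to 384). This file is not run directly; runnable configs pull
+# it in with `read_base()`, for example:
+#     with read_base():
+#         from .._base_.datasets.cub_bs8_384 import *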
+from mmengine.dataset import DefaultSampler + +from mmpretrain.datasets import (CUB, CenterCrop, LoadImageFromFile, + PackInputs, RandomCrop, RandomFlip, Resize) +from mmpretrain.evaluation import Accuracy + +# dataset settings +dataset_type = CUB +data_preprocessor = dict( + num_classes=200, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=Resize, scale=510), + dict(type=RandomCrop, crop_size=384), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=Resize, scale=510), + dict(type=CenterCrop, crop_size=384), + dict(type=PackInputs), +] + +train_dataloader = dict( + batch_size=8, + num_workers=2, + dataset=dict( + type=dataset_type, + data_root='data/CUB_200_2011', + split='train', + pipeline=train_pipeline), + sampler=dict(type=DefaultSampler, shuffle=True), +) + +val_dataloader = dict( + batch_size=8, + num_workers=2, + dataset=dict( + type=dataset_type, + data_root='data/CUB_200_2011', + split='test', + pipeline=test_pipeline), + sampler=dict(type=DefaultSampler, shuffle=False), +) +val_evaluator = dict(type=Accuracy, topk=(1, )) + +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_256.py b/mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_256.py new file mode 100644 index 00000000..9690ff84 --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_256.py @@ -0,0 +1,89 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
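+# Base dataset settings for ImageNet-1k at a 256x256 input size, using the
+# RandAugment / RandomErasing training recipe shared by the SwinV2 256px
+# configs. Test images are resized along the short edge to 292 and
+# center-cropped to 256.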
+from mmengine.dataset import DefaultSampler + +from mmpretrain.datasets import (CenterCrop, ImageNet, LoadImageFromFile, + PackInputs, RandAugment, RandomErasing, + RandomFlip, RandomResizedCrop, ResizeEdge) +from mmpretrain.evaluation import Accuracy + +# dataset settings +dataset_type = ImageNet +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=RandomResizedCrop, + scale=256, + backend='pillow', + interpolation='bicubic'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict( + type=RandAugment, + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type=RandomErasing, + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=ResizeEdge, + scale=292, # ( 256 / 224 * 256 ) + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type=CenterCrop, crop_size=256), + dict(type=PackInputs), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type=DefaultSampler, shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type=DefaultSampler, shuffle=False), +) +val_evaluator = dict(type=Accuracy, topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_384 copy.py b/mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_384 copy.py new file mode 100644 index 00000000..fb1102b3 --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_384 copy.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=384, backend='pillow', interpolation='bicubic'), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/mmpretrain/configs/_base_/models/swin_transformer/base_224.py b/mmpretrain/configs/_base_/models/swin_transformer/base_224.py new file mode 100644 index 00000000..5ba4adac --- /dev/null +++ b/mmpretrain/configs/_base_/models/swin_transformer/base_224.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import (CutMix, GlobalAveragePooling, ImageClassifier, + LabelSmoothLoss, LinearClsHead, Mixup, + SwinTransformer) + +# model settings +model = dict( + type=ImageClassifier, + backbone=dict( + type=SwinTransformer, arch='base', img_size=224, drop_path_rate=0.5), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=1024, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)]), +) diff --git a/mmpretrain/configs/_base_/models/swin_transformer/base_384.py b/mmpretrain/configs/_base_/models/swin_transformer/base_384.py new file mode 100644 index 00000000..d747fa08 --- /dev/null +++ b/mmpretrain/configs/_base_/models/swin_transformer/base_384.py @@ -0,0 +1,21 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
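+# Swin-B backbone at a 384x384 input size with 12x12 attention windows. This
+# base only defines the evaluation-time classifier; the runnable
+# swin-base_16xb64_in1k-384px.py config combines it with the 384px ImageNet
+# dataset base and the AdamW swin schedule base.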
+from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling, + ImageClassifier, LinearClsHead, SwinTransformer) + +# model settings +# Only for evaluation +model = dict( + type=ImageClassifier, + backbone=dict( + type=SwinTransformer, + arch='base', + img_size=384, + stage_cfgs=dict(block_cfgs=dict(window_size=12))), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=1024, + loss=dict(type=CrossEntropyLoss, loss_weight=1.0), + topk=(1, 5))) diff --git a/mmpretrain/configs/_base_/models/swin_transformer/large_224.py b/mmpretrain/configs/_base_/models/swin_transformer/large_224.py new file mode 100644 index 00000000..758600e7 --- /dev/null +++ b/mmpretrain/configs/_base_/models/swin_transformer/large_224.py @@ -0,0 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling, + ImageClassifier, LinearClsHead, SwinTransformer) + +# model settings +# Only for evaluation +model = dict( + type=ImageClassifier, + backbone=dict(type=SwinTransformer, arch='large', img_size=224), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=1536, + loss=dict(type=CrossEntropyLoss, loss_weight=1.0), + topk=(1, 5))) diff --git a/mmpretrain/configs/_base_/models/swin_transformer/large_384.py b/mmpretrain/configs/_base_/models/swin_transformer/large_384.py new file mode 100644 index 00000000..9cb01033 --- /dev/null +++ b/mmpretrain/configs/_base_/models/swin_transformer/large_384.py @@ -0,0 +1,21 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling, + ImageClassifier, LinearClsHead, SwinTransformer) + +# model settings +# Only for evaluation +model = dict( + type=ImageClassifier, + backbone=dict( + type=SwinTransformer, + arch='large', + img_size=384, + stage_cfgs=dict(block_cfgs=dict(window_size=12))), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=1536, + loss=dict(type=CrossEntropyLoss, loss_weight=1.0), + topk=(1, 5))) diff --git a/mmpretrain/configs/_base_/models/swin_transformer/small_224.py b/mmpretrain/configs/_base_/models/swin_transformer/small_224.py new file mode 100644 index 00000000..f6de6ac0 --- /dev/null +++ b/mmpretrain/configs/_base_/models/swin_transformer/small_224.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import (CutMix, GlobalAveragePooling, ImageClassifier, + LabelSmoothLoss, LinearClsHead, Mixup, + SwinTransformer) + +# model settings +model = dict( + type=ImageClassifier, + backbone=dict( + type=SwinTransformer, arch='small', img_size=224, drop_path_rate=0.3), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)]), +) diff --git a/mmpretrain/configs/_base_/models/swin_transformer/tiny_224.py b/mmpretrain/configs/_base_/models/swin_transformer/tiny_224.py new file mode 100644 index 00000000..fc976cc0 --- /dev/null +++ b/mmpretrain/configs/_base_/models/swin_transformer/tiny_224.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import (CutMix, GlobalAveragePooling, ImageClassifier, + LabelSmoothLoss, LinearClsHead, Mixup, + SwinTransformer) + +# model settings +model = dict( + type=ImageClassifier, + backbone=dict( + type=SwinTransformer, arch='tiny', img_size=224, drop_path_rate=0.2), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)]), +) diff --git a/mmpretrain/configs/_base_/models/swin_transformer_v2/base_256.py b/mmpretrain/configs/_base_/models/swin_transformer_v2/base_256.py new file mode 100644 index 00000000..9fcfffeb --- /dev/null +++ b/mmpretrain/configs/_base_/models/swin_transformer_v2/base_256.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import (CutMix, GlobalAveragePooling, ImageClassifier, + LabelSmoothLoss, LinearClsHead, Mixup, + SwinTransformerV2) + +# model settings +model = dict( + type=ImageClassifier, + backbone=dict( + type=SwinTransformerV2, arch='base', img_size=256, drop_path_rate=0.5), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=1024, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)]), +) diff --git a/mmpretrain/configs/_base_/models/swin_transformer_v2/base_384.py b/mmpretrain/configs/_base_/models/swin_transformer_v2/base_384.py new file mode 100644 index 00000000..c7566b5e --- /dev/null +++ b/mmpretrain/configs/_base_/models/swin_transformer_v2/base_384.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmpretrain.models import (GlobalAveragePooling, ImageClassifier, + LabelSmoothLoss, LinearClsHead, + SwinTransformerV2) + +# model settings +model = dict( + type=ImageClassifier, + backbone=dict( + type=SwinTransformerV2, arch='base', img_size=384, drop_path_rate=0.2), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=1024, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. 
+ loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), + cal_acc=False)) diff --git a/mmpretrain/configs/_base_/models/swin_transformer_v2/large_256.py b/mmpretrain/configs/_base_/models/swin_transformer_v2/large_256.py new file mode 100644 index 00000000..da36e679 --- /dev/null +++ b/mmpretrain/configs/_base_/models/swin_transformer_v2/large_256.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling, + ImageClassifier, LinearClsHead, + SwinTransformerV2) + +# model settings +# Only for evaluation +model = dict( + type=ImageClassifier, + backbone=dict( + type=SwinTransformerV2, arch='large', img_size=256, + drop_path_rate=0.2), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=1536, + loss=dict(type=CrossEntropyLoss, loss_weight=1.0), + topk=(1, 5))) diff --git a/mmpretrain/configs/_base_/models/swin_transformer_v2/large_384.py b/mmpretrain/configs/_base_/models/swin_transformer_v2/large_384.py new file mode 100644 index 00000000..5e1323d5 --- /dev/null +++ b/mmpretrain/configs/_base_/models/swin_transformer_v2/large_384.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling, + ImageClassifier, LinearClsHead, + SwinTransformerV2) + +# model settings +# Only for evaluation +model = dict( + type=ImageClassifier, + backbone=dict( + type=SwinTransformerV2, arch='large', img_size=384, + drop_path_rate=0.2), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=1536, + loss=dict(type=CrossEntropyLoss, loss_weight=1.0), + topk=(1, 5))) diff --git a/mmpretrain/configs/_base_/models/swin_transformer_v2/small_256.py b/mmpretrain/configs/_base_/models/swin_transformer_v2/small_256.py new file mode 100644 index 00000000..e747fd6a --- /dev/null +++ b/mmpretrain/configs/_base_/models/swin_transformer_v2/small_256.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import (CutMix, GlobalAveragePooling, ImageClassifier, + LabelSmoothLoss, LinearClsHead, Mixup, + SwinTransformerV2) + +# model settings +model = dict( + type=ImageClassifier, + backbone=dict( + type=SwinTransformerV2, arch='small', img_size=256, + drop_path_rate=0.3), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)]), +) diff --git a/mmpretrain/configs/_base_/models/swin_transformer_v2/tiny_256.py b/mmpretrain/configs/_base_/models/swin_transformer_v2/tiny_256.py new file mode 100644 index 00000000..8d8bfacf --- /dev/null +++ b/mmpretrain/configs/_base_/models/swin_transformer_v2/tiny_256.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+# This is a BETA new format config file, and the usage may change recently. +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import (CutMix, GlobalAveragePooling, ImageClassifier, + LabelSmoothLoss, LinearClsHead, Mixup, + SwinTransformerV2) + +# model settings +model = dict( + type=ImageClassifier, + backbone=dict( + type=SwinTransformerV2, arch='tiny', img_size=256, drop_path_rate=0.2), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)]), +) diff --git a/mmpretrain/configs/_base_/schedules/cub_bs64.py b/mmpretrain/configs/_base_/schedules/cub_bs64.py new file mode 100644 index 00000000..2ca40bfe --- /dev/null +++ b/mmpretrain/configs/_base_/schedules/cub_bs64.py @@ -0,0 +1,39 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.optim import CosineAnnealingLR, LinearLR +from torch.optim import SGD + +# optimizer +optim_wrapper = dict( + optimizer=dict( + type=SGD, lr=0.01, momentum=0.9, weight_decay=0.0005, nesterov=True)) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type=LinearLR, + start_factor=0.01, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type=CosineAnnealingLR, + T_max=95, + by_epoch=True, + begin=5, + end=100, + ) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpretrain/configs/swin_transformer/swin-base_16xb64_in1k-384px.py b/mmpretrain/configs/swin_transformer/swin-base_16xb64_in1k-384px.py new file mode 100644 index 00000000..76548d93 --- /dev/null +++ b/mmpretrain/configs/swin_transformer/swin-base_16xb64_in1k-384px.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_384 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer.base_384 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/mmpretrain/configs/swin_transformer/swin-base_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin-base_16xb64_in1k.py new file mode 100644 index 00000000..12ec65ea --- /dev/null +++ b/mmpretrain/configs/swin_transformer/swin-base_16xb64_in1k.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
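+# Runnable config assembled from dataset / runtime / model / schedule base
+# files via `read_base()`. Assuming the standard MMPreTrain entry point, it
+# can be launched with something like:
+#     python tools/train.py mmpretrain/configs/swin_transformer/swin-base_16xb64_in1k.py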
+from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_224 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer.base_224 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/mmpretrain/configs/swin_transformer/swin-large_16xb64_in1k-384px.py b/mmpretrain/configs/swin_transformer/swin-large_16xb64_in1k-384px.py new file mode 100644 index 00000000..f4a6143b --- /dev/null +++ b/mmpretrain/configs/swin_transformer/swin-large_16xb64_in1k-384px.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_384 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer.large_384 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/mmpretrain/configs/swin_transformer/swin-large_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin-large_16xb64_in1k.py new file mode 100644 index 00000000..4b22f5ae --- /dev/null +++ b/mmpretrain/configs/swin_transformer/swin-large_16xb64_in1k.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_224 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer.large_224 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/mmpretrain/configs/swin_transformer/swin-large_8xb8_cub-384px.py b/mmpretrain/configs/swin_transformer/swin-large_8xb8_cub-384px.py new file mode 100644 index 00000000..6156e306 --- /dev/null +++ b/mmpretrain/configs/swin_transformer/swin-large_8xb8_cub-384px.py @@ -0,0 +1,48 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
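+# Fine-tuning config: a Swin-L backbone initialised from the ImageNet-21k
+# 384px checkpoint (see `checkpoint` below) is trained on CUB-200-2011 by
+# combining the cub_bs8_384 dataset base with the cub_bs64 schedule base and a
+# 200-class head.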
+from mmengine.config import read_base +from mmengine.hooks import CheckpointHook, LoggerHook +from mmengine.model import PretrainedInit +from torch.optim.adamw import AdamW + +from mmpretrain.models import ImageClassifier + +with read_base(): + from .._base_.datasets.cub_bs8_384 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer.large_384 import * + from .._base_.schedules.cub_bs64 import * + +# model settings +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k-384px.pth' # noqa +model = dict( + type=ImageClassifier, + backbone=dict( + init_cfg=dict( + type=PretrainedInit, checkpoint=checkpoint, prefix='backbone')), + head=dict(num_classes=200, )) + +# schedule settings +optim_wrapper = dict( + optimizer=dict( + _delete_=True, + type=AdamW, + lr=5e-6, + weight_decay=0.0005, + eps=1e-8, + betas=(0.9, 0.999)), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0) + }), + clip_grad=dict(max_norm=5.0), +) + +default_hooks = dict( + # log every 20 intervals + logger=dict(type=LoggerHook, interval=20), + # save last three checkpoints + checkpoint=dict(type=CheckpointHook, interval=1, max_keep_ckpts=3)) diff --git a/mmpretrain/configs/swin_transformer/swin-small_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin-small_16xb64_in1k.py new file mode 100644 index 00000000..969edee7 --- /dev/null +++ b/mmpretrain/configs/swin_transformer/swin-small_16xb64_in1k.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_224 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer.small_224 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/mmpretrain/configs/swin_transformer/swin-tiny_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin-tiny_16xb64_in1k.py new file mode 100644 index 00000000..ded80639 --- /dev/null +++ b/mmpretrain/configs/swin_transformer/swin-tiny_16xb64_in1k.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_224 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer.tiny_224 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-base-w12_8xb128_in21k-192px.py b/mmpretrain/configs/swin_transformer_v2/swinv2-base-w12_8xb128_in21k-192px.py new file mode 100644 index 00000000..7ca933f8 --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2-base-w12_8xb128_in21k-192px.py @@ -0,0 +1,22 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
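+# ImageNet-21k pre-training config for SwinV2-B at 192x192. It reuses the
+# 256px base files and then patches the inherited pipelines in place through
+# `_base_` indexing (see the `_base_['train_pipeline']` lines below).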
+from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet21k_bs128 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2.base_256 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model = dict( + backbone=dict(img_size=192, window_size=[12, 12, 12, 6]), + head=dict(num_classes=21841), +) + +# dataset settings +data_preprocessor = dict(num_classes=21841) + +_base_['train_pipeline'][1]['scale'] = 192 # RandomResizedCrop +_base_['test_pipeline'][1]['scale'] = 219 # ResizeEdge +_base_['test_pipeline'][2]['crop_size'] = 192 # CenterCrop diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py new file mode 100644 index 00000000..6df69c48 --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_256 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2.base_256 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +model = dict(backbone=dict(window_size=[16, 16, 16, 8])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py new file mode 100644 index 00000000..f9f05216 --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +from mmpretrain.models import ImageClassifier + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_256 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2.base_256 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +model = dict( + type=ImageClassifier, + backbone=dict( + window_size=[16, 16, 16, 8], + drop_path_rate=0.2, + pretrained_window_sizes=[12, 12, 12, 6])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py b/mmpretrain/configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py new file mode 100644 index 00000000..6538144f --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
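+# 384px fine-tuning config for an ImageNet-21k pre-trained SwinV2-B: the
+# attention windows are enlarged to [24, 24, 24, 12], while
+# `pretrained_window_sizes` records the [12, 12, 12, 6] windows used during
+# pre-training so the relative position bias can be adapted accordingly.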
+from mmengine.config import read_base + +from mmpretrain.models import ImageClassifier + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_384 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2.base_384 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +model = dict( + type=ImageClassifier, + backbone=dict( + img_size=384, + window_size=[24, 24, 24, 12], + drop_path_rate=0.2, + pretrained_window_sizes=[12, 12, 12, 6])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py new file mode 100644 index 00000000..34298ff6 --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_256 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2.base_256 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-large-w12_8xb128_in21k-192px.py b/mmpretrain/configs/swin_transformer_v2/swinv2-large-w12_8xb128_in21k-192px.py new file mode 100644 index 00000000..7ca933f8 --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2-large-w12_8xb128_in21k-192px.py @@ -0,0 +1,22 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet21k_bs128 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2.base_256 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model = dict( + backbone=dict(img_size=192, window_size=[12, 12, 12, 6]), + head=dict(num_classes=21841), +) + +# dataset settings +data_preprocessor = dict(num_classes=21841) + +_base_['train_pipeline'][1]['scale'] = 192 # RandomResizedCrop +_base_['test_pipeline'][1]['scale'] = 219 # ResizeEdge +_base_['test_pipeline'][2]['crop_size'] = 192 # CenterCrop diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py new file mode 100644 index 00000000..bbfe9283 --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py @@ -0,0 +1,18 @@ +# Only for evaluation +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.config import read_base + +from mmpretrain.models import ImageClassifier + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_256 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2.large_256 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +model = dict( + type=ImageClassifier, + backbone=dict( + window_size=[16, 16, 16, 8], pretrained_window_sizes=[12, 12, 12, 6]), +) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py b/mmpretrain/configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py new file mode 100644 index 00000000..a481c79d --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py @@ -0,0 +1,20 @@ +# Only for evaluation +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +from mmpretrain.models import ImageClassifier + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_384 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2.large_384 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +model = dict( + type=ImageClassifier, + backbone=dict( + img_size=384, + window_size=[24, 24, 24, 12], + pretrained_window_sizes=[12, 12, 12, 6]), +) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py new file mode 100644 index 00000000..8051f050 --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_256 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2.small_256 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +model = dict(backbone=dict(window_size=[16, 16, 16, 8])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py new file mode 100644 index 00000000..d28ffd06 --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_256 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2.small_256 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py new file mode 100644 index 00000000..a95485da --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_256 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2.tiny_256 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +model = dict(backbone=dict(window_size=[16, 16, 16, 8])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py new file mode 100644 index 00000000..59ba55c3 --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_256 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2.tiny_256 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * From da1da48eb6bb8285b28277f1dd06ca30ffbe3dfe Mon Sep 17 00:00:00 2001 From: ZhangYiqin <312065559@qq.com> Date: Mon, 4 Sep 2023 13:11:16 +0800 Subject: [PATCH 04/20] [Enhance] Add iTPN Supports for Non-three channel image (#1735) * Add channel argments to mae_head When trying iTPN pretrain, it only supports images with 3 channels. One of the restrictions is from MAEHead. * Transfer other argments from iTPNHiViT to HiViT The HiViT supports specifying channels, but the iTPNHiViT class can't pass channel argments to it. This is one of the reasons that iTPNHiViT implementation only support images with 3 channels. * Update itpn.py Fix hint problem --- mmpretrain/models/heads/mae_head.py | 23 +++++++++++++---------- mmpretrain/models/selfsup/itpn.py | 5 ++++- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/mmpretrain/models/heads/mae_head.py b/mmpretrain/models/heads/mae_head.py index 1a5366d1..b76ecedd 100644 --- a/mmpretrain/models/heads/mae_head.py +++ b/mmpretrain/models/heads/mae_head.py @@ -14,15 +14,18 @@ class MAEPretrainHead(BaseModule): norm_pix_loss (bool): Whether or not normalize target. Defaults to False. patch_size (int): Patch size. Defaults to 16. + in_channels (int): Number of input channels. Defaults to 3. """ def __init__(self, loss: dict, norm_pix: bool = False, - patch_size: int = 16) -> None: + patch_size: int = 16, + in_channels: int = 3) -> None: super().__init__() self.norm_pix = norm_pix self.patch_size = patch_size + self.in_channels = in_channels self.loss_module = MODELS.build(loss) def patchify(self, imgs: torch.Tensor) -> torch.Tensor: @@ -30,19 +33,19 @@ class MAEPretrainHead(BaseModule): Args: imgs (torch.Tensor): A batch of images. The shape should - be :math:`(B, 3, H, W)`. + be :math:`(B, C, H, W)`. Returns: torch.Tensor: Patchified images. The shape is - :math:`(B, L, \text{patch_size}^2 \times 3)`. + :math:`(B, L, \text{patch_size}^2 \times C)`. 
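+
+        For example, with ``patch_size=16`` and ``in_channels=1``, an input of
+        shape ``(B, 1, 224, 224)`` is patchified to ``(B, 196, 256)``.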
""" p = self.patch_size assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0 h = w = imgs.shape[2] // p - x = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p)) + x = imgs.reshape(shape=(imgs.shape[0], self.in_channels, h, p, w, p)) x = torch.einsum('nchpwq->nhwpqc', x) - x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * 3)) + x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * self.in_channels)) return x def unpatchify(self, x: torch.Tensor) -> torch.Tensor: @@ -50,18 +53,18 @@ class MAEPretrainHead(BaseModule): Args: x (torch.Tensor): The shape is - :math:`(B, L, \text{patch_size}^2 \times 3)`. + :math:`(B, L, \text{patch_size}^2 \times C)`. Returns: - torch.Tensor: The shape is :math:`(B, 3, H, W)`. + torch.Tensor: The shape is :math:`(B, C, H, W)`. """ p = self.patch_size h = w = int(x.shape[1]**.5) assert h * w == x.shape[1] - x = x.reshape(shape=(x.shape[0], h, w, p, p, 3)) + x = x.reshape(shape=(x.shape[0], h, w, p, p, self.in_channels)) x = torch.einsum('nhwpqc->nchpwq', x) - imgs = x.reshape(shape=(x.shape[0], 3, h * p, h * p)) + imgs = x.reshape(shape=(x.shape[0], self.in_channels, h * p, h * p)) return imgs def construct_target(self, target: torch.Tensor) -> torch.Tensor: @@ -71,7 +74,7 @@ class MAEPretrainHead(BaseModule): normalize the image according to ``norm_pix``. Args: - target (torch.Tensor): Image with the shape of B x 3 x H x W + target (torch.Tensor): Image with the shape of B x C x H x W Returns: torch.Tensor: Tokenized images with the shape of B x L x C diff --git a/mmpretrain/models/selfsup/itpn.py b/mmpretrain/models/selfsup/itpn.py index 85efd254..488a9963 100644 --- a/mmpretrain/models/selfsup/itpn.py +++ b/mmpretrain/models/selfsup/itpn.py @@ -64,6 +64,7 @@ class iTPNHiViT(HiViT): layer_scale_init_value: float = 0.0, mask_ratio: float = 0.75, reconstruction_type: str = 'pixel', + **kwargs, ): super().__init__( arch=arch, @@ -80,7 +81,9 @@ class iTPNHiViT(HiViT): norm_cfg=norm_cfg, ape=ape, rpe=rpe, - layer_scale_init_value=layer_scale_init_value) + layer_scale_init_value=layer_scale_init_value, + **kwargs, + ) self.pos_embed.requires_grad = False self.mask_ratio = mask_ratio From ed3b7f8ae6d972ce0f84d8abd04495cf745276cf Mon Sep 17 00:00:00 2001 From: John Date: Tue, 5 Sep 2023 16:00:29 +0800 Subject: [PATCH 05/20] format all file names --- .../datasets/imagenet_bs64_swin_384 copy.py | 55 ------------------- ...6xb64_in1k.py => swin_base_16xb64_in1k.py} | 0 ...84px.py => swin_base_16xb64_in1k_384px.py} | 0 ...xb64_in1k.py => swin_large_16xb64_in1k.py} | 0 ...4px.py => swin_large_16xb64_in1k_384px.py} | 0 ...-384px.py => swin_large_8xb8_cub_384px.py} | 0 ...xb64_in1k.py => swin_small_16xb64_in1k.py} | 0 ...6xb64_in1k.py => swin_tiny_16xb64_in1k.py} | 0 ... => swinv2_base_w12_8xb128_in21k_192px.py} | 0 ...y => swinv2_base_w16_16xb64_in1k_256px.py} | 0 ...2_base_w16_in21k_pre_16xb64_in1k_256px.py} | 0 ...2_base_w24_in21k_pre_16xb64_in1k_384px.py} | 0 ...py => swinv2_base_w8_16xb64_in1k_256px.py} | 0 ...=> swinv2_large_w12_8xb128_in21k_192px.py} | 0 ..._large_w16_in21k_pre_16xb64_in1k_256px.py} | 0 ..._large_w24_in21k_pre_16xb64_in1k_384px.py} | 0 ... 
=> swinv2_small_w16_16xb64_in1k_256px.py} | 0 ...y => swinv2_small_w8_16xb64_in1k_256px.py} | 0 ...y => swinv2_tiny_w16_16xb64_in1k_256px.py} | 0 ...py => swinv2_tiny_w8_16xb64_in1k_256px.py} | 0 20 files changed, 55 deletions(-) delete mode 100644 mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_384 copy.py rename mmpretrain/configs/swin_transformer/{swin-base_16xb64_in1k.py => swin_base_16xb64_in1k.py} (100%) rename mmpretrain/configs/swin_transformer/{swin-base_16xb64_in1k-384px.py => swin_base_16xb64_in1k_384px.py} (100%) rename mmpretrain/configs/swin_transformer/{swin-large_16xb64_in1k.py => swin_large_16xb64_in1k.py} (100%) rename mmpretrain/configs/swin_transformer/{swin-large_16xb64_in1k-384px.py => swin_large_16xb64_in1k_384px.py} (100%) rename mmpretrain/configs/swin_transformer/{swin-large_8xb8_cub-384px.py => swin_large_8xb8_cub_384px.py} (100%) rename mmpretrain/configs/swin_transformer/{swin-small_16xb64_in1k.py => swin_small_16xb64_in1k.py} (100%) rename mmpretrain/configs/swin_transformer/{swin-tiny_16xb64_in1k.py => swin_tiny_16xb64_in1k.py} (100%) rename mmpretrain/configs/swin_transformer_v2/{swinv2-base-w12_8xb128_in21k-192px.py => swinv2_base_w12_8xb128_in21k_192px.py} (100%) rename mmpretrain/configs/swin_transformer_v2/{swinv2-base-w16_16xb64_in1k-256px.py => swinv2_base_w16_16xb64_in1k_256px.py} (100%) rename mmpretrain/configs/swin_transformer_v2/{swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py => swinv2_base_w16_in21k_pre_16xb64_in1k_256px.py} (100%) rename mmpretrain/configs/swin_transformer_v2/{swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py => swinv2_base_w24_in21k_pre_16xb64_in1k_384px.py} (100%) rename mmpretrain/configs/swin_transformer_v2/{swinv2-base-w8_16xb64_in1k-256px.py => swinv2_base_w8_16xb64_in1k_256px.py} (100%) rename mmpretrain/configs/swin_transformer_v2/{swinv2-large-w12_8xb128_in21k-192px.py => swinv2_large_w12_8xb128_in21k_192px.py} (100%) rename mmpretrain/configs/swin_transformer_v2/{swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py => swinv2_large_w16_in21k_pre_16xb64_in1k_256px.py} (100%) rename mmpretrain/configs/swin_transformer_v2/{swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py => swinv2_large_w24_in21k_pre_16xb64_in1k_384px.py} (100%) rename mmpretrain/configs/swin_transformer_v2/{swinv2-small-w16_16xb64_in1k-256px.py => swinv2_small_w16_16xb64_in1k_256px.py} (100%) rename mmpretrain/configs/swin_transformer_v2/{swinv2-small-w8_16xb64_in1k-256px.py => swinv2_small_w8_16xb64_in1k_256px.py} (100%) rename mmpretrain/configs/swin_transformer_v2/{swinv2-tiny-w16_16xb64_in1k-256px.py => swinv2_tiny_w16_16xb64_in1k_256px.py} (100%) rename mmpretrain/configs/swin_transformer_v2/{swinv2-tiny-w8_16xb64_in1k-256px.py => swinv2_tiny_w8_16xb64_in1k_256px.py} (100%) diff --git a/mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_384 copy.py b/mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_384 copy.py deleted file mode 100644 index fb1102b3..00000000 --- a/mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_384 copy.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-# dataset settings -dataset_type = 'ImageNet' -data_preprocessor = dict( - num_classes=1000, - # RGB format normalization parameters - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - # convert image from BGR to RGB - to_rgb=True, -) - -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='RandomResizedCrop', - scale=384, - backend='pillow', - interpolation='bicubic'), - dict(type='RandomFlip', prob=0.5, direction='horizontal'), - dict(type='PackInputs'), -] - -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='Resize', scale=384, backend='pillow', interpolation='bicubic'), - dict(type='PackInputs'), -] - -train_dataloader = dict( - batch_size=64, - num_workers=5, - dataset=dict( - type=dataset_type, - data_root='data/imagenet', - split='train', - pipeline=train_pipeline), - sampler=dict(type='DefaultSampler', shuffle=True), -) - -val_dataloader = dict( - batch_size=64, - num_workers=5, - dataset=dict( - type=dataset_type, - data_root='data/imagenet', - split='val', - pipeline=test_pipeline), - sampler=dict(type='DefaultSampler', shuffle=False), -) -val_evaluator = dict(type='Accuracy', topk=(1, 5)) - -# If you want standard test, please manually configure the test dataset -test_dataloader = val_dataloader -test_evaluator = val_evaluator diff --git a/mmpretrain/configs/swin_transformer/swin-base_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k.py similarity index 100% rename from mmpretrain/configs/swin_transformer/swin-base_16xb64_in1k.py rename to mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k.py diff --git a/mmpretrain/configs/swin_transformer/swin-base_16xb64_in1k-384px.py b/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k_384px.py similarity index 100% rename from mmpretrain/configs/swin_transformer/swin-base_16xb64_in1k-384px.py rename to mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k_384px.py diff --git a/mmpretrain/configs/swin_transformer/swin-large_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k.py similarity index 100% rename from mmpretrain/configs/swin_transformer/swin-large_16xb64_in1k.py rename to mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k.py diff --git a/mmpretrain/configs/swin_transformer/swin-large_16xb64_in1k-384px.py b/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k_384px.py similarity index 100% rename from mmpretrain/configs/swin_transformer/swin-large_16xb64_in1k-384px.py rename to mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k_384px.py diff --git a/mmpretrain/configs/swin_transformer/swin-large_8xb8_cub-384px.py b/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py similarity index 100% rename from mmpretrain/configs/swin_transformer/swin-large_8xb8_cub-384px.py rename to mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py diff --git a/mmpretrain/configs/swin_transformer/swin-small_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin_small_16xb64_in1k.py similarity index 100% rename from mmpretrain/configs/swin_transformer/swin-small_16xb64_in1k.py rename to mmpretrain/configs/swin_transformer/swin_small_16xb64_in1k.py diff --git a/mmpretrain/configs/swin_transformer/swin-tiny_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin_tiny_16xb64_in1k.py similarity index 100% rename from mmpretrain/configs/swin_transformer/swin-tiny_16xb64_in1k.py rename to mmpretrain/configs/swin_transformer/swin_tiny_16xb64_in1k.py diff --git 
a/mmpretrain/configs/swin_transformer_v2/swinv2-base-w12_8xb128_in21k-192px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w12_8xb128_in21k_192px.py similarity index 100% rename from mmpretrain/configs/swin_transformer_v2/swinv2-base-w12_8xb128_in21k-192px.py rename to mmpretrain/configs/swin_transformer_v2/swinv2_base_w12_8xb128_in21k_192px.py diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_16xb64_in1k_256px.py similarity index 100% rename from mmpretrain/configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py rename to mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_16xb64_in1k_256px.py diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_in21k_pre_16xb64_in1k_256px.py similarity index 100% rename from mmpretrain/configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py rename to mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_in21k_pre_16xb64_in1k_256px.py diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w24_in21k_pre_16xb64_in1k_384px.py similarity index 100% rename from mmpretrain/configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py rename to mmpretrain/configs/swin_transformer_v2/swinv2_base_w24_in21k_pre_16xb64_in1k_384px.py diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w8_16xb64_in1k_256px.py similarity index 100% rename from mmpretrain/configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py rename to mmpretrain/configs/swin_transformer_v2/swinv2_base_w8_16xb64_in1k_256px.py diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-large-w12_8xb128_in21k-192px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w12_8xb128_in21k_192px.py similarity index 100% rename from mmpretrain/configs/swin_transformer_v2/swinv2-large-w12_8xb128_in21k-192px.py rename to mmpretrain/configs/swin_transformer_v2/swinv2_large_w12_8xb128_in21k_192px.py diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w16_in21k_pre_16xb64_in1k_256px.py similarity index 100% rename from mmpretrain/configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py rename to mmpretrain/configs/swin_transformer_v2/swinv2_large_w16_in21k_pre_16xb64_in1k_256px.py diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w24_in21k_pre_16xb64_in1k_384px.py similarity index 100% rename from mmpretrain/configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py rename to mmpretrain/configs/swin_transformer_v2/swinv2_large_w24_in21k_pre_16xb64_in1k_384px.py diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_small_w16_16xb64_in1k_256px.py similarity index 100% rename from mmpretrain/configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py rename to mmpretrain/configs/swin_transformer_v2/swinv2_small_w16_16xb64_in1k_256px.py diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py 
b/mmpretrain/configs/swin_transformer_v2/swinv2_small_w8_16xb64_in1k_256px.py similarity index 100% rename from mmpretrain/configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py rename to mmpretrain/configs/swin_transformer_v2/swinv2_small_w8_16xb64_in1k_256px.py diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w16_16xb64_in1k_256px.py similarity index 100% rename from mmpretrain/configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py rename to mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w16_16xb64_in1k_256px.py diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w8_16xb64_in1k_256px.py similarity index 100% rename from mmpretrain/configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py rename to mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w8_16xb64_in1k_256px.py From f4d372ba7d9a1e10fc8422d01fd7fc0a875f8c7c Mon Sep 17 00:00:00 2001 From: John Date: Tue, 5 Sep 2023 21:26:43 +0800 Subject: [PATCH 06/20] only keep one file to set swin transformer model config --- .../models/swin_transformer/base_224.py | 29 ------------------- .../models/swin_transformer/large_224.py | 17 ----------- .../models/swin_transformer/large_384.py | 21 -------------- .../models/swin_transformer/small_224.py | 29 ------------------- .../models/swin_transformer/tiny_224.py | 29 ------------------- .../base_384.py => swin_transformer_base.py} | 0 .../swin_transformer/swin_base_16xb64_in1k.py | 25 +++++++++++++++- .../swin_base_16xb64_in1k_384px.py | 2 +- .../swin_large_16xb64_in1k.py | 8 ++++- .../swin_large_16xb64_in1k_384px.py | 8 ++++- .../swin_large_8xb8_cub_384px.py | 8 ++++- .../swin_small_16xb64_in1k.py | 27 ++++++++++++++++- .../swin_transformer/swin_tiny_16xb64_in1k.py | 27 ++++++++++++++++- 13 files changed, 98 insertions(+), 132 deletions(-) delete mode 100644 mmpretrain/configs/_base_/models/swin_transformer/base_224.py delete mode 100644 mmpretrain/configs/_base_/models/swin_transformer/large_224.py delete mode 100644 mmpretrain/configs/_base_/models/swin_transformer/large_384.py delete mode 100644 mmpretrain/configs/_base_/models/swin_transformer/small_224.py delete mode 100644 mmpretrain/configs/_base_/models/swin_transformer/tiny_224.py rename mmpretrain/configs/_base_/models/{swin_transformer/base_384.py => swin_transformer_base.py} (100%) diff --git a/mmpretrain/configs/_base_/models/swin_transformer/base_224.py b/mmpretrain/configs/_base_/models/swin_transformer/base_224.py deleted file mode 100644 index 5ba4adac..00000000 --- a/mmpretrain/configs/_base_/models/swin_transformer/base_224.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# This is a BETA new format config file, and the usage may change recently. -from mmengine.model import ConstantInit, TruncNormalInit - -from mmpretrain.models import (CutMix, GlobalAveragePooling, ImageClassifier, - LabelSmoothLoss, LinearClsHead, Mixup, - SwinTransformer) - -# model settings -model = dict( - type=ImageClassifier, - backbone=dict( - type=SwinTransformer, arch='base', img_size=224, drop_path_rate=0.5), - neck=dict(type=GlobalAveragePooling), - head=dict( - type=LinearClsHead, - num_classes=1000, - in_channels=1024, - init_cfg=None, # suppress the default init_cfg of LinearClsHead. 
- loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), - cal_acc=False), - init_cfg=[ - dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), - dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) - ], - train_cfg=dict( - augments=[dict(type=Mixup, alpha=0.8), - dict(type=CutMix, alpha=1.0)]), -) diff --git a/mmpretrain/configs/_base_/models/swin_transformer/large_224.py b/mmpretrain/configs/_base_/models/swin_transformer/large_224.py deleted file mode 100644 index 758600e7..00000000 --- a/mmpretrain/configs/_base_/models/swin_transformer/large_224.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# This is a BETA new format config file, and the usage may change recently. -from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling, - ImageClassifier, LinearClsHead, SwinTransformer) - -# model settings -# Only for evaluation -model = dict( - type=ImageClassifier, - backbone=dict(type=SwinTransformer, arch='large', img_size=224), - neck=dict(type=GlobalAveragePooling), - head=dict( - type=LinearClsHead, - num_classes=1000, - in_channels=1536, - loss=dict(type=CrossEntropyLoss, loss_weight=1.0), - topk=(1, 5))) diff --git a/mmpretrain/configs/_base_/models/swin_transformer/large_384.py b/mmpretrain/configs/_base_/models/swin_transformer/large_384.py deleted file mode 100644 index 9cb01033..00000000 --- a/mmpretrain/configs/_base_/models/swin_transformer/large_384.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# This is a BETA new format config file, and the usage may change recently. -from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling, - ImageClassifier, LinearClsHead, SwinTransformer) - -# model settings -# Only for evaluation -model = dict( - type=ImageClassifier, - backbone=dict( - type=SwinTransformer, - arch='large', - img_size=384, - stage_cfgs=dict(block_cfgs=dict(window_size=12))), - neck=dict(type=GlobalAveragePooling), - head=dict( - type=LinearClsHead, - num_classes=1000, - in_channels=1536, - loss=dict(type=CrossEntropyLoss, loss_weight=1.0), - topk=(1, 5))) diff --git a/mmpretrain/configs/_base_/models/swin_transformer/small_224.py b/mmpretrain/configs/_base_/models/swin_transformer/small_224.py deleted file mode 100644 index f6de6ac0..00000000 --- a/mmpretrain/configs/_base_/models/swin_transformer/small_224.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# This is a BETA new format config file, and the usage may change recently. -from mmengine.model import ConstantInit, TruncNormalInit - -from mmpretrain.models import (CutMix, GlobalAveragePooling, ImageClassifier, - LabelSmoothLoss, LinearClsHead, Mixup, - SwinTransformer) - -# model settings -model = dict( - type=ImageClassifier, - backbone=dict( - type=SwinTransformer, arch='small', img_size=224, drop_path_rate=0.3), - neck=dict(type=GlobalAveragePooling), - head=dict( - type=LinearClsHead, - num_classes=1000, - in_channels=768, - init_cfg=None, # suppress the default init_cfg of LinearClsHead. - loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), - cal_acc=False), - init_cfg=[ - dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), - dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) 
- ], - train_cfg=dict( - augments=[dict(type=Mixup, alpha=0.8), - dict(type=CutMix, alpha=1.0)]), -) diff --git a/mmpretrain/configs/_base_/models/swin_transformer/tiny_224.py b/mmpretrain/configs/_base_/models/swin_transformer/tiny_224.py deleted file mode 100644 index fc976cc0..00000000 --- a/mmpretrain/configs/_base_/models/swin_transformer/tiny_224.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# This is a BETA new format config file, and the usage may change recently. -from mmengine.model import ConstantInit, TruncNormalInit - -from mmpretrain.models import (CutMix, GlobalAveragePooling, ImageClassifier, - LabelSmoothLoss, LinearClsHead, Mixup, - SwinTransformer) - -# model settings -model = dict( - type=ImageClassifier, - backbone=dict( - type=SwinTransformer, arch='tiny', img_size=224, drop_path_rate=0.2), - neck=dict(type=GlobalAveragePooling), - head=dict( - type=LinearClsHead, - num_classes=1000, - in_channels=768, - init_cfg=None, # suppress the default init_cfg of LinearClsHead. - loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), - cal_acc=False), - init_cfg=[ - dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), - dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) - ], - train_cfg=dict( - augments=[dict(type=Mixup, alpha=0.8), - dict(type=CutMix, alpha=1.0)]), -) diff --git a/mmpretrain/configs/_base_/models/swin_transformer/base_384.py b/mmpretrain/configs/_base_/models/swin_transformer_base.py similarity index 100% rename from mmpretrain/configs/_base_/models/swin_transformer/base_384.py rename to mmpretrain/configs/_base_/models/swin_transformer_base.py diff --git a/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k.py index 12ec65ea..09af3d01 100644 --- a/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k.py +++ b/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k.py @@ -1,12 +1,35 @@ # Copyright (c) OpenMMLab. All rights reserved. # This is a BETA new format config file, and the usage may change recently. from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, LabelSmoothLoss, Mixup with read_base(): from .._base_.datasets.imagenet_bs64_swin_224 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer.base_224 import * + from .._base_.models.swin_transformer_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * +# model settings +model.update( + backbone=dict(img_size=224, drop_path_rate=0.5, stage_cfgs=None), + head=dict( + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type=LabelSmoothLoss, + label_smooth_val=0.1, + mode='original', + loss_weight=0), + topk=None, + cal_acc=False), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) + # schedule settings optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k_384px.py b/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k_384px.py index 76548d93..aacdc327 100644 --- a/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k_384px.py +++ b/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k_384px.py @@ -5,7 +5,7 @@ from mmengine.config import read_base with read_base(): from .._base_.datasets.imagenet_bs64_swin_384 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer.base_384 import * + from .._base_.models.swin_transformer_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * # schedule settings diff --git a/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k.py index 4b22f5ae..b8fc2793 100644 --- a/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k.py +++ b/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k.py @@ -5,8 +5,14 @@ from mmengine.config import read_base with read_base(): from .._base_.datasets.imagenet_bs64_swin_224 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer.large_224 import * + from .._base_.models.swin_transformer_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * +# model settings +model.update( + backbone=dict(arch='large', img_size=224, stage_cfgs=None), + head=dict(in_channels=1536), +) + # schedule settings optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k_384px.py b/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k_384px.py index f4a6143b..9a449aa6 100644 --- a/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k_384px.py +++ b/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k_384px.py @@ -5,8 +5,14 @@ from mmengine.config import read_base with read_base(): from .._base_.datasets.imagenet_bs64_swin_384 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer.large_384 import * + from .._base_.models.swin_transformer_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * +# model settings +model.update( + backbone=dict(arch='large'), + head=dict(in_channels=1536), +) + # schedule settings optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py b/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py index 6156e306..779daaa3 100644 --- a/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py +++ b/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py @@ -10,11 +10,17 @@ from mmpretrain.models import ImageClassifier with read_base(): from .._base_.datasets.cub_bs8_384 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer.large_384 import * + from .._base_.models.swin_transformer_base import * from .._base_.schedules.cub_bs64 import * # model settings checkpoint = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k-384px.pth' # noqa + +model.update( + backbone=dict(arch='large'), + head=dict(in_channels=1536), +) + model = dict( type=ImageClassifier, backbone=dict( diff --git a/mmpretrain/configs/swin_transformer/swin_small_16xb64_in1k.py 
b/mmpretrain/configs/swin_transformer/swin_small_16xb64_in1k.py index 969edee7..59792528 100644 --- a/mmpretrain/configs/swin_transformer/swin_small_16xb64_in1k.py +++ b/mmpretrain/configs/swin_transformer/swin_small_16xb64_in1k.py @@ -1,12 +1,37 @@ # Copyright (c) OpenMMLab. All rights reserved. # This is a BETA new format config file, and the usage may change recently. from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, LabelSmoothLoss, Mixup with read_base(): from .._base_.datasets.imagenet_bs64_swin_224 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer.small_224 import * + from .._base_.models.swin_transformer_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * +# model settings +model.update( + backbone=dict( + arch='small', img_size=224, drop_path_rate=0.3, stage_cfgs=None), + head=dict( + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type=LabelSmoothLoss, + label_smooth_val=0.1, + mode='original', + loss_weight=0), + topk=None, + cal_acc=False), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) + # schedule settings optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/mmpretrain/configs/swin_transformer/swin_tiny_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin_tiny_16xb64_in1k.py index ded80639..733e1ef0 100644 --- a/mmpretrain/configs/swin_transformer/swin_tiny_16xb64_in1k.py +++ b/mmpretrain/configs/swin_transformer/swin_tiny_16xb64_in1k.py @@ -1,12 +1,37 @@ # Copyright (c) OpenMMLab. All rights reserved. # This is a BETA new format config file, and the usage may change recently. from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, LabelSmoothLoss, Mixup with read_base(): from .._base_.datasets.imagenet_bs64_swin_224 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer.tiny_224 import * + from .._base_.models.swin_transformer_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * +# model settings +model.update( + backbone=dict( + arch='tiny', img_size=224, drop_path_rate=0.2, stage_cfgs=None), + head=dict( + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type=LabelSmoothLoss, + label_smooth_val=0.1, + mode='original', + loss_weight=0), + topk=None, + cal_acc=False), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) + # schedule settings optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) From 9b75ce0aa4dac6d2016cd5b7f975ef7e02bb20ec Mon Sep 17 00:00:00 2001 From: John Date: Tue, 5 Sep 2023 22:16:07 +0800 Subject: [PATCH 07/20] only keep one file to set swin transformer v2 model config --- .../_base_/models/swin_transformer_base.py | 1 - .../models/swin_transformer_v2/base_256.py | 29 ------------------ .../models/swin_transformer_v2/large_256.py | 20 ------------- .../models/swin_transformer_v2/large_384.py | 20 ------------- .../models/swin_transformer_v2/small_256.py | 30 ------------------- .../models/swin_transformer_v2/tiny_256.py | 29 ------------------ ...ase_384.py => swin_transformer_v2_base.py} | 0 .../swinv2_base_w12_8xb128_in21k_192px.py | 2 +- .../swinv2_base_w16_16xb64_in1k_256px.py | 2 +- ...v2_base_w16_in21k_pre_16xb64_in1k_256px.py | 2 +- ...v2_base_w24_in21k_pre_16xb64_in1k_384px.py | 2 +- .../swinv2_base_w8_16xb64_in1k_256px.py | 16 +++++++++- .../swinv2_large_w12_8xb128_in21k_192px.py | 2 +- ...2_large_w16_in21k_pre_16xb64_in1k_256px.py | 2 +- ...2_large_w24_in21k_pre_16xb64_in1k_384px.py | 2 +- .../swinv2_small_w16_16xb64_in1k_256px.py | 2 +- .../swinv2_small_w8_16xb64_in1k_256px.py | 17 ++++++++++- .../swinv2_tiny_w16_16xb64_in1k_256px.py | 2 +- .../swinv2_tiny_w8_16xb64_in1k_256px.py | 17 ++++++++++- 19 files changed, 56 insertions(+), 141 deletions(-) delete mode 100644 mmpretrain/configs/_base_/models/swin_transformer_v2/base_256.py delete mode 100644 mmpretrain/configs/_base_/models/swin_transformer_v2/large_256.py delete mode 100644 mmpretrain/configs/_base_/models/swin_transformer_v2/large_384.py delete mode 100644 mmpretrain/configs/_base_/models/swin_transformer_v2/small_256.py delete mode 100644 mmpretrain/configs/_base_/models/swin_transformer_v2/tiny_256.py rename mmpretrain/configs/_base_/models/{swin_transformer_v2/base_384.py => swin_transformer_v2_base.py} (100%) diff --git a/mmpretrain/configs/_base_/models/swin_transformer_base.py b/mmpretrain/configs/_base_/models/swin_transformer_base.py index d747fa08..c73c254d 100644 --- a/mmpretrain/configs/_base_/models/swin_transformer_base.py +++ b/mmpretrain/configs/_base_/models/swin_transformer_base.py @@ -4,7 +4,6 @@ from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling, ImageClassifier, LinearClsHead, SwinTransformer) # model settings -# Only for evaluation model = dict( type=ImageClassifier, backbone=dict( diff --git a/mmpretrain/configs/_base_/models/swin_transformer_v2/base_256.py b/mmpretrain/configs/_base_/models/swin_transformer_v2/base_256.py deleted file mode 100644 index 9fcfffeb..00000000 --- a/mmpretrain/configs/_base_/models/swin_transformer_v2/base_256.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# This is a BETA new format config file, and the usage may change recently. -from mmengine.model import ConstantInit, TruncNormalInit - -from mmpretrain.models import (CutMix, GlobalAveragePooling, ImageClassifier, - LabelSmoothLoss, LinearClsHead, Mixup, - SwinTransformerV2) - -# model settings -model = dict( - type=ImageClassifier, - backbone=dict( - type=SwinTransformerV2, arch='base', img_size=256, drop_path_rate=0.5), - neck=dict(type=GlobalAveragePooling), - head=dict( - type=LinearClsHead, - num_classes=1000, - in_channels=1024, - init_cfg=None, # suppress the default init_cfg of LinearClsHead. 
- loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), - cal_acc=False), - init_cfg=[ - dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), - dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) - ], - train_cfg=dict( - augments=[dict(type=Mixup, alpha=0.8), - dict(type=CutMix, alpha=1.0)]), -) diff --git a/mmpretrain/configs/_base_/models/swin_transformer_v2/large_256.py b/mmpretrain/configs/_base_/models/swin_transformer_v2/large_256.py deleted file mode 100644 index da36e679..00000000 --- a/mmpretrain/configs/_base_/models/swin_transformer_v2/large_256.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# This is a BETA new format config file, and the usage may change recently. -from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling, - ImageClassifier, LinearClsHead, - SwinTransformerV2) - -# model settings -# Only for evaluation -model = dict( - type=ImageClassifier, - backbone=dict( - type=SwinTransformerV2, arch='large', img_size=256, - drop_path_rate=0.2), - neck=dict(type=GlobalAveragePooling), - head=dict( - type=LinearClsHead, - num_classes=1000, - in_channels=1536, - loss=dict(type=CrossEntropyLoss, loss_weight=1.0), - topk=(1, 5))) diff --git a/mmpretrain/configs/_base_/models/swin_transformer_v2/large_384.py b/mmpretrain/configs/_base_/models/swin_transformer_v2/large_384.py deleted file mode 100644 index 5e1323d5..00000000 --- a/mmpretrain/configs/_base_/models/swin_transformer_v2/large_384.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# This is a BETA new format config file, and the usage may change recently. -from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling, - ImageClassifier, LinearClsHead, - SwinTransformerV2) - -# model settings -# Only for evaluation -model = dict( - type=ImageClassifier, - backbone=dict( - type=SwinTransformerV2, arch='large', img_size=384, - drop_path_rate=0.2), - neck=dict(type=GlobalAveragePooling), - head=dict( - type=LinearClsHead, - num_classes=1000, - in_channels=1536, - loss=dict(type=CrossEntropyLoss, loss_weight=1.0), - topk=(1, 5))) diff --git a/mmpretrain/configs/_base_/models/swin_transformer_v2/small_256.py b/mmpretrain/configs/_base_/models/swin_transformer_v2/small_256.py deleted file mode 100644 index e747fd6a..00000000 --- a/mmpretrain/configs/_base_/models/swin_transformer_v2/small_256.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# This is a BETA new format config file, and the usage may change recently. -from mmengine.model import ConstantInit, TruncNormalInit - -from mmpretrain.models import (CutMix, GlobalAveragePooling, ImageClassifier, - LabelSmoothLoss, LinearClsHead, Mixup, - SwinTransformerV2) - -# model settings -model = dict( - type=ImageClassifier, - backbone=dict( - type=SwinTransformerV2, arch='small', img_size=256, - drop_path_rate=0.3), - neck=dict(type=GlobalAveragePooling), - head=dict( - type=LinearClsHead, - num_classes=1000, - in_channels=768, - init_cfg=None, # suppress the default init_cfg of LinearClsHead. - loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), - cal_acc=False), - init_cfg=[ - dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), - dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) 
- ], - train_cfg=dict( - augments=[dict(type=Mixup, alpha=0.8), - dict(type=CutMix, alpha=1.0)]), -) diff --git a/mmpretrain/configs/_base_/models/swin_transformer_v2/tiny_256.py b/mmpretrain/configs/_base_/models/swin_transformer_v2/tiny_256.py deleted file mode 100644 index 8d8bfacf..00000000 --- a/mmpretrain/configs/_base_/models/swin_transformer_v2/tiny_256.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# This is a BETA new format config file, and the usage may change recently. -from mmengine.model import ConstantInit, TruncNormalInit - -from mmpretrain.models import (CutMix, GlobalAveragePooling, ImageClassifier, - LabelSmoothLoss, LinearClsHead, Mixup, - SwinTransformerV2) - -# model settings -model = dict( - type=ImageClassifier, - backbone=dict( - type=SwinTransformerV2, arch='tiny', img_size=256, drop_path_rate=0.2), - neck=dict(type=GlobalAveragePooling), - head=dict( - type=LinearClsHead, - num_classes=1000, - in_channels=768, - init_cfg=None, # suppress the default init_cfg of LinearClsHead. - loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), - cal_acc=False), - init_cfg=[ - dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), - dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) - ], - train_cfg=dict( - augments=[dict(type=Mixup, alpha=0.8), - dict(type=CutMix, alpha=1.0)]), -) diff --git a/mmpretrain/configs/_base_/models/swin_transformer_v2/base_384.py b/mmpretrain/configs/_base_/models/swin_transformer_v2_base.py similarity index 100% rename from mmpretrain/configs/_base_/models/swin_transformer_v2/base_384.py rename to mmpretrain/configs/_base_/models/swin_transformer_v2_base.py diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w12_8xb128_in21k_192px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w12_8xb128_in21k_192px.py index 7ca933f8..79ad9f07 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w12_8xb128_in21k_192px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w12_8xb128_in21k_192px.py @@ -5,7 +5,7 @@ from mmengine.config import read_base with read_base(): from .._base_.datasets.imagenet21k_bs128 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer_v2.base_256 import * + from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * # model settings diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_16xb64_in1k_256px.py index 6df69c48..a10fc1e4 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_16xb64_in1k_256px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_16xb64_in1k_256px.py @@ -5,7 +5,7 @@ from mmengine.config import read_base with read_base(): from .._base_.datasets.imagenet_bs64_swin_256 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer_v2.base_256 import * + from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * model = dict(backbone=dict(window_size=[16, 16, 16, 8])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_in21k_pre_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_in21k_pre_16xb64_in1k_256px.py index f9f05216..d3dd0b35 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_in21k_pre_16xb64_in1k_256px.py +++ 
b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_in21k_pre_16xb64_in1k_256px.py @@ -7,7 +7,7 @@ from mmpretrain.models import ImageClassifier with read_base(): from .._base_.datasets.imagenet_bs64_swin_256 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer_v2.base_256 import * + from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * model = dict( diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w24_in21k_pre_16xb64_in1k_384px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w24_in21k_pre_16xb64_in1k_384px.py index 6538144f..e9ee34a4 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w24_in21k_pre_16xb64_in1k_384px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w24_in21k_pre_16xb64_in1k_384px.py @@ -7,7 +7,7 @@ from mmpretrain.models import ImageClassifier with read_base(): from .._base_.datasets.imagenet_bs64_swin_384 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer_v2.base_384 import * + from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * model = dict( diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w8_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w8_16xb64_in1k_256px.py index 34298ff6..d40144cb 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w8_16xb64_in1k_256px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w8_16xb64_in1k_256px.py @@ -1,9 +1,23 @@ # Copyright (c) OpenMMLab. All rights reserved. # This is a BETA new format config file, and the usage may change recently. from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, Mixup with read_base(): from .._base_.datasets.imagenet_bs64_swin_256 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer_v2.base_256 import * + from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model.update( + backbone=dict(img_size=256, drop_path_rate=0.5), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_large_w12_8xb128_in21k_192px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w12_8xb128_in21k_192px.py index 7ca933f8..79ad9f07 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_large_w12_8xb128_in21k_192px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w12_8xb128_in21k_192px.py @@ -5,7 +5,7 @@ from mmengine.config import read_base with read_base(): from .._base_.datasets.imagenet21k_bs128 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer_v2.base_256 import * + from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * # model settings diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_large_w16_in21k_pre_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w16_in21k_pre_16xb64_in1k_256px.py index bbfe9283..8990b7fc 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_large_w16_in21k_pre_16xb64_in1k_256px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w16_in21k_pre_16xb64_in1k_256px.py @@ -8,7 +8,7 @@ from mmpretrain.models import ImageClassifier with read_base(): from .._base_.datasets.imagenet_bs64_swin_256 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer_v2.large_256 import * + from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * model = dict( diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_large_w24_in21k_pre_16xb64_in1k_384px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w24_in21k_pre_16xb64_in1k_384px.py index a481c79d..7cb8b7c0 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_large_w24_in21k_pre_16xb64_in1k_384px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w24_in21k_pre_16xb64_in1k_384px.py @@ -8,7 +8,7 @@ from mmpretrain.models import ImageClassifier with read_base(): from .._base_.datasets.imagenet_bs64_swin_384 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer_v2.large_384 import * + from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * model = dict( diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_small_w16_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_small_w16_16xb64_in1k_256px.py index 8051f050..a10fc1e4 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_small_w16_16xb64_in1k_256px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_small_w16_16xb64_in1k_256px.py @@ -5,7 +5,7 @@ from mmengine.config import read_base with read_base(): from .._base_.datasets.imagenet_bs64_swin_256 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer_v2.small_256 import * + from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * model = dict(backbone=dict(window_size=[16, 16, 16, 8])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_small_w8_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_small_w8_16xb64_in1k_256px.py index d28ffd06..bfec3466 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_small_w8_16xb64_in1k_256px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_small_w8_16xb64_in1k_256px.py @@ -1,9 +1,24 @@ # Copyright (c) OpenMMLab. 
All rights reserved. # This is a BETA new format config file, and the usage may change recently. from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, Mixup with read_base(): from .._base_.datasets.imagenet_bs64_swin_256 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer_v2.small_256 import * + from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model.update( + backbone=dict(arch='small', img_size=256, drop_path_rate=0.3), + head=dict(in_channels=768), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w16_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w16_16xb64_in1k_256px.py index a95485da..a10fc1e4 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w16_16xb64_in1k_256px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w16_16xb64_in1k_256px.py @@ -5,7 +5,7 @@ from mmengine.config import read_base with read_base(): from .._base_.datasets.imagenet_bs64_swin_256 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer_v2.tiny_256 import * + from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * model = dict(backbone=dict(window_size=[16, 16, 16, 8])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w8_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w8_16xb64_in1k_256px.py index 59ba55c3..8cca2b38 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w8_16xb64_in1k_256px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w8_16xb64_in1k_256px.py @@ -1,9 +1,24 @@ # Copyright (c) OpenMMLab. All rights reserved. # This is a BETA new format config file, and the usage may change recently. from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, Mixup with read_base(): from .._base_.datasets.imagenet_bs64_swin_256 import * from .._base_.default_runtime import * - from .._base_.models.swin_transformer_v2.tiny_256 import * + from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model.update( + backbone=dict(arch='tiny', img_size=256, drop_path_rate=0.2), + head=dict(in_channels=768), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) From b0b4422736d069c6eaeb3e0e584ea8d42b9d4138 Mon Sep 17 00:00:00 2001 From: John Date: Tue, 5 Sep 2023 22:22:43 +0800 Subject: [PATCH 08/20] fix a redundant --- .../configs/swin_transformer/swin_large_8xb8_cub_384px.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py b/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py index 779daaa3..ef2559a8 100644 --- a/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py +++ b/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py @@ -16,11 +16,6 @@ with read_base(): # model settings checkpoint = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k-384px.pth' # noqa -model.update( - backbone=dict(arch='large'), - head=dict(in_channels=1536), -) - model = dict( type=ImageClassifier, backbone=dict( From 7734f073e42d6f284b7772aa1ac837970c1b6b49 Mon Sep 17 00:00:00 2001 From: John Date: Wed, 6 Sep 2023 23:56:03 +0800 Subject: [PATCH 09/20] set arch etc --- .../swin_large_8xb8_cub_384px.py | 6 +++--- .../swinv2_base_w12_8xb128_in21k_192px.py | 16 +++++++++++++--- .../swinv2_base_w16_16xb64_in1k_256px.py | 15 ++++++++++++++- ...v2_base_w16_in21k_pre_16xb64_in1k_256px.py | 18 +++++++++++++----- ...v2_base_w24_in21k_pre_16xb64_in1k_384px.py | 11 +++-------- .../swinv2_large_w12_8xb128_in21k_192px.py | 16 +++++++++++++--- ...2_large_w16_in21k_pre_16xb64_in1k_256px.py | 16 +++++++++++----- ...2_large_w24_in21k_pre_16xb64_in1k_384px.py | 12 ++++++++---- .../swinv2_small_w16_16xb64_in1k_256px.py | 19 ++++++++++++++++++- .../swinv2_tiny_w16_16xb64_in1k_256px.py | 19 ++++++++++++++++++- 10 files changed, 114 insertions(+), 34 deletions(-) diff --git a/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py b/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py index ef2559a8..2003cd3a 100644 --- a/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py +++ b/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py @@ -16,12 +16,12 @@ with read_base(): # model settings checkpoint = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k-384px.pth' # noqa -model = dict( - type=ImageClassifier, +model.update( backbone=dict( + arch='large', init_cfg=dict( type=PretrainedInit, checkpoint=checkpoint, prefix='backbone')), - head=dict(num_classes=200, )) + head=dict(num_classes=200, in_channels=1536)) # schedule settings optim_wrapper = dict( diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w12_8xb128_in21k_192px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w12_8xb128_in21k_192px.py index 79ad9f07..1ecc4363 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w12_8xb128_in21k_192px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w12_8xb128_in21k_192px.py @@ -1,6 +1,9 @@ # Copyright (c) OpenMMLab. All rights reserved. # This is a BETA new format config file, and the usage may change recently. 
from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, Mixup with read_base(): from .._base_.datasets.imagenet21k_bs128 import * @@ -9,10 +12,17 @@ with read_base(): from .._base_.schedules.imagenet_bs1024_adamw_swin import * # model settings -model = dict( - backbone=dict(img_size=192, window_size=[12, 12, 12, 6]), +model.update( + backbone=dict( + img_size=192, drop_path_rate=0.5, window_size=[12, 12, 12, 6]), head=dict(num_classes=21841), -) + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) # dataset settings data_preprocessor = dict(num_classes=21841) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_16xb64_in1k_256px.py index a10fc1e4..103afb42 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_16xb64_in1k_256px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_16xb64_in1k_256px.py @@ -1,6 +1,9 @@ # Copyright (c) OpenMMLab. All rights reserved. # This is a BETA new format config file, and the usage may change recently. from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, Mixup with read_base(): from .._base_.datasets.imagenet_bs64_swin_256 import * @@ -8,4 +11,14 @@ with read_base(): from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * -model = dict(backbone=dict(window_size=[16, 16, 16, 8])) +# model settings +model.update( + backbone=dict( + img_size=256, drop_path_rate=0.5, window_size=[16, 16, 16, 8]), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_in21k_pre_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_in21k_pre_16xb64_in1k_256px.py index d3dd0b35..6588f50f 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_in21k_pre_16xb64_in1k_256px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_in21k_pre_16xb64_in1k_256px.py @@ -1,8 +1,9 @@ # Copyright (c) OpenMMLab. All rights reserved. # This is a BETA new format config file, and the usage may change recently. from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit -from mmpretrain.models import ImageClassifier +from mmpretrain.models import CutMix, Mixup with read_base(): from .._base_.datasets.imagenet_bs64_swin_256 import * @@ -10,9 +11,16 @@ with read_base(): from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * -model = dict( - type=ImageClassifier, +# model settings +model.update( backbone=dict( + img_size=256, window_size=[16, 16, 16, 8], - drop_path_rate=0.2, - pretrained_window_sizes=[12, 12, 12, 6])) + pretrained_window_sizes=[12, 12, 12, 6]), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w24_in21k_pre_16xb64_in1k_384px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w24_in21k_pre_16xb64_in1k_384px.py index e9ee34a4..118c085e 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w24_in21k_pre_16xb64_in1k_384px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w24_in21k_pre_16xb64_in1k_384px.py @@ -2,18 +2,13 @@ # This is a BETA new format config file, and the usage may change recently. from mmengine.config import read_base -from mmpretrain.models import ImageClassifier - with read_base(): from .._base_.datasets.imagenet_bs64_swin_384 import * from .._base_.default_runtime import * from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * -model = dict( - type=ImageClassifier, +# model settings +model.update( backbone=dict( - img_size=384, - window_size=[24, 24, 24, 12], - drop_path_rate=0.2, - pretrained_window_sizes=[12, 12, 12, 6])) + window_size=[24, 24, 24, 12], pretrained_window_sizes=[12, 12, 12, 6])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_large_w12_8xb128_in21k_192px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w12_8xb128_in21k_192px.py index 79ad9f07..1ecc4363 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_large_w12_8xb128_in21k_192px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w12_8xb128_in21k_192px.py @@ -1,6 +1,9 @@ # Copyright (c) OpenMMLab. All rights reserved. # This is a BETA new format config file, and the usage may change recently. from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, Mixup with read_base(): from .._base_.datasets.imagenet21k_bs128 import * @@ -9,10 +12,17 @@ with read_base(): from .._base_.schedules.imagenet_bs1024_adamw_swin import * # model settings -model = dict( - backbone=dict(img_size=192, window_size=[12, 12, 12, 6]), +model.update( + backbone=dict( + img_size=192, drop_path_rate=0.5, window_size=[12, 12, 12, 6]), head=dict(num_classes=21841), -) + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) # dataset settings data_preprocessor = dict(num_classes=21841) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_large_w16_in21k_pre_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w16_in21k_pre_16xb64_in1k_256px.py index 8990b7fc..0a1b59df 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_large_w16_in21k_pre_16xb64_in1k_256px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w16_in21k_pre_16xb64_in1k_256px.py @@ -3,7 +3,7 @@ # This is a BETA new format config file, and the usage may change recently. 
from mmengine.config import read_base -from mmpretrain.models import ImageClassifier +from mmpretrain.models import CrossEntropyLoss with read_base(): from .._base_.datasets.imagenet_bs64_swin_256 import * @@ -11,8 +11,14 @@ with read_base(): from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * -model = dict( - type=ImageClassifier, +# model settings +model.update( backbone=dict( - window_size=[16, 16, 16, 8], pretrained_window_sizes=[12, 12, 12, 6]), -) + arch='large', + img_size=256, + window_size=[16, 16, 16, 8], + pretrained_window_sizes=[12, 12, 12, 6]), + head=dict( + in_channels=1536, + loss=dict(type=CrossEntropyLoss, loss_weight=1.0), + topk=(1, 5))) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_large_w24_in21k_pre_16xb64_in1k_384px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w24_in21k_pre_16xb64_in1k_384px.py index 7cb8b7c0..b20bcead 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_large_w24_in21k_pre_16xb64_in1k_384px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w24_in21k_pre_16xb64_in1k_384px.py @@ -3,7 +3,7 @@ # This is a BETA new format config file, and the usage may change recently. from mmengine.config import read_base -from mmpretrain.models import ImageClassifier +from mmpretrain.models import CrossEntropyLoss with read_base(): from .._base_.datasets.imagenet_bs64_swin_384 import * @@ -11,10 +11,14 @@ with read_base(): from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * -model = dict( - type=ImageClassifier, +# model settings +model.update( backbone=dict( + arch='large', img_size=384, window_size=[24, 24, 24, 12], pretrained_window_sizes=[12, 12, 12, 6]), -) + head=dict( + in_channels=1536, + loss=dict(type=CrossEntropyLoss, loss_weight=1.0), + topk=(1, 5))) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_small_w16_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_small_w16_16xb64_in1k_256px.py index a10fc1e4..dfd15c31 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_small_w16_16xb64_in1k_256px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_small_w16_16xb64_in1k_256px.py @@ -1,6 +1,9 @@ # Copyright (c) OpenMMLab. All rights reserved. # This is a BETA new format config file, and the usage may change recently. from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, Mixup with read_base(): from .._base_.datasets.imagenet_bs64_swin_256 import * @@ -8,4 +11,18 @@ with read_base(): from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * -model = dict(backbone=dict(window_size=[16, 16, 16, 8])) +# model settings +model.update( + backbone=dict( + arch='small', + img_size=256, + drop_path_rate=0.3, + window_size=[16, 16, 16, 8]), + head=dict(in_channels=768), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w16_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w16_16xb64_in1k_256px.py index a10fc1e4..f2fa1609 100644 --- a/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w16_16xb64_in1k_256px.py +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w16_16xb64_in1k_256px.py @@ -1,6 +1,9 @@ # Copyright (c) OpenMMLab. All rights reserved. # This is a BETA new format config file, and the usage may change recently. from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, Mixup with read_base(): from .._base_.datasets.imagenet_bs64_swin_256 import * @@ -8,4 +11,18 @@ with read_base(): from .._base_.models.swin_transformer_v2_base import * from .._base_.schedules.imagenet_bs1024_adamw_swin import * -model = dict(backbone=dict(window_size=[16, 16, 16, 8])) +# model settings +model.update( + backbone=dict( + arch='tiny', + img_size=256, + drop_path_rate=0.2, + window_size=[16, 16, 16, 8]), + head=dict(in_channels=768), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) From 06bb586eb715626f19e97dfa8b632f104ba47d2b Mon Sep 17 00:00:00 2001 From: mzr1996 Date: Sun, 8 Oct 2023 15:44:37 +0800 Subject: [PATCH 10/20] [Fix] Fix pipeline bug in image retrieval inferencer --- mmpretrain/apis/image_retrieval.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mmpretrain/apis/image_retrieval.py b/mmpretrain/apis/image_retrieval.py index deae1de7..27919b20 100644 --- a/mmpretrain/apis/image_retrieval.py +++ b/mmpretrain/apis/image_retrieval.py @@ -108,6 +108,7 @@ class ImageRetrievalInferencer(BaseInferencer): # A config of dataset from mmpretrain.registry import DATASETS test_pipeline = [dict(type='LoadImageFromFile'), self.pipeline] + prototype.setdefault('pipeline', test_pipeline) dataset = DATASETS.build(prototype) dataloader = build_dataloader(dataset) elif isinstance(prototype, DataLoader): From 3bcf7e2d6ed1d4c215dcf5e404dd6da52e8f0e3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=A3=9E=E9=A3=9E?= <102729089+ASHORE1225@users.noreply.github.com> Date: Sun, 8 Oct 2023 15:46:47 +0800 Subject: [PATCH 11/20] [CodeCamp2023-341] Supplement the multi-modality dataset documentation - COCO Retrieval MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- mmpretrain/datasets/coco_retrieval.py | 75 ++++++++++++++++++++++++++- 1 file changed, 73 insertions(+), 2 deletions(-) diff --git a/mmpretrain/datasets/coco_retrieval.py b/mmpretrain/datasets/coco_retrieval.py index 60d1586a..be8a0bcb 100644 --- a/mmpretrain/datasets/coco_retrieval.py +++ b/mmpretrain/datasets/coco_retrieval.py @@ -1,18 +1,45 @@ # Copyright (c) OpenMMLab. All rights reserved.
import json +import os.path as osp from collections import OrderedDict -from typing import List +from os import PathLike +from typing import List, Sequence, Union from mmengine import get_file_backend -from mmpretrain.registry import DATASETS +from mmpretrain.registry import DATASETS, TRANSFORMS from .base_dataset import BaseDataset +def expanduser(data_prefix): + if isinstance(data_prefix, (str, PathLike)): + return osp.expanduser(data_prefix) + else: + return data_prefix + + @DATASETS.register_module() class COCORetrieval(BaseDataset): """COCO Retrieval dataset. + COCO (Common Objects in Context): The COCO dataset contains more than + 330K images, each of which has approximately 5 descriptive annotations. + This dataset was released in collaboration between Microsoft and Carnegie + Mellon University. + + COCO_2014 dataset directory: :: + + COCO_2014 + ├── val2014 + ├── train2014 + ├── annotations + ├── instances_train2014.json + ├── instances_val2014.json + ├── person_keypoints_train2014.json + ├── person_keypoints_val2014.json + ├── captions_train2014.json + ├── captions_val2014.json + Args: ann_file (str): Annotation file path. test_mode (bool): Whether dataset is used for evaluation. This will decide the annotation format in data list annotations. Defaults to False. data_root (str): The root directory for ``data_prefix`` and ``ann_file``. Defaults to ''. data_prefix (str | dict): Prefix for training data. Defaults to ''. pipeline (Sequence): Processing pipeline. Defaults to an empty tuple. **kwargs: Other keyword arguments in :class:`BaseDataset`. + + Examples: + >>> from mmpretrain.datasets import COCORetrieval + >>> train_dataset=COCORetrieval(data_root='coco2014/') + >>> train_dataset + Dataset COCORetrieval + Number of samples: 414113 + Annotation file: /coco2014/annotations/captions_train2014.json + Prefix of images: /coco2014/ + >>> from mmpretrain.datasets import COCORetrieval + >>> val_dataset = COCORetrieval(data_root='coco2014/') + >>> val_dataset + Dataset COCORetrieval + Number of samples: 202654 + Annotation file: /coco2014/annotations/captions_val2014.json + Prefix of images: /coco2014/ """ + def __init__(self, + ann_file: str, + test_mode: bool = False, + data_prefix: Union[str, dict] = '', + data_root: str = '', + pipeline: Sequence = (), + **kwargs): + + if isinstance(data_prefix, str): + data_prefix = dict(img_path=expanduser(data_prefix)) + + ann_file = expanduser(ann_file) + transforms = [] + for transform in pipeline: + if isinstance(transform, dict): + transforms.append(TRANSFORMS.build(transform)) + else: + transforms.append(transform) + + super().__init__( + data_root=data_root, + data_prefix=data_prefix, + test_mode=test_mode, + pipeline=transforms, + ann_file=ann_file, + **kwargs, + ) + def load_data_list(self) -> List[dict]: """Load data list.""" # get file backend From b0a792eb08f7857e06977969b61354733d082d33 Mon Sep 17 00:00:00 2001 From: mzr1996 Date: Wed, 11 Oct 2023 11:11:59 +0800 Subject: [PATCH 12/20] Update OFA to be compatible with the latest huggingface. --- mmpretrain/models/multimodal/ofa/ofa_modules.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mmpretrain/models/multimodal/ofa/ofa_modules.py b/mmpretrain/models/multimodal/ofa/ofa_modules.py index 1c79049b..ef5c8533 100644 --- a/mmpretrain/models/multimodal/ofa/ofa_modules.py +++ b/mmpretrain/models/multimodal/ofa/ofa_modules.py @@ -1301,6 +1301,7 @@ class OFAEncoderDecoder(BaseModule, GenerationMixin): Defaults to an empty dict. init_cfg (dict, optional): The initialization config. Defaults to None.
""" + base_model_prefix = '' def __init__( self, From 4849324629994aa719d09d27f6b851986fda7044 Mon Sep 17 00:00:00 2001 From: mzr1996 Date: Wed, 11 Oct 2023 11:12:32 +0800 Subject: [PATCH 13/20] Update train.py to compat with new config --- tools/train.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tools/train.py b/tools/train.py index 84c1eec9..89c8548f 100644 --- a/tools/train.py +++ b/tools/train.py @@ -91,10 +91,6 @@ def merge_args(cfg, args): # enable automatic-mixed-precision training if args.amp is True: - optim_wrapper = cfg.optim_wrapper.get('type', 'OptimWrapper') - assert optim_wrapper in ['OptimWrapper', 'AmpOptimWrapper'], \ - '`--amp` is not supported custom optimizer wrapper type ' \ - f'`{optim_wrapper}.' cfg.optim_wrapper.type = 'AmpOptimWrapper' cfg.optim_wrapper.setdefault('loss_scale', 'dynamic') From c0766519b1094dc5c74ef661d41d0aa0db5639d7 Mon Sep 17 00:00:00 2001 From: hmtbgc <32740258+hmtbgc@users.noreply.github.com> Date: Thu, 12 Oct 2023 10:36:17 +0800 Subject: [PATCH 14/20] [Feature] Add minigpt4 gradio demo and training script. (#1758) * Add minigpt4 gradio demo * update minigpt4 demo * update minigpt4 demo (inference with float16) * update minigpt4 and some dependent files * add minigpt4 dataset for training * add training script for minigpt4 * restore files deleted by mistake * fix an error * remove useless modification * provide command line arguments for minigpt4 gradio demo and update some comments * update code * Update minigpt-4 readme --------- Co-authored-by: mzr1996 --- configs/minigpt4/README.md | 7 +- configs/minigpt4/metafile.yml | 13 +- .../minigpt4/minigpt-4_baichuan-7b_caption.py | 190 ++++++++++++++++++ .../minigpt4/minigpt-4_vicuna-7b_caption.py | 26 ++- mmpretrain/datasets/__init__.py | 4 +- mmpretrain/datasets/minigpt4_dataset.py | 79 ++++++++ .../models/multimodal/minigpt4/minigpt4.py | 101 ++++++---- projects/gradio_demo/conversation.py | 137 +++++++++++++ projects/gradio_demo/minigpt4_demo.py | 144 +++++++++++++ 9 files changed, 651 insertions(+), 50 deletions(-) create mode 100644 configs/minigpt4/minigpt-4_baichuan-7b_caption.py create mode 100644 mmpretrain/datasets/minigpt4_dataset.py create mode 100644 projects/gradio_demo/conversation.py create mode 100644 projects/gradio_demo/minigpt4_demo.py diff --git a/configs/minigpt4/README.md b/configs/minigpt4/README.md index 01e53954..23666fc9 100644 --- a/configs/minigpt4/README.md +++ b/configs/minigpt4/README.md @@ -34,9 +34,10 @@ For Vicuna model, please refer to [MiniGPT-4 page](https://github.com/Vision-CAI ### Pretrained models -| Model | Params (M) | Flops (G) | Config | Download | -| :------------------------------ | :--------: | :-------: | :--------------------------------------: | :------------------------------------------------------------------------------------------------------------: | -| `minigpt-4_vicuna-7b_caption`\* | 8121.32 | N/A | [config](minigpt-4_vicuna-7b_caption.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/minigpt4/minigpt-4_linear-projection_20230615-714b5f52.pth) | +| Model | Params (M) | Flops (G) | Config | Download | +| :------------------------------ | :--------: | :-------: | :----------------------------------------: | :----------------------------------------------------------------------------------------------------------: | +| `minigpt-4_baichuan-7b_caption` | 8094.77 | N/A | [config](minigpt-4_baichuan-7b_caption.py) | 
[model](https://download.openmmlab.com/mmclassification/v1/minigpt4/minigpt-4_linear_baichuan7b_20231011-5dca7ed6.pth) | +| `minigpt-4_vicuna-7b_caption`\* | 8121.32 | N/A | [config](minigpt-4_vicuna-7b_caption.py) | [model](https://download.openmmlab.com/mmclassification/v1/minigpt4/minigpt-4_linear_vicuna7b_20230615-714b5f52.pth) | *Models with * are converted from the [official repo](https://github.com/Vision-CAIR/MiniGPT-4/tree/main). The config files of these models are only for inference. We haven't reproduce the training results.* diff --git a/configs/minigpt4/metafile.yml b/configs/minigpt4/metafile.yml index a7879d98..f70cc9ba 100644 --- a/configs/minigpt4/metafile.yml +++ b/configs/minigpt4/metafile.yml @@ -19,8 +19,19 @@ Models: - Task: Image Caption Dataset: COCO Metrics: null - Weights: https://download.openmmlab.com/mmpretrain/v1.0/minigpt4/minigpt-4_linear-projection_20230615-714b5f52.pth + Weights: https://download.openmmlab.com/mmclassification/v1/minigpt4/minigpt-4_linear_vicuna7b_20230615-714b5f52.pth Config: configs/minigpt4/minigpt-4_vicuna-7b_caption.py Converted From: Weights: https://github.com/Vision-CAIR/MiniGPT-4/tree/main Code: https://github.com/Vision-CAIR/MiniGPT-4/tree/main + - Name: minigpt-4_baichuan-7b_caption + Metadata: + FLOPs: null + Parameters: 8094769024 + In Collection: MiniGPT4 + Results: + - Task: Image Caption + Dataset: COCO + Metrics: null + Weights: https://download.openmmlab.com/mmclassification/v1/minigpt4/minigpt-4_linear_baichuan7b_20231011-5dca7ed6.pth + Config: configs/minigpt4/minigpt-4_baichuan-7b_caption.py diff --git a/configs/minigpt4/minigpt-4_baichuan-7b_caption.py b/configs/minigpt4/minigpt-4_baichuan-7b_caption.py new file mode 100644 index 00000000..7e610a09 --- /dev/null +++ b/configs/minigpt4/minigpt-4_baichuan-7b_caption.py @@ -0,0 +1,190 @@ +_base_ = [ + '../_base_/default_runtime.py', +] + +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(224, 224), + interpolation='bicubic', + backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='CleanCaption', + keys='chat_content', + remove_chars='', + lowercase=False), + dict( + type='PackInputs', + algorithm_keys=['chat_content', 'lang'], + meta_keys=['image_id']), +] + +train_dataloader = dict( + batch_size=2, + num_workers=4, + dataset=dict( + type='MiniGPT4Dataset', + data_root='YOUR_DATA_DIRECTORY', + ann_file='YOUR_DATA_FILE', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + drop_last=False, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(224, 224), + interpolation='bicubic', + backend='pillow'), + dict(type='PackInputs', meta_keys=['image_id']), +] + +test_evaluator = dict( + type='COCOCaption', + ann_file='data/coco/annotations/coco_karpathy_val_gt.json', +) + +test_dataloader = dict( + batch_size=1, + dataset=dict( + type='COCOCaption', + data_root='data/coco', + ann_file='annotations/coco_karpathy_val.json', + pipeline=test_pipeline)) + +# model settings +model = dict( + type='MiniGPT4', + vision_encoder=dict( + type='BEiTViT', + # eva-g without the final layer + arch=dict( + embed_dims=1408, + num_layers=39, + num_heads=16, + feedforward_channels=6144, + ), + img_size=224, + 
patch_size=14, + layer_scale_init_value=0.0, + frozen_stages=39, + use_abs_pos_emb=True, + use_rel_pos_bias=False, + final_norm=False, + use_shared_rel_pos_bias=False, + out_type='raw', + pretrained= # noqa + 'https://download.openmmlab.com/mmpretrain/v1.0/minigpt4/minigpt-4_eva-g-p14_20230615-e908c021.pth' # noqa + ), + q_former_model=dict( + type='Qformer', + model_style='bert-base-uncased', + vision_model_width=1408, + add_cross_attention=True, + cross_attention_freq=2, + num_query_token=32, + pretrained= # noqa + 'https://download.openmmlab.com/mmpretrain/v1.0/minigpt4/minigpt-4_qformer_20230615-1dfa889c.pth' # noqa + ), + lang_encoder=dict( + type='AutoModelForCausalLM', + name_or_path='baichuan-inc/baichuan-7B', + trust_remote_code=True), + tokenizer=dict( + type='AutoTokenizer', + name_or_path='baichuan-inc/baichuan-7B', + trust_remote_code=True), + task='caption', + prompt_template=dict([('en', '###Ask: {} ###Answer: '), + ('zh', '###问:{} ###答:')]), + raw_prompts=dict([ + ('en', [(' ' + 'Describe this image in detail.'), + (' ' + 'Take a look at this image and describe what you notice.'), + (' ' + 'Please provide a detailed description of the picture.'), + (' ' + 'Could you describe the contents of this image for me?')]), + ('zh', [(' ' + '详细描述这张图片。'), (' ' + '浏览这张图片并描述你注意到什么。'), + (' ' + '请对这张图片进行详细的描述。'), + (' ' + '你能为我描述这张图片的内容吗?')]) + ]), + max_txt_len=160, + end_sym='###') + +strategy = dict( + type='DeepSpeedStrategy', + fp16=dict( + enabled=True, + auto_cast=False, + fp16_master_weights_and_grads=False, + loss_scale=0, + loss_scale_window=1000, + hysteresis=1, + min_loss_scale=1, + initial_scale_power=16, + ), + inputs_to_half=[0], + zero_optimization=dict( + stage=2, + allgather_partitions=True, + allgather_bucket_size=2e8, + reduce_scatter=True, + reduce_bucket_size='auto', + overlap_comm=True, + contiguous_gradients=True, + ), +) + +# schedule settings +optim_wrapper = dict( + type='DeepSpeedOptimWrapper', + optimizer=dict(type='AdamW', lr=1e-3, weight_decay=0.05)) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-3 / 500, + by_epoch=False, + begin=0, + end=500, + ), + dict( + type='CosineAnnealingLR', + eta_min=2e-4, + by_epoch=False, + begin=500, + ), +] + +train_cfg = dict(by_epoch=True, max_epochs=6) +test_cfg = dict() + +runner_type = 'FlexibleRunner' + +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + interval=1, + by_epoch=True, + save_last=True, + max_keep_ckpts=1, + )) diff --git a/configs/minigpt4/minigpt-4_vicuna-7b_caption.py b/configs/minigpt4/minigpt-4_vicuna-7b_caption.py index 704760af..f468e2d8 100644 --- a/configs/minigpt4/minigpt-4_vicuna-7b_caption.py +++ b/configs/minigpt4/minigpt-4_vicuna-7b_caption.py @@ -55,13 +55,25 @@ model = dict( type='AutoModelForCausalLM', name_or_path='YOUR_PATH_TO_VICUNA'), tokenizer=dict(type='LlamaTokenizer', name_or_path='YOUR_PATH_TO_VICUNA'), task='caption', - prompt_template='###Human: {} ###Assistant: ', - raw_prompts=[ - ' Describe this image in detail.', - ' Take a look at this image and describe what you notice.', # noqa - ' Please provide a detailed description of the picture.', # noqa - ' Could you describe the contents of this image for me?', # noqa - ], + prompt_template=dict([('en', '###Ask: {} ###Answer: '), + ('zh', '###问:{} ###答:')]), + raw_prompts=dict([ + ('en', [(' ' + 'Describe this image in detail.'), + (' ' + 'Take a look at this image and describe what you notice.'), + (' ' + 'Please provide a detailed description of the picture.'), + (' ' + 'Could you describe 
the contents of this image for me?')]), + ('zh', [(' ' + '详细描述这张图片。'), (' ' + '浏览这张图片并描述你注意到什么。'), + (' ' + '请对这张图片进行详细的描述。'), + (' ' + '你能为我描述这张图片的内容吗?')]) + ]), max_txt_len=160, end_sym='###') diff --git a/mmpretrain/datasets/__init__.py b/mmpretrain/datasets/__init__.py index 29753d70..e621e157 100644 --- a/mmpretrain/datasets/__init__.py +++ b/mmpretrain/datasets/__init__.py @@ -43,6 +43,7 @@ if WITH_MULTIMODAL: from .gqa_dataset import GQA from .iconqa import IconQA from .infographic_vqa import InfographicVQA + from .minigpt4_dataset import MiniGPT4Dataset from .nocaps import NoCaps from .ocr_vqa import OCRVQA from .refcoco import RefCOCO @@ -56,5 +57,6 @@ if WITH_MULTIMODAL: 'COCOCaption', 'COCORetrieval', 'COCOVQA', 'FlamingoEvalCOCOCaption', 'FlamingoEvalCOCOVQA', 'Flickr30kCaption', 'Flickr30kRetrieval', 'RefCOCO', 'VisualGenomeQA', 'ScienceQA', 'NoCaps', 'GQA', 'TextVQA', - 'VSR', 'VizWiz', 'OCRVQA', 'InfographicVQA', 'IconQA' + 'VSR', 'VizWiz', 'OCRVQA', 'InfographicVQA', 'IconQA', + 'MiniGPT4Dataset' ]) diff --git a/mmpretrain/datasets/minigpt4_dataset.py b/mmpretrain/datasets/minigpt4_dataset.py new file mode 100644 index 00000000..e14e5c35 --- /dev/null +++ b/mmpretrain/datasets/minigpt4_dataset.py @@ -0,0 +1,79 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import mmengine +from mmengine.dataset import BaseDataset +from mmengine.fileio import get_file_backend + +from mmpretrain.registry import DATASETS + + +@DATASETS.register_module() +class MiniGPT4Dataset(BaseDataset): + """Dataset for training MiniGPT4. + + MiniGPT4 dataset directory: + + minigpt4_dataset + ├── image + │ ├── id0.jpg + │ │── id1.jpg + │ │── id2.jpg + │ └── ... + └── conversation_data.json + + The structure of conversation_data.json: + + [ + // English data + { + "id": str(id0), + "conversation": "###Ask: [Ask content] + ###Answer: [Answer content]" + }, + + // Chinese data + { + "id": str(id1), + "conversation": "###问: [Ask content] + ###答:[Answer content]" + }, + + ... + ] + + Args: + data_root (str): The root directory for ``ann_file`` and ``image``. + ann_file (str): Conversation file path. + **kwargs: Other keyword arguments in :class:`BaseDataset`. + """ + + def load_data_list(self) -> List[dict]: + file_backend = get_file_backend(self.data_root) + conversation_path = file_backend.join_path(self.data_root, + self.ann_file) + conversation = mmengine.load(conversation_path) + img_ids = {} + n = 0 + for conv in conversation: + img_id = conv['id'] + if img_id not in img_ids.keys(): + img_ids[img_id] = n + n += 1 + + img_root = file_backend.join_path(self.data_root, 'image') + data_list = [] + for conv in conversation: + img_file = '{}.jpg'.format(conv['id']) + chat_content = conv['conversation'] + lang = 'en' if chat_content.startswith('###Ask: ') else 'zh' + data_info = { + 'image_id': img_ids[conv['id']], + 'img_path': file_backend.join_path(img_root, img_file), + 'chat_content': chat_content, + 'lang': lang, + } + + data_list.append(data_info) + + return data_list diff --git a/mmpretrain/models/multimodal/minigpt4/minigpt4.py b/mmpretrain/models/multimodal/minigpt4/minigpt4.py index eccbb27e..d25d0b6b 100644 --- a/mmpretrain/models/multimodal/minigpt4/minigpt4.py +++ b/mmpretrain/models/multimodal/minigpt4/minigpt4.py @@ -31,12 +31,12 @@ class MiniGPT4(BaseModel): True. num_query_token (int): Number of query tokens of Qformer. Defaults to 32. - prompt_template (str): Prompt template of the model. Defaults to - '###Human: {} ###Assistant: '. 
- raw_prompts (list): Prompts for training. Defaults to None. + prompt_template (dict): Multi-language prompt template of the model. Defaults to dict([ ('en', '###Ask: {} ###Answer: '), + ('zh', '###问:{} ###答:')]) + raw_prompts (dict): Prompts for training. Defaults to dict(). max_txt_len (int): Max token length while doing tokenization. Defaults to 32. - end_sym (str): Ended symbol of the sequence. Defaults to '\\n'. + end_sym (str): Ended symbol of the sequence. Defaults to '###'. generation_cfg (dict): The config of text generation. Defaults to dict(). data_preprocessor (:obj:`BaseDataPreprocessor`): Used for @@ -54,10 +54,12 @@ class MiniGPT4(BaseModel): freeze_vit: bool = True, freeze_q_former: bool = True, num_query_token: int = 32, - prompt_template: str = '###Human: {} ###Assistant: ', - raw_prompts: Optional[list] = None, + prompt_template: dict = dict([('en', + '###Ask: {} ###Answer: '), + ('zh', '###问:{} ###答:')]), + raw_prompts: dict = dict(), max_txt_len: int = 32, - end_sym: str = '\n', + end_sym: str = '###', generation_cfg: dict = dict(), data_preprocessor: Optional[dict] = None, init_cfg: Optional[dict] = None): @@ -135,16 +137,23 @@ class MiniGPT4(BaseModel): self.end_token_id = self.llama_tokenizer.encode(end_sym)[-1] # set prompts - if raw_prompts is not None: - filted_prompts = [ - raw_prompt for raw_prompt in raw_prompts + self.en_prompt_list, self.zh_prompt_list = [], [] + if raw_prompts.get('en') is not None: + en_filted_prompts = [ + raw_prompt for raw_prompt in raw_prompts['en'] if '' in raw_prompt ] - self.prompt_list = [ - prompt_template.format(p) for p in filted_prompts + self.en_prompt_list = [ + prompt_template['en'].format(p) for p in en_filted_prompts + ] + if raw_prompts.get('zh') is not None: + zh_filted_prompts = [ + raw_prompt for raw_prompt in raw_prompts['zh'] + if '' in raw_prompt + ] + self.zh_prompt_list = [ + prompt_template['zh'].format(p) for p in zh_filted_prompts ] - else: - self.prompt_list = [] # update generation configs self.generation_cfg = dict( @@ -153,7 +162,7 @@ class MiniGPT4(BaseModel): do_sample=True, min_length=1, top_p=0.9, - repetition_penalty=1.0, + repetition_penalty=1.1, length_penalty=1.0, temperature=1.0) self.generation_cfg.update(**generation_cfg) @@ -161,6 +170,10 @@ class MiniGPT4(BaseModel): if hasattr(self, 'register_load_state_dict_post_hook'): self.register_load_state_dict_post_hook(self._load_llama_proj_hook) + def half(self): + self.llama_model = self.llama_model.half() + return self + def encode_img(self, images: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """The function to encode the images.""" @@ -184,33 +197,39 @@ class MiniGPT4(BaseModel): return inputs_llama, atts_llama def prompt_wrap(self, img_embeds: torch.Tensor, atts_img: torch.Tensor, - prompt: str) -> Tuple[torch.Tensor, torch.Tensor]: + prompt: List[str]) -> Tuple[torch.Tensor, torch.Tensor]: """The function to wrap the image and prompt. - Currently, the function only supports applying one prompt to all input - images in the one batch. + Make sure that len(prompt) == img_embeds.shape[0]. Args: img_embeds (torch.Tensor): The embedding of the input images. atts_img (torch.Tensor): Attention map of the image embeddings. - prompt (str): The prompt of the batch data. + prompt (List[str]): The prompt of the batch data. Returns: Tuple[torch.Tensor, torch.Tensor]: The embedding and attention map. 
""" - if prompt: - batch_size = img_embeds.shape[0] - p_before, p_after = prompt.split('') + if len(prompt) > 0: + p_before_list, p_after_list = [], [] + for pro in prompt: + p_before, p_after = pro.split('') + p_before_list.append(p_before) + p_after_list.append(p_after) p_before_tokens = self.llama_tokenizer( - p_before, return_tensors='pt', + p_before_list, + return_tensors='pt', + padding='longest', add_special_tokens=False).to(img_embeds.device) p_after_tokens = self.llama_tokenizer( - p_after, return_tensors='pt', + p_after_list, + return_tensors='pt', + padding='longest', add_special_tokens=False).to(img_embeds.device) p_before_embeds = self.llama_model.model.embed_tokens( - p_before_tokens.input_ids).expand(batch_size, -1, -1) + p_before_tokens.input_ids) p_after_embeds = self.llama_model.model.embed_tokens( - p_after_tokens.input_ids).expand(batch_size, -1, -1) + p_after_tokens.input_ids) wrapped_img_embeds = torch.cat( [p_before_embeds, img_embeds, p_after_embeds], dim=1) wrapped_atts_img = atts_img[:, :1].expand( @@ -234,17 +253,22 @@ class MiniGPT4(BaseModel): """ img_embeds, atts_img = self.encode_img(images) - if self.task == 'caption' and self.prompt_list: - prompt = random.choice(self.prompt_list) - img_embeds, atts_img = self.prompt_wrap(img_embeds, atts_img, - prompt) - self.llama_tokenizer.padding_side = 'right' - text = [t + self.end_sym for t in data_samples['text_input']] + prompts, texts = [], [] + for t in data_samples: + chat_content = t.chat_content + split_mark = '###Answer: ' if t.lang == 'en' else '###答:' + prompt, text = chat_content.split(split_mark) + prompt += split_mark + text += self.end_sym + prompts.append(prompt) + texts.append(text) + + img_embeds, atts_img = self.prompt_wrap(img_embeds, atts_img, prompts) to_regress_tokens = self.llama_tokenizer( - text, + texts, return_tensors='pt', padding='longest', truncation=True, @@ -295,10 +319,12 @@ class MiniGPT4(BaseModel): with torch.no_grad(): img_embeds, atts_img = self.encode_img(images) - if self.task == 'caption' and self.prompt_list: - prompt = random.choice(self.prompt_list) - img_embeds, atts_img = self.prompt_wrap(img_embeds, atts_img, - prompt) + prompts = [ + random.choice(self.zh_prompt_list) if hasattr(t, 'lang') + and t.lang == 'zh' else random.choice(self.en_prompt_list) + for t in data_samples + ] + img_embeds, atts_img = self.prompt_wrap(img_embeds, atts_img, prompts) batch_size = img_embeds.shape[0] bos = torch.ones( @@ -336,7 +362,6 @@ class MiniGPT4(BaseModel): for output, data_sample in zip(outputs, data_samples): if self.task == 'caption': output = output.split('###')[0] - output = output.split('Assistant:')[-1].strip() data_sample.pred_caption = output else: # raw output diff --git a/projects/gradio_demo/conversation.py b/projects/gradio_demo/conversation.py new file mode 100644 index 00000000..3c594690 --- /dev/null +++ b/projects/gradio_demo/conversation.py @@ -0,0 +1,137 @@ +# Modified from +# https://github.com/Vision-CAIR/MiniGPT-4/blob/main/minigpt4/conversation/conversation.py +import dataclasses +from typing import List + +import torch + + +@dataclasses.dataclass +class Conversation: + system: str + roles: List[str] + messages: List[List[str]] + sep: str = '###' + + def get_prompt(self): + ret = self.system + self.sep + for role, message in self.messages: + if message: + ret += role + ': ' + message + self.sep + else: + ret += role + ':' + return ret + + def append_message(self, role, message): + self.messages.append([role, message]) + + def copy(self): + return Conversation( 
+            system=self.system,
+            roles=[role for role in self.roles],
+            messages=[[y for y in x] for x in self.messages],
+            sep=self.sep,
+        )
+
+    def dict(self):
+        return {
+            'system': self.system,
+            'roles': self.roles,
+            'messages': self.messages,
+            'offset': self.offset,
+            'sep': self.sep,
+        }
+
+
+EN_CONV_VISION = Conversation(
+    system='Give the following image. '
+    'You will be able to see the image once I provide it to you. '
+    'Please answer my questions in detail.',
+    roles=['Ask', 'Answer'],
+    messages=[],
+    sep='###',
+)
+
+ZH_CONV_VISION = Conversation(
+    system='给定一张图片,请仔细观察这张图片,并回答我的问题。',
+    roles=['问', '答'],
+    messages=[],
+    sep='###',
+)
+
+
+class Chat:
+
+    def __init__(self, inferencer, device, is_half=False):
+        self.device = device
+        self.inferencer = inferencer
+        self.model = inferencer.model
+        self.is_half = is_half
+        if is_half:
+            self.model = self.model.half()
+        self.model = self.model.to(device)
+        self.max_length = 2000
+
+    def upload_img(self, image, conv, img_list):
+        img = next(self.inferencer.preprocess([image]))
+        img = self.model.data_preprocessor(img, False)['images']
+        img = img.to(self.device)
+        image_emb, _ = self.model.encode_img(img)
+        img_list.append(image_emb)
+        conv.append_message(conv.roles[0], '<Img><ImageHere></Img>')
+
+    def get_context_emb(self, conv, img_list):
+        prompt = conv.get_prompt()
+        prompt_segs = prompt.split('<ImageHere>')
+        seg_tokens = [
+            self.model.llama_tokenizer(
+                seg, return_tensors='pt',
+                add_special_tokens=(i == 0)).to(self.device).input_ids
+            for i, seg in enumerate(prompt_segs)
+        ]
+        seg_embs = [
+            self.model.llama_model.model.embed_tokens(seg_token)
+            for seg_token in seg_tokens
+        ]
+        mixed_embs = [
+            emb for pair in zip(seg_embs[:-1], img_list) for emb in pair
+        ] + [seg_embs[-1]]
+        mixed_embs = torch.cat(mixed_embs, dim=1)
+        return mixed_embs
+
+    def ask(self, text, conv):
+        if len(conv.messages) > 0 and conv.messages[-1][0] == conv.roles[
+                0] and conv.messages[-1][1][-6:] == '</Img>':
+            conv.messages[-1][1] = ' '.join([conv.messages[-1][1], text])
+        else:
+            conv.append_message(conv.roles[0], text)
+
+    def answer(self, conv, img_list, generation_cfg):
+        conv.append_message(conv.roles[1], None)
+        embs = self.get_context_emb(conv, img_list)
+        cur_max_len = generation_cfg['max_new_tokens'] + embs.shape[1]
+        if cur_max_len > self.max_length:
+            print('Warning: The number of tokens in current conversation '
+                  'exceeds the max length. 
' + 'The model will not see the contexts outside the range.') + begin_idx = max(0, cur_max_len - self.max_length) + embs = embs[:, begin_idx:] + if self.is_half: + embs = embs.half() + outputs = self.model.llama_model.generate( + inputs_embeds=embs, + eos_token_id=self.model.end_token_id, + **generation_cfg) + + output_token = outputs[0] + if output_token[0] == 0: + output_token = output_token[1:] + elif output_token[0] == 1: + output_token = output_token[1:] + output_text = self.model.llama_tokenizer.decode( + output_token, + add_special_tokens=False, + skip_special_tokens=True) + output_text = output_text.split('###')[0] + conv.messages[-1][1] = output_text + return output_text diff --git a/projects/gradio_demo/minigpt4_demo.py b/projects/gradio_demo/minigpt4_demo.py new file mode 100644 index 00000000..e4d61426 --- /dev/null +++ b/projects/gradio_demo/minigpt4_demo.py @@ -0,0 +1,144 @@ +import argparse + +import gradio as gr +import numpy as np +import torch +from conversation import EN_CONV_VISION, ZH_CONV_VISION, Chat + +from mmpretrain import ImageCaptionInferencer + +parser = argparse.ArgumentParser(description='MiniGPT4 demo') +parser.add_argument( + 'cfg', type=str, help='config file for minigpt4 (absolute path)') +parser.add_argument( + 'ckpt', type=str, help='pretrained file for minigpt4 (absolute path)') +args = parser.parse_args() + +if torch.cuda.is_available(): + devices = [ + torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count()) + ] +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + devices = [torch.device('mps')] +else: + devices = [torch.device('cpu')] + + +def get_free_device(): + if hasattr(torch.cuda, 'mem_get_info'): + free = [torch.cuda.mem_get_info(gpu)[0] for gpu in devices] + select = max(zip(free, range(len(free))))[1] + else: + import random + select = random.randint(0, len(devices) - 1) + return devices[select] + + +device = get_free_device() +inferencer = ImageCaptionInferencer(model=args.cfg, pretrained=args.ckpt) +model = inferencer.model +chat = Chat(inferencer, device=device, is_half=(device.type != 'cpu')) + + +def reset(chat_state, img_list): + if chat_state is not None: + chat_state.messages = [] + if img_list is not None: + img_list = [] + return (None, gr.update(value=None, interactive=True), + gr.update( + value=None, + placeholder='Please upload your image first', + interactive=False), + gr.update(value='Upload & Start Chat', + interactive=True), chat_state, img_list, + gr.update(value='Restart', interactive=False), + gr.update(value='English', interactive=True)) + + +def upload_img(gr_img, language, chat_state): + if gr_img is None: + return (None, + gr.update( + placeholder='Please upload your image first', + interactive=False), + gr.update(value='Upload & Start Chat', + interactive=True), chat_state, None, + gr.update(value='Restart', interactive=False), + gr.update(value='English', interactive=True)) + + if (language == 'English'): + chat_state = EN_CONV_VISION.copy() + else: + chat_state = ZH_CONV_VISION.copy() + img_list = [] + gr_img_array = np.asarray(gr_img) + chat.upload_img(gr_img_array, chat_state, img_list) + return (gr.update(interactive=False), + gr.update(placeholder='Type and press Enter', interactive=True), + gr.update(value='Start Chatting', + interactive=False), chat_state, img_list, + gr.update(value='Restart', + interactive=True), gr.update(interactive=False)) + + +def ask(user_message, chatbot, chat_state): + if (len(user_message) == 0): + return gr.update( + value=None, + 
placeholder='Input should not be empty!', + interactive=True), chatbot, chat_state + chat.ask(user_message, chat_state) + chatbot = chatbot + [[user_message, None]] + return '', chatbot, chat_state + + +def answer(chatbot, chat_state, img_list): + llm_message = chat.answer( + conv=chat_state, + img_list=img_list, + generation_cfg=model.generation_cfg) + chatbot[-1][1] = llm_message + return chatbot, chat_state, img_list + + +if __name__ == '__main__': + title = 'MMPretrain MiniGPT-4 Inference Demo' + with gr.Blocks(analytics_enabled=False, title=title) as demo: + gr.Markdown(f'# {title}') + with gr.Row(): + with gr.Column(): + image = gr.Image(type='pil') + language = gr.Dropdown(['English', 'Chinese'], + label='Language', + info='Select chatbot\'s language', + value='English', + interactive=True) + upload_button = gr.Button( + value='Upload & Start Chat', interactive=True) + clear = gr.Button(value='Restart', interactive=False) + + with gr.Column(): + chat_state = gr.State() + img_list = gr.State() + chatbot = gr.Chatbot( + label='MiniGPT-4', min_width=320, height=600) + text_input = gr.Textbox( + label='User', + placeholder='Please upload your image first', + interactive=False) + + upload_button.click(upload_img, [image, language, chat_state], [ + image, text_input, upload_button, chat_state, img_list, clear, + language + ]) + text_input.submit(ask, [text_input, chatbot, chat_state], + [text_input, chatbot, chat_state]).then( + answer, [chatbot, chat_state, img_list], + [chatbot, chat_state, img_list]) + clear.click(reset, [chat_state, img_list], [ + chatbot, image, text_input, upload_button, chat_state, img_list, + clear, language + ]) + + demo.launch(share=True) From a4c219e05d3ab78c20b9d22dedde7dded6fd206c Mon Sep 17 00:00:00 2001 From: mzr1996 Date: Thu, 12 Oct 2023 17:20:22 +0800 Subject: [PATCH 15/20] Bump version to v1.1.0 --- README.md | 9 +++------ README_zh-CN.md | 9 +++------ docker/serve/Dockerfile | 6 +++--- docs/en/notes/changelog.md | 22 ++++++++++++++++++++++ docs/en/notes/faq.md | 2 +- docs/zh_CN/notes/faq.md | 2 +- mmpretrain/__init__.py | 2 +- mmpretrain/version.py | 2 +- 8 files changed, 35 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index dc5c6cde..78d56fc1 100644 --- a/README.md +++ b/README.md @@ -86,13 +86,10 @@ https://github.com/open-mmlab/mmpretrain/assets/26739999/e4dcd3a2-f895-4d1b-a351 ## What's new -🌟 v1.0.2 was released in 15/08/2023 +🌟 v1.1.0 was released in 12/10/2023 -Support [MFF](./configs/mff/) self-supervised algorithm and enhance the codebase. More details can be found in the [changelog](https://mmpretrain.readthedocs.io/en/latest/notes/changelog.html). - -🌟 v1.0.1 was released in 28/07/2023 - -Fix some bugs and enhance the codebase. Please refer to [changelog](https://mmpretrain.readthedocs.io/en/latest/notes/changelog.html) for more details. +- Support Mini-GPT4 training and provide a Chinese model (based on Baichuan-7B) +- Support zero-shot classification based on CLIP. 
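As a quick, hedged illustration of the new zero-shot CLIP support, the sketch below builds one of the CLIP configs added by this patch series through the Python API. The config path is taken from this release; the checkpoint path is only a placeholder for a checkpoint converted with the newly added `tools/model_converters/openai-clip_to_mmpretrain-clip.py`, and full evaluation would normally go through `tools/test.py` instead.

```python
from mmpretrain import get_model

# Sketch only: build the zero-shot CLIP classifier from the new config.
# 'CLIP_VIT_B16.pth' is a placeholder for a locally converted checkpoint.
model = get_model(
    'configs/clip/clip_vit-base-p16_zeroshot-cls_in1k.py',
    pretrained='CLIP_VIT_B16.pth',
    device='cuda')
model.eval()
```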
🌟 v1.0.0 was released in 04/07/2023 diff --git a/README_zh-CN.md b/README_zh-CN.md index 801d3183..06daeb1c 100644 --- a/README_zh-CN.md +++ b/README_zh-CN.md @@ -84,13 +84,10 @@ https://github.com/open-mmlab/mmpretrain/assets/26739999/e4dcd3a2-f895-4d1b-a351 ## 更新日志 -🌟 2023/8/15 发布了 v1.0.2 版本 +🌟 2023/10/12 发布了 v1.1.0 版本 -支持了 [MFF](./configs/mff/) 自监督算法,增强算法库功能。细节请参考 [更新日志](https://mmpretrain.readthedocs.io/zh_CN/latest/notes/changelog.html)。 - -🌟 2023/7/28 发布了 v1.0.1 版本 - -修复部分 bug 和增强算法库功能。细节请参考 [更新日志](https://mmpretrain.readthedocs.io/zh_CN/latest/notes/changelog.html)。 +- 支持 Mini-GPT4 训练并提供一个基于 Baichuan-7B 的中文模型 +- 支持基于 CLIP 的零样本分类。 🌟 2023/7/4 发布了 v1.0.0 版本 diff --git a/docker/serve/Dockerfile b/docker/serve/Dockerfile index bff871b7..86df2926 100644 --- a/docker/serve/Dockerfile +++ b/docker/serve/Dockerfile @@ -1,9 +1,9 @@ -ARG PYTORCH="1.12.1" -ARG CUDA="11.3" +ARG PYTORCH="2.0.1" +ARG CUDA="11.7" ARG CUDNN="8" FROM pytorch/torchserve:latest-gpu -ARG MMPRE="1.0.2" +ARG MMPRE="1.1.0" ENV PYTHONUNBUFFERED TRUE diff --git a/docs/en/notes/changelog.md b/docs/en/notes/changelog.md index f84d691a..7a8ab680 100644 --- a/docs/en/notes/changelog.md +++ b/docs/en/notes/changelog.md @@ -1,5 +1,27 @@ # Changelog (MMPreTrain) +## v1.1.0(12/10/2023) + +### New Features + +- [Feature] Implement of Zero-Shot CLIP Classifier ([#1737](https://github.com/open-mmlab/mmpretrain/pull/1737)) +- [Feature] Add minigpt4 gradio demo and training script. ([#1758](https://github.com/open-mmlab/mmpretrain/pull/1758)) + +### Improvements + +- [Config] New Version of config Adapting MobileNet Algorithm ([#1774](https://github.com/open-mmlab/mmpretrain/pull/1774)) +- [Config] Support DINO self-supervised learning in project ([#1756](https://github.com/open-mmlab/mmpretrain/pull/1756)) +- [Config] New Version of config Adapting Swin Transformer Algorithm ([#1780](https://github.com/open-mmlab/mmpretrain/pull/1780)) +- [Enhance] Add iTPN Supports for Non-three channel image ([#1735](https://github.com/open-mmlab/mmpretrain/pull/1735)) +- [Docs] Update dataset download script from opendatalab to openXlab ([#1765](https://github.com/open-mmlab/mmpretrain/pull/1765)) +- [Docs] Update COCO-Retrieval dataset docs. ([#1806](https://github.com/open-mmlab/mmpretrain/pull/1806)) + +### Bug Fix + +- Update `train.py` to compat with new config. +- Update OFA module to compat with the latest huggingface. +- Fix pipeline bug in ImageRetrievalInferencer. + ## v1.0.2(15/08/2023) ### New Features diff --git a/docs/en/notes/faq.md b/docs/en/notes/faq.md index 9f78a048..dd059114 100644 --- a/docs/en/notes/faq.md +++ b/docs/en/notes/faq.md @@ -16,7 +16,7 @@ and make sure you fill in all required information in the template. 
| MMPretrain version | MMEngine version | MMCV version | | :----------------: | :---------------: | :--------------: | - | 1.0.2 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | + | 1.1.0 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | | 1.0.0 | mmengine >= 0.8.0 | mmcv >= 2.0.0 | | 1.0.0rc8 | mmengine >= 0.7.1 | mmcv >= 2.0.0rc4 | | 1.0.0rc7 | mmengine >= 0.5.0 | mmcv >= 2.0.0rc4 | diff --git a/docs/zh_CN/notes/faq.md b/docs/zh_CN/notes/faq.md index efd2ff5e..23ec5f50 100644 --- a/docs/zh_CN/notes/faq.md +++ b/docs/zh_CN/notes/faq.md @@ -13,7 +13,7 @@ | MMPretrain 版本 | MMEngine 版本 | MMCV 版本 | | :-------------: | :---------------: | :--------------: | - | 1.0.2 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | + | 1.1.0 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | | 1.0.0 | mmengine >= 0.8.0 | mmcv >= 2.0.0 | | 1.0.0rc8 | mmengine >= 0.7.1 | mmcv >= 2.0.0rc4 | | 1.0.0rc7 | mmengine >= 0.5.0 | mmcv >= 2.0.0rc4 | diff --git a/mmpretrain/__init__.py b/mmpretrain/__init__.py index 0b0f573f..69c585bd 100644 --- a/mmpretrain/__init__.py +++ b/mmpretrain/__init__.py @@ -7,7 +7,7 @@ from .apis import * # noqa: F401, F403 from .version import __version__ mmcv_minimum_version = '2.0.0' -mmcv_maximum_version = '2.1.0' +mmcv_maximum_version = '2.2.0' mmcv_version = digit_version(mmcv.__version__) mmengine_minimum_version = '0.8.3' diff --git a/mmpretrain/version.py b/mmpretrain/version.py index 24b33124..32f800cd 100644 --- a/mmpretrain/version.py +++ b/mmpretrain/version.py @@ -1,6 +1,6 @@ # Copyright (c) OpenMMLab. All rights reserved -__version__ = '1.0.2' +__version__ = '1.1.0' def parse_version_info(version_str): From ed5924b6fea1772c7ca16cf9c4e4843694025395 Mon Sep 17 00:00:00 2001 From: Coobiw Date: Wed, 25 Oct 2023 16:23:45 +0800 Subject: [PATCH 16/20] [Feature] Implement of RAM with a gradio interface. (#1802) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [CodeCamp2023-584]Support DINO self-supervised learning in project (#1756) * feat: impelemt DINO * chore: delete debug code * chore: impplement pre-commit * fix: fix imported package * chore: pre-commit check * [CodeCamp2023-340] New Version of config Adapting MobileNet Algorithm (#1774) * add new config adapting MobileNetV2,V3 * add base model config for mobile net v3, modified all training configs of mobile net v3 inherit from the base model config * removed directory _base_/models/mobilenet_v3 * [Feature] Implement of Zero-Shot CLIP Classifier (#1737) * zero-shot CLIP * modify zero-shot clip config * add in1k_sub_prompt(8 prompts) for improvement * add some annotations doc * clip base class & clip_zs sub-class * some modifications of details after review * convert into and use mmpretrain-vit * modify names of some files and directories * ram init commit * [Fix] Fix pipeline bug in image retrieval inferencer * [CodeCamp2023-341] 多模态数据集文档补充-COCO Retrieval * Update OFA to compat with latest huggingface. 
* Update train.py to compat with new config * Bump version to v1.1.0 * Update __init__.py --------- Co-authored-by: LALBJ <40877073+LALBJ@users.noreply.github.com> Co-authored-by: DE009 <57087096+DE009@users.noreply.github.com> Co-authored-by: mzr1996 Co-authored-by: 飞飞 <102729089+ASHORE1225@users.noreply.github.com> --- README.md | 9 +- README_zh-CN.md | 9 +- ...clip_vit-base-p16_zeroshot-cls_cifar100.py | 68 + .../clip_vit-base-p16_zeroshot-cls_in1k.py | 69 + ...lip_vit-large-p14_zeroshot-cls_cifar100.py | 68 + .../clip_vit-large-p14_zeroshot-cls_in1k.py | 69 + docker/serve/Dockerfile | 6 +- docs/en/notes/changelog.md | 22 + docs/en/notes/faq.md | 2 +- docs/zh_CN/notes/faq.md | 2 +- mmpretrain/__init__.py | 2 +- mmpretrain/apis/image_retrieval.py | 1 + .../configs/_base_/datasets/cifar10_bs16.py | 52 + .../_base_/datasets/imagenet_bs128_mbv3.py | 75 ++ .../datasets/imagenet_bs32_pil_resize.py | 60 + .../configs/_base_/models/mobilenet_v2_1x.py | 17 + .../_base_/models/mobilenet_v3_small.py | 25 + .../configs/_base_/schedules/cifar10_bs128.py | 20 + .../schedules/imagenet_bs256_epochstep.py | 20 + .../mobilenet_v2/mobilenet_v2_8xb32_in1k.py | 9 + .../mobilenet_v3_large_8xb128_in1k.py | 40 + .../mobilenet_v3_small_050_8xb128_in1k.py | 85 ++ .../mobilenet_v3_small_075_8xb128_in1k.py | 83 ++ .../mobilenet_v3_small_8xb128_in1k.py | 34 + .../mobilenet_v3_small_8xb16_cifar10.py | 34 + mmpretrain/datasets/categories.py | 221 +++ mmpretrain/datasets/coco_retrieval.py | 75 +- mmpretrain/models/multimodal/__init__.py | 5 +- mmpretrain/models/multimodal/clip/__init__.py | 5 + mmpretrain/models/multimodal/clip/clip.py | 364 +++++ .../multimodal/clip/clip_transformer.py | 99 ++ mmpretrain/models/multimodal/clip/utils.py | 115 ++ .../models/multimodal/ofa/ofa_modules.py | 1 + mmpretrain/models/multimodal/ram/__init__.py | 4 + mmpretrain/models/multimodal/ram/bert.py | 1197 +++++++++++++++++ .../models/multimodal/ram/config/__init__.py | 1 + .../ram/config/ram_swin_large_14m.py | 93 ++ .../multimodal/ram/data/ram_tag_list.pickle | Bin 0 -> 51099 bytes .../ram/data/ram_tag_list_chinese.pickle | Bin 0 -> 50796 bytes .../ram/data/ram_tag_list_threshold.pickle | Bin 0 -> 41289 bytes .../models/multimodal/ram/gradio_demo.py | 109 ++ .../models/multimodal/ram/openset_utils.py | 212 +++ mmpretrain/models/multimodal/ram/ram.py | 332 +++++ .../models/multimodal/ram/run/__init__.py | 1 + .../models/multimodal/ram/run/inference.py | 29 + mmpretrain/models/multimodal/ram/utils.py | 87 ++ mmpretrain/models/utils/tokenizer.py | 1 + mmpretrain/version.py | 2 +- projects/dino/README.md | 26 + ..._vit-base-p16_8xb64-amp-coslr-100e_in1k.py | 104 ++ projects/dino/dataset/__init__.py | 1 + projects/dino/dataset/transform/__init__.py | 3 + projects/dino/dataset/transform/processing.py | 91 ++ projects/dino/engine/__init__.py | 1 + projects/dino/engine/hooks/__init__.py | 3 + .../hooks/dino_teacher_temp_warmup_hook.py | 33 + projects/dino/models/__init__.py | 3 + projects/dino/models/algorithm/__init__.py | 3 + projects/dino/models/algorithm/dino.py | 82 ++ projects/dino/models/head/__init__.py | 3 + projects/dino/models/head/dino_head.py | 69 + projects/dino/models/neck/__init__.py | 3 + projects/dino/models/neck/dino_neck.py | 41 + projects/dino/tools/dist_train.sh | 19 + projects/dino/tools/slurm_train.sh | 23 + projects/dino/tools/train.py | 104 ++ .../openai-clip_to_mmpretrain-clip.py | 77 ++ tools/model_converters/ram2mmpretrain.py | 117 ++ tools/train.py | 4 - 69 files changed, 4618 insertions(+), 26 deletions(-) 
create mode 100644 configs/clip/clip_vit-base-p16_zeroshot-cls_cifar100.py create mode 100644 configs/clip/clip_vit-base-p16_zeroshot-cls_in1k.py create mode 100644 configs/clip/clip_vit-large-p14_zeroshot-cls_cifar100.py create mode 100644 configs/clip/clip_vit-large-p14_zeroshot-cls_in1k.py create mode 100644 mmpretrain/configs/_base_/datasets/cifar10_bs16.py create mode 100644 mmpretrain/configs/_base_/datasets/imagenet_bs128_mbv3.py create mode 100644 mmpretrain/configs/_base_/datasets/imagenet_bs32_pil_resize.py create mode 100644 mmpretrain/configs/_base_/models/mobilenet_v2_1x.py create mode 100644 mmpretrain/configs/_base_/models/mobilenet_v3_small.py create mode 100644 mmpretrain/configs/_base_/schedules/cifar10_bs128.py create mode 100644 mmpretrain/configs/_base_/schedules/imagenet_bs256_epochstep.py create mode 100644 mmpretrain/configs/mobilenet_v2/mobilenet_v2_8xb32_in1k.py create mode 100644 mmpretrain/configs/mobilenet_v3/mobilenet_v3_large_8xb128_in1k.py create mode 100644 mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_050_8xb128_in1k.py create mode 100644 mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_075_8xb128_in1k.py create mode 100644 mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb128_in1k.py create mode 100644 mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb16_cifar10.py create mode 100644 mmpretrain/models/multimodal/clip/__init__.py create mode 100644 mmpretrain/models/multimodal/clip/clip.py create mode 100644 mmpretrain/models/multimodal/clip/clip_transformer.py create mode 100644 mmpretrain/models/multimodal/clip/utils.py create mode 100644 mmpretrain/models/multimodal/ram/__init__.py create mode 100644 mmpretrain/models/multimodal/ram/bert.py create mode 100644 mmpretrain/models/multimodal/ram/config/__init__.py create mode 100644 mmpretrain/models/multimodal/ram/config/ram_swin_large_14m.py create mode 100644 mmpretrain/models/multimodal/ram/data/ram_tag_list.pickle create mode 100644 mmpretrain/models/multimodal/ram/data/ram_tag_list_chinese.pickle create mode 100644 mmpretrain/models/multimodal/ram/data/ram_tag_list_threshold.pickle create mode 100644 mmpretrain/models/multimodal/ram/gradio_demo.py create mode 100644 mmpretrain/models/multimodal/ram/openset_utils.py create mode 100644 mmpretrain/models/multimodal/ram/ram.py create mode 100644 mmpretrain/models/multimodal/ram/run/__init__.py create mode 100644 mmpretrain/models/multimodal/ram/run/inference.py create mode 100644 mmpretrain/models/multimodal/ram/utils.py create mode 100644 projects/dino/README.md create mode 100644 projects/dino/config/dino_vit-base-p16_8xb64-amp-coslr-100e_in1k.py create mode 100644 projects/dino/dataset/__init__.py create mode 100644 projects/dino/dataset/transform/__init__.py create mode 100644 projects/dino/dataset/transform/processing.py create mode 100644 projects/dino/engine/__init__.py create mode 100644 projects/dino/engine/hooks/__init__.py create mode 100644 projects/dino/engine/hooks/dino_teacher_temp_warmup_hook.py create mode 100644 projects/dino/models/__init__.py create mode 100644 projects/dino/models/algorithm/__init__.py create mode 100644 projects/dino/models/algorithm/dino.py create mode 100644 projects/dino/models/head/__init__.py create mode 100644 projects/dino/models/head/dino_head.py create mode 100644 projects/dino/models/neck/__init__.py create mode 100644 projects/dino/models/neck/dino_neck.py create mode 100644 projects/dino/tools/dist_train.sh create mode 100644 projects/dino/tools/slurm_train.sh create mode 100644 
projects/dino/tools/train.py create mode 100644 tools/model_converters/openai-clip_to_mmpretrain-clip.py create mode 100644 tools/model_converters/ram2mmpretrain.py diff --git a/README.md b/README.md index dc5c6cde..78d56fc1 100644 --- a/README.md +++ b/README.md @@ -86,13 +86,10 @@ https://github.com/open-mmlab/mmpretrain/assets/26739999/e4dcd3a2-f895-4d1b-a351 ## What's new -🌟 v1.0.2 was released in 15/08/2023 +🌟 v1.1.0 was released in 12/10/2023 -Support [MFF](./configs/mff/) self-supervised algorithm and enhance the codebase. More details can be found in the [changelog](https://mmpretrain.readthedocs.io/en/latest/notes/changelog.html). - -🌟 v1.0.1 was released in 28/07/2023 - -Fix some bugs and enhance the codebase. Please refer to [changelog](https://mmpretrain.readthedocs.io/en/latest/notes/changelog.html) for more details. +- Support Mini-GPT4 training and provide a Chinese model (based on Baichuan-7B) +- Support zero-shot classification based on CLIP. 🌟 v1.0.0 was released in 04/07/2023 diff --git a/README_zh-CN.md b/README_zh-CN.md index 801d3183..06daeb1c 100644 --- a/README_zh-CN.md +++ b/README_zh-CN.md @@ -84,13 +84,10 @@ https://github.com/open-mmlab/mmpretrain/assets/26739999/e4dcd3a2-f895-4d1b-a351 ## 更新日志 -🌟 2023/8/15 发布了 v1.0.2 版本 +🌟 2023/10/12 发布了 v1.1.0 版本 -支持了 [MFF](./configs/mff/) 自监督算法,增强算法库功能。细节请参考 [更新日志](https://mmpretrain.readthedocs.io/zh_CN/latest/notes/changelog.html)。 - -🌟 2023/7/28 发布了 v1.0.1 版本 - -修复部分 bug 和增强算法库功能。细节请参考 [更新日志](https://mmpretrain.readthedocs.io/zh_CN/latest/notes/changelog.html)。 +- 支持 Mini-GPT4 训练并提供一个基于 Baichuan-7B 的中文模型 +- 支持基于 CLIP 的零样本分类。 🌟 2023/7/4 发布了 v1.0.0 版本 diff --git a/configs/clip/clip_vit-base-p16_zeroshot-cls_cifar100.py b/configs/clip/clip_vit-base-p16_zeroshot-cls_cifar100.py new file mode 100644 index 00000000..dd684a50 --- /dev/null +++ b/configs/clip/clip_vit-base-p16_zeroshot-cls_cifar100.py @@ -0,0 +1,68 @@ +_base_ = '../_base_/default_runtime.py' + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=False, +) + +test_pipeline = [ + dict(type='Resize', scale=(224, 224), interpolation='bicubic'), + dict( + type='PackInputs', + algorithm_keys=['text'], + meta_keys=['image_id', 'scale_factor'], + ), +] + +train_dataloader = None +test_dataloader = dict( + batch_size=32, + num_workers=8, + dataset=dict( + type='CIFAR100', + data_root='data/cifar100', + split='test', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +test_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# schedule settings +train_cfg = None +val_cfg = None +test_cfg = dict() + +# model settings +model = dict( + type='CLIPZeroShot', + vision_backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + drop_rate=0., + layer_cfgs=dict(act_cfg=dict(type='QuickGELU')), + pre_norm=True, + ), + projection=dict(type='CLIPProjection', in_channels=768, out_channels=512), + text_backbone=dict( + type='CLIPTransformer', + width=512, + layers=12, + heads=8, + attn_mask=True, + ), + tokenizer=dict( + type='AutoTokenizer', + name_or_path='openai/clip-vit-base-patch16', + use_fast=False), + vocab_size=49408, + transformer_width=512, + proj_dim=512, + text_prototype='cifar100', + text_prompt='openai_cifar100', + context_length=77, +) diff --git a/configs/clip/clip_vit-base-p16_zeroshot-cls_in1k.py b/configs/clip/clip_vit-base-p16_zeroshot-cls_in1k.py 
new file mode 100644 index 00000000..80c4fde8 --- /dev/null +++ b/configs/clip/clip_vit-base-p16_zeroshot-cls_in1k.py @@ -0,0 +1,69 @@ +_base_ = '../_base_/default_runtime.py' + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(224, 224), interpolation='bicubic'), + dict( + type='PackInputs', + algorithm_keys=['text'], + meta_keys=['image_id', 'scale_factor'], + ), +] + +train_dataloader = None +test_dataloader = dict( + batch_size=32, + num_workers=8, + dataset=dict( + type='ImageNet', + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +test_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# schedule settings +train_cfg = None +val_cfg = None +test_cfg = dict() + +# model settings +model = dict( + type='CLIPZeroShot', + vision_backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + drop_rate=0., + layer_cfgs=dict(act_cfg=dict(type='QuickGELU')), + pre_norm=True, + ), + projection=dict(type='CLIPProjection', in_channels=768, out_channels=512), + text_backbone=dict( + type='CLIPTransformer', + width=512, + layers=12, + heads=8, + attn_mask=True, + ), + tokenizer=dict( + type='AutoTokenizer', + name_or_path='openai/clip-vit-base-patch16', + use_fast=False), + vocab_size=49408, + transformer_width=512, + proj_dim=512, + text_prototype='imagenet', + text_prompt='openai_imagenet_sub', # openai_imagenet, openai_imagenet_sub + context_length=77, +) diff --git a/configs/clip/clip_vit-large-p14_zeroshot-cls_cifar100.py b/configs/clip/clip_vit-large-p14_zeroshot-cls_cifar100.py new file mode 100644 index 00000000..a6dd7c11 --- /dev/null +++ b/configs/clip/clip_vit-large-p14_zeroshot-cls_cifar100.py @@ -0,0 +1,68 @@ +_base_ = '../_base_/default_runtime.py' + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=False, +) + +test_pipeline = [ + dict(type='Resize', scale=(224, 224), interpolation='bicubic'), + dict( + type='PackInputs', + algorithm_keys=['text'], + meta_keys=['image_id', 'scale_factor'], + ), +] + +train_dataloader = None +test_dataloader = dict( + batch_size=32, + num_workers=8, + dataset=dict( + type='CIFAR100', + data_root='data/cifar100', + split='test', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +test_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# schedule settings +train_cfg = None +val_cfg = None +test_cfg = dict() + +# model settings +model = dict( + type='CLIPZeroShot', + vision_backbone=dict( + type='VisionTransformer', + arch='large', + img_size=224, + patch_size=14, + drop_rate=0., + layer_cfgs=dict(act_cfg=dict(type='QuickGELU')), + pre_norm=True, + ), + projection=dict(type='CLIPProjection', in_channels=1024, out_channels=768), + text_backbone=dict( + type='CLIPTransformer', + width=768, + layers=12, + heads=12, + attn_mask=True, + ), + tokenizer=dict( + type='AutoTokenizer', + name_or_path='openai/clip-vit-large-patch14', + use_fast=False), + vocab_size=49408, + transformer_width=768, + proj_dim=768, + text_prototype='cifar100', + text_prompt='openai_cifar100', + context_length=77, +) diff --git 
a/configs/clip/clip_vit-large-p14_zeroshot-cls_in1k.py b/configs/clip/clip_vit-large-p14_zeroshot-cls_in1k.py new file mode 100644 index 00000000..10500017 --- /dev/null +++ b/configs/clip/clip_vit-large-p14_zeroshot-cls_in1k.py @@ -0,0 +1,69 @@ +_base_ = '../_base_/default_runtime.py' + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(224, 224), interpolation='bicubic'), + dict( + type='PackInputs', + algorithm_keys=['text'], + meta_keys=['image_id', 'scale_factor'], + ), +] + +train_dataloader = None +test_dataloader = dict( + batch_size=32, + num_workers=8, + dataset=dict( + type='ImageNet', + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +test_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# schedule settings +train_cfg = None +val_cfg = None +test_cfg = dict() + +# model settings +model = dict( + type='CLIPZeroShot', + vision_backbone=dict( + type='VisionTransformer', + arch='large', + img_size=224, + patch_size=14, + drop_rate=0., + layer_cfgs=dict(act_cfg=dict(type='QuickGELU')), + pre_norm=True, + ), + projection=dict(type='CLIPProjection', in_channels=1024, out_channels=768), + text_backbone=dict( + type='CLIPTransformer', + width=768, + layers=12, + heads=12, + attn_mask=True, + ), + tokenizer=dict( + type='AutoTokenizer', + name_or_path='openai/clip-vit-large-patch14', + use_fast=False), + vocab_size=49408, + transformer_width=768, + proj_dim=768, + text_prototype='imagenet', + text_prompt='openai_imagenet_sub', # openai_imagenet, openai_imagenet_sub + context_length=77, +) diff --git a/docker/serve/Dockerfile b/docker/serve/Dockerfile index bff871b7..86df2926 100644 --- a/docker/serve/Dockerfile +++ b/docker/serve/Dockerfile @@ -1,9 +1,9 @@ -ARG PYTORCH="1.12.1" -ARG CUDA="11.3" +ARG PYTORCH="2.0.1" +ARG CUDA="11.7" ARG CUDNN="8" FROM pytorch/torchserve:latest-gpu -ARG MMPRE="1.0.2" +ARG MMPRE="1.1.0" ENV PYTHONUNBUFFERED TRUE diff --git a/docs/en/notes/changelog.md b/docs/en/notes/changelog.md index f84d691a..7a8ab680 100644 --- a/docs/en/notes/changelog.md +++ b/docs/en/notes/changelog.md @@ -1,5 +1,27 @@ # Changelog (MMPreTrain) +## v1.1.0(12/10/2023) + +### New Features + +- [Feature] Implement of Zero-Shot CLIP Classifier ([#1737](https://github.com/open-mmlab/mmpretrain/pull/1737)) +- [Feature] Add minigpt4 gradio demo and training script. ([#1758](https://github.com/open-mmlab/mmpretrain/pull/1758)) + +### Improvements + +- [Config] New Version of config Adapting MobileNet Algorithm ([#1774](https://github.com/open-mmlab/mmpretrain/pull/1774)) +- [Config] Support DINO self-supervised learning in project ([#1756](https://github.com/open-mmlab/mmpretrain/pull/1756)) +- [Config] New Version of config Adapting Swin Transformer Algorithm ([#1780](https://github.com/open-mmlab/mmpretrain/pull/1780)) +- [Enhance] Add iTPN Supports for Non-three channel image ([#1735](https://github.com/open-mmlab/mmpretrain/pull/1735)) +- [Docs] Update dataset download script from opendatalab to openXlab ([#1765](https://github.com/open-mmlab/mmpretrain/pull/1765)) +- [Docs] Update COCO-Retrieval dataset docs. ([#1806](https://github.com/open-mmlab/mmpretrain/pull/1806)) + +### Bug Fix + +- Update `train.py` to compat with new config. 
+- Update OFA module to compat with the latest huggingface. +- Fix pipeline bug in ImageRetrievalInferencer. + ## v1.0.2(15/08/2023) ### New Features diff --git a/docs/en/notes/faq.md b/docs/en/notes/faq.md index 9f78a048..dd059114 100644 --- a/docs/en/notes/faq.md +++ b/docs/en/notes/faq.md @@ -16,7 +16,7 @@ and make sure you fill in all required information in the template. | MMPretrain version | MMEngine version | MMCV version | | :----------------: | :---------------: | :--------------: | - | 1.0.2 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | + | 1.1.0 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | | 1.0.0 | mmengine >= 0.8.0 | mmcv >= 2.0.0 | | 1.0.0rc8 | mmengine >= 0.7.1 | mmcv >= 2.0.0rc4 | | 1.0.0rc7 | mmengine >= 0.5.0 | mmcv >= 2.0.0rc4 | diff --git a/docs/zh_CN/notes/faq.md b/docs/zh_CN/notes/faq.md index efd2ff5e..23ec5f50 100644 --- a/docs/zh_CN/notes/faq.md +++ b/docs/zh_CN/notes/faq.md @@ -13,7 +13,7 @@ | MMPretrain 版本 | MMEngine 版本 | MMCV 版本 | | :-------------: | :---------------: | :--------------: | - | 1.0.2 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | + | 1.1.0 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | | 1.0.0 | mmengine >= 0.8.0 | mmcv >= 2.0.0 | | 1.0.0rc8 | mmengine >= 0.7.1 | mmcv >= 2.0.0rc4 | | 1.0.0rc7 | mmengine >= 0.5.0 | mmcv >= 2.0.0rc4 | diff --git a/mmpretrain/__init__.py b/mmpretrain/__init__.py index 0b0f573f..69c585bd 100644 --- a/mmpretrain/__init__.py +++ b/mmpretrain/__init__.py @@ -7,7 +7,7 @@ from .apis import * # noqa: F401, F403 from .version import __version__ mmcv_minimum_version = '2.0.0' -mmcv_maximum_version = '2.1.0' +mmcv_maximum_version = '2.2.0' mmcv_version = digit_version(mmcv.__version__) mmengine_minimum_version = '0.8.3' diff --git a/mmpretrain/apis/image_retrieval.py b/mmpretrain/apis/image_retrieval.py index deae1de7..27919b20 100644 --- a/mmpretrain/apis/image_retrieval.py +++ b/mmpretrain/apis/image_retrieval.py @@ -108,6 +108,7 @@ class ImageRetrievalInferencer(BaseInferencer): # A config of dataset from mmpretrain.registry import DATASETS test_pipeline = [dict(type='LoadImageFromFile'), self.pipeline] + prototype.setdefault('pipeline', test_pipeline) dataset = DATASETS.build(prototype) dataloader = build_dataloader(dataset) elif isinstance(prototype, DataLoader): diff --git a/mmpretrain/configs/_base_/datasets/cifar10_bs16.py b/mmpretrain/configs/_base_/datasets/cifar10_bs16.py new file mode 100644 index 00000000..3737dbee --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/cifar10_bs16.py @@ -0,0 +1,52 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.dataset import DefaultSampler + +from mmpretrain.datasets import CIFAR10, PackInputs, RandomCrop, RandomFlip +from mmpretrain.evaluation import Accuracy + +# dataset settings +dataset_type = CIFAR10 +data_preprocessor = dict( + num_classes=10, + # RGB format normalization parameters + mean=[125.307, 122.961, 113.8575], + std=[51.5865, 50.847, 51.255], + # loaded images are already RGB format + to_rgb=False) + +train_pipeline = [ + dict(type=RandomCrop, crop_size=32, padding=4), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=PackInputs), +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + dataset=dict( + type=dataset_type, + data_root='data/cifar10', + split='train', + pipeline=train_pipeline), + sampler=dict(type=DefaultSampler, shuffle=True), +) + +val_dataloader = dict( + batch_size=16, + num_workers=2, + dataset=dict( + type=dataset_type, + data_root='data/cifar10/', + split='test', + pipeline=test_pipeline), + sampler=dict(type=DefaultSampler, shuffle=False), +) +val_evaluator = dict(type=Accuracy, topk=(1, )) + +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/mmpretrain/configs/_base_/datasets/imagenet_bs128_mbv3.py b/mmpretrain/configs/_base_/datasets/imagenet_bs128_mbv3.py new file mode 100644 index 00000000..cf0aa629 --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/imagenet_bs128_mbv3.py @@ -0,0 +1,75 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.dataset import DefaultSampler + +from mmpretrain.datasets import (AutoAugment, CenterCrop, ImageNet, + LoadImageFromFile, PackInputs, RandomErasing, + RandomFlip, RandomResizedCrop, ResizeEdge) +from mmpretrain.evaluation import Accuracy + +# dataset settings +dataset_type = ImageNet +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=RandomResizedCrop, scale=224, backend='pillow'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict( + type=AutoAugment, + policies='imagenet', + hparams=dict(pad_val=[round(x) for x in bgr_mean])), + dict( + type=RandomErasing, + erase_prob=0.2, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=ResizeEdge, scale=256, edge='short', backend='pillow'), + dict(type=CenterCrop, crop_size=224), + dict(type=PackInputs), +] + +train_dataloader = dict( + batch_size=128, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type=DefaultSampler, shuffle=True), +) + +val_dataloader = dict( + batch_size=128, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type=DefaultSampler, shuffle=False), +) +val_evaluator = dict(type=Accuracy, topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git 
a/mmpretrain/configs/_base_/datasets/imagenet_bs32_pil_resize.py b/mmpretrain/configs/_base_/datasets/imagenet_bs32_pil_resize.py new file mode 100644 index 00000000..f911bc20 --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/imagenet_bs32_pil_resize.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.dataset import DefaultSampler + +from mmpretrain.datasets import (CenterCrop, ImageNet, LoadImageFromFile, + PackInputs, RandomFlip, RandomResizedCrop, + ResizeEdge) +from mmpretrain.evaluation import Accuracy + +# dataset settings +dataset_type = ImageNet +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=RandomResizedCrop, scale=224, backend='pillow'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=ResizeEdge, scale=256, edge='short', backend='pillow'), + dict(type=CenterCrop, crop_size=224), + dict(type=PackInputs), +] + +train_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type=DefaultSampler, shuffle=True), +) + +val_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type=DefaultSampler, shuffle=False), +) +val_evaluator = dict(type=Accuracy, topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/mmpretrain/configs/_base_/models/mobilenet_v2_1x.py b/mmpretrain/configs/_base_/models/mobilenet_v2_1x.py new file mode 100644 index 00000000..17dbb9fd --- /dev/null +++ b/mmpretrain/configs/_base_/models/mobilenet_v2_1x.py @@ -0,0 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling, + ImageClassifier, LinearClsHead, MobileNetV2) + +# model settings +model = dict( + type=ImageClassifier, + backbone=dict(type=MobileNetV2, widen_factor=1.0), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=1280, + loss=dict(type=CrossEntropyLoss, loss_weight=1.0), + topk=(1, 5), + )) diff --git a/mmpretrain/configs/_base_/models/mobilenet_v3_small.py b/mmpretrain/configs/_base_/models/mobilenet_v3_small.py new file mode 100644 index 00000000..83edab59 --- /dev/null +++ b/mmpretrain/configs/_base_/models/mobilenet_v3_small.py @@ -0,0 +1,25 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
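One practical upside of the new-format configs above (e.g. `mobilenet_v2_1x.py`): the `type=` fields are real classes rather than registry strings, so the same dict can be built directly and sanity-checked in a Python session. A minimal sketch, assuming `mmpretrain` and `torch` are installed:

```python
import torch

from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling,
                               ImageClassifier, LinearClsHead, MobileNetV2)
from mmpretrain.registry import MODELS

# Same structure as the mobilenet_v2_1x config above, built in place.
model = MODELS.build(
    dict(
        type=ImageClassifier,
        backbone=dict(type=MobileNetV2, widen_factor=1.0),
        neck=dict(type=GlobalAveragePooling),
        head=dict(
            type=LinearClsHead,
            num_classes=1000,
            in_channels=1280,
            loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
            topk=(1, 5))))

scores = model(torch.rand(1, 3, 224, 224))  # default 'tensor' mode
print(scores.shape)  # expected: torch.Size([1, 1000])
```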
+from mmengine.model.weight_init import NormalInit +from torch.nn.modules.activation import Hardswish + +from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling, + ImageClassifier, MobileNetV3, + StackedLinearClsHead) + +# model settings +model = dict( + type=ImageClassifier, + backbone=dict(type=MobileNetV3, arch='small'), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=StackedLinearClsHead, + num_classes=1000, + in_channels=576, + mid_channels=[1024], + dropout_rate=0.2, + act_cfg=dict(type=Hardswish), + loss=dict(type=CrossEntropyLoss, loss_weight=1.0), + init_cfg=dict( + type=NormalInit, layer='Linear', mean=0., std=0.01, bias=0.), + topk=(1, 5))) diff --git a/mmpretrain/configs/_base_/schedules/cifar10_bs128.py b/mmpretrain/configs/_base_/schedules/cifar10_bs128.py new file mode 100644 index 00000000..8ab749e8 --- /dev/null +++ b/mmpretrain/configs/_base_/schedules/cifar10_bs128.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.optim import MultiStepLR +from torch.optim import SGD + +# optimizer +optim_wrapper = dict( + optimizer=dict(type=SGD, lr=0.1, momentum=0.9, weight_decay=0.0001)) +# learning policy +param_scheduler = dict( + type=MultiStepLR, by_epoch=True, milestones=[100, 150], gamma=0.1) + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=200, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=128) diff --git a/mmpretrain/configs/_base_/schedules/imagenet_bs256_epochstep.py b/mmpretrain/configs/_base_/schedules/imagenet_bs256_epochstep.py new file mode 100644 index 00000000..9d245ebb --- /dev/null +++ b/mmpretrain/configs/_base_/schedules/imagenet_bs256_epochstep.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.optim import StepLR +from torch.optim import SGD + +# optimizer +optim_wrapper = dict( + optimizer=dict(type=SGD, lr=0.045, momentum=0.9, weight_decay=0.00004)) + +# learning policy +param_scheduler = dict(type=StepLR, by_epoch=True, step_size=1, gamma=0.98) + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=300, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=256) diff --git a/mmpretrain/configs/mobilenet_v2/mobilenet_v2_8xb32_in1k.py b/mmpretrain/configs/mobilenet_v2/mobilenet_v2_8xb32_in1k.py new file mode 100644 index 00000000..79eec635 --- /dev/null +++ b/mmpretrain/configs/mobilenet_v2/mobilenet_v2_8xb32_in1k.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
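The `auto_scale_lr` note in the schedule files above refers to mmengine's linear scaling rule: when training is launched with `--auto-scale-lr`, the optimizer learning rate is rescaled by the ratio of the actual total batch size to `base_batch_size`. A small worked sketch of that arithmetic:

```python
# Linear LR scaling as applied when `--auto-scale-lr` is enabled (sketch).
base_lr = 0.045          # from imagenet_bs256_epochstep.py above
base_batch_size = 256    # auto_scale_lr.base_batch_size
num_gpus, samples_per_gpu = 4, 32  # example launch, an assumption

scaled_lr = base_lr * (num_gpus * samples_per_gpu) / base_batch_size
print(scaled_lr)  # 0.0225 for a 4 x 32 = 128 total batch
```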
+from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs32_pil_resize import * + from .._base_.default_runtime import * + from .._base_.models.mobilenet_v2_1x import * + from .._base_.schedules.imagenet_bs256_epochstep import * diff --git a/mmpretrain/configs/mobilenet_v3/mobilenet_v3_large_8xb128_in1k.py b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_large_8xb128_in1k.py new file mode 100644 index 00000000..3f1bee1c --- /dev/null +++ b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_large_8xb128_in1k.py @@ -0,0 +1,40 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. + +# Refers to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification +from mmengine.config import read_base + +with read_base(): + from .._base_.models.mobilenet_v3_small import * + from .._base_.datasets.imagenet_bs128_mbv3 import * + from .._base_.default_runtime import * + +from mmengine.optim import StepLR +from torch.optim import RMSprop + +# model settings +model.merge( + dict( + backbone=dict(arch='large'), + head=dict(in_channels=960, mid_channels=[1280]), + )) +# schedule settings +optim_wrapper = dict( + optimizer=dict( + type=RMSprop, + lr=0.064, + alpha=0.9, + momentum=0.9, + eps=0.0316, + weight_decay=1e-5)) + +param_scheduler = dict(type=StepLR, by_epoch=True, step_size=2, gamma=0.973) + +train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (8 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=1024) diff --git a/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_050_8xb128_in1k.py b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_050_8xb128_in1k.py new file mode 100644 index 00000000..50e1ffc6 --- /dev/null +++ b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_050_8xb128_in1k.py @@ -0,0 +1,85 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
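These deployment configs are ordinary Python files; besides `tools/train.py`, they can also be loaded programmatically. A minimal sketch of launching a run from one of them with mmengine, where the config path assumes a source checkout and the `work_dir` value is an arbitrary choice for illustration:

```python
from mmengine.config import Config
from mmengine.runner import Runner

# Path and work_dir are illustrative assumptions.
cfg = Config.fromfile(
    'mmpretrain/configs/mobilenet_v3/mobilenet_v3_large_8xb128_in1k.py')
cfg.work_dir = 'work_dirs/mobilenet_v3_large_8xb128_in1k'

runner = Runner.from_cfg(cfg)
runner.train()
```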
+# Refers to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification + +from mmengine.config import read_base + +with read_base(): + from .._base_.models.mobilenet_v3_small import * + from .._base_.datasets.imagenet_bs128_mbv3 import * + from .._base_.default_runtime import * + +from mmengine.optim import StepLR +from torch.nn.modules.batchnorm import BatchNorm2d +from torch.optim import RMSprop + +# model settings +model.merge( + dict( + backbone=dict( + arch='small_050', + norm_cfg=dict(type=BatchNorm2d, eps=1e-5, momentum=0.1)), + head=dict(in_channels=288), + )) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=RandomResizedCrop, + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict( + type=AutoAugment, + policies='imagenet', + hparams=dict(pad_val=[round(x) for x in [103.53, 116.28, 123.675]])), + dict( + type=RandomErasing, + erase_prob=0.2, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=ResizeEdge, + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type=CenterCrop, crop_size=224), + dict(type=PackInputs), +] + +train_dataloader.merge(dict(dataset=dict(pipeline=train_pipeline))) + +val_dataloader.merge(dict(dataset=dict(pipeline=test_pipeline))) +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader + +# schedule settings +optim_wrapper = dict( + optimizer=dict( + type=RMSprop, + lr=0.064, + alpha=0.9, + momentum=0.9, + eps=0.0316, + weight_decay=1e-5)) + +param_scheduler = dict(type=StepLR, by_epoch=True, step_size=2, gamma=0.973) + +train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=10) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (8 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=1024) diff --git a/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_075_8xb128_in1k.py b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_075_8xb128_in1k.py new file mode 100644 index 00000000..c8c640cd --- /dev/null +++ b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_075_8xb128_in1k.py @@ -0,0 +1,83 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
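A note on the `head.in_channels` overrides in these variants: the values appear to track the backbone width multiplier applied to the 576-channel output of `arch='small'`, which is consistent with the numbers used in the configs (this is a reading of the configs, not a statement from the patch):

```python
# in_channels used by the MobileNetV3-Small variant configs above.
base_channels = 576  # arch='small', multiplier 1.0
for arch, multiplier in [('small_075', 0.75), ('small_050', 0.5)]:
    print(arch, int(base_channels * multiplier))
# small_075 432
# small_050 288
```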
+# Refers to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification + +from mmengine.config import read_base + +with read_base(): + from .._base_.models.mobilenet_v3_small import * + from .._base_.datasets.imagenet_bs128_mbv3 import * + from .._base_.default_runtime import * + +from mmengine.optim import StepLR +from torch.nn.modules.batchnorm import BatchNorm2d +from torch.optim import RMSprop + +# model settings +model.merge( + dict( + backbone=dict( + arch='small_075', + norm_cfg=dict(type=BatchNorm2d, eps=1e-5, momentum=0.1)), + head=dict(in_channels=432), + )) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=RandomResizedCrop, + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict( + type=AutoAugment, + policies='imagenet', + hparams=dict(pad_val=[round(x) for x in [103.53, 116.28, 123.675]])), + dict( + type=RandomErasing, + erase_prob=0.2, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=ResizeEdge, + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type=CenterCrop, crop_size=224), + dict(type=PackInputs), +] + +train_dataloader.merge(dict(dataset=dict(pipeline=train_pipeline))) +val_dataloader.merge(dict(dataset=dict(pipeline=test_pipeline))) +test_dataloader = val_dataloader + +# schedule settings +optim_wrapper = dict( + optimizer=dict( + type=RMSprop, + lr=0.064, + alpha=0.9, + momentum=0.9, + eps=0.0316, + weight_decay=1e-5)) + +param_scheduler = dict(type=StepLR, by_epoch=True, step_size=2, gamma=0.973) + +train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=10) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (8 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=1024) diff --git a/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb128_in1k.py b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb128_in1k.py new file mode 100644 index 00000000..0c220a01 --- /dev/null +++ b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb128_in1k.py @@ -0,0 +1,34 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +# Refers to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification + +from mmengine.config import read_base + +with read_base(): + from .._base_.models.mobilenet_v3_small import * + from .._base_.datasets.imagenet_bs128_mbv3 import * + from .._base_.default_runtime import * + +from mmengine.optim import StepLR +from torch.optim import RMSprop + +# schedule settings +optim_wrapper = dict( + optimizer=dict( + type=RMSprop, + lr=0.064, + alpha=0.9, + momentum=0.9, + eps=0.0316, + weight_decay=1e-5)) + +param_scheduler = dict(type=StepLR, by_epoch=True, step_size=2, gamma=0.973) + +train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (8 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=1024) diff --git a/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb16_cifar10.py b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb16_cifar10.py new file mode 100644 index 00000000..0f91ee38 --- /dev/null +++ b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb16_cifar10.py @@ -0,0 +1,34 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.models.mobilenet_v3_small import * + from .._base_.datasets.cifar10_bs16 import * + from .._base_.schedules.cifar10_bs128 import * + from .._base_.default_runtime import * + +from mmengine.optim import MultiStepLR + +# model settings +model.merge( + dict( + head=dict( + _delete_=True, + type=StackedLinearClsHead, + num_classes=10, + in_channels=576, + mid_channels=[1280], + act_cfg=dict(type=Hardswish), + loss=dict(type=CrossEntropyLoss, loss_weight=1.0), + topk=(1, 5)))) +# schedule settings +param_scheduler.merge( + dict( + type=MultiStepLR, + by_epoch=True, + milestones=[120, 170], + gamma=0.1, + )) + +train_cfg.merge(dict(by_epoch=True, max_epochs=200)) diff --git a/mmpretrain/datasets/categories.py b/mmpretrain/datasets/categories.py index 011ee5c1..9e75f795 100644 --- a/mmpretrain/datasets/categories.py +++ b/mmpretrain/datasets/categories.py @@ -1438,3 +1438,224 @@ CIFAR100_CATEGORIES_CN = ( '海豹', '鲨鱼', '尖嘴小鼠', '臭鼬', '摩天大楼', '蜗牛', '蛇', '蜘蛛', '松鼠', '电车', '向日葵', '甜椒', '桌子', '坦克', '电话', '电视', '老虎', '拖拉机', '火车', '鳟鱼', '郁金香', '乌龟', '衣柜', '鲸鱼', '柳树', '狼', '女人', '蠕虫') + +IMAGENET_SIMPLE_CATEGORIES = ( + 'tench', 'goldfish', 'great white shark', 'tiger shark', + 'hammerhead shark', 'electric ray', 'stingray', 'rooster', 'hen', + 'ostrich', 'brambling', 'goldfinch', 'house finch', 'junco', + 'indigo bunting', 'American robin', 'bulbul', 'jay', 'magpie', 'chickadee', + 'American dipper', 'kite (bird of prey)', 'bald eagle', 'vulture', + 'great grey owl', 'fire salamander', 'smooth newt', 'newt', + 'spotted salamander', 'axolotl', 'American bullfrog', 'tree frog', + 'tailed frog', 'loggerhead sea turtle', 'leatherback sea turtle', + 'mud turtle', 'terrapin', 'box turtle', 'banded gecko', 'green iguana', + 'Carolina anole', 'desert grassland whiptail lizard', 'agama', + 'frilled-necked lizard', 'alligator lizard', 'Gila monster', + 'European green lizard', 'chameleon', 'Komodo dragon', 'Nile crocodile', + 'American alligator', 'triceratops', 'worm snake', 'ring-necked snake', + 'eastern hog-nosed snake', 'smooth green snake', 'kingsnake', + 'garter snake', 'water snake', 'vine snake', 'night snake', + 'boa constrictor', 'African rock python', 'Indian cobra', 'green mamba', + 'sea snake', 'Saharan horned viper', 'eastern diamondback rattlesnake', + 'sidewinder rattlesnake', 'trilobite', 'harvestman', 'scorpion', + 'yellow garden spider', 'barn spider', 'European garden spider', + 'southern black widow', 'tarantula', 'wolf spider', 'tick', 'centipede', + 'black grouse', 'ptarmigan', 'ruffed grouse', 'prairie grouse', 'peafowl', + 'quail', 'partridge', 'african grey parrot', 'macaw', + 'sulphur-crested cockatoo', 'lorikeet', 'coucal', 'bee eater', 'hornbill', + 'hummingbird', 'jacamar', 'toucan', 'duck', 'red-breasted merganser', + 'goose', 'black swan', 'tusker', 'echidna', 'platypus', 'wallaby', 'koala', + 'wombat', 'jellyfish', 'sea anemone', 'brain coral', 'flatworm', + 'nematode', 'conch', 'snail', 
'slug', 'sea slug', 'chiton', + 'chambered nautilus', 'Dungeness crab', 'rock crab', 'fiddler crab', + 'red king crab', 'American lobster', 'spiny lobster', 'crayfish', + 'hermit crab', 'isopod', 'white stork', 'black stork', 'spoonbill', + 'flamingo', 'little blue heron', 'great egret', 'bittern bird', + 'crane bird', 'limpkin', 'common gallinule', 'American coot', 'bustard', + 'ruddy turnstone', 'dunlin', 'common redshank', 'dowitcher', + 'oystercatcher', 'pelican', 'king penguin', 'albatross', 'grey whale', + 'killer whale', 'dugong', 'sea lion', 'Chihuahua', 'Japanese Chin', + 'Maltese', 'Pekingese', 'Shih Tzu', 'King Charles Spaniel', 'Papillon', + 'toy terrier', 'Rhodesian Ridgeback', 'Afghan Hound', 'Basset Hound', + 'Beagle', 'Bloodhound', 'Bluetick Coonhound', 'Black and Tan Coonhound', + 'Treeing Walker Coonhound', 'English foxhound', 'Redbone Coonhound', + 'borzoi', 'Irish Wolfhound', 'Italian Greyhound', 'Whippet', + 'Ibizan Hound', 'Norwegian Elkhound', 'Otterhound', 'Saluki', + 'Scottish Deerhound', 'Weimaraner', 'Staffordshire Bull Terrier', + 'American Staffordshire Terrier', 'Bedlington Terrier', 'Border Terrier', + 'Kerry Blue Terrier', 'Irish Terrier', 'Norfolk Terrier', + 'Norwich Terrier', 'Yorkshire Terrier', 'Wire Fox Terrier', + 'Lakeland Terrier', 'Sealyham Terrier', 'Airedale Terrier', + 'Cairn Terrier', 'Australian Terrier', 'Dandie Dinmont Terrier', + 'Boston Terrier', 'Miniature Schnauzer', 'Giant Schnauzer', + 'Standard Schnauzer', 'Scottish Terrier', 'Tibetan Terrier', + 'Australian Silky Terrier', 'Soft-coated Wheaten Terrier', + 'West Highland White Terrier', 'Lhasa Apso', 'Flat-Coated Retriever', + 'Curly-coated Retriever', 'Golden Retriever', 'Labrador Retriever', + 'Chesapeake Bay Retriever', 'German Shorthaired Pointer', 'Vizsla', + 'English Setter', 'Irish Setter', 'Gordon Setter', 'Brittany dog', + 'Clumber Spaniel', 'English Springer Spaniel', 'Welsh Springer Spaniel', + 'Cocker Spaniel', 'Sussex Spaniel', 'Irish Water Spaniel', 'Kuvasz', + 'Schipperke', 'Groenendael dog', 'Malinois', 'Briard', 'Australian Kelpie', + 'Komondor', 'Old English Sheepdog', 'Shetland Sheepdog', 'collie', + 'Border Collie', 'Bouvier des Flandres dog', 'Rottweiler', + 'German Shepherd Dog', 'Dobermann', 'Miniature Pinscher', + 'Greater Swiss Mountain Dog', 'Bernese Mountain Dog', + 'Appenzeller Sennenhund', 'Entlebucher Sennenhund', 'Boxer', 'Bullmastiff', + 'Tibetan Mastiff', 'French Bulldog', 'Great Dane', 'St. 
Bernard', 'husky', + 'Alaskan Malamute', 'Siberian Husky', 'Dalmatian', 'Affenpinscher', + 'Basenji', 'pug', 'Leonberger', 'Newfoundland dog', 'Great Pyrenees dog', + 'Samoyed', 'Pomeranian', 'Chow Chow', 'Keeshond', 'brussels griffon', + 'Pembroke Welsh Corgi', 'Cardigan Welsh Corgi', 'Toy Poodle', + 'Miniature Poodle', 'Standard Poodle', + 'Mexican hairless dog (xoloitzcuintli)', 'grey wolf', + 'Alaskan tundra wolf', 'red wolf or maned wolf', 'coyote', 'dingo', + 'dhole', 'African wild dog', 'hyena', 'red fox', 'kit fox', 'Arctic fox', + 'grey fox', 'tabby cat', 'tiger cat', 'Persian cat', 'Siamese cat', + 'Egyptian Mau', 'cougar', 'lynx', 'leopard', 'snow leopard', 'jaguar', + 'lion', 'tiger', 'cheetah', 'brown bear', 'American black bear', + 'polar bear', 'sloth bear', 'mongoose', 'meerkat', 'tiger beetle', + 'ladybug', 'ground beetle', 'longhorn beetle', 'leaf beetle', + 'dung beetle', 'rhinoceros beetle', 'weevil', 'fly', 'bee', 'ant', + 'grasshopper', 'cricket insect', 'stick insect', 'cockroach', + 'praying mantis', 'cicada', 'leafhopper', 'lacewing', 'dragonfly', + 'damselfly', 'red admiral butterfly', 'ringlet butterfly', + 'monarch butterfly', 'small white butterfly', 'sulphur butterfly', + 'gossamer-winged butterfly', 'starfish', 'sea urchin', 'sea cucumber', + 'cottontail rabbit', 'hare', 'Angora rabbit', 'hamster', 'porcupine', + 'fox squirrel', 'marmot', 'beaver', 'guinea pig', 'common sorrel horse', + 'zebra', 'pig', 'wild boar', 'warthog', 'hippopotamus', 'ox', + 'water buffalo', 'bison', 'ram (adult male sheep)', 'bighorn sheep', + 'Alpine ibex', 'hartebeest', 'impala (antelope)', 'gazelle', + 'arabian camel', 'llama', 'weasel', 'mink', 'European polecat', + 'black-footed ferret', 'otter', 'skunk', 'badger', 'armadillo', + 'three-toed sloth', 'orangutan', 'gorilla', 'chimpanzee', 'gibbon', + 'siamang', 'guenon', 'patas monkey', 'baboon', 'macaque', 'langur', + 'black-and-white colobus', 'proboscis monkey', 'marmoset', + 'white-headed capuchin', 'howler monkey', 'titi monkey', + "Geoffroy's spider monkey", 'common squirrel monkey', 'ring-tailed lemur', + 'indri', 'Asian elephant', 'African bush elephant', 'red panda', + 'giant panda', 'snoek fish', 'eel', 'silver salmon', 'rock beauty fish', + 'clownfish', 'sturgeon', 'gar fish', 'lionfish', 'pufferfish', 'abacus', + 'abaya', 'academic gown', 'accordion', 'acoustic guitar', + 'aircraft carrier', 'airliner', 'airship', 'altar', 'ambulance', + 'amphibious vehicle', 'analog clock', 'apiary', 'apron', 'trash can', + 'assault rifle', 'backpack', 'bakery', 'balance beam', 'balloon', + 'ballpoint pen', 'Band-Aid', 'banjo', 'baluster / handrail', 'barbell', + 'barber chair', 'barbershop', 'barn', 'barometer', 'barrel', 'wheelbarrow', + 'baseball', 'basketball', 'bassinet', 'bassoon', 'swimming cap', + 'bath towel', 'bathtub', 'station wagon', 'lighthouse', 'beaker', + 'military hat (bearskin or shako)', 'beer bottle', 'beer glass', + 'bell tower', 'baby bib', 'tandem bicycle', 'bikini', 'ring binder', + 'binoculars', 'birdhouse', 'boathouse', 'bobsleigh', 'bolo tie', + 'poke bonnet', 'bookcase', 'bookstore', 'bottle cap', 'hunting bow', + 'bow tie', 'brass memorial plaque', 'bra', 'breakwater', 'breastplate', + 'broom', 'bucket', 'buckle', 'bulletproof vest', 'high-speed train', + 'butcher shop', 'taxicab', 'cauldron', 'candle', 'cannon', 'canoe', + 'can opener', 'cardigan', 'car mirror', 'carousel', 'tool kit', + 'cardboard box / carton', 'car wheel', 'automated teller machine', + 'cassette', 'cassette player', 'castle', 
'catamaran', 'CD player', 'cello', + 'mobile phone', 'chain', 'chain-link fence', 'chain mail', 'chainsaw', + 'storage chest', 'chiffonier', 'bell or wind chime', 'china cabinet', + 'Christmas stocking', 'church', 'movie theater', 'cleaver', + 'cliff dwelling', 'cloak', 'clogs', 'cocktail shaker', 'coffee mug', + 'coffeemaker', 'spiral or coil', 'combination lock', 'computer keyboard', + 'candy store', 'container ship', 'convertible', 'corkscrew', 'cornet', + 'cowboy boot', 'cowboy hat', 'cradle', 'construction crane', + 'crash helmet', 'crate', 'infant bed', 'Crock Pot', 'croquet ball', + 'crutch', 'cuirass', 'dam', 'desk', 'desktop computer', + 'rotary dial telephone', 'diaper', 'digital clock', 'digital watch', + 'dining table', 'dishcloth', 'dishwasher', 'disc brake', 'dock', + 'dog sled', 'dome', 'doormat', 'drilling rig', 'drum', 'drumstick', + 'dumbbell', 'Dutch oven', 'electric fan', 'electric guitar', + 'electric locomotive', 'entertainment center', 'envelope', + 'espresso machine', 'face powder', 'feather boa', 'filing cabinet', + 'fireboat', 'fire truck', 'fire screen', 'flagpole', 'flute', + 'folding chair', 'football helmet', 'forklift', 'fountain', 'fountain pen', + 'four-poster bed', 'freight car', 'French horn', 'frying pan', 'fur coat', + 'garbage truck', 'gas mask or respirator', 'gas pump', 'goblet', 'go-kart', + 'golf ball', 'golf cart', 'gondola', 'gong', 'gown', 'grand piano', + 'greenhouse', 'radiator grille', 'grocery store', 'guillotine', + 'hair clip', 'hair spray', 'half-track', 'hammer', 'hamper', 'hair dryer', + 'hand-held computer', 'handkerchief', 'hard disk drive', 'harmonica', + 'harp', 'combine harvester', 'hatchet', 'holster', 'home theater', + 'honeycomb', 'hook', 'hoop skirt', 'gymnastic horizontal bar', + 'horse-drawn vehicle', 'hourglass', 'iPod', 'clothes iron', + 'carved pumpkin', 'jeans', 'jeep', 'T-shirt', 'jigsaw puzzle', 'rickshaw', + 'joystick', 'kimono', 'knee pad', 'knot', 'lab coat', 'ladle', 'lampshade', + 'laptop computer', 'lawn mower', 'lens cap', 'letter opener', 'library', + 'lifeboat', 'lighter', 'limousine', 'ocean liner', 'lipstick', + 'slip-on shoe', 'lotion', 'music speaker', 'loupe magnifying glass', + 'sawmill', 'magnetic compass', 'messenger bag', 'mailbox', 'tights', + 'one-piece bathing suit', 'manhole cover', 'maraca', 'marimba', 'mask', + 'matchstick', 'maypole', 'maze', 'measuring cup', 'medicine cabinet', + 'megalith', 'microphone', 'microwave oven', 'military uniform', 'milk can', + 'minibus', 'miniskirt', 'minivan', 'missile', 'mitten', 'mixing bowl', + 'mobile home', 'ford model t', 'modem', 'monastery', 'monitor', 'moped', + 'mortar and pestle', 'graduation cap', 'mosque', 'mosquito net', 'vespa', + 'mountain bike', 'tent', 'computer mouse', 'mousetrap', 'moving van', + 'muzzle', 'metal nail', 'neck brace', 'necklace', 'baby pacifier', + 'notebook computer', 'obelisk', 'oboe', 'ocarina', 'odometer', + 'oil filter', 'pipe organ', 'oscilloscope', 'overskirt', 'bullock cart', + 'oxygen mask', 'product packet / packaging', 'paddle', 'paddle wheel', + 'padlock', 'paintbrush', 'pajamas', 'palace', 'pan flute', 'paper towel', + 'parachute', 'parallel bars', 'park bench', 'parking meter', + 'railroad car', 'patio', 'payphone', 'pedestal', 'pencil case', + 'pencil sharpener', 'perfume', 'Petri dish', 'photocopier', 'plectrum', + 'Pickelhaube', 'picket fence', 'pickup truck', 'pier', 'piggy bank', + 'pill bottle', 'pillow', 'ping-pong ball', 'pinwheel', 'pirate ship', + 'drink pitcher', 'block plane', 'planetarium', 'plastic 
bag', 'plate rack', + 'farm plow', 'plunger', 'Polaroid camera', 'pole', 'police van', 'poncho', + 'pool table', 'soda bottle', 'plant pot', "potter's wheel", 'power drill', + 'prayer rug', 'printer', 'prison', 'missile', 'projector', 'hockey puck', + 'punching bag', 'purse', 'quill', 'quilt', 'race car', 'racket', + 'radiator', 'radio', 'radio telescope', 'rain barrel', + 'recreational vehicle', 'fishing casting reel', 'reflex camera', + 'refrigerator', 'remote control', 'restaurant', 'revolver', 'rifle', + 'rocking chair', 'rotisserie', 'eraser', 'rugby ball', + 'ruler measuring stick', 'sneaker', 'safe', 'safety pin', 'salt shaker', + 'sandal', 'sarong', 'saxophone', 'scabbard', 'weighing scale', + 'school bus', 'schooner', 'scoreboard', 'CRT monitor', 'screw', + 'screwdriver', 'seat belt', 'sewing machine', 'shield', 'shoe store', + 'shoji screen / room divider', 'shopping basket', 'shopping cart', + 'shovel', 'shower cap', 'shower curtain', 'ski', 'balaclava ski mask', + 'sleeping bag', 'slide rule', 'sliding door', 'slot machine', 'snorkel', + 'snowmobile', 'snowplow', 'soap dispenser', 'soccer ball', 'sock', + 'solar thermal collector', 'sombrero', 'soup bowl', 'keyboard space bar', + 'space heater', 'space shuttle', 'spatula', 'motorboat', 'spider web', + 'spindle', 'sports car', 'spotlight', 'stage', 'steam locomotive', + 'through arch bridge', 'steel drum', 'stethoscope', 'scarf', 'stone wall', + 'stopwatch', 'stove', 'strainer', 'tram', 'stretcher', 'couch', 'stupa', + 'submarine', 'suit', 'sundial', 'sunglasses', 'sunglasses', 'sunscreen', + 'suspension bridge', 'mop', 'sweatshirt', 'swim trunks / shorts', 'swing', + 'electrical switch', 'syringe', 'table lamp', 'tank', 'tape player', + 'teapot', 'teddy bear', 'television', 'tennis ball', 'thatched roof', + 'front curtain', 'thimble', 'threshing machine', 'throne', 'tile roof', + 'toaster', 'tobacco shop', 'toilet seat', 'torch', 'totem pole', + 'tow truck', 'toy store', 'tractor', 'semi-trailer truck', 'tray', + 'trench coat', 'tricycle', 'trimaran', 'tripod', 'triumphal arch', + 'trolleybus', 'trombone', 'hot tub', 'turnstile', 'typewriter keyboard', + 'umbrella', 'unicycle', 'upright piano', 'vacuum cleaner', 'vase', + 'vaulted or arched ceiling', 'velvet fabric', 'vending machine', + 'vestment', 'viaduct', 'violin', 'volleyball', 'waffle iron', 'wall clock', + 'wallet', 'wardrobe', 'military aircraft', 'sink', 'washing machine', + 'water bottle', 'water jug', 'water tower', 'whiskey jug', 'whistle', + 'hair wig', 'window screen', 'window shade', 'Windsor tie', 'wine bottle', + 'airplane wing', 'wok', 'wooden spoon', 'wool', 'split-rail fence', + 'shipwreck', 'sailboat', 'yurt', 'website', 'comic book', 'crossword', + 'traffic or street sign', 'traffic light', 'dust jacket', 'menu', 'plate', + 'guacamole', 'consomme', 'hot pot', 'trifle', 'ice cream', 'popsicle', + 'baguette', 'bagel', 'pretzel', 'cheeseburger', 'hot dog', + 'mashed potatoes', 'cabbage', 'broccoli', 'cauliflower', 'zucchini', + 'spaghetti squash', 'acorn squash', 'butternut squash', 'cucumber', + 'artichoke', 'bell pepper', 'cardoon', 'mushroom', 'Granny Smith apple', + 'strawberry', 'orange', 'lemon', 'fig', 'pineapple', 'banana', 'jackfruit', + 'cherimoya (custard apple)', 'pomegranate', 'hay', 'carbonara', + 'chocolate syrup', 'dough', 'meatloaf', 'pizza', 'pot pie', 'burrito', + 'red wine', 'espresso', 'tea cup', 'eggnog', 'mountain', 'bubble', 'cliff', + 'coral reef', 'geyser', 'lakeshore', 'promontory', 'sandbar', 'beach', + 'valley', 'volcano', 
'baseball player', 'bridegroom', 'scuba diver', + 'rapeseed', 'daisy', "yellow lady's slipper", 'corn', 'acorn', 'rose hip', + 'horse chestnut seed', 'coral fungus', 'agaric', 'gyromitra', + 'stinkhorn mushroom', 'earth star fungus', 'hen of the woods mushroom', + 'bolete', 'corn cob', 'toilet paper') diff --git a/mmpretrain/datasets/coco_retrieval.py b/mmpretrain/datasets/coco_retrieval.py index 60d1586a..be8a0bcb 100644 --- a/mmpretrain/datasets/coco_retrieval.py +++ b/mmpretrain/datasets/coco_retrieval.py @@ -1,18 +1,45 @@ # Copyright (c) OpenMMLab. All rights reserved. import json +import os.path as osp from collections import OrderedDict -from typing import List +from os import PathLike +from typing import List, Sequence, Union from mmengine import get_file_backend -from mmpretrain.registry import DATASETS +from mmpretrain.registry import DATASETS, TRANSFORMS from .base_dataset import BaseDataset +def expanduser(data_prefix): + if isinstance(data_prefix, (str, PathLike)): + return osp.expanduser(data_prefix) + else: + return data_prefix + + @DATASETS.register_module() class COCORetrieval(BaseDataset): """COCO Retrieval dataset. + COCO (Common Objects in Context): The COCO dataset contains more than + 330K images,each of which has approximately 5 descriptive annotations. + This dataset was releasedin collaboration between Microsoft and Carnegie + Mellon University + + COCO_2014 dataset directory: :: + + COCO_2014 + ├── val2014 + ├── train2014 + ├── annotations + ├── instances_train2014.json + ├── instances_val2014.json + ├── person_keypoints_train2014.json + ├── person_keypoints_val2014.json + ├── captions_train2014.json + ├── captions_val2014.json + Args: ann_file (str): Annotation file path. test_mode (bool): Whether dataset is used for evaluation. This will @@ -23,8 +50,52 @@ class COCORetrieval(BaseDataset): data_prefix (str | dict): Prefix for training data. Defaults to ''. pipeline (Sequence): Processing pipeline. Defaults to an empty tuple. **kwargs: Other keyword arguments in :class:`BaseDataset`. 
+ + Examples: + >>> from mmpretrain.datasets import COCORetrieval + >>> train_dataset=COCORetrieval(data_root='coco2014/') + >>> train_dataset + Dataset COCORetrieval + Number of samples: 414113 + Annotation file: /coco2014/annotations/captions_train2014.json + Prefix of images: /coco2014/ + >>> from mmpretrain.datasets import COCORetrieval + >>> val_dataset = COCORetrieval(data_root='coco2014/') + >>> val_dataset + Dataset COCORetrieval + Number of samples: 202654 + Annotation file: /coco2014/annotations/captions_val2014.json + Prefix of images: /coco2014/ """ + def __init__(self, + ann_file: str, + test_mode: bool = False, + data_prefix: Union[str, dict] = '', + data_root: str = '', + pipeline: Sequence = (), + **kwargs): + + if isinstance(data_prefix, str): + data_prefix = dict(img_path=expanduser(data_prefix)) + + ann_file = expanduser(ann_file) + transforms = [] + for transform in pipeline: + if isinstance(transform, dict): + transforms.append(TRANSFORMS.build(transform)) + else: + transforms.append(transform) + + super().__init__( + data_root=data_root, + data_prefix=data_prefix, + test_mode=test_mode, + pipeline=transforms, + ann_file=ann_file, + **kwargs, + ) + def load_data_list(self) -> List[dict]: """Load data list.""" # get file backend diff --git a/mmpretrain/models/multimodal/__init__.py b/mmpretrain/models/multimodal/__init__.py index 072c0f84..e68504c6 100644 --- a/mmpretrain/models/multimodal/__init__.py +++ b/mmpretrain/models/multimodal/__init__.py @@ -5,11 +5,13 @@ if WITH_MULTIMODAL: from .blip import * # noqa: F401,F403 from .blip2 import * # noqa: F401,F403 from .chinese_clip import * # noqa: F401, F403 + from .clip import * # noqa: F401, F403 from .flamingo import * # noqa: F401, F403 from .llava import * # noqa: F401, F403 from .minigpt4 import * # noqa: F401, F403 from .ofa import * # noqa: F401, F403 from .otter import * # noqa: F401, F403 + from .ram import * # noqa: F401, F403 else: from mmpretrain.registry import MODELS from mmpretrain.utils.dependency import register_multimodal_placeholder @@ -17,5 +19,6 @@ else: register_multimodal_placeholder([ 'Blip2Caption', 'Blip2Retrieval', 'Blip2VQA', 'BlipCaption', 'BlipNLVR', 'BlipRetrieval', 'BlipGrounding', 'BlipVQA', 'Flamingo', - 'OFA', 'ChineseCLIP', 'MiniGPT4', 'Llava', 'Otter' + 'OFA', 'ChineseCLIP', 'MiniGPT4', 'Llava', 'Otter', 'CLIP', + 'CLIPZeroShot', 'RAM', 'RAMNormal', 'RAMOpenset' ], MODELS) diff --git a/mmpretrain/models/multimodal/clip/__init__.py b/mmpretrain/models/multimodal/clip/__init__.py new file mode 100644 index 00000000..f7a117ea --- /dev/null +++ b/mmpretrain/models/multimodal/clip/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..clip.clip import CLIP, CLIPZeroShot +from ..clip.clip_transformer import CLIPProjection, CLIPTransformer + +__all__ = ['CLIP', 'CLIPZeroShot', 'CLIPTransformer', 'CLIPProjection'] diff --git a/mmpretrain/models/multimodal/clip/clip.py b/mmpretrain/models/multimodal/clip/clip.py new file mode 100644 index 00000000..b509a63b --- /dev/null +++ b/mmpretrain/models/multimodal/clip/clip.py @@ -0,0 +1,364 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
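To make the new `COCORetrieval.__init__` above concrete: the annotation file and image prefix are resolved against `data_root`, and pipeline entries given as dicts are built through `TRANSFORMS`. A minimal construction sketch, assuming COCO 2014 has been unpacked under `data/coco2014/` following the directory layout shown in the docstring:

```python
from mmpretrain.datasets import COCORetrieval

# Paths are assumptions; adjust to where COCO 2014 actually lives.
train_set = COCORetrieval(
    data_root='data/coco2014/',
    ann_file='annotations/captions_train2014.json',
    pipeline=[dict(type='LoadImageFromFile'), dict(type='PackInputs')],
)
print(len(train_set))  # roughly 414113 image-caption samples for train2014
```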
+from abc import abstractmethod +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F +from mmengine.model import BaseModel +from torch import nn + +from mmpretrain.datasets.categories import (CIFAR100_CATEGORIES, + IMAGENET_SIMPLE_CATEGORIES) +from mmpretrain.registry import MODELS, TOKENIZER +from mmpretrain.structures import DataSample +from mmpretrain.utils import track_on_main_process +from .utils import (OPENAI_CIFAR100_PROMPT, OPENAI_IMAGENET_PROMPT, + OPENAI_IMAGENET_PROMPT_SUB) + +CIFAR100_CATEGORIES = [' '.join(c.split('_')) for c in CIFAR100_CATEGORIES] +PROTOTYPE_MAP = { + 'imagenet': IMAGENET_SIMPLE_CATEGORIES, + 'cifar100': CIFAR100_CATEGORIES, +} +PROMPT_MAP = { + 'openai_imagenet': OPENAI_IMAGENET_PROMPT, + 'openai_cifar100': OPENAI_CIFAR100_PROMPT, + 'vanilla': [lambda c: f'a photo of a {c}'], + 'openai_imagenet_sub': OPENAI_IMAGENET_PROMPT_SUB +} + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward function.""" + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class CLIP(BaseModel): + """The implementation of `CLIP `_. + + Args: + vision_backbone (dict): Config dict for vision backbone. + text_backbone (dict): Config dict for text backbone. + tokenizer (dict): Config dict for text tokenizer. + proj_dim (int): Projection dimension for similarity computation. + text_prototype (str): Text prototype, which can be a key in + `PROTOTYPE_MAP` or list of text. + text_prompt (str): The prompt for text prototype. + Defaults to 'vanilla',which refers to "a photo of {cls}". + context_length (int): The context length to use. Defaults to 77. + data_preprocessor (Union[dict, nn.Module], optional): The config for + preprocessing input data. If None or no specified type, it will use + "MultiModalDataPreprocessor" as type. + See :class:`MultiModalDataPreprocessor` for more details. + Defaults to None. + init_cfg (dict, optional): The config to control the initialization. + Defaults to None. 
+ """ + + def __init__(self, + vision_backbone: dict, + projection: dict, + text_backbone: dict, + tokenizer: dict, + vocab_size: int, + transformer_width: int, + proj_dim: int, + context_length: int = 77, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None): + if data_preprocessor is None: + data_preprocessor = {} + data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor') + data_preprocessor = MODELS.build(data_preprocessor) + + super().__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + + self.context_length = context_length + + # build the vision transformer + self.visual = MODELS.build(vision_backbone) + + # build the visual projection + self.visual_proj = MODELS.build(projection) + + # build attn_mask for casual-attn + text_backbone['attn_mask'] = self.build_attention_mask() + + # build the text transformer + self.transformer = MODELS.build(text_backbone) + + self.vocab_size = vocab_size + self.token_embedding = nn.Embedding(vocab_size, transformer_width) + self.positional_embedding = nn.Parameter( + torch.empty(self.context_length, transformer_width)) + self.ln_final = LayerNorm(transformer_width) + + self.text_projection = nn.Parameter( + torch.empty(transformer_width, proj_dim)) + self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) + + self.initialize_parameters() + + self.tokenizer = TOKENIZER.build(tokenizer) + + self.tokenizer.vocab = self.tokenizer.get_vocab( + ) # CLIPTokenizer has no attribute named 'vocab', so manually + + def initialize_parameters(self) -> None: + """Initialize the parameters. + + The pretrained weight will override the initialized parameters by this + function. + """ + nn.init.normal_(self.token_embedding.weight, std=0.02) + nn.init.normal_(self.positional_embedding, std=0.01) + + proj_std = (self.transformer.width**-0.5) * ( + (2 * self.transformer.layers)**-0.5) + attn_std = self.transformer.width**-0.5 + fc_std = (2 * self.transformer.width)**-0.5 + for block in self.transformer.resblocks: + nn.init.normal_(block.attn.in_proj_weight, std=attn_std) + nn.init.normal_(block.attn.out_proj.weight, std=proj_std) + nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) + nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) + + if self.text_projection is not None: + nn.init.normal_( + self.text_projection, std=self.transformer.width**-0.5) + + def build_attention_mask(self): + # lazily create causal attention mask, + # with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(self.context_length, self.context_length) + mask.fill_(float('-inf')) + mask.triu_(1) # zero out the lower diagonal + return mask + + def forward( + self, + images: torch.Tensor, + data_samples: Optional[list] = None, + mode: str = 'predict', + **kwargs, + ): + """The unified entry for a forward process in both training and test. + The method accepts the following modes: + + - "predict": Forward and return a list of data samples contain the + predict results. + + Args: + images (torch.Tensor): the preprocessed image tensor of shape + ``(N, C, H, W)``. + data_samples (List[DataSample], optional): The annotation data + of every samples. Defaults to None. + mode (str): Return what kind of value. Defaults to 'predict'. 
+ """ + if mode == 'predict': + return self.predict(images, data_samples, **kwargs) + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + def extract_image_feat(self, images: torch.Tensor) -> torch.Tensor: + """The function to extract image latent features.""" + return self.visual_proj(self.visual(images))[0] + + def extract_text_feat(self, texts: torch.Tensor) -> torch.Tensor: + """The function to extract text latent features.""" + x = self.token_embedding(texts) # [batch_size, n_ctx, d_model] + + x = x + self.positional_embedding + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x)[0] + + x = x.permute(1, 0, 2) # LND -> NLD + x = self.ln_final(x) + + # x.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding + # (eot_token is the highest number in each sequence) + x = x[torch.arange(x.shape[0]), + texts.argmax(dim=-1)] @ self.text_projection + + return x + + def extract_feat( + self, images: torch.Tensor, + texts: torch.Tensor) -> Union[torch.Tensor, Tuple[torch.Tensor]]: + """The function to extract image and text latent features, the input + image or text can not both be None.""" + + assert images is not None or texts is not None, \ + 'text and image cannot both be None!' + if images is None: + return self.extract_text_feat(texts) + elif texts is None: + return self.extract_image_feat(images) + + image_features = self.extract_image_feat(images) + text_features = self.extract_text_feat(texts) + + image_features = image_features / image_features.norm( + dim=-1, keepdim=True) + text_features = text_features / text_features.norm( + dim=-1, keepdim=True) + + return image_features, text_features + + def compute_similarity(self, images, texts): + """Extract images and texts features and compute cosine similarity.""" + image_features, text_features = self.extract_feat( + images=images, texts=texts) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_image = logit_scale * image_features @ text_features.t() + logits_per_text = logits_per_image.t() + + # shape (N, N) + return logits_per_image, logits_per_text + + @abstractmethod + def predict(self, + images: torch.Tensor, + data_samples: DataSample = None) -> DataSample: + raise NotImplementedError + + def tokenize(self, texts: Union[str, List[str]]) -> torch.LongTensor: + """Returns the tokenized representation of given input string(s) + + Args: + texts (Union[str, List[str]]): An input string or a list of input + strings to tokenize + context_length (int): The context length to use. Defaults to 52. + + Returns: + torch.Tensor: Resulting tokens. 
+ """ + if isinstance(texts, str): + texts = [texts] + + all_tokens = [] + for text in texts: + # adapt the text to Chinese BERT vocab + # text = text.lower().replace('“', "\"").replace('”', "\"") + + # add special tokens + all_tokens.append( + [self.tokenizer.vocab['<|startoftext|>'] + ] + # <|startoftext|>代表[CLS] token + self.tokenizer.convert_tokens_to_ids( + self.tokenizer.tokenize(text))[:self.context_length - 2] + + [self.tokenizer.vocab['<|endoftext|>']]) + + result = torch.zeros( + len(all_tokens), self.context_length, dtype=torch.long) + + for i, tokens in enumerate(all_tokens): + assert len(tokens) <= self.context_length + result[i, :len(tokens)] = torch.tensor(tokens) + + return result + + +@MODELS.register_module() +class CLIPZeroShot(CLIP): + + def __init__( + self, + vision_backbone: dict, + projection: dict, + text_backbone: dict, + tokenizer: dict, + vocab_size: int, + transformer_width: int, + proj_dim: int, + context_length: int = 77, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None, + text_prototype: Union[str, List[str]] = 'imagenet', + text_prompt: str = 'vanilla', + ): + super(CLIPZeroShot, + self).__init__(vision_backbone, projection, text_backbone, + tokenizer, vocab_size, transformer_width, + proj_dim, context_length, data_preprocessor, + init_cfg) + + # for zero-shot classification + if isinstance(text_prototype, + str) and text_prototype in PROTOTYPE_MAP.keys(): + self.prototype = PROTOTYPE_MAP[text_prototype] + else: + self.prototype = text_prototype + self.text_prototype_embeds = None + + self.prompt = PROMPT_MAP[text_prompt] + + def predict(self, + images: torch.Tensor, + data_samples: DataSample = None) -> DataSample: + """Predict the classes of the input images. + + The prediction is for zero-shot classification and the text prototypes + will be prepared in thisfunction. + + Args: + images (torch.Tensor): The input images. + data_samples (DataSample): The data samples with information from + dataset. + + Returns: + DataSample: The results of prediction. 
+ """ + + if self.text_prototype_embeds is None: + self.prepare_text_prototype(device=images.device) + + image_features = self.extract_image_feat(images=images) + image_features /= image_features.norm(dim=-1, keepdim=True) + + # cosine similarity as logits + logits_per_image = image_features @ self.text_prototype_embeds.to( + image_features.device) * self.logit_scale.exp() + + pred_scores = F.softmax(logits_per_image, dim=1) + pred_labels = pred_scores.argmax(dim=1, keepdim=True).detach() + + out_data_samples = [] + if data_samples is None: + data_samples = [None for _ in range(pred_scores.size(0))] + + for data_sample, score, label in zip(data_samples, pred_scores, + pred_labels): + if data_sample is None: + data_sample = DataSample() + + data_sample.set_pred_score(score).set_pred_label(label) + out_data_samples.append(data_sample) + return out_data_samples + + def prepare_text_prototype(self, device) -> None: + """The function to prepare text prototypes with prompt.""" + class_embeddings = [] + for classname in track_on_main_process(self.prototype, + 'Prepare text prototype...'): + # format with class + texts = [prompt(classname) for prompt in self.prompt] + tokenized_texts = self.tokenize(texts) + class_features = self.extract_text_feat(tokenized_texts.to(device)) + class_features /= class_features.norm(dim=-1, keepdim=True) + class_feature = class_features.mean(dim=0) + class_feature /= class_feature.norm() + class_embeddings.append(class_feature) + self.text_prototype_embeds = torch.stack( + class_embeddings, dim=1).to(device) diff --git a/mmpretrain/models/multimodal/clip/clip_transformer.py b/mmpretrain/models/multimodal/clip/clip_transformer.py new file mode 100644 index 00000000..4b5f7666 --- /dev/null +++ b/mmpretrain/models/multimodal/clip/clip_transformer.py @@ -0,0 +1,99 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Modified from https://github.com/zejiangh/MILAN +from typing import Optional, Tuple + +import torch +from mmengine.model import BaseModule +from torch import nn + +from mmpretrain.models.utils.clip_generator_helper import \ + ResidualAttentionBlock +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class CLIPTransformer(nn.Module): + """Transformer. + + Both visual and text branches use this transformer. + + Args: + width (int): The feature dimension. + layers (int): The number of layers. + heads (int): The number of attention heads. + attn_mask (torch.Tensor, optional): The attention mask. + """ + + def __init__(self, + width: int, + layers: int, + heads: int, + attn_mask: Optional[torch.Tensor] = None) -> None: + super().__init__() + self.width = width + self.layers = layers + self.resblocks = nn.ModuleList() + for _ in range(layers - 1): + self.resblocks.append( + ResidualAttentionBlock(width, heads, attn_mask)) + self.resblocks.append( + ResidualAttentionBlock( + width, heads, attn_mask, return_attention=True)) + + def forward( + self, x: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Forward function.""" + z = [] + for idx, blk in enumerate(self.resblocks): + if idx < self.layers - 1: + x = blk(x) + z.append(x.permute(1, 0, 2)) + else: + x, attention = blk(x) + z.append(x.permute(1, 0, 2)) + return x, attention, z + + +@MODELS.register_module() +class CLIPProjection(BaseModule): + """Neck with CLIP Projection. + + Args: + in_channels (int): Number of channels in the input. + out_channels (int): Number of channels in the output. + init_cfg (dict | list[dict], optional): Initialization config dict. 
+ Defaults to None. + """ + + def __init__(self, + in_channels: int, + out_channels: int, + init_cfg: Optional[dict] = None): + super(CLIPProjection, self).__init__(init_cfg=init_cfg) + + self.in_channels = in_channels + self.out_channels = out_channels + scale = in_channels**-0.5 + self.proj = nn.Parameter(scale * + torch.randn(in_channels, out_channels)) + + def forward(self, inputs: Tuple) -> Tuple[torch.Tensor]: + """forward function. + + Args: + inputs (Tuple): The features extracted from + the backbone. Multiple stage inputs are acceptable but only + the last stage will be used. + Returns: + Tuple(torch.Tensor)): A tuple of reducted features. + """ + if isinstance(inputs, tuple): + inputs = inputs[-1] + out = inputs @ self.proj + elif isinstance(inputs, torch.Tensor): + out = inputs @ self.proj + else: + raise TypeError( + '`CLIPProjection` neck inputs should be tuple or torch.tensor') + return (out, ) diff --git a/mmpretrain/models/multimodal/clip/utils.py b/mmpretrain/models/multimodal/clip/utils.py new file mode 100644 index 00000000..65239bc3 --- /dev/null +++ b/mmpretrain/models/multimodal/clip/utils.py @@ -0,0 +1,115 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +OPENAI_CIFAR100_PROMPT = [ + lambda c: f'a photo of a {c}.', + lambda c: f'a blurry photo of a {c}.', + lambda c: f'a black and white photo of a {c}.', + lambda c: f'a low contrast photo of a {c}.', + lambda c: f'a high contrast photo of a {c}.', + lambda c: f'a bad photo of a {c}.', + lambda c: f'a good photo of a {c}.', + lambda c: f'a photo of a small {c}.', + lambda c: f'a photo of a big {c}.', + lambda c: f'a photo of the {c}.', + lambda c: f'a blurry photo of the {c}.', + lambda c: f'a black and white photo of the {c}.', + lambda c: f'a low contrast photo of the {c}.', + lambda c: f'a high contrast photo of the {c}.', + lambda c: f'a bad photo of the {c}.', + lambda c: f'a good photo of the {c}.', + lambda c: f'a photo of the small {c}.', + lambda c: f'a photo of the big {c}.', +] + +OPENAI_IMAGENET_PROMPT_SUB = [ + lambda c: f'itap of a {c}.', + lambda c: f'a bad photo of the {c}.', + lambda c: f'a origami {c}.', + lambda c: f'a photo of the large {c}.', + lambda c: f'a {c} in a video game.', + lambda c: f'art of the {c}.', + lambda c: f'a photo of the small {c}.', +] + +OPENAI_IMAGENET_PROMPT = [ + lambda c: f'a bad photo of a {c}.', + lambda c: f'a photo of many {c}.', + lambda c: f'a sculpture of a {c}.', + lambda c: f'a photo of the hard to see {c}.', + lambda c: f'a low resolution photo of the {c}.', + lambda c: f'a rendering of a {c}.', + lambda c: f'graffiti of a {c}.', + lambda c: f'a bad photo of the {c}.', + lambda c: f'a cropped photo of the {c}.', + lambda c: f'a tattoo of a {c}.', + lambda c: f'the embroidered {c}.', + lambda c: f'a photo of a hard to see {c}.', + lambda c: f'a bright photo of a {c}.', + lambda c: f'a photo of a clean {c}.', + lambda c: f'a photo of a dirty {c}.', + lambda c: f'a dark photo of the {c}.', + lambda c: f'a drawing of a {c}.', + lambda c: f'a photo of my {c}.', + lambda c: f'the plastic {c}.', + lambda c: f'a photo of the cool {c}.', + lambda c: f'a close-up photo of a {c}.', + lambda c: f'a black and white photo of the {c}.', + lambda c: f'a painting of the {c}.', + lambda c: f'a painting of a {c}.', + lambda c: f'a pixelated photo of the {c}.', + lambda c: f'a sculpture of the {c}.', + lambda c: f'a bright photo of the {c}.', + lambda c: f'a cropped photo of a {c}.', + lambda c: f'a plastic {c}.', + lambda c: f'a photo of the dirty {c}.', + lambda c: f'a jpeg 
corrupted photo of a {c}.', + lambda c: f'a blurry photo of the {c}.', + lambda c: f'a photo of the {c}.', + lambda c: f'a good photo of the {c}.', + lambda c: f'a rendering of the {c}.', + lambda c: f'a {c} in a video game.', + lambda c: f'a photo of one {c}.', + lambda c: f'a doodle of a {c}.', + lambda c: f'a close-up photo of the {c}.', + lambda c: f'a photo of a {c}.', + lambda c: f'the origami {c}.', + lambda c: f'the {c} in a video game.', + lambda c: f'a sketch of a {c}.', + lambda c: f'a doodle of the {c}.', + lambda c: f'a origami {c}.', + lambda c: f'a low resolution photo of a {c}.', + lambda c: f'the toy {c}.', + lambda c: f'a rendition of the {c}.', + lambda c: f'a photo of the clean {c}.', + lambda c: f'a photo of a large {c}.', + lambda c: f'a rendition of a {c}.', + lambda c: f'a photo of a nice {c}.', + lambda c: f'a photo of a weird {c}.', + lambda c: f'a blurry photo of a {c}.', + lambda c: f'a cartoon {c}.', + lambda c: f'art of a {c}.', + lambda c: f'a sketch of the {c}.', + lambda c: f'a embroidered {c}.', + lambda c: f'a pixelated photo of a {c}.', + lambda c: f'itap of the {c}.', + lambda c: f'a jpeg corrupted photo of the {c}.', + lambda c: f'a good photo of a {c}.', + lambda c: f'a plushie {c}.', + lambda c: f'a photo of the nice {c}.', + lambda c: f'a photo of the small {c}.', + lambda c: f'a photo of the weird {c}.', + lambda c: f'the cartoon {c}.', + lambda c: f'art of the {c}.', + lambda c: f'a drawing of the {c}.', + lambda c: f'a photo of the large {c}.', + lambda c: f'a black and white photo of a {c}.', + lambda c: f'the plushie {c}.', + lambda c: f'a dark photo of a {c}.', + lambda c: f'itap of a {c}.', + lambda c: f'graffiti of the {c}.', + lambda c: f'a toy {c}.', + lambda c: f'itap of my {c}.', + lambda c: f'a photo of a cool {c}.', + lambda c: f'a photo of a small {c}.', + lambda c: f'a tattoo of the {c}.', +] diff --git a/mmpretrain/models/multimodal/ofa/ofa_modules.py b/mmpretrain/models/multimodal/ofa/ofa_modules.py index 1c79049b..ef5c8533 100644 --- a/mmpretrain/models/multimodal/ofa/ofa_modules.py +++ b/mmpretrain/models/multimodal/ofa/ofa_modules.py @@ -1301,6 +1301,7 @@ class OFAEncoderDecoder(BaseModule, GenerationMixin): Defaults to an empty dict. init_cfg (dict, optional): The initialization config. Defaults to None. """ + base_model_prefix = '' def __init__( self, diff --git a/mmpretrain/models/multimodal/ram/__init__.py b/mmpretrain/models/multimodal/ram/__init__.py new file mode 100644 index 00000000..35619d88 --- /dev/null +++ b/mmpretrain/models/multimodal/ram/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .ram import RAM, RAMNormal, RAMOpenset + +__all__ = ['RAM', 'RAMNormal', 'RAMOpenset'] diff --git a/mmpretrain/models/multimodal/ram/bert.py b/mmpretrain/models/multimodal/ram/bert.py new file mode 100644 index 00000000..f54b2ce8 --- /dev/null +++ b/mmpretrain/models/multimodal/ram/bert.py @@ -0,0 +1,1197 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
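For readers skimming the prompt lists above: each entry is a callable that maps a class name to one caption, and `CLIPZeroShot.prepare_text_prototype` embeds every caption and averages the results into a single class prototype. A tiny sketch of the expansion step, using a three-template subset for brevity:

```python
# Subset of OPENAI_IMAGENET_PROMPT_SUB from the utils.py added above.
prompts = [
    lambda c: f'itap of a {c}.',
    lambda c: f'a bad photo of the {c}.',
    lambda c: f'a origami {c}.',
]
texts = [prompt('goldfish') for prompt in prompts]
print(texts)
# ['itap of a goldfish.', 'a bad photo of the goldfish.', 'a origami goldfish.']
```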
+# Modify from: +# https://github.com/xinyu1205/recognize-anything/blob/main/ram/models/bert.py + +import math +from typing import Tuple + +import torch +import torch.utils.checkpoint +from torch import Tensor, device, nn +from torch.nn import CrossEntropyLoss +from transformers.activations import ACT2FN +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions) +from transformers.modeling_utils import (PreTrainedModel, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + prune_linear_layer) +from transformers.models.bert.configuration_bert import BertConfig +from transformers.utils import logging + +logger = logging.get_logger(__name__) + + +class BertEmbeddings_nopos(nn.Module): + """Construct the embeddings from word and position embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding( + config.vocab_size, + config.hidden_size, + padding_idx=config.pad_token_id) + # self.position_embeddings = nn.Embedding( + # config.max_position_embeddings, config.hidden_size) + '''self.LayerNorm is not snake-cased to stick with + TensorFlow model variable name and be able to load''' + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous + # in memory and exported when serialized + # self.register_buffer("position_ids", + # torch.arange(config.max_position_embeddings).expand((1, -1))) + # self.position_embedding_type = \ + # getattr(config, "position_embedding_type", "absolute") + + self.config = config + + def forward(self, + input_ids=None, + position_ids=None, + inputs_embeds=None, + past_key_values_length=0): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] # noqa: F841 + + # if position_ids is None: + # position_ids = self.position_ids[:, \ + # past_key_values_length : seq_length + \ + # past_key_values_length] + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + embeddings = inputs_embeds + + # if self.position_embedding_type == "absolute": + # position_embeddings = self.position_embeddings(position_ids) + # # print('add position_embeddings!!!!') + # embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word and position embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding( + config.vocab_size, + config.hidden_size, + padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, + config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with + # TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous + # in memory and exported when serialized + self.register_buffer( + 'position_ids', + torch.arange(config.max_position_embeddings).expand((1, -1))) + self.position_embedding_type = getattr(config, + 'position_embedding_type', + 'absolute') + + 
self.config = config + + def forward(self, + input_ids=None, + position_ids=None, + inputs_embeds=None, + past_key_values_length=0): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length: + seq_length + + past_key_values_length] + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + embeddings = inputs_embeds + + if self.position_embedding_type == 'absolute': + position_embeddings = self.position_embeddings(position_ids) + # print('add position_embeddings!!!!') + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertSelfAttention(nn.Module): + + def __init__(self, config, is_cross_attention): + super().__init__() + self.config = config + if config.hidden_size % config.num_attention_heads != 0 and \ + not hasattr(config, 'embedding_size'): + raise ValueError('''The hidden size (%d) is not a multiple of + the number of attention heads (%d)''' % + (config.hidden_size, config.num_attention_heads)) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / + config.num_attention_heads) + self.all_head_size = self.num_attention_heads * \ + self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + if is_cross_attention: + self.key = nn.Linear(config.encoder_width, self.all_head_size) + self.value = nn.Linear(config.encoder_width, self.all_head_size) + else: + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = getattr(config, + 'position_embedding_type', + 'absolute') + if (self.position_embedding_type == 'relative_key' + or self.position_embedding_type == 'relative_key_query'): + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding( + 2 * config.max_position_embeddings - 1, + self.attention_head_size) + self.save_attention = False + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + def get_attention_map(self): + return self.attention_map + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, + self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. 
+ is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention: + # print(self.key.weight.shape) + key_layer = self.transpose_for_scores( + self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores( + self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + past_key_value = (key_layer, value_layer) + + # compatible with higher versions of transformers + if key_layer.shape[0] > query_layer.shape[0]: + key_layer = key_layer[:query_layer.shape[0], :, :, :] + attention_mask = attention_mask[:query_layer.shape[0], :, :] + value_layer = value_layer[:query_layer.shape[0], :, :, :] + + # Take the dot product between "query" and "key" + # to get the raw attention scores. + attention_scores = torch.matmul(query_layer, + key_layer.transpose(-1, -2)) + + if (self.position_embedding_type == 'relative_key' + or self.position_embedding_type == 'relative_key_query'): + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange( + seq_length, dtype=torch.long, + device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange( + seq_length, dtype=torch.long, + device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding( + distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to( + dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == 'relative_key': + relative_position_scores = torch.einsum( + 'bhld,lrd->bhlr', query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == 'relative_key_query': + relative_position_scores_query = torch.einsum( + 'bhld,lrd->bhlr', query_layer, positional_embedding) + relative_position_scores_key = torch.einsum( + 'bhrd,lrd->bhlr', key_layer, positional_embedding) + attention_scores = attention_scores + \ + relative_position_scores_query + \ + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt( + self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for + # all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + if is_cross_attention and self.save_attention: + self.save_attention_map(attention_probs) + attention_probs.register_hook(self.save_attn_gradients) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
+ attention_probs_dropped = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs_dropped = attention_probs_dropped * head_mask + + context_layer = torch.matmul(attention_probs_dropped, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + ( + self.all_head_size, ) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer, + attention_probs) if output_attentions else (context_layer, ) + + outputs = outputs + (past_key_value, ) + return outputs + + +class BertSelfOutput(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + + def __init__(self, config, is_cross_attention=False): + super().__init__() + self.self = BertSelfAttention(config, is_cross_attention) + self.output = BertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, + self.self.attention_head_size, self.pruned_heads) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len( + heads) + self.self.all_head_size = self.self.attention_head_size * \ + self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output, + ) + self_outputs[1:] # add attentions if we output them + return outputs + + +class BertIntermediate(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = 
self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + + def __init__(self, config, layer_num): + super().__init__() + self.config = config + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = BertAttention(config) + self.layer_num = layer_num + if self.config.add_cross_attention: + self.crossattention = BertAttention( + config, is_cross_attention=self.config.add_cross_attention) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + mode=None, + ): + + if mode == 'tagging': + + assert encoder_hidden_states is not None, \ + '''encoder_hidden_states must be given + for cross-attention layers''' + + cross_attention_outputs = self.crossattention( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions=output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = cross_attention_outputs[ + 1:-1] # add cross attentions if we output attention weights + + present_key_value = cross_attention_outputs[-1] + + else: + # decoder uni-directional self-attention + # cached key/values tuple is at positions 1,2 + self_attn_past_key_value = \ + (past_key_value[:2] + if past_key_value is not None else None) + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + + if mode == 'multimodal': + assert encoder_hidden_states is not None, \ + '''encoder_hidden_states must be + given for cross-attention layers''' + + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions=output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[ + 1: + -1] # add cross attentions if we output attention weights + layer_output = apply_chunking_to_forward(self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output) + outputs = (layer_output, ) + outputs + + outputs = outputs + (present_key_value, ) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class BertEncoder(nn.Module): + + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList( + [BertLayer(config, i) for i in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + mode='multimodal', + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = ( + ) 
if output_attentions and self.config.add_cross_attention else None + + next_decoder_cache = () if use_cache else None + + for i in range(self.config.num_hidden_layers): + layer_module = self.layer[i] + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states, ) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[ + i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + if use_cache: + logger.warn('''`use_cache=True` is incompatible with + gradient checkpointing. Setting `use_cache=False`...''' + ) + use_cache = False + + def create_custom_forward(module): + + def custom_forward(*inputs): + return module(*inputs, past_key_value, + output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + mode=mode, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + mode=mode, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1], ) + if output_attentions: + all_self_attentions = all_self_attentions + ( + layer_outputs[1], ) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states, ) + + if not return_dict: + return tuple(v for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] if v is not None) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class BertPooler(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class BertPredictionHeadTransform(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + + def __init__(self, config): + super().__init__() + self.transform = BertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. 
+ self.decoder = nn.Linear( + config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that + # the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +class BertOnlyMLMHead(nn.Module): + + def __init__(self, config): + super().__init__() + self.predictions = BertLMPredictionHead(config) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class BertPreTrainedModel(PreTrainedModel): + """An abstract class to handle weights initialization and a simple + interface for downloading and loading pretrained models.""" + + config_class = BertConfig + base_model_prefix = 'bert' + _keys_to_ignore_on_load_missing = [r'position_ids'] + + def _init_weights(self, module): + """Initialize the weights.""" + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version + # which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_( + mean=0.0, std=self.config.initializer_range) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + +class BertModel(BertPreTrainedModel): + """The model can behave as an encoder (with only self-attention) as well as + a decoder, in which case a layer of cross-attention is added between the + self-attention layers, following the architecture described in `Attention + is all you need `__ by Ashish Vaswani, + Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. + + Gomez, Lukasz Kaiser and Illia Polosukhin. argument and + :obj:`add_cross_attention` set to :obj:`True`; an + :obj:`encoder_hidden_states` is then expected as an input to the forward + pass. + """ + + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = BertEmbeddings(config) + + self.encoder = BertEncoder(config) + + self.pooler = BertPooler(config) if add_pooling_layer else None + + self.init_weights() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """Prunes heads of the model. + + heads_to_prune: + dict of {layer_num: list of heads to prune in this layer} + See base class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + def get_extended_attention_mask(self, attention_mask: Tensor, + input_shape: Tuple[int], device: device, + is_decoder: bool) -> Tensor: + """Makes broadcastable attention and causal masks so that future and + masked tokens are ignored. + + Arguments: + attention_mask (:obj:`torch.Tensor`): + Mask with ones indicating tokens to attend to, + zeros for tokens to ignore. + input_shape (:obj:`Tuple[int]`): + The shape of the input to the model. + device: (:obj:`torch.device`): + The device of the input to the model. + + Returns: + :obj:`torch.Tensor` The extended attention mask, + with a the same dtype as :obj:`attention_mask.dtype`. 
+ """ + # We can provide a self-attention mask of dimensions + # [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it + # broadcastable to all heads. + if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask + # in addition to the padding mask + # - if the model is an encoder, make the mask + # broadcastable to [batch_size, num_heads, seq_length, seq_length] + if is_decoder: + batch_size, seq_length = input_shape + + seq_ids = torch.arange(seq_length, device=device) + causal_mask = seq_ids[None, None, :].repeat( + batch_size, seq_length, 1) <= seq_ids[None, :, None] + # in case past_key_values are used we need to + # add a prefix ones mask to the causal mask + # causal and attention masks must have same type + # with pytorch version < 1.3 + causal_mask = causal_mask.to(attention_mask.dtype) + + if causal_mask.shape[1] < attention_mask.shape[1]: + prefix_seq_len = attention_mask.shape[ + 1] - causal_mask.shape[1] + causal_mask = torch.cat( + [ + torch.ones( + (batch_size, seq_length, prefix_seq_len), + device=device, + dtype=causal_mask.dtype), + causal_mask, + ], + axis=-1, + ) + + extended_attention_mask = ( + causal_mask[:None, :, :] * + attention_mask[:, None, None, :]) + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + '''Wrong shape for input_ids (shape {}) or attention_mask + (shape {})'''.format(input_shape, attention_mask.shape)) + + # Since attention_mask is 1.0 + # for positions we want to attend and 0.0 + # for masked positions, this operation will + # create a tensor which is 0.0 for positions + # we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores + # before the softmax, this is effectively + # the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to( + dtype=self.dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + return extended_attention_mask + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=False, + mode='multimodal', + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj: + `(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer + of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj: + `(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token + indices of the encoder input. This mask is used in + the cross-attention if the model is configured as + a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length : + obj:`config.n_layers` with each tuple having 4 tensors of shape : + obj:`(batch_size, num_heads, sequence_length - 1, + embed_size_per_head)`): + Contains precomputed key and value hidden states of the + attention blocks. 
Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally + input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to + this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj: + `(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value + states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). + """ + output_attentions = ( + output_attentions if output_attentions is not None else + self.config.output_attentions) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else + self.config.output_hidden_states) + return_dict = ( + return_dict + if return_dict is not None else self.config.use_return_dict) + + if is_decoder: + use_cache = ( + use_cache if use_cache is not None else self.config.use_cache) + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError('''You cannot specify both + input_ids and inputs_embeds at the same time''') + elif input_ids is not None: + input_shape = input_ids.size() + batch_size, seq_length = input_shape + device = input_ids.device + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = inputs_embeds.device + elif encoder_embeds is not None: + input_shape = encoder_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = encoder_embeds.device + else: + raise ValueError('''You have to specify either + input_ids or inputs_embeds or encoder_embeds''') + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[ + 2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones( + ((batch_size, seq_length + past_key_values_length)), + device=device) + + # We can provide a self-attention mask of dimensions + # [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to + # make it broadcastable to all heads. 
+ extended_attention_mask: torch.Tensor = \ + (self.get_extended_attention_mask( + attention_mask, input_shape, device, is_decoder)) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to + # [batch_size, num_heads, seq_length, seq_length] + if encoder_hidden_states is not None: + if type(encoder_hidden_states) == list: + encoder_batch_size, encoder_sequence_length, _ = \ + (encoder_hidden_states[0].size()) + else: + encoder_batch_size, encoder_sequence_length, _ = \ + (encoder_hidden_states.size()) + encoder_hidden_shape = (encoder_batch_size, + encoder_sequence_length) + + if type(encoder_attention_mask) == list: + encoder_extended_attention_mask = [ + self.invert_attention_mask(mask) + for mask in encoder_attention_mask + ] + elif encoder_attention_mask is None: + encoder_attention_mask = torch.ones( + encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask) + else: + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape + # [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape + # [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, + self.config.num_hidden_layers) + + if encoder_embeds is None: + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + else: + embedding_output = encoder_embeds + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + mode=mode, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler( + sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +class BertLMHeadModel(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r'pooler'] + _keys_to_ignore_on_load_missing = [ + r'position_ids', r'predictions.decoder.bias' + ] + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config, add_pooling_layer=False) + self.cls = BertOnlyMLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + past_key_values=None, + use_cache=None, + 
output_attentions=None, + output_hidden_states=None, + return_dict=None, + return_logits=False, + is_decoder=True, + reduction='mean', + mode='multimodal', + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj: + `(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer + of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj: + `(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token + indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. + Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + labels (:obj:`torch.LongTensor` of shape :obj: + `(batch_size, sequence_length)`, `optional`): + Labels for computing the left-to-right + language modeling loss (next word prediction). + Indices should be in + ``[-100, 0, ..., config.vocab_size]`` + (see ``input_ids`` docstring) Tokens with indices set to + ``-100`` are ignored (masked), the loss is only computed + for the tokens with labels n ``[0, ..., config.vocab_size]`` + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length + :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj: + `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention + blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally + input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to + this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj: + `(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states + are returned and can be used to speed up + decoding (see :obj:`past_key_values`). 
+ Returns: + Example:: + >>> from transformers import (BertTokenizer, + BertLMHeadModel, BertConfig) + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') + >>> config = BertConfig.from_pretrained("bert-base-cased") + >>> model = BertLMHeadModel.from_pretrained( + 'bert-base-cased', config=config) + >>> inputs = tokenizer("Hello, my dog is cute", + return_tensors="pt") + >>> outputs = model(**inputs) + >>> prediction_logits = outputs.logits + """ + return_dict = ( + return_dict + if return_dict is not None else self.config.use_return_dict) + if labels is not None: + use_cache = False + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + is_decoder=is_decoder, + mode=mode, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + # sequence_output.shape torch.Size([85, 30, 768]) + # prediction_scores.shape torch.Size([85, 30, 30524]) + # labels.shape torch.Size([85, 30]) + + if return_logits: + return prediction_scores[:, :-1, :].contiguous() + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift + # prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, : + -1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss( + reduction=reduction, label_smoothing=0.1) + lm_loss = loss_fct( + shifted_prediction_scores.view(-1, self.config.vocab_size), + labels.view(-1)) + if reduction == 'none': + lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1) + + if not return_dict: + output = (prediction_scores, ) + outputs[2:] + return ((lm_loss, ) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation(self, + input_ids, + past=None, + attention_mask=None, + **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, + # the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return { + 'input_ids': + input_ids, + 'attention_mask': + attention_mask, + 'past_key_values': + past, + 'encoder_hidden_states': + model_kwargs.get('encoder_hidden_states', None), + 'encoder_attention_mask': + model_kwargs.get('encoder_attention_mask', None), + 'is_decoder': + True, + } + + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple( + past_state.index_select(0, beam_idx) + for past_state in layer_past), ) + return reordered_past diff --git a/mmpretrain/models/multimodal/ram/config/__init__.py b/mmpretrain/models/multimodal/ram/config/__init__.py new file mode 100644 index 00000000..ef101fec --- /dev/null +++ b/mmpretrain/models/multimodal/ram/config/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
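The config dicts introduced in the next file (`ram_swin_large_14m.py`) are consumed by the BERT modules defined above: `encoder_width` sets the input width of the cross-attention key/value projections in `BertSelfAttention`, and `add_cross_attention` makes each `BertLayer` build a `crossattention` block so the `'tagging'` and `'multimodal'` modes can attend over visual features. The sketch below is only an illustration of that wiring, not code from this patch: the dict keys mirror those used by the RAM configs, but the values, random tensors, and the direct `BertConfig(**cfg)` construction are assumptions made for the example.

```python
# Minimal sketch (not part of this patch): drive the BertModel defined above
# from a plain config dict, cross-attending text tokens over fake image
# features. All shapes and hyper-parameter values here are illustrative.
import torch
from transformers import BertConfig

# Module added by this patch; assumes mmpretrain with this change is installed.
from mmpretrain.models.multimodal.ram.bert import BertModel

cfg = dict(
    vocab_size=30524,
    hidden_size=768,
    num_hidden_layers=2,       # trimmed for the sketch; the RAM dicts use more
    num_attention_heads=12,
    intermediate_size=3072,
    pad_token_id=0,
    encoder_width=512,         # width of the visual features used as keys/values
    add_cross_attention=True,  # required so BertLayer builds `crossattention`
)
config = BertConfig(**cfg)     # extra keys (e.g. encoder_width) become attributes

text_encoder = BertModel(config, add_pooling_layer=False)

token_ids = torch.randint(0, cfg['vocab_size'], (2, 16))   # (batch, seq_len)
text_mask = torch.ones(2, 16, dtype=torch.long)
image_embeds = torch.randn(2, 145, cfg['encoder_width'])   # projected patch features
image_atts = torch.ones(2, 145, dtype=torch.long)

out = text_encoder(
    token_ids,
    attention_mask=text_mask,
    encoder_hidden_states=image_embeds,
    encoder_attention_mask=image_atts,
    return_dict=True,
    mode='multimodal',         # self-attention + cross-attention + FFN per layer
)
print(out.last_hidden_state.shape)  # torch.Size([2, 16, 768])
```

For the recognition head itself, RAM instead feeds tag-query embeddings through `encoder_embeds` with `mode='tagging'`, so each layer only cross-attends the queries over the image features; the `tagging_head` dict in the next file (2 layers, 4 heads, `encoder_width: 512`) is shaped for that use.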
diff --git a/mmpretrain/models/multimodal/ram/config/ram_swin_large_14m.py b/mmpretrain/models/multimodal/ram/config/ram_swin_large_14m.py new file mode 100644 index 00000000..e4b88653 --- /dev/null +++ b/mmpretrain/models/multimodal/ram/config/ram_swin_large_14m.py @@ -0,0 +1,93 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# data settings +test_transforms_cfg = [ + dict(type='Resize', scale=(384, 384), interpolation='bicubic'), + dict( + type='mmpretrain.PackInputs', + algorithm_keys=['text'], + meta_keys=['image_id', 'scale_factor'], + ), +] + + +def get_ram_cfg(mode='normal'): + assert mode in ['normal', 'openset'], 'mode must "normal" or "openset"' + model_type = 'RAMNormal' if mode == 'normal' else 'RAMOpenset' + model_cfg = dict( + type=model_type, + tokenizer=dict( + type='BertTokenizer', + name_or_path='/public/DATA/qbw/ckpt/bert-base-uncased', + use_fast=False), + vision_backbone=dict( + type='SwinTransformer', + arch='large', + img_size=384, + window_size=12, + ), + tag_encoder={ + 'architectures': ['BertModel'], + 'attention_probs_dropout_prob': 0.1, + 'hidden_act': 'gelu', + 'hidden_dropout_prob': 0.1, + 'hidden_size': 768, + 'initializer_range': 0.02, + 'intermediate_size': 3072, + 'layer_norm_eps': 1e-12, + 'max_position_embeddings': 512, + 'model_type': 'bert', + 'num_attention_heads': 12, + 'num_hidden_layers': 12, + 'pad_token_id': 0, + 'type_vocab_size': 2, + 'vocab_size': 30524, + 'encoder_width': 512, + 'add_cross_attention': True + }, + text_decoder={ + 'architectures': ['BertModel'], + 'attention_probs_dropout_prob': 0.1, + 'hidden_act': 'gelu', + 'hidden_dropout_prob': 0.1, + 'hidden_size': 768, + 'initializer_range': 0.02, + 'intermediate_size': 3072, + 'layer_norm_eps': 1e-12, + 'max_position_embeddings': 512, + 'model_type': 'bert', + 'num_attention_heads': 12, + 'num_hidden_layers': 12, + 'pad_token_id': 0, + 'type_vocab_size': 2, + 'vocab_size': 30524, + 'encoder_width': 768, + 'add_cross_attention': True + }, + tagging_head={ + 'architectures': ['BertModel'], + 'attention_probs_dropout_prob': 0.1, + 'hidden_act': 'gelu', + 'hidden_dropout_prob': 0.1, + 'hidden_size': 768, + 'initializer_range': 0.02, + 'intermediate_size': 3072, + 'layer_norm_eps': 1e-12, + 'max_position_embeddings': 512, + 'model_type': 'bert', + 'num_attention_heads': 4, + 'num_hidden_layers': 2, + 'pad_token_id': 0, + 'type_vocab_size': 2, + 'vocab_size': 30522, + 'encoder_width': 512, + 'add_cross_attention': True, + 'add_tag_cross_attention': False + }, + data_preprocessor=dict( + type='MultiModalDataPreprocessor', + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=False, + ), + ) + return model_cfg diff --git a/mmpretrain/models/multimodal/ram/data/ram_tag_list.pickle b/mmpretrain/models/multimodal/ram/data/ram_tag_list.pickle new file mode 100644 index 0000000000000000000000000000000000000000..0519d1ee759eacdad99df2811ff59432369e1599 GIT binary patch literal 51099 zcmZX-%W~wolBXv-aleqv%u{E2#rDxJ>>HB_;e~@QXSw=YU$Hn0Q9PSPW;{WRZKmYgt``_ii|MSm({^RaH|9$bV z|F$@;_p9}Bvw!;Yk0~in+vVxBKK=Q}aCyIcxB!d*wuk-t&p#%+!+v=>ua6&p{xP^; zZ?}Y_<->Bd-T_UP4-f0p>EPEf@Mg8%pDX!rTx||}3N0TFN5Y%s!|`Lgd04zIH|FUr zANV`uZ?eq_SUhgFyVP~wylu`l%7?=dM3<}O>v>tZd&||zDp$|z)8=ixI4@tpb+}yp zyqu_Yuv}fX!DICXLYE^S$^5oHo;RoUE;x>s`}2CgU)tFEc(~s@(C^9V^|<_4JS;)D zzkEC%{`_OMd^&C(s4lhimKz$rTW*evhr@og5uG*Iefx)$PG1IK=O4F#569)> znVRo6)cu*TJFM1QDMseU&3a4iDY)H8eL*2w7y|CZYw)^V`b{JPF5Wl$)#2T7A?0wS zo)6SM6nd+wo|YAE*I>d2rWDWG 
z!o|i`dQ`KAk01qk5Y~XlUK0Mo(SH}B52Wjdke*NXY zUoxKvjvMFejVXc<^hB94@%8Fbyyz%jbMyl&cGQ|>*e8L!GlRuF6X1zl|IyeIGCncK zwu|-e56ibUY$HE~Wv0mHb-i6a!AmBlZ_aC5n_uBDmK0&s>;;*!XlZs0_5*_AQ^%)E zHyo}`s@pABo9&jWDYd&_x~|4RAm(H@@vxEmQOC|nLzTt12^RNDhs9bx;Gp}g>LdKm zaXlW_#YvAx8AFem5W<}4==0`2j4u>9FP@fL<_)E&>AYYFoMKbq6H7#9P?7V69pTSE z{>d+I(BN=ffIAHP*vMx24Mkpx3cyYm`g7hqw%tMoUc?hPEw+b8qP?X$B z&2n*G?=s&e%M%lM`Q*kn2Y!ZuESNH(6&v5~zM3hJ;x}%Gq<$=l3sc?c@UXdJ3C4&0 z@{(Q#KEP;f|NP7X49bBv4-)>&?kb9BMr_L80~o9wli~7Qa}wa32|Rn;JUzn%7C&9b zCbLne6#Z%&B^A9vl=ah-%%5x{R!DJ+{t^n?WD$C52$eJMhs(?HAgcv9hjGmT4hspJ z?fWLt-I}Sh7x78ra)1toi!u``pYZW;a07T)9+^p?>J+%M)Q;VhWxQKDkl((h)Mj_H zJ2?1n>*pF_QM&9{uGS5(cs$hX6Gh|O_0p9wYtdrKU97mp(8af86adzH`Pq-<8$(7r zrwd%TNjRhB+kr*SG#o77mu@Gw2^S9x3Cx==%qxLSHT!O#ori3HS^jR_zn0(vaKF4i zguwtf;S3h{hcg_MZX^a>j+>>NqQ0_;ve$H9*IV|pjNc@=9}fpwDfq;W)RmIZPOd(Z z84Gl5usR-K!{mK<**albGJ9v2<0D-gkRu}4{W2U2tO==%T2@S+l@bpi0>RmQ->&e2 z>Gt9r?AklL>@MJ7lgs}nt^R8rT=OQ^Vif|}!d#)BC{Ka_C7GTo@4^8P2-Su;Zq zUG^`M4@AfZ{I-l74DI$W`-7t;;+faB!q!KX5G2q7#t5P9!UsZAAx#s9UoaK2zru1^ z@JxxCLW;m*L9;Bfb2vRM9q-o6P z*gRyWhl*k5h!km@8EbR<#o?Vg#bUo$9o*&2c8pjp!1gSfR(6bZH)i3jAQOGJluWjb zd(DIlcbgQoM?#`&yeQV9&#qL~O^br-s>FJ=&wX~Yp_wCu&6n1-t@%xuN-J1IGCGAS2uSKiN4C{&c&a^n47EhN= zI67lQj6I89GKi-WQzFxlzZ5ttnHelPNggT@LxxVz0nhMIdOkkAZ+7pISCa*;j}A1@ zy$meR&#wH%7rhoLw8SWHjn}lKns0QZ#djOzr5ekYa@x2w=jQ`kBGM6ZG@m~Ubh;bE zcaD13e-D>19(8~ox4kt<5uiyN5vnk}l8?DHMwG$e>( zWIW57K)M)b91IxjTLbWU>d+XOkR8b!9xnIQi>QYrIX}!nAg&kW#|z_cvlnd& z^8ywpjU1I8^|g2JR!^m?rq5D(wRk*cqSH7VuhT?mmCcMASFa7#e7x%7G_g9NDK55X zna5 z@V_`yDMkExR({%ZRo(7WF-WMlqr&|09+0MHhO*kPU>lM%p2;Vot11B! zBtL}s(5a1w;Yx-9Ned*4Li^p9_oL?T!}4>koC66MSg&h@OxVlhgk2j!fgG3&t^j^$Dw7WOb8^8*C5g zhh!X*l0h&hs?t;~e6eamhiPSV;Ije}6u7LBLQACuU1C2SjRH?EfRN&{Ze&R^WC!Qo z>n4~KS>~CE3)a@j-cBo8+w&xm_vM!E^??}(laQ0wY76E)cXMa>idogct{k-@#`lN& z$88D@0UxY^idI36AobfB;mc;*rli{)mbfR-q!Yk?MQ`PybdrsB1AfA6B}f4^*Ao8{^L zfFjJ7-xp6zMYpG`oIx5k&9>7VewR-3i|cN6T(YSk*g=!u(=sgyuxUjaX7(gnXA*P* zWCl@sM7L8$P0{?G5WZq4x0)rySQA}%NG!XQpIzWdaK4(LcsWm~z84~r9H|5?ix5nb zGlI#)q?gAC6eHAf){z?yT*XNNW^4LYHE&|*hZqkp?3b*A^b={$64XQb`#~M%=Dnl` z7z3;*ZN;KIcq-{)DnZw$UAhYHWo-;lYZHF**yc80)8{@w7*Ml>(nIW9WI6{6dUkkV zsk%+iPX!??Y0i6qLxORi%Mcdu*;qVdt^p0;>-?pa(M=?xpi(T%suTBdY0qA`^)1aLSJfqd^9UL!b%;oWN0jG{5gnJ zU*oxehda!7*96RGQ7*q5u@YurC%g62vau=<#%xi^9LYd3#J;q)i+I;R!mARm9UU|{r630d zF%=-M*2gAGUG`;sxwsa|%R+!3Lkc^mKwcc1-j!Z5&=rNZyLziGW(dZ za7|3X?1oQAW~ALBCS4wuJ5GSJYto;cyA+sVF=steLI4Br-bq93a6txZ$f-D)HPaUi zcCRG^(2?D1F*c!7wcT1IGEPF| zZhCH=tDFu)HBdLwBnLXHxX}A)J}cczHG#t`w6G*VEixl#n9xP-{ksvwBNobaC$-qy zlCB7RM1qd&KqIle8X$SpXuZB(ICtN(@*vCW>EbL9()QD!I=Rm5x#cgSAA{M>ejzIf zv6{^VKbL8~An{T&7#|Uf%Cs@l{0{XwdtnkLR~f7e%ppO*_ngY4Y`3UEOl6uWKwn(L z%ntODCaMZI)^~G-Yie2HYV)M3U0}wOK{|?`y?Q!eQ>|0>LYRcIiP*;Gp52JT!uyW% z2yfnG)k)sGMF_Nh#rTFKbI`BMq%3!a1Xvbv+HOj3o<;TIJh7yXW;~m)t$K`WtDe4h z=l{(HX+7+knMVRFS=XXHay?5PKAJ0RL&9NUr7k8+AXzYiQIPtrOG9dmakyBkM6?i- z^uuXkS28xp)~n8g)JS7+LdZ#f*@D?F$OJWj!}6%Ez@|IgAwcOF!s2LFY_Mz+ zhjm2|WHm}G{?_EDzB5*IColB@vVCmoBs`zWHX_K`g}MRAoR^60ZS;T-7mvN@3@o~; zE-jB02{y0Lpz+Cbsaw{-8P3sx2Covz#i+C*rhOn8JXD~T3vK`?YjSsJB*iGkVG{Ab z{OjLA>0keTJuDbJAV2Y8^swHPP(5o9M{AjI*zJ)w-81-7a6Dcn6^oP0P`LOwdXFm( z`R{mcJ3C)X_@2O9-_~`wLuJlL&dqZ^Zj6(eD8z8KB}xp^LW9r+982n9uYw9=fjaG? 
z8jn{u5P=*>wqYBSgxJR*NYdzG((p#{z;%e#pacdoMwS@~u>{(KQajsSQOQMR2`$t~ ziHc2+3Dr2|OK1+K#^dV4Hr8xa3Y#`<%@Ieeq{7w-DFl$Tf}WKSErvmR826T7&nxm@i1LsXjGtjS$caT~@$+wIgmNJ23UW5cE zb&JPo!+t2ML15(7z4AMaqx&d!P*~L^~5Jai%JyLb*l+57qHc zV^Y%Y0evlNl41=mDTG$Lcuj$?390dKUviMZL^!;tfPKlh+qgu(eP)EK!rURm`G|^C zyTvoUBcT0~1FHSvg;S_>0}0l(_c@IZudsa;Bk=aX4r^LkYcO~#D$;zHz(&OQ{`Q%G zHA;JERz6sD`B0)`C4mNA66-Id?BY3rV{E#3vjTjnM4OL0_K(j@oF&1LT*qIKs;$SS zK28W_EDjY-R>CDlSq4|2GmsB-c7!~H48L7BENP|S+BI*JF3iM{!jpHL>^O^yX#&Y@ zy<^_aVfE)jc!`ycNe9D{A(0&%kI@4_=`;=MMqrpYB%i*!12$it`NM{>JaK%HLVco4 zZ)%2#q2&Z)z&i8=OW?iMgtv{t09_!VJT0sVN%sZ!P1*LWqWV#N#v~r(tYXdRk$hEM z{{dy#*4dk>gq)l~;EU5juo)RY#q%UktyK%Wm6wQBfS3XO7`yiq(oc9=|H~y?m|#&V<`oK+2MI(02u}oc z(@v;E4opRAn83GkbvlY|XbqVs{B&hV6p~th^!33{T(Ibtm1DIR`Q|8gm>^! zu(O)onL7!mXC!CJ-(FLiPyjX{P!%7t70pcnY9}i+jSVPsIDi(bOr(lEI11Eio=;!E zSxYE$z@%bKMoB}~9Q`~Tp7u-RuOZ_Sv6GJbfj}XjesN?G?m>!Xr~WB(`-WIM844$a zB*Ya?rb774C7Hexc~WP;l*9E{Z<^QaWY{Aj*3_8dBDjbuq*F&r{!AcsJ6PxZ4?7Q}aWEC50BKuzYSAC!l2iOO%E zk+0f}RgwoG#kB;5cbH~EEx))>%Q}%SxJDFP;|n5W%_H}3c?8*CVJZ!ZYaVHc%}!EL z(M>fH0z-nZl;aZ{0c;+QspDY&WT__X)T>~zd*(=sBK&2`ye44Cb;7@J8s2Pr*H3Ww zYMT*i#fx__ye;mbKnYG=N)sKx5&OlfT4VKCYerE!P~++h$KJIH>U*kE!iH$Lt=Hbi zAmTU|rLz@<%(kdl(MWXa9)yKr{Hvl}-%8*R8+<(vb-C<{Skm?q`_xF|c&U;iOZ zlY5&K@rJ4DwT(2E(R^gCrUGUM-{k)z_6u$!fSWkS0khD=`xB1DsF8T!^t&`gaX~d; z?|?GRD#R2st&p`Zu-8+=-EI=il$HL%bq0>d_Hv)2OmSocTr12>#2qG)M!vJ0YW3 z?An|lW<1Aj0A--ygGMN3Lzod*5poGcbpfleHYJe5M7LUW8t0btB?rJVPad)nf(1w) zJBJ}*e2S5j>T_{Ue6ZRz2EXsy(F&*>Lvh7RS6w=U=I_u$-s653#ngPk(KJpzM8UM| zgQT6{E{X`uxB^e;60sA*tGblVPzA|O`X8k&Sf$=d|A90iFI#MP7&1;9R8d-)ZuaQw z%}7B>JaXl`G+uUF2qeIQi&+r5TwGan5^DsvFX%m`fUS)23{>6&um@tOOb7%~!k9Xh z(9Z0syW-^q@S}^q9wJ1fOjU_wT4zOMao9lb5+7%)h#S3PWTAz!7_*IeAYjHaK3t4H zVdpQ9NSPGOi3KPl!gP$#&c=<Ty+M=q7*i7d! z%(JnS5`9zAtG6AffEq>u<}K^e?cQu#oG zfttE=jn=^fy~|f|qzngXmLZjWbrJa?@Bx2*kfL%=L_mQ#ki!xBjRsyxm=H?kFsVG* zEqua;uUc1dS+{Homu$#um4z=uWpQhKr=MndXYscbvQ8i3ykDCSI9wjE5b6I6h=S?_ z574+K6_{LAa`Kdl+OgiTq(!oD=SF)=FWy>x{)YCK2haK`N_zT@_Gu?oLgP6%Lg(>- zhQvoKE(%V`40ZVQ+L=u+ubvE3?J;uZ|*Ay^WT z>IZyO#q8k>5k%nSaqkB~28=ebYnG|5{F6rdDTOl!%6>V1P*;k~GJN>J#R+Wq`dTNx z^9D6B6ie~L^XWO3n(=ChU4uCQvGEL!y$9eK2cx8HUL4B73Nb_M2~UK*Rcto=t)mkF z$ivrct+Rxf*ka|MR;)w~swNB62ZvbKGEp#SGK)av_=3HHIoAhtvKn++{|YIy!FNGrB-V={1Or0B)-j#{{ch1X7T0BT{gS6KYETI(03oOKljNOJrmxq!yeivzUA zCaPB&5qi%mqV%i)q^H;cYnMtly6tMvbew5In4kkr^QV_E3z`oU8($xW%)07y7Q1fh@<0lc{* zsaIE;ExHrkHw0H~?z>NHyWnG(kv5vTy=(a^f7P0l#3d zfJR*;Ho;N8*v?HLt8ex*d^gJL1~hZzL;Zy1w_uvXW2oKwHld|sfUFNrG5S$+Y#bmY zq)_%qx~hmny$9~yvT-<~z~L@JZYEF{^8$k!0e~A*437IYg;iC_cEKV*2dL(O<{sZG z4=VodN-P3u8rn#3tLu_bx&2Im&d=CobCN8t0n?%G@KR`A02^pLd&q2hxoFZ5C`_3r zXEkX))9r9X6Ip}j{ndtM~M#@haNNzXDpVY`sS*fcAzTdm`J;kcLP{gc`^GjMg0^LghXs$SzcOB7>FFAFiB3#W%usy zqE-o*DOGr;Tr9<1xp?Nt$VmzA28f9Tf3GhC<5V~##5i5$jt7q%$W1xbGkEDlhz;8{ z@%;)f*b1 zXK1@EIcS$gr(OYgiuoM1{4=tsDM>!gX+dOGKw~fCirK^V!RvfX=I?xv3UVvva)y`8 z-6h=tTTLrO*qL!|l4`7o_uD_juVyOh(Xt|0ED^97%J}rwF2qmWt+hQyCH><0I)voe zV*BgkGp=wWJS0k(o1i9gVp%()FwQ6p^%%3foY!5*h2@q%0^r;P!riS|INJ|i0XQ{1 zVC2zqsBqJu&e(m8H$FM*zQ;%WGj?%X@mm%cn>*`D>9&WPszdeViuxFzx#L5Wxj8qw zxuLHcczlY!UhwX(e|MRa8(Hco>5=IGtK+xp-;rH}+0Rv(sY){;8e3!-@+vijFeBlx z*B%b{VuRZ~i=tkxAflhBEqx$~$0Rd+T%ruHH0K3$f;8#nblC@RFb$L@+rh~!@mF

p|@%2yJ8fC&fE6rI^}E&z%{pzvNWhDv=U=Crk!T^EV@L>Z9g3>wbkc;?auNYg1;q%-<1 zkx%{D72@Y68G#=Wxu_D^B(Z0fcGatHpP@06JT*g^GS~&%L84{Kq9}w6A1kwPdEkHR zK*-XRCFpQGo&N=&`vvX}L0sQBnx}J38x8!q{QSbX8APDN+Cp**S+FTf(2n&Y?HH#( zUu)t6*R-lH_3;(6eaKT$uYkmyLp|rR!`wCqZd)c@=&P7bj^E4RF?jqLM%>qnR{ROT zGAb%?1Vh4TONL;<_T;@MlyBya<&by=1WpG->5x!qmN+Pn4eXQCf&=;})HsbW-DU_$bMIp;I@7mQTu1j0I8@tG=O3^2?Iq;{M?E(MAeYvS zYnwt_D^2$r(IeV+{nA!G#7VTcZdXk=rD*P=5!0{{9n+00!fk}Ip0c(XMV`D>FhERl zZS)@xngDx`xuM2iWwY_)VYBA;eHbQpuw3pOH#_s1C6&Oa6U`6K@$6_s$Fou z#=8rsch?jxEwV|d)XLD0_5#2)sa_-N!V0tE zLT8%LAx9IWuYEGYSqW$r&_;kc;>w+Ov05eF$9DgTyt*+j6S(tRB$M%Iv(ti+OCfS1Jfjk?yAfYbxpLFz;kaz-C&w zaY))PyEtg>SyY8dhFv?B$cnH#F+t}Q>A1wKqRmCJi*>6fmDJu_pSpW1y~mPWjvp@n zvOGLqvW)tV7g_)05lddv0eX+B5+nYqXNo0vg}kFen08m3*xpU$) zeDb{+J`Q)6U%%XgNZBrbr~jktqfz}@WmALB1bLST(zFNcIlSrNHOvI#0)V0j#+2-g zvjUp(O%*2B22KMwi{=OyD8~A41uVWi2W15=t`aab(ip>QC#r)Er^H?0lL9k{CO2Lo z_;iSK{qWsmM0Z5eI9f;_k=SEPf6^h6#=Pm&Eu4zw`Xk741ezPj+C^ilrbUI`v^aR; zWTnXWYkkj#-G9Q=!>~NDAXG1()=U-Hd@s-R++YxJ+&YD6Qt{GHIa1=Hs>ZXy6WwNX z`cGNxV}mm1&l*_UTl?UCHpz!N9xb>Oqk`W#qZJ1wTK+PFhB|3WKWY?c?f~Dv#Grld zOq=6{I~z55M{JRtgwbcPQ$-<&q{Ax*Jhzt-!ACd`Jh2Y*_JQtOFqwIaa9;0}>%NtE z)_tQdw++5pJzCDPF$YDwexGEnhPfw^(;LQ~9{c^NB~fMm8Kfd1RD~ zvZn!%w=;)WK{5w11!4!uhfI||H)fGVr~v)-m1~WeAb(Mc8$vx;t+a|~`_!O&TEaBO zdS(D;t21#D?^)HOC*%e0#iyiJB}(Ev-EM~d_%|Y~ihqlZsIkaz<{%M83Er4vgt18_ z6jJZjQ(OaBni5?DWBMyN?u6iDmL|^9gDqN?y=CWz3}B{3kD1LUTpn3fe7d-ofFsQ| zi_iDZ$bv;5a~MddOOBC$R+kuGrVT9hkBC!rM2Nc;o%b^@GqE`{J7$S=3TwqMn?-%C z80Sz~+*W`mSFi=bwNE6=ZNVyE*WuU@m*FOt-;^$`<>c~-!P~1HC%ga^f`)>7o z3J`mGLO0L=98GsMzL7nNdSp1#eR7ayYfdP!{MDFAJJ*kKsE)D)DFdxKKd+k()NTPz z;J5x@{)-<~E2mN=qDfSX_QDhRGFr4o-vrn*-C4jPFD+Q0RBcnvMFZqOuNUOF1e*#@ zHt0ZQTv?pBcU})UQ2+;*R7}|EzotqS5Q+uj>4Ig5)7wxPGh6UBEGPdNPnN~Z6^1jd zSeaf1*sL`DlgJ<~(K55ouqDUQqMjm(^xJ>BIJXVxTJOn?a>(Bt!H?12SI9a{fzi|D z7lK7t;RoDy^z^awC0!t%Hl<#m;d4`2(WVuP44D1(ZbangF4BCxCT3_JPgJ8rxWgs{ zMWIg;qgDpWSy2kLD`2z`?Bk}l{^vjV#(ksrSP8#hVHRZbm->?S%83UaXu(KJK)I#R z49~9~=_XYLa{bbS-3A6y&+8D03Cp~@IzR-fa~^;?o}dVekCU5Lo=CW2DkIV?c+{~k zlTTjpD(#>i^vV%QzGqP8=EP$FF>cL(LF;zJCy<*>V65;Kk*nYmqwpKquilxVwO)ul z`V91>^AiJ7G0~psb6(l6=p7;yh5Z`@tyiyUl$Q4Zp(7%!<7N0aCC7( z()U1ihEyNBL=QmZsws-g2Mm!97a&*)t-t$(B-N9r5d8qxiE%R`fRJF}neYMwl)gvA zU|}>_7pHwJl}8m|c6S}I^IpOeX;gwdX+|LYr8GqLNe}Gm+R}Pw1ea(1>`Tc-X~(ux=v$0 z^BQju*XcElEvGi+3lxIm7N_w|6w( z;P$13Vg`vz0F`;^p*wO?AW%c=fC8 zfeSHaR86n3(K09wpE-q&Uy`rJ&_`Yf<8f2BCfXupm#e$rt>p?z;NitL;~Ld|!dWF1 zG4N!?>ilg-wmkH3E2S6WV5Lfludq@_}eKprgi`1z?i?Dwe zie!@&1zljIl0r`kfei{#eD|P0fo1BVy%w!al>W$0dN}4dU!0G3{PkWov%5XNNjG_X z$R!AASiAMUGYA1|7vTe=na6A(KVf&2Y8GPE6iA($%uO}yKVBY6$?WY6IU!FKv#xVy zOkolzp*ltc(Xp`}i27dc($7fK&a2+D$Lf=3ts0CFLo;NrIyo{^270r?8M0O$qYlFL zB-1UQUc=Ad*+)Y@mKxR&9Q)iz=tg)DbLE(3Xn^AdTnorMc*a?Pc|+3_89sk3vo-*< zb&1Re?jNi}1&e~oH)eCOOX8dvI^u7MQd4Zc-bRBzlF#J2$SkP8D+0-FQiYja*75B# z0ofMAN>{R6HT??XBI9?#|7M}yeMu{W*LcgCgq=dai64Cqm@4~>F%#{^q%V)nBny@p zrISI9KVYoM!%;|Pg`MMog`pYZ55ko#GU^yYLLV0Hy1{ziPDXB+*^r(6w8YoJ2E9qP z+?eQ)bCx`|F=d%Crd)ENzPXXl^REH#ut)9oLa1W>h%5dcMoTLMS!`6aLO=3`8Xn|2 zQlV?WZHGjnInE1U+J@s>u3cB?-1hHG%7L>6X-9b6QkW2cx~0pp$*0-fcRrekPi0q2 zoUZJU^_C<#tSfSRm?}rBDNR>uVKxVRs(D}E z*ZmC$#y2ZZR!}Bdmk9a9x;=N->1mLkh-f=Ne-mtno8})wTwdGX#GRBttZxili*4E@=E~Uy3&c5gP+`IgDdubayg8-|C83JV46_gKpT@pN^S4u*>Qg5fsMSB zNQ&g#oLq$E_dE}b&Q_+Mo1Yu5jK5=xC==fpp2l%`yx4xWEq_`-nF4x0V;ud-jX35H zpW->vfO`?;MYfESy`M7UA%F8=D9NyaBoIPJzWfY-nz)Oy%#;Mj!u9?S-|IIR zNZY^EJ%|2F81&p=vpV3onL;lS?Bx=p7gRVd)lj`qbbEki!+lwDD4|Kl;TP2xHcuqK zUc{Rbe;dzvLA7w`U(zE2eV&Ie8$4-g+F!9u2SD4e)o5T}=S z2H8i+mi?0t-YFds)VZ$-DTu*$3#>}%nN9vCSfLW%j@rA 
zFX8d7SojS(H=H3B$1=(RL;@xJsO_I`*sL6`I%T~v@O)WEHCh4#;*mlaOCu~F4{CfR zjBb#`QXAmrEAyaC#>U|D8GSmj#{efs83|@EmnM%kK(UFx49;g^=9ROW{*J?mR3_ZxPK@q+qjI#i(B64N+t+~<=8v~ENU_k?uA&&$;IrHsQPzsfxFb?YnTdgO82XW|&Ow-V~z_4@hZ)UZH+9_ltV$+MQ7weSPH7@fZW=%t~2ZO?Mhsh@5x=9d*K;fO*_jybT zwlI7nG8K7WRmc!J1lx`aeK5Cz0eE})voki)fLa}&>4w{x7iB^+tw-?QkSsR{+WzI+ z1_)XF>Poh1T%{V#;_)=cpCdj9?Jfb)*!(dtZMQkp9pdScA?ugR2PH6*T_>%dOOG@!PQD^8c zD_8;}jg)49?hvQ1X#vg}OBM^8Bt+PR&-KfvW4OKi9!Yy%z~TdM1*+`f)()opRh$KAEEXH;qqMpirQY*#hY#f zG2VCLDF`QCzZ^jmTlk3cuR#l)^wT1eCpCf%F9EpP1u*l3Y)tPd&Rz^hX&!tuQmjO^ zu84$vUZLQQuiRn-V{b0YJfV4|0Mn;NFigB@uvk45jGvSZJ+UpvTu#qWtui;P18XVP z;~=*$`xN~Rgsc(XYyo~r&Q0F}du?Z@UH^ zg#e_;f;|F@0y|iC`NZX-?WmprSqCIcpa5%_ztn^akdh>HE5@HNTmV|ZT!OD4XWh#2 z%!bPelctCA4ASqsNmU>Ebu0dc03UoQUvH9p#@oG>x@FB(nPu~b0%T$4O`6*d#33LK z#)EdSYv?Nfo*OoN{bDqxNp9Vcwt)rE#cLd8-wV3m^4-C_>KeJI!(|7?V(3@>d8Y!M zitZy4N31y#EXR!?kOQJI(nYfy@MW)WJ?BNmUwUCvBugl;@VX|eyR2MPP1dgi+#O%a^r%XgO zTzUOzmOe?$VG7_%hgGiwF@{xHm0~%f8KDtD@fw0KD?^EQ;|IMr+FdHz#l^OM#VByQ ztUfY5w_$0M9O+>a-)!CbAvHbY3M%;cSj(_`8i9|}lid1afmA_CVl?5)jb{ygy`4wl z1OAHQj4-P|t`wALLQj7eUFutY_Dho+bi`Df70`a&G9Ke#LE2DQv3OoU4)#cA63}qC zBNfU`xO^LDMf(zizqfIV6%Bx5XD6&jRLGL+R|bB;+go6PS5EFcPKh6Q+AB;G69uf-8NPo;u1AoJD4CPNAIq>62CXGUD1@p+^HxbPi+cb#V{C|X1^q6t zU9r9WhdA}!eN71AXtWi%`hI-YQo8kzYnG_3rNVw@swaQG#wFBe({1Dbl7K)9E}seh z_ycq36FIc_*vvmGEsNwQvR7|yhPLp}&GRd&#Iknub6_xXN+F>*lu)%~TmZRwk^TC> z32vpGadT96WV97RAwLbrG|)!Fs1`G~QRUL)%Qs&DoQ(+q5lo6WrFsk7n$m9N-3l0``p0lEbZ=5Afo$BpI4)Rf3mZnp{f zr4n?E>({LTAa43z5V9viutWI9)@^zNx3LXc!F_ zg54VTSBc?pc-UdWBSF$E+9~G(3_=R3C{|_>^>_Ajtk@Kp?s?CyOwLe@2;$aJtqKrq zhkKs+R8Z~n1d`i{L5W)d%PLi1_74g?gm+8|=n~6DMn*KvI3(OO@N%qENE(pOO%1xA zP8hK9h$P<>Xej-@_=v@61V}X$m;=Ro*R@x9wjUTyQtBNBbw>t&gQa^CiivCrLu39y z+fC;QuNMU-70n^owBTA{^`YLLs}HTB;hs}e$FI+|e}b_=6HM!;?nvRRAV;ht!CGhq z#rcFPSUQxQ)!PK^M{Rq1net#?S1d*eT_kfM7ja46WOIMz@Ac+MFK_vc`PW@xS3#dm6M0QZ0LaalTlP#&2K_s4*)R}wF43NsnssS8g1Gawn;K+H~MKAoml7yYLggRNeX|WSByM~D_ z-kEtv^__~wfg^As5I3IWBlhRon7=jX%P)QoNV4@=!p{LRRUP{_=`lVR zdz-Wb8=d1k5=ps^LnhHHFHr@DosKy#ptGmVTC}<`u2|TxTQiet#ICE)k5mR?G4e)! 
[... remaining base85 lines of the preceding GIT binary patch omitted ...]
literal 0
HcmV?d00001

diff --git a/mmpretrain/models/multimodal/ram/data/ram_tag_list_chinese.pickle b/mmpretrain/models/multimodal/ram/data/ram_tag_list_chinese.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..4abe105e3b347ab63c1dd8ac25977853918635c5
GIT binary patch
literal 50796

[... base85-encoded payload of ram_tag_list_chinese.pickle omitted ...]
literal 0
HcmV?d00001

diff --git a/mmpretrain/models/multimodal/ram/gradio_demo.py b/mmpretrain/models/multimodal/ram/gradio_demo.py
new file mode 100644
index 00000000..206e6b40
--- /dev/null
+++ b/mmpretrain/models/multimodal/ram/gradio_demo.py
@@ -0,0 +1,109 @@
+# Copyright (c) OpenMMLab. All rights reserved.
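# Usage sketch (illustrative only; both checkpoint paths are placeholders):
#
#   python -m mmpretrain.models.multimodal.ram.gradio_demo \
#       /abs/path/to/ram_swin_large_14m.pth /abs/path/to/clip-vit-base-p16.pth
#
# The first positional argument is parsed as `ram_ckpt`, the second as
# `clip_ckpt`. Running the script as a module like this assumes the `ram`
# package is importable in the current environment.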
+import argparse + +import gradio as gr +import torch + +from mmpretrain.registry import MODELS, TRANSFORMS +from .config.ram_swin_large_14m import get_ram_cfg, test_transforms_cfg +from .run.inference import inference + +parser = argparse.ArgumentParser( + description='RAM(Recognize Anything Model) demo') +parser.add_argument( + 'ram_ckpt', type=str, help='pretrained file for ram (absolute path)') +parser.add_argument( + 'clip_ckpt', + type=str, + help='clip vit-base-p16 pretrained file (absolute path)') +args = parser.parse_args() + +if torch.cuda.is_available(): + devices = [ + torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count()) + ] +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + devices = [torch.device('mps')] +else: + devices = [torch.device('cpu')] + + +def get_free_device(): + if hasattr(torch.cuda, 'mem_get_info'): + free = [torch.cuda.mem_get_info(gpu)[0] for gpu in devices] + select = max(zip(free, range(len(free))))[1] + else: + import random + select = random.randint(0, len(devices) - 1) + return devices[select] + + +device = get_free_device() + + +def ram_inference(image, tag_list, mode, threshold): + test_transforms = TRANSFORMS.get('Compose')(transforms=test_transforms_cfg) + model = MODELS.build(get_ram_cfg(mode=mode)) + model.load_state_dict(torch.load(args.ram_ckpt)) + model.device = device + + if mode == 'openset': + categories = tag_list + if categories != '': + categories = categories.strip().split() + else: + categories = None + model.set_openset( + categories=categories, + clip_ckpt=args.clip_ckpt, + threshold=threshold) + + sample = dict(img=image) + result = inference(sample, model, test_transforms, mode=mode) + tag, tag_chinese, logits = \ + result.get('tag_output')[0][0], result.get('tag_output')[1][0],\ + result.get('logits_output')[0] + + def wrap(tags, logits): + if tags is None: + return 'Openset mode has no tag_en' + tag_lst = tags.split('|') + rt_lst = [] + for i, tag in enumerate(tag_lst): + tag = tag.strip() + rt_lst.append(tag + f': {logits[i]:.2f}') + return ' | '.join(rt_lst) + + return [wrap(tag, logits), wrap(tag_chinese, logits)] + + +def build_gradio(): + inputs = [ + gr.components.Image(label='image'), + gr.components.Textbox( + lines=2, + label='tag_list', + placeholder= + 'please input the categories split by keyboard "blank": ', + value=''), + gr.components.Radio(['normal', 'openset'], + label='mode', + value='normal'), + gr.components.Slider( + minimum=0, maximum=1, value=0.68, step=0.01, label='threshold') + ] + return gr.Interface( + fn=ram_inference, + inputs=inputs, + outputs=[ + gr.components.Textbox(), + gr.components.Textbox(info="it's translated from the english tags") + ]) + + +def main(): + build_gradio().launch() + + +if __name__ == '__main__': + main() diff --git a/mmpretrain/models/multimodal/ram/openset_utils.py b/mmpretrain/models/multimodal/ram/openset_utils.py new file mode 100644 index 00000000..5fa0f52e --- /dev/null +++ b/mmpretrain/models/multimodal/ram/openset_utils.py @@ -0,0 +1,212 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
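# Worked example of the prompt expansion implemented below, using the
# hypothetical category name 'sea_lion' (not taken from any real tag list):
#
#   processed_name('sea_lion')  ->  'sea lion'
#   article('sea_lion')         ->  'a'   (first letter is not a vowel)
#   'a photo of {article} {} in the scene.'.format('sea lion', article='a')
#                               ->  'a photo of a sea lion in the scene.'
#
# Prompts starting with 'a' or 'the' are additionally prefixed with 'This is ',
# and the CLIP text embeddings of all templates are averaged and normalized
# into a single open-set label embedding per category.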
+import torch + +from mmpretrain.registry import MODELS + + +def article(name): + return 'an' if name[0] in 'aeiou' else 'a' + + +def processed_name(name, rm_dot=False): + # _ for lvis + # / for obj365 + res = name.replace('_', ' ').replace('/', ' or ').lower() + if rm_dot: + res = res.rstrip('.') + return res + + +single_template = ['a photo of a {}.'] + +multiple_templates = [ + 'There is {article} {} in the scene.', + 'There is the {} in the scene.', + 'a photo of {article} {} in the scene.', + 'a photo of the {} in the scene.', + 'a photo of one {} in the scene.', + 'itap of {article} {}.', + 'itap of my {}.', # itap: I took a picture of + 'itap of the {}.', + 'a photo of {article} {}.', + 'a photo of my {}.', + 'a photo of the {}.', + 'a photo of one {}.', + 'a photo of many {}.', + 'a good photo of {article} {}.', + 'a good photo of the {}.', + 'a bad photo of {article} {}.', + 'a bad photo of the {}.', + 'a photo of a nice {}.', + 'a photo of the nice {}.', + 'a photo of a cool {}.', + 'a photo of the cool {}.', + 'a photo of a weird {}.', + 'a photo of the weird {}.', + 'a photo of a small {}.', + 'a photo of the small {}.', + 'a photo of a large {}.', + 'a photo of the large {}.', + 'a photo of a clean {}.', + 'a photo of the clean {}.', + 'a photo of a dirty {}.', + 'a photo of the dirty {}.', + 'a bright photo of {article} {}.', + 'a bright photo of the {}.', + 'a dark photo of {article} {}.', + 'a dark photo of the {}.', + 'a photo of a hard to see {}.', + 'a photo of the hard to see {}.', + 'a low resolution photo of {article} {}.', + 'a low resolution photo of the {}.', + 'a cropped photo of {article} {}.', + 'a cropped photo of the {}.', + 'a close-up photo of {article} {}.', + 'a close-up photo of the {}.', + 'a jpeg corrupted photo of {article} {}.', + 'a jpeg corrupted photo of the {}.', + 'a blurry photo of {article} {}.', + 'a blurry photo of the {}.', + 'a pixelated photo of {article} {}.', + 'a pixelated photo of the {}.', + 'a black and white photo of the {}.', + 'a black and white photo of {article} {}.', + 'a plastic {}.', + 'the plastic {}.', + 'a toy {}.', + 'the toy {}.', + 'a plushie {}.', + 'the plushie {}.', + 'a cartoon {}.', + 'the cartoon {}.', + 'an embroidered {}.', + 'the embroidered {}.', + 'a painting of the {}.', + 'a painting of a {}.', +] + +openimages_rare_unseen = [ + 'Aerial photography', 'Aircraft engine', 'Ale', 'Aloe', 'Amphibian', + 'Angling', 'Anole', 'Antique car', 'Arcade game', 'Arthropod', + 'Assault rifle', 'Athletic shoe', 'Auto racing', 'Backlighting', + 'Bagpipes', 'Ball game', 'Barbecue chicken', 'Barechested', 'Barquentine', + 'Beef tenderloin', 'Billiard room', 'Billiards', 'Bird of prey', + 'Black swan', 'Black-and-white', 'Blond', 'Boating', 'Bonbon', + 'Bottled water', 'Bouldering', 'Bovine', 'Bratwurst', 'Breadboard', + 'Briefs', 'Brisket', 'Brochette', 'Calabaza', 'Camera operator', 'Canola', + 'Childbirth', 'Chordophone', 'Church bell', 'Classical sculpture', + 'Close-up', 'Cobblestone', 'Coca-cola', 'Combat sport', 'Comics', + 'Compact car', 'Computer speaker', 'Cookies and crackers', + 'Coral reef fish', 'Corn on the cob', 'Cosmetics', 'Crocodilia', + 'Digital camera', 'Dishware', 'Divemaster', 'Dobermann', 'Dog walking', + 'Domestic rabbit', 'Domestic short-haired cat', 'Double-decker bus', + 'Drums', 'Electric guitar', 'Electric piano', 'Electronic instrument', + 'Equestrianism', 'Equitation', 'Erinaceidae', 'Extreme sport', 'Falafel', + 'Figure skating', 'Filling station', 'Fire apparatus', 'Firearm', + 'Flatbread', 
'Floristry', 'Forklift truck', 'Freight transport', + 'Fried food', 'Fried noodles', 'Frigate', 'Frozen yogurt', 'Frying', + 'Full moon', 'Galleon', 'Glacial landform', 'Gliding', 'Go-kart', 'Goats', + 'Grappling', 'Great white shark', 'Gumbo', 'Gun turret', 'Hair coloring', + 'Halter', 'Headphones', 'Heavy cruiser', 'Herding', 'High-speed rail', + 'Holding hands', 'Horse and buggy', 'Horse racing', 'Hound', + 'Hunting knife', 'Hurdling', 'Inflatable', 'Jackfruit', 'Jeans', 'Jiaozi', + 'Junk food', 'Khinkali', 'Kitesurfing', 'Lawn game', 'Leaf vegetable', + 'Lechon', 'Lifebuoy', 'Locust', 'Lumpia', 'Luxury vehicle', 'Machine tool', + 'Medical imaging', 'Melee weapon', 'Microcontroller', 'Middle ages', + 'Military person', 'Military vehicle', 'Milky way', 'Miniature Poodle', + 'Modern dance', 'Molluscs', 'Monoplane', 'Motorcycling', 'Musical theatre', + 'Narcissus', 'Nest box', 'Newsagent\'s shop', 'Nile crocodile', + 'Nordic skiing', 'Nuclear power plant', 'Orator', 'Outdoor shoe', + 'Parachuting', 'Pasta salad', 'Peafowl', 'Pelmeni', 'Perching bird', + 'Performance car', 'Personal water craft', 'Pit bull', 'Plant stem', + 'Pork chop', 'Portrait photography', 'Primate', 'Procyonidae', + 'Prosciutto', 'Public speaking', 'Racewalking', 'Ramen', + 'Rear-view mirror', 'Residential area', 'Ribs', 'Rice ball', + 'Road cycling', 'Roller skating', 'Roman temple', 'Rowing', 'Rural area', + 'Sailboat racing', 'Scaled reptile', 'Scuba diving', 'Senior citizen', + 'Shallot', 'Shinto shrine', 'Shooting range', 'Siberian husky', 'Sledding', + 'Soba', 'Solar energy', 'Sport climbing', 'Sport utility vehicle', + 'Steamed rice', 'Stemware', 'Sumo', 'Surfing Equipment', 'Team sport', + 'Touring car', 'Toy block', 'Trampolining', 'Underwater diving', + 'Vegetarian food', 'Wallaby', 'Water polo', 'Watercolor paint', 'Whiskers', + 'Wind wave', 'Woodwind instrument', 'Yakitori', 'Zeppelin' +] + + +def get_clip_model(): + model = dict( + type='CLIPZeroShot', + vision_backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + drop_rate=0., + layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), + pre_norm=True, + ), + projection=dict( + type='CLIPProjection', in_channels=768, out_channels=512), + text_backbone=dict( + type='CLIPTransformer', + width=512, + layers=12, + heads=8, + attn_mask=True, + ), + tokenizer=dict( + type='AutoTokenizer', + name_or_path='openai/clip-vit-base-patch16', + use_fast=False), + vocab_size=49408, + transformer_width=512, + proj_dim=512, + context_length=77, + data_preprocessor=dict( + type='MultiModalDataPreprocessor', + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=False, + ), + ) + return MODELS.build(model) + + +def build_openset_label_embedding(categories=None, clip_ckpt_path=''): + if categories is None: + print('Categories is None, so using rare_unseen categories') + categories = openimages_rare_unseen + model = get_clip_model() + model.load_state_dict(torch.load(clip_ckpt_path)) + templates = multiple_templates + + run_on_gpu = torch.cuda.is_available() + + with torch.no_grad(): + openset_label_embedding = [] + for category in categories: + texts = [ + template.format( + processed_name(category, rm_dot=True), + article=article(category)) for template in templates + ] + texts = [ + 'This is ' + text + if text.startswith('a') or text.startswith('the') else text + for text in texts + ] + texts = model.tokenize(texts) # tokenize + if run_on_gpu: + texts = 
texts.cuda() + model = model.cuda() + text_embeddings = model.extract_text_feat(texts) + text_embeddings /= text_embeddings.norm(dim=-1, keepdim=True) + text_embedding = text_embeddings.mean(dim=0) + text_embedding /= text_embedding.norm() + openset_label_embedding.append(text_embedding) + openset_label_embedding = torch.stack(openset_label_embedding, dim=1) + if run_on_gpu: + openset_label_embedding = openset_label_embedding.cuda() + + openset_label_embedding = openset_label_embedding.t() + return openset_label_embedding, categories diff --git a/mmpretrain/models/multimodal/ram/ram.py b/mmpretrain/models/multimodal/ram/ram.py new file mode 100644 index 00000000..c5d22f07 --- /dev/null +++ b/mmpretrain/models/multimodal/ram/ram.py @@ -0,0 +1,332 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import pickle +from abc import abstractmethod +from typing import List, Optional + +import numpy as np +import torch +import torch.nn as nn +from mmengine.model import BaseModel + +from mmpretrain.registry import MODELS, TOKENIZER +from mmpretrain.structures import DataSample +from .bert import BertConfig, BertLMHeadModel, BertModel +from .openset_utils import build_openset_label_embedding +from .utils import tie_encoder_decoder_weights + + +def get_path(path): + file_path = os.path.abspath(os.path.dirname(__file__)) + if not os.path.isabs(path): + return os.path.join(file_path, path) + + +class RAM(BaseModel): + """The implementation of `RAM `_.""" + + def __init__(self, + tokenizer: dict, + vision_backbone: dict, + tag_encoder: dict, + tagging_head: dict, + text_decoder: dict, + device: str = 'cpu', + vision_width: int = 1536, + prompt='a picture of ', + threshold=0.68, + delete_tag_index=[], + tag_list='./data/ram_tag_list.pickle', + tag_list_chinese='./data/ram_tag_list_chinese.pickle', + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None): + if data_preprocessor is None: + data_preprocessor = {} + data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor') + data_preprocessor = MODELS.build(data_preprocessor) + + super().__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + + self.device = device + # build the visual encoder + self.visual_encoder = MODELS.build(vision_backbone) + + # build the tokenizer + self.tokenizer = TOKENIZER.build(tokenizer) + self.tokenizer.add_special_tokens({'bos_token': '[DEC]'}) + self.tokenizer.add_special_tokens( + {'additional_special_tokens': ['[ENC]']}) + self.tokenizer.enc_token_id = \ + self.tokenizer.additional_special_tokens_ids[0] + + # build the tag encoder + # encoder_config = BertConfig.from_json_file(med_config) + # encoder_config.encoder_width = 512 + encoder_config = BertConfig.from_dict(tag_encoder) + self.tag_encoder = BertModel( + config=encoder_config, add_pooling_layer=False) + + # build image-tag-text decoder + # decoder_config = BertConfig.from_json_file(med_config) + decoder_config = BertConfig.from_dict(text_decoder) + self.text_decoder = BertLMHeadModel(config=decoder_config) + + self.delete_tag_index = delete_tag_index + self.prompt = prompt + self.prompt_length = len(self.tokenizer(self.prompt).input_ids) - 1 + + # load tag list + self.tag_list = self.load_tag_list(get_path(tag_list)) + self.tag_list_chinese = self.load_tag_list(get_path(tag_list_chinese)) + + # create image-tag recognition decoder + self.threshold = threshold + self.num_class = len(self.tag_list) + # q2l_config = \ + # BertConfig.from_json_file(f'{CONFIG_PATH}/configs/q2l_config.json') + # 
q2l_config.encoder_width = 512 + q2l_config = BertConfig.from_dict(tagging_head) + self.tagging_head = BertModel( + config=q2l_config, add_pooling_layer=False) + self.tagging_head.resize_token_embeddings(len(self.tokenizer)) + self.label_embed = nn.Parameter( + torch.zeros(self.num_class, q2l_config.encoder_width)) + + if q2l_config.hidden_size != 512: + self.wordvec_proj = nn.Linear(512, q2l_config.hidden_size) + else: + self.wordvec_proj = nn.Identity() + + self.fc = nn.Linear(q2l_config.hidden_size, 1) + + self.del_selfattention() + + # share weights of the lowest 2-layer of + # "image-tag interaction encoder" with + # the "image-tag recogntion decoder" + tie_encoder_decoder_weights(self.tag_encoder, self.tagging_head, '', + ' ') + self.image_proj = nn.Linear(vision_width, 512) + # self.label_embed = nn.Parameter(torch.load( + # f'{CONFIG_PATH}/data/textual_label_embedding.pth', + # map_location='cpu').float()) + + # adjust thresholds for some tags + self.class_threshold = torch.ones(self.num_class) * self.threshold + ram_class_threshold_path = get_path( + './data/ram_tag_list_threshold.pickle') + with open(ram_class_threshold_path, 'rb') as f: + ram_class_threshold = pickle.load(f) + for key, value in enumerate(ram_class_threshold): + self.class_threshold[key] = value + + def load_tag_list(self, tag_list_file): + with open(tag_list_file, 'rb') as f: + tag_list = pickle.load(f) + tag_list = np.array(tag_list) + return tag_list + + # delete self-attention layer of image-tag recognition decoder + # to reduce computation, follower Query2Label + def del_selfattention(self): + del self.tagging_head.embeddings + for layer in self.tagging_head.encoder.layer: + del layer.attention + + def get_label_embed(self): + return torch.nn.functional.relu(self.wordvec_proj(self.label_embed)) + + def extract_visual_feature(self, images): + image_embeds = self.visual_encoder(images)[0] + image_embeds = image_embeds.flatten(2, 3) + attn_pool = nn.AdaptiveAvgPool1d(1) + cls_token = attn_pool(image_embeds).permute(0, 2, 1).contiguous() + image_embeds = image_embeds.permute(0, 2, 1).contiguous() + image_embeds = torch.cat([cls_token, image_embeds], dim=1) + image_embeds = self.image_proj(image_embeds) + image_atts = torch.ones( + image_embeds.size()[:-1], dtype=torch.long).to(images.device) + return image_embeds, image_atts + + def image2tag(self, label_embed, image_embeds, image_atts): + # recognized image tags using image-tag recogntiion decoder + # image_cls_embeds = image_embeds[:, 0, :] + image_spatial_embeds = image_embeds[:, 1:, :] + + bs = image_spatial_embeds.shape[0] + label_embed = label_embed.unsqueeze(0).repeat(bs, 1, 1) + tagging_embed = self.tagging_head( + encoder_embeds=label_embed, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_atts, + return_dict=False, + mode='tagging', + ) + + logits = self.fc(tagging_embed[0]).squeeze(-1) + return logits + + def forward( + self, + images: torch.Tensor, + data_samples: Optional[list] = None, + mode: str = 'predict', + **kwargs, + ): + if mode == 'predict': + return self.predict(images, data_samples, **kwargs) + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + @abstractmethod + def predict(self, + images: torch.Tensor, + data_samples: DataSample = None) -> DataSample: + raise NotImplementedError + + +@MODELS.register_module() +class RAMNormal(RAM): + + def __init__(self, + tokenizer: dict, + vision_backbone: dict, + tag_encoder: dict, + tagging_head: dict, + text_decoder: dict, + device: str = 'cpu', + vision_width: int = 1536, 
+ prompt='a picture of ', + threshold=0.68, + delete_tag_index=[], + tag_list='./data/ram_tag_list.pickle', + tag_list_chinese='./data/ram_tag_list_chinese.pickle', + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None): + super().__init__( + tokenizer, + vision_backbone, + tag_encoder, + tagging_head, + text_decoder, + device, + vision_width, + prompt, + threshold, + delete_tag_index, + tag_list, + tag_list_chinese, + data_preprocessor, + init_cfg, + ) + + def tag_process(self, logits): + targets = torch.where( + torch.sigmoid(logits) > self.class_threshold.to(logits.device), + torch.tensor(1.0).to(logits.device), + torch.zeros(self.num_class).to(logits.device)) + + tag = targets.cpu().numpy() + tag[:, self.delete_tag_index] = 0 + tag_output = [] + tag_output_chinese = [] + logits_output = [] + + bs = logits.shape[0] + for b in range(bs): + index = np.argwhere(tag[b] == 1) + token = self.tag_list[index].squeeze(axis=1) + logits_output.append( + torch.sigmoid(logits)[b][index[:, 0]].cpu().numpy()) + tag_output.append(' | '.join(token)) + token_chinese = self.tag_list_chinese[index].squeeze(axis=1) + tag_output_chinese.append(' | '.join(token_chinese)) + + return [(tag_output, tag_output_chinese), logits_output] + + def predict(self, + images: torch.Tensor, + data_samples: DataSample = None) -> DataSample: + self.eval() + self.to(self.device) + images = images.to(self.device) + label_embed = self.get_label_embed() + image_embeds, image_atts = self.extract_visual_feature(images) + logits = self.image2tag(label_embed, image_embeds, image_atts) + tag_output, logits_output = self.tag_process(logits) + data_samples.set_field(logits_output, 'logits_output') + data_samples.set_field(tag_output, 'tag_output') + return data_samples + + +@MODELS.register_module() +class RAMOpenset(RAMNormal): + + def __init__(self, + tokenizer: dict, + vision_backbone: dict, + tag_encoder: dict, + tagging_head: dict, + text_decoder: dict, + device: str = 'cpu', + vision_width: int = 1536, + prompt='a picture of ', + threshold=0.68, + delete_tag_index=[], + tag_list='./data/ram_tag_list.pickle', + tag_list_chinese='./data/ram_tag_list_chinese.pickle', + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None): + super().__init__( + tokenizer, + vision_backbone, + tag_encoder, + tagging_head, + text_decoder, + device, + vision_width, + prompt, + threshold, + delete_tag_index, + tag_list, + tag_list_chinese, + data_preprocessor, + init_cfg, + ) + + def set_openset(self, + categories: List[str] = None, + clip_ckpt: str = '', + threshold: float = 0.68): + openset_label_embedding, openset_categories = \ + build_openset_label_embedding( + categories, clip_ckpt + ) + self.tag_list = np.array(openset_categories) + self.label_embed = nn.Parameter(openset_label_embedding.float()) + self.num_class = len(openset_categories) + + # the threshold for unseen categories is often lower + self.class_threshold = torch.ones(self.num_class) * threshold + + def tag_process(self, logits): + targets = torch.where( + torch.sigmoid(logits) > self.class_threshold.to(logits.device), + torch.tensor(1.0).to(logits.device), + torch.zeros(self.num_class).to(logits.device)) + + tag = targets.cpu().numpy() + tag[:, self.delete_tag_index] = 0 + + bs = logits.shape[0] + tag_output = [] + logits_output = [] + for b in range(bs): + index = np.argwhere(tag[b] == 1) + token = self.tag_list[index].squeeze(axis=1) + logits_output.append( + torch.sigmoid(logits)[b][index[:, 0]].cpu().numpy()) + tag_output.append(' | 
'.join(token)) + + return [(tag_output, [None]), logits_output] diff --git a/mmpretrain/models/multimodal/ram/run/__init__.py b/mmpretrain/models/multimodal/ram/run/__init__.py new file mode 100644 index 00000000..ef101fec --- /dev/null +++ b/mmpretrain/models/multimodal/ram/run/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/mmpretrain/models/multimodal/ram/run/inference.py b/mmpretrain/models/multimodal/ram/run/inference.py new file mode 100644 index 00000000..da5afcf5 --- /dev/null +++ b/mmpretrain/models/multimodal/ram/run/inference.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + + +def inference_ram(sample, model): + + with torch.no_grad(): + result = model.test_step(sample) + + return result + + +def inference_ram_openset(sample, model): + with torch.no_grad(): + result = model.test_step(sample) + + return result + + +def inference(sample, model, transforms, mode='normal'): + sample = transforms(sample) + if sample['inputs'].ndim == 3: + sample['inputs'] = sample['inputs'].unsqueeze(dim=0) + assert mode in ['normal', 'openset' + ], 'mode of inference must be "normal" or "openset"' + if mode == 'normal': + return inference_ram(sample, model) + else: + return inference_ram_openset(sample, model) diff --git a/mmpretrain/models/multimodal/ram/utils.py b/mmpretrain/models/multimodal/ram/utils.py new file mode 100644 index 00000000..32cb115b --- /dev/null +++ b/mmpretrain/models/multimodal/ram/utils.py @@ -0,0 +1,87 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +from torch import nn + + +def tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, + base_model_prefix: str, skip_key: str): + uninitialized_encoder_weights: List[str] = [] + if decoder.__class__ != encoder.__class__: + print(f'''{decoder.__class__} and {encoder.__class__} are not equal. 
+ In this case make sure that + all encoder weights are correctly initialized.''') + + def tie_encoder_to_decoder_recursively( + decoder_pointer: nn.Module, + encoder_pointer: nn.Module, + module_name: str, + uninitialized_encoder_weights: List[str], + skip_key: str, + depth=0, + ): + assert isinstance(decoder_pointer, nn.Module) and isinstance( + encoder_pointer, nn.Module + ), f'{decoder_pointer} and {encoder_pointer}' + \ + 'have to be of type torch.nn.Module' + if hasattr(decoder_pointer, 'weight') and skip_key not in module_name: + assert hasattr(encoder_pointer, 'weight') + encoder_pointer.weight = decoder_pointer.weight + if hasattr(decoder_pointer, 'bias'): + assert hasattr(encoder_pointer, 'bias') + encoder_pointer.bias = decoder_pointer.bias + print(module_name + ' is tied') + return + + encoder_modules = encoder_pointer._modules + decoder_modules = decoder_pointer._modules + if len(decoder_modules) > 0: + assert (len(encoder_modules) > + 0), f'''Encoder module {encoder_pointer} + does not match decoder module {decoder_pointer}''' + + all_encoder_weights = set([ + module_name + '/' + sub_name + for sub_name in encoder_modules.keys() + ]) + encoder_layer_pos = 0 + for name, module in decoder_modules.items(): + if name.isdigit(): + encoder_name = str(int(name) + encoder_layer_pos) + decoder_name = name + if not isinstance( + decoder_modules[decoder_name], + type(encoder_modules[encoder_name])) and len( + encoder_modules) != len(decoder_modules): + # this can happen if the name corresponds to + # the position in a list module list of layers + # in this case the decoder has added a + # cross-attention that the encoder doesn't have + # thus skip this step and + # subtract one layer pos from encoder + encoder_layer_pos -= 1 + continue + elif name not in encoder_modules: + continue + elif depth > 500: + raise ValueError( + '''Max depth of recursive function `tie_encoder_to_decoder` reached. + It seems that there is a circular dependency + between two or more `nn.Modules` of your model.''') + else: + decoder_name = encoder_name = name + tie_encoder_to_decoder_recursively( + decoder_modules[decoder_name], + encoder_modules[encoder_name], + module_name + '/' + name, + uninitialized_encoder_weights, + skip_key, + depth=depth + 1, + ) + all_encoder_weights.remove(module_name + '/' + encoder_name) + + uninitialized_encoder_weights += list(all_encoder_weights) + + # tie weights recursively + tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, + uninitialized_encoder_weights, skip_key) diff --git a/mmpretrain/models/utils/tokenizer.py b/mmpretrain/models/utils/tokenizer.py index 5b8a324b..fddda432 100644 --- a/mmpretrain/models/utils/tokenizer.py +++ b/mmpretrain/models/utils/tokenizer.py @@ -12,6 +12,7 @@ from .huggingface import register_hf_tokenizer register_hf_tokenizer(AutoTokenizer) register_hf_tokenizer(LlamaTokenizer) +register_hf_tokenizer(BertTokenizer) @register_hf_tokenizer() diff --git a/mmpretrain/version.py b/mmpretrain/version.py index 24b33124..32f800cd 100644 --- a/mmpretrain/version.py +++ b/mmpretrain/version.py @@ -1,6 +1,6 @@ # Copyright (c) OpenMMLab. All rights reserved -__version__ = '1.0.2' +__version__ = '1.1.0' def parse_version_info(version_str): diff --git a/projects/dino/README.md b/projects/dino/README.md new file mode 100644 index 00000000..3458fa4c --- /dev/null +++ b/projects/dino/README.md @@ -0,0 +1,26 @@ +# Implementation for DINO + +**NOTE**: We only guarantee correctness of the forward pass, not responsible for full reimplementation. 
+ +First, ensure you are in the root directory of MMPretrain, then you have two choices +to play with DINO in MMPretrain: + +## Slurm + +If you are using a cluster managed by Slurm, you can use the following command to +start your job: + +```shell +GPUS_PER_NODE=8 GPUS=8 CPUS_PER_TASK=16 bash projects/dino/tools/slurm_train.sh mm_model dino projects/dino/config/dino_vit-base-p16_8xb64-amp-coslr-100e_in1k.py --amp +``` + +The above command will pre-train the model on a single node with 8 GPUs. + +## PyTorch + +If you are using a single machine, without any cluster management software, you can use the following command + +```shell +NNODES=1 bash projects/dino/tools/dist_train.sh projects/dino/config/dino_vit-base-p16_8xb64-amp-coslr-100e_in1k.py 8 +--amp +``` diff --git a/projects/dino/config/dino_vit-base-p16_8xb64-amp-coslr-100e_in1k.py b/projects/dino/config/dino_vit-base-p16_8xb64-amp-coslr-100e_in1k.py new file mode 100644 index 00000000..d4a1c240 --- /dev/null +++ b/projects/dino/config/dino_vit-base-p16_8xb64-amp-coslr-100e_in1k.py @@ -0,0 +1,104 @@ +model = dict( + type='DINO', + data_preprocessor=dict( + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', arch='b', patch_size=16), + neck=dict( + type='DINONeck', + in_channels=768, + out_channels=65536, + hidden_channels=2048, + bottleneck_channels=256), + head=dict( + type='DINOHead', + out_channels=65536, + num_crops=10, + student_temp=0.1, + center_momentum=0.9)) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='DINOMultiCrop', + global_crops_scale=(0.4, 1.0), + local_crops_scale=(0.05, 0.4), + local_crops_number=8), + dict(type='PackInputs') +] +train_dataloader = dict( + batch_size=32, + num_workers=16, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + dataset=dict( + type='mmpretrain.ImageNet', + data_root='/data/imagenet/', + ann_file='meta/train.txt', + data_prefix=dict(img_path='train/'), + pipeline=train_pipeline, + )) +optimizer = dict(type='AdamW', lr=0.0024, betas=(0.9, 0.95), weight_decay=0.05) +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', lr=0.0024, betas=(0.9, 0.95), weight_decay=0.05), + paramwise_cfg=dict( + custom_keys=dict( + ln=dict(decay_mult=0.0), + bias=dict(decay_mult=0.0), + pos_embed=dict(decay_mult=0.0), + mask_token=dict(decay_mult=0.0), + cls_token=dict(decay_mult=0.0))), + loss_scale='dynamic') +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-09, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=90, + by_epoch=True, + begin=10, + end=100, + convert_to_iter_based=True) +] +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100) +default_scope = 'mmpretrain' +default_hooks = dict( + runtime_info=dict(type='RuntimeInfoHook'), + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=100), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=1), + sampler_seed=dict(type='DistSamplerSeedHook')) +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl')) +log_processor = dict( + window_size=10, + custom_cfg=[dict(data_src='', method='mean', window_size='global')]) +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + 
type='UniversalVisualizer', + vis_backends=[dict(type='LocalVisBackend')], + name='visualizer') +log_level = 'INFO' +load_from = None +resume = True +randomness = dict(seed=2, diff_rank_seed=True) +custom_hooks = [ + dict( + type='DINOTeacherTempWarmupHook', + warmup_teacher_temp=0.04, + teacher_temp=0.04, + teacher_temp_warmup_epochs=0, + max_epochs=100) +] diff --git a/projects/dino/dataset/__init__.py b/projects/dino/dataset/__init__.py new file mode 100644 index 00000000..da65f285 --- /dev/null +++ b/projects/dino/dataset/__init__.py @@ -0,0 +1 @@ +from .transform import * # noqa: F401,F403 diff --git a/projects/dino/dataset/transform/__init__.py b/projects/dino/dataset/transform/__init__.py new file mode 100644 index 00000000..00dacb3f --- /dev/null +++ b/projects/dino/dataset/transform/__init__.py @@ -0,0 +1,3 @@ +from .processing import DINOMultiCrop + +__all__ = ['DINOMultiCrop'] diff --git a/projects/dino/dataset/transform/processing.py b/projects/dino/dataset/transform/processing.py new file mode 100644 index 00000000..df4bf0be --- /dev/null +++ b/projects/dino/dataset/transform/processing.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import random + +from mmcv.transforms import RandomApply # noqa: E501 +from mmcv.transforms import BaseTransform, Compose, RandomFlip, RandomGrayscale + +from mmpretrain.datasets.transforms import (ColorJitter, GaussianBlur, + RandomResizedCrop, Solarize) +from mmpretrain.registry import TRANSFORMS + + +@TRANSFORMS.register_module() +class DINOMultiCrop(BaseTransform): + """Multi-crop transform for DINO. + + This module applies the multi-crop transform for DINO. + + Args: + global_crops_scale (int): Scale of global crops. + local_crops_scale (int): Scale of local crops. + local_crops_number (int): Number of local crops. 
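    Example:
        An illustrative pipeline entry (the values mirror the project config
        and are not the only valid choice)::

            dict(
                type='DINOMultiCrop',
                global_crops_scale=(0.4, 1.0),
                local_crops_scale=(0.05, 0.4),
                local_crops_number=8)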
+ """ + + def __init__(self, global_crops_scale: int, local_crops_scale: int, + local_crops_number: int) -> None: + super().__init__() + self.global_crops_scale = global_crops_scale + self.local_crops_scale = local_crops_scale + + flip_and_color_jitter = Compose([ + RandomFlip(prob=0.5, direction='horizontal'), + RandomApply([ + ColorJitter( + brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1) + ], + prob=0.8), + RandomGrayscale( + prob=0.2, + keep_channels=True, + channel_weights=(0.114, 0.587, 0.2989), + ) + ]) + + self.global_transform_1 = Compose([ + RandomResizedCrop( + 224, + crop_ratio_range=global_crops_scale, + interpolation='bicubic'), + flip_and_color_jitter, + GaussianBlur(prob=1.0, radius=random.uniform(0.1, 2.0)), + ]) + + self.global_transform_2 = Compose([ + RandomResizedCrop( + 224, + crop_ratio_range=global_crops_scale, + interpolation='bicubic'), + flip_and_color_jitter, + GaussianBlur(prob=1.0, radius=random.uniform(0.1, 2.0)), + Solarize(thr=128, prob=0.2), + ]) + + self.local_crops_number = local_crops_number + self.local_transform = Compose([ + RandomResizedCrop( + 96, + crop_ratio_range=local_crops_scale, + interpolation='bicubic'), + flip_and_color_jitter, + GaussianBlur(prob=1.0, radius=random.uniform(0.1, 2.0)), + ]) + + def transform(self, results: dict) -> dict: + ori_img = results['img'] + crops = [] + results['img'] = ori_img + crops.append(self.global_transform_1(results)['img']) + results['img'] = ori_img + crops.append(self.global_transform_2(results)['img']) + for _ in range(self.local_crops_number): + results['img'] = ori_img + crops.append(self.local_transform(results)['img']) + results['img'] = crops + return results + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(global_crops_scale = {self.global_crops_scale}, ' + repr_str += f'local_crops_scale = {self.local_crops_scale}, ' + repr_str += f'local_crop_number = {self.local_crops_number})' + return repr_str diff --git a/projects/dino/engine/__init__.py b/projects/dino/engine/__init__.py new file mode 100644 index 00000000..41422545 --- /dev/null +++ b/projects/dino/engine/__init__.py @@ -0,0 +1 @@ +from .hooks import * # noqa diff --git a/projects/dino/engine/hooks/__init__.py b/projects/dino/engine/hooks/__init__.py new file mode 100644 index 00000000..df43c492 --- /dev/null +++ b/projects/dino/engine/hooks/__init__.py @@ -0,0 +1,3 @@ +from .dino_teacher_temp_warmup_hook import DINOTeacherTempWarmupHook + +__all__ = ['DINOTeacherTempWarmupHook'] diff --git a/projects/dino/engine/hooks/dino_teacher_temp_warmup_hook.py b/projects/dino/engine/hooks/dino_teacher_temp_warmup_hook.py new file mode 100644 index 00000000..d66b0250 --- /dev/null +++ b/projects/dino/engine/hooks/dino_teacher_temp_warmup_hook.py @@ -0,0 +1,33 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +from mmengine.hooks import Hook + +from mmpretrain.registry import HOOKS + + +@HOOKS.register_module() +class DINOTeacherTempWarmupHook(Hook): + """Warmup teacher temperature for DINO. + + This hook warmups the temperature for teacher to stabilize the training + process. + + Args: + warmup_teacher_temp (float): Warmup temperature for teacher. + teacher_temp (float): Temperature for teacher. + teacher_temp_warmup_epochs (int): Warmup epochs for teacher + temperature. + max_epochs (int): Maximum epochs for training. 
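+
+    For example, with ``warmup_teacher_temp=0.04``, ``teacher_temp=0.07``,
+    ``teacher_temp_warmup_epochs=30`` and ``max_epochs=100`` (illustrative
+    values, not defaults), the teacher temperature rises linearly from 0.04
+    to 0.07 over the first 30 epochs and then stays at 0.07 for the
+    remaining 70 epochs.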
+ """ + + def __init__(self, warmup_teacher_temp: float, teacher_temp: float, + teacher_temp_warmup_epochs: int, max_epochs: int) -> None: + super().__init__() + self.teacher_temps = np.concatenate( + (np.linspace(warmup_teacher_temp, teacher_temp, + teacher_temp_warmup_epochs), + np.ones(max_epochs - teacher_temp_warmup_epochs) * teacher_temp)) + + def before_train_epoch(self, runner) -> None: + runner.model.module.head.teacher_temp = self.teacher_temps[ + runner.epoch] diff --git a/projects/dino/models/__init__.py b/projects/dino/models/__init__.py new file mode 100644 index 00000000..49d01487 --- /dev/null +++ b/projects/dino/models/__init__.py @@ -0,0 +1,3 @@ +from .algorithm import * # noqa +from .head import * # noqa +from .neck import * # noqa diff --git a/projects/dino/models/algorithm/__init__.py b/projects/dino/models/algorithm/__init__.py new file mode 100644 index 00000000..1125b63f --- /dev/null +++ b/projects/dino/models/algorithm/__init__.py @@ -0,0 +1,3 @@ +from .dino import DINO + +__all__ = ['DINO'] diff --git a/projects/dino/models/algorithm/dino.py b/projects/dino/models/algorithm/dino.py new file mode 100644 index 00000000..2d78922f --- /dev/null +++ b/projects/dino/models/algorithm/dino.py @@ -0,0 +1,82 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Union + +import torch +from torch import nn + +from mmpretrain.models import BaseSelfSupervisor, CosineEMA +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample + + +@MODELS.register_module() +class DINO(BaseSelfSupervisor): + """Implementation for DINO. + + This module is proposed in `DINO: Emerging Properties in Self-Supervised + Vision Transformers `_. + + Args: + backbone (dict): Config for backbone. + neck (dict): Config for neck. + head (dict): Config for head. + pretrained (str, optional): Path for pretrained model. + Defaults to None. + base_momentum (float, optional): Base momentum for momentum update. + Defaults to 0.99. + data_preprocessor (dict, optional): Config for data preprocessor. + Defaults to None. + init_cfg (list[dict] | dict, optional): Config for initialization. + Defaults to None. 
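+
+    In ``loss()``, the two global crops are encoded by both the momentum
+    (EMA) teacher and the student, while the local crops are encoded by the
+    student only; the head then compares every student view against every
+    teacher view except the one built from the same global crop.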
+ """ + + def __init__(self, + backbone: dict, + neck: dict, + head: dict, + pretrained: Optional[str] = None, + base_momentum: float = 0.99, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + super().__init__( + backbone=backbone, + neck=neck, + head=head, + pretrained=pretrained, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg) + + # create momentum model + self.teacher = CosineEMA( + nn.Sequential(self.backbone, self.neck), momentum=base_momentum) + # weight normalization layer + self.neck.last_layer = nn.utils.weight_norm(self.neck.last_layer) + self.neck.last_layer.weight_g.data.fill_(1) + self.neck.last_layer.weight_g.requires_grad = False + self.teacher.module[1].last_layer = nn.utils.weight_norm( + self.teacher.module[1].last_layer) + self.teacher.module[1].last_layer.weight_g.data.fill_(1) + self.teacher.module[1].last_layer.weight_g.requires_grad = False + + def loss(self, inputs: torch.Tensor, + data_samples: List[DataSample]) -> dict: + global_crops = torch.cat(inputs[:2]) + local_crops = torch.cat(inputs[2:]) + # teacher forward + teacher_output = self.teacher(global_crops) + + # student forward global + student_output_global = self.backbone(global_crops) + student_output_global = self.neck(student_output_global) + + # student forward local + student_output_local = self.backbone(local_crops) + student_output_local = self.neck(student_output_local) + + student_output = torch.cat( + (student_output_global, student_output_local)) + + # compute loss + loss = self.head(student_output, teacher_output) + + return dict(loss=loss) diff --git a/projects/dino/models/head/__init__.py b/projects/dino/models/head/__init__.py new file mode 100644 index 00000000..fe31e084 --- /dev/null +++ b/projects/dino/models/head/__init__.py @@ -0,0 +1,3 @@ +from .dino_head import DINOHead + +__all__ = ['DINOHead'] diff --git a/projects/dino/models/head/dino_head.py b/projects/dino/models/head/dino_head.py new file mode 100644 index 00000000..e817bfad --- /dev/null +++ b/projects/dino/models/head/dino_head.py @@ -0,0 +1,69 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn.functional as F +from mmengine.dist import all_reduce, get_world_size +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class DINOHead(BaseModule): + """Implementation for DINO head. + + This module is proposed in `DINO: Emerging Properties in Self-Supervised + Vision Transformers `_. + + Args: + out_channels (int): Output channels of the head. + num_crops (int): Number of crops. + student_temp (float): Temperature for student output. + center_momentum (float): Momentum for center update. 
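+
+    In ``forward()``, the teacher output is centered by subtracting the
+    running ``center`` buffer and sharpened by ``teacher_temp`` before the
+    softmax, the student output is sharpened by ``student_temp``, and the
+    loss averages the cross-entropy over all teacher/student crop pairs
+    except those coming from the same global crop. The ``center`` buffer is
+    then updated as an EMA of the teacher batch mean.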
+ """ + + def __init__(self, out_channels: int, num_crops: int, student_temp: float, + center_momentum: float) -> None: + super().__init__() + self.student_temp = student_temp + self.teacher_temp = 0 + self.center_momentum = center_momentum + self.num_crops = num_crops + self.register_buffer('center', torch.zeros(1, out_channels)) + + def forward(self, student_output: torch.Tensor, + teacher_output: torch.Tensor) -> torch.Tensor: + + current_teacher_output = teacher_output + student_output = student_output / self.student_temp + student_output = student_output.chunk(self.num_crops, dim=0) + + # teacher centering and sharpening + teacher_output = F.softmax( + (teacher_output - self.center) / self.teacher_temp, dim=-1) + teacher_output = teacher_output.detach().chunk(2, dim=0) + + total_loss = 0 + n_loss_terms = 0 + + for i in range(len(teacher_output)): + for j in range(len(student_output)): + if i == j: + continue + total_loss += (-teacher_output[i] * + student_output[j].log_softmax(dim=-1)).sum( + dim=-1).mean() + n_loss_terms += 1 + total_loss /= n_loss_terms + self.update_center(current_teacher_output) + return total_loss + + @torch.no_grad() + def update_center(self, teacher_output: torch.Tensor) -> None: + + batch_center = torch.sum(teacher_output, dim=0, keepdim=True) + all_reduce(batch_center) + batch_center = batch_center / (len(teacher_output) * get_world_size()) + + # ema update batch center + self.center = self.center * self.center_momentum + batch_center * ( + 1 - self.center_momentum) diff --git a/projects/dino/models/neck/__init__.py b/projects/dino/models/neck/__init__.py new file mode 100644 index 00000000..e5f4aadb --- /dev/null +++ b/projects/dino/models/neck/__init__.py @@ -0,0 +1,3 @@ +from .dino_neck import DINONeck + +__all__ = ['DINONeck'] diff --git a/projects/dino/models/neck/dino_neck.py b/projects/dino/models/neck/dino_neck.py new file mode 100644 index 00000000..8d8881ea --- /dev/null +++ b/projects/dino/models/neck/dino_neck.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmengine.model import BaseModule +from torch import nn + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class DINONeck(BaseModule): + """Implementation for DINO neck. + + This module is proposed in `DINO: Emerging Properties in Self-Supervised + Vision Transformers `_. + + Args: + in_channels (int): Input channels. + hidden_channels (int): Hidden channels. + out_channels (int): Output channels. + bottleneck_channels (int): Bottleneck channels. 
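+
+    The forward pass projects the backbone output with a three-layer MLP
+    (GELU activations), L2-normalizes the bottleneck feature, and maps it
+    to ``out_channels`` with ``last_layer``, a bias-free linear layer that
+    the DINO algorithm wraps with weight normalization.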
+ """ + + def __init__(self, in_channels: int, hidden_channels: int, + out_channels: int, bottleneck_channels: int) -> None: + super().__init__() + self.mlp = nn.Sequential(*[ + nn.Linear(in_channels, hidden_channels), + nn.GELU(), + nn.Linear(hidden_channels, hidden_channels), + nn.GELU(), + nn.Linear(hidden_channels, bottleneck_channels), + ]) + + self.last_layer = nn.Linear( + bottleneck_channels, out_channels, bias=False) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.mlp(x[0]) + x = nn.functional.normalize(x, dim=-1, p=2) + x = self.last_layer(x) + return x diff --git a/projects/dino/tools/dist_train.sh b/projects/dino/tools/dist_train.sh new file mode 100644 index 00000000..3fca7641 --- /dev/null +++ b/projects/dino/tools/dist_train.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +CONFIG=$1 +GPUS=$2 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/train.py \ + $CONFIG \ + --launcher pytorch ${@:3} diff --git a/projects/dino/tools/slurm_train.sh b/projects/dino/tools/slurm_train.sh new file mode 100644 index 00000000..7e2ad297 --- /dev/null +++ b/projects/dino/tools/slurm_train.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:4} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u projects/dino/tools/train.py ${CONFIG} --launcher="slurm" ${PY_ARGS} diff --git a/projects/dino/tools/train.py b/projects/dino/tools/train.py new file mode 100644 index 00000000..b9482c3b --- /dev/null +++ b/projects/dino/tools/train.py @@ -0,0 +1,104 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp + +from dataset import * # noqa: F401,F403 +from engine import * # noqa: F401,F403 +from mmengine.config import Config, DictAction +from mmengine.runner import Runner +from models.algorithm import * # noqa: F401,F403 +from models.head import * # noqa: F401,F403 +from models.neck import * # noqa: F401,F403 + +from mmpretrain.utils import register_all_modules + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a model') + parser.add_argument('config', help='train config file path') + parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument( + '--resume', + nargs='?', + type=str, + const='auto', + help='If specify checkpint path, resume from it, while if not ' + 'specify, try to auto resume from the latest checkpoint ' + 'in the work directory.') + parser.add_argument( + '--amp', + action='store_true', + help='enable automatic-mixed-precision training') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + return args + + +def main(): + args = parse_args() + + # register all modules in mmpretrain into the registries + # do not init the default scope here because it will be init in the runner + register_all_modules(init_default_scope=False) + + # load config + cfg = Config.fromfile(args.config) + cfg.launcher = args.launcher + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + work_type = args.config.split('/')[1] + cfg.work_dir = osp.join('./work_dirs', work_type, + osp.splitext(osp.basename(args.config))[0]) + + # enable automatic-mixed-precision training + if args.amp is True: + optim_wrapper = cfg.optim_wrapper.get('type', 'OptimWrapper') + assert optim_wrapper in ['OptimWrapper', 'AmpOptimWrapper'], \ + '`--amp` is not supported custom optimizer wrapper type ' \ + f'`{optim_wrapper}.' + cfg.optim_wrapper.type = 'AmpOptimWrapper' + cfg.optim_wrapper.setdefault('loss_scale', 'dynamic') + + # resume training + if args.resume == 'auto': + cfg.resume = True + cfg.load_from = None + elif args.resume is not None: + cfg.resume = True + cfg.load_from = args.resume + + # build the runner from config + runner = Runner.from_cfg(cfg) + + # start training + runner.train() + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/openai-clip_to_mmpretrain-clip.py b/tools/model_converters/openai-clip_to_mmpretrain-clip.py new file mode 100644 index 00000000..72725502 --- /dev/null +++ b/tools/model_converters/openai-clip_to_mmpretrain-clip.py @@ -0,0 +1,77 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
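+#
+# This script renames the visual-encoder ('visual.*') keys of an OpenAI CLIP
+# checkpoint to the MMPretrain VisionTransformer naming scheme. A typical
+# invocation (the checkpoint paths below are only illustrative):
+#
+#   python tools/model_converters/openai-clip_to_mmpretrain-clip.py \
+#       ViT-L-14.pt vit-large-p14_clip-openai.pth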
+import argparse +import os.path as osp +from collections import OrderedDict + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_clip(ckpt): + new_ckpt = OrderedDict() + + for k, v in list(ckpt.items()): + new_v = v + if k.startswith('visual.conv1'): + new_k = k.replace('conv1', 'patch_embed.projection') + elif k.startswith('visual.positional_embedding'): + new_k = k.replace('positional_embedding', 'pos_embed') + new_v = v.unsqueeze(dim=0) + elif k.startswith('visual.class_embedding'): + new_k = k.replace('class_embedding', 'cls_token') + new_v = v.unsqueeze(dim=0).unsqueeze(dim=0) + elif k.startswith('visual.ln_pre'): + new_k = k.replace('ln_pre', 'pre_norm') + elif k.startswith('visual.transformer.resblocks'): + new_k = k.replace('transformer.resblocks', 'layers') + if 'ln_1' in k: + new_k = new_k.replace('ln_1', 'ln1') + elif 'ln_2' in k: + new_k = new_k.replace('ln_2', 'ln2') + elif 'mlp.c_fc' in k: + new_k = new_k.replace('mlp.c_fc', 'ffn.layers.0.0') + elif 'mlp.c_proj' in k: + new_k = new_k.replace('mlp.c_proj', 'ffn.layers.1') + elif 'attn.in_proj_weight' in k: + new_k = new_k.replace('in_proj_weight', 'qkv.weight') + elif 'attn.in_proj_bias' in k: + new_k = new_k.replace('in_proj_bias', 'qkv.bias') + elif 'attn.out_proj' in k: + new_k = new_k.replace('out_proj', 'proj') + elif k.startswith('visual.ln_post'): + new_k = k.replace('ln_post', 'ln1') + elif k.startswith('visual.proj'): + new_k = k.replace('visual.proj', 'visual_proj.proj') + else: + new_k = k + + new_ckpt[new_k] = new_v + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in pretrained clip ' + 'models to mmpretrain style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + weight = convert_clip(state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/ram2mmpretrain.py b/tools/model_converters/ram2mmpretrain.py new file mode 100644 index 00000000..5ee3b476 --- /dev/null +++ b/tools/model_converters/ram2mmpretrain.py @@ -0,0 +1,117 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict +from copy import deepcopy + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_swin(ckpt): + new_ckpt = OrderedDict() + convert_mapping = dict() + + def correct_unfold_reduction_order(x): + out_channel, in_channel = x.shape + x = x.reshape(out_channel, 4, in_channel // 4) + x = x[:, [0, 2, 1, 3], :].transpose(1, + 2).reshape(out_channel, in_channel) + return x + + def correct_unfold_norm_order(x): + in_channel = x.shape[0] + x = x.reshape(4, in_channel // 4) + x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel) + return x + + for k, v in ckpt.items(): + if 'attn_mask' in k: + continue + if k.startswith('head'): + continue + elif k.startswith('layers'): + new_v = v + if 'attn.' in k: + new_k = k.replace('attn.', 'attn.w_msa.') + elif 'mlp.' in k: + if 'mlp.fc1.' in k: + new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.') + elif 'mlp.fc2.' 
in k: + new_k = k.replace('mlp.fc2.', 'ffn.layers.1.') + else: + new_k = k.replace('mlp.', 'ffn.') + elif 'downsample' in k: + new_k = k + if 'reduction.' in k: + new_v = correct_unfold_reduction_order(v) + elif 'norm.' in k: + new_v = correct_unfold_norm_order(v) + else: + new_k = k + new_k = new_k.replace('layers', 'stages', 1) + elif k.startswith('patch_embed'): + new_v = v + if 'proj' in k: + new_k = k.replace('proj', 'projection') + else: + new_k = k + elif k.startswith('norm'): + new_v = v + new_k = k.replace('norm', 'norm3') + else: + new_v = v + new_k = k + + new_ckpt[new_k] = new_v + convert_mapping[k] = new_k + + return new_ckpt, convert_mapping + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in official pretrained RAM models to' + 'MMPretrain style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + elif 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + + visual_ckpt = OrderedDict() + for key in state_dict: + if key.startswith('visual_encoder.'): + new_key = key.replace('visual_encoder.', '') + visual_ckpt[new_key] = state_dict[key] + + new_visual_ckpt, convert_mapping = convert_swin(visual_ckpt) + new_ckpt = deepcopy(state_dict) + for key in state_dict: + if key.startswith('visual_encoder.'): + if 'attn_mask' in key: + del new_ckpt[key] + continue + del new_ckpt[key] + old_key = key.replace('visual_encoder.', '') + new_ckpt[key.replace(old_key, + convert_mapping[old_key])] = deepcopy( + new_visual_ckpt[key.replace( + old_key, + convert_mapping[old_key]).replace( + 'visual_encoder.', '')]) + + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(new_ckpt, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/train.py b/tools/train.py index 84c1eec9..89c8548f 100644 --- a/tools/train.py +++ b/tools/train.py @@ -91,10 +91,6 @@ def merge_args(cfg, args): # enable automatic-mixed-precision training if args.amp is True: - optim_wrapper = cfg.optim_wrapper.get('type', 'OptimWrapper') - assert optim_wrapper in ['OptimWrapper', 'AmpOptimWrapper'], \ - '`--amp` is not supported custom optimizer wrapper type ' \ - f'`{optim_wrapper}.' 
cfg.optim_wrapper.type = 'AmpOptimWrapper' cfg.optim_wrapper.setdefault('loss_scale', 'dynamic') From 6e00cbecaac9bf42805c9eecc58aee3109b3a430 Mon Sep 17 00:00:00 2001 From: mzr1996 Date: Wed, 15 Nov 2023 17:34:13 +0800 Subject: [PATCH 17/20] Update mmcv requirements --- requirements/mminstall.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/mminstall.txt b/requirements/mminstall.txt index d23d0ac7..197701a1 100644 --- a/requirements/mminstall.txt +++ b/requirements/mminstall.txt @@ -1,2 +1,2 @@ -mmcv>=2.0.0,<2.1.0 +mmcv>=2.0.0,<2.3.0 mmengine>=0.8.3,<1.0.0 From e95d9acb8919c59f7f1fab24a7701bdc23d25337 Mon Sep 17 00:00:00 2001 From: mzr1996 Date: Wed, 15 Nov 2023 17:34:13 +0800 Subject: [PATCH 18/20] Update mmcv requirements --- docker/serve/Dockerfile | 2 +- docs/en/notes/faq.md | 2 +- docs/zh_CN/notes/faq.md | 2 +- mmpretrain/version.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/serve/Dockerfile b/docker/serve/Dockerfile index 86df2926..40ba0409 100644 --- a/docker/serve/Dockerfile +++ b/docker/serve/Dockerfile @@ -3,7 +3,7 @@ ARG CUDA="11.7" ARG CUDNN="8" FROM pytorch/torchserve:latest-gpu -ARG MMPRE="1.1.0" +ARG MMPRE="1.1.1" ENV PYTHONUNBUFFERED TRUE diff --git a/docs/en/notes/faq.md b/docs/en/notes/faq.md index dd059114..d83e5260 100644 --- a/docs/en/notes/faq.md +++ b/docs/en/notes/faq.md @@ -16,7 +16,7 @@ and make sure you fill in all required information in the template. | MMPretrain version | MMEngine version | MMCV version | | :----------------: | :---------------: | :--------------: | - | 1.1.0 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | + | 1.1.1 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | | 1.0.0 | mmengine >= 0.8.0 | mmcv >= 2.0.0 | | 1.0.0rc8 | mmengine >= 0.7.1 | mmcv >= 2.0.0rc4 | | 1.0.0rc7 | mmengine >= 0.5.0 | mmcv >= 2.0.0rc4 | diff --git a/docs/zh_CN/notes/faq.md b/docs/zh_CN/notes/faq.md index 23ec5f50..6a5fdc46 100644 --- a/docs/zh_CN/notes/faq.md +++ b/docs/zh_CN/notes/faq.md @@ -13,7 +13,7 @@ | MMPretrain 版本 | MMEngine 版本 | MMCV 版本 | | :-------------: | :---------------: | :--------------: | - | 1.1.0 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | + | 1.1.1 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | | 1.0.0 | mmengine >= 0.8.0 | mmcv >= 2.0.0 | | 1.0.0rc8 | mmengine >= 0.7.1 | mmcv >= 2.0.0rc4 | | 1.0.0rc7 | mmengine >= 0.5.0 | mmcv >= 2.0.0rc4 | diff --git a/mmpretrain/version.py b/mmpretrain/version.py index 32f800cd..8f8c8b7f 100644 --- a/mmpretrain/version.py +++ b/mmpretrain/version.py @@ -1,6 +1,6 @@ # Copyright (c) OpenMMLab. 
All rights reserved -__version__ = '1.1.0' +__version__ = '1.1.1' def parse_version_info(version_str): From 3022f9af7bca3feee88657f5bc0b91a917d25c02 Mon Sep 17 00:00:00 2001 From: Ma Zerun Date: Fri, 22 Dec 2023 16:28:20 +0800 Subject: [PATCH 19/20] [Feature] Support LLaVA 1.5 (#1853) * Support LLaVA 1.5 * Fix lint --- configs/llava/README.md | 30 +-- configs/llava/llava-7b-v1.5_caption.py | 76 +++++++ configs/llava/llava-7b-v1.5_vqa.py | 76 +++++++ configs/llava/llava-7b-v1_caption.py | 21 +- configs/llava/metafile.yml | 28 ++- mmpretrain/models/multimodal/llava/llava.py | 35 +-- mmpretrain/models/multimodal/llava/modules.py | 208 +++++++++--------- tools/model_converters/llava-delta2mmpre.py | 39 ++-- 8 files changed, 338 insertions(+), 175 deletions(-) create mode 100644 configs/llava/llava-7b-v1.5_caption.py create mode 100644 configs/llava/llava-7b-v1.5_vqa.py diff --git a/configs/llava/README.md b/configs/llava/README.md index 7aaf57d7..581abfe5 100644 --- a/configs/llava/README.md +++ b/configs/llava/README.md @@ -16,46 +16,28 @@ Instruction tuning large language models (LLMs) using machine-generated instruct -**Prepare the checkpoint** - -According to the license of LLaMA, we cannot provide the merged checkpoint directly. Please use the below -script to download and get the merged the checkpoint. - -```shell -python tools/model_converters/llava-delta2mmpre.py huggyllama/llama-7b liuhaotian/LLaVA-Lightning-7B-delta-v1-1 ./LLaVA-Lightning-7B-delta-v1-1.pth -``` - **Use the model** ```python import torch from mmpretrain import get_model, inference_model -model = get_model('llava-7b-v1_caption', pretrained='MERGED_CHECKPOINT_PATH', device='cuda') -out = inference_model(model, 'demo/cat-dog.png') +out = inference_model('llava-7b-v1_caption', 'demo/cat-dog.png', device='cuda') print(out) # {'pred_caption': 'In the image, there are two cats sitting on a blanket.'} ``` -**Test Command** - -Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). 
- -Test: - -```shell -python tools/test.py configs/llava/llava-7b-v1_caption.py MERGED_CHECKPOINT_PATH -``` - ## Models and results ### Image Caption on COCO -| Model | Params (M) | BLEU-4 | CIDER | Config | Download | -| :-------------------- | :--------: | :------: | :------: | :------------------------------: | :--------------------: | -| `llava-7b-v1_caption` | 7045.82 | Upcoming | Upcoming | [config](llava-7b-v1_caption.py) | See the above tutorial | +| Model | Params (M) | Config | Download | +| :---------------------- | :--------: | :--------------------------------: | :-------------------------------------------------------------------------------------------------------------: | +| `llava-7b-v1_caption` | 7045.82 | [config](llava-7b-v1_caption.py) | [ckpt](https://download.openmmlab.com/mmclassification/v1/llava/llava-7b-v1_liuhaotian_20231025-c9e119b6.pth) | +| `llava-7b-v1.5_caption` | 7062.90 | [config](llava-7b-v1.5_caption.py) | [ckpt](https://download.openmmlab.com/mmclassification/v1/llava/llava-7b-v1.5_liuhaotian_20231025-5828aa5a.pth) | +| `llava-7b-v1.5_vqa` | 7062.90 | [config](llava-7b-v1.5_vqa.py) | [ckpt](https://download.openmmlab.com/mmclassification/v1/llava/llava-7b-v1.5_liuhaotian_20231025-5828aa5a.pth) | ## Citation diff --git a/configs/llava/llava-7b-v1.5_caption.py b/configs/llava/llava-7b-v1.5_caption.py new file mode 100644 index 00000000..371c9b5f --- /dev/null +++ b/configs/llava/llava-7b-v1.5_caption.py @@ -0,0 +1,76 @@ +_base_ = '../_base_/default_runtime.py' + +meta_prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions." # noqa: E501 +image_size = 336 +prompt_tmpl = f'''{meta_prompt} User: +Describe the image in detail. 
ASSISTANT:''' + +# model settings +model = dict( + type='Llava', + tokenizer=dict( + type='AutoTokenizer', name_or_path='liuhaotian/llava-v1.5-7b'), + vision_encoder=dict( + type='VisionTransformer', + arch='l', + patch_size=14, + img_size=image_size, + pre_norm=True, + norm_cfg=dict(type='LN', eps=1e-5), + layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), + final_norm=False, + out_type='raw', + pretrained='https://download.openmmlab.com/mmclassification/v0/clip/' + 'vit-large-p14_clip-openai-pre_336px_20231025-fb1315ed.pth', + ), + mm_hidden_size=1024, + use_im_patch=False, + use_im_start_end=False, + mm_proj_depth=2, + lang_encoder=dict( + type='AutoModelForCausalLM', + name_or_path='huggyllama/llama-7b', + ), + task='caption', + prompt_tmpl=prompt_tmpl, + generation_cfg=dict(num_beams=3, max_new_tokens=50, length_penalty=-1.0), +) + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(image_size, image_size), + interpolation='bicubic', + backend='pillow'), + dict(type='PackInputs', meta_keys=['image_id']), +] + +test_dataloader = dict( + batch_size=8, + num_workers=5, + dataset=dict( + type='COCOCaption', + data_root='data/coco', + ann_file='annotations/coco_karpathy_val.json', + pipeline=test_pipeline, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) + +test_evaluator = dict( + type='COCOCaption', + ann_file='data/coco/annotations/coco_karpathy_val_gt.json', +) + +# schedule settings +test_cfg = dict() diff --git a/configs/llava/llava-7b-v1.5_vqa.py b/configs/llava/llava-7b-v1.5_vqa.py new file mode 100644 index 00000000..5cb9812c --- /dev/null +++ b/configs/llava/llava-7b-v1.5_vqa.py @@ -0,0 +1,76 @@ +_base_ = '../_base_/default_runtime.py' + +meta_prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions." 
# noqa: E501 +image_size = 336 +prompt_tmpl = f'''{meta_prompt} User: +{{question}} ASSISTANT:''' + +# model settings +model = dict( + type='Llava', + tokenizer=dict( + type='AutoTokenizer', name_or_path='liuhaotian/llava-v1.5-7b'), + vision_encoder=dict( + type='VisionTransformer', + arch='l', + patch_size=14, + img_size=image_size, + pre_norm=True, + norm_cfg=dict(type='LN', eps=1e-5), + layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), + final_norm=False, + out_type='raw', + pretrained='https://download.openmmlab.com/mmclassification/v0/clip/' + 'vit-large-p14_clip-openai-pre_336px_20231025-fb1315ed.pth', + ), + mm_hidden_size=1024, + use_im_patch=False, + use_im_start_end=False, + mm_proj_depth=2, + lang_encoder=dict( + type='AutoModelForCausalLM', + name_or_path='huggyllama/llama-7b', + ), + task='vqa', + prompt_tmpl=prompt_tmpl, + generation_cfg=dict(max_new_tokens=100), +) + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(image_size, image_size), + interpolation='bicubic', + backend='pillow'), + dict(type='PackInputs', meta_keys=['image_id', 'question']), +] + +test_dataloader = dict( + batch_size=8, + num_workers=5, + dataset=dict( + type='COCOCaption', + data_root='data/coco', + ann_file='annotations/coco_karpathy_val.json', + pipeline=test_pipeline, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) + +test_evaluator = dict( + type='COCOCaption', + ann_file='data/coco/annotations/coco_karpathy_val_gt.json', +) + +# schedule settings +test_cfg = dict() diff --git a/configs/llava/llava-7b-v1_caption.py b/configs/llava/llava-7b-v1_caption.py index f7558bed..92e2d1fb 100644 --- a/configs/llava/llava-7b-v1_caption.py +++ b/configs/llava/llava-7b-v1_caption.py @@ -1,16 +1,9 @@ _base_ = '../_base_/default_runtime.py' meta_prompt = 'You are LLaVA, a large language and vision assistant trained by UW Madison WAIV Lab.You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.Follow the instructions carefully and explain your answers in detail.' # noqa: E501 -im_patch_token = '' -patch_size = 14 image_size = 224 -num_patches = (image_size // patch_size)**2 -caption_prompt = ' '.join([ - meta_prompt, - 'User: a photo of\n', - im_patch_token * num_patches, - 'ASSISTANT:', -]) +prompt_tmpl = f'''{meta_prompt} User: +Describe the image in detail. 
ASSISTANT:''' # model settings model = dict( @@ -22,6 +15,7 @@ model = dict( type='VisionTransformer', arch='l', patch_size=14, + img_size=image_size, pre_norm=True, norm_cfg=dict(type='LN', eps=1e-5), layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), @@ -32,15 +26,16 @@ model = dict( 'vit-large-p14_clip-openai-pre_3rdparty_20230517-95e2af0b.pth'), ), mm_hidden_size=1024, - use_im_start_end=False, - use_mm_proj=True, + use_im_patch=False, + use_im_start_end=True, + mm_proj_depth=1, lang_encoder=dict( type='AutoModelForCausalLM', name_or_path='huggyllama/llama-7b', ), task='caption', - prompt_tmpl=caption_prompt, - generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0), + prompt_tmpl=prompt_tmpl, + generation_cfg=dict(max_new_tokens=50), ) # data settings diff --git a/configs/llava/metafile.yml b/configs/llava/metafile.yml index 2b3cfc4d..406a214c 100644 --- a/configs/llava/metafile.yml +++ b/configs/llava/metafile.yml @@ -21,5 +21,31 @@ Models: Metrics: BLEU-4: null CIDER: null - Weights: null + Weights: https://download.openmmlab.com/mmclassification/v1/llava/llava-7b-v1_liuhaotian_20231025-c9e119b6.pth Config: configs/llava/llava-7b-v1_caption.py + - Name: llava-7b-v1.5_caption + Metadata: + FLOPs: null + Parameters: 7062900736 + In Collection: LLaVA + Results: + - Task: Image Caption + Dataset: COCO + Metrics: + BLEU-4: null + CIDER: null + Weights: https://download.openmmlab.com/mmclassification/v1/llava/llava-7b-v1.5_liuhaotian_20231025-5828aa5a.pth + Config: configs/llava/llava-7b-v1.5_caption.py + - Name: llava-7b-v1.5_vqa + Metadata: + FLOPs: null + Parameters: 7062900736 + In Collection: LLaVA + Results: + - Task: Visual Question Answering + Dataset: COCO + Metrics: + BLEU-4: null + CIDER: null + Weights: https://download.openmmlab.com/mmclassification/v1/llava/llava-7b-v1.5_liuhaotian_20231025-5828aa5a.pth + Config: configs/llava/llava-7b-v1.5_vqa.py diff --git a/mmpretrain/models/multimodal/llava/llava.py b/mmpretrain/models/multimodal/llava/llava.py index 103d8129..f829b092 100644 --- a/mmpretrain/models/multimodal/llava/llava.py +++ b/mmpretrain/models/multimodal/llava/llava.py @@ -24,8 +24,8 @@ class Llava(BaseModel): use_im_start_end (bool): Whether to use the im_start and im_end tokens mm_vision_select_layer (int): The index from vision encoder output. Defaults to -1. - use_mm_proj (bool): Whether to enable multi-modal projection. - Defaults to True. + mm_proj_depth (int): The number of linear layers for multi-modal + projection. Defaults to 1. load_lang_pretrained (bool): Whether to load the pretrained model of language encoder. Defaults to False. 
generation_cfg (dict): The extra generation config, accept the keyword @@ -51,9 +51,10 @@ class Llava(BaseModel): mm_hidden_size: int, prompt_tmpl: str, task: str = 'caption', + use_im_patch: bool = True, use_im_start_end: bool = False, mm_vision_select_layer: int = -1, - use_mm_proj: bool = True, + mm_proj_depth: int = 1, generation_cfg: dict = dict(), load_lang_pretrained: bool = False, data_preprocessor: Optional[dict] = None, @@ -75,7 +76,9 @@ class Llava(BaseModel): # init tokenizer self.tokenizer = TOKENIZER.build(tokenizer) # add Llava special tokens to the tokenizer - self.tokenizer.add_tokens([self.im_patch_token], special_tokens=True) + if use_im_patch: + self.tokenizer.add_tokens([self.im_patch_token], + special_tokens=True) if use_im_start_end: self.tokenizer.add_tokens([self.im_start_token, self.im_end_token], special_tokens=True) @@ -108,14 +111,12 @@ class Llava(BaseModel): vision_encoder=vision_encoder, lang_encoder=lang_encoder, mm_hidden_size=mm_hidden_size, - use_mm_proj=use_mm_proj, + mm_proj_depth=mm_proj_depth, use_im_start_end=use_im_start_end, im_start_token=self.tokenizer.convert_tokens_to_ids( self.im_start_token), im_end_token=self.tokenizer.convert_tokens_to_ids( self.im_end_token), - im_patch_token=self.tokenizer.convert_tokens_to_ids( - self.im_patch_token), mm_vision_select_layer=mm_vision_select_layer) self.generation_cfg = generation_cfg @@ -207,16 +208,24 @@ class Llava(BaseModel): Returns: List[DataSample]: Return list of data samples. """ - prompts = [] + tokens = [] for sample in data_samples: - final_prompt = self.prompt_tmpl.format(**sample.to_dict()) - prompts.append(final_prompt) + prompt = self.prompt_tmpl.format(**sample.to_dict()) + input_ids = [] + while '' in prompt: + prefix, _, prompt = prompt.partition('') + input_ids.extend( + self.tokenizer(prefix, add_special_tokens=False).input_ids) + input_ids.append(-200) + if prompt: + input_ids.extend( + self.tokenizer(prompt, add_special_tokens=False).input_ids) + tokens.append(dict(input_ids=input_ids)) self.tokenizer.padding_side = 'left' - input_text = self.tokenizer( - prompts, + input_text = self.tokenizer.pad( + tokens, padding='longest', - truncation=True, return_tensors='pt', max_length=2000, ).to(device) diff --git a/mmpretrain/models/multimodal/llava/modules.py b/mmpretrain/models/multimodal/llava/modules.py index afa6eefa..fa3c6bbb 100644 --- a/mmpretrain/models/multimodal/llava/modules.py +++ b/mmpretrain/models/multimodal/llava/modules.py @@ -31,10 +31,10 @@ class LlavaLlamaForCausalLM(PreTrainedModel): lang_encoder, mm_hidden_size, use_im_start_end=True, - use_mm_proj=True, + mm_proj_depth=1, im_start_token: Optional[int] = None, im_end_token: Optional[int] = None, - im_patch_token: Optional[int] = None, + im_token_index: int = -200, mm_vision_select_layer: int = -1): super().__init__(lang_encoder.config) self.vision_tower = vision_encoder @@ -43,16 +43,26 @@ class LlavaLlamaForCausalLM(PreTrainedModel): self.use_im_start_end = use_im_start_end self.im_start_token = im_start_token self.im_end_token = im_end_token - self.im_patch_token = im_patch_token self.mm_hidden_size = mm_hidden_size self.mm_vision_select_layer = mm_vision_select_layer + self.im_token_index = im_token_index self.lang_hidden_size = lang_encoder.config.hidden_size - if use_mm_proj and not hasattr(lang_encoder.model, 'mm_projector'): + if mm_proj_depth == 1: + # Llava V1 mm_projector = nn.Linear(self.mm_hidden_size, self.lang_hidden_size) self.lang_encoder.model.add_module('mm_projector', mm_projector) - elif not 
use_mm_proj: + elif mm_proj_depth > 1: + # Llava V1.5 + modules = [nn.Linear(self.mm_hidden_size, self.lang_hidden_size)] + for _ in range(1, mm_proj_depth): + modules.append(nn.GELU()) + modules.append( + nn.Linear(self.lang_hidden_size, self.lang_hidden_size)) + mm_projector = nn.Sequential(*modules) + self.lang_encoder.model.add_module('mm_projector', mm_projector) + elif mm_proj_depth == 0: self.lang_encoder.model.add_module('mm_projector', nn.Identity()) self.post_init() @@ -80,16 +90,12 @@ class LlavaLlamaForCausalLM(PreTrainedModel): return_dict if return_dict is not None else self.config.use_return_dict) - # decoder outputs consists of - # (dec_features, layer_state, dec_hidden, dec_attn) - if inputs_embeds is None: - inputs_embeds = self.lang_encoder.model.embed_tokens(input_ids) - - inputs_embeds = self.forward_vision_tower(input_ids, inputs_embeds, - images) + (input_ids, attention_mask, past_key_values, inputs_embeds, + labels) = self.forward_vision_tower(input_ids, attention_mask, + past_key_values, labels, images) return self.lang_encoder( - input_ids=None, + input_ids=input_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, @@ -127,106 +133,93 @@ class LlavaLlamaForCausalLM(PreTrainedModel): def forward_vision_tower( self, input_ids: torch.LongTensor, - inputs_embeds: torch.FloatTensor, - images: Union[torch.FloatTensor, list, None] = None, + attention_mask: torch.LongTensor, + past_key_values: torch.FloatTensor, + labels: torch.LongTensor, + images: Union[torch.FloatTensor, None] = None, ): - if self.use_im_start_end: - assert self.im_start_token is not None - assert self.im_end_token is not None - if images is not None: - assert self.im_patch_token is not None - - if self.vision_tower is None or images is None or ( - input_ids.shape[1] == 1 and not self.training): - return inputs_embeds + if self.vision_tower is None or images is None or input_ids.shape[ + 1] == 1: + if (past_key_values is not None and self.vision_tower is not None + and images is not None and input_ids.shape[1] == 1): + attention_mask = torch.ones( + (attention_mask.shape[0], + past_key_values[-1][-1].shape[-2] + 1), + dtype=attention_mask.dtype, + device=attention_mask.device) + return input_ids, attention_mask, past_key_values, None, labels with torch.no_grad(): - if isinstance(images, (list, tuple)): - # variable length images - image_features = [] - for image in images: - feats = self.vision_tower(image.unsqueeze(0)) - image_feature = feats[self.mm_vision_select_layer][:, 1:] - image_features.append(image_feature) - else: - feats = self.vision_tower(images) - image_features = feats[self.mm_vision_select_layer][:, 1:] + # TODO: support variable number of images (single now) + feats = self.vision_tower(images) + image_features = feats[-1][:, 1:] - mm_projector = self.lang_encoder.model.mm_projector - if isinstance(images, (list, tuple)): - image_features = [ - mm_projector(image_feature)[0] - for image_feature in image_features - ] - else: - image_features = mm_projector(image_features) - - dummy_image_features = torch.zeros( - 256, 1024, device=inputs_embeds.device, dtype=inputs_embeds.dtype) - dummy_image_features = mm_projector(dummy_image_features) + image_features = self.lang_encoder.model.mm_projector(image_features) new_input_embeds = [] - cur_image_idx = 0 - for cur_input_ids, cur_input_embeds in zip(input_ids, inputs_embeds): - if (cur_input_ids != self.im_patch_token).all(): - # multimodal LLM, but the current sample is not multimodal - 
cur_input_embeds = cur_input_embeds + ( - 0. * dummy_image_features).sum() - new_input_embeds.append(cur_input_embeds) - cur_image_idx += 1 - continue - if self.use_im_start_end: - cur_image_features = image_features[cur_image_idx] - num_patches = cur_image_features.shape[0] - if (cur_input_ids == self.im_start_token).sum() != ( - cur_input_ids == self.im_end_token).sum(): - raise ValueError('The number of image start tokens and ' - 'image end tokens should be the same.') - image_start_tokens = torch.where( - cur_input_ids == self.im_start_token)[0] - for image_start_token_pos in image_start_tokens: - cur_image_features = image_features[cur_image_idx].to( - device=cur_input_embeds.device) - num_patches = cur_image_features.shape[0] - if cur_input_ids[image_start_token_pos + num_patches + - 1] != self.im_end_token: - raise ValueError('The image end token should follow ' - 'the image start token.') - cur_new_input_embeds = torch.cat( - (cur_input_embeds[:image_start_token_pos + 1], - cur_image_features, - cur_input_embeds[image_start_token_pos + num_patches + - 1:]), - dim=0) - cur_image_idx += 1 - new_input_embeds.append(cur_new_input_embeds) - else: - cur_image_features = image_features[cur_image_idx] - num_patches = cur_image_features.shape[0] - if (cur_input_ids == self.im_patch_token).sum() != num_patches: - print(f'Debug: num_patches: {num_patches}') - raise ValueError( - 'The number of image patch tokens should ' - 'be the same as the number of image patches.') - masked_indices = torch.where( - cur_input_ids == self.im_patch_token)[0] - mask_index_start = masked_indices[0] - if (masked_indices != torch.arange( - mask_index_start, - mask_index_start + num_patches, - device=masked_indices.device, - dtype=masked_indices.dtype)).any(): - raise ValueError( - 'The image patch tokens should be consecutive.') - cur_new_input_embeds = torch.cat( - (cur_input_embeds[:mask_index_start], cur_image_features, - cur_input_embeds[mask_index_start + num_patches:]), - dim=0) - new_input_embeds.append(cur_new_input_embeds) - cur_image_idx += 1 - inputs_embeds = torch.stack(new_input_embeds, dim=0) + new_labels = [] if labels is not None else None + new_attn_mask = [] if attention_mask is not None else None + for batch_idx, cur_input_ids in enumerate(input_ids): + cur_img = image_features[batch_idx] - return inputs_embeds + if (cur_input_ids != self.im_token_index).all(): + # multimodal LLM, but the current sample is not multimodal + new_input_embeds.append(self.embed_tokens(cur_input_ids)) + if labels is not None: + new_labels.append(labels[batch_idx]) + if attention_mask is not None: + new_attn_mask.append(attention_mask[batch_idx]) + continue + + img_idx = torch.where(cur_input_ids == self.im_token_index)[0][0] + if self.use_im_start_end: + cur_new_input_embeds = torch.cat( + [ + self.embed_tokens(cur_input_ids[:img_idx - 1]), + self.embed_tokens(cur_input_ids[img_idx - 1:img_idx]), + cur_img, + self.embed_tokens( + cur_input_ids[img_idx + 1:img_idx + 2]), + self.embed_tokens(cur_input_ids[img_idx + 2:]), + ], + dim=0, + ) + else: + cur_new_input_embeds = torch.cat( + [ + self.embed_tokens(cur_input_ids[:img_idx]), + cur_img, + self.embed_tokens(cur_input_ids[img_idx + 1:]), + ], + dim=0, + ) + new_input_embeds.append(cur_new_input_embeds) + + if labels is not None: + cur_new_labels = torch.cat([ + labels[batch_idx, :img_idx], + labels.new_full((cur_img.size(0), ), -100), + labels[batch_idx, img_idx + 1:], + ], + dim=0) + new_labels.append(cur_new_labels) + + if attention_mask is not None: + 
cur_attn_mask = torch.cat([ + attention_mask[batch_idx, :img_idx], + attention_mask.new_full((cur_img.size(0), ), True), + attention_mask[batch_idx, img_idx + 1:], + ], + dim=0) + new_attn_mask.append(cur_attn_mask) + + inputs_embeds = torch.stack(new_input_embeds, dim=0) + if labels is not None: + labels = torch.stack(new_labels, dim=0) + if attention_mask is not None: + attention_mask = torch.stack(new_attn_mask, dim=0) + + return None, attention_mask, past_key_values, inputs_embeds, labels @staticmethod def _reorder_cache(past_key_values, beam_idx): @@ -236,3 +229,6 @@ class LlavaLlamaForCausalLM(PreTrainedModel): past_state.index_select(0, beam_idx) for past_state in layer_past), ) return reordered_past + + def embed_tokens(self, input_ids): + return self.lang_encoder.model.embed_tokens(input_ids) diff --git a/tools/model_converters/llava-delta2mmpre.py b/tools/model_converters/llava-delta2mmpre.py index bc51b19d..104ed07d 100644 --- a/tools/model_converters/llava-delta2mmpre.py +++ b/tools/model_converters/llava-delta2mmpre.py @@ -9,23 +9,21 @@ from huggingface_hub import snapshot_download from transformers.modeling_utils import load_state_dict prog_description = """\ -Merge Llava delta weights and original weights, -and save as MMPreTrain checkpoint. +Convert Llava weights and original weights. """ def parse_args(): parser = argparse.ArgumentParser(description=prog_description) - parser.add_argument( - 'src_path', type=str, help='The original checkpoint dir') - parser.add_argument( - 'delta_path', type=str, help='The delta checkpoint dir') - parser.add_argument('dst_path', type=str, help='The saved checkpoint path') + parser.add_argument('src', type=str, help='The original checkpoint dir') + parser.add_argument('dst', type=str, help='The saved checkpoint path') + parser.add_argument('--delta', type=str, help='The delta checkpoint dir') args = parser.parse_args() return args def load_checkpoint(path: Path): + path = Path(path) if path.is_file(): return torch.load(path) @@ -41,19 +39,23 @@ def load_checkpoint(path: Path): def main(): args = parse_args() - if Path(args.src_path).exists(): - src_path = Path(args.src_path) + if Path(args.src).exists(): + src_path = args.src else: - src_path = Path(snapshot_download(args.src_path)) + src_path = snapshot_download( + args.src, allow_patterns='pytorch_model*.bin') src_state_dict = load_checkpoint(src_path) - if Path(args.delta_path).exists(): - delta_path = Path(args.delta_path) + if args.delta is None: + delta_state_dict = {} + elif Path(args.delta).exists(): + delta_state_dict = load_checkpoint(args.delta) else: - delta_path = Path(snapshot_download(args.delta_path)) - delta_state_dict = load_checkpoint(delta_path) + delta_path = snapshot_download( + args.delta, allow_patterns='pytorch_model*.bin') + delta_state_dict = load_checkpoint(delta_path) - merged_state_dict = OrderedDict() + new_state_dict = OrderedDict() for k, v in src_state_dict.items(): if k in delta_state_dict: delta_v = delta_state_dict.pop(k) @@ -63,12 +65,13 @@ def main(): v = delta_v else: v += delta_v - merged_state_dict['model.lang_encoder.' + k] = v + if 'rotary_emb.inv_freq' not in k: + new_state_dict['model.lang_encoder.' + k] = v for k, v in delta_state_dict.items(): - merged_state_dict['model.lang_encoder.' + k] = v + new_state_dict['model.lang_encoder.' 
+ k] = v - torch.save(merged_state_dict, args.dst_path) + torch.save(new_state_dict, args.dst) print('Done!!') From 17a886cb5825cd8c26df4e65f7112d404b99fe12 Mon Sep 17 00:00:00 2001 From: Ma Zerun Date: Thu, 4 Jan 2024 20:43:27 +0800 Subject: [PATCH 20/20] Bump version to v1.2.0 (#1860) * [Fix] Fix resize mix argument bug. * Bump version to v1.2.0 * Fix UT --- README.md | 5 +++++ README_zh-CN.md | 5 +++++ docker/serve/Dockerfile | 2 +- docs/en/notes/changelog.md | 11 +++++++++++ docs/en/notes/faq.md | 3 ++- docs/zh_CN/notes/faq.md | 3 ++- mmpretrain/__init__.py | 2 +- mmpretrain/models/utils/batch_augments/resizemix.py | 2 +- mmpretrain/version.py | 2 +- requirements/mminstall.txt | 2 +- requirements/optional.txt | 2 +- tests/test_models/test_backbones/test_repmlp.py | 3 ++- 12 files changed, 33 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 78d56fc1..5318df5b 100644 --- a/README.md +++ b/README.md @@ -86,6 +86,11 @@ https://github.com/open-mmlab/mmpretrain/assets/26739999/e4dcd3a2-f895-4d1b-a351 ## What's new +🌟 v1.2.0 was released in 04/01/2023 + +- Support LLaVA 1.5. +- Implement of RAM with a gradio interface. + 🌟 v1.1.0 was released in 12/10/2023 - Support Mini-GPT4 training and provide a Chinese model (based on Baichuan-7B) diff --git a/README_zh-CN.md b/README_zh-CN.md index 06daeb1c..9ee8dffc 100644 --- a/README_zh-CN.md +++ b/README_zh-CN.md @@ -84,6 +84,11 @@ https://github.com/open-mmlab/mmpretrain/assets/26739999/e4dcd3a2-f895-4d1b-a351 ## 更新日志 +🌟 2024/01/04 发布了 v1.2.0 版本 + +- 支持了 LLaVA 1.5 +- 实现了一个 RAM 模型的 gradio 推理例程 + 🌟 2023/10/12 发布了 v1.1.0 版本 - 支持 Mini-GPT4 训练并提供一个基于 Baichuan-7B 的中文模型 diff --git a/docker/serve/Dockerfile b/docker/serve/Dockerfile index 40ba0409..c50c4e8e 100644 --- a/docker/serve/Dockerfile +++ b/docker/serve/Dockerfile @@ -3,7 +3,7 @@ ARG CUDA="11.7" ARG CUDNN="8" FROM pytorch/torchserve:latest-gpu -ARG MMPRE="1.1.1" +ARG MMPRE="1.2.0" ENV PYTHONUNBUFFERED TRUE diff --git a/docs/en/notes/changelog.md b/docs/en/notes/changelog.md index 7a8ab680..499ed24f 100644 --- a/docs/en/notes/changelog.md +++ b/docs/en/notes/changelog.md @@ -1,5 +1,16 @@ # Changelog (MMPreTrain) +## v1.2.0(04/01/2024) + +### New Features + +- [Feature] Support LLaVA 1.5 ([#1853](https://github.com/open-mmlab/mmpretrain/pull/1853)) +- [Feature] Implement of RAM with a gradio interface. ([#1802](https://github.com/open-mmlab/mmpretrain/pull/1802)) + +### Bug Fix + +- [Fix] Fix resize mix argument bug. + ## v1.1.0(12/10/2023) ### New Features diff --git a/docs/en/notes/faq.md b/docs/en/notes/faq.md index d83e5260..da45841b 100644 --- a/docs/en/notes/faq.md +++ b/docs/en/notes/faq.md @@ -16,7 +16,8 @@ and make sure you fill in all required information in the template. 
| MMPretrain version | MMEngine version | MMCV version | | :----------------: | :---------------: | :--------------: | - | 1.1.1 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | + | 1.2.0 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | + | 1.1.1 | mmengine >= 0.8.3 | mmcv >= 2.0.0 | | 1.0.0 | mmengine >= 0.8.0 | mmcv >= 2.0.0 | | 1.0.0rc8 | mmengine >= 0.7.1 | mmcv >= 2.0.0rc4 | | 1.0.0rc7 | mmengine >= 0.5.0 | mmcv >= 2.0.0rc4 | diff --git a/docs/zh_CN/notes/faq.md b/docs/zh_CN/notes/faq.md index 6a5fdc46..9e94cd8b 100644 --- a/docs/zh_CN/notes/faq.md +++ b/docs/zh_CN/notes/faq.md @@ -13,7 +13,8 @@ | MMPretrain 版本 | MMEngine 版本 | MMCV 版本 | | :-------------: | :---------------: | :--------------: | - | 1.1.1 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | + | 1.2.0 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | + | 1.1.1 | mmengine >= 0.8.3 | mmcv >= 2.0.0 | | 1.0.0 | mmengine >= 0.8.0 | mmcv >= 2.0.0 | | 1.0.0rc8 | mmengine >= 0.7.1 | mmcv >= 2.0.0rc4 | | 1.0.0rc7 | mmengine >= 0.5.0 | mmcv >= 2.0.0rc4 | diff --git a/mmpretrain/__init__.py b/mmpretrain/__init__.py index 69c585bd..66866a86 100644 --- a/mmpretrain/__init__.py +++ b/mmpretrain/__init__.py @@ -7,7 +7,7 @@ from .apis import * # noqa: F401, F403 from .version import __version__ mmcv_minimum_version = '2.0.0' -mmcv_maximum_version = '2.2.0' +mmcv_maximum_version = '2.4.0' mmcv_version = digit_version(mmcv.__version__) mmengine_minimum_version = '0.8.3' diff --git a/mmpretrain/models/utils/batch_augments/resizemix.py b/mmpretrain/models/utils/batch_augments/resizemix.py index 89cfb720..c70f81b3 100644 --- a/mmpretrain/models/utils/batch_augments/resizemix.py +++ b/mmpretrain/models/utils/batch_augments/resizemix.py @@ -87,7 +87,7 @@ class ResizeMix(CutMix): (y1, y2, x1, x2), lam = self.cutmix_bbox_and_lam(img_shape, lam) batch_inputs[:, :, y1:y2, x1:x2] = F.interpolate( batch_inputs[index], - size=(y2 - y1, x2 - x1), + size=(int(y2 - y1), int(x2 - x1)), mode=self.interpolation, align_corners=False) mixed_scores = lam * batch_scores + (1 - lam) * batch_scores[index, :] diff --git a/mmpretrain/version.py b/mmpretrain/version.py index 8f8c8b7f..1822b7f2 100644 --- a/mmpretrain/version.py +++ b/mmpretrain/version.py @@ -1,6 +1,6 @@ # Copyright (c) OpenMMLab. All rights reserved -__version__ = '1.1.1' +__version__ = '1.2.0' def parse_version_info(version_str): diff --git a/requirements/mminstall.txt b/requirements/mminstall.txt index 197701a1..9b736b02 100644 --- a/requirements/mminstall.txt +++ b/requirements/mminstall.txt @@ -1,2 +1,2 @@ -mmcv>=2.0.0,<2.3.0 +mmcv>=2.0.0,<2.4.0 mmengine>=0.8.3,<1.0.0 diff --git a/requirements/optional.txt b/requirements/optional.txt index 85853cda..5f31808f 100644 --- a/requirements/optional.txt +++ b/requirements/optional.txt @@ -1,4 +1,4 @@ albumentations>=0.3.2 --no-binary qudida,albumentations # For Albumentations data transform -grad-cam >= 1.3.7 # For CAM visualization +grad-cam >= 1.3.7,<1.5.0 # For CAM visualization requests # For torchserve scikit-learn # For t-SNE visualization and unit tests. diff --git a/tests/test_models/test_backbones/test_repmlp.py b/tests/test_models/test_backbones/test_repmlp.py index bfcb5dfc..f03fce4e 100644 --- a/tests/test_models/test_backbones/test_repmlp.py +++ b/tests/test_models/test_backbones/test_repmlp.py @@ -169,4 +169,5 @@ class TestRepMLP(TestCase): assert len(feats_) == len(feats__) for i in range(len(feats)): - self.assertTrue(torch.allclose(feats__[i], feats_[i])) + self.assertTrue( + torch.allclose(feats__[i], feats_[i], rtol=0.01, atol=0.01))