From 8f9ff747366c2d863c673c836e7bcf4849ac5d29 Mon Sep 17 00:00:00 2001
From: fangyixiao18
Date: Wed, 15 Dec 2021 19:07:01 +0800
Subject: [PATCH] [Refactor] add UT codes

---
 tests/data/color.jpg | Bin 0 -> 35851 bytes
 tests/data/data_list.txt | 2 +
 tests/data/gray.jpg | Bin 0 -> 39088 bytes
 .../test_data_sources/test_common.py | 46 ++++
 .../test_data_sources/test_image_list.py | 19 ++
 .../test_data_sources/test_imagenet.py | 18 ++
 .../test_datasets/test_dataset_wrapper.py | 41 +++
 .../test_datasets/test_deepcluster_dataset.py | 45 ++++
 .../test_datasets/test_multiview_dataset.py | 49 ++++
 .../test_relative_loc_dataset.py | 41 +++
 .../test_rotation_pred_dataset.py | 35 +++
 .../test_datasets/test_singleview_dataset.py | 41 +++
 tests/test_data/test_pipelines.py | 99 +++++++
 tests/test_data/test_utils.py | 36 +++
 tests/test_metrics/test_accuracy.py | 15 ++
 .../test_models/test_algorithms/test_byol.py | 53 ++++
 .../test_algorithms/test_classification.py | 28 ++
 .../test_algorithms/test_deepcluster.py | 39 +++
 .../test_algorithms/test_densecl.py | 47 ++++
 .../test_models/test_algorithms/test_moco.py | 44 ++++
 .../test_models/test_algorithms/test_npid.py | 32 +++
 tests/test_models/test_algorithms/test_odc.py | 47 ++++
 .../test_algorithms/test_relative_loc.py | 51 ++++
 .../test_algorithms/test_rotation_pred.py | 43 +++
 .../test_algorithms/test_simclr.py | 36 +++
 .../test_algorithms/test_simsiam.py | 50 ++++
 .../test_models/test_algorithms/test_swav.py | 52 ++++
 .../test_models/test_backbones/test_resnet.py | 245 ++++++++++++++++++
 .../test_backbones/test_resnext.py | 61 +++++
 tests/test_models/test_heads.py | 75 ++++++
 .../test_necks/test_avgpool_neck.py | 23 ++
 .../test_necks/test_densecl_neck.py | 32 +++
 .../test_necks/test_linear_neck.py | 23 ++
 .../test_necks/test_mocov2_neck.py | 24 ++
 .../test_necks/test_nonlinear_neck.py | 26 ++
 tests/test_models/test_necks/test_odc_neck.py | 24 ++
 .../test_necks/test_relative_loc_neck.py | 22 ++
 .../test_models/test_necks/test_swav_neck.py | 16 ++
 .../test_utils/test_multi_pooling.py | 37 +++
 .../test_utils/test_multi_prototypes.py | 23 ++
 tests/test_models/test_utils/test_sobel.py | 14 +
 tests/test_runtime/test_extract_process.py | 61 +++++
 tests/test_runtime/test_extractor.py | 63 +++++
 .../test_runtime/test_hooks/test_byol_hook.py | 79 ++++++
 .../test_hooks/test_deepcluster_hook.py | 79 ++++++
 .../test_hooks/test_densecl_hook.py | 70 +++++
 .../test_hooks/test_optimizer_hook.py | 125 +++++++++
 .../test_hooks/test_simsiam_hook.py | 77 ++++++
 .../test_runtime/test_hooks/test_swav_hook.py | 76 ++++++
 tests/test_utils/test_alias_multinomial.py | 21 ++
 tests/test_utils/test_clustering.py | 28 ++
 tests/test_utils/test_misc.py | 14 +
 tests/test_utils/test_test_helper.py | 44 ++++
 tests/test_utils/test_version_utils.py | 21 ++
 54 files changed, 2412 insertions(+)
 create mode 100644 tests/data/color.jpg
 create mode 100644 tests/data/data_list.txt
 create mode 100644 tests/data/gray.jpg
 create mode 100644 tests/test_data/test_data_sources/test_common.py
 create mode 100644 tests/test_data/test_data_sources/test_image_list.py
 create mode 100644 tests/test_data/test_data_sources/test_imagenet.py
 create mode 100644 tests/test_data/test_datasets/test_dataset_wrapper.py
 create mode 100644 tests/test_data/test_datasets/test_deepcluster_dataset.py
 create mode 100644 tests/test_data/test_datasets/test_multiview_dataset.py
 create mode 100644 tests/test_data/test_datasets/test_relative_loc_dataset.py
 create mode 100644 tests/test_data/test_datasets/test_rotation_pred_dataset.py
 create mode 100644 tests/test_data/test_datasets/test_singleview_dataset.py
 create mode 100644 tests/test_data/test_pipelines.py
 create mode 100644 tests/test_data/test_utils.py
 create mode 100644 tests/test_metrics/test_accuracy.py
 create mode 100644 tests/test_models/test_algorithms/test_byol.py
 create mode 100644 tests/test_models/test_algorithms/test_classification.py
 create mode 100644 tests/test_models/test_algorithms/test_deepcluster.py
 create mode 100644 tests/test_models/test_algorithms/test_densecl.py
 create mode 100644 tests/test_models/test_algorithms/test_moco.py
 create mode 100644 tests/test_models/test_algorithms/test_npid.py
 create mode 100644 tests/test_models/test_algorithms/test_odc.py
 create mode 100644 tests/test_models/test_algorithms/test_relative_loc.py
 create mode 100644 tests/test_models/test_algorithms/test_rotation_pred.py
 create mode 100644 tests/test_models/test_algorithms/test_simclr.py
 create mode 100644 tests/test_models/test_algorithms/test_simsiam.py
 create mode 100644 tests/test_models/test_algorithms/test_swav.py
 create mode 100644 tests/test_models/test_backbones/test_resnet.py
 create mode 100644 tests/test_models/test_backbones/test_resnext.py
 create mode 100644 tests/test_models/test_heads.py
 create mode 100644 tests/test_models/test_necks/test_avgpool_neck.py
 create mode 100644 tests/test_models/test_necks/test_densecl_neck.py
 create mode 100644 tests/test_models/test_necks/test_linear_neck.py
 create mode 100644 tests/test_models/test_necks/test_mocov2_neck.py
 create mode 100644 tests/test_models/test_necks/test_nonlinear_neck.py
 create mode 100644 tests/test_models/test_necks/test_odc_neck.py
 create mode 100644 tests/test_models/test_necks/test_relative_loc_neck.py
 create mode 100644 tests/test_models/test_necks/test_swav_neck.py
 create mode 100644 tests/test_models/test_utils/test_multi_pooling.py
 create mode 100644 tests/test_models/test_utils/test_multi_prototypes.py
 create mode 100644 tests/test_models/test_utils/test_sobel.py
 create mode 100644 tests/test_runtime/test_extract_process.py
 create mode 100644 tests/test_runtime/test_extractor.py
 create mode 100644 tests/test_runtime/test_hooks/test_byol_hook.py
 create mode 100644 tests/test_runtime/test_hooks/test_deepcluster_hook.py
 create mode 100644 tests/test_runtime/test_hooks/test_densecl_hook.py
 create mode 100644 tests/test_runtime/test_hooks/test_optimizer_hook.py
 create mode 100644 tests/test_runtime/test_hooks/test_simsiam_hook.py
 create mode 100644 tests/test_runtime/test_hooks/test_swav_hook.py
 create mode 100644 tests/test_utils/test_alias_multinomial.py
 create mode 100644 tests/test_utils/test_clustering.py
 create mode 100644 tests/test_utils/test_misc.py
 create mode 100644 tests/test_utils/test_test_helper.py
 create mode 100644 tests/test_utils/test_version_utils.py

diff --git a/tests/data/color.jpg b/tests/data/color.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..05d62b850a70ea53948b6a687f2c14967f882cf2
GIT binary patch
literal 35851
[binary JPEG data omitted]
zrM;VMv!^0RYl?qg0fL9;9-blvESOzDcl?PQ30@h}f z;3frm5^PR+8f83GSvq!)uw@CzfE+}wQjeX}u*DuxUST($Wn(y`NaUXIw5}CQNYhS6 z?-a^>tfn5&%~sdh8YMZXc6NDk8y2VX4-l-qN}YA;Y|Iqc)D@bGah$HQ&X!;t6=YMS z#7-0i5&^sfl$GE^zi@2!9#A-dZ3$oiI3&41Ts|<+4zTe91XTcmJTQobVg^dy5^ewq zC7?%z%Y$yrtb8{J$N+hbt^Re(B>UIACao~tq`5OT#@zj_45-~29y5ufya#nqzKh~F zlgBS+?Z@Jt*LWqIapWy?@XNO)E&<%EJs>Y}V35=j++F}Qskotpl$S8*2~k#Fls%gA z*c4>GKtKU-DxZr%P6@J(+W}d}gBf(ol;Qw9FJ@j2Wmdhg(?Fc05*XZK+pmKAX|W7Tnb8<84!hows?V4W^5EN zql-N&3gCwG0bVd&%o8A>>^(3@X31)mASgmKCw>iqEAZa&?DZ9nq~N^T^$I3kI_Y|r z?#+V~kK%Ipe*9VLAE2+=VO^!U`sY+0YoF!hOp>ofrpcRT?Nxd4!c1XNa8cT~P!>E9 z_oeQ@Uo>*@uhv=q0sV~;%gqes6b&%$0eT`E_|L0PuHQ24TCL7xY%?4eXN8~e6( zDutLCNdX)HXH^+h5{CkWL_vQ%IRFHvsU;U~<4;h9asv=xEzcQ&62b!90D8X9NnFSj zR55qocW$lhg!k#+C)-xAp`8$${%e{et(N(X!E*89)?MXy0jZ%eg6E%-gSmd$+6kS_ z%QaGWS-s>M#k9zEv<_{oq8G3$wifRxRcuWT6-Xb<>q_E=U#N@+L|5|F1Y8E}RkQ|n z3@1Y4l3Ayc!2s67@FG_NWaKfK-x9>70VGc!20!~5XaMCbBo6jcHo|-g;B&Ap;i<;~ zq*y3SEtKjbP}Z*i%!-G6M%Er;GE3Yv40!;BfdU9rP|7_Zng6DiCAk3xUc9_&6_q?SP82OLP?JUgRnWvD z(gP@|i%_CvL_G$kGawiW%L)Zp0%QH(KF&QS@1=w5DcnF!whp8u=+1L7pf(Zax(K*L#d9#lWG^^@7Q+)BFJKG9CMN z_Gd6L$Hf$JR_t<ln>>mL|tUaujoq!BL zLjq_4I1g)1ZaxT~Z(g8nWsI$am|UfV2#N{wBAlUVba`o%=RJa7d-8j?FJGhuuGbkar?iu25Y% zElmTP3GpP@PwJLnJbf=KtfJ-8&!a%JRv!cp(P1S2u?;trebyo#D)fe<&a#z-CAOPU z=nc=t-grp%WctUi&Vu>vRe{4A1MX53)ILL72WxiR=?7cWl785He^Gext{~C}bE091 zg95VxAOpi5R%>}|7Qi0BRYtK=1Gv<{AjwSB53nmQVUsY|fz%^VSVGc3Pdv~Y zSAxyO1&~6(1MP_*Hvsb_w1kp6jq<^Cw8|5D;Ax}3v7$}*7^m3}CqL(97@Cx-y(H_V zmG~^fl!3XoepA%=$Qfpwe0bLvjS=f-!ww#xtSTa%blKS7&Ul_TkzAS|K`IJ~)HDHK zGk(yQY-F-e&~eOaHj#*E(di3bosLowu(L3*;OBXXPd}bg^p&$*L^LG)t77yQR;3H3 zF85RKlk^N3DYjWtxpx64;m@&r8q}|^)EWkhv-G~7ZEcw!e2!)OJtiIhbAnKB*=Kpn z71jCIH~uZT{SOMDax&3~;qk~j6etcOx9EvAFIMItikkT?M$xC+Pr3l49oT0e*WJma33u6ouu*{IJI{pUxGx z3XPtV-YZIhdDQ7MRro*Pj^gpuCAX;nzAg}i8P$X}0$Ol-or)67GnuU-Rd)<@Sb0yE zsbz##U^(wsdiG+_oRk}am=roSJvek)fh9l9Ec~IrJDl-ZRH-4Z2#ZYR;4-L}43gIa zE3)1ruzi9+a^;NHta-GMZ&NwRE$lcX|FUTM)qwsuFzUpz1W*^M7OGDa2_47;*Fggr zbwnjcij%D|;PLV#qy)eWfC&fUNbvgsEC7%Pi2|^#2XjvcpyK4|;RN9cx&ZYnK(GfK zVNH$C%8KIE^TfvoS&>qL-zo$4yZ{GV2>}N{OaO2LcpHFqJVtV}euS{6ni{PN`#a^q z`IeC$q%7y!nHtaZf^h?V{(O#NerksZ*b{*4lnlNYS+2@nt9fKx5v< zE$@xc>*{@{tMTHqho&48YajrGLXKEmU8Pkzs76~TiRc{E$h+G z1MMs2<_GI8+&_6IfM0!&85!MH9OOm@8|V&KT1#8XiNlP33y!1^G^IXFJZ$)a2A#yF-9*7$SYm^14wx|;ymeD z&k4V<%wHz@80htH{P+jd2u@e0wvbJdlu1y2xWY|&DbTOf$?$PKEuhuTj^6R`)+muf zMKHsS@v7HW7pp>Ue0}~9hq;RCw2a6iPjrf`QIO%HF}7<{2l~uz8++m(;4FTjF|KrR z^QFO0sQ;|%H}S4?uFNymaK}R)vdr4J8K(4;AHV3;zIXUem7S6sPE5D*eJUj`eE=h= z)Z5oI-&Pr9s3C%NpMysuyX2v^CLJgHK8=T(0e>CT)dk+=5axByjpp!|;t~12sW@f- zJM6$f;E8z5i#~SJE&~QkA-UF^%D;I{OQlt-(R>#y+Zk2J<6klp_O|cnp;U0*{+&kJ z6F#_}h~BnNv-G(CYkw%(i~POL4~0Y}U72l4!_2hYQRS$Az(w?H<53m?6{%~?trPq2 z6ovOhT{2y6R9acfcL)01*a0=e@nQ1Ufqe^?-d#yuAH-HZT+)^J(hwuAhu-B>q&$xQ zaWr{Qp|0_hu_7!rFV$tUcbHbDF$211I>%P3s;5I%g3k=(rV+9B5JCVF08@n-_W>&~ zgC8)SJT|L5CklIs70@4a1al{%@K9hdK$-Vo-6O04$xM(k%Gyi?)lTqwHh|wn8aC{ClhFotLPb|+p&o8SJhrFCbBo=cpFM(2t}*-p2&UjB8YH5dY7SQ zS)ACfBXO3nnCRS%{L=Cmp*R!{^|N%@CWvYu71B>G_xW%+@*<%9^}G4h`WKr8hb0-YKVnQwrjXvS{h)-A%IUC@9AN+`y*v<0!9#Q{TvfSNKC(=(e zOz+V>SKG;b^s@h^k>B*s{x5U5fm+O=(#-~kA2LhcK#8IvWY;Wz`mYHLp6hM(D{?#q?SBCss zG!FPtn>eUr7(-UW;v+-7WZMmmUAGH|@#mu7Wb75_F(i3rLrYI3fa^T_%>Z}2FW~Dt zHMJ;jfwr&{y~heG+UMu?3$xFI#XJ~T8{O}6h8fvAn_YDN0f?Ut1D+}L!Qq4^WJ(`q z-$1sKq_QSlLQF_u`dpcK5gi1v{A%CnPRDM<18(N&o}&%c$3<10p|=KJ7IVGW)q$Mx zDM?-x$%S98PvjX|S?wu8^*=uI8mV0?4BkRNZPgbS8=8IM(T%R>&MAsK_nNJvKX|n~ z6~#Tz`DXmor$6sySyb&e28{|`Mr#T~Lp|Mnv;q(w-;Q77i$6d&-AB3Usvorf zq`)HtYXi!*2Z)D*1JWL9B1!i#0#M*GfU8X&JE8}lR2%@a_J}E1`s8z2Q!_rj)pj+f 
z!23avaVl7So%-miJ9)@#EA{D>ze_51;Q1Bj_pRY|t}TWA0|{l7T>(O#-DxAY^krh% zFVWv_Zk#~2aW^6VfLKZ@yz(I5?d?J(>Tc(#k1DLE zVtl8wefJMwWgW#d%C(+6cYV!R7;nF6)QUD`K`i%~Md&Y}iwc&#W>Po@7ftw=?b;8> zi3hnuQ9*CTZiaH%_|-j>Zh}-vC(`$L_STw2oD+tIz4-v5@B(qGHG(UgLw5lfqvRkN9lXRyE+6D`zms zF^D=pv!D4Bhjn)o@Jo%8-GN_sYd@w4Ubwwk!!~%zxj??RSc92__9)&xaemr^es7+O zyaqLPdr)Rn@C&_HG?yq6&A(uaJqYzks|u7wbdNDm3}&kPx5npYGjJJlh^_HtlNR43 z2@F+oj(@qMWB9@L0OeNoxkF| zqb}#BcC0ypi{jd9j5h3o#=F9!O?EoZsW%}GBrw>*mcpLJYu~FWOrk+|;d5X1^0AvA z+HBOB2X4wnqfSJGANBouKP{kEsF5<)CgX!y?*3qBv%J0AgO!cs3XO^&K6)>w-{F84GrbY`fdZj^Fe15JU@$jmGf@{) z9}Cc;aDLcpYl1llQ66Nj00ALtA&wVI{y(Dw@X)IOmV^E%89^f~%C`i{C{^HLr%nN2 z=T-t9kPA>NfSUjm@uILz~)NZ_MM0Z(b4 zJmVO9b!}beGzRT5ezpFLJMq~4lZC(a6J^}lk<@fAK9A^y7$_++dxA@3UQiyU8={1L z!hHsHepS5sqyAm;95w~PYspypUdx{TtsGU}=F4PsuRr08DG2~^io=(b(sioNbk@5* z7+X4;^xR}_Dy>@#Fa60h-v_%`@HfrMxa@o^ITw4lG5nn%N9$4IpM&SC4$MMhk@x)n zfHPS|u2*DyABiaJ5W49#p3>Ibe{Onq(8@e+XhX6O+ww@6Mew&4foWkh{1O%+PNn=Hy1POiMB zaVvHvk4}rG8b)pMmLOI0Y8W;Y5h;|?xK(#_g%oa#;Rn@ji1}zWQY}0&8E!{)sq<+z z1_3&OD$lM$GmD>2`g;+>F`53mgSn)%RSnB2(z8sh=S><<;f4?`5!zyWv`j#AbI5F? zjn`cR`3l3;s6s6`mhw=T`Iqj-KOmCkbL^nxnJseKFEFrk));`OHOi6-M z_xwK~>;jRce>?I1YPpgR#v|F;k%%Zt$*a`{GDb%8{sEpP0h+sGJZFA)VOPF}IvtgV zVb(U5{4)_WCl%I>KgqQP{*Xr&OIAH@qWS9Bxx$ABilm@_H(nKKu7pZ9dqr)45MNw_ zX*R1572=I7MB^T^uqaRI zzU3dFMX&iy`q`b1i$A*`jM6KKG;W*dYq+imMo-9HPB;y(^Ieh11lnh|au}HxrI_;R z?*2rOk`*9^pbU(_0n|7U5OoX?|A4;823^4ci9nM)C>H<&bX^7*aUe{U8B79zkAr0i z-;uIM(7FLod5K6%x&f3PGd%#Y1e66?8#{@hX;T|Rt~0xoG21zm z0mrT5?PEn1>9ymH*-hu`to$^z4sZMUJvFBqYkhy=GA3xX8W3n+)29TVGhz;uuDa$d z*C~%W2jk`mkkyc;FO)jkv1{q#CunOkiuD;>+Kpw1uNJmLC;P>iOYMpukyJ0l5TDDa z^U+|(nU~2ZUwhEb$8+fGonz%RParm_^Zw;scQm14VM=RiS;{(VQiKvc9aW*ScWgn+N5~U;iKl)B)hqv20!GRfoIun$Eg%XknBg*)DSZfmhW78g7EC5J75%3blkhikrRW`!bC&|SX z^295XM`{9oU@nl1F`xx`z;ns~Ei;BAFNP2^E(sO{)B}ILZ z2jrgq0}?i?M?=3?oCvRfotBx-i6ar&aQP#k9W-1l{a4v6ITg7Y1N|O_GU;&nDH^Lt zP(G6Qq`w)43C{M1vFyIit1752C#F@+9lUVJyvui=S{8i!B^P6|CNSU2@e2dZvVH3^ zt{u*wVa4lYMpozd2`cEEr&&X>(R6ipR!fVC1!d+TQ4(^em$d!fD!T2>rSB|2w5g?c zA8q1y!eDa``sI#zygct@zR&-gAE<_n^a^^c7t5}4Zs-4z8{sm&{_Ky@QNZJU})mllUXVmvvsU1`C6vlvq zgMj%rsjP<)3(?tpRpex)Z_I?nI67QNQ}{&Q?BwD2xH&2JR54ey8@$Z{LR;NS)pyPo zEbc2@{;L03zm3nh2vV}^gn!fQDJdDncL#6?j4I*f5#S6pg4PU}=#Rp0xwC=ha}kgV z66EU$<19p}89n40oS|44ZBkt>Z2`4iu|k}L8kf*m=5J#ufRlc4KWd?^lpK}IRN2b7 zQU7Ss`=t5HhS{>48L}y**{gD`1Cb-7RhO`!)YNcacY`-t`B(19(EZnB-<%QSp*Gz` zW_`CN&99*tDh87$T8&2dgIYQ;P>(e?fdoOgQx!9mvv7;YylJ0 zx@3mz2`j7+Y<8|B82%U&?wLUPuQJ8+ulP|=E{HKVZ**-h^l0m7rYC}=5`tDRi6(VY zQ7a|Yx-@!8e=zg)J3EMzs1mKhSnv~}1<|Hg=;0+_>`sm`c7RXsd^ajvK3WYwKukNp z2f5ApS^7zlo;7H+a38sf8Wm$FaV#!>Do@T>;ky0c3*)rR7y!F^CfVY6V%QMIUS7a) zkeMZD_vX_rahwe6k4$@%pM8Z9v{lTzqOv-H@$|sI@;KIxR!ewqD)fMu>pZVX^u>vt`NaKTYa-gD zA>-LPS#>=d{Wq1KQ36&Wi>g8F5%)(c+Y7rS9p4-Jz0^d!fN@`fTXoQ24B{y|K7_C9lc$XWF%^yLBeCgE=vEiMershRgEV)_3|i^( zQbu6&VrIecTE~Lb)pa!oI$?0Lk)3hOvY(CjDi*)_h2Qt;>(*M3FWYpZYh9Ipuo@wg z+U&?yStz4IE#rMi+ z&?OuZ8SlLd$aP=uvj4^}Y^BwIq$a`tluBc^n_Mi6?Yv2HcOxkII$^^A6%Y)|&J;GP z<>rA0%PUr9RNge7-k63pV>LF5^0E^=ew*DFCY7 ziMEP=ciqkU?@z75}8Ez(@Q@ZY&c$c*^tSGc$G#CfZ;CBKSFd=JfU- zQ1)rDg)xlyS9eD$M<}~nf{8Zttp?^E9(zJoEi%QD62N|q{F1Y^Y}Qzzcq>2~-{`92 zZkW7wSxb{$>-%=3C8>Gz9duZUcnkdxFdiGwZX+Xo+p-tXD@~+?NS@+9u__-Tjy-UF&5@#&tVAF<=C?3Kx{3 zDf)XuN{M0RwC2Wz(@mUdfc5pz;557yGbH79roowvMI^lMz z)a8`KtDV~MBx%)pf)`V8J3U^&5(6cqi>J!Yj1R@Y2FNkxA0e!hFa((~O)mg$Ua$#( zArB6Z1L^5v49s9g4Wh6C!malp3=B&jthn4-dMd_0RLdwiC_$X8! 
zIac5_Jh}TR6EI}FrTK*>?}3Z$Pu*6<0us&Er8HhI$q6+ z5+D}_Bp<1}&ecl=ou7m}}_e?Aq$*v>3!QA$@Ypk}ArG z4yS>{5iRY&aYp6)u<7-Nu+T)9`in2uEsG)Pmj<0yh>8!ccNd)q1txHIp1wYByLwKz z+%BxE!5zf z`hPXu1Gj(V8R8rq3pmZAs3`3B4<3;a#UveVMyh1n4UL6xx)D!a;A@%c{1Ja{o{)y8edw}x^Hcau-Z*ae)Md=kl7bp#MvJ&!1q@={dD$`OLszH&HNrtcePMVpxOIL zy+IRIpSCB}@m6S|Nzv?ZlOGEG5;E~(#1Vm}_TK_Rg9N%`&3pF9ZvDDLy?^l=q>*sM0WdYktfIf$NvC+mA`CR->*l*wgzun7SOwbm6$;z7lBQd56)^)fEAF;MUhzd z-~z+|KB5GpUKh+A4^S5X)M>c#U{oAP7oY`jbJK8h0nP+*9Ez{36_qx2E5vcXcS%+$ zx5Qq#)1;E(2U3;&#C-DpVtzSk?o~cH+-dOdknyO{xDq?UBlLEgPD#HdYEImaE?A~2OPaxt_* zh}vb-DzQEA&lZj^xwAVQ5%Th-BO+Tb?8R2$0}53fHt02uST)%R_lpCyPX@!fIRX>_ zR+BD8{#LHGQC8dr@C42}Dwm-&O0j?Fe^0~y;3LkbNAc)FOX>+UbEwg2y4B?QWxr^* z^A*{%ge2rPy&#Kg+?Ir$(fCLV`gtJ)*D(-vv4gmDui9Q15zd(#AsM=}g=%%%^4eU<9~C<=gumz>?`_|5w=yzB<3Hn#U9+=9 zev6NP!J=raLT^CWv2VknW{^z%r8)WCif$C|6C|qaO*}~9A5b}`9z0+(&Q3!o*gNW2zHYei zrzAomEk;V@WAL(0>9lxKqxWpB1=F|fl|L?z-Y#QPArOdob>YEl0B&Rv~R# zUpbg3n#{=-#c(JwP3)~L=vnk>6y$FFFcZK&zPD;aTS+HpS@ZUlO<{4+7l4j&QLAKb zb6R{a^Lq{^M)IwJ$d3h{^k$!AW$MnQbw|16MlRHB#HY^Gz43B{9D~>K<2(Zd)p5)O z=u>G$3q$F|1EtZovo>E}I>O!6_(9`;AD=|;y^`ih5O+}YDYVF>!pI?p^b`f{(yvIo zGofE@v}2CgUHuI$y!i8+^_NJzMfFIY_R^d90kehJAo?aBy;_|g^c2sT1~uv^Zlw>} zpLd#RJmam~`2Jjr=8PEr!}`qh(rPrCb(qIFj_6tvqe$f=Q*EvD#k0&sHo9Kd=iTpq zGYeWfPUpf{KHT!JTAiKmQj{h3YprlsyX`@YRZCM^*?(-Qhv*bqhiVoae15CKA0xhU zfmRnl?!5Bh>>c?XU7yF$RY777Q~Ax)A(;n_^2xG zdvz(IqTBNy_y-OO4NpQ&E?FnZHRHQ%A4gvP-ag!xDJizG9*h2EX<~-@u?5KHU0gfS z*x|8h+7`v(&5wFI`pR_4jVyNaiQn*jQG*$l!E$AC@0cpJjpqm(Aae&l0uKIia+GIu zrVn;_-<7M8&#H(g_-!tK4fowcYqX>&G}}|F6L_;pBQlhvAi{5IeVWBBluUd29 zr<4K(N9ES+$wcum(0JbHM`1tP{Dhe$g%*MCh6G*P9mtOjgPjh}7%H5~c!{)FR zZm;q&FPr+4w8hoWY_uvrI?8qaY9z7F!?97}_y=@ltmD)GWIfn?)n>n)PlwU4PW!mA zESgUXUxo6*mBR!{o)_d%lx#_*@Jd}Amgc&RMv) zoZB{@<&a&d|D792k^ruKZCl>S8VO1H>4gON@KIqMef#0nqIhg;P z=aa)>iQMH)p4&J4i$Kd8UVL!;fhdZQ_y^o4Jfl$yOOL%0zbs0AW^*G|STDR+N0S#f zVFb_Vh+i0m<7t4RS(rV@PgBIrUK=wi*FJ4&jHTOZYV)`!E@wz{-N>*9_NvYLCr!uj z{sBIq9-{QgLGqHFyUwuo+w)!ND|U}hhw=+{&!0F?o<#8IoAfzQH_v!$HiTjE1~Zf8 zaiHwiz^y;!U_!~;sTI!1jDv_SdzK&iRpFE7S$GT1D&8!5v7F?Sx|$KiPB6VUr(%CD zs`5lsU8_*f z_W6K87MDg_$_7;8y%?C@9bMsCJAW&ox~7)}*% zXppDQZ8DdZrtV{+$XO(0xJnd5GdZWi-@B4kWOFnjKU0IPd%|8^XlW5=eMi`QBCK}K zS^uQUID1l1Y8&nwJc0bF>J+fdw*S^xz@SD_mu*UHm=)bRam42z`{XLom>kPQS&bW? 
zV(k|tv-0TQJ=PMDS5H3fv7&urxYDv~@2_9}PV#;!TA{MiAbWT7m(}}C%n($oQu(Iw z_$WQXeNsrBbe^5|eX!*1ui3wXfjXZ1N02z7n!byi(ZUw|*QM!E4V zHz?+AQy)!3o%!+}nH9Hy?v1etpx~0grq~dDkpwYZZsvoke7lj0il&6JW26c9itZR_ zQ3tE<4A>}0t*d6Wm{&6lRoC;1dZrUTck5nxAPGfm1?n2|gs2?C$U z$Pi&@{@RM#mB*n7j$DYWOHdK6$SOBuUMc7A5?}B3EmdYqkY(pY7GbJ)JHMEWXN=F=!dL2w6_mZlk?NCeL{jd>6gJ$b^XfRY+Yv|X0-9w z4!vkD8|d66k}q_NKe6GF5hujpCqXum)O`uP&>O$WNUzb}d&RR(#g`Jde3Q*4>9Uiq zJn8(l%cGeUl{4>HLgtVq)%|=3CVZr4sTj?0(`oQG_l}^;z%aJqmrj>L9dg(btkujQ9vK9Bz$x)s|S&6Cic=tv_~ z^Il#sc%R&Rr=Lw(c%XVsB(rd#uTo{Wzrl>imXpG)CZitd70l9P|AO=|6SL}# zOSXHUt@?J!E}hqdjj($!n+4ksfLDtH2L`dz2Bv>a(}9=pAkQfK2i^Xm;X{4-Naw+gf37pUfxPy2C-|9 z!Pj`FxTVfGEhd~Yg#Iu^UA-pVyOB}bGS|p#FAs#LYNz0Oih{b}I3zu+tnv~lA}>>U zQc^B(JkVDHSfg-P%eymJjdS0^2>=?jH-n9=>o|R&P`OlAKb~wuejb-htHz|oAAXgS1(WO8KN!YOxKWUB?6b4lkWD8WDRd-f z;^BT_cTn~kGMge{p9eL+bnvKFH~g9C$&o(WziT^iabZ2a-RI&s$NRj2x5`WSO}diW2v z>-6Zo*_)L&`3WstP6f1`UYPWsm?d$Qw_Rsx2ppDsEl2>243bAhMBg(o<`R$Wj?!(G z8d7TnAI&{*a!gBk?`TQTL{r9IW} zv#hPUd=x0uTx_h!H=z&R63n7X(;8Bw3DYfhG(oFg5jroOX3Ll|2JAAm{s7mnNAvM;kVa?dwN-b(Z*Po-^>A~1-d}qk& zkU0hWy1h*$x#(%mJ9%YRXol)yi)olboKSg_^B0-KWCfo}pBOa>GV-_Ud*-e$!&C<} zc0k;mRV3=a7k?*{o>`3MUH#fnDQ^}15~!qz$F?(ISWM|LLeb5(n{6b=S{>>(kt5Zh zun+PaYIRESzGJNrZ^||%yO`P{tLujvv2+~Y?f;PqqR%Bmv3XYMACz>iXG^kjs|3aXfvj2jk*TuxS6-th- z>!5#w(^amlGSjUW$;RWk!e2n12fV1+bfj*v)?mJzm}DY^ zO;w^rG~Hk&W%efXAw=b!w3%Q}5!0*qd67ITeOhWo%EJrmKPJl8dC@qNDv0u5aU=ou z!NL4^&3r(Q^63;1Wu;$u}#Y9fP>vN_Hl$f z0AMk6;nfA)ZtpLZl#-`!Hfvq6dIAIh2*Z*b{y^)S2U(h!R3?pEJgPV{=u5Q9n|-q*H>Dl;?9jt9S^q>glnfhFg#Bwq9?H0EPsBi zVt@V0F>_P9Hi+wK>~A9af#;z%Y=al-2U!O+qf?(q%Ww@w3+(CxS4V$9Usln_GT<}8)e@$zzcNV7Sl^i-pYohqAhf#J!Q z@v*nODbSN<^I@j}5RK>bc&v7zjTy9P;_PYZUk9h-b>7n3d#v^dtI$v9wA7}{Jco4L z&zM556-}DrX9tmz4m&z)uz@y#j0Cjmr~A+%A)OA&d14wN+Z7(T`ZJ?B<%;Dt*u8!n$Cc99(Zqzl#Ms4ail0%G>1@RWUa0 zYIx&$Z>_{cd$T#aEmIvNsw!Ilv`9XbX!3K194j$?t}EmN59h(crF{=6-#)D^e6x{xpLZaL(G}YrOO^vEdD<31T3h?rn z!^M>I_=R9IrSS_iEZ%!G+}UQE#=dBhzFgrDDN|9E&MLcdXZvw&{7N`C=Hq-mh1J*} z+*0Yf7q5jYD)v|LENpHfov8$ymk4@wOR5JK7%(7kxVN&kiw*CBf9Mpsl3f>)W}NJm zBOYdZ&157tNzyd~E!L|28Q+BS4*Ge@xdBcnA?2pM8tbxkUoTWP7J17f1a(%SkG0&9 z;d+zc9g^Z;<~?O4I!>|%)%S%mV{2@}SX{ShowpO%wJzn&bCfHv7=L^=OGlCHF88B$ zmtdWJ>bCRbEOQrP;&=NKml1vB2Iw;UHZRJ1T>zCQoDr|`)Jk52`W*i`&$0cIc+PdR zY=6s^k&`?N8*3$j3wv7?2^yJ>ED-I2Vf8g_BBI3eT&^WeCQE>*LIe^;fd6`+hBz^( zrmW9+>O2dopAy6IG3CH;c`kEY5A(4VQI8{fWc-}Gorb=MixB2(;`&&TF41{&`~)ET zTwWWr?CanuwZd0x)9(>;(ltVjV zG$n3r8fckcxwAP%3&slLc_1;WXz8o?rM9AH zrOQdLxUnJI(;u20TUfHpkKc|xUXPYMUw0_FRhRyv*22r*zy2*~_(S}*g-xy#5C>4B z@D%y}0);{VX++wi1XOP390;mX56l=5dOY?m+rZrm*Abd0(A zg7A0_#;zo^BK*?tsMeXocmRF#4j?V?+a$?*)8KmO<3?k$tXy$c_e-H5uFJ-{`Y&zK zlpB78u>y}4PgJKDT}-*IirAdk@VV4^YOB+Vl=pZpNHI@y2RuFVG?kZcOl+9kd^T$` zpK#eP4Zs?T-B;shn}6qF-=&GoR#SU)#s)V=&O+$VMB&O1+G zBRb(dB~70?nVP70{QW-0Z$Iy?iRV09f2z)ztj-&<2AWFBOE{gbSnQdOb)j~A@pOgO zEIDme;DE7RY3qs(wLwLquIkHyvu4G?Dc0UnJL#K0H!690d=w$sC7H~&$)^vCh3e4Vwh9l);_PW*5=^Xk7y`YlSaO8^<}LYx$OI0$S&I9UBz6F zi8*T(PZU+DP=z4`*Mor&Jcu;iEecMCdC^`|kyjrwl}8hugAwGmS)fyx0TKNcr-Xx~ z0`ex`P?kGgwdGJ3UmN-kSwg*~H*2H;<6_FB?}{OG!VbeNVd#E-XEW)$Grhtd2>ipn{8d_d>D z_kE{kz{d#O*BWHc%{#d3`#D|&*TGFsewCQ3TuQVZT#V&_x^+E7dL((Xm<{aW`#85J z2~!-C4mzoys?=~5YMPZr&lwvCXCMoab6JoL-p50(0TzE_#|3)ET^>u@W@@nD>#`UD zh&sa zW9h4iIYju-4`&Ka;%E-cs}1kl=dV|Dw&5*C6_ir2pjZN~3))&*~`; z-^WN?`d6AdH-1xDvZ*IYPNtWEsRMf}<%@<^S|K;PjJ=EoKTkaY0{{mFXzIvAb5W2} zW&%9`#=+eqgtHHlVMLW*W(Zaimx!eZZ1L56m6qjLW%OP#Z4l8|Anj=@aQGhpz(7C0 z9$O@ND?J20-5D+Uu}Z~>D^gEV27CoQBO{F(hYZ6n z2%XRo(`8qesQtWtk(I||tc<;+fx^CC^sHvesD3s>%C?X|H9_+n)1K?*EAwpX*Jg(s zEHW*)L%WVX-@}iHjY}hs=49hzyDj7mKgY9{jC{_BWKTkz8rbCV(k$1q6SI``J%4Y0 
zF-b@JNupUBI@r`u<;3XuQL9Smc|&C+N{bjS*^BA+ z@#OyiWH~8xHAaSZ+r%Y2>sM^*b77x9D5IKPmQl7v+?V1E#MLQN(am3z7Nuwp7Xi#) z6qS|92S%%Gn&}-q*0lGebz8J^lBPhWt2?KyKhKVt^jObC9sdAsQCiX#F@!jm!8rp= z+=43LG2~+|Np@?3(T!~iKl*vAoP^6H1}RYlB~%8b*y3FsG{r%RG1^Z?=&N*$=WF&> zP_rQSiUDM;GZZ_(0+LC?EVHE4l(=w7Con6^l%9tvH1ucNH*~IS^{HF(l#pc|(2}WX zn1wobk`ScImT`o#-{O7JWrS1{kH*GFBfc->hOu^1rOsT;1z1QdDB*$KWdZPpb3w;= zHE>E&`%7k$iVtdgM_rK%CR$KI2}UJ_Hkf~urtmf5l;s_m#!pEYH2s#I$-fX&rZ`9w z(wxM%#FrnLu{ONiXGqxyN+w*fgMSC3c1O3XISCU8ma=kEl1Nbmmu0DMLg1$n;A-@F zBOm5QSAtLZc4-p!j$FVnsm^<#C7=3OFsowCw}WJ8++?i_;?t&Q*Z$s_Ggg(jtB1<-w|mAcDo(nCViJh1?b*uAKh>QH)r& zR<@IAB+W{ax+LZjd%QxlU2(HHpjjf7A;o^BkB&JfqaxaiIuJ+j($F!e;$L^)9N#I( zN?(eW>`5+4Nrbf-od|MWlUEh$2xSaPAPRyu8YWq0SO!#BNK6C zlEta;hT#-saV>UYNMaS{AHS5GM zk?j8f!t{`t0KOt~FYxkEqkW&7`k%s8M$awyler$2vzHO-5$6S%xCBfVfCeJKL&bm) z004jlh=2@4RREx301AKz`{L?M#FUWW=NaP z#`Un)X~1`OsX<1A8v}@OaKyJaZ4VBNek6``(oW)P7j4jxH$x=0Ty#Fz^@}tv$uvuB z{QmlT(kpZwj}vIFeqP=umdK&>snF=iQ&q614lykPWEJlz{n!1&A~+BupOqT1vKGl^ zP$q$&N{|#WG+hV=@=zOA$7D$Pmdhao%K923g zH^lZ;Ed3E8GDp9>M%S2Ky1|$YJbK}QeIp%(`MI^fPUB>$;n^m<% z3Lt}pMaD`8OtWxH79x$TWOH{+(4lJ66S0@TGeL3#{5V29N|F-%MkLglKDzMLiORHZ zwmV25bmQ~m9ca5ba#Y4PwSaVQGI}@Me1uxT5Vo}qe)15arSFZW>4Df{)uTsh5=mHy(%QUk>{pW7NggNdGqp9_?Q1gAB^GHVkb9I) zDk1(K;zZ;m=nS+aPpAL^>M?)ehbsG@`y-8-=PoJfe{$usorh1fqFJ6gV>qbdVFl8e5n`y94S^;13$qSP-s{+ys6-iR6 zfDgj777pYqOxYtA_|1Oi+*xFjl6pO__+i_>;-r)n6D%N1l&O^{ODHJ{qKnH~6ooKT zKS3O4jpNAU`<)DpPAY5nHC?vYw9i9;gw6qk!W}^_>r(tze-)%QG4N93Z6Ky-2gXe$ci>}8g#s`VKF8%*o}03hFOG9^t{M3-x_P*WC5jwk?% zaUuMa3^E@Ed8$1%_;s<7Qbu8hJS22Y3G-v4=N_BzsJ_oT_=J-^Lt|Yz`{N$EKbhuP z-YsMX@lXRrtN{?%0GI$yD3A;$3$O|#0||&=D+EP=Kt!-BTUD_ro+{w+`-s7x#g1LG zRxIxsjcr$8$_}KEp`{2hz|Z((qqVcIi_T9*N3U@EDY86YLOj{@Po-jMsT46<3)0Qde6&LFK0yoboeNI1;^ za`he`Xj`OPvBa6fi;sHi;}45WtdlyeUj5X1!)#F2(JA_K`Vmp-36k{2MUCZIM~|PRw&KkScZs5kF`sFzWuaOms%rjHs;UYkBIT`F;+1llCu@%j!s&lG~)}lEuXhe{TvA3Y#4fl)0rTB&#v;5;E`)&mt+! z!rMs#?1!_@-BO7FxeNn>#+3lQ0b^R#n+zg4*!Qx9wzi=$Bq)$Zs3HELTIR&%i;W)p z8x1kPWpkQk0WDlXPOlUMur=N(B|p|lEutDLn`Zm7SGiM)@^%gH$4ccI@J+X~pO!xa z#j;C^5C*C_dIL=`faf3$0q7)UPKe9OT1Wsp4Nhv-tw0@>fH=sK>=LZKXl6G3l&z(` zn{Z`>JwK=x<+=?ue>^f$Xw;d7ZN<=M&tA9+63s%D3_aT3ge<89hxk=Eu>^u+mj(WR zv7@rveqIbau#l8WNGmSRu$6&;8i`?a0H}~f03hP#WRkjHCY(|C?5$sSe8F_uN--Ie zO6m#4g-bZOJ{QG6Xhvlq^!#OU{aKqg{{WNpT0Bd!Ruu%e_|jBq`iVKWMP_o3LzY#O z@;K3r#{HDds=9A0GA!2F#b}jlU7*>NnY3T+LZ%l9a^OoOZv$LaR}wh$eBLe=`$pdl%bj{>tUm-( z&Lxk8NmSI~1mRXNiHcIjpkl&EsKu35PoMH#&JOf~f9l=$cQXRw*^vo~)r@It;g_Icvm{ z{AJ7_m;QxI4BM*0AqlRHjZ6Ih0NDk8T!=vhM5_>VtvMFWv5Zv6f>NZ=DJgIb_OwFi zuFbiEsuHC)>6Q+f5*YA0^U59dD^?T*?7cA*sRU&0pD>ae!6au_{#anxq{UM9PJxf+SAAy;t#Pj}UB-Ng)xo5Z<_@Agr^Q?O~ba`p2 zWT-H~VZlHVfglDro*)r1E1&=nKgiR^*3t!?&%(m8MgB;?~l$ceuAj!DrxFH=wY!bBcj$l`^qK$MM6cQ2(q$Ev!Yfa z#COrgYND*NI82`ubX_63tpi_uctuO3F`sB-nfLEF39A}TwdM^u`}wpiy?aDe;WLS^ zAEuf6G=xb~7>El+<%H)-W$*k~j3u$D4DDB?3sk1QS?Fn(hpO(&jO~+DQ-~{dTy*Np zPddI-c7;z!y4b?SAz)!w!qiYKe3bdPARIYl`B5Ux)6pAx+R{cGNq-c#3K5Z@U=&`1 zNTi~ki$%1`Ceq3!Bq1k0Dlh*4Xm{r6BQ5|!L!m9~X|U6ZAG~5+_c1u9=2>->7kufM zQk;P@(k#`z8ur8OGRmr~C>l9nG$f44&^aopbE`ajvTmEvpEfj+q%QHqGjX5Wi#~uq zE+??1XETj4totJa`i(h&^=__5uwu`kOsh_~*eyym#*#b`utCf9B)2iZMr@jk9G~1! 
zcA{!zB}&2xB%WlP$Ce=PmXDtsB-%Q7Vab$hHRax7D&7S^0r28trIr~0Otb+10M^Gg z$h$YnJbEaEYh#W*xP;&2O)Z-OM z-aiWa2Dvh2%}^n80_QPjd4nYZR_@hD$be}^=I4B}>(Zm88d%hRc5m0Uv?f_3oHaBg zDIl8WL0SPc0g6Up$uWyGt!!pBy&iqrX=zmAKm{hQmjjEbAcs5@7%Gg!ksLWK=yKmP)2v+4Us+P_sgGGrLwX zcW#iGnG7ijOv6Mi#u^0{QNn+grBU#ac)#?rvWos>J+N6w4pNhozP^ONP+p)#Q4E!u zluJn^Elkh|ZUJR!BZ&^KEu1wrDvE^5-p@5#peb03;!z~={G%g60!S?MU}3H@UD+s* zv)EIZ31I93iSS|>=*|_SP2Xe|jg`I72&n{7ig6BrkB7zLtuDxF)gF=ThT0aO$tq$T zClDZd)4=qOw}S3nS;g{`NcG0euXH#d@F0f&0J^_7=yJW_3-<8h}9V<(VkIgmBC6@A63wY zUdD=^#f>~Det%eBI_zf=+?6<}@~k~H6f|D zbFaUCaaPMDN7`>OSNGTVg|;yvrMB=Kxcl?>SX`y>UMPt-fOgZ+@;nZY8Rb2UuYrZ( zX9PbDDEil^(DCtgx9qW0zIKxZN^vCSMq-cVD*oFf^6_SfV&sk%d)#3mB{)eW0$U69 zpzt&UqhFUKM;c``ZH@(~6+2X%IFnP7NnEqYt&}KPwM2ByZWfgfW^Ej)sUVV6UB>Yb zop7WnA&4wf!;yyQ&%#KFw@bj1%2PFQ985V9m+%DqqF#|@zQ+3~pfM+lS zpAyPPB54sg>f0LSx>P(jvVy_mVioXd+E2(GU zi%Mk389g-_g%tMue3rIxGeVsr%lvbKX`X!fKAF>lMyDe>L@noS-9C|rB;6XL69w&- z_%0=+*9j%FbA_{NfJxEIb8bFn?vC$?jej#Ln>3=84h~NZ6qNJ{#G(%zvxO#T$gI+A zH~OVb#+-msNF|5Dpptm9id1;DGicL`E5|EGK6tspnWgPM#7MVU@JvXU=m9}mD1zi< z?S~l1py42cQR%T|(v(@joGG-=G;M!i*Nu}9v8-BTW+cr&%}C&xieWGSKu$9)V6ih6 zT*p@)btO!}JR7)}-K%3Qwz=|=Y(cU}6Oa~#QF8#zGLn*C7zHI^N@s9f#*CRG7;;Tt zq!=(w&ud4twk~2)kO#wu2sGXaC@KRW#A3qKW-nOt7sYS0wIySnp`22J2^d49xdeWi zg@&q2C>C*-$B1+CO8OrKvlO@N!V;pR@VUYOQ4S770-^#<3$;U0jvNjZwslrpR9CM# zfy*cwiiH%OTf3wH91Jc848&2p85D?RD1zS*G_DnAm|=&8q!RgS6)ts=VmT0(JeY^d zj$4r=9EUL#Y?m!`TCY113zz~?gv?b7mnB7=Di{y<6$Ui{;?iGpu}SF+CdvS)Oj22^ zEg%rZh7(s%pv@=@?E31*4cZeqYL0We8rI>EmZc%6w6+!j&o&v6gaJk>#a!}_!J;`699&6KnLv`0#}Yvmd?ur* z1;4ecRk4SmdlfFRTrAub_Z~UNOT=G!$nvzhJ&UrF86$lP96(6*P7&3p??E2l3#HlQ zvZZ}daFjCEHJL?3ra)MtZGg0xECBETF+_lB5eNVbPhfNgDL@esHUJ_GfJQ8U35fv2 z00jtJ0tWjDDIr0_b@E{u^3zn!7~5SKF5X=QD#b0?^$dI+{G-cxnA}~*qrcyB-;63rjEf?|70mTm9zoRUll z`|@Y;O77+;6pEoZfUO6@&X;FM4hAY~4UAjem(_Gl?8S~s)JdGR&eRntC`;&4u<8y8 z1e9c|5JHt@4m%Y&zNnkp&88Ai86{^x+pomZtj^6rT0v^cMm&n1!R=~hPB@koFr0#l zz$YZR!cKq$C%b3D^di|J=`!_3iFr>F&Lxngeu{wq0Q|%dSQdFesERWcXxNDOG_Z&Mu@51RQpmqwJQ`6tFJz zqtVjEC!?j50jL7B54*?D$4utu!kx@;gD3DV6@@-re71S2i)AHI>jTo>~vPSB(G5{rrV)bz8&q@%G;5>8XJ1*m~SDpM0 zy+<82CERKGwHZElF^UY%c9zl>Ph~bxxDUrGYwP$mCfB zDss_1I^j@cs5$tOscJ5?;UpF+1gM88Kg~*uhIH>f7~R>Kw0uAk_2SH5WYHO9vPAa2!_IUAh?fGTOZgJ8 zI$}`DJ>C!HRCw(Dml;Fb{{W9j*kn5a5}5!IEr3Q101OrZv_*idQECB2Q3(L2hycV$ z1QQd;07Mo6MSuxczk0v{g+&?xg{>g#3zD>6EX;dDyN(jT^$q)cqmAXl;&ps^Z1d&g zZE`^*5#oM%M~s?wdN@@Huv*sPh*0IH_W}iCvA<=SOtB*HY7#nB`|vQ{OkWgq{xo#= z;}giMM>Sfs)87{P#HV3LC0)|bU+zfsAio^ppH3}**S ztrpo*6XVBUm)bXI$+NVMN8hj31ju_HrqRdWNM|CHM&1{=1&@b5oO)&>Jo6={IymgD zTJPXo6Ri&dGXzqcTh}PbFClbljv6B^Hq~%1P~xH4!8(fTz?am7{Z_{PG`2!sQmZ;LJT(va5ezLzs0W}Yj`oGMZzntnhIg@TR*KHhU#m;1ThljTT>*Cm zny<^pJe%}r#}xZ3drP!EnilZb{?LPS5i*(rE|X$-FP;;B-S z9E`Vy5JR>sIT}#X6xFiPe0D=z%Qj`(gx zw1|6l{{ZNmye|x653 z)ykPzLDYvREaTxM7ffb~`;Mn@(O}v{Vw8diExSyeJEpm4JR=SY za;Da~h9tWk*td@KX15;FmD-f2bYcM`Lr|QL5RypJf{;#5_G$Wc25dd)VW}b8FU`<` z!P}{&YRNqu^tM7?MYo$Ud^iFs>3Zjx&b3-&5m>gY5I_JNz)%D@)Eeq)T#O`BNEWs= z5q4PzER+;OQQ4h~(7L3^DX zcywWN*@SkDdjh1PAxTIB0;Ll$GdWD2=?E!6cPcIum;?etR2b;JS<_=}A#4ifiDM+l zQqY2U%)pVoMq$PYd}RqRE&rQjo$G8021T&YAD?y2{1puhPKu{8OK#4>#YD`@_ARu617!eE(kx&D` z04s^05KLSE@Bp-!tbpY^!^z@=qNBA|M3kC=(ESW#%LLQeuN0l0QQPTQCa5q|lU-ZD zADNF0aaVBT(VBy8IDE$95<9>L*CJ3{xMX}ur}->l@4r(uA`YCdH|#47f6Xc z9j!L0Lp482{LA>m9|hYMl#cdGRI`##QvU#7+8qub=@j{UEVq2ILe0+z1JJjpgl>da zqcoW{E>{J45%h1IJMkbzzts4w=x+^T6i7QQ!4O)0sp80f*4k*s0 zE74rNuX!mO#5W#;T+8(tbpjD7!7Y!BB>wVp#HtyQ2^B=50ur7B`Up-}*i8_6UQmhcBHNjVC|xaeKe7~Fd-wc<*WFyjKC zfB;!=+m$R1WE%WNAwXoZR?E%J*xFRaWdi}Js992ou1mt2a7Y@M)}WTeJdK(as^erM 
zHJNI~DQxp4C^ITri>bjGxhf8oVhkb6D*epr9>1wi?Mty~cD473P(saAoFjCQoLE%u z0Vn{0ha;F?6t6rIe31Mzaa7D>YHmYk0$EN3p|B;u43wSa76PRpZiAyGAh^wGnj&G{ zpk67Mv;Z765|C=uW3CgyhANycYQ<=}_%1s?X)`4Ql5!PyI%k<;HPzhik(Vqz4#zUZ zv)3suPH#^hJlGOxlRw1Klj)C?=)T+*)(rI-av49Hh73t610=?Uq+_1lkr&i(C@ z{mcfLGD&V8Foh>=np7zyWgBsO?>qiw1IrKt5aLplsfQAxGk9bxZTkNJlR}T8V_~+1 zOOP?Bmc**Lshd+?N*Vy%0|4DnnMR~Y?Dgo=@PoSGfz{F&Yg%~Z;yLvBQKo9(KHICn zp;SAdhdMPY$T26$;$*fj7e-aKJ3_LCbJJbYmIsDw1N&&&KNj>ij>fweZDNu^rF;nh zW&OG7hn!a`*w0jYhUK*5LP*dzGfcj`Iv0jVXyeaJZ?rZr6NqJpfT3|(XdJzDJRU3d zqGI+?Ez1Soib)~JE0*Ls zK4TrP67cu30PhHNu1bdc2Ee2*5@upfG2P-ym=q-?*N3K{nVCuZJo#+CP^GPZ!pT8W zhAF5CmL?HE7cOv`luZh9tRW1l;YzW+9AB^g8rSM+8zHqxB`G$WG_?=_O2T`_Et1f* z;iCZT38)XctnU-}*)8bPEE1NfYETKEEFJSGOUEpfo$PWH1mIXUG=(#vN;sU-P;^yF zpLG8KGA32E1ARD32{n~M2|tvOk%`S^r~(ufQ-$LTRBnyEGS_$8(4eMN!m1Wt6sm@e z-Q+6u7;`+t6q0MwChS*|LnVnMP=Y}%{Y*D&}Z zeGk+Vu;8jLKtT&mi5<+;Cg?hPj$W;16AuocGMWW`t>;C{#(6{}$zfl{p&HR}s66}AOT$zb>k=h&X zClVAKQI+N@TZjo&GC9W!$?|XQT4!^|@vCp5gK%Gqnye}laHx;wB_?7AIe=kOO+ds_ zwRjv-(T>}vfADF^1r=()voyPtv#<^-C=pfw14b1E%2;C|G#=|OXWxNYI!#^(lRNu&5jq*4QRiV3%Fr0YgGlS2o2PN`7kwyTWbFRF^Vy&_eD## zOQ9-9cv})uYS@Z#2BJku$)HMijzQf)RO&+F^+%WQTOv}Xz*CGGD5NV6d(}un2nA|& zrmP838?K`E4W7M%SA?m9Izt4#NJ&aY@J=TwJB3)B(+N#xv|c@$)s9$7X_Hi({EzAb z5F7~oG|xcYvO?%n5;7z?8Jws()HjVcK@bg*J4-b$LhLKS`kdOQ! zr4*A=TzEeYOZS*sy8X@RqJH9+-N^+0WnpT3BqR*90R>I14O~8mEo{p@y%DDV!#t>F z-nDGCas@b@S`}Y#&-9n8!u~}0bguRqpUNCRLzCp~;P#hBXsTvMwF7xc(?`jfLF?9> z!Q@KgNt%9xGf z$zY$04iQx(g}bDroFPGtQ4@^S`<>Gw6r=fpayWTRfzNF7Pe@mB~2PN$$utf#ieN&$;6-; zAyoq!0VZ0KLmWX!Dk-id1S&91TQ6Kzo{KMbgG7@?dKYo}obk{HIszG~#SokJbXBs1 zH7M^)xaIU7b*MP|6Ee^MI6z4b9Gqdz04WHTi7PHOQ8uVi+-_%J|l+`hT&&^RXuHX8R^9ILv!jL4l3MLdq9v6MAs_+bNOq4sG?V*(vDb^$ zvE`b%x%EDUzHyz!b}>nf?<*laIlKCO2!uEeQlvoI?5Ro3j{ry1gX&>?5-rGLn@q(W z!A>Q3M=&s92cxT{D|qts6(41vXO^Uq-V~&qkP8>)2LAvvxH!CT#H+vPWXVROm6mWd)8yoyUV<^z%SN>66V_$0 zZXa(SnT_z$qZGR+U7~GqLP`=7i18+!Ko8&p_z1z|WodCp=wQUI*^*qhOu6$2??UEg zd#Oc0S6R5wfSwqgS~JExcvPQi{{SS;hD@#ts$ZKnJ)YQ9NZv%`q=#8ji=;A+vm zVNf}f%;Z>SARuYv(Jj!mvaxTx>x5B8oVuQwlUT@qbS7V91AAmmvlnfxT@8P%my>~=^-?i~;tL1-!rDpgdDgb>^S$*AsQ4AOcV z7eqGA+rpa_RFXJpN70kI807jrA+UPcGquqvw05jWQxD2pTCw<8jwY@il>vyg?#%B; zjr&EncT;!wl& ze*Rw+w!IEkk&5G(8()e*Lhqr!zH zIh3IZ{?w8b+|3V&=MUp<*$H|&%dxF>BjQW?^Xb;UClsWU(V{vu4VAi5j4CDAZ zYQl=jP3XG+037s5A&Y`Il0F|$Xw1+F9IEdos&Eg86(m!Wn$n#bEl%iPG={d~!GI1b z22akcUbO(7Y8bYd!L5mNvN?9p!@LY0fUE}Ol1g8Z0qQH9Xjt3mTgZ_adUl&GLXHxd zgf=8025AE^@T_EN6zrzms2H|m@!!1}XOhNpKN6DC8w!F;fRe*q;ee8{UDB@YKsvQ} z=`S9aW=7k68P?Z!vodi37P)ez=80--G6DfI!vC{tEqe-8)@3$S93!%o z9F*+q-b8>5$O_OEHYA3JLIR*OI>Z6U&VZUYu6cl(Ib$XQXhPMSKu<$?h%pYy>}@hi zp)E+8*v5TDOYSSP-|3*0@ZQ8y1Q!{{VBZhrd|nWrJr!9Fdz}RvC#cgsX<= zBh0z~0EmuONJ)A(!b{|elW&<a4r$Nc zB_#7T5z>bm`&N#APEFf;FLrrv#B~q$Zz}PD@7SxMh4C$e0q4_>L*E!g(ClokS$1H| zR(tKkEV-6;r4F^=LkjQNq2}YFOx@;5ZQKeQ)MYzRhR+G-P)LnYtKuv9_w1tHduQFu zC@$kr3lYRSyEvfBA~JqvTa31BjysKxuRB_2AcxZuaF!yD2rL8UMMg{>B}&)kEiNpI zu+2ECaUX=f>gePbrfk47zI+vFWaYM9{2`sF5&<8S>A^rT9DsiltGUM{{Su1RqV6c1 zvIt{@5O@TDT926l$((~ls~wG&D# z_dd=fVQ}mTJY1R+N({?<+A~Qx7EX&oZlXvzCn2X(=>9Oat3a76a99+gK^3D0W+VH0 z0rt|BWJ#s7K)J32hI%!A-(w7f8J2l0eQUGO@Ei+hd?+~s;v-64k)F(dw7$?xfY^csG`zKR1*AS(fHmJ zB(7KbbaDKjCUE{zx&Hv~jiF>>OokaeluQYH+Y_h_XulDhZ~hA!y1(koG4k><=W+fa zCM7sbsYOPatGiPV$U>5|uQryss2mpTg)R6{du{&!u*Kxc>tCq5Pj9{Bf)!_S=lFRx zRlPEq-b+NxCoCVt5^7AxtSUY%Q`-LkKVxj<>redu0CFzyzf(ObAWNGoGyJ(SZ4%4o z*jAE*M5w0-R_GHJv5GWG9e!V_TyB{2V0M{ujt5bc2w)s36d~n>KJW&KQzcB0N^qo( zW@+DNN-?uryDw8X0W)}}h?)jR5%`X4l%u|t7f=TlB|3unxoe|-H%5c8a!5Ey*TX#y zM_Ph;8pkc|kCWA-MzDiM&!$!5ml}9oe zSFt|ILewpDN`lB_r6d59zEo1NrBwtg33sW^JDvxJ%l`l*x;dV2mB;@898XNxSs{R@ 
zzj>JTa7rhemir%~(+spOj>3FN2UqcclcWV%CZPdA1uy`HfRR1i0b)$C0U#3K0Kyt*xfT^N01224!Z43uXV z{>x3w9|8LUeA+oVF-Yh`BuXXcWSW&^dX$#%a{f3^);aQok*zqDn|| zOLfER391~v;kAfLR%mXo!i2k6LB&hr; zOJX&}MiHtwm$qed_(qZ0;6iGI;YnkN{7ZfW)kK`;DuB9OMaa_#rO=F$W`k|Lh8Ir` zKQF)943eZS5#Gp|7LwfbPZL7w1=s+@1CVT(Iump?qPu=iDoHHw$Oop53xTSek66i) z+BHIvTe>!Ba}?|Dgtr*KmL>)ja%K>kGle?pMF=IEgA?va6PF_C8kLH2P=Ej=4nUIJ zfDO=;=+t2w)A-PZ9Mxbu_-EAJ|F^uT|}~hQI<;r31D*t zs#HRgy)mCoGg>bDIkkzJkff=Ucd(W#OVk2VvXjMu1Uh#^FqNmXB%tG_Tu{+G18`%$}8{NmCG{)8J4lOS;?y<04sHz%(iWcs(X4 zQc3>+m;NtCM>J>R)KtMGf{7|Q(u87c()+|~hmdVCj`p|c9E+aI-`JQ32||tzBr066 zr7QZ%k_|snjz|VM-d-AtuiS1a{o5_KedUM|m?RJGW02&`3TWZS2IM$6n;ZBomdkE5 zkTf1(50Nkb0B`j)MB%Lw2~kp z08px&@JfpwE8~StZpwQqa!aca5BWzjPv8C^@`TBHG$%;YHfGDXR;MLZ^tUYGJ#y#pUXI0l&H%psAeXqcOVvHP(*FRjdsXw z`aI=3V(nJhgT0t#B_yF`IKP4u1kCTrI`&GZsq*7R%^x34`--XLL zK%rYRoMfXBNN({tfl#EfKxGo=OtSn#G`-j9q82Q1RYecB55vcGfpW@_Fp#Avl7y&b zj~~E=?xajf!jpuplA|_XPsQ`w`)2ILo5hTuDCz!39>Tj6vs#j&I#@GOvJ;IHfgd`xnDodx0SW!UKf*!9!9LeTv(xFC;V(jffnKWRKoSm4G3b$E60lchS z*n)FPS{$WC+V9zeC=j3$5(NhENi}s46y?EC4N^c91~XF*N?jJoNl^iT0OX>QN}L>$ z0b@;42o+$-29N#9YuTyxdD&hvCQnIWgrH)?DV{HczcXKi>O^*YPXm|TksQAx%dJ54 z4bgU`00agDKpQ@h>7fM$QAeJf+?}#^M5t~26l2M9;gM7=vD~Q@llD;pL=#uF06!(W~bz* zjB@aoq-c+l-?Fi6?6fE01oUyzpn^SHlqLAIHD|uct+%szrB8&G$t>V$Oq!G?go;#d zls_ImYIRDj&4I9p(N1Os#SCc^1|Bmi>K_5v?SfBR->I&9`x1ot-_(VoOnJsi$W)X>J3)BKI)5H}M!wEx*ffiQQ$dz=7+ZqbX zhahK@^)3sHtKq12MmMoim1t$PHt>)`fIPB{ne5Sp6qM~FYvq@Zvlt^fX2%jb@+g>e zCysUT2g&|$$0Z<5q(NTWFqKiUH^fMAYm*moR%-F)EA-1Ov{9}5JKQTAv<7|T(VLJ< z0-??Has(=I;%Zaosj_a#eg(3I^Q0Ca3?RD(;$Efrc%0aoM!PAyZMHZPql~3#7>G$@ zfl5*j018x+_^3m(kXo^e_F9*-&%0~2YfvnuEm%sxG+ipAEzMS%l}pB_05J<xB15{W9JoRvO|eiKp-YEpyYK@952sZnU7F1Osn8dMZa!mLVJBr;P*4^~$2 zW)ymyL>sI2F|=DQTPT#|fC3YR+@KkFju*nm5la$6hztvWNGp=E>!HPiwE&I$C18AJ zkEI=w=XQ^sI6OjIq*hf%p)AO?O9?m^D5Ov^Y+|ksCQB(PsA63tB${r78rg(~ zA&|?1WzIX8JS=cl*3LF>CQnAMcUZsxM^-52$==EP9g-%ki|b^pt1LMxD%kB7e}4Y) zV57K91z5DA0@j5z1sfa%Ku{Q>Kmkxt0pOq%L_i5J00DpkS~)-m7z+ao1c_0J0VYIL zvSW3~inx-P)@_h*g}BjV=xs9U93rYLn;A~H!O|j&a$_0S0N^6DiR4Gm*`dNK!7}{X z7|hv0pdDMzDDfhGqa!EWcybM;218c&!-QkWi?)pr$?{b#w?QuSPDeo8IPlf;0J??_ zYsF^8kb$>eNlS*vHy{ODt!e-uYQ&``l1REL_UlknFHBS!ngGQq7^_pgh0+39*(EAk z*v82_lWFA*!!QM(%94}}zd~6}sVW5YVGh#vD}M!oP57Klkms6!92|@#M4B;DfNYe( z#B$vtYS(4!W-mmNL$b)H4+@Y+Jz?r!26;zn$~N0xryddxB3Xm80Z$6B^=dAa_BS^;0h4zCNVAFW+S!xY*Mp~z9OOyVVe^Jpdm|O11Mq) z>|<6uPIvOXdm?ke+i0 zD5(5I)av}sr%ZXB(n(@tuAy2v{{U1)kP@IW-VS&G9Ja4Yb0dsZq?P_dRGXE4Wv0_; z-5Le}44evaqG$$n%_n#fhB3KY{{TPi+s|4o`x!lDCkl5`USz^T5DCi~l*vkX0^9&6 z2snI@ndKJ69OsR%^I!I8Q?az$(5SgM zlxkeNWtr59J5W#p%wbL=l!<$opfdBq7-Uj6i7mv{$;zx9%qmf8td%?HuwC}dO2nf~ zz%_Hk`UcFuI9CSyFUpHmn``9D4ptw|zgFQu25kq@Y>%ivncCE(hf=@;n5SHZGI(W5 zHg{NL`a+&Wirw?uq?w<~69Q8E!6Z4!l32bF%+3idlt1`+M`He}lOxf#>6mi?CV&+N z2^9f>4g-R60JUfduhFd*QzeGdOF+a^GGepBiEEfkCkO$BL)23&L?xN4^li~~Iw@DC z*Isso5h}rG5)B)>Q>X%_ClD3hcIKheV(~H8^K4($b|o7Te9TS>b}LRCSct*{oKEG> zfD{3uA%Z9+k$3+9C8px+cWChi%dx6j*dfY-9TdbBf!sK7mTbv*(y*`3`UrVfi509Ylx&lNEv{iiQyrKp2PtMgRjq0f+!N zfD&K;Kmiy4<=ntcgS#*jU=qv$VL%oZ5dk1vuzElTF76|;c5^mpDHDGuDVAuXiEA3o zkqc^x7}^jk6eU?+y#V1zRL3HXv^K+C)6$;b&t5UkpXh9YuVy{97}QIrbXN#=HE<6e zBq@WrE|v7xbiJQ7M0o!q!Q0?TIXeegWUIX6@}&Km@5P(U1})7RwyLiFgqI00uP&P9l`4 zqV!UY6J|SIOOTSML1TfL$2BClp-9~X;db6j-)K`U)}6MKGz?`J#$YHcgUo;cBO^e? 
zxYaX{wuZu0qPuwQ1f>uXK{_^{Y`kg8?Oo?f*HC~M9xQT)M-`FF zW)rnLN~zMwatcD_Ipc|66ITf{Tq%nRv1rwf3e#m=tfknlYfR*(SxSiv#;4&?BDFa% zqk@`-XO5F_PU4Ji`egmt_*k{tNF?D51ycD?F$X3MM%I<4xcL(FWtKSmE;m2=c+6SE zt3d-c2!EOMX3{kMe63c5@W#Ctd!zk0SigRvNX(`Dbg#avm)4T2MbzNBMWV<4oO1DF zRMpH?J}@wj41{46%L0TUj=O`wLK*KIt2BKWKNtT2H z!SE$7nTkAFA8-AG>&1E`?BD7AECvN$zb9;|mz*v5(tB*T@ktI(-~|LT`Mw{-3B|2`meM3js#m4M11}p<%#9UxMFkpW@LaXa4|Cxh967=IU8~u50p( zFA1+@UzmwIC;EI?>4^y~#1rFFpE$blCGtn{e+ZwkegfZUdlf) zve9*Z4J;_iM6uFZ6aN79hW`KvzD2(+mYWaZs>qzBigNv;>%@=2SLDmtJ!I@o%%A?+ ze4&fRkHDwoN&6-&-I&NuNJ#Jy#fCox9%(n(DJNqkkIFouiy9X4G5oEYK>VZ9FToY& z-j3qwNYF&_MO!)x0Ewg<_H{*&aDtH*0cVK|7MnZ(C`y2Xs76o$(F5Pw0>Qvjv#3M_ zSfH4-5FQ8sRv0V*&}alaR0Ti)0ssx50UZ3K`;f-cp^f>F$AUnAR^9)Oi2R5VC;c-V)Vc=AW;AaIIsbUNq~_G z02IW4p%4Ht03ZO~00@`>NH73CPyh&k0s;UDpaCXS03^wP0$ivGu_9aq*mxi!z!C2# z39&*{0bnvIu#*UJV=Xejcx>W~Eyc0CkppP# z`9>G?>|)4}$z7Hc{vw~lqW=I|j*`0~8pN+0kFH4BdnJ}MhAa!-k^Loj3gfe)?wqyl<^vW9F={X3wz# z_ni-J!1)$D=B!!Zk!FL30k=*Af5&~bX!hd&Y~43!iQlhQYtJp+cIn20uh;#z;)RX> z$IHgVnTh$!a70^sm#kccCDUp*$(3GzN2oV?qIIo6*GP$G}XUMw{$>d@OO$#_k9$l zI1);2m2#VQ^%m0jcBeSI3yPESTWK4-_I7hDI9l0&tLBQ^onx6J(jn=P$M78)7BXpi z(=J+l)|=32!-?xz;4&n2VurF6jf;)YsN++e7;Y}e3jlX!ls#QJ)tc8Ky?k?>^m5*g zTJilyrz?EM)0*t!Y6{0`e8;9chrP0M@6}Dsb7$=P3H-s~wmR(7z0%gKIC){PDbvog zKmTLb)Xx)@Nz~@1lPNlOC)^0>Rc!9{!eF&`Nc{Mh&4JvXrTYDMmMNF^ zWk_>m&&xG~3x}NNjHnvM zVFfxca7k2S^E%l4Uyj82oQ`cWVQ|P`DxtTLHAm{i=1@l*o3!5Q)f9$tUQ%G=r_=ee=CO@EVxsXg@NLY_f}n0=L(7#-tATo ziXVC2aypex{G9S}m0L}5nY#Euew6^?yWO)-rUb}b?T7huQ~^8-F4u3n1*asqrY2}> zd#&4J3wb6|&iYE(-^$b@Jc&!839o?%9zz2YuAVn_K0gaD!fN^`s!^#zzc=)n@aT%6 zTJ|4=?_8=@`{O6r3R!#pe63A6zfg|ef>~K_>o3e?5cRKJ{)N(I{R{tm1 zN`=9Lr^nn9CrixA3kOe3pr?mBN^j)1?$+C1Tcj_NLYWkkkHca5(PxXXA-pjP^P*m$ zw%nm-dak2>2(k@tpjbE>*Wd|htAWqKT4M!%2g)&9eo_&oSsr}8r??SWwrk=NX} zds3En4*zzG9}!iKrKw5`#DWlmdxR+{{Q}%ibWEx=BJ1bYR{p*_IUAm5;&y9Z`)n63 zyOBn3!0Z+9inKHf?!8l0)>La%p6EeA;>3Sxv`|Lu{Nd^$U;lc~;oo(~acLoJfwxQ4zY^AHYoR&NRZj?ChO`7{p&3|DfZE`LrtT4xEFD zRY#5>m)E%^+T8AGT*+k38IqxKc}py~FMG)2*19{|jivVu4^>1S2mMS6C5VkSeV+E( zqI9&>f?ML_9~w%-9Hvr8DekX*@I+4L7ex{>A+z8r%bi{;mw8GkS)uGsuW=#PDeb62 zgKF8A1zUKi1k|pIZnBRT2+>>tK8@MXJW`G%S#UmZEq1_ZaHp#iR4JVPuh5># zQlL5VAk-t1WU9~pAAkW|>JJ+|4eD_Bjoy;ags3i95w|UFhU~p+in3-rZz^`C=ow zOF6rP@u$F+F0^w1kay*EWgX-7+nL*A!tfKl9@t*xu!^17SWij_Db93`;?My8=SaCUnPEH=-V^5=KkX9*g6on{m&sN+qLB2B}GLPOqSSx1>M5ArA* zoDN+YJfNF=6eXT>LW#>&U-#yBk;WS>SI;Pi93Ou!h? 
zyww*ry4kPR{v~a@{BlNfVQ6YVo^A8Zq=}_UCzK&A1mF+W^JndBw!Er_+6d5*IWFp@ zU!FjgCN^NIVWci|(1L3gNNQodAPf#M7We9ZpIGfLg@%!d&^L%A+{;Dg-7%&!I|(0q zt}%%B@v`=5bE+!a8^_X^hDF@8mPxfJ?62<6(peFh3g~d4@i;6nsqf9wvb$4J- z%>;8dz20}}d}xA|9D2;tf*TQ>vfu=1X-}I{2*Y}q9is5+xrQgo9$r_S$}5A0vIck~ z#@pYXy*+q+t8rig>c(fWQvJHgp-f-*6Ml|K{*C>F#vzeri7NcsqGFb-ZH;*o=!!J{ zLcXZi|BUCuJTLb~eX%Ak#GT7pTfTE?(DCY>I{+Oa_L*9wyHi{ecPO{Z^ZWr>GTKc{ zQCo7tlC2{`^L;|`5qSB1t&SO(}MeLuWdavLmf3L zs6D-qJ|a(HVY-gZ ziRMO-ayV01XQ}_11|?W`UJ^{ zF}e?-PstjigeYDTA~zz@0Fxksd6s#SZEtv>9g@AY;It~hIYAk=!y*QIZ&T8o2d2Yt zbP_itsI>2pt+_H2`mD@onPe|OA9#Ag4;#rkw(*1p<63YMgX}HlDA0OmI>X(YKEy=j z5^C@{<7pb-2P&}z7uxK(JkdlnIkPXDT|C-`osTxr4b-?E=bg`QQaFy-Lz^F+jphe4nmC!^wYEk=ulRw;Cacg8 z7pNVOSJg6aQ$ISh=@>Hs+w)w}HfA*thL|1kSMzPbuBU|tv&>3GcoNHkyZ@)s5NS6# zsGNt{TX3&srlYSdIR16~Bob|mC+M%k`|=t0(rmlM%fKQ&ly}(c)=q=c^>L$2!FYN- z@4~4rr=t``rwCbmu-JlQnVXV-JgxG5mNc2w?@q_5b+z@t~#_RfUGRi<+ZfX?zpSD$Y_=Y7Y&O+m&f!50)I+co~}(yW1@~KeLOORAXp>y z$MH$leA-nh(tcwllif+YNV7G;ouV&3K^eVt0IJZawG-NJyfBq{lW6EJgQEuYhzi!l zFsHDb5AOBqqXCk&62ida62{0LC%gex{(#1Dg-yw=UKZR>0$)=fDZ%%rn~cL9&6q=Y zS}vXgH--klhJ|2TL=rS<(H9L(Zi0+XsDxrRq1#zJ$P^=E-$?6lEmLP-6GxD@&n71O4F{+*sZQzqDA zDCnm0$pF)H)5}iBk2Q1#N+f^Y5K_3Bq}Z-G-wj;-e13KkK|NbrfGwsOGhtS{iQUam z84^Uja{dyXPb<>Xgfauh>ckxP_Tv~>vNZ>Gb#blwgnrth2M-e610;lxF=y6E)}k5( z#1nLij1v2A+J_k6rDR5cH^*KTTbP4L&9!vKNY{Tpo8P4QoOjkf9Fhs`+e3=Yyhbd? zr06_u+CGqAhuov^^t2uPqr<%7%NCpz)B)1R*4IYfQF(0dy_VAB%9fYc;W3)++b!Hs zZULyx*nMN^BTna+ajJQlG7||%i+sP-X1wWv{u;vQhP37=Wg?wygI(ys79RP_(-< zH&@0N`}u&LvIvw*VBV@b*E4U>Jyv8-6W*XRqnkH9EDRE@rN7fXJkl!Pe8Tsz&4uTM zH#z$+77tfmk_&Ja+#h#SZLvnlTEn$MxiJU`YLP6*_N@h{VuSu&L6x)`5uFUYRgCBX zk3(@N9m_H)O_P``xnDzx8%EsKBxu0AC&&}&)eIvXIO@uAuF->SP8d`fCd-xGb`?EY z;Z!GfrLj7}?@TT<;y%i?4>MhzB7KnC^aC3GbscX(DIK*<4i9M@-|hbH6P!|Q{Xrt0 zHAGJ!kwU^Ht%z17kV5VzB*hFLp4fBEQ)P0&qbvK;-jd=q(0&UpE$H2D{B%1aNR5ap zm0dILGP3td{oY+=xjJ`JNCXgGk9EjMDoWtD(T*AhxTBuwSk@xYN%AXv`z<&gbE?Y@ zuIEgCQNxuz7F|rIanuxeen9E*6~58!5gZN+t0Pr+RG7 z8&-TnB7lBF7;t^3bd|7ao)WOgb{5=jqOEy;!oQ3XC)uj88SCPLwcff2y6$n3pKw~n zi;Yd^v9(%vosNzNUG97?egvKbmdAg^`$RWGhvebxOajMM1-W&;REft5jYx9y^9qUu zcj=B+Y}jliR3ELU+HwGGqnH|5H|34`_%rcAzjW4IknGD{_LmGbJ>lgf0U$qdj>*bCsZDE>TTm4~88Qih&;FkBR7_56#YSlHz765BM& zkx2PPUT|MPLw%|T*)>&5(rV8G#Obm6v4_hlt7TjZ@%?5WM6Uv@_{_wH4 z(3M2l1LUWawoaOzN#_1)XekL2uPxcuo9_EC6TaJ8rV#FF)EBkfWOw^K%sC+rIe*c7 z`gA9dY6TDLXu2l`_`adBTZpoh<}p}@{Dru=;=huAarXyZr);c{?WwK5g3)!55m3*c znDwcryPEt}KrCG;o+9L7O!K)vkdnt_>zOLKJ`bh$D~=g|(h_aojmDT7R9CgF-_Cm? 
zK6eR#|5FPtV*=?kKoQ`5^v+#xa&CnZ`XS9Xw6kZwUm?i(kh-0WWsxa;X%9E$Y{-S^ zVlQLDoWbYnjxpA1_vdhXf*)unkb-DZy7#xS#<-?egyQaSDyujPa+6`l%%BIq#=axG ze>6J6ppZ}NLJLet4nf=0foR8pX2`7BNH08{ynt0~uF6XG!uoYE__d71U+}-a02!l} zys$OT22}j=#)=}8x$lYCTnV^cJoYfJ$9f{%3TuaTgnWY7b<|dUtOq~LPdR3SrfLm2 z1S$Iy_NBLUk2Tco+La6oS0CLcE3EoJIg~2qqPb9IV|BpAD9oA?^Ac@*~5n?%MD7iFz16IviD?ZSz4})_#syC5pU+1 zJK#=2ugpk#V%{t;ywx#7ne1UQSlm8#&M%cjLX$bx=sYn9l@Kc2FH?MGbzg)!sw;)M zQmVu@k88-G{=yHS&N8RHyuA)?los8pgxhML(#<#E!-t=1t25BjLO|Fd z_ogeV@l(2gN7b@)&voIa6M~R2Kx`y1-9v#DbSo#d74y8CM^II&|b=E`Bo3?6az(`(yRkjN`>c2gpOd8J;so?XLn; z!`Vd|I{$6F5s8vFdTM%{F4Y=iQHl)V9r#hcnpbBM2CKQ53 z#MVTPArhkbbS?Q;!LyxvXG;Gn=>8NAM*}*}G~uU8;aWWm9rg&kW<|`xj!{(rx#i5T zKyH>mLU9Iyr;q6Y>8DpvK>lFHe_!+AH~@we&^Z(#wu?hhI#6c}jQ?xmTd+T!%L&M8 zDwW!CXSGbH*Xi3++lJRBnDBaK!I;Bh1_<>htCr^fOzv`=X!Ue$Nm zM;c`u>KnM~R6B(jjt}RJN9G25ltvjDk!eDu9{X?D>^;ACCO)^(+uiJK3}rh4CtnYb zOY!JeK-|C~c_G?Bb~{y~V>>lmfhnj+F~%+G{9XG(6e+rwSa14?XTcr*08`?&RMth1 zMi18p7WC%K+H_#s%*xL<@19AFhdI&Nig19YB20zvw%`IiO0@2+qV<`3@9WsxWDj_} z53Y=<$;=_VUK(%-J_NYV@6xzGKJz#qV#}+UxjcvBEXx}sZVBZo+%&m6hT>;kE3Xz}|29+FTu`N@w_jY~A`6h7pbumXuYhiwAZ6tR!GNGmB1F|UO zrry`)F+?GBzAj5IlHNI?}7tk{>%3gLVyc#0eBUD>n{e*1`gAYrk8g?r}T1&jyVM89z(UK!BE5J z;iE@wf?HLCdybav&Mqh!DX@=XU$x-69xxVIa37Cr575b`H;B}NGX-inlZcM>xVjoq z5e9A{?uNTsP3My{RO_KT`aE4I#0f=#>(IEkTAVkcN4-a7lrF-b$>FTlATIE;`-fAG z9aqHR8}$XfeQ`xrz+NnV#dk2_8^F5fGA5)OAn_{qnqHaBr=buZ2|K4O#E1;Yo}MaE z-{~AJ&y7_l3-RVLRI1cJMad5ZAJo*w7w7 z;ig!OfeDN&F^58QguQ<6cb)`01vrqeD$OBI9X6GoSjg-DL<%TVm>@`-eQs0WKMWfOx zM!C9w-lvlRA@_GGF_&bk8nB+Sk?`W#%Xhw6vrT&=!#^!3?n)3hsG3)lPFY&5hdlfn zxo0dmeA9djF2V6g@=Vr?i1c}?U|usvGrS5eP;Ff|c#eo~(p^3kOuriY&$H~dF(&^C zwkSN|rE*n>s&iX2)@V|4ep5zgp(e=HJ$B&HqQh;PXBXa-+rAQPGzGFSI;GFcK^G=C z6G751-*5)?5b)7NSGx*khyi*Y&n1zd!4;3V&@Y(%joWMKY{mK?m!8^nEQ|uySW;fj ziiy(F<#i#M(o$HKiuc=A`mj-Qp}lv!Kz`@wNImfDU>rZoiXkl(9w9q7=% zZwIGZ*7J&79z^dB_X-mfYZ=dTc?{jc+hszyB&qfrH>9o|YvFp$QiZ$R)G-+oOvbnw z`DZHd?MnO0i>l!j@2;H_7lxD+W#9I9?7cKS9r5v@BA3DNuE+=tV^w7}pIe3wpGg~( ztX`_YcNW~ZAZ8^&>J43cAiG323y#P`hp9&~mAPMF-$GpqL^_=)aX!weZN(zd64n4Q4ofxVxmk+jRR%fDC$L7v6s;vLv5r``bB_x0+RF$Fksmv>z`)_>#r1WBYT>924bCxCTypbybks<(AbId zfa%X4!v=E3VaCypN%r9k-Q#BU#qX|{0@5rSPgT~`9v^S{=LC%rCVgLO@=J^ukqsjq z6<+7S@SsjP!fwLsgV@f7k6I$J71U>rB8i)=_SKy6isOrydo0}doq6jhLu74j!4-xt zaOn?a((=?KEcVMy&31jnA!EYR*@H4xV%+sQAIklZMnD3h!-;*VW5Qo~os#9$h|sU2 z_E5*avCdn`Be{7J@7Il0tu4#n%*~&CN-l}MqZhK5o<7x-NepE#lFNl=!WQ?# z6^LH&%N}?E1zS7n!bj-^Aw#)>GszM36F0mpxNx{-#rFK}k@fpUE4PNIM}18xz@Kd+ zM5CXf!H`0ketA#Ogo!!N`|^vZla%G3X0PB5)9$OJ)x57cStd`Xa7Gjz3vDpncoe_7 z;p|rZej7W^ej%gsdOAwSx2Lv8W*$o|1rQOEr*}HQP<(gR%EA7MtJJ*Q(ASG^57J

4esiCeo(Jgo zQh%Zo@aU1g7-OV;rxB|=^#o`*_O%^A*x7OYhz@|aIKzBBEE+VNihe)z_>k*1i3sz8 z(Yd8AD_v)Wfi9y*-jp3GwO0Fr>TV?DfQCiL7%O#+If{bGkV;-LflQmZBH2it_y(ct zr%ctV#HtxIHZ|3% z*H`TdTpYxF@ZtjhAyu%1aE?q~rZ#n;H}vo-#;^a7zi8S-U7pM?z$Z(xr?C?0G{JWr)9OqrLxDpv=?5=1e}a?i;)D#V zNg^}U1a)FVGQA&cCsfqHv&2Z_V8je9@fBg|4lyv;Y!v*ci2&@rw%O}|oLmyhiNn^MS9Mm2B z+gkhd;E|s1w`YzM?997(V5lK+c1~5`o5nmPg^Hxyk|yq%^V|)qK2t}0oh`V*+uxNz z3qVq%d2(0GMfUv0veQ$KpeV@>``gF)>6(}=+x%KIkM74_dvf51C{M{K{3>GWDEGP< z-wn?gszfWDCiw=a3~Km*mI^+2e(dgeVP|9Ko%8%}x7n{e$ThArHkp92b`Z^>XlS0q0erONM#s=k8*1Jho%7lhh6Vq;E-m==oL{0%^Llj-qcO!aSH|A} zRL4(9Da0&hNnIf{Ip~OdUSAKlTOq47ui*^)C<%V#8W^7|DSb(ye%L8vqN!%_TI&(5 zXw{gVwd60l4#fw6l4>+TT&U4ZCA-!CT!7Xs>9{|o)fwLSf4lZl{w{)|+(8I^U1!xxn1JSl z%aulCydjWe;@=v-J343Kob5{c??0u^(jG|HW&@8v3O$u895ks#ftU?MnT0oyTB+6_ zVqB_LhO+Q_1kEm24&*alWma{rmQ~e06Eu>yKzwvq5NDpvcb}nBP4q@S2Zdkf*B#@J z=S1hn&wsx) zq^WmLC~MnM-GfCSrF0i=3`vvm!@Mos<7Khj3K1111>42+D`-3OFv2;7$*Q5 z>3Pm=s$Qa%{lR%>K5XES$^CLU*l!JV4BV`;1i7)^f}3K#;oz^4AU+3pOeA3Q0_s6= zb3}2y*jMnv$RKT>w6tFlO^0fMw=RLj6M8#mZKE!~dX&#@2lG$vP`3F__@BTWFcUUS zsBR7)`g{tgldgO46GEm-InrG~I1NvrpkWL&Pt+WbbCeJ=R$ZEyeQ@uQL{XUs>*mXthvbp3F}Pp7vw=S*Qs3DCHCX zdOC&+ih;!UltiqacIJV>9Eef%U|i44iqgellIh8wt#;Nc9UYo%!HyBJZ+4Y0@qZ$5 z_BE#gygEkgA5mh-)CdDJu4g_Ing~ypli$&V%LpCv2u}%CjaLwD zn@}s`6zNZc++=c5nXCdh=zzG7xJ+};R^qFe`b=vLBv=ov2$xOynxE1?59;kFde;OM z|LwW(%kHA|GWd1*W~yXqL|hIk+#_3Xy;gVOz1#Jj!n4q~NE9?*)oJ^rd7?J>@j4@f z`VT9Rc=a<6^WW~@CMvGlFB=Y^>+rb*{ z>dZMwYuZL-<|PgtQ#;M;QA1^GoS$zy~tA|4JvL>D{zBa>Myu z@eXp|GwII&Q!s_!{4quhhBoaw}H76WU@l7n|$`8(QiK=x%&9# zkFN-B_K}_kZ(PX!?my6nxUbBvqsR+nej}Q5dMSw>+KB~MGwU8}oPcqCXnmZ2*xm79 z%mvs0v8;=W`rsDG5t?K$6e>mm>6SUn@ZkQJ-DW5nRrCM!=gUlgm#G-K^MBr*wCSVn zkX9jvpC0l!ye!o5rLuuOGOVcWH;=XtCr=84P=r8UkfOU_*kG%J^2l+{O!1cL%F3!Q z5AGG14~ZgDNCVO=ZCsCg{XQ}~HN35p(Ux_l(Jyr0Q$!(A=fTOrmuEt%9s!6vGFT5E*}iJAZ*^;`(S6+ zWOnqp@$r9awzzLBg`LY@QExcKjJwe$YmZVt6Wqg+t)CJi_nt5mJU@??4Q)QrJW0%g z+%<7+!?o$jzx>Ba@J>e7?g`;E1Fa5x^E{NKCtZj|w=)iFFY`^|GF`E`R_J1Q2r_;q zB+O06uP$+$HAq)T|(4;gh>{ZtEF8{J!7+;OpxUfK!LdTvA&EAA`m{_zH zg2wL1E_wvY=J?D3L+PT;C%|~jT~q&t6lYRi*f}M`DOc9{B~b9~ltXaJvbS(n-${k2 z=aBeEt$AN-u)I;0#u0=IRldfcxf-RD!i$72gP z?S0fiY68@~yuNjH;-?FD@7{z8gWTMB;g5oVI;+ievYlN8Eyw$WDa3zF0^{iu>u8 znDxI*tYfbX64(G|t58UxP5KdaB_OOs#w+Z9I@fA$>#x{ha1AFlfbU(p_qFb@pFemWrK6{A zVWb>AB60V<&|+WwlYu~^Tss<2WpcTVl{df>GU|WhBC&@# z)L~dmHXSxV2Mkbm+7<;FgcRx{0x zu9HLqD@cjjQwLtInfLFk=*zSEqbAvM-*&6+Cid^xVj-D=RjMe`QoPA`oL6kN#wE|5 zBJJyqGk8rBkHO6VivW6nvJ=&G(0E@D^m~kl?fRICj|P!-^3uvQV-&qFz|;ZH5Zn8i z*UKDw_FS`4LthX+s8@JiY+PQ#sxtlRHvo98dIN7GAT#D<)>9B6QX;&B`zGz((vdzg{=``{tuHP z$kc9IvYT#A%;z<2JB^3lCq#xQU)x7<=mFv!9osOOHS%=C#NYnmLdzeobxlDP*+WA^ zcuu&`#71Wsp2~h({dU&|zE=`gB$l82UxsR*!McGb1$I^*nA#Es3rZlWE!#huK1?%t zs+3DTlE{TwrEwbk2v?~f*4#8C99lQqT${FYoo;`()vIex-J8i?#95P|(M;si1- z>nneCNE<{i3qi|e0hz$b(3J6YDnj%jSEmqIP6OjWzp#i)*eycJ>XV&N+AWxIk4%@~ zYedElkYj%#bH$Hi;4K;8YzC}REdHNNbXwK}#vTYjZXg>y5X5YpIyHD~ESdyRUDXG2 zY}jjBi-S$dD_)@tJ&##CY4Bf;D4$P_I#Kuhz8gsUzNTP+wMU;-07#{Wlu}MnRL90o zQ!-eI4u>VqaIXxL3&(hH)t=932jJaZsi#KiK&Sjul#W$8zr~LTred&Q^$l~`^^9&b zWti)Y-FXCR0&;I0YG0VRH-Q>D8k$!gi}XE)hLA2u{J_fowUcA-D_u1g0;x0*2zuH; zH$dmv5sE1b?j|v4lwlU`s11I2(}LSC2aYkjPo(SV^q#C=pDh4vE)@imW`dI(9v_w=xs3MqU17>u z%28SO-!izloOKdd^OK0X0^NN!<BP)UJpkmPbM_NFzf5>nr-G*s?n&(>PiYYf5l;rM-$4VhvrxeaFJLkt=$;@DV$R`hfG=IlgW5>#8rn2HQ#5byA5pzLg zXDil`5B!MiYg+Qlsj%}6mEqyR(5G4eD3zL~S|g)q&*gg|F)#mT71zPxtBEz}${x*pL&WrM9sTQc^5Y6`+bN}E zV5f;1Q}`r#a_?5NnHyrTWk)l{=4(HLTxLI}LK&r>ynm?ie8f7pTmKTuTqYpN%UMyh zeiKQPD(WyhaXiOYU7R&5A!3%jyB4HT5q z;vE$rj{wpJ1E{oLS!>`2+lhc16B+_vVO^ksSRRNLA-kdHNoLDPaqR)Hwc6Q%S;{0jt7ydwv-?aC;|iotb2P$!1x3)6@1xR 
zQz2VJd8?&?sPf$pRNKMx2KN`d2@JN-OKQ$};FZL#bZ5N*;x&sOc zyqEorf@7(Q_nO+;h1-?s>O1Ip@Sem|#b97QS{Jcxn${eopUUYMjyss3;hRbWZS~gl z$;V{cONQ6dZMq%mfe!&YWD&jEqLx4NkwS3R=y>V@Z9jc<;N_U1AcB({+|zc$%T^g5 z$3sF!HGeZW-dB+)Q6?9Qo+MvClLa(q%EOfGzij=6_5xW(UGI_^i&u8!a2yzFUq@dfc_}hbcro5{3x_uj zH`gi0B8SXRhi7($C zHk$7OI`-kQI_4uVIe=htYP+ap&4j68@9Ze$X&pU8zpm-}rfH8glI&O=+Oo^DW8YLf zV>Z<~oGkMT2{%@XmWho~Ty&&nm1$LK-7f>4O)PKVa}A!C55@}uZg_!Q-N2~br*rCv zCB5S1r;{x>c?G5a?f7~>FBgpt?h2de9q$qN8?Ko=3=cKnOp4)}_5ddB#4xCGFuua2 z&AI({_O7t8=L>f_UHT+@r(A?>NF3K)(0Z|0(sUD|J}3ZT5LUD{4x-<@h5x6Pw&(8o zWXAbX_7&twEXT)1s%oOC)mUR}cv+jys!votBD#Amw8xdjQnpiN)1#FzK@ToUJt;rc`>q1PAZ3K+*s|unM ze(Xg=qDgUK*g8M4x0n7qCc{s4nEvKsL!0u>$He9Jz0RZgj?n(Sn!w(rO;9JHbP($x z_gpzcNN*aCJb8&5BiF9GcmJ&0$N?k0Z`YeK+PBmeXk}eg8T`xXylXIylK&_<;!8G|!O?~3f@LXeM0j}jSw%X80`c6gR8?KCD$nOKc zAK4zx_^V%5^~is{l=1M`o9yrq!07%*)<%#N`?nL%%?G}R z507iE+gF_%wMz#_aM(nfKIs>awrVy=!D_T>2kz0>}liWe`RiZmHY^a=x!lwWeW`qm zk@OavqY>$m(EyXhvM5-!qY)cdhv=uAKLX-_0O9~!kKF05JC&r}|3VN`UJ}J?0fC^L zR>%!d@|d)}!dUYLZB!}86bM=`INmWU+^wRQ32`Ec0Br`?Ie@lJmW!SX2sNDkFR;np zsf#oy4~hT)|$LOp_)bn`zi&?X)zoS0kuN~ryHj6?kA)M(?sm<9b<=#QcEd1 zXd)DGBTSLOGY)Cb3n2_l1qHD-$joVA<(WX70?S^l`YGczqn%1JbFKq?#FEU23%TIq zBtcCK22)CaZlEg};e^47Nmim}T9yKK7JDnq9USs1HVz>#qd`u5d=fA~PEa<8)HfDS ziA{aL49+ZT_DRp9o77Hx5oj1(ZcxsK-c;NdPur@!HMf%G(o_E=<)0sK5cnYB7@ zBA-#XvCw@A1aK@kGsz&cVMnCVQ76pG#MZcITFmH9#$xh+hg2p!7O^2GwG{4x+>H}c zCmF?U^1R2O9gfE*K^n7S`|!RAE zC&{0P=n4_T=LQYa(Ry>sC5`Xx8ss4iW)H0-&$1=f55vJ~q1B)sz8T73P7_i0-r(gET}Wd(y% z6p*`N1GZ6tcAoU-zv*3nYS#LblbTCrJO%WoplXB%w4AO4NghG)Z%V#(S+eKwKZZA* zbp!usz5h&Id0(ywhhMJj?MNreE#Ncmz&4S;O<04#7ga~{hZd3k*Zpj zfAAw7Dv}oh)wZ}VFe+HBZn^OE!lmb{4`2@}cMvjk0^4d6K0W666K)n7z$FoNSw%5O zUCtkiGj}<~*h#GZ!&L{_xW*eEfFazaLPc6x&(PpvyyM23{qlE1p8BSDsXN1B32wwq zA3jppb|5Hk+K7}9m)?jQ6ErSU|Bbx7#@P)o3cK`SqA5WRI6qX`i~j#SK+v_524?fDJL`~j@zl|puo zgL#aXMFxb~!3@ts4+56;iy|>Z6@az!Q1yVh5|w=d2YV8u%s;%3zn(J{^#ElKS6Gju z-YU{>53{{Qo;mh8!>8ASGZYu=!_J;|q+Q`O;fc+CJRUD|lex?=zlVO+Ifepo*KnAS zvWn|(^atrnclU^)bD3jo+r$nKU7)@#{2vySGxbh+EwR)_A&3gs)hdA@tpr+dN!>p1 z_b8O9_Dg~XCfNCtV%z9^Tge9AC|@RL^(nEeamNC7r%2ZP*x=sdD&@1&@&amdrJxs# z$6+qEe{~=xRS$wwc;%!R;;93NPMSScv9Q8ySG;^?{^S zF$h7!TZY|AfuoUg)H^L56g@QAaBqVMHJ7=EaS%X!;|Bm~1Ln=( zd}X$m*puViWT(96*IEF$;sp%Oxv=699L392VaJTM1g(;H4h$R?ze>si$)@Sl;(yEc z3x5V@l7f&Sa6pG0a|H2RsYipJ#(?JCDHLMraO7f7xmKkwe@SC)u)= zV~kxw-ZpY=QGNTqoR3$2UVdBhPmxz5S40d2nLH4SXonLp!4RH);+r8PO?mmv+fTk> zy$-*59xXNpaKQQa1EqyVh@YAGa4fIso(1=pPsTSlXx&z`)0a7JD?=iwP01Wtre|XU zoBT02R9InVZw@sC<{=GRKj0RUW?7A(Y2C^{2@6w?7NTX+A!v$B*-}dy#YR4C|EpnQ zmuTIQxZkUJ)>9E6mURd(Hiru0Y8`gUcSr6LA4x1rw%}eVmr`{qwVziXEN%^j-6ob> z&1>-dwI%*QEPeE(3TO*286~*<5xr@wa6H65hr8}qgPZDY_{Xw}T{G%09D|n;PWe_2 zp8w)$%gcP(GqjP&e)8Xn#;4Kj2lB%0d*u$l}atX_J)P^55&Cx{gu-07s1YjOPB)M|C_c~8m;IF^G zN%Z*R$o?R#usobB2XpW4l;A>I!?gZ+#Qf&#Ec4dai;v3kk4Nl2e0LyMwqvgq5qMnS zv_8-Dc1tzS{bn3NFF61Elhqwttef7x=(v4<2pmayM1ndxV~`wEX)G%hF<38|ZI1su zGWmWFYgxT4U>tCbEtN?SkKXX<3fFMp*PH0`I2yx>8QrqI9N@YqI*M)-!s1Vt?Ds_Q^qYwopE=0f(86g-T*8xO` z2+D|z5QfYULK4Uz$@M+?z5S=pqdt(_alYq#*15#VQf4Wi)1VQp2KkMTyA%ySJK?qb zWro9-5%Ac?(fEmYz!M>~tz$vm)FqL160ZUn(7glU*yRZ7N<*PH5u`_v@f^Y?>Mo?q!Loe&(0osv*w+<5kltj%IbA4Zc)+go!G-Gs$H9~XTMXXz# zlEl)3bQWVp1!iohnl9Jdj z%OM-LpklgaB4Jr$9%G+R110BIsKwE$ z!aO_MJjQbPR~d%!A=xJUrAzJ6VQhetnWeSr2<;L!+z8_l-^Jyw2tKNH|88^&l@XJR z(f9VgGP7bD6u<`8bQL#5RIaf1o5}rdlyWH2nzz72kYEc2$>0zNkua)2*QS4Umgg{8 zd}}>|qe^BoMQVA{oa1+=BEzGY{deANVhyAE(@f2l z8d>Q@PML2vNF}DW#x8#aPs+bfhE_^@$+`Y+LxiFwHcvO4zR+_nV~^jBE6Xw-MS|CO znwYy@ornU$I5^?ol*7GA7-_anoswZ)6I=75ddjXyKPPE9{XfMv!f;j&8F#>Bzt;zX 
zrs)Olf!K|=GUSKfXyfNQm?HNMi{@e{B{7{z@hFAT72`W?Ny?#DTF;5bx*{E~nA=x@KjlP|a`*+kAE@s+vE z&0DE4*+)`*5OPRc6~1-#`TpNt?b;f!w0l=Y@+8AJ`ej@y^yTBlHa^t4erBz_y6*ms zW25tK^OhZp?U()OS|7gf1=i)J4tPgk3+-E(psT>Yf?*&+SaQ?d>80lFk#l?R`V%k4 zdXgz*MRvW6mO9QOJP+4)Cg z4;}@hL%724`D-hsNh64Sq48?)%??aPRp%v>k7!5Zwt4^AWi{KMqyaUu$xB4RuVEqf zCsf6cx9{JgeQKF9uz)P|CxNfES&ZX((#mURM9HqF7maqoUF4#(!6ItR{T?&)*zGnm)VQ>K?Evfe+8 zK9GqkMYZ}y&#ncj*a=hwVv8g~iq#?GL3w6FEG)bNfDN-YJ(0L0^*L4K)-MCip$WNX6`oqP9wd zcD>iZKgbSBybKxLH1NLuq96C}^PyBW+`=c7y&6o;?{lr(j|QL*Dc~s;`W|m?K4`$q z^5Z1*4?Q~VgPTqZ8Fpfb6F_tYbt*mf-U{jBg+3Sg`jh(7&vanWS!l+d)hdyReu8dX zTeR^!+uATy_xpI;P%_6@P)b3!<@5l5o@zClBs5jvq}=uZlrB>NqS<_&kwfZFR>-x< ze`}+gH#O(yz(0sf?)B66Ct%&+Q^Wq(W(EpNLtpTEU9tm6LzlqpFis5}DPxv>HX5Im`&@BgVx5_K@Ug0^Q86$OPhFa^dBn*r~b0*R4nO9j~W! zb|$UsxIcQL%A6grK0z#TtF%f7xh58P5bboXx04=t9c5b;qmOtx1SPrbt>lO`3F%nE{g#)9R#?#g}2HMf%j0KzzRsdo2l-i2-6`SX+Aop}w39me$_c)GeR036A7tB&a#Rc?F>nzjYUNe&^X^1~6I z2N^EGq3?tMkU!660#oRRwM=Y82Hk|n$aQ6&r0!q;_Cx=Enta}p)FW3YY5HuIsMWiN zq8D7*$e8;6ee{gdHOuk0S7cd%LY33kh|d(p{-b_s z#;9zr9t$Q^L_hU+{lQ66U}opcJ$vivk2@!MY%%N2uq{=yw3{MFz8n1x%=IUl&2l{c zCExlsWp;7%9iiyn1tO)s^Taay&_;rKg%vMU?Uyl`wu95OoaUPFmJm*6xc89{7IVr7 z?-%iwh1|UGRXAogW?WTwX#%MTHkNo57n^e`4xCQ(NVWa)%$FTJ<6ir;&I={V5A)P9 z$~)=}J;dol1rH}{-4J1D!oeqj9rTEgyKdZOH$Dif+U9**jaDJE067LNIw1uwK3ImS z&Cjgwf6ccX&+6uHXU>JrqGF;CViy>6v1b}_q~GpoyU`@w&Ys?h0^!MRh|WzFokas; zlE-ZEWJ^L*%Dzq#O3OKcNR{yO_=IvmYUS#iC7D?E2)4ui~vFgQZGMUI$*Mjqskb&LymT4A%4$=_9KQ!_^8S#+NO;FpkvGhgWQv=@435?dX zbsN8lqY7&4gF!o4gf^^9j~EOn-=<`1%Im-q0srReQE=FGVyHilZ?6 zR_c(Ot{bnyyZwME_SiJtl@$)%uOZUu=}hmjobw$2yey&=6SqC3wQZ>c;D;ExZ|#Lp9Ks9 zqXD#vuKU%vZP+jfgK)t9uIbRpKD4x}t!1Jy$Rrsg;FtBpx+R4YN+PqP)gJ{!Fcr}f z)Dx&5VS!Iu!(jhi!0RYa6XM6lxbkea88?uh9Qgv>)3WVvD{)Y>SWg2z9cP z{wGbGd~5FX9zBhc1Fp5X8RV>#xxOh%HeX>Bab~8egW^UobD3I@LsCC=Nfmg$PF)4; zNNVxyJW$|SVSaNOA{fM$0LAeCKeMTbO~2N|+c#5pWZGp1UF9A`S#50<5W7hR8j8MF zpabqlm3p7h=wiaYaP}SfXt4M_5jxLBxamj z5sWAcOPU+<{`CCC)3&}r2OJ{+MxmFLmS7#TN%m?$Bg z9XoU__-T6yx-b&0G^giVC59APv0Pq|*SAxZt9)za8D1jE!~4QF z#8r#;#%s=wdjAQeFUBIli{JaQ02{&jb2m_UpC>X>`sj0Y)>@y~<@IIDwL2n5hVb`$ zW##w_d^V@Jnd-|NZiD%#;y_8bbGj-j&|iD-M$e{3_2h2jO6HBUXC!*1)jV>?(jwNE zj-N(Ydm`2>Oh5PXujB_-=@HhA$w)n_USc~gP-g10bkV#=!hjvhgPEbL(M?Ale!Goc zGtur@bW^|e2>Mr9Fe50HGlT8L^=vBh?Pr08aHnT*!HfRMw|04G%Z$RlvFR3Nwm?8? 
z2~`NV$6Mhrvn;6WvlB^ocHZF`CrwV8IUE*nN*4c~W5U$!+i0~JVTEcT0j11Uz2Nrh zVC$dl*M`!zI`mid1{()J?hR=M2T9jr?7;~9m}_bCw1eh#+8OclG`=VE+|9T zxJz~jx5-=!s(FrC5)7DLl5z!P3nw75Lx<+#v4Y`SE1P-5oIRx{&%Av zl253pZrd!U8Xe6r4w8etlIBhFDCW$B@kFE%FAar`Z)%t{Wog!<`QMEuhuRfcZH#;h zRG~dOVSN9!6Vmfv-_D~C#(Q2Iubw<0x(JB#CNaa^f|S*Ax*Mu#06F+C+Z!Y9%q<5#TKQL+qjs%M=F1yCnE5PoeAC=e2xI=={3< zE#h8M=yhV(O08!zyk(2nyKg(=%(~V(o5Rs7SFC(fr;T(xJA#^w35|IfCYU-5u2|-- zx!6z{vs)3dVdf6r{kquD`Ag?BKVgYh1^dCSsf%n5U+gusvdHm-$EmT>Y-+dPUTtr3 z#N*{c)5#&;^S$bpcS~E&3j#poF%F6~f1lPG(V+xiW;;p4EbqDQbnIH;ja!hQ5i!S; zCB3}O^xb!(slv%*CGv`dt5mu9PF}%0VkCvf=dZPV!Yu+q!CPl6C^PPKBvg?yU*-n{ z-FD51a+3(T6m8zq2T_EoXfxxab>q^+O}YMrcX9|-a8aUZuEM?iH9yxXD8KUOSzlP| zL7Bfyz@sm;9b>rj?g&Y(m9}LVF`fL{Tif<<(!%7YGp6>;Tb5uu4u+2(2rTkv)T#x{ zi^TT!b*X-5tG#;9n9lINAhW){F+T_8eGEW!KYEYo1wC2Z4efVTx%ThF;8*}X!#jd9 z_aI~3b^JhF>ZXM0&oxKV&$PV>yRzMNL|F3a%chNsA%Pa=aFYK^ETUF{CXZPY{|WnG zhEXizyW;G8a~TA%nNGC*bpJd`IdQB?x>OO9Z{?>S|e~d@sFvpJ_;E~-|N$#_Gf|pT^2azXq6KHd<`yoMK zSq7VT{(23n5)td1nwmqMF5T>}catI{5@Pe!-IMKJCOW$xQgOFJ_T$`22y&D`n%>oz zoIqgqoGT+$Rf*fl0Isq&&dUUnmfTAi%m_m^s0-givn;@y9%+i%R`wH2{MT>Pdkhy7 zcX)sjCDF*lUqo@BJdLP}b_gbBvBIMXNm#Ey??E3Es9?ezUFUDCY+|->zn~QOsBiAP z)`wLX$Xo)xT_2_YF-#S(*^rk-L+o|9XPArnVdnq(_Q3B(#lG0steWIEUVRstC$}*K zdMJb!pxh7HJd(S%5I?)7`+Py-QpLk;1gcmuvK0C}F=_L;xEz7K7p4;TWv>VjP1r*N zN$=f1AvU}IB1qs>2E92k?ZsyHSAVL)@K>Mm#|d-b$p#gT7(jxTdUAz%JebZ<9gg;= z_4`(sV?kOwGy*iwnATdGRZO<7SP;i(VzxpZ@dTv{*B>xAMcl5VVs(FAS)gpGc8Wen z?4tE7!^DA!Qfq#xVi9zedGv0v>+tK~a3Dep`8Ey*T4}&U|DQ#SOWt%xI$0f%f4^fpi&u?Na8Yj0$GcK}!ji#8@?pfZ%5VQc1xRvVKp*a^P zEi2tA+W&1%X;yRw&)IO$`{w#0>ye2-R*z6c^?S&zxD{=BR-{gM=CA}T7hiw7_rvm` zJNS@)ZTbDSkORC_k<@&i!^N~}h)`rfjw0mQl6!$U`yx5w<$seETlmTE%(k2xC+15{ z`iDb1MRSGDIgyA5=QC?Ma~xp38}z93k=fg1s^9CD{<^q{#-!F^=G@uP*~Gk4d3#a4 zN0^fH8eh){d#UWKd?LpNb8V>it*+RdtK+4z zA#@>n%njPVeZgqO;-)4aar|)pLc|8%iM-rG4W4p4H;4#FQU;>r2ZwRKMIdr9M4gS- zAaotR>C&qqY^)iX5L0tCb|eluh25HmeUAMK4e@ht7oAh?EuyR;;OXP)9K&;$83i38 z{Za0y(EliL!k&y?Bo#atzz8&$vUzB4RU;FtgWgOmATWZOi$Uyp4q7HBXl`;|2me`t z4jw?Ypxa648+V;61jJ5^NA1lf=%)EjJU7<9D?AP6`J5e&WnZD~>wr(UKYkpyZ2BfLw~!b>YQ7D+ z3hRGfC$*VbHa@VcLs#bdo@)R@|9#?5aFIVLOpIs=OkI&49Vfj#)ulh__ur!WF9w|I zkuHkN!Yyp;M=@Xl?lcoRzLGQvbo&}CY$0t+&Ba^xXFE9bE8B<{#wBhLkz|R8#8&d= zRUyxkezHfW?Xj?1KW$2imhpDV28xxr(+ZN>bjLN)sdFN@MQh;ZQ7|npIep%zb#M9; z6e2ij%!?qmSdK{cMcXj`@PVAv@P;*~MduxPr$XLLK?w8PGJaBq&PGV;0I}9RfRixP zgU31r?gP2r=-Z2LP9I-)#bIP!&ndCfJlz=p%8^z`(`=7nc1h%fdDDj$)r)ROSx`Eu5Jv1{8M(r0^uO4DwAL+w za>)_b%|6uN{}~ly?q2TKcHUVny7@9hM<7EXFQSFwS6+VQ}b4-{T65fN_rZRCSQ=g<3F&8}eV z6$H!{`cn;cY;&`hXA8Uc-IYaQ;tc&6@+TIV$U;AY+>rsf5^S5S(Q)EhWq)$!zR*rg z6OHpz59&l6`rfZMb-Sb^Q+mX8e}^^2I*xMV_2p*G<&X5KE{I4|P{FtOpuJrlwPlJ@ zR6phzEWI%9asbLNNmGC_S)4B;>2Zy|^HYjxKi<2MLM-#zI*fT?u%HPTytVs&%&#YXD`^GU0B6*0Oln~TPxCPSG>%t*nC!Doeg2)-G%I< zf@!aJJKXAu@U9olaEO86EIq9XF8m2}4pr>YcBTOalh_i^f3|4m#RPJSuU3S%Bt!Q- zD+FVg8B=e*VNNpzfW2h@7+nfH2&Qv9-TG3u{>H(qaS+a zro)J;d#h)aW^5?=A*_x$WbsB@$2z$VvaVbR>9cXAZ#FhI+mFx?HZx_=D;JLiw#<9b z%kymYV~F||Xsdv>dH5@Jx}JZkb4=Cn#Zu9Jfz_wn>MqfAh$nWPn7E0` zzRZ*cu<9eN-a}+j-Gr@R1a}2=671mtSII{+=@Wy7sWn-HUiQiF#~nkkIlI z{|vb3YYzTo*u~(>)WT=p=S1(ApqNzzbvS^!2F_@#mpPJ&_YgYV%Fc?D1(}D3NX^-e zzAk;SpCMp{rR4{5yAw$z5mo(BAa?SH1&e1Cm$xO3c}vtqt-Xl zzW7JOfJOtciUJL15>#BYYl}3Qe|Bs|N$54pZD7Z<_k@E;%N*y1G(lm%y#L(7dHqhJ zYDCXBMErr^wIrw&m}@}IZ$yKbtz-&w9?$(-}b$rRg0n)VwjwR!m+ z@Ag`4(btg^whxRRJt$tk-e#$v+ErE>AYkH6ox>xd&|%{6mpcg{#P zO8H^M77ZzDbdlvs^-^dTgM!lhUwYFZ>_{|n$0IS6Z@v7p>xcc48|r-(hjU{pT*$H@ z!7%4hbD-9{&bFy3M@rFR+lGG(yV(|Pxy8YIQ)kNvceYs65*<)(4pu*#xE$Q(r=D_p zgsw3D(86sa1jW`xg@^yOZHkPquxe|@TGRZQDWMj1bKUg)6UFEp>A?xFOb8c``_DfL 
z+GAHC`h$H3@AabSqe)HU*jxQEsM3UqKyyB#!~>5XmVD?RY*^-j&e_CwC+lh9#HcmC zy&yXe8bM(B3iE+(AEXGYpFol^_e2|*efv7(lDLGJSJgLqmX6Gx=~Zfa{TKMwk(=KQ zeHmI6%WS8LN2IQ|xgig)cKM>Fx;^^rN0o+fk?28-bQQiY2OD`X`}eV)t-0n-t2zIl zNq^S45Jtem54j;*Mo=j|I}`tRGvO5e<(`6akuM)JEuAQ z)`?#CCaJ2xjsP}Idh2c5Be=09#c6Q-r#*{H7I}XGW07_ZAhvsshqy4o7%D3r2S@j9$7fFa0;+;^M|EL?D~?U4jmY_9hKn-$~uuk9*_ z2e(;Q7v~zg82nMme`<5D<=uB+(x}4H;s>$@BBIP!gj44y<123aAFa_#rAPFA9rVqW z=%dbKhf6*zU3|I!(zpwvP9qIMD`%mLb0HIIG4-<6scSbBb0|X+gRULibf;0#I7qC)SfVdGfHLGtjsyN?U}bB zJGhR3vk>2xZVW+?*PVec9`($i)ZK$QUppI8&Nmj7wl!Z#H(R(lYGcZpV0zUfm}iS2 zWQGEnYWu7W%R<0kkpQKXfY*=}(w<(X()30igIaASNVkD}Z6(#{yU_+eue*^%))HcQ zB7WbcbnFAK*PpGd%7$`+mFIH)UMhhnh%iB3lY^-TW}vGJvx72BSnv!m+~I-EL+lk> zbwM3>b3jIlJBGhT#V<=SCXl+NsA*g4Z8JejEOUaa^z_irB)Y!i#Ka()u_mIlF;~A% zC;sD;qdRnlBJCJ9@yoKK9Z$KiEutlmrG>P4nBVo$U6fwf_0kJ}FyR2S`sdgWbYRlP zanG0?qtLM2dQ_B%b2ONCm9;?I9vxQIocU~k7zp25FVGiga6};)%Og}~*o|5mK*c#O zFUl~D7#~s%g^e$EdgOgSGM6-J1v;RRC#hRQG6e92GG^4ur99z~@jvHkHYmbs)R*>c z7zQb1JsQyqUSu@K)jp_j>fhsAd(UkUJj%1~o_f89Fm=d~Tn;%hSwd2~KULhPP>viA ze!u-*_xhZJv3YCv?_Ul|L&%4!q=3e4`E^Ueh;HEH%Wbi5EFbbMv{{54NcJxaZj3ZO=s)>N?UjN9assozc^a|1Xnoq-92efmN z|C;=od92C7R2e+=y*?N+HU#vih{iXsXwW;PWl0$A!_vGxhf{*z|4`PZe5Ox~B|trg z{`%?OrX{b+X#E_Pb~WDB;!fp#@|ZJkiU}4zZpxwtsxv#&>6ewCBaZ-n@niWF(JPWB zCz~}3i4jz1|uuoDn*;?doaDfO%d#jC8iib)w-Lv(Ytzy6;BwxWAEAB-0kBCIZ+@3AgVC(MQH_nX0z%f~18QZ8J)BPAAKuHy}P>v z93RY9$q~JvTFdyD%$DhPy7~)u=cU!{SdiJs_5utsBUBn3s6o`eAtN`+tIq6ly1)3p zjJ2H6&=e?y8m@OWY!}%KS&hZbKFh9Ii3%>(#NSE3bZbWGIWB>4xrzp1&$RdH2eG2% ziE#`(J{%kL^$xuw+w!sEWpjA%>IcQ5Bv$hLfo&P(C=-2`UVnNShl8-ln`i9` zq2*z#U*7VYHhWe4x8nMekn_1Ab;Ys2sk4D(A`M0$=4Hz>+g)Zq^I}78H{&MzS^kI8 zEpO((vA(rI&VGGzGe?=-6~h3Xx1b_A7Usz;{!{1B{+8`aVb4wR8M}*bs>V%jJY6Te zTh~e^0Zrywgca~ltYK<}3wy(lhjt#B8&ci3+31SbH%V=@@zrsOu}2X{YnPqla#l`E zL3??G8j+m?le4vMX{h_iwucrKhGqlg+&;%rA+Tlc%RDXG_I68MY`kL8#Rq~y{1cG{ zyP!;j&X5b~lB=I-aHc09>kksI-=9aPPkvyUa%KOHrB-2)#HtJnFe#dscg4Bc^m;8l9-R<>vufSm^@)}vbb*u9pelap>pt=GC{w))6b6=@X9YaKULhkw zdC0Zb7u4lJAa5k60s#)2-vBj~;s3Q|+fw=I%yBfVmZNV+KAKO2c@T7-oo$FFT5$}n zu}>^O{k%7A6OCzB*YmI#V64}8eK%4x`+DAm-_agnb+}q@Q#u-&h<(Anodk?$9~)PT z=`Wc7G*E)0hXfKY5p3_6=v|8TR2QKcm-3?Qy%B{O=z?%11eSg~hDYjyQZt{m4x)0a zrX{Janm}zPQXQ$Qw*J~Iy{y{~uLMt^MBtjm=3)XXeb`uE(ReN&AOo7-0~zD@8uCTW z@HmrLwstqQUUUkO@tHJb?(}QytdZhs4b$54khG!FGTv`b3QPOc-^5`PFU zcN1t`hi^MW;e1iXRGw#dq7y)JUV7ifr6~_4LGx=?dAXUdIg;3)u|kvjr(Wr|ZKB6A zrLfy}W&_AU%*$G}Re}Hu&VPPTA2hKdtvJ*w)v@l&YlBP*fHNcrmcICrp1>VPGAJp^ z)sl%@VGTb=F&*>;u}d?9(oA*BEp`%$NUfMOmbvaxM``2^{%+Sv-hGZ!&%Nc&lzH9F z1V?rY`U;Na*nQgE?4v%h%yNbN$+BN9P8TRHxXh9>-;2y!2cuO=e}fshHlU1!LB=V| z@$$yO_uK2)brGWEH7b8)Q!tx!dvWw2+5IBFG!5OjZ#{oNrElf%(} zE08AQlS{QgE@V7qj@n|nCHUk7fUg4b%93rM74oP_DBu3$jpFU2Q6D}nj8td|-5WWX z%!xW1xKx_rG3gy+s}4t5Kz_HWgp%@Tnl^! zjpXw`0m_;(QE4GLZ-iPFldW7Xns%sZX1UVWb^Kh#AWUkZ_h~~v?VNZqzDdb@+m@b- zOszbX1LhAo8&t>&s>3;8FwDZL0@kuv^?@vXV?kWqY9w7ssL6Uhyl8R*wiHXQ&GHB9 z2Kx`6qZ;>G?MHgWjcCjDyOup5P*;bVu$nJ|=dPe~&21MvXJxT2v9xdgyt55krs&?> z7-RZF=HnxAE24a!(n}peIx6MW1)lzG=Q~3>W3jcpXRALjUCKs@@v#RUfqdhOxP~oH zHC0c@b^mG}&U1P2*zNq49-9lhW|&r6v97)+%#Ga&!B9}{|8i@Ato=$#Unsq={pCNq zu6r+>Hg(_U>Q94-HunFjz5(xM|LIiQ;o_l<&3zW z`Ve<_EpN1L*te&a;6V#ehk>Rgsjeo1He660N=*<^_Z9rQbIP35z;&R@bn8DkVcbc- zMv)pvK4}JWVAH;ZutVaY-4l$J))5}wij}6?uM}TB%irp1| zosse8#*tH-ZUE!2M1(qSXu4c43ZU0hV+(o=+bd|=)y+0OOWHF|7ijq9Tu--4jxVsv zui@4Go0KPptz{cLpugHIaDi=rwBDusUTS||=>0+?lS0R9?=b%5fU9DVuCYj!xcg{! zvDE?hKZH4Z?|{Fm%jFwAdR43#ky?FveD|LsR?ZjlgC^gb(b@iy3BXM)AhnK?@5n3? 
zonWL-p}<|kS=~f)DeO|Ih4qLQdX^y!BQsW-3o^&1irBYNj^_Ngjw(ijBM97)DW zpu>|_AC*k}EDmqM;XeFBY%)mg6`Q`%i*{A=8OAQO9fmzy5Q;yZ!gbBLNPjZ}|A8AZ zdg5Ol0ld1ip<{X%Xof#l_2N~Z)0|z?%X#;1=S+m~&sUKLU9ZxA7_*w%TA739l)w@; zJ?$f^il7};f19b!t5PoXp9(7(P!WUmPTT<_@?M@*}D$6bEMY!Y4|E;NX zZgTkAtVXd3+elo~iQKTibpiNPTwF<3^?$O1`7Kq=eyx@z)zJ`mRifjNk z|BdKHmj8~c^8V}!tsu+=>hc}#pl|AZ32PB*>LwSSK9V)^;I|&8(3)|M_{*m03^xe7 z%BsC1?uW%;&0c)XMMmToq6DWrF% z{U~S&Mthrd%x;a6-Tu}x>%k4#hj~jCuF6CKv1^8mrgO!<-0R?J^ZQriM%$U7_o_JcY1-`q~3m-f6)rkl>Z3 zjED4u-nb0g4Si@oUyKq1-k!l?ksGvsBf+ThI8BzEe z%V&aBJxq(%R_OLdiJK8Aw@-t%Z0#;Hca^xpu0Y?HxMBo-tq5)>{?LQ}IBb(EV=6NR zO8d(aw5gTPh16|TZ3BCfOmeUoH^tx72R3bjd`MRtG@Za#1Rwi5=HWM)Wbk0x)rzB; zCt-f4;RU`2LD4O~v%#jPeH88dvm=ZBd0Y~sw;K`wDGv{MPX{<$nYC*AA+>jxBVh+L zgyCqE933vjT&X2}<$r3!AUcTS-&$y@zG3o4U}e+n!-9uzTy-?O?6v<;(H@8_6o#$nI_SzZG6- zlgpR&($H@G=JT={k(Q}%7ra%^L-tT-E6Fzsf&g>hSVB@5$^5~JcGNJiiBQH zvA5@MJo!`m%(}AD^q&t(Z?Bzbz4|Z3h5Vu{3Q}>f#2OI=5RjaY^})d%;b(SE`Y&8z zzT_-t^Y0(3ejGWh1|c;pZFnc>-kf6Fc-xu#-Xk%n{da&;QmT_YT6bDq|0&Svq?_~k z??yk4FeonB6j*KJ6RZ=0%CYQWQZD;^hT_o_iiY2!4mBT- zToPKwqaHbZ?1hH&syFP4ex5!FpDT<+-guc~LTv>o=R?{U;V~n#2%Q#Z+24>fveo#|wgGvLUn@%zFaxgk2>_9E(lM0%)Ukk~( zS(%aTz(3d-DKMXXa)kX6(WWOaiH>=!{aP}_j;BY~jpMH%PPddr(2F?SGEiOJUE zI+@zb1P<;BGNM*~=}lBSi0ngghku60wG8$7hS836CdnTK=_lD*lzo*s>ANqu7&b+#Q)SKcj+P>qr7);`%a|d?+~CbXQ6a8!HmIj72hkaJ zWd}eRB$zk>U&DdhFH~(0OWNY7854$@3L|GiPWb?K6tW#{RK9^CcJJZj@*=H#UV=P% zPV8~R8&NrXz&;@KzJ-pJu;Qy&-}PXmYS-kfKeioLU568tmo}sL9k_9$lvp5*Y|Vdt zaI;aJ+g7WH$(t@dhUyT68hqrg{7z(#M}eO)wtYqGEm|v~BNRai82{qj!CS%#0PS_NTYm!$M0A#`VA1)O+Kn|BP?% zwJ6^-239Qjmzvx`FPba+QrWYAKZzs+5-}z%Fv?0a|Q_(8PZUsCfc7Qh1C12BUEjj&0i|IeQ>As6#hstH3wz#q%5mCGa;|bH#hf1-ZeP`|AxHB71a9xK6&rxTx@J`tO6LL z@2`gWIrys>@2@u&i~{D|8^=@ycmvb}DA7fRzppDi`ZO@(8Nz?sbhhdVjbH4Szl<1vfm&&dx>(%t<< z)9`B-&o}+K^86Le@3-4S7=#Px9Ex-*l;%U}x!K_#GLh9Q*=ve18@quNg~BM(>s@iv zl)R>;1N0f{95F97AEfL0)h<9H`6LiZv~5zh&*7tr+(oZUO2Y!otLuN&XQn3*Lq3k-d=x=M<4G>)^E^o+h}bqELeIQm43p4a;~Ip#p^ z^w8D^w(} z3++}MNr<+s#3tU*>&(94E53k~Yv`VK4L08F>(!<{)-P*C8E0TCG$={Hw`RB);y&S; zQ@FDIZgQTeIs258!XX7Y2 zr_KrEXdsjvyaSd5TJer{77~P`m+Dxwb4-BnA8F5+LN-KzNITty2BMS%o9k#@hxo>~9v#`L z>Mkkr=?;Lry;H1HaJDuXA7v&(tP`;k#1#HP8UHg-SN{7lZIjL`SMjvwWy_IyE9dd1 z)9H(j_Q`AAyCW(><4H{AbHFE&mhVQ)gq3-zj`G4_irryrqf2K@J-2+zx6B;v@BT!C zCdby@rO6@pI*Ka6Z#EjDNpV?rd7;4Ql0Bhj-Nivp2qZv|>6%mK6>_XCFc`Zs&Czk| zw-YyH6eSH@p8M=Z2t3JD@H73de_k#1Fy1gr%qGraxnAQ00LvRn$?xW{G-%VdLS8AMWQyXa z8-*9YzA|C+1RfmtnErO+nxrA`m~5(wFE5btI{b7M5iP^(1|ixpO8;UBi#5A4^l-T7 z5Uza+WI{6D5^kI<%^$cRc&tV%=&k1q+E7N4j~8)5%Hj?vt0o1kU+=hXfb@x_FE`{z z8I(1kGWqPSnM2q1=wEu9JRee#3~p6>M2yNR+EkJKM(3vPJl{EPCK7Yka#*8fY*N<9 zGx?L<40JF@SC=~Gy~T+sX65%62P+xwS~rNHg`RX4T0CsvHz@<5OzHzit>S|NF*tX} zR#Ew{)8>=RsowLB)|^;2nQeT_Io2m)^k|Q@WO6qMV7u1K95{~UFB#-GTlRfCoxP@f z-K*mB`HpXvZv8bTG#>4w5vPw7&AZwHLw2#IKwq9Fsy*h<4WbKiOS(P2QW0GZTrU`&?VJ?E8ySCWD<9qA^b}GBuGj~duu=sctc2$62k)v5P-TbCYuMY3+LCr0 zT33C@*hui0v@6G-1bcscjwB)D-fpdh8?`B%UyV=oa4s2dnfFheN*Se3C9?CL$XqLe z?GQV{s^KMC?|XDYhe)+Qu8tV4TEcxhntWxf{D)>682xLK2%5mTzEss04u7GaS4w&L z$=0t;A0+#jL4}Jpyl^|Ckt<6Xw$# z+?ReZjeQkw@@rfLAL}b83x~#YZf#KKKa0 zF$KChT{`6qhd0bVl?m{{S_rb9Q${Oe}a#%F7z0PF6zb`hWXUx&`UkYKaKb%29WXQTu zpiy((DtmSGYj(VbQ+`Rm=P`F(5eIOzp3HPOD;S0d+m5a7CE@*}BOC6^Tmm4t8~|&N z1G|7A(6`D5@MUo{P>oM$v1HAro83F>W8EoGk21j_sh6h9jOBHTJl9J4jPP>U?#*~FaxC~gFKdsi zGOuh2q{V(IYx6)?A54k;H@M9^G`r5)iJqJmd)lDu23iXZbs?$2b^^a z*{gAfR9iuxq%t}wzI)*gxOk_r&+xn)?VXU->le0rIQ@DRHp0oA1tVN7Xfd%3*eE;a z*Ejr(X?`Z6%^gmPgTR1OKxsvRnH+8xMG>3R=6N8l#_a9D+;v0K#$9Ks?F2huvye9) z6)MM)`$GHBl<2yvZUe3GA1)rz7(R%1kX9xYE#43>4wyd50QZ+CBxFO60?+9}^r~h; z#UWg0*rK0_)T=y$Ozj36quuu(318PN1uBq 
zg4R~~W?d3@n6U5;2sxqb32a=>QH(HqQ)tKb$R+dpO%yi{oz2+aZTdKp;KCXWO{Y*! z%{>6$)G{=!KDSjGe7_~W>?4>TLWI*Ib`Q zT?rW+h-@iG=tS$|_peZTc;DDAnZfVnX^W41EE_knL#`1!AEAXArQO%V7b0^+npDeZ z7u$cmXGsIkANS~4Q!?e@Gt2uwpYPj(o(yfFHh;14MdtdSIv*j6QXxUFgmZ^$^UHYa zt{z$UW#=c0t;AIucF0aBPsw)aqSZCctCpl-4c)Z!m7c+(rB(U55^YIW%$W7#2I}fq zV-}nTFcvGcZarZCYzfEGr6FD1V*zK@Z{_{kl^;MGr9l+rtTN`X>GamGyDX~bvgL)u zn(s#K^A;DXutZTZgKI+lgqwWPG2A`CSWu=xYEub4%7Da%hQ*HJ0m94yB>-oncz% z18GTO?g0_wv77Dg3wS?d*Bp8zjx2$W)9l%QAvju}x|+vT zlTQ+Rn+x)s=C-1I#!F;e03Vv0J5&1xaP|{=b3qMmDs)y^sPpkSN1Hkp^Cuu{upoUC z)=pEqhpOV@w)ej@YCYaz5mzI*@q>0wqe}8 z)~xm6+p?@Qt#!0DF(@m^GFcz4x2#;6mY*!mOwG*R{7#;lGnX!XbE#<`R1OUZnI&2( zK9)mfWG05BhA3)cO3l1!5_eL9ru0R=Y5^OlL^2rKgp3r(3NyT8=I%5 zykK#!eLj&!aewz9pzSOp#dC>v`?ES!yShQU=Wsamh?#%scMg4x3MaoaMk4SxX(t4$X^_m{9WixWp(_)D#JYb}Tro`wuFL&G0 zK8;cwJOj=rso}^KMC)>G^n6f%y4QSQqyFMpnqvk~t=bkzW70W&5P_?QMY=PDx+%JA zuZQlP&CWaA9R>MY4wCR)cM>L=eFys5r%>z{`{*C->YZ|yh=UCBo(V8d0fhmwzL(1A zSYsyAuE-tnjFQKbj!c30hCPA%JPrOyAKzI=qo2njpL=~x>sn#kdXohX?=;Y}xEUDu zg(pj?ipj#2HzHS&W<^_%a`I#j#A#kSRn^ZStNdPQee}Pbkz%`%Ijcuntml*GM}H18 z`p<@kZn7cXvzSmo?#$~j)@{4}SF7dDIYbuTxTsuvmC8>Eu3!?G$%tre$;=&}lR6n+ z(~tO179N`WU^@G0gLq9H1-ilgFyt}eQvF_VK`VIb#2pwr3SA`uB4PKAe&ua1omxbr zQj1cpIXjM+Tk@5I9XehLLue_52U6ZAM30Eq79GE|yjguio$Wns<8*Oxt+S8F95<9{ z(*d)^XkF4iSVcqDv({G;RZZTj2F!OJeP#~&WjN_@gVwtTn3^mWha2iMC44VzjFyhc z{@lAJyCjzpcn24t6 z<+;?G8KBtVSt}2z)78_mya8(suM#lcADcQcCES(w)+k!6s|mrwIyk2QbBXOA{tx}T zf;E|$zF-7~{C`c7Ux>o~Ygh~zRG&V>eL@IAwzv?2j{esot-0l>TGr+^cdO!<#(YL( zJ`DSV^I`7l=$ZrT2$EGt1TxMzqZa#oi$C+u6b@+z|paLN~&T@oRRUm6`_*( zn195B7UShK$Mf%Y4KJIrwYv~%;WV#{E$lOpVIA5N?1+9-WGVStAr8{j4A@Y-67g=^ z^3{elSY~Ru5B^Mpj$7oi#}-y+G3im2Iv%Tcq_-@z>JQUjLLyOvi$eSrAh0w}N}s05 zVo67_;<(I4$fF8h4LvzQ-1z2}g!K{mBf-2hI$V<^$wGz}isbeXRwAs5&6{+|eA~Kh z?T`uMm#L;sfKEH*ek#43TI-2P{;I?q|x-UC}YDabHhUMxYK7~ zrTn)vmHYxD{mMp?9^*k|O|@(kl2c_X#B-{lxnG}$C{@ok7KtvxY zh&{Jk;p2-moPtnU?%F*tEh_4!a)ttMKAz~K%iyGY*Ee|jpPFQ6qAvT`Dk7@9y>lev z&H9S>tamZIs%&vSf$+m+iJN4lJey0IRALR2Kw;#|)QJ2j%t9HFSBMwRol4=`s#Jbs zRBjCYqZWF`sJvu>vEGHQPXFKAP4?Qi-N;Ne`~@wx;GUV2|0A#7BSY%1*aXd6{rAY%a#hFKGn zUQvRWodSLiWW{1$>f0`AqF5e)NFU`6e?2`oP zT(3ZSYA8|HNrl;;tTaz{NzKRwgzjcFT%oBEG(WkRzWR@|9?$q``FVq~yl_tuAc&I~kaid1 z$)ugc@?WClZz^n-&rVy`7FVt5G=C`=PT+$g3P^UL^YtrOf0!&~G<3s`4IA6GYdf8a z2EqSKF5s`QNI?G3?mAYE;YR`;&{E6BeDPTLmtZWuicfZjN*bW_%Br_ zG-YLEvqQlUqViR#{M(M27y_`_IBQ8o&iX{VjJ+$}Yfh~BDQAmFiag_)!ciSn1&;H+ zPj8aZwd|k6!+7i}p9-E-rtR*ZtTcu{FDz$%yChlEz0@CksU8~d@W8O z4cq~80y)Cu|2wk(N<8LI+>8Y?6NhewRCP2PG8j@sAZ8zrrCU!E%3%lP3daCzhGuO! zL=(=KJ#UK92+HSDq#TKYKOr9Z>)`@_e6D`5A2nvf8}M@Zpr#DoqFK+_e!b!BThK8@ zfmB;0LpkA59UTa(VA@CbIy8za!iA31vHRRBk$d7BQ@*;CeJ1Oy)g>9@+zJRIgU4St zg>*$hwygf}V(i}c?)kn4GKa_FevzP&^l{V_he`3HQm7i$gBHOzqswL`zP@GDak0&* z_uqKy4!fd?{{XF^&VungwsxL0&pwN9uoRio2>RQq$IRib^$tUyTNQyD z%59CC~mA!$kG(wuFe z!aCWZIj!#Kl+tha-9C9G&fg4Zh)x|7d8|U3zEwsm_qeCQH1>7lJ)l9Pca4Bu+3=un z+qqVy&j?1l{ys-k((d#Ijq<9v7TGdg`7o2e{e_dPh+yg7Roz#%%*ccyU_xZxKdIw~ zMNQ{T>lgzAwZFa{>0VOv*YA)ge&pek!R~-$(P^*yH+r}ZK_Rs(pU2(35=1JEGb#S814& z4Ze%1LQaJE!&(sVn1Q#`?4b%uf)&%8i-6fO^+A2mSogSwjJpa%t6=bh198XNe?C&S zk}_V)jBImBctrQ#-&32mexEKOHodQ}Y=!4^1(!X~DP06Vl0fvZLr>}$h1;TkOTv+M yhEGTpQkbb2@U2(u(;-uOW}x%_^xp2`PEql8B$oDUEaoGxbLi0h+;yD4d;brGvNf~- literal 0 HcmV?d00001 diff --git a/tests/test_data/test_data_sources/test_common.py b/tests/test_data/test_data_sources/test_common.py new file mode 100644 index 00000000..1b3bb27d --- /dev/null +++ b/tests/test_data/test_data_sources/test_common.py @@ -0,0 +1,46 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import tempfile +from unittest.mock import MagicMock + +import pytest + +from mmselfsup.datasets import DATASOURCES + + +@pytest.mark.parametrize('dataset_name', + ['CIFAR10', 'CIFAR100', 'ImageNet', 'ImageList']) +def test_data_sources_override_default(dataset_name): + dataset_class = DATASOURCES.get(dataset_name) + load_annotations_f = dataset_class.load_annotations + dataset_class.load_annotations = MagicMock() + + original_classes = dataset_class.CLASSES + + # Test setting classes as a tuple + dataset = dataset_class(data_prefix='', classes=('bus', 'car')) + assert dataset.CLASSES == ('bus', 'car') + + # Test setting classes as a list + dataset = dataset_class(data_prefix='', classes=['bus', 'car']) + assert dataset.CLASSES == ['bus', 'car'] + + # Test setting classes through a file + tmp_file = tempfile.NamedTemporaryFile() + with open(tmp_file.name, 'w') as f: + f.write('bus\ncar\n') + dataset = dataset_class(data_prefix='', classes=tmp_file.name) + tmp_file.close() + + assert dataset.CLASSES == ['bus', 'car'] + + # Test overriding not a subset + dataset = dataset_class(data_prefix='', classes=['foo']) + assert dataset.CLASSES == ['foo'] + + # Test default behavior + dataset = dataset_class(data_prefix='') + assert dataset.data_prefix == '' + assert dataset.ann_file is None + assert dataset.CLASSES == original_classes + + dataset_class.load_annotations = load_annotations_f diff --git a/tests/test_data/test_data_sources/test_image_list.py b/tests/test_data/test_data_sources/test_image_list.py new file mode 100644 index 00000000..986e05d7 --- /dev/null +++ b/tests/test_data/test_data_sources/test_image_list.py @@ -0,0 +1,19 @@ +import os.path as osp + +import pytest + +from mmselfsup.datasets.data_sources import ImageList + + +def test_image_list(): + data_source = dict( + data_prefix=osp.join(osp.dirname(__file__), '../../data'), + ann_file=osp.join(osp.dirname(__file__), '../../data/data_list.txt'), + ) + + dataset = ImageList(**data_source) + assert len(dataset) == 2 + + with pytest.raises(AssertionError): + dataset = ImageList( + data_prefix=osp.join(osp.dirname(__file__), '../../data'), ) diff --git a/tests/test_data/test_data_sources/test_imagenet.py b/tests/test_data/test_data_sources/test_imagenet.py new file mode 100644 index 00000000..bf42554c --- /dev/null +++ b/tests/test_data/test_data_sources/test_imagenet.py @@ -0,0 +1,18 @@ +import os.path as osp + +import pytest + +from mmselfsup.datasets.data_sources import ImageNet + + +def test_imagenet(): + data_source = dict(data_prefix=osp.join(osp.dirname(__file__), '../../')) + + dataset = ImageNet(**data_source) + assert len(dataset) == 2 + + with pytest.raises(TypeError): + dataset = ImageNet(ann_file=1, **data_source) + + with pytest.raises(RuntimeError): + dataset = ImageNet(data_prefix=osp.join(osp.dirname(__file__))) diff --git a/tests/test_data/test_datasets/test_dataset_wrapper.py b/tests/test_data/test_datasets/test_dataset_wrapper.py new file mode 100644 index 00000000..45286091 --- /dev/null +++ b/tests/test_data/test_datasets/test_dataset_wrapper.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os.path as osp +from unittest.mock import MagicMock, patch + +from mmselfsup.datasets import BaseDataset, ConcatDataset, RepeatDataset + + +@patch.multiple(BaseDataset, __abstractmethods__=set()) +def construct_toy_dataset(): + BaseDataset.CLASSES = ('foo', 'bar') + BaseDataset.__getitem__ = MagicMock(side_effect=lambda idx: idx) + data = dict( + data_source=dict( + type='ImageNet', + data_prefix=osp.join(osp.dirname(__file__), '../../data'), + ann_file=osp.join( + osp.dirname(__file__), '../../data/data_list.txt'), + ), + pipeline=[]) + dataset = BaseDataset(**data) + dataset.data_infos = MagicMock() + return dataset + + +def test_concat_dataset(): + dataset_a = construct_toy_dataset() + dataset_b = construct_toy_dataset() + + concat_dataset = ConcatDataset([dataset_a, dataset_b]) + assert concat_dataset[0] == 0 + assert concat_dataset[3] == 1 + assert len(concat_dataset) == len(dataset_a) + len(dataset_b) + + +def test_repeat_dataset(): + dataset = construct_toy_dataset() + + repeat_dataset = RepeatDataset(dataset, 10) + assert repeat_dataset[5] == 1 + assert repeat_dataset[10] == 0 + assert len(repeat_dataset) == 10 * len(dataset) diff --git a/tests/test_data/test_datasets/test_deepcluster_dataset.py b/tests/test_data/test_datasets/test_deepcluster_dataset.py new file mode 100644 index 00000000..e22e42ef --- /dev/null +++ b/tests/test_data/test_datasets/test_deepcluster_dataset.py @@ -0,0 +1,45 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +import pytest + +from mmselfsup.datasets import DeepClusterDataset + +# dataset settings +data_source = 'ImageNet' +dataset_type = 'DeepClusterDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [dict(type='RandomResizedCrop', size=4)] +# prefetch +prefetch = False +if not prefetch: + train_pipeline.extend( + [dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg)]) + + +def test_deepcluster_dataset(): + data = dict( + data_source=dict( + type=data_source, + data_prefix=osp.join(osp.dirname(__file__), '../../data'), + ann_file=osp.join( + osp.dirname(__file__), '../../data/data_list.txt'), + ), + pipeline=train_pipeline, + prefetch=prefetch) + dataset = DeepClusterDataset(**data) + x = dataset[0] + assert x['img'].size() == (3, 4, 4) + assert x['pseudo_label'] == -1 + assert x['idx'] == 0 + + with pytest.raises(AssertionError): + dataset.assign_labels([1]) + + dataset.assign_labels([1, 0]) + assert dataset.clustering_labels[0] == 1 + assert dataset.clustering_labels[1] == 0 + + x = dataset[0] + assert x['pseudo_label'] == 1 diff --git a/tests/test_data/test_datasets/test_multiview_dataset.py b/tests/test_data/test_datasets/test_multiview_dataset.py new file mode 100644 index 00000000..40423d97 --- /dev/null +++ b/tests/test_data/test_datasets/test_multiview_dataset.py @@ -0,0 +1,49 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os.path as osp
+
+import pytest
+
+from mmselfsup.datasets import MultiViewDataset
+
+# dataset settings
+data_source = 'ImageNet'
+dataset_type = 'MultiViewDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [dict(type='RandomResizedCrop', size=4)]
+# prefetch
+prefetch = False
+if not prefetch:
+    train_pipeline.extend(
+        [dict(type='ToTensor'),
+         dict(type='Normalize', **img_norm_cfg)])
+
+
+def test_multiview_dataset():
+    data = dict(
+        data_source=dict(
+            type=data_source,
+            data_prefix=osp.join(osp.dirname(__file__), '../../data'),
+            ann_file=osp.join(
+                osp.dirname(__file__), '../../data/data_list.txt'),
+        ),
+        num_views=[2],
+        pipelines=[train_pipeline, train_pipeline],
+        prefetch=prefetch)
+    with pytest.raises(AssertionError):
+        dataset = MultiViewDataset(**data)
+
+    # test dataset
+    data = dict(
+        data_source=dict(
+            type=data_source,
+            data_prefix=osp.join(osp.dirname(__file__), '../../data'),
+            ann_file=osp.join(
+                osp.dirname(__file__), '../../data/data_list.txt'),
+        ),
+        num_views=[2, 6],
+        pipelines=[train_pipeline, train_pipeline],
+        prefetch=prefetch)
+    dataset = MultiViewDataset(**data)
+    x = dataset[0]
+    assert isinstance(x['img'], list)
+    assert len(x['img']) == 8
diff --git a/tests/test_data/test_datasets/test_relative_loc_dataset.py b/tests/test_data/test_datasets/test_relative_loc_dataset.py
new file mode 100644
index 00000000..f3dc2eff
--- /dev/null
+++ b/tests/test_data/test_datasets/test_relative_loc_dataset.py
@@ -0,0 +1,41 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import os.path as osp
+
+import numpy as np
+
+from mmselfsup.datasets import RelativeLocDataset
+
+# dataset settings
+data_source = 'ImageNet'
+dataset_type = 'RelativeLocDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [dict(type='RandomResizedCrop', size=224)]
+# prefetch
+format_pipeline = [
+    dict(type='ToTensor'),
+    dict(type='Normalize', **img_norm_cfg),
+]
+
+
+def test_relative_loc_dataset():
+    # prefetch False
+    data = dict(
+        data_source=dict(
+            type=data_source,
+            data_prefix=osp.join(osp.dirname(__file__), '../../data'),
+            ann_file=osp.join(
+                osp.dirname(__file__), '../../data/data_list.txt'),
+        ),
+        pipeline=train_pipeline,
+        format_pipeline=format_pipeline)
+    dataset = RelativeLocDataset(**data)
+    x = dataset[0]
+    split_per_side = 3
+    patch_jitter = 21
+    h_grid = 224 // split_per_side
+    w_grid = 224 // split_per_side
+    h_patch = h_grid - patch_jitter
+    w_patch = w_grid - patch_jitter
+    assert x['img'].size() == (8, 6, h_patch, w_patch)
+    assert (x['patch_label'].numpy() == np.array([0, 1, 2, 3, 4, 5, 6,
+                                                  7])).all()
diff --git a/tests/test_data/test_datasets/test_rotation_pred_dataset.py b/tests/test_data/test_datasets/test_rotation_pred_dataset.py
new file mode 100644
index 00000000..6a5dc0dc
--- /dev/null
+++ b/tests/test_data/test_datasets/test_rotation_pred_dataset.py
@@ -0,0 +1,35 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import os.path as osp + +import numpy as np + +from mmselfsup.datasets import RotationPredDataset + +# dataset settings +data_source = 'ImageNet' +dataset_type = 'RotationPredDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [dict(type='RandomResizedCrop', size=4)] +# prefetch +prefetch = False +if not prefetch: + train_pipeline.extend( + [dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg)]) + + +def test_rotation_pred_dataset(): + # prefetch False + data = dict( + data_source=dict( + type=data_source, + data_prefix=osp.join(osp.dirname(__file__), '../../data'), + ann_file=osp.join( + osp.dirname(__file__), '../../data/data_list.txt'), + ), + pipeline=train_pipeline, + prefetch=prefetch) + dataset = RotationPredDataset(**data) + x = dataset[0] + assert x['img'].size() == (4, 3, 4, 4) + assert (x['rot_label'].numpy() == np.array([0, 1, 2, 3])).all() diff --git a/tests/test_data/test_datasets/test_singleview_dataset.py b/tests/test_data/test_datasets/test_singleview_dataset.py new file mode 100644 index 00000000..857bac06 --- /dev/null +++ b/tests/test_data/test_datasets/test_singleview_dataset.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +import numpy as np +import pytest + +from mmselfsup.datasets import SingleViewDataset + +# dataset settings +data_source = 'ImageNet' +dataset_type = 'MultiViewDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [dict(type='RandomResizedCrop', size=4)] +# prefetch +prefetch = False +if not prefetch: + train_pipeline.extend( + [dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg)]) + + +def test_one_view_dataset(): + data = dict( + data_source=dict( + type=data_source, + data_prefix=osp.join(osp.dirname(__file__), '../../data'), + ann_file=osp.join( + osp.dirname(__file__), '../../data/data_list.txt'), + ), + pipeline=train_pipeline, + prefetch=prefetch) + dataset = SingleViewDataset(**data) + fake_results = {'test': np.array([[0.7, 0, 0.3], [0.5, 0.3, 0.2]])} + + with pytest.raises(AssertionError): + eval_res = dataset.evaluate({'test': np.array([[0.7, 0, 0.3]])}, + topk=(1)) + + eval_res = dataset.evaluate(fake_results, topk=(1, 2)) + assert eval_res['test_top1'] == 1 * 100.0 / 2 + assert eval_res['test_top2'] == 2 * 100.0 / 2 diff --git a/tests/test_data/test_pipelines.py b/tests/test_data/test_pipelines.py new file mode 100644 index 00000000..01308068 --- /dev/null +++ b/tests/test_data/test_pipelines.py @@ -0,0 +1,99 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +import numpy as np +import pytest +import torch +from mmcv.utils import build_from_cfg +from PIL import Image + +from mmselfsup.datasets.builder import PIPELINES + + +def test_random_applied_trans(): + img = Image.open(osp.join(osp.dirname(__file__), '../data/color.jpg')) + + # p=0.5 + transform = dict( + type='RandomAppliedTrans', transforms=[dict(type='Solarization')]) + module = build_from_cfg(transform, PIPELINES) + res = module(img) + assert img.size == res.size + + transform = dict( + type='RandomAppliedTrans', + transforms=[dict(type='Solarization')], + p=0.) + module = build_from_cfg(transform, PIPELINES) + res = module(img) + assert img.size == res.size + + # p=1. + transform = dict( + type='RandomAppliedTrans', + transforms=[dict(type='Solarization')], + p=1.) 
+ module = build_from_cfg(transform, PIPELINES) + res = module(img) + assert img.size == res.size + + +def test_lighting(): + transform = dict(type='Lighting') + module = build_from_cfg(transform, PIPELINES) + img = np.array( + Image.open(osp.join(osp.dirname(__file__), '../data/color.jpg'))) + with pytest.raises(AssertionError): + res = module(img) + + img = torch.from_numpy(img).float().permute(2, 1, 0) + res = module(img) + + assert img.size() == res.size() + + +def test_gaussianblur(): + with pytest.raises(AssertionError): + transform = dict( + type='GaussianBlur', sigma_min=0.1, sigma_max=1.0, p=-1) + module = build_from_cfg(transform, PIPELINES) + + img = Image.open(osp.join(osp.dirname(__file__), '../data/color.jpg')) + + # p=0.5 + transform = dict(type='GaussianBlur', sigma_min=0.1, sigma_max=1.0) + module = build_from_cfg(transform, PIPELINES) + res = module(img) + + transform = dict(type='GaussianBlur', sigma_min=0.1, sigma_max=1.0, p=0.) + module = build_from_cfg(transform, PIPELINES) + res = module(img) + + transform = dict(type='GaussianBlur', sigma_min=0.1, sigma_max=1.0, p=1.) + module = build_from_cfg(transform, PIPELINES) + res = module(img) + + assert img.size == res.size + + +def test_solarization(): + with pytest.raises(AssertionError): + transform = dict(type='Solarization', p=-1) + module = build_from_cfg(transform, PIPELINES) + + img = Image.open(osp.join(osp.dirname(__file__), '../data/color.jpg')) + + # p=0.5 + transform = dict(type='Solarization') + module = build_from_cfg(transform, PIPELINES) + res = module(img) + + transform = dict(type='Solarization', p=0.) + module = build_from_cfg(transform, PIPELINES) + res = module(img) + + transform = dict(type='Solarization', p=1.) + module = build_from_cfg(transform, PIPELINES) + res = module(img) + + assert img.size == res.size diff --git a/tests/test_data/test_utils.py b/tests/test_data/test_utils.py new file mode 100644 index 00000000..f2df110a --- /dev/null +++ b/tests/test_data/test_utils.py @@ -0,0 +1,36 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import random +import string +import tempfile + +import numpy as np +from PIL import Image + +from mmselfsup.datasets.utils import check_integrity, rm_suffix, to_numpy + + +def test_to_numpy(): + pil_img = Image.open(osp.join(osp.dirname(__file__), '../data/color.jpg')) + np_img = to_numpy(pil_img) + assert type(np_img) == np.ndarray + if np_img.ndim < 3: + assert np_img.shape[0] == 1 + elif np_img.ndim == 3: + assert np_img.shape[0] == 3 + + +def test_dataset_utils(): + # test rm_suffix + assert rm_suffix('a.jpg') == 'a' + assert rm_suffix('a.bak.jpg') == 'a.bak' + assert rm_suffix('a.bak.jpg', suffix='.jpg') == 'a.bak' + assert rm_suffix('a.bak.jpg', suffix='.bak.jpg') == 'a' + + # test check_integrity + rand_file = ''.join(random.sample(string.ascii_letters, 10)) + assert not check_integrity(rand_file, md5=None) + assert not check_integrity(rand_file, md5=2333) + tmp_file = tempfile.NamedTemporaryFile() + assert check_integrity(tmp_file.name, md5=None) + assert not check_integrity(tmp_file.name, md5=2333) diff --git a/tests/test_metrics/test_accuracy.py b/tests/test_metrics/test_accuracy.py new file mode 100644 index 00000000..6d33cbb4 --- /dev/null +++ b/tests/test_metrics/test_accuracy.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch + +from mmselfsup.models.utils import Accuracy + + +def test_accuracy(): + pred = torch.Tensor([[0.2, 0.3, 0.5], [0.25, 0.15, 0.6], [0.9, 0.05, 0.05], + [0.8, 0.1, 0.1], [0.55, 0.15, 0.3]]) + target = torch.zeros(5) + + acc = Accuracy((1, 2)) + res = acc.forward(pred, target) + assert res[0].item() == 60. + assert res[1].item() == 80. diff --git a/tests/test_models/test_algorithms/test_byol.py b/tests/test_models/test_algorithms/test_byol.py new file mode 100644 index 00000000..eafda2ee --- /dev/null +++ b/tests/test_models/test_algorithms/test_byol.py @@ -0,0 +1,53 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmselfsup.models.algorithms import BYOL + +backbone = dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN')) +neck = dict( + type='NonLinearNeck', + in_channels=2048, + hid_channels=4, + out_channels=4, + with_bias=True, + with_last_bn=False, + with_avg_pool=True, + norm_cfg=dict(type='BN1d')) +head = dict( + type='LatentPredictHead', + predictor=dict( + type='NonLinearNeck', + in_channels=4, + hid_channels=4, + out_channels=4, + with_bias=True, + with_last_bn=False, + with_avg_pool=False, + norm_cfg=dict(type='BN1d'))) + + +def test_byol(): + with pytest.raises(AssertionError): + alg = BYOL(backbone=backbone, neck=None, head=head) + with pytest.raises(AssertionError): + alg = BYOL(backbone=backbone, neck=neck, head=None) + + alg = BYOL(backbone=backbone, neck=neck, head=head) + fake_input = torch.randn((16, 3, 224, 224)) + fake_backbone_out = alg.extract_feat(fake_input) + assert fake_backbone_out[0].size() == torch.Size([16, 2048, 7, 7]) + with pytest.raises(AssertionError): + fake_out = alg.forward_train(fake_input) + + fake_input = [ + torch.randn((16, 3, 224, 224)), + torch.randn((16, 3, 224, 224)) + ] + fake_out = alg.forward_train(fake_input) + assert fake_out['loss'].item() > -4 diff --git a/tests/test_models/test_algorithms/test_classification.py b/tests/test_models/test_algorithms/test_classification.py new file mode 100644 index 00000000..a41c4217 --- /dev/null +++ b/tests/test_models/test_algorithms/test_classification.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmselfsup.models.algorithms import Classification + +with_sobel = True, +backbone = dict( + type='ResNet', + depth=50, + in_channels=2, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4) +head = dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, num_classes=4) + + +def test_classification(): + alg = Classification(backbone=backbone, with_sobel=with_sobel, head=head) + assert hasattr(alg, 'sobel_layer') + assert hasattr(alg, 'head') + + fake_input = torch.randn((16, 3, 224, 224)) + fake_labels = torch.ones(16, dtype=torch.long) + fake_backbone_out = alg.extract_feat(fake_input) + assert fake_backbone_out[0].size() == torch.Size([16, 2048, 7, 7]) + fake_out = alg.forward_train(fake_input, fake_labels) + assert fake_out['loss'].item() > 0 diff --git a/tests/test_models/test_algorithms/test_deepcluster.py b/tests/test_models/test_algorithms/test_deepcluster.py new file mode 100644 index 00000000..8e37a854 --- /dev/null +++ b/tests/test_models/test_algorithms/test_deepcluster.py @@ -0,0 +1,39 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch + +from mmselfsup.models.algorithms import DeepCluster + +num_classes = 5 +with_sobel = True, +backbone = dict( + type='ResNet', + depth=50, + in_channels=2, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN')) +neck = dict(type='AvgPool2dNeck') +head = dict( + type='ClsHead', + with_avg_pool=False, # already has avgpool in the neck + in_channels=2048, + num_classes=num_classes) + + +def test_deepcluster(): + with pytest.raises(AssertionError): + alg = DeepCluster( + backbone=backbone, with_sobel=with_sobel, neck=neck, head=None) + alg = DeepCluster( + backbone=backbone, with_sobel=with_sobel, neck=neck, head=head) + assert alg.num_classes == num_classes + assert hasattr(alg, 'sobel_layer') + assert hasattr(alg, 'neck') + assert hasattr(alg, 'head') + + fake_input = torch.randn((16, 3, 224, 224)) + fake_labels = torch.ones(16, dtype=torch.long) + fake_backbone_out = alg.extract_feat(fake_input) + assert fake_backbone_out[0].size() == torch.Size([16, 2048, 7, 7]) + fake_out = alg.forward_train(fake_input, fake_labels) + assert fake_out['loss'].item() > 0 diff --git a/tests/test_models/test_algorithms/test_densecl.py b/tests/test_models/test_algorithms/test_densecl.py new file mode 100644 index 00000000..897a3a67 --- /dev/null +++ b/tests/test_models/test_algorithms/test_densecl.py @@ -0,0 +1,47 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmselfsup.models.algorithms import DenseCL + +queue_len = 65536 +feat_dim = 128 +momentum = 0.999 +loss_lambda = 0.5 +backbone = dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN')) +neck = dict( + type='DenseCLNeck', + in_channels=2048, + hid_channels=4, + out_channels=4, + num_grid=None) +head = dict(type='ContrastiveHead', temperature=0.2) + + +def test_densecl(): + with pytest.raises(AssertionError): + alg = DenseCL(backbone=backbone, neck=None, head=head) + with pytest.raises(AssertionError): + alg = DenseCL(backbone=backbone, neck=neck, head=None) + + alg = DenseCL( + backbone=backbone, + neck=neck, + head=head, + queue_len=queue_len, + feat_dim=feat_dim, + momentum=momentum, + loss_lambda=loss_lambda) + assert alg.queue.size() == torch.Size([feat_dim, queue_len]) + assert alg.queue2.size() == torch.Size([feat_dim, queue_len]) + + fake_input = torch.randn((16, 3, 224, 224)) + fake_backbone_out = alg.extract_feat(fake_input) + assert fake_backbone_out[0].size() == torch.Size([16, 2048, 7, 7]) + with pytest.raises(AssertionError): + fake_backbone_out = alg.forward_train(fake_input) diff --git a/tests/test_models/test_algorithms/test_moco.py b/tests/test_models/test_algorithms/test_moco.py new file mode 100644 index 00000000..9cccaf70 --- /dev/null +++ b/tests/test_models/test_algorithms/test_moco.py @@ -0,0 +1,44 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch + +from mmselfsup.models.algorithms import MoCo + +queue_len = 8 +feat_dim = 4 +momentum = 0.999 +backbone = dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN')) +neck = dict( + type='MoCoV2Neck', + in_channels=2048, + hid_channels=4, + out_channels=4, + with_avg_pool=True) +head = dict(type='ContrastiveHead', temperature=0.2) + + +def test_moco(): + with pytest.raises(AssertionError): + alg = MoCo(backbone=backbone, neck=None, head=head) + with pytest.raises(AssertionError): + alg = MoCo(backbone=backbone, neck=neck, head=None) + + alg = MoCo( + backbone=backbone, + neck=neck, + head=head, + queue_len=queue_len, + feat_dim=feat_dim, + momentum=momentum) + assert alg.queue.size() == torch.Size([feat_dim, queue_len]) + + fake_input = torch.randn((16, 3, 224, 224)) + fake_backbone_out = alg.extract_feat(fake_input) + assert fake_backbone_out[0].size() == torch.Size([16, 2048, 7, 7]) + with pytest.raises(AssertionError): + fake_backbone_out = alg.forward_train(fake_input) diff --git a/tests/test_models/test_algorithms/test_npid.py b/tests/test_models/test_algorithms/test_npid.py new file mode 100644 index 00000000..fe4824bf --- /dev/null +++ b/tests/test_models/test_algorithms/test_npid.py @@ -0,0 +1,32 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmselfsup.models.algorithms import NPID + +backbone = dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN')) +neck = dict( + type='LinearNeck', in_channels=2048, out_channels=4, with_avg_pool=True) +head = dict(type='ContrastiveHead', temperature=0.07) +memory_bank = dict(type='SimpleMemory', length=8, feat_dim=4, momentum=0.5) + + +@pytest.mark.skipif( + not torch.cuda.is_available(), reason='CUDA is not available.') +def test_npid(): + with pytest.raises(AssertionError): + alg = NPID(backbone=backbone, neck=neck, head=head, memory_bank=None) + with pytest.raises(AssertionError): + alg = NPID( + backbone=backbone, neck=neck, head=None, memory_bank=memory_bank) + + alg = NPID( + backbone=backbone, neck=neck, head=head, memory_bank=memory_bank) + fake_input = torch.randn((16, 3, 224, 224)) + fake_backbone_out = alg.extract_feat(fake_input) + assert fake_backbone_out[0].size() == torch.Size([16, 2048, 7, 7]) diff --git a/tests/test_models/test_algorithms/test_odc.py b/tests/test_models/test_algorithms/test_odc.py new file mode 100644 index 00000000..5165e28a --- /dev/null +++ b/tests/test_models/test_algorithms/test_odc.py @@ -0,0 +1,47 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch + +from mmselfsup.models.algorithms import ODC + +num_classes = 5 +backbone = dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN')) +neck = dict( + type='ODCNeck', + in_channels=2048, + hid_channels=4, + out_channels=4, + with_avg_pool=True) +head = dict( + type='ClsHead', + with_avg_pool=False, + in_channels=4, + num_classes=num_classes) +memory_bank = dict( + type='ODCMemory', + length=8, + feat_dim=4, + momentum=0.5, + num_classes=num_classes, + min_cluster=2, + debug=False) + + +@pytest.mark.skipif( + not torch.cuda.is_available(), reason='CUDA is not available.') +def test_odc(): + with pytest.raises(AssertionError): + alg = ODC(backbone=backbone, neck=neck, head=head, memory_bank=None) + with pytest.raises(AssertionError): + alg = ODC( + backbone=backbone, neck=neck, head=None, memory_bank=memory_bank) + + alg = ODC(backbone=backbone, neck=neck, head=head, memory_bank=memory_bank) + fake_input = torch.randn((16, 3, 224, 224)) + fake_backbone_out = alg.extract_feat(fake_input) + assert fake_backbone_out[0].size() == torch.Size([16, 2048, 7, 7]) diff --git a/tests/test_models/test_algorithms/test_relative_loc.py b/tests/test_models/test_algorithms/test_relative_loc.py new file mode 100644 index 00000000..e32f6246 --- /dev/null +++ b/tests/test_models/test_algorithms/test_relative_loc.py @@ -0,0 +1,51 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmselfsup.models.algorithms import RelativeLoc + +backbone = dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN')) +neck = dict( + type='RelativeLocNeck', + in_channels=2048, + out_channels=4, + with_avg_pool=True) +head = dict(type='ClsHead', with_avg_pool=False, in_channels=4, num_classes=8) + + +def test_relative_loc(): + with pytest.raises(AssertionError): + alg = RelativeLoc(backbone=backbone, neck=None, head=head) + with pytest.raises(AssertionError): + alg = RelativeLoc(backbone=backbone, neck=neck, head=None) + + alg = RelativeLoc(backbone=backbone, neck=neck, head=head) + + with pytest.raises(AssertionError): + fake_input = torch.randn((2, 8, 6, 224, 224)) + patch_labels = torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7]) + alg.forward(fake_input, patch_labels) + + # train + fake_input = torch.randn((2, 8, 6, 224, 224)) + patch_labels = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7], + [0, 1, 2, 3, 4, 5, 6, 7]]) + fake_out = alg.forward(fake_input, patch_labels) + assert fake_out['loss'].item() > 0 + + # test + fake_input = torch.randn((2, 8, 6, 224, 224)) + patch_labels = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7], + [0, 1, 2, 3, 4, 5, 6, 7]]) + fake_out = alg.forward(fake_input, patch_labels, mode='test') + assert 'head4' in fake_out + + # extract + fake_input = torch.randn((16, 3, 224, 224)) + fake_backbone_out = alg.forward(fake_input, mode='extract') + assert fake_backbone_out[0].size() == torch.Size([16, 2048, 7, 7]) diff --git a/tests/test_models/test_algorithms/test_rotation_pred.py b/tests/test_models/test_algorithms/test_rotation_pred.py new file mode 100644 index 00000000..8485571f --- /dev/null +++ b/tests/test_models/test_algorithms/test_rotation_pred.py @@ -0,0 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch + +from mmselfsup.models.algorithms import RotationPred + +backbone = dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN')) +head = dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, num_classes=4) + + +def test_rotation_pred(): + with pytest.raises(AssertionError): + alg = RotationPred(backbone=backbone, head=None) + + alg = RotationPred(backbone=backbone, head=head) + + with pytest.raises(AssertionError): + fake_input = torch.randn((2, 4, 3, 224, 224)) + rotation_labels = torch.LongTensor([0, 1, 2, 3]) + alg.forward(fake_input, rotation_labels) + + # train + fake_input = torch.randn((2, 4, 3, 224, 224)) + rotation_labels = torch.LongTensor([[0, 1, 2, 3], [0, 1, 2, 3]]) + fake_out = alg.forward(fake_input, rotation_labels) + assert fake_out['loss'].item() > 0 + + # test + fake_input = torch.randn((2, 4, 3, 224, 224)) + rotation_labels = torch.LongTensor([[0, 1, 2, 3], [0, 1, 2, 3]]) + fake_out = alg.forward(fake_input, rotation_labels, mode='test') + assert 'head4' in fake_out + + # extract + fake_input = torch.randn((16, 3, 224, 224)) + fake_backbone_out = alg.forward(fake_input, mode='extract') + assert fake_backbone_out[0].size() == torch.Size([16, 2048, 7, 7]) diff --git a/tests/test_models/test_algorithms/test_simclr.py b/tests/test_models/test_algorithms/test_simclr.py new file mode 100644 index 00000000..09e2a219 --- /dev/null +++ b/tests/test_models/test_algorithms/test_simclr.py @@ -0,0 +1,36 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmselfsup.models.algorithms import SimCLR + +backbone = dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN')) +neck = dict( + type='NonLinearNeck', # SimCLR non-linear neck + in_channels=2048, + hid_channels=4, + out_channels=4, + num_layers=2, + with_avg_pool=True) +head = dict(type='ContrastiveHead', temperature=0.1) + + +def test_simclr(): + with pytest.raises(AssertionError): + alg = SimCLR(backbone=backbone, neck=None, head=head) + with pytest.raises(AssertionError): + alg = SimCLR(backbone=backbone, neck=neck, head=None) + + alg = SimCLR(backbone=backbone, neck=neck, head=head) + with pytest.raises(AssertionError): + fake_input = torch.randn((16, 3, 224, 224)) + alg.forward_train(fake_input) + + fake_input = torch.randn((16, 3, 224, 224)) + fake_backbone_out = alg.extract_feat(fake_input) + assert fake_backbone_out[0].size() == torch.Size([16, 2048, 7, 7]) diff --git a/tests/test_models/test_algorithms/test_simsiam.py b/tests/test_models/test_algorithms/test_simsiam.py new file mode 100644 index 00000000..b39118d0 --- /dev/null +++ b/tests/test_models/test_algorithms/test_simsiam.py @@ -0,0 +1,50 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch + +from mmselfsup.models.algorithms import SimSiam + +backbone = dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + zero_init_residual=True) +neck = dict( + type='NonLinearNeck', + in_channels=2048, + hid_channels=4, + out_channels=4, + num_layers=3, + with_last_bn_affine=False, + with_avg_pool=True, + norm_cfg=dict(type='BN1d')) +head = dict( + type='LatentPredictHead', + predictor=dict( + type='NonLinearNeck', + in_channels=4, + hid_channels=4, + out_channels=4, + with_avg_pool=False, + with_last_bn=False, + with_last_bias=True, + norm_cfg=dict(type='BN1d'))) + + +def test_simsiam(): + with pytest.raises(AssertionError): + alg = SimSiam(backbone=backbone, neck=neck, head=None) + + alg = SimSiam(backbone=backbone, neck=neck, head=head) + with pytest.raises(AssertionError): + fake_input = torch.randn((16, 3, 224, 224)) + alg.forward_train(fake_input) + + fake_input = [ + torch.randn((16, 3, 224, 224)), + torch.randn((16, 3, 224, 224)) + ] + fake_out = alg.forward(fake_input) + assert fake_out['loss'].item() > -1 diff --git a/tests/test_models/test_algorithms/test_swav.py b/tests/test_models/test_algorithms/test_swav.py new file mode 100644 index 00000000..8fb88792 --- /dev/null +++ b/tests/test_models/test_algorithms/test_swav.py @@ -0,0 +1,52 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmselfsup.models.algorithms import SwAV + +nmb_crops = [2, 6] +backbone = dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + zero_init_residual=True) +neck = dict( + type='SwAVNeck', + in_channels=2048, + hid_channels=4, + out_channels=4, + norm_cfg=dict(type='BN1d'), + with_avg_pool=True) +head = dict( + type='SwAVHead', + feat_dim=4, # equal to neck['out_channels'] + epsilon=0.05, + temperature=0.1, + num_crops=nmb_crops) + + +def test_swav(): + with pytest.raises(AssertionError): + alg = SwAV(backbone=backbone, neck=neck, head=None) + with pytest.raises(AssertionError): + alg = SwAV(backbone=backbone, neck=None, head=head) + + alg = SwAV(backbone=backbone, neck=neck, head=head) + fake_input = torch.randn((16, 3, 224, 224)) + fake_backbone_out = alg.extract_feat(fake_input) + assert fake_backbone_out[0].size() == torch.Size([16, 2048, 7, 7]) + + fake_input = [ + torch.randn((16, 3, 224, 224)), + torch.randn((16, 3, 224, 224)), + torch.randn((16, 3, 96, 96)), + torch.randn((16, 3, 96, 96)), + torch.randn((16, 3, 96, 96)), + torch.randn((16, 3, 96, 96)), + torch.randn((16, 3, 96, 96)), + torch.randn((16, 3, 96, 96)), + ] + fake_out = alg.forward_train(fake_input) + assert fake_out['loss'].item() > 0 diff --git a/tests/test_models/test_backbones/test_resnet.py b/tests/test_models/test_backbones/test_resnet.py new file mode 100644 index 00000000..9f8f21f2 --- /dev/null +++ b/tests/test_models/test_backbones/test_resnet.py @@ -0,0 +1,245 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch
+import torch.nn as nn
+from mmcv.utils.parrots_wrapper import _BatchNorm
+
+from mmselfsup.models.backbones import ResNet
+from mmselfsup.models.backbones.resnet import BasicBlock, Bottleneck
+
+
+def is_block(modules):
+    """Check if is ResNet building block."""
+    if isinstance(modules, (BasicBlock, Bottleneck)):
+        return True
+    return False
+
+
+def all_zeros(modules):
+    """Check if the weight(and bias) is all zero."""
+    weight_zero = torch.equal(modules.weight.data,
+                              torch.zeros_like(modules.weight.data))
+    if hasattr(modules, 'bias'):
+        bias_zero = torch.equal(modules.bias.data,
+                                torch.zeros_like(modules.bias.data))
+    else:
+        bias_zero = True
+
+    return weight_zero and bias_zero
+
+
+def check_norm_state(modules, train_state):
+    """Check if norm layer is in correct train state."""
+    for mod in modules:
+        if isinstance(mod, _BatchNorm):
+            if mod.training != train_state:
+                return False
+    return True
+
+
+def test_basic_block():
+    # BasicBlock with stride 1, out_channels == in_channels
+    block = BasicBlock(64, 64)
+    assert block.conv1.in_channels == 64
+    assert block.conv1.out_channels == 64
+    assert block.conv1.kernel_size == (3, 3)
+    assert block.conv1.stride == (1, 1)
+    assert block.conv2.in_channels == 64
+    assert block.conv2.out_channels == 64
+    assert block.conv2.kernel_size == (3, 3)
+    x = torch.randn(1, 64, 56, 56)
+    x_out = block(x)
+    assert x_out.shape == torch.Size([1, 64, 56, 56])
+
+    # BasicBlock with stride 1 and downsample
+    downsample = nn.Sequential(
+        nn.Conv2d(64, 128, kernel_size=1, bias=False), nn.BatchNorm2d(128))
+    block = BasicBlock(64, 128, downsample=downsample)
+    assert block.conv1.in_channels == 64
+    assert block.conv1.out_channels == 128
+    assert block.conv1.kernel_size == (3, 3)
+    assert block.conv1.stride == (1, 1)
+    assert block.conv2.in_channels == 128
+    assert block.conv2.out_channels == 128
+    assert block.conv2.kernel_size == (3, 3)
+    x = torch.randn(1, 64, 56, 56)
+    x_out = block(x)
+    assert x_out.shape == torch.Size([1, 128, 56, 56])
+
+    # BasicBlock with stride 2 and downsample
+    downsample = nn.Sequential(
+        nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False),
+        nn.BatchNorm2d(128))
+    block = BasicBlock(64, 128, stride=2, downsample=downsample)
+    assert block.conv1.in_channels == 64
+    assert block.conv1.out_channels == 128
+    assert block.conv1.kernel_size == (3, 3)
+    assert block.conv1.stride == (2, 2)
+    assert block.conv2.in_channels == 128
+    assert block.conv2.out_channels == 128
+    assert block.conv2.kernel_size == (3, 3)
+    x = torch.randn(1, 64, 56, 56)
+    x_out = block(x)
+    assert x_out.shape == torch.Size([1, 128, 28, 28])
+
+
+def test_bottleneck():
+    # Test Bottleneck style
+    block = Bottleneck(64, 64, stride=2, style='pytorch')
+    assert block.conv1.stride == (1, 1)
+    assert block.conv2.stride == (2, 2)
+    block = Bottleneck(64, 64, stride=2, style='caffe')
+    assert block.conv1.stride == (2, 2)
+    assert block.conv2.stride == (1, 1)
+
+    # Bottleneck with stride 1
+    block = Bottleneck(64, 16, style='pytorch')
+    assert block.conv1.in_channels == 64
+    assert block.conv1.out_channels == 16
+    assert block.conv1.kernel_size == (1, 1)
+    assert block.conv2.in_channels == 16
+    assert block.conv2.out_channels == 16
+    assert block.conv2.kernel_size == (3, 3)
+    assert block.conv3.in_channels == 16
+    assert block.conv3.out_channels == 64
+    assert block.conv3.kernel_size == (1, 1)
+    x = torch.randn(1, 64, 56, 56)
+    x_out = block(x)
+    assert x_out.shape == (1, 64, 56, 56)
+
+    # Bottleneck with stride 1 and downsample
+    downsample = nn.Sequential(
+        nn.Conv2d(64, 256, kernel_size=1), nn.BatchNorm2d(256))
+    block = Bottleneck(64, 64, style='pytorch', downsample=downsample)
+    assert block.conv1.in_channels == 64
+    assert block.conv1.out_channels == 64
+    assert block.conv1.kernel_size == (1, 1)
+    assert block.conv2.in_channels == 64
+    assert block.conv2.out_channels == 64
+    assert block.conv2.kernel_size == (3, 3)
+    assert block.conv3.in_channels == 64
+    assert block.conv3.out_channels == 256
+    assert block.conv3.kernel_size == (1, 1)
+    x = torch.randn(1, 64, 56, 56)
+    x_out = block(x)
+    assert x_out.shape == (1, 256, 56, 56)
+
+    # Bottleneck with stride 2 and downsample
+    downsample = nn.Sequential(
+        nn.Conv2d(64, 256, kernel_size=1, stride=2), nn.BatchNorm2d(256))
+    block = Bottleneck(
+        64, 64, stride=2, style='pytorch', downsample=downsample)
+    x = torch.randn(1, 64, 56, 56)
+    x_out = block(x)
+    assert x_out.shape == (1, 256, 28, 28)
+
+    # Test Bottleneck with checkpointing
+    block = Bottleneck(64, 16, with_cp=True)
+    block.train()
+    assert block.with_cp
+    x = torch.randn(1, 64, 56, 56, requires_grad=True)
+    x_out = block(x)
+    assert x_out.shape == torch.Size([1, 64, 56, 56])
+
+
+def test_resnet():
+    """Test resnet backbone."""
+    # Test ResNet50 norm_eval=True
+    model = ResNet(50, norm_eval=True)
+    model.init_weights()
+    model.train()
+    assert check_norm_state(model.modules(), False)
+
+    # Test ResNet50 with torchvision pretrained weight
+    model = ResNet(depth=50, norm_eval=True)
+    model.init_weights()
+    model.train()
+    assert check_norm_state(model.modules(), False)
+
+    # Test ResNet50 with first stage frozen
+    frozen_stages = 1
+    model = ResNet(50, frozen_stages=frozen_stages)
+    model.init_weights()
+    model.train()
+    assert model.norm1.training is False
+    for layer in [model.conv1, model.norm1]:
+        for param in layer.parameters():
+            assert param.requires_grad is False
+    for i in range(1, frozen_stages + 1):
+        layer = getattr(model, f'layer{i}')
+        for mod in layer.modules():
+            if isinstance(mod, _BatchNorm):
+                assert mod.training is False
+        for param in layer.parameters():
+            assert param.requires_grad is False
+
+    # Test ResNet18 forward
+    model = ResNet(18, out_indices=(0, 1, 2, 3, 4))
+    model.init_weights()
+    model.train()
+
+    imgs = torch.randn(1, 3, 224, 224)
+    feat = model(imgs)
+    assert len(feat) == 5
+    assert feat[0].shape == (1, 64, 112, 112)
+    assert feat[1].shape == (1, 64, 56, 56)
+    assert feat[2].shape == (1, 128, 28, 28)
+    assert feat[3].shape == (1, 256, 14, 14)
+    assert feat[4].shape == (1, 512, 7, 7)
+
+    # Test ResNet50 with BatchNorm forward
+    model = ResNet(50, out_indices=(0, 1, 2, 3, 4))
+    model.init_weights()
+    model.train()
+
+    imgs = torch.randn(1, 3, 224, 224)
+    feat = model(imgs)
+    assert len(feat) == 5
+    assert feat[0].shape == (1, 64, 112, 112)
+    assert feat[1].shape == (1, 256, 56, 56)
+    assert feat[2].shape == (1, 512, 28, 28)
+    assert feat[3].shape == (1, 1024, 14, 14)
+    assert feat[4].shape == (1, 2048, 7, 7)
+
+    # Test ResNet50 with layers 3 (top feature maps) out forward
+    model = ResNet(50, out_indices=(4, ))
+    model.init_weights()
+    model.train()
+
+    imgs = torch.randn(1, 3, 224, 224)
+    feat = model(imgs)
+    assert feat[0].shape == (1, 2048, 7, 7)
+
+    # Test ResNet50 with checkpoint forward
+    model = ResNet(50, out_indices=(0, 1, 2, 3, 4), with_cp=True)
+    for m in model.modules():
+        if is_block(m):
+            assert m.with_cp
+    model.init_weights()
+    model.train()
+
+    imgs = torch.randn(1, 3, 224, 224)
+    feat = model(imgs)
+    assert len(feat) == 5
+    assert feat[0].shape == (1, 64, 112, 112)
+ assert feat[1].shape == (1, 256, 56, 56) + assert feat[2].shape == (1, 512, 28, 28) + assert feat[3].shape == (1, 1024, 14, 14) + assert feat[4].shape == (1, 2048, 7, 7) + + # zero initialization of residual blocks + model = ResNet(50, zero_init_residual=True) + model.init_weights() + for m in model.modules(): + if isinstance(m, Bottleneck): + assert all_zeros(m.norm3) + elif isinstance(m, BasicBlock): + assert all_zeros(m.norm2) + + # non-zero initialization of residual blocks + model = ResNet(50, zero_init_residual=False) + model.init_weights() + for m in model.modules(): + if isinstance(m, Bottleneck): + assert not all_zeros(m.norm3) + elif isinstance(m, BasicBlock): + assert not all_zeros(m.norm2) diff --git a/tests/test_models/test_backbones/test_resnext.py b/tests/test_models/test_backbones/test_resnext.py new file mode 100644 index 00000000..a5271362 --- /dev/null +++ b/tests/test_models/test_backbones/test_resnext.py @@ -0,0 +1,61 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmselfsup.models.backbones import ResNeXt +from mmselfsup.models.backbones.resnext import Bottleneck as BottleneckX + + +def test_bottleneck(): + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + BottleneckX(64, 64, groups=32, width_per_group=4, style='tensorflow') + + # Test ResNeXt Bottleneck structure + block = BottleneckX( + 64, 64, stride=2, groups=32, width_per_group=4, style='pytorch') + assert block.conv2.stride == (2, 2) + assert block.conv2.groups == 32 + assert block.conv2.out_channels == 128 + + # Test ResNeXt Bottleneck forward + block = BottleneckX(64, 16, stride=1, groups=32, width_per_group=4) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_resnext(): + with pytest.raises(KeyError): + # ResNeXt depth should be in [50, 101, 152] + ResNeXt(depth=18) + + # Test ResNeXt with group 32, width_per_group 4 + model = ResNeXt( + depth=50, groups=32, width_per_group=4, out_indices=(0, 1, 2, 3, 4)) + for m in model.modules(): + if isinstance(m, BottleneckX): + assert m.conv2.groups == 32 + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size([1, 64, 112, 112]) + assert feat[1].shape == torch.Size([1, 256, 56, 56]) + assert feat[2].shape == torch.Size([1, 512, 28, 28]) + assert feat[3].shape == torch.Size([1, 1024, 14, 14]) + assert feat[4].shape == torch.Size([1, 2048, 7, 7]) + + # Test ResNeXt with group 32, width_per_group 4 and layers 3 out forward + model = ResNeXt(depth=50, groups=32, width_per_group=4, out_indices=(4, )) + for m in model.modules(): + if isinstance(m, BottleneckX): + assert m.conv2.groups == 32 + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[0].shape == torch.Size([1, 2048, 7, 7]) diff --git a/tests/test_models/test_heads.py b/tests/test_models/test_heads.py new file mode 100644 index 00000000..c9548164 --- /dev/null +++ b/tests/test_models/test_heads.py @@ -0,0 +1,75 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch
+
+from mmselfsup.models.heads import (ClsHead, ContrastiveHead, LatentClsHead,
+                                    LatentPredictHead, MultiClsHead, SwAVHead)
+
+
+def test_cls_head():
+    # test ClsHead
+    head = ClsHead()
+    fake_cls_score = [torch.rand(4, 3)]
+    fake_gt_label = torch.randint(0, 2, (4, ))
+
+    loss = head.loss(fake_cls_score, fake_gt_label)
+    assert loss['loss'].item() > 0
+
+
+def test_contrastive_head():
+    head = ContrastiveHead()
+    fake_pos = torch.rand(32, 1)  # N, 1
+    fake_neg = torch.rand(32, 100)  # N, k
+
+    loss = head.forward(fake_pos, fake_neg)
+    assert loss['loss'].item() > 0
+
+
+def test_latent_predict_head():
+    predictor = dict(
+        type='NonLinearNeck',
+        in_channels=64,
+        hid_channels=128,
+        out_channels=64,
+        with_bias=True,
+        with_last_bn=True,
+        with_avg_pool=False,
+        norm_cfg=dict(type='BN1d'))
+    head = LatentPredictHead(predictor=predictor)
+    fake_input = torch.rand(32, 64)  # N, C
+    fake_target = torch.rand(32, 64)  # N, C
+
+    loss = head.forward(fake_input, fake_target)
+    assert loss['loss'].item() > -1
+
+
+def test_latent_cls_head():
+    head = LatentClsHead(64, 10)
+    fake_input = torch.rand(32, 64)  # N, C
+    fake_target = torch.rand(32, 64)  # N, C
+
+    loss = head.forward(fake_input, fake_target)
+    assert loss['loss'].item() > 0
+
+
+def test_multi_cls_head():
+    head = MultiClsHead(in_indices=(0, 1))
+    fake_input = [torch.rand(8, 64, 5, 5), torch.rand(8, 256, 14, 14)]
+    out = head.forward(fake_input)
+    assert isinstance(out, list)
+
+    fake_cls_score = [torch.rand(4, 3)]
+    fake_gt_label = torch.randint(0, 2, (4, ))
+
+    loss = head.loss(fake_cls_score, fake_gt_label)
+    print(loss.keys())
+    for k in loss.keys():
+        if 'loss' in k:
+            assert loss[k].item() > 0
+
+
+def test_swav_head():
+    head = SwAVHead(feat_dim=128, num_crops=[2, 6])
+    fake_input = torch.rand(32, 128)  # N, C
+
+    loss = head.forward(fake_input)
+    assert loss['loss'].item() > 0
diff --git a/tests/test_models/test_necks/test_avgpool_neck.py b/tests/test_models/test_necks/test_avgpool_neck.py
new file mode 100644
index 00000000..08bc3511
--- /dev/null
+++ b/tests/test_models/test_necks/test_avgpool_neck.py
@@ -0,0 +1,23 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+
+from mmselfsup.models.necks import AvgPool2dNeck
+
+
+def test_avgpool2d_neck():
+    fake_in = [torch.randn((2, 3, 8, 8))]
+
+    # test default
+    neck = AvgPool2dNeck()
+    fake_out = neck(fake_in)
+    assert fake_out[0].shape == (2, 3, 1, 1)
+
+    # test custom
+    neck = AvgPool2dNeck(2)
+    fake_out = neck(fake_in)
+    assert fake_out[0].shape == (2, 3, 2, 2)
+
+    # test custom
+    neck = AvgPool2dNeck((1, 2))
+    fake_out = neck(fake_in)
+    assert fake_out[0].shape == (2, 3, 1, 2)
diff --git a/tests/test_models/test_necks/test_densecl_neck.py b/tests/test_models/test_necks/test_densecl_neck.py
new file mode 100644
index 00000000..2e8af0f6
--- /dev/null
+++ b/tests/test_models/test_necks/test_densecl_neck.py
@@ -0,0 +1,32 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch +import torch.nn as nn + +from mmselfsup.models.necks import DenseCLNeck + + +def test_densecl_neck(): + neck = DenseCLNeck(16, 32, 16) + assert isinstance(neck.mlp, nn.Sequential) + assert isinstance(neck.mlp2, nn.Sequential) + assert neck.mlp[0].in_features == 16 + assert neck.mlp[2].in_features == 32 + assert neck.mlp[2].out_features == 16 + assert neck.mlp2[0].in_channels == 16 + assert neck.mlp2[2].in_channels == 32 + assert neck.mlp2[2].out_channels == 16 + + # test neck when num_grid is None + fake_in = torch.rand((32, 16, 5, 5)) + fake_out = neck.forward([fake_in]) + assert fake_out[0].shape == torch.Size([32, 16]) + assert fake_out[1].shape == torch.Size([32, 16, 25]) + assert fake_out[2].shape == torch.Size([32, 16]) + + # test neck when num_grid is not None + neck = DenseCLNeck(16, 32, 16, num_grid=3) + fake_in = torch.rand((32, 16, 5, 5)) + fake_out = neck.forward([fake_in]) + assert fake_out[0].shape == torch.Size([32, 16]) + assert fake_out[1].shape == torch.Size([32, 16, 9]) + assert fake_out[2].shape == torch.Size([32, 16]) diff --git a/tests/test_models/test_necks/test_linear_neck.py b/tests/test_models/test_necks/test_linear_neck.py new file mode 100644 index 00000000..910261ab --- /dev/null +++ b/tests/test_models/test_necks/test_linear_neck.py @@ -0,0 +1,23 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn + +from mmselfsup.models.necks import LinearNeck + + +def test_linear_neck(): + neck = LinearNeck(16, 32, with_avg_pool=True) + assert isinstance(neck.avgpool, nn.Module) + assert neck.fc.in_features == 16 + assert neck.fc.out_features == 32 + + # test neck with avgpool + fake_in = torch.rand((32, 16, 5, 5)) + fake_out = neck.forward([fake_in]) + assert fake_out[0].shape == torch.Size([32, 32]) + + # test neck without avgpool + neck = LinearNeck(16, 32, with_avg_pool=False) + fake_in = torch.rand((32, 16)) + fake_out = neck.forward([fake_in]) + assert fake_out[0].shape == torch.Size([32, 32]) diff --git a/tests/test_models/test_necks/test_mocov2_neck.py b/tests/test_models/test_necks/test_mocov2_neck.py new file mode 100644 index 00000000..4b185b12 --- /dev/null +++ b/tests/test_models/test_necks/test_mocov2_neck.py @@ -0,0 +1,24 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn + +from mmselfsup.models.necks import MoCoV2Neck + + +def test_mocov2_neck(): + neck = MoCoV2Neck(16, 32, 16) + assert isinstance(neck.mlp, nn.Sequential) + assert neck.mlp[0].in_features == 16 + assert neck.mlp[2].in_features == 32 + assert neck.mlp[2].out_features == 16 + + # test neck with avgpool + fake_in = torch.rand((32, 16, 5, 5)) + fake_out = neck.forward([fake_in]) + assert fake_out[0].shape == torch.Size([32, 16]) + + # test neck without avgpool + neck = MoCoV2Neck(16, 32, 16, with_avg_pool=False) + fake_in = torch.rand((32, 16)) + fake_out = neck.forward([fake_in]) + assert fake_out[0].shape == torch.Size([32, 16]) diff --git a/tests/test_models/test_necks/test_nonlinear_neck.py b/tests/test_models/test_necks/test_nonlinear_neck.py new file mode 100644 index 00000000..eed0ef57 --- /dev/null +++ b/tests/test_models/test_necks/test_nonlinear_neck.py @@ -0,0 +1,26 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch + +from mmselfsup.models.necks import NonLinearNeck + + +def test_nonlinear_neck(): + # test neck arch + neck = NonLinearNeck(16, 32, 16, norm_cfg=dict(type='BN1d')) + assert neck.fc0.in_features == 16 + assert neck.fc0.out_features == 32 + assert neck.bn0.num_features == 32 + fc = getattr(neck, neck.fc_names[-1]) + assert fc.out_features == 16 + + # test neck with avgpool + fake_in = torch.rand((32, 16, 5, 5)) + fake_out = neck.forward([fake_in]) + assert fake_out[0].shape == torch.Size([32, 16]) + + # test neck without avgpool + neck = NonLinearNeck( + 16, 32, 16, with_avg_pool=False, norm_cfg=dict(type='BN1d')) + fake_in = torch.rand((32, 16)) + fake_out = neck.forward([fake_in]) + assert fake_out[0].shape == torch.Size([32, 16]) diff --git a/tests/test_models/test_necks/test_odc_neck.py b/tests/test_models/test_necks/test_odc_neck.py new file mode 100644 index 00000000..27698156 --- /dev/null +++ b/tests/test_models/test_necks/test_odc_neck.py @@ -0,0 +1,24 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmselfsup.models.necks import ODCNeck + + +def test_odc_neck(): + neck = ODCNeck(16, 32, 16, norm_cfg=dict(type='BN1d')) + assert neck.fc0.in_features == 16 + assert neck.fc0.out_features == 32 + assert neck.bn0.num_features == 32 + assert neck.fc1.in_features == 32 + assert neck.fc1.out_features == 16 + + # test neck with avgpool + fake_in = torch.rand((32, 16, 5, 5)) + fake_out = neck.forward([fake_in]) + assert fake_out[0].shape == torch.Size([32, 16]) + + # test neck without avgpool + neck = ODCNeck(16, 32, 16, with_avg_pool=False, norm_cfg=dict(type='BN1d')) + fake_in = torch.rand((32, 16)) + fake_out = neck.forward([fake_in]) + assert fake_out[0].shape == torch.Size([32, 16]) diff --git a/tests/test_models/test_necks/test_relative_loc_neck.py b/tests/test_models/test_necks/test_relative_loc_neck.py new file mode 100644 index 00000000..d5c23517 --- /dev/null +++ b/tests/test_models/test_necks/test_relative_loc_neck.py @@ -0,0 +1,22 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmselfsup.models.necks import RelativeLocNeck + + +def test_relative_loc_neck(): + neck = RelativeLocNeck(16, 32) + assert neck.fc.in_features == 32 + assert neck.fc.out_features == 32 + assert neck.bn.num_features == 32 + + # test neck with avgpool + fake_in = torch.rand((32, 32, 5, 5)) + fake_out = neck.forward([fake_in]) + assert fake_out[0].shape == torch.Size([32, 32]) + + # test neck without avgpool + neck = RelativeLocNeck(16, 32, with_avg_pool=False) + fake_in = torch.rand((32, 32)) + fake_out = neck.forward([fake_in]) + assert fake_out[0].shape == torch.Size([32, 32]) diff --git a/tests/test_models/test_necks/test_swav_neck.py b/tests/test_models/test_necks/test_swav_neck.py new file mode 100644 index 00000000..ab057d87 --- /dev/null +++ b/tests/test_models/test_necks/test_swav_neck.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +import torch.nn as nn + +from mmselfsup.models.necks import SwAVNeck + + +def test_swav_neck(): + neck = SwAVNeck(16, 32, 16, norm_cfg=dict(type='BN1d')) + assert isinstance(neck.projection_neck, (nn.Module, nn.Sequential)) + + # test neck with avgpool + fake_in = [[torch.rand((32, 16, 5, 5))], [torch.rand((32, 16, 5, 5))], + [torch.rand((32, 16, 3, 3))]] + fake_out = neck.forward(fake_in) + assert fake_out[0].shape == torch.Size([32 * len(fake_in), 16]) diff --git a/tests/test_models/test_utils/test_multi_pooling.py b/tests/test_models/test_utils/test_multi_pooling.py new file mode 100644 index 00000000..0aa2de9c --- /dev/null +++ b/tests/test_models/test_utils/test_multi_pooling.py @@ -0,0 +1,37 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmselfsup.models.utils import MultiPooling + + +def test_multi_pooling(): + # adaptive + layer = MultiPooling(pool_type='adaptive', in_indices=(0, 1, 2)) + fake_in = [ + torch.rand((1, 32, 112, 112)), + torch.rand((1, 64, 56, 56)), + torch.rand((1, 128, 28, 28)), + ] + res = layer.forward(fake_in) + assert res[0].shape == (1, 32, 12, 12) + assert res[1].shape == (1, 64, 6, 6) + assert res[2].shape == (1, 128, 4, 4) + + # specified + layer = MultiPooling(pool_type='specified', in_indices=(0, 1, 2)) + fake_in = [ + torch.rand((1, 32, 112, 112)), + torch.rand((1, 64, 56, 56)), + torch.rand((1, 128, 28, 28)), + ] + res = layer.forward(fake_in) + assert res[0].shape == (1, 32, 12, 12) + assert res[1].shape == (1, 64, 6, 6) + assert res[2].shape == (1, 128, 4, 4) + + with pytest.raises(AssertionError): + layer = MultiPooling(pool_type='other') + + with pytest.raises(AssertionError): + layer = MultiPooling(backbone='resnet101') diff --git a/tests/test_models/test_utils/test_multi_prototypes.py b/tests/test_models/test_utils/test_multi_prototypes.py new file mode 100644 index 00000000..004aa1c6 --- /dev/null +++ b/tests/test_models/test_utils/test_multi_prototypes.py @@ -0,0 +1,23 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +import torch.nn as nn + +from mmselfsup.models.utils import MultiPrototypes + + +def test_multi_prototypes(): + with pytest.raises(AssertionError): + layer = MultiPrototypes(output_dim=16, num_prototypes=2) + + layer = MultiPrototypes(output_dim=16, num_prototypes=[3, 4, 5]) + assert isinstance(getattr(layer, 'prototypes0'), nn.Module) + assert isinstance(getattr(layer, 'prototypes1'), nn.Module) + assert isinstance(getattr(layer, 'prototypes2'), nn.Module) + + fake_in = torch.rand((32, 16)) + res = layer.forward(fake_in) + assert len(res) == 3 + assert res[0].shape == (32, 3) + assert res[1].shape == (32, 4) + assert res[2].shape == (32, 5) diff --git a/tests/test_models/test_utils/test_sobel.py b/tests/test_models/test_utils/test_sobel.py new file mode 100644 index 00000000..b5e6ede1 --- /dev/null +++ b/tests/test_models/test_utils/test_sobel.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch + +from mmselfsup.models.utils import Sobel + + +def test_sobel(): + sobel_layer = Sobel() + fake_input = torch.rand((1, 3, 224, 224)) + fake_res = sobel_layer(fake_input) + assert fake_res.shape == (1, 2, 224, 224) + + for p in sobel_layer.sobel.parameters(): + assert p.requires_grad is False diff --git a/tests/test_runtime/test_extract_process.py b/tests/test_runtime/test_extract_process.py new file mode 100644 index 00000000..03529fff --- /dev/null +++ b/tests/test_runtime/test_extract_process.py @@ -0,0 +1,61 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest.mock import MagicMock + +import pytest +import torch +import torch.nn as nn +from mmcv.parallel import MMDataParallel +from torch.utils.data import DataLoader, Dataset + +from mmselfsup.models.utils import ExtractProcess + + +class ExampleDataset(Dataset): + + def __getitem__(self, idx): + results = dict(img=torch.tensor([1]), img_metas=dict()) + return results + + def __len__(self): + return 1 + + +class ExampleModel(nn.Module): + + def __init__(self): + super(ExampleModel, self).__init__() + self.conv = nn.Conv2d(3, 3, 3) + + def forward(self, img, test_mode=False, **kwargs): + return [ + torch.rand((1, 32, 112, 112)), + torch.rand((1, 64, 56, 56)), + torch.rand((1, 128, 28, 28)), + ] + + def train_step(self, data_batch, optimizer): + loss = self.forward(**data_batch) + return dict(loss=loss) + + +def test_extract_process(): + with pytest.raises(AssertionError): + process = ExtractProcess( + pool_type='specified', backbone='resnet50', layer_indices=(-1, )) + + test_dataset = ExampleDataset() + test_dataset.evaluate = MagicMock(return_value=dict(test='success')) + data_loader = DataLoader( + test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False) + model = MMDataParallel(ExampleModel()) + + process = ExtractProcess( + pool_type='specified', backbone='resnet50', layer_indices=(0, 1, 2)) + + results = process.extract(model, data_loader) + assert 'feat1' in results + assert 'feat2' in results + assert 'feat3' in results + assert results['feat1'].shape == (1, 32 * 12 * 12) + assert results['feat2'].shape == (1, 64 * 6 * 6) + assert results['feat3'].shape == (1, 128 * 4 * 4) diff --git a/tests/test_runtime/test_extractor.py b/tests/test_runtime/test_extractor.py new file mode 100644 index 00000000..7f679a64 --- /dev/null +++ b/tests/test_runtime/test_extractor.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import logging +import tempfile +from unittest.mock import MagicMock + +import torch +import torch.nn as nn +from mmcv.parallel import MMDataParallel +from mmcv.runner import build_runner +from torch.utils.data import Dataset + +from mmselfsup.core.optimizer import build_optimizer +from mmselfsup.utils import Extractor + + +class ExampleDataset(Dataset): + + def __getitem__(self, idx): + results = dict(img=torch.tensor([1]), img_metas=dict()) + return results + + def __len__(self): + return 1 + + +class ExampleModel(nn.Module): + + def __init__(self): + super(ExampleModel, self).__init__() + self.test_cfg = None + self.conv = nn.Conv2d(3, 3, 3) + self.neck = nn.Identity() + + def forward(self, img, test_mode=False, **kwargs): + return img + + def train_step(self, data_batch, optimizer): + loss = self.forward(**data_batch) + return dict(loss=loss) + + +def test_extractor(): + test_dataset = ExampleDataset() + test_dataset.evaluate = MagicMock(return_value=dict(test='success')) + + runner_cfg = dict(type='EpochBasedRunner', max_epochs=2) + optim_cfg = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0005) + extractor = Extractor( + test_dataset, 1, 0, dist_mode=False, persistent_workers=False) + + # test extractor + with tempfile.TemporaryDirectory() as tmpdir: + model = MMDataParallel(ExampleModel()) + optimizer = build_optimizer(model, optim_cfg) + runner = build_runner( + runner_cfg, + default_args=dict( + model=model, + optimizer=optimizer, + work_dir=tmpdir, + logger=logging.getLogger())) + features = extractor(runner) + assert features.shape == (1, 1) diff --git a/tests/test_runtime/test_hooks/test_byol_hook.py b/tests/test_runtime/test_hooks/test_byol_hook.py new file mode 100644 index 00000000..4292e0db --- /dev/null +++ b/tests/test_runtime/test_hooks/test_byol_hook.py @@ -0,0 +1,79 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging +import tempfile +from unittest.mock import MagicMock + +import torch +import torch.nn as nn +from mmcv.parallel import MMDataParallel +from mmcv.runner import build_runner, obj_from_dict +from torch.utils.data import DataLoader, Dataset + +from mmselfsup.core.hooks import BYOLHook + + +class ExampleDataset(Dataset): + + def __getitem__(self, idx): + results = dict(img=torch.tensor([1]), img_metas=dict()) + return results + + def __len__(self): + return 1 + + +class ExampleModel(nn.Module): + + def __init__(self): + super(ExampleModel, self).__init__() + self.test_cfg = None + self.online_net = nn.Conv2d(3, 3, 3) + self.target_net = nn.Conv2d(3, 3, 3) + self.base_momentum = 0.96 + self.momentum = self.base_momentum + + def forward(self, img, img_metas, test_mode=False, **kwargs): + return img + + def train_step(self, data_batch, optimizer): + loss = self.forward(**data_batch) + return dict(loss=loss) + + @torch.no_grad() + def _momentum_update(self): + """Momentum update of the target network.""" + for param_ol, param_tgt in zip(self.online_net.parameters(), + self.target_net.parameters()): + param_tgt.data = param_tgt.data * self.momentum + \ + param_ol.data * (1. 
- self.momentum)
+
+    @torch.no_grad()
+    def momentum_update(self):
+        self._momentum_update()
+
+
+def test_byol_hook():
+    test_dataset = ExampleDataset()
+    test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
+    data_loader = DataLoader(
+        test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
+
+    runner_cfg = dict(type='EpochBasedRunner', max_epochs=2)
+    optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
+
+    # test BYOLHook
+    with tempfile.TemporaryDirectory() as tmpdir:
+        model = MMDataParallel(ExampleModel())
+        optimizer = obj_from_dict(optim_cfg, torch.optim,
+                                  dict(params=model.parameters()))
+        byol_hook = BYOLHook()
+        runner = build_runner(
+            runner_cfg,
+            default_args=dict(
+                model=model,
+                optimizer=optimizer,
+                work_dir=tmpdir,
+                logger=logging.getLogger()))
+        runner.register_hook(byol_hook)
+        runner.run([data_loader], [('train', 1)])
+        assert runner.model.module.momentum == 0.98
diff --git a/tests/test_runtime/test_hooks/test_deepcluster_hook.py b/tests/test_runtime/test_hooks/test_deepcluster_hook.py
new file mode 100644
index 00000000..9237f298
--- /dev/null
+++ b/tests/test_runtime/test_hooks/test_deepcluster_hook.py
@@ -0,0 +1,79 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import logging
+import tempfile
+from unittest.mock import MagicMock
+
+import torch
+from mmcv.parallel import MMDataParallel
+from mmcv.runner import build_runner
+from torch.utils.data import Dataset
+
+from mmselfsup.core.hooks import DeepClusterHook
+from mmselfsup.core.optimizer import build_optimizer
+from mmselfsup.models.algorithms import DeepCluster
+
+num_classes = 10
+with_sobel = True
+backbone = dict(
+    type='ResNet',
+    depth=50,
+    in_channels=2,
+    out_indices=[4],  # 0: conv-1, x: stage-x
+    norm_cfg=dict(type='BN'))
+neck = dict(type='AvgPool2dNeck')
+head = dict(
+    type='ClsHead',
+    with_avg_pool=False,  # already has avgpool in the neck
+    in_channels=2048,
+    num_classes=num_classes)
+
+
+class ExampleDataset(Dataset):
+
+    def __getitem__(self, idx):
+        results = dict(img=torch.randn((3, 224, 224)), img_metas=dict())
+        return results
+
+    def __len__(self):
+        return 10
+
+
+def test_deepcluster_hook():
+    test_dataset = ExampleDataset()
+    test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
+
+    alg = DeepCluster(
+        backbone=backbone, with_sobel=with_sobel, neck=neck, head=head)
+    extractor = dict(
+        dataset=test_dataset,
+        imgs_per_gpu=1,
+        workers_per_gpu=0,
+        persistent_workers=False)
+
+    runner_cfg = dict(type='EpochBasedRunner', max_epochs=3)
+    optim_cfg = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0005)
+    lr_config = dict(policy='CosineAnnealing', min_lr=0.)
+ + # test DeepClusterHook + with tempfile.TemporaryDirectory() as tmpdir: + model = MMDataParallel(alg) + optimizer = build_optimizer(model, optim_cfg) + deepcluster_hook = DeepClusterHook( + extractor=extractor, + clustering=dict(type='Kmeans', k=num_classes, pca_dim=16), + unif_sampling=True, + reweight=False, + reweight_pow=0.5, + initial=True, + interval=1, + dist_mode=False) + runner = build_runner( + runner_cfg, + default_args=dict( + model=model, + optimizer=optimizer, + work_dir=tmpdir, + logger=logging.getLogger())) + runner.register_training_hooks(lr_config) + runner.register_hook(deepcluster_hook) + assert deepcluster_hook.clustering_type == 'Kmeans' diff --git a/tests/test_runtime/test_hooks/test_densecl_hook.py b/tests/test_runtime/test_hooks/test_densecl_hook.py new file mode 100644 index 00000000..10f7d186 --- /dev/null +++ b/tests/test_runtime/test_hooks/test_densecl_hook.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging +import tempfile +from unittest.mock import MagicMock + +import torch +import torch.nn as nn +from mmcv.parallel import MMDataParallel +from mmcv.runner import build_runner, obj_from_dict +from torch.utils.data import DataLoader, Dataset + +from mmselfsup.core.hooks import DenseCLHook + + +class ExampleDataset(Dataset): + + def __getitem__(self, idx): + results = dict(img=torch.tensor([1]), img_metas=dict()) + return results + + def __len__(self): + return 1 + + +class ExampleModel(nn.Module): + + def __init__(self): + super(ExampleModel, self).__init__() + self.test_cfg = None + self.loss_lambda = 0.5 + self.conv = nn.Conv2d(3, 3, 3) + + def forward(self, img, img_metas, test_mode=False, **kwargs): + return img + + def train_step(self, data_batch, optimizer): + loss = self.forward(**data_batch) + return dict(loss=loss) + + +def test_densecl_hook(): + test_dataset = ExampleDataset() + test_dataset.evaluate = MagicMock(return_value=dict(test='success')) + data_loader = DataLoader( + test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False) + + runner_cfg = dict(type='EpochBasedRunner', max_epochs=2) + optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) + + # test DenseCLHook + with tempfile.TemporaryDirectory() as tmpdir: + model = MMDataParallel(ExampleModel()) + optimizer = obj_from_dict(optim_cfg, torch.optim, + dict(params=model.parameters())) + + densecl_hook = DenseCLHook(start_iters=1) + runner = build_runner( + runner_cfg, + default_args=dict( + model=model, + optimizer=optimizer, + work_dir=tmpdir, + logger=logging.getLogger())) + runner.register_hook(densecl_hook) + runner.run([data_loader], [('train', 1)]) + cur_iter = runner.iter + if cur_iter >= 1: + assert runner.model.module.loss_lambda == 0.5 + else: + assert runner.model.module.loss_lambda == 0. 
diff --git a/tests/test_runtime/test_hooks/test_optimizer_hook.py b/tests/test_runtime/test_hooks/test_optimizer_hook.py new file mode 100644 index 00000000..344c855c --- /dev/null +++ b/tests/test_runtime/test_hooks/test_optimizer_hook.py @@ -0,0 +1,125 @@ +import logging +import tempfile +from unittest.mock import MagicMock + +import pytest +import torch +import torch.nn as nn +from mmcv.parallel import MMDataParallel +from mmcv.runner import build_runner, obj_from_dict +from torch.utils.data import DataLoader, Dataset + +from mmselfsup.core.hooks import DistOptimizerHook, GradAccumFp16OptimizerHook + + +class ExampleDataset(Dataset): + + def __getitem__(self, idx): + results = dict(img=torch.tensor([1.]), img_metas=dict()) + return results + + def __len__(self): + return 1 + + +class ExampleModel(nn.Module): + + def __init__(self): + super(ExampleModel, self).__init__() + self.test_cfg = None + self.linear = nn.Linear(1, 1) + self.prototypes_test = nn.Linear(1, 1) + + def forward(self, img, img_metas, test_mode=False, **kwargs): + out = self.linear(img) + out = self.prototypes_test(out) + return out + + def train_step(self, data_batch, optimizer): + loss = self.forward(**data_batch) + return dict(loss=loss, num_samples=len(data_batch)) + + +def test_optimizer_hook(): + test_dataset = ExampleDataset() + test_dataset.evaluate = MagicMock(return_value=dict(test='success')) + data_loader = DataLoader( + test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False) + + runner_cfg = dict(type='EpochBasedRunner', max_epochs=5) + optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) + optim_hook_cfg = dict( + grad_clip=dict(max_norm=10), frozen_layers_cfg=dict(prototypes=5005)) + + optimizer_hook = DistOptimizerHook(**optim_hook_cfg) + + # test DistOptimizerHook + with tempfile.TemporaryDirectory() as tmpdir: + model = MMDataParallel(ExampleModel()) + optimizer = obj_from_dict(optim_cfg, torch.optim, + dict(params=model.parameters())) + + runner = build_runner( + runner_cfg, + default_args=dict( + model=model, + optimizer=optimizer, + work_dir=tmpdir, + logger=logging.getLogger())) + runner.register_training_hooks(optimizer_hook) + + prototypes_start = [] + for name, p in runner.model.module.named_parameters(): + if 'prototypes_test' in name: + prototypes_start.append(p) + + # run training + runner.run([data_loader], [('train', 1)]) + + prototypes_end = [] + for name, p in runner.model.module.named_parameters(): + if 'prototypes_test' in name: + prototypes_end.append(p) + + assert len(prototypes_start) == len(prototypes_end) + for i in range(len(prototypes_start)): + p_start = prototypes_start[i] + p_end = prototypes_end[i] + assert p_start == p_end + + +@pytest.mark.skipif( + not torch.cuda.is_available(), reason='CUDA is not available.') +def test_fp16optimizer_hook(): + test_dataset = ExampleDataset() + test_dataset.evaluate = MagicMock(return_value=dict(test='success')) + data_loader = DataLoader( + test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False) + + runner_cfg = dict(type='EpochBasedRunner', max_epochs=5) + optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) + optim_hook_cfg = dict( + grad_clip=dict(max_norm=10), + loss_scale=16., + frozen_layers_cfg=dict(prototypes=5005)) + + optimizer_hook = GradAccumFp16OptimizerHook(**optim_hook_cfg) + + # test GradAccumFp16OptimizerHook + with tempfile.TemporaryDirectory() as tmpdir: + model = MMDataParallel(ExampleModel()) + optimizer = obj_from_dict(optim_cfg, torch.optim, + 
dict(params=model.parameters())) + + runner = build_runner( + runner_cfg, + default_args=dict( + model=model, + optimizer=optimizer, + work_dir=tmpdir, + logger=logging.getLogger(), + meta=dict())) + runner.register_training_hooks(optimizer_hook) + # run training + runner.run([data_loader], [('train', 1)]) + assert runner.meta['fp16']['loss_scaler']['scale'] == 16. diff --git a/tests/test_runtime/test_hooks/test_simsiam_hook.py b/tests/test_runtime/test_hooks/test_simsiam_hook.py new file mode 100644 index 00000000..19c274ce --- /dev/null +++ b/tests/test_runtime/test_hooks/test_simsiam_hook.py @@ -0,0 +1,77 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging +import tempfile +from unittest.mock import MagicMock + +import torch +import torch.nn as nn +from mmcv.parallel import MMDataParallel +from mmcv.runner import build_runner +from torch.utils.data import DataLoader, Dataset + +from mmselfsup.core.hooks import SimSiamHook +from mmselfsup.core.optimizer import build_optimizer + + +class ExampleDataset(Dataset): + + def __getitem__(self, idx): + results = dict(img=torch.tensor([1]), img_metas=dict()) + return results + + def __len__(self): + return 1 + + +class ExampleModel(nn.Module): + + def __init__(self): + super(ExampleModel, self).__init__() + self.test_cfg = None + self.conv = nn.Conv2d(3, 3, 3) + self.predictor = nn.Linear(2, 1) + + def forward(self, img, img_metas, test_mode=False, **kwargs): + return img + + def train_step(self, data_batch, optimizer): + loss = self.forward(**data_batch) + return dict(loss=loss) + + +def test_simsiam_hook(): + test_dataset = ExampleDataset() + test_dataset.evaluate = MagicMock(return_value=dict(test='success')) + + data_loader = DataLoader( + test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False) + runner_cfg = dict(type='EpochBasedRunner', max_epochs=2) + optim_cfg = dict( + type='SGD', + lr=0.05, + momentum=0.9, + weight_decay=0.0005, + paramwise_options={'predictor': dict(fix_lr=True)}) + lr_config = dict(policy='CosineAnnealing', min_lr=0.) + + # test SimSiamHook + with tempfile.TemporaryDirectory() as tmpdir: + model = MMDataParallel(ExampleModel()) + optimizer = build_optimizer(model, optim_cfg) + simsiam_hook = SimSiamHook(True, 0.05) + runner = build_runner( + runner_cfg, + default_args=dict( + model=model, + optimizer=optimizer, + work_dir=tmpdir, + logger=logging.getLogger())) + runner.register_training_hooks(lr_config) + runner.register_hook(simsiam_hook) + runner.run([data_loader], [('train', 1)]) + + for param_group in runner.optimizer.param_groups: + if 'fix_lr' in param_group and param_group['fix_lr']: + assert param_group['lr'] == 0.05 + else: + assert param_group['lr'] != 0.05 diff --git a/tests/test_runtime/test_hooks/test_swav_hook.py b/tests/test_runtime/test_hooks/test_swav_hook.py new file mode 100644 index 00000000..3f2f7fed --- /dev/null +++ b/tests/test_runtime/test_hooks/test_swav_hook.py @@ -0,0 +1,76 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import logging +import tempfile +from unittest.mock import MagicMock + +import torch +import torch.nn as nn +from mmcv.parallel import MMDataParallel +from mmcv.runner import build_runner, obj_from_dict +from torch.utils.data import DataLoader, Dataset + +from mmselfsup.core.hooks import SwAVHook +from mmselfsup.models.heads import SwAVHead + + +class ExampleDataset(Dataset): + + def __getitem__(self, idx): + results = dict(img=torch.tensor([1.]), img_metas=dict()) + return results + + def __len__(self): + return 1 + + +class ExampleModel(nn.Module): + + def __init__(self): + super(ExampleModel, self).__init__() + self.test_cfg = None + self.linear = nn.Linear(1, 1) + self.prototypes_test = nn.Linear(1, 1) + self.head = SwAVHead(feat_dim=2, num_crops=[2, 6], num_prototypes=3) + + def forward(self, img, img_metas, test_mode=False, **kwargs): + out = self.linear(img) + out = self.prototypes_test(out) + return out + + def train_step(self, data_batch, optimizer): + loss = self.forward(**data_batch) + return dict(loss=loss) + + +def test_swav_hook(): + test_dataset = ExampleDataset() + test_dataset.evaluate = MagicMock(return_value=dict(test='success')) + data_loader = DataLoader( + test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False) + + runner_cfg = dict(type='EpochBasedRunner', max_epochs=2) + optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) + + # test SwAVHook + with tempfile.TemporaryDirectory() as tmpdir: + model = MMDataParallel(ExampleModel()) + optimizer = obj_from_dict(optim_cfg, torch.optim, + dict(params=model.parameters())) + + swav_hook = SwAVHook( + batch_size=1, + epoch_queue_starts=15, + crops_for_assign=[0, 1], + feat_dim=128, + queue_length=300) + runner = build_runner( + runner_cfg, + default_args=dict( + model=model, + optimizer=optimizer, + work_dir=tmpdir, + logger=logging.getLogger())) + runner.register_hook(swav_hook) + runner.run([data_loader], [('train', 1)]) + assert swav_hook.queue_length == 300 + assert runner.model.module.head.use_queue is False diff --git a/tests/test_utils/test_alias_multinomial.py b/tests/test_utils/test_alias_multinomial.py new file mode 100644 index 00000000..42c52ed8 --- /dev/null +++ b/tests/test_utils/test_alias_multinomial.py @@ -0,0 +1,21 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch + +from mmselfsup.utils import AliasMethod + + +def test_alias_multinomial(): + example_in = torch.Tensor([1, 2, 3, 4]) + example_alias_method = AliasMethod(example_in) + assert (example_alias_method.prob.numpy() <= 1).all() + assert len(example_in) == len(example_alias_method.alias) + + # test assertion if N is smaller than 0 + with pytest.raises(AssertionError): + example_alias_method.draw(-1) + with pytest.raises(AssertionError): + example_alias_method.draw(0) + + example_res = example_alias_method.draw(5) + assert len(example_res) == 5 diff --git a/tests/test_utils/test_clustering.py b/tests/test_utils/test_clustering.py new file mode 100644 index 00000000..332de657 --- /dev/null +++ b/tests/test_utils/test_clustering.py @@ -0,0 +1,28 @@ +import numpy as np +import pytest +import torch + +from mmselfsup.utils.clustering import PIC, Kmeans + + +@pytest.mark.skipif( + not torch.cuda.is_available(), reason='CUDA is not available.') +def test_kmeans(): + fake_input = np.random.rand(10, 8).astype(np.float32) + pca_dim = 2 + + kmeans = Kmeans(2, pca_dim) + loss = kmeans.cluster(fake_input) + assert loss is not None + + with pytest.raises(AssertionError): + loss = kmeans.cluster(np.random.rand(10, 8)) + + +@pytest.mark.skipif( + not torch.cuda.is_available(), reason='CUDA is not available.') +def test_pic(): + fake_input = np.random.rand(1000, 16).astype(np.float32) + pic = PIC(pca_dim=8) + res = pic.cluster(fake_input) + assert res == 0 diff --git a/tests/test_utils/test_misc.py b/tests/test_utils/test_misc.py new file mode 100644 index 00000000..d8e4972e --- /dev/null +++ b/tests/test_utils/test_misc.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmselfsup.utils.misc import tensor2imgs + + +def test_tensor2imgs(): + with pytest.raises(AssertionError): + tensor2imgs(torch.rand((3, 16, 16))) + fake_tensor = torch.rand((3, 3, 16, 16)) + fake_imgs = tensor2imgs(fake_tensor) + assert len(fake_imgs) == 3 + assert fake_imgs[0].shape == (16, 16, 3) diff --git a/tests/test_utils/test_test_helper.py b/tests/test_utils/test_test_helper.py new file mode 100644 index 00000000..8dcc6a35 --- /dev/null +++ b/tests/test_utils/test_test_helper.py @@ -0,0 +1,44 @@ +from unittest.mock import MagicMock + +import numpy as np +import torch +import torch.nn as nn +from torch.utils.data import DataLoader, Dataset + +from mmselfsup.utils.test_helper import single_gpu_test + + +class ExampleDataset(Dataset): + + def __getitem__(self, idx): + results = dict(img=torch.tensor([1]), img_metas=dict()) + return results + + def __len__(self): + return 1 + + +class ExampleModel(nn.Module): + + def __init__(self): + super(ExampleModel, self).__init__() + self.test_cfg = None + self.conv = nn.Conv2d(3, 3, 3) + + def forward(self, img, mode='test', **kwargs): + return dict(img=img) + + def train_step(self, data_batch, optimizer): + loss = self.forward(**data_batch) + return dict(loss=loss) + + +def test_test_helper(): + test_dataset = ExampleDataset() + test_dataset.evaluate = MagicMock(return_value=dict(test='success')) + data_loader = DataLoader( + test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False) + model = ExampleModel() + + res = single_gpu_test(model, data_loader) + assert res['img'] == np.array([[1]]) diff --git a/tests/test_utils/test_version_utils.py b/tests/test_utils/test_version_utils.py new file mode 100644 index 00000000..16ff52b7 --- /dev/null +++ b/tests/test_utils/test_version_utils.py @@ -0,0 +1,21 @@ 
+# Copyright (c) OpenMMLab. All rights reserved. +from mmselfsup import digit_version + + +def test_digit_version(): + assert digit_version('0.2.16') == (0, 2, 16, 0, 0, 0) + assert digit_version('1.2.3') == (1, 2, 3, 0, 0, 0) + assert digit_version('1.2.3rc0') == (1, 2, 3, 0, -1, 0) + assert digit_version('1.2.3rc1') == (1, 2, 3, 0, -1, 1) + assert digit_version('1.0rc0') == (1, 0, 0, 0, -1, 0) + assert digit_version('1.0') == digit_version('1.0.0') + assert digit_version('1.5.0+cuda90_cudnn7.6.3_lms') == digit_version('1.5') + assert digit_version('1.0.0dev') < digit_version('1.0.0a') + assert digit_version('1.0.0a') < digit_version('1.0.0a1') + assert digit_version('1.0.0a') < digit_version('1.0.0b') + assert digit_version('1.0.0b') < digit_version('1.0.0rc') + assert digit_version('1.0.0rc1') < digit_version('1.0.0') + assert digit_version('1.0.0') < digit_version('1.0.0post') + assert digit_version('1.0.0post') < digit_version('1.0.0post1') + assert digit_version('v1') == (1, 0, 0, 0, 0, 0) + assert digit_version('v1.1.5') == (1, 1, 5, 0, 0, 0)
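
Note on running the added suite: the files above are ordinary pytest modules, so once the patch is applied they can be collected straight from the tests/ directory. The snippet below is a minimal sketch, assuming pytest, torch, mmcv and mmselfsup are importable and the working directory is the repository root; the helper file name run_new_tests.py is illustrative and not part of this patch, and the CUDA-only cases skip themselves via their own pytest.mark.skipif decorators.

# run_new_tests.py (hypothetical helper, equivalent to `python -m pytest tests/ -v`)
import sys

import pytest

if __name__ == '__main__':
    # Collect everything under tests/ and print one line per test function;
    # pytest.main returns the exit code, which we pass back to the shell.
    sys.exit(pytest.main(['tests/', '-v']))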