From ff9c230174f68efa343dcdb2b28170e16fe29839 Mon Sep 17 00:00:00 2001
From: liangyuanzhi
Date: Thu, 20 Jun 2019 17:38:11 +0800
Subject: [PATCH] refactored

---
 .DS_Store                                        | Bin 10244 -> 0 bytes
 .gitattributes                                   |   2 -
 CUB_test.py                                      | 115 ---------
 LICENSE => LICENSE.txt                           |   0
 README.md                                        | 106 +++++---
 config.py                                        | 131 ++++++++++
 dataset/dataset_CUB_test.py                      |  65 -----
 dataset/dataset_DCL.py                           |  95 -------
 datasets/CUB_pre.py                              |  66 -----
 models/Asoftmax_linear.py                        |  59 +++++
 models/LoadModel.py                              |  80 ++++++
 models/__pycache__/LoadModel.cpython-36.pyc      | Bin 0 -> 2406 bytes
 models/__pycache__/focal_loss.cpython-36.pyc     | Bin 0 -> 1427 bytes
 models/focal_loss.py                             |  48 ++++
 resnet_swap_2loss_add.py                         |  42 ----
 test.py                                          | 152 ++++++++++++
 train.py                                         | 230 +++++++++++++++++
 train_rel.py                                     | 136 ----------
 .../__pycache__/__init__.cpython-36.pyc          | Bin 0 -> 165 bytes
 .../__pycache__/functional.cpython-36.pyc        | Bin 0 -> 24076 bytes
 .../__pycache__/transforms.cpython-36.pyc        | Bin 0 -> 40659 bytes
 utils/Asoftmax_loss.py                           |  47 ++++
 .../__pycache__/Asoftmax_loss.cpython-36.pyc     | Bin 0 -> 1517 bytes
 utils/__pycache__/autoaugment.cpython-36.pyc     | Bin 0 -> 8871 bytes
 utils/__pycache__/dataset_DCL.cpython-36.pyc     | Bin 0 -> 6281 bytes
 utils/__pycache__/eval_model.cpython-36.pyc      | Bin 0 -> 2424 bytes
 utils/__pycache__/train_model.cpython-36.pyc     | Bin 0 -> 3796 bytes
 utils/__pycache__/utils.cpython-36.pyc           | Bin 0 -> 4483 bytes
 utils/autoaugment.py                             | 232 ++++++++++++++++++
 utils/dataset_DCL.py                             | 181 ++++++++++++++
 utils/eval_model.py                              |  81 ++++++
 utils/test_tool.py                               |  99 ++++++++
 utils/train_model.py                             | 164 +++++++++++++
 utils/train_util_DCL.py                          | 128 --------
 utils/utils.py                                   | 124 ++++++++++
 35 files changed, 1705 insertions(+), 678 deletions(-)
 delete mode 100644 .DS_Store
 delete mode 100644 .gitattributes
 delete mode 100644 CUB_test.py
 rename LICENSE => LICENSE.txt (100%)
 create mode 100644 config.py
 delete mode 100644 dataset/dataset_CUB_test.py
 delete mode 100644 dataset/dataset_DCL.py
 delete mode 100644 datasets/CUB_pre.py
 create mode 100644 models/Asoftmax_linear.py
 create mode 100644 models/LoadModel.py
 create mode 100644 models/__pycache__/LoadModel.cpython-36.pyc
 create mode 100644 models/__pycache__/focal_loss.cpython-36.pyc
 create mode 100644 models/focal_loss.py
 delete mode 100644 resnet_swap_2loss_add.py
 create mode 100644 test.py
 create mode 100644 train.py
 delete mode 100644 train_rel.py
 create mode 100644 transforms/__pycache__/__init__.cpython-36.pyc
 create mode 100644 transforms/__pycache__/functional.cpython-36.pyc
 create mode 100644 transforms/__pycache__/transforms.cpython-36.pyc
 create mode 100644 utils/Asoftmax_loss.py
 create mode 100644 utils/__pycache__/Asoftmax_loss.cpython-36.pyc
 create mode 100644 utils/__pycache__/autoaugment.cpython-36.pyc
 create mode 100644 utils/__pycache__/dataset_DCL.cpython-36.pyc
 create mode 100644 utils/__pycache__/eval_model.cpython-36.pyc
 create mode 100644 utils/__pycache__/train_model.cpython-36.pyc
 create mode 100644 utils/__pycache__/utils.cpython-36.pyc
 create mode 100644 utils/autoaugment.py
 create mode 100644 utils/dataset_DCL.py
 create mode 100644 utils/eval_model.py
 create mode 100644 utils/test_tool.py
 create mode 100644 utils/train_model.py
 delete mode 100644 utils/train_util_DCL.py
 create mode 100644 utils/utils.py

diff --git a/.DS_Store b/.DS_Store
deleted file mode 100644
index f70de0d4ebff2157109a9c52a7462c7f26017759..0000000000000000000000000000000000000000
Binary files a/.DS_Store and /dev/null differ
zbQF`YanZ%{FF6IhiEZkU+U?88rEP09C*hr&z@Fut;)>geLa}GkTu_^=x_B@L3L8T+ zq*!11G7V=Q@nE=)0m(Jwt5^0>+p%A?-MhG@BU9~@I>2<8fnik-QAhNNt)uoFE4dd{ z635ogwDT#QdK-*)OG^C zq>J&C83@r>-u_#u>k*_b`sSV&6%oG$0%6-oTYLj1pV}j06@n;F0#ef9H%BDG=-Q~* zWs0O2RL7^!P)6s@`ExJSag1CUPS0Ex{%pPi`oZ52F2i>ApaFSk{(nMJ;!rN#8u^#H za9@O7%>q7)`FgjSmIj1z_!iRB7r#p1MS92-l71cO!DhT`-JKGDj?Zr3lZWe-B9v9P ztjn48fm2KhpeHD7|1x|3j3HZF(s^MCn6P z`jhJg96a8Oqrg?{4_N-UQCdw(`!-7BUQBHrZnkd?eHD(aQC?BWK?r>u@o?gTrwQa4 zW}fvTWTB#*zaDN+N$RL?NOc|cgOOtWU$uhIp#>u<59%1kH;*V24dWX}TR-VCW+UkBiJN|dI1lyr1~WH1fhE8-x|KmFp>yWB?O0Dctfey zMw)7#a;^5yc2StmSf(Y5%OB7G^LZLq@Xv9}kZJMVLM)RZe}T#doR=35Z8-Z6A8Fzn zKzy$HHW0AAw_AUh6P&XX@Bt&Sn2&}hSZ(Ob}NROEYr>;ZNc?f z{CrGoMO6*%KR08Rnpu31j*KQ7`!7ZW5A*F+b>4PZ0U@CW&d12k;9xn}5qyYGU=APr zBy&jhnNJ}!EQ953xY!SpzxTosjVQ>#CIGMx*eKTj1c8luaESE)A4+56)`2_>MX)({wGnI7WVO1b}jH12bQrrQG#g|9YtU-c|HGizN4#$A=mmEKC%eeDc^h7 zj3ei0TLpljun#twN^JV5Zp6Y^hCU!4FIM<4=Rn1Ck{D1ieNy@Y1!{?Gkq`TiB5zpm z@!_Vv(Wk25S2_AcJme}*4ldU|MFG;ryueQ*;L;Qg+h{^z_i;grppOM3F%w+6QA}Q9 z&LXB5#T48U_TXPfOgW0dqJpjvZW!H)#8_^XfAK+WQ6^yCz!SJb2XnGmhhBuPak;vX zb8DQxt=RRnSD zx%&e48?tVCkMpBgN{%{rUq4B)1___LZyuG}LSgA5n}jpdTcPE!=r-rOVYSiVUmRMO z!{D;TWp=!@28hTzwA}tlJ%Q-o=A=>qz;m<2$^92&V(*#JqS9yNCrdLk5f}Jjs65z? zt89>{JouHC$&n}fzSBa$x`6A^eGxu`Tn=@l)15Y@XQjoathDGm_hcF~&1RuWZYXBB zK|8Uw*P;tl;jkx8ati?-kj&ZG_!Di~6E8XLBrmNn`T#ahEcq+CIf-LWO6|^V<5;jg z#vQV()}1>C`!Bd4lGDB#VNmZMu_;kE4C>mhpwpQJo!0$SR@uIdDsjdUVy^0*PkLgs ztJFvHn#Lt_s|=U4r(k?nBcm5uG534O)DOAjJpPD)!cuWsZ`=%7%s9v^yyahFEvbAA z4??76E8n&LJ~d$ZZH;5M#p$s2AShvsGr(v|s-2i?i0;Y^$H3sgjuq@KI0jhEg1krY zZDo^Ky~!usqh=uP(2P_yJyLx*o0J|&)ky8d1MziypdxmqcyJJ~0eWuH;)QpPgHwg9Wj$kA{L4xK}*SFSI2DI_{>w$z5 zCy%Wr_SFUBB(_nBoK9+-ayCZD1_LJFr}0)_DF!obQHpuL1?GD1XMcvj-}#Vz!2Zt! zoJYPXw!v9kg$8GxLO_lUSa3}~>gL-JN5J36$5XIE^Kea8??yh7+#)fWnEaRiW@EL` zdRpS z9vorQ`_M00tTJX&6RM2%#E9-+v78-S&)dlTr+NTjA4EjAYx3h^tYZ!@@s zIlsbZAySn_1Fjt#jj$3K%ZQ7gu=4V2ViOZqUcWdqJ^RKr;abCTXKooLKw=Q+Q;X=6 z0rV;I`yb(LgtuM1aVJhbsI2S=JqGviM#EBnoHtS~aXjtsSM2tO^@9j Hd;0$XL|VV& literal 0 HcmV?d00001 diff --git a/utils/Asoftmax_loss.py b/utils/Asoftmax_loss.py new file mode 100644 index 0000000..44327eb --- /dev/null +++ b/utils/Asoftmax_loss.py @@ -0,0 +1,47 @@ +import torch +import torch.nn as nn +from torch.autograd import Variable +import torch.nn.functional as F +from torch.nn import Parameter +import math + +import pdb + +class AngleLoss(nn.Module): + def __init__(self, gamma=0): + super(AngleLoss, self).__init__() + self.gamma = gamma + self.it = 0 + self.LambdaMin = 50.0 + self.LambdaMax = 1500.0 + self.lamb = 1500.0 + + def forward(self, input, target, decay=None): + self.it += 1 + cos_theta,phi_theta = input + target = target.view(-1,1) #size=(B,1) + + index = cos_theta.data * 0.0 #size=(B,Classnum) + index.scatter_(1,target.data.view(-1,1),1) + index = index.byte() + index = Variable(index) + + if decay is None: + self.lamb = max(self.LambdaMin,self.LambdaMax/(1+0.1*self.it )) + else: + self.LambdaMax *= decay + self.lamb = max(self.LambdaMin, self.LambdaMax) + output = cos_theta * 1.0 #size=(B,Classnum) + output[index] -= cos_theta[index]*(1.0+0)/(1+self.lamb) + output[index] += phi_theta[index]*(1.0+0)/(1+self.lamb) + + logpt = F.log_softmax(output, 1) + logpt = logpt.gather(1,target) + logpt = logpt.view(-1) + pt = Variable(logpt.data.exp()) + + loss = -1 * (1-pt)**self.gamma * logpt + loss = loss.mean() + + return loss + diff --git a/utils/__pycache__/Asoftmax_loss.cpython-36.pyc b/utils/__pycache__/Asoftmax_loss.cpython-36.pyc new file mode 100644 index 
diff --git a/utils/__pycache__/Asoftmax_loss.cpython-36.pyc b/utils/__pycache__/Asoftmax_loss.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5eb9f2d09162824ea6ad50364ba84a97979c6fc7
Binary files /dev/null and b/utils/__pycache__/Asoftmax_loss.cpython-36.pyc differ

diff --git a/utils/__pycache__/dataset_DCL.cpython-36.pyc b/utils/__pycache__/dataset_DCL.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e7167fda90b31921ecd988925bca3906312c4568
Binary files /dev/null and b/utils/__pycache__/dataset_DCL.cpython-36.pyc differ
zw~M+@qs9&t3<_xGbnI=Sx*UoJW%Wxc(dYz-K8AYsDx{>nIra_6$rz|kX0Qw&1y3Pi zz|LbotDfNGGrwY=K9lGjdT=wB17@QKf2fB3$C!Y9$W{bKLQ zxkYagbZzqKm#&KO0!A&P(yaz^?KAGb*3r%>S~v`T-W=&DAX`{SZm9wYp6W=WUVq z{t4O(4n4IuOJz2M$q_8i7MrR!QNDcV zS(aa}4?Nj$-f&obAFBa*ZX&y|=l!!iPydZy;~8xY0rBiJoZc`_)YoVhVVAD#$Niu` zY=x0B*vhC&IL}B7gjd7;Af}*JF+LNk7?l~MwHqg4bg1nCR;3_9eVvjUlsu25QD>f%a@DF? z%W}o?QLFLaL1_`CYdnJ5eFRh$jI3ZLm5!D!vCpX|pop5h0sEcTQGnk;NOf#|`ux~} zH6k8AmY-JP0qhRiL5b>vW`EsI5?P2mT zQdz@c;WSGS0V{{>7{Oey9>SJgd?}f>KtNcPM~jm)oas-Sz<_?_$S4qIXZCj#5Pyje zttS>-iul;V^+SdhoC+Zy;;PWO3Oi#5U>DaDciQiq>v!Nl+`}Z3nMD_&&>;(@3((j* z{28|brlzg8;jCOZP>KTWyaeawxe$ zpzdv*GhN~YgT@UM7|K`25}?6gz6K!A z1b7Jd6vG9FN`wxGGjUq=9xdt{WN;0(@S+}{>`51O^^C6)>IUMt1U_h$rNdd+N0zQ=gzE-E>kz~ zrBx)pynfZU{TF@LpW|!RRKg@3nskST{sRe+Y5J&xvO%gp<@7xB2RywHaC;PB^K*-- zACfpRAP)uu2U;kp3F8i%;9kk_qpe4KtS$uxz9JX zgKIlQs7J1bo_v6bZikSsY{FSa6b?g9$xu$oVosTD!~#VLG=M_@@)g>JuWdiT3x>Ok zx3E1#0E2GIE?-e3rhWKi<9xqD^RG~HCJ)wj9!qTZDTx^wO(e$r4qZs)-kDUAS|N+`+@DCs z@Vnstm+-qZ%kMtuG_l{MTvkK$H%e-sh8<8sF%>gdkvXX^^*l^Z|=)uJT$lDoZBfQlHikzO>q=W&W&Mst~J;*vi zxoT5iND0}nB1hdU-+lM}bq-6_J=C?0y17-_`{6FXIqK3TX5+n=5txmdHIezvgvf&a7^Y~+mgSGwi4!MH)3_f#1Xz+KDYhXPiVp3H9lBs^hjkNL2#O>pa%IV* z=zh{Cufr}ydl|65u-~&AuRG;0^t6Yx+a#?O^8F%e9v&YN&_BeskFoRNt}Z_*f69SL~#O2(-gh^v(IAxFp0oBce6ad7X=v$12xn8pW6K` zw7h|ilRoJi6Vj)lF)^?zJ$TXV{<_&6Y<9!V?ss1Id#`(cqku(y9`Se($wHbvm3Gcq zPl^8!hVV|ich|R%vq7|-#D03T%_8sit-Icnz3nhayrY;Oj}Es-JWlfM=!u_rgDi}a zt>JS?L*7()6fk128v4LeJbu^E7SS|d2@e6l3Mzs}#}--|3A}&?0CR*3!5(^R0v0<2haywTUrq%xIH4VL{L(9As#{Mmq3Hzd9jr>6;%6^456v z2gmq{aU1-jXAkLke!RfzVj-j}q(3(yn5o6<7}Ma*{yg->73d~Le*qz$+7|^8E%+?~ zcUx2i=n4^w6WVV9NBTx=8g7SH*XKJmRIO5&1O z!(OKTrQiO1K(J1Em5kRhkD?v;D0*3#{Z&Cjr%m`pL~u==NSn;$=121S8QW#eSK9E$ znNRcQint=2Wiq~sXjH>m>og~vur81#{L+LVMpy^&nR{)3Uq|bpT^3h+)iYW2(F_l8 zCO?vU)_1^fMQI>}Df^Ke}TvCJYbvt{Z zT0;MAPJO%Z>ExwGHVikY~3Ufi3BLa4g{LI|K{oW^`(U0R&^aVkrmmqe-O z$vV$C>OSVtKxw3@#xPasvUqn#-Bz-3i-CNba+VFBYmK9K<>m2v6r!d&t)MW4WtRPFRiBx?$B^1OGgp+a8~-x+PEeyeW%Haa`ckS z4fz?%W+3mRB-URj$? 
diff --git a/utils/__pycache__/train_model.cpython-36.pyc b/utils/__pycache__/train_model.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..038caf5e65c33c1ac9f090eff5d771235e60ef9b
Binary files /dev/null and b/utils/__pycache__/train_model.cpython-36.pyc differ

diff --git a/utils/__pycache__/utils.cpython-36.pyc b/utils/__pycache__/utils.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dbc4098116dec03bb11408fe67ed187f8e9b3b17
Binary files /dev/null and b/utils/__pycache__/utils.cpython-36.pyc differ
z!*O1{sUG4E4ZR3{756mfCnzQ1=xvzEL{V#nsIMNf5^3uD4k8iPndy$h`dH(BSH-S-rG*gdrw z@jjMCq&uU4bKqavSuSE8DRb?>GY;I91Yy*zODh|XA|dW#M9HVhCAaV@O**8k|AJC_ z4-Nsjcux=u2(36sq`sR<^KrEOTWN|Y8F!sBZLw!+dY{LK(jjL-5kI8)oaadKK1LbX z7j{we{V)l#%=b(0DaR3~KSY^kik9ABAFvg6x;$s`*AqVByvk>U2O^GXq9@dYV1RdR z0AoXgiQz@oLjt3?5I-gHGl;G=J;TYBhAZ%r4gmT*5PTbhGTJv~U;3Rc&F^#CtE-n7 z0pKZi#kWY%1*^39FQLii)EHruBr}5$l)6k%01IUt@)Esegj$!iT;;bAFV*vAJlhJ- zbVVgR12mWL?8t?^agx07vG+6z^N8<+vw7&>n3ik{$`v@ns=h%jI@fqmY&Rg&%$dwlh%q*phM2>h& z>yYn(vYK+u|ADf1u_itjvY6xkc7-yDI%#FXS@t)G%NU!{BtBxik{*efOLCkMBT(i* zcZsVN%4%v9JizPt+%mXxjw`+I&GoO-MdX>09TNpVGsK_K)=;DH&YRUr?zuG^HPICT zHxvOkQG2moOA&CZ_lgdo&bI}6xkoQVelzMV5sySWs{=Pw?a+Tk^(3a8p=NqZJ>{OV z&q5hRpHqd3q6;F&$60#3E`EY0Y3!!!g5pM6#MEw$hO9bW{L>Fu$l-$rb;^y{o)ZQzS!=Kt4&Yv%q<8cb)n99*vvI4Gtt5nOO8 zawu+tXGA+7A!rUX%O{Gs(X{Pzfj*{OAghM%8toOY;kLZm;=*G4zv}6z=Kufz literal 0 HcmV?d00001 diff --git a/utils/autoaugment.py b/utils/autoaugment.py new file mode 100644 index 0000000..9f39b51 --- /dev/null +++ b/utils/autoaugment.py @@ -0,0 +1,232 @@ +from PIL import Image, ImageEnhance, ImageOps +import numpy as np +import random + + +class ImageNetPolicy(object): + """ Randomly choose one of the best 24 Sub-policies on ImageNet. + Example: + >>> policy = ImageNetPolicy() + >>> transformed = policy(image) + Example as a PyTorch Transform: + >>> transform=transforms.Compose([ + >>> transforms.Resize(256), + >>> ImageNetPolicy(), + >>> transforms.ToTensor()]) + """ + def __init__(self, fillcolor=(128, 128, 128)): + self.policies = [ + SubPolicy(0.4, "posterize", 8, 0.6, "rotate", 9, fillcolor), + SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor), + SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor), + SubPolicy(0.6, "posterize", 7, 0.6, "posterize", 6, fillcolor), + SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor), + + SubPolicy(0.4, "equalize", 4, 0.8, "rotate", 8, fillcolor), + SubPolicy(0.6, "solarize", 3, 0.6, "equalize", 7, fillcolor), + SubPolicy(0.8, "posterize", 5, 1.0, "equalize", 2, fillcolor), + SubPolicy(0.2, "rotate", 3, 0.6, "solarize", 8, fillcolor), + SubPolicy(0.6, "equalize", 8, 0.4, "posterize", 6, fillcolor), + + SubPolicy(0.8, "rotate", 8, 0.4, "color", 0, fillcolor), + SubPolicy(0.4, "rotate", 9, 0.6, "equalize", 2, fillcolor), + SubPolicy(0.0, "equalize", 7, 0.8, "equalize", 8, fillcolor), + SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor), + SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor), + + SubPolicy(0.8, "rotate", 8, 1.0, "color", 2, fillcolor), + SubPolicy(0.8, "color", 8, 0.8, "solarize", 7, fillcolor), + SubPolicy(0.4, "sharpness", 7, 0.6, "invert", 8, fillcolor), + SubPolicy(0.6, "shearX", 5, 1.0, "equalize", 9, fillcolor), + SubPolicy(0.4, "color", 0, 0.6, "equalize", 3, fillcolor), + + SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor), + SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor), + SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor), + SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor), + SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor) + ] + + + def __call__(self, img): + policy_idx = random.randint(0, len(self.policies) - 1) + return self.policies[policy_idx](img) + + def __repr__(self): + return "AutoAugment ImageNet Policy" + + +class CIFAR10Policy(object): + """ Randomly choose one of the best 25 Sub-policies on CIFAR10. 
+ Example: + >>> policy = CIFAR10Policy() + >>> transformed = policy(image) + Example as a PyTorch Transform: + >>> transform=transforms.Compose([ + >>> transforms.Resize(256), + >>> CIFAR10Policy(), + >>> transforms.ToTensor()]) + """ + def __init__(self, fillcolor=(128, 128, 128)): + self.policies = [ + SubPolicy(0.1, "invert", 7, 0.2, "contrast", 6, fillcolor), + SubPolicy(0.7, "rotate", 2, 0.3, "translateX", 9, fillcolor), + SubPolicy(0.8, "sharpness", 1, 0.9, "sharpness", 3, fillcolor), + SubPolicy(0.5, "shearY", 8, 0.7, "translateY", 9, fillcolor), + SubPolicy(0.5, "autocontrast", 8, 0.9, "equalize", 2, fillcolor), + + SubPolicy(0.2, "shearY", 7, 0.3, "posterize", 7, fillcolor), + SubPolicy(0.4, "color", 3, 0.6, "brightness", 7, fillcolor), + SubPolicy(0.3, "sharpness", 9, 0.7, "brightness", 9, fillcolor), + SubPolicy(0.6, "equalize", 5, 0.5, "equalize", 1, fillcolor), + SubPolicy(0.6, "contrast", 7, 0.6, "sharpness", 5, fillcolor), + + SubPolicy(0.7, "color", 7, 0.5, "translateX", 8, fillcolor), + SubPolicy(0.3, "equalize", 7, 0.4, "autocontrast", 8, fillcolor), + SubPolicy(0.4, "translateY", 3, 0.2, "sharpness", 6, fillcolor), + SubPolicy(0.9, "brightness", 6, 0.2, "color", 8, fillcolor), + SubPolicy(0.5, "solarize", 2, 0.0, "invert", 3, fillcolor), + + SubPolicy(0.2, "equalize", 0, 0.6, "autocontrast", 0, fillcolor), + SubPolicy(0.2, "equalize", 8, 0.8, "equalize", 4, fillcolor), + SubPolicy(0.9, "color", 9, 0.6, "equalize", 6, fillcolor), + SubPolicy(0.8, "autocontrast", 4, 0.2, "solarize", 8, fillcolor), + SubPolicy(0.1, "brightness", 3, 0.7, "color", 0, fillcolor), + + SubPolicy(0.4, "solarize", 5, 0.9, "autocontrast", 3, fillcolor), + SubPolicy(0.9, "translateY", 9, 0.7, "translateY", 9, fillcolor), + SubPolicy(0.9, "autocontrast", 2, 0.8, "solarize", 3, fillcolor), + SubPolicy(0.8, "equalize", 8, 0.1, "invert", 3, fillcolor), + SubPolicy(0.7, "translateY", 9, 0.9, "autocontrast", 1, fillcolor) + ] + + + def __call__(self, img): + policy_idx = random.randint(0, len(self.policies) - 1) + return self.policies[policy_idx](img) + + def __repr__(self): + return "AutoAugment CIFAR10 Policy" + + +class SVHNPolicy(object): + """ Randomly choose one of the best 25 Sub-policies on SVHN. 
+        Example:
+        >>> policy = SVHNPolicy()
+        >>> transformed = policy(image)
+        Example as a PyTorch Transform:
+        >>> transform=transforms.Compose([
+        >>>     transforms.Resize(256),
+        >>>     SVHNPolicy(),
+        >>>     transforms.ToTensor()])
+    """
+    def __init__(self, fillcolor=(128, 128, 128)):
+        self.policies = [
+            SubPolicy(0.9, "shearX", 4, 0.2, "invert", 3, fillcolor),
+            SubPolicy(0.9, "shearY", 8, 0.7, "invert", 5, fillcolor),
+            SubPolicy(0.6, "equalize", 5, 0.6, "solarize", 6, fillcolor),
+            SubPolicy(0.9, "invert", 3, 0.6, "equalize", 3, fillcolor),
+            SubPolicy(0.6, "equalize", 1, 0.9, "rotate", 3, fillcolor),
+
+            SubPolicy(0.9, "shearX", 4, 0.8, "autocontrast", 3, fillcolor),
+            SubPolicy(0.9, "shearY", 8, 0.4, "invert", 5, fillcolor),
+            SubPolicy(0.9, "shearY", 5, 0.2, "solarize", 6, fillcolor),
+            SubPolicy(0.9, "invert", 6, 0.8, "autocontrast", 1, fillcolor),
+            SubPolicy(0.6, "equalize", 3, 0.9, "rotate", 3, fillcolor),
+
+            SubPolicy(0.9, "shearX", 4, 0.3, "solarize", 3, fillcolor),
+            SubPolicy(0.8, "shearY", 8, 0.7, "invert", 4, fillcolor),
+            SubPolicy(0.9, "equalize", 5, 0.6, "translateY", 6, fillcolor),
+            SubPolicy(0.9, "invert", 4, 0.6, "equalize", 7, fillcolor),
+            SubPolicy(0.3, "contrast", 3, 0.8, "rotate", 4, fillcolor),
+
+            SubPolicy(0.8, "invert", 5, 0.0, "translateY", 2, fillcolor),
+            SubPolicy(0.7, "shearY", 6, 0.4, "solarize", 8, fillcolor),
+            SubPolicy(0.6, "invert", 4, 0.8, "rotate", 4, fillcolor),
+            SubPolicy(0.3, "shearY", 7, 0.9, "translateX", 3, fillcolor),
+            SubPolicy(0.1, "shearX", 6, 0.6, "invert", 5, fillcolor),
+
+            SubPolicy(0.7, "solarize", 2, 0.6, "translateY", 7, fillcolor),
+            SubPolicy(0.8, "shearY", 4, 0.8, "invert", 8, fillcolor),
+            SubPolicy(0.7, "shearX", 9, 0.8, "translateY", 3, fillcolor),
+            SubPolicy(0.8, "shearY", 5, 0.7, "autocontrast", 3, fillcolor),
+            SubPolicy(0.7, "shearX", 2, 0.1, "invert", 5, fillcolor)
+        ]
+
+
+    def __call__(self, img):
+        policy_idx = random.randint(0, len(self.policies) - 1)
+        return self.policies[policy_idx](img)
+
+    def __repr__(self):
+        return "AutoAugment SVHN Policy"
+
+
+class SubPolicy(object):
+    def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)):
+        ranges = {
+            "shearX": np.linspace(0, 0.3, 10),
+            "shearY": np.linspace(0, 0.3, 10),
+            "translateX": np.linspace(0, 150 / 331, 10),
+            "translateY": np.linspace(0, 150 / 331, 10),
+            "rotate": np.linspace(0, 30, 10),
+            "color": np.linspace(0.0, 0.9, 10),
+            "posterize": np.round(np.linspace(8, 4, 10), 0).astype(int),
+            "solarize": np.linspace(256, 0, 10),
+            "contrast": np.linspace(0.0, 0.9, 10),
+            "sharpness": np.linspace(0.0, 0.9, 10),
+            "brightness": np.linspace(0.0, 0.9, 10),
+            "autocontrast": [0] * 10,
+            "equalize": [0] * 10,
+            "invert": [0] * 10
+        }
+
+        # from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
+        def rotate_with_fill(img, magnitude):
+            rot = img.convert("RGBA").rotate(magnitude)
+            return Image.composite(rot, Image.new("RGBA", rot.size, (128,) * 4), rot).convert(img.mode)
+
+        func = {
+            "shearX": lambda img, magnitude: img.transform(
+                img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
+                Image.BICUBIC, fillcolor=fillcolor),
+            "shearY": lambda img, magnitude: img.transform(
+                img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
+                Image.BICUBIC, fillcolor=fillcolor),
+            "translateX": lambda img, magnitude: img.transform(
+                img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),
+                fillcolor=fillcolor),
+            "translateY": lambda img, magnitude: img.transform(
+                img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
+                fillcolor=fillcolor),
+            "rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
+            # "rotate": lambda img, magnitude: img.rotate(magnitude * random.choice([-1, 1])),
+            "color": lambda img, magnitude: ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1])),
+            "posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude),
+            "solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude),
+            "contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
+                1 + magnitude * random.choice([-1, 1])),
+            "sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
+                1 + magnitude * random.choice([-1, 1])),
+            "brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
+                1 + magnitude * random.choice([-1, 1])),
+            "autocontrast": lambda img, magnitude: ImageOps.autocontrast(img),
+            "equalize": lambda img, magnitude: ImageOps.equalize(img),
+            "invert": lambda img, magnitude: ImageOps.invert(img)
+        }
+
+        # self.name = "{}_{:.2f}_and_{}_{:.2f}".format(
+        #     operation1, ranges[operation1][magnitude_idx1],
+        #     operation2, ranges[operation2][magnitude_idx2])
+        self.p1 = p1
+        self.operation1 = func[operation1]
+        self.magnitude1 = ranges[operation1][magnitude_idx1]
+        self.p2 = p2
+        self.operation2 = func[operation2]
+        self.magnitude2 = ranges[operation2][magnitude_idx2]
+
+
+    def __call__(self, img):
+        # each operation fires independently with its own probability
+        if random.random() < self.p1:
+            img = self.operation1(img, self.magnitude1)
+        if random.random() < self.p2:
+            img = self.operation2(img, self.magnitude2)
+        return img
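
A minimal usage sketch (not part of the diff), applying one of these policies directly to a PIL image; the file name is a placeholder:

    from PIL import Image
    from utils.autoaugment import ImageNetPolicy

    policy = ImageNetPolicy()
    img = Image.open('some_bird.jpg').convert('RGB')  # placeholder path
    augmented = policy(img)  # one of the 25 sub-policies, chosen at random
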
diff --git a/utils/dataset_DCL.py b/utils/dataset_DCL.py
new file mode 100644
index 0000000..54200a6
--- /dev/null
+++ b/utils/dataset_DCL.py
@@ -0,0 +1,181 @@
+# coding=utf8
+from __future__ import division
+import os
+import torch
+import torch.utils.data as data
+import pandas
+import random
+import PIL.Image as Image
+from PIL import ImageStat
+
+import pdb
+
+def random_sample(img_names, labels):
+    anno_dict = {}
+    img_list = []
+    anno_list = []
+    for img, anno in zip(img_names, labels):
+        if anno not in anno_dict:
+            anno_dict[anno] = [img]
+        else:
+            anno_dict[anno].append(img)
+
+    # keep a random 10% of the images of every class
+    for anno in anno_dict.keys():
+        anno_len = len(anno_dict[anno])
+        fetch_keys = random.sample(list(range(anno_len)), anno_len // 10)
+        img_list.extend([anno_dict[anno][x] for x in fetch_keys])
+        anno_list.extend([anno for x in fetch_keys])
+    return img_list, anno_list
+
+
+class dataset(data.Dataset):
+    def __init__(self, Config, anno, swap_size=[7, 7], common_aug=None, swap=None, totensor=None, train=False, train_val=False, test=False):
+        self.root_path = Config.rawdata_root
+        self.numcls = Config.numcls
+        self.dataset = Config.dataset
+        self.use_cls_2 = Config.cls_2
+        self.use_cls_mul = Config.cls_2xmul
+        if isinstance(anno, pandas.core.frame.DataFrame):
+            self.paths = anno['ImageName'].tolist()
+            self.labels = anno['label'].tolist()
+        elif isinstance(anno, dict):
+            self.paths = anno['img_name']
+            self.labels = anno['label']
+
+        if train_val:
+            self.paths, self.labels = random_sample(self.paths, self.labels)
+        self.common_aug = common_aug
+        self.swap = swap
+        self.totensor = totensor
+        self.cfg = Config
+        self.train = train
+        self.swap_size = swap_size
+        self.test = test
+
+    def __len__(self):
+        return len(self.paths)
+
+    def __getitem__(self, item):
+        img_path = os.path.join(self.root_path, self.paths[item])
+        img = self.pil_loader(img_path)
+        if self.test:
+            img = self.totensor(img)
+            label = self.labels[item]
+            return img, label, self.paths[item]
+        img_unswap = self.common_aug(img) if self.common_aug is not None else img
+
+        image_unswap_list = self.crop_image(img_unswap, self.swap_size)
+
+        swap_range = self.swap_size[0] * self.swap_size[1]
+        swap_law1 = [(i - (swap_range // 2)) / swap_range for i in range(swap_range)]
+
+        if self.train:
+            img_swap = self.swap(img_unswap)
+            image_swap_list = self.crop_image(img_swap, self.swap_size)
+            unswap_stats = [sum(ImageStat.Stat(im).mean) for im in image_unswap_list]
+            swap_stats = [sum(ImageStat.Stat(im).mean) for im in image_swap_list]
+            # for every swapped patch, recover the index of the most similar
+            # unswapped patch (by mean colour) and normalise it like swap_law1
+            swap_law2 = []
+            for swap_im in swap_stats:
+                distance = [abs(swap_im - unswap_im) for unswap_im in unswap_stats]
+                index = distance.index(min(distance))
+                swap_law2.append((index - (swap_range // 2)) / swap_range)
+            img_swap = self.totensor(img_swap)
+            label = self.labels[item]
+            if self.use_cls_mul:
+                label_swap = label + self.numcls
+            if self.use_cls_2:
+                label_swap = -1
+            img_unswap = self.totensor(img_unswap)
+            return img_unswap, img_swap, label, label_swap, swap_law1, swap_law2, self.paths[item]
+        else:
+            label = self.labels[item]
+            swap_law2 = [(i - (swap_range // 2)) / swap_range for i in range(swap_range)]
+            label_swap = label
+            img_unswap = self.totensor(img_unswap)
+            return img_unswap, label, label_swap, swap_law1, swap_law2, self.paths[item]
+
+    def pil_loader(self, imgpath):
+        with open(imgpath, 'rb') as f:
+            with Image.open(f) as img:
+                return img.convert('RGB')
+
+    def crop_image(self, image, cropnum):
+        width, high = image.size
+        crop_x = [int((width / cropnum[0]) * i) for i in range(cropnum[0] + 1)]
+        crop_y = [int((high / cropnum[1]) * i) for i in range(cropnum[1] + 1)]
+        im_list = []
+        for j in range(len(crop_y) - 1):
+            for i in range(len(crop_x) - 1):
+                im_list.append(image.crop((crop_x[i], crop_y[j], min(crop_x[i + 1], width), min(crop_y[j + 1], high))))
+        return im_list
+
+    def get_weighted_sampler(self):
+        # WeightedRandomSampler expects one weight per sample, so weight each
+        # image by the inverse frequency of its class
+        img_nums = len(self.labels)
+        class_counts = [self.labels.count(x) for x in range(self.numcls)]
+        weights = [1.0 / class_counts[label] for label in self.labels]
+        return torch.utils.data.sampler.WeightedRandomSampler(weights, num_samples=img_nums)
+
+
+def collate_fn4train(batch):
+    imgs = []
+    label = []
+    label_swap = []
+    law_swap = []
+    img_name = []
+    for sample in batch:
+        imgs.append(sample[0])
+        imgs.append(sample[1])
+        label.append(sample[2])
+        label.append(sample[2])
+        if sample[3] == -1:
+            label_swap.append(1)
+            label_swap.append(0)
+        else:
+            label_swap.append(sample[2])
+            label_swap.append(sample[3])
+        law_swap.append(sample[4])
+        law_swap.append(sample[5])
+        img_name.append(sample[-1])
+    return torch.stack(imgs, 0), label, label_swap, law_swap, img_name
+
+def collate_fn4val(batch):
+    imgs = []
+    label = []
+    label_swap = []
+    law_swap = []
+    img_name = []
+    for sample in batch:
+        imgs.append(sample[0])
+        label.append(sample[1])
+        # in the val tuple label_swap sits at index 2 (cf. index 3 in train)
+        if sample[2] == -1:
+            label_swap.append(1)
+        else:
+            label_swap.append(sample[2])
+        law_swap.append(sample[3])
+        img_name.append(sample[-1])
+    return torch.stack(imgs, 0), label, label_swap, law_swap, img_name
+
+def collate_fn4backbone(batch):
+    imgs = []
+    label = []
+    img_name = []
+    for sample in batch:
+        imgs.append(sample[0])
+        if len(sample) == 5:
+            label.append(sample[1])
+        else:
+            label.append(sample[2])
+        img_name.append(sample[-1])
+    return torch.stack(imgs, 0), label, img_name
+
+
+def collate_fn4test(batch):
+    imgs = []
+    label = []
+    img_name = []
+    for sample in batch:
+        imgs.append(sample[0])
+        label.append(sample[1])
+        img_name.append(sample[-1])
+    return torch.stack(imgs, 0), label, img_name
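
A minimal wiring sketch (not part of the diff), assuming a Config-style object and transform callables like the ones this repo builds elsewhere; `cfg`, `train_anno`, `common_aug`, `swap_aug`, and `totensor_aug` are illustrative names:

    import torch.utils.data as data
    from utils.dataset_DCL import dataset, collate_fn4train

    train_set = dataset(cfg, train_anno, swap_size=[7, 7],
                        common_aug=common_aug, swap=swap_aug,
                        totensor=totensor_aug, train=True)
    train_loader = data.DataLoader(train_set, batch_size=16, shuffle=True,
                                   collate_fn=collate_fn4train)

Because collate_fn4train stacks both the unswapped and the swapped image of every sample, each batch tensor holds 2x batch_size images, which is what the `inputs.size(0) < 2*train_batch_size` check in utils/train_model.py relies on.
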
diff --git a/utils/eval_model.py b/utils/eval_model.py
new file mode 100644
index 0000000..811ffe9
--- /dev/null
+++ b/utils/eval_model.py
@@ -0,0 +1,81 @@
+#coding=utf8
+from __future__ import print_function, division
+import os, time, datetime
+import numpy as np
+from math import ceil
+
+import torch
+from torch import nn
+from torch.autograd import Variable
+import torch.nn.functional as F
+
+from utils.utils import LossRecord
+
+import pdb
+
+def dt():
+    return datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S")
+
+def eval_turn(model, data_loader, val_version, epoch_num, log_file):
+
+    model.train(False)
+
+    val_corrects1 = 0
+    val_corrects2 = 0
+    val_corrects3 = 0
+    val_size = len(data_loader)
+    item_count = data_loader.total_item_len
+    t0 = time.time()
+    get_l1_loss = nn.L1Loss()
+    get_ce_loss = nn.CrossEntropyLoss()
+
+    val_batch_size = data_loader.batch_size
+    val_epoch_step = len(data_loader)
+    num_cls = data_loader.num_cls
+
+    val_loss_recorder = LossRecord(val_batch_size)
+    val_celoss_recorder = LossRecord(val_batch_size)
+    print('evaluating %s ...' % val_version, flush=True)
+    with torch.no_grad():
+        for batch_cnt_val, data_val in enumerate(data_loader):
+            inputs = Variable(data_val[0].cuda())
+            labels = Variable(torch.from_numpy(np.array(data_val[1])).long().cuda())
+            outputs = model(inputs)
+            loss = 0
+
+            ce_loss = get_ce_loss(outputs[0], labels).item()
+            loss += ce_loss
+
+            val_loss_recorder.update(loss)
+            val_celoss_recorder.update(ce_loss)
+
+            # with a DCL head, fold the two swap-class blocks back onto the
+            # original classes before ranking predictions
+            if outputs[1].size(1) != 2:
+                outputs_pred = outputs[0] + outputs[1][:, 0:num_cls] + outputs[1][:, num_cls:2*num_cls]
+            else:
+                outputs_pred = outputs[0]
+            top3_val, top3_pos = torch.topk(outputs_pred, 3)
+
+            print('{:s} eval_batch: {:-6d} / {:d} loss: {:8.4f}'.format(val_version, batch_cnt_val, val_epoch_step, loss), flush=True)
+
+            batch_corrects1 = torch.sum((top3_pos[:, 0] == labels)).data.item()
+            val_corrects1 += batch_corrects1
+            batch_corrects2 = torch.sum((top3_pos[:, 1] == labels)).data.item()
+            val_corrects2 += (batch_corrects2 + batch_corrects1)
+            batch_corrects3 = torch.sum((top3_pos[:, 2] == labels)).data.item()
+            val_corrects3 += (batch_corrects3 + batch_corrects2 + batch_corrects1)
+
+    val_acc1 = val_corrects1 / item_count
+    val_acc2 = val_corrects2 / item_count
+    val_acc3 = val_corrects3 / item_count
+
+    log_file.write(val_version + '\t' + str(val_loss_recorder.get_val()) + '\t' + str(val_celoss_recorder.get_val()) + '\t' + str(val_acc1) + '\t' + str(val_acc3) + '\n')
+
+    t1 = time.time()
+    since = t1 - t0
+    print('--' * 30, flush=True)
+    print('% 3d %s %s %s-loss: %.4f ||%s-acc@1: %.4f %s-acc@2: %.4f %s-acc@3: %.4f ||time: %d' % (epoch_num, val_version, dt(), val_version, val_loss_recorder.get_val(init=True), val_version, val_acc1, val_version, val_acc2, val_version, val_acc3, since), flush=True)
+    print('--' * 30, flush=True)
+
+    return val_acc1, val_acc2, val_acc3
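
The cumulative counting in eval_turn computes top-1/2/3 accuracy; a self-contained illustration (not part of the diff) with toy logits:

    import torch

    logits = torch.tensor([[0.1, 0.7, 0.2],
                           [0.5, 0.3, 0.2]])
    labels = torch.tensor([1, 2])
    _, top3_pos = torch.topk(logits, 3)
    top1 = (top3_pos[:, 0] == labels).sum().item()         # hits at rank 1
    top2 = top1 + (top3_pos[:, 1] == labels).sum().item()  # hits within rank 2
    top3 = top2 + (top3_pos[:, 2] == labels).sum().item()  # hits within rank 3
    # here top1/len(labels) == 0.5 and top3/len(labels) == 1.0
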
diff --git a/utils/test_tool.py b/utils/test_tool.py
new file mode 100644
index 0000000..3c409e0
--- /dev/null
+++ b/utils/test_tool.py
@@ -0,0 +1,99 @@
+import os
+import math
+import numpy as np
+import cv2
+import datetime
+
+import torch
+from torchvision.utils import save_image, make_grid
+
+import pdb
+
+def dt():
+    return datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S")
+
+def set_text(text, img):
+    font = cv2.FONT_HERSHEY_SIMPLEX
+    if isinstance(text, str):
+        cont = text
+        cv2.putText(img, cont, (20, 50), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
+    if isinstance(text, float):
+        cont = '%.4f' % text
+        cv2.putText(img, cont, (20, 50), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
+    if isinstance(text, list):
+        for count in range(len(img)):
+            cv2.putText(img[count], text[count], (20, 50), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
+    return img
+
+def save_multi_img(img_list, text_list, grid_size=[5, 5], sub_size=200, save_dir='./', save_name=None):
+    if len(img_list) > grid_size[0] * grid_size[1]:
+        merge_height = math.ceil(len(img_list) / grid_size[0]) * sub_size
+    else:
+        merge_height = grid_size[1] * sub_size
+    merged_img = np.zeros((merge_height, grid_size[0] * sub_size, 3))
+
+    # accept either loaded images or paths to images
+    if isinstance(img_list[0], str):
+        img_name_list = img_list
+        img_list = []
+        for img_name in img_name_list:
+            img_list.append(cv2.imread(img_name))
+
+    img_counter = 0
+    for img, txt in zip(img_list, text_list):
+        img = cv2.resize(img, (sub_size, sub_size))
+        img = set_text(txt, img)
+        pos = [img_counter // grid_size[1], img_counter % grid_size[1]]
+        sub_pos = [pos[0] * sub_size, (pos[0] + 1) * sub_size,
+                   pos[1] * sub_size, (pos[1] + 1) * sub_size]
+        merged_img[sub_pos[0]:sub_pos[1], sub_pos[2]:sub_pos[3], :] = img
+        img_counter += 1
+
+    if save_name is None:
+        img_save_path = os.path.join(save_dir, dt() + '.png')
+    else:
+        img_save_path = os.path.join(save_dir, save_name + '.png')
+    cv2.imwrite(img_save_path, merged_img)
+    print('saved img in %s ...' % img_save_path)
+
+
+def cls_base_acc(result_gather):
+    top1_acc = {}
+    top3_acc = {}
+    cls_count = {}
+    for img_item in result_gather.keys():
+        acc_case = result_gather[img_item]
+
+        if acc_case['label'] in cls_count:
+            cls_count[acc_case['label']] += 1
+            if acc_case['top1_cat'] == acc_case['label']:
+                top1_acc[acc_case['label']] += 1
+            if acc_case['label'] in [acc_case['top1_cat'], acc_case['top2_cat'], acc_case['top3_cat']]:
+                top3_acc[acc_case['label']] += 1
+        else:
+            cls_count[acc_case['label']] = 1
+            if acc_case['top1_cat'] == acc_case['label']:
+                top1_acc[acc_case['label']] = 1
+            else:
+                top1_acc[acc_case['label']] = 0
+
+            if acc_case['label'] in [acc_case['top1_cat'], acc_case['top2_cat'], acc_case['top3_cat']]:
+                top3_acc[acc_case['label']] = 1
+            else:
+                top3_acc[acc_case['label']] = 0
+
+    for label_item in cls_count:
+        top1_acc[label_item] /= max(1.0 * cls_count[label_item], 0.001)
+        top3_acc[label_item] /= max(1.0 * cls_count[label_item], 0.001)
+
+    print('top1_acc:', top1_acc)
+    print('top3_acc:', top3_acc)
+    print('cls_count', cls_count)
+
+    return top1_acc, top3_acc, cls_count
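
A minimal usage sketch (not part of the diff): cls_base_acc expects a dict keyed by image, each entry carrying 'label' plus 'top1_cat'..'top3_cat' predictions; the values below are illustrative.

    from utils.test_tool import cls_base_acc

    result_gather = {
        'img_0001.jpg': {'label': 3, 'top1_cat': 3, 'top2_cat': 7, 'top3_cat': 1},
        'img_0002.jpg': {'label': 3, 'top1_cat': 5, 'top2_cat': 3, 'top3_cat': 9},
    }
    top1_acc, top3_acc, cls_count = cls_base_acc(result_gather)
    # for class 3: top1_acc == 0.5, top3_acc == 1.0, cls_count == 2
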
+    train_batch_size = data_loader['train'].batch_size
+    train_epoch_step = len(data_loader['train'])
+    train_loss_recorder = LossRecord(train_batch_size)
+
+    if savepoint > train_epoch_step:
+        savepoint = train_epoch_step
+        checkpoint = savepoint
+
+    date_suffix = dt()
+    log_file = open(os.path.join(Config.log_folder, 'formal_log_r50_dcl_%s_%s.log' % (str(data_size), date_suffix)), 'a')
+
+    add_loss = nn.L1Loss()
+    get_ce_loss = nn.CrossEntropyLoss()
+    get_focal_loss = FocalLoss()
+    get_angle_loss = AngleLoss()
+
+    for epoch in range(start_epoch, epoch_num-1):
+        exp_lr_scheduler.step(epoch)
+        model.train(True)
+
+        for batch_cnt, data in enumerate(data_loader['train']):
+            step += 1
+            loss = 0
+            model.train(True)
+            if Config.use_backbone:
+                inputs, labels, img_names = data
+                inputs = Variable(inputs.cuda())
+                labels = Variable(torch.from_numpy(np.array(labels)).cuda())
+
+            if Config.use_dcl:
+                inputs, labels, labels_swap, swap_law, img_names = data
+
+                inputs = Variable(inputs.cuda())
+                labels = Variable(torch.from_numpy(np.array(labels)).cuda())
+                labels_swap = Variable(torch.from_numpy(np.array(labels_swap)).cuda())
+                swap_law = Variable(torch.from_numpy(np.array(swap_law)).float().cuda())
+
+            optimizer.zero_grad()
+
+            # a short (final) batch additionally passes a strided slice of
+            # itself, which the model consumes for its auxiliary head
+            if inputs.size(0) < 2*train_batch_size:
+                outputs = model(inputs, inputs[0:-1:2])
+            else:
+                outputs = model(inputs, None)
+
+            if Config.use_focal_loss:
+                ce_loss = get_focal_loss(outputs[0], labels)
+            else:
+                ce_loss = get_ce_loss(outputs[0], labels)
+
+            if Config.use_Asoftmax:
+                fetch_batch = labels.size(0)
+                if batch_cnt % (train_epoch_step // 5) == 0:
+                    angle_loss = get_angle_loss(outputs[3], labels[0:fetch_batch:2], decay=0.9)
+                else:
+                    angle_loss = get_angle_loss(outputs[3], labels[0:fetch_batch:2])
+                loss += angle_loss
+
+            loss += ce_loss
+
+            beta_ = 1
+            # the swap-law regression loss is down-weighted on STCAR and AIR
+            gamma_ = 0.01 if Config.dataset in ('STCAR', 'AIR') else 1
+            if Config.use_dcl:
+                swap_loss = get_ce_loss(outputs[1], labels_swap) * beta_
+                loss += swap_loss
+                law_loss = add_loss(outputs[2], swap_law) * gamma_
+                loss += law_loss
+
+            loss.backward()
+            torch.cuda.synchronize()
+
+            optimizer.step()
+            torch.cuda.synchronize()
+
+            if Config.use_dcl:
+                print('step: {:-8d} / {:d} loss=ce_loss+swap_loss+law_loss: {:6.4f} = {:6.4f} + {:6.4f} + {:6.4f} '.format(step, train_epoch_step, loss.detach().item(), ce_loss.detach().item(), swap_loss.detach().item(), law_loss.detach().item()), flush=True)
+            if Config.use_backbone:
+                print('step: {:-8d} / {:d} loss=ce_loss: {:6.4f} = {:6.4f} '.format(step, train_epoch_step, loss.detach().item(), ce_loss.detach().item()), flush=True)
+            rec_loss.append(loss.detach().item())
+
+            train_loss_recorder.update(loss.detach().item())
+
+            # evaluation & save
+            if step % checkpoint == 0:
+                rec_loss = []
+                print(32*'-', flush=True)
+                print('step: {:d} / {:d} global_step: {:8.2f} train_epoch: {:04d} rec_train_loss: {:6.4f}'.format(step, train_epoch_step, 1.0*step/train_epoch_step, epoch, train_loss_recorder.get_val()), flush=True)
+                print('current lr: %s' % exp_lr_scheduler.get_lr(), flush=True)
+                # eval_train_flag is off by default; when enabled, trainval
+                # evaluation stops once top-1 and top-3 accuracy converge
+                if eval_train_flag:
+                    trainval_acc1, trainval_acc2, trainval_acc3 = eval_turn(model, data_loader['trainval'], 'trainval', epoch, log_file)
+                    if abs(trainval_acc1 - trainval_acc3) < 0.01:
+                        eval_train_flag = False
+
+                val_acc1, val_acc2, val_acc3 = eval_turn(model, data_loader['val'], 'val', epoch, log_file)
+
+                save_path = os.path.join(save_dir, 'weights_%d_%d_%.4f_%.4f.pth' % (epoch, batch_cnt, val_acc1, val_acc3))
+                torch.cuda.synchronize()
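+                # Note: only the model's state_dict is written here; optimizer
+                # and scheduler state are not saved, so a resumed run keeps the
+                # weights but restarts its learning-rate schedule.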
torch.save(model.state_dict(), save_path) + print('saved model to %s' % (save_path), flush=True) + torch.cuda.empty_cache() + + # save only + elif step % savepoint == 0: + train_loss_recorder.update(rec_loss) + rec_loss = [] + save_path = os.path.join(save_dir, 'savepoint_weights-%d-%s.pth'%(step, dt())) + + checkpoint_list.append(save_path) + if len(checkpoint_list) == 6: + os.remove(checkpoint_list[0]) + del checkpoint_list[0] + torch.save(model.state_dict(), save_path) + torch.cuda.empty_cache() + + + log_file.close() + + + diff --git a/utils/train_util_DCL.py b/utils/train_util_DCL.py deleted file mode 100644 index 2a9c0a4..0000000 --- a/utils/train_util_DCL.py +++ /dev/null @@ -1,128 +0,0 @@ -#coding=utf8 -from __future__ import division -import torch -import os,time,datetime -from torch.autograd import Variable -import logging -import numpy as np -from math import ceil -from torch.nn import L1Loss -from torch import nn - -def dt(): - return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') - -def trainlog(logfilepath, head='%(message)s'): - logger = logging.getLogger('mylogger') - logging.basicConfig(filename=logfilepath, level=logging.INFO, format=head) - console = logging.StreamHandler() - console.setLevel(logging.INFO) - formatter = logging.Formatter(head) - console.setFormatter(formatter) - logging.getLogger('').addHandler(console) - -def train(cfg, - model, - epoch_num, - start_epoch, - optimizer, - criterion, - exp_lr_scheduler, - data_set, - data_loader, - save_dir, - print_inter=200, - val_inter=3500 - ): - - step = 0 - add_loss = L1Loss() - for epoch in range(start_epoch,epoch_num-1): - # train phase - exp_lr_scheduler.step(epoch) - model.train(True) # Set model to training mode - - for batch_cnt, data in enumerate(data_loader['train']): - - step+=1 - model.train(True) - inputs, labels, labels_swap, swap_law = data - inputs = Variable(inputs.cuda()) - labels = Variable(torch.from_numpy(np.array(labels)).cuda()) - labels_swap = Variable(torch.from_numpy(np.array(labels_swap)).cuda()) - swap_law = Variable(torch.from_numpy(np.array(swap_law)).float().cuda()) - - # zero the parameter gradients - optimizer.zero_grad() - - outputs = model(inputs) - if isinstance(outputs, list): - loss = criterion(outputs[0], labels) - loss += criterion(outputs[1], labels_swap) - loss += add_loss(outputs[2], swap_law) - loss.backward() - optimizer.step() - - if step % val_inter == 0: - logging.info('current lr:%s' % exp_lr_scheduler.get_lr()) - # val phase - model.train(False) # Set model to evaluate mode - - val_loss = 0 - val_corrects1 = 0 - val_corrects2 = 0 - val_corrects3 = 0 - val_size = ceil(len(data_set['val']) / data_loader['val'].batch_size) - - t0 = time.time() - - for batch_cnt_val, data_val in enumerate(data_loader['val']): - # print data - inputs, labels, labels_swap, swap_law = data_val - - inputs = Variable(inputs.cuda()) - labels = Variable(torch.from_numpy(np.array(labels)).long().cuda()) - labels_swap = Variable(torch.from_numpy(np.array(labels_swap)).long().cuda()) - # forward - if len(inputs)==1: - inputs = torch.cat((inputs,inputs)) - labels = torch.cat((labels,labels)) - labels_swap = torch.cat((labels_swap,labels_swap)) - outputs = model(inputs) - - if isinstance(outputs, list): - outputs1 = outputs[0] + outputs[1][:,0:cfg['numcls']] + outputs[1][:,cfg['numcls']:2*cfg['numcls']] - outputs2 = outputs[0] - outputs3 = outputs[1][:,0:cfg['numcls']] + outputs[1][:,cfg['numcls']:2*cfg['numcls']] - _, preds1 = torch.max(outputs1, 1) - _, preds2 = torch.max(outputs2, 1) - _, 
preds3 = torch.max(outputs3, 1)
-
-                batch_corrects1 = torch.sum((preds1 == labels)).data.item()
-                val_corrects1 += batch_corrects1
-                batch_corrects2 = torch.sum((preds2 == labels)).data.item()
-                val_corrects2 += batch_corrects2
-                batch_corrects3 = torch.sum((preds3 == labels)).data.item()
-                val_corrects3 += batch_corrects3
-
-            # val_acc = 0.5 * val_corrects / len(data_set['val'])
-            val_acc1 = 0.5 * val_corrects1 / len(data_set['val'])
-            val_acc2 = 0.5 * val_corrects2 / len(data_set['val'])
-            val_acc3 = 0.5 * val_corrects3 / len(data_set['val'])
-
-            t1 = time.time()
-            since = t1-t0
-            logging.info('--'*30)
-            logging.info('current lr:%s' % exp_lr_scheduler.get_lr())
-            logging.info('%s epoch[%d]-val-loss: %.4f ||val-acc@1: c&a: %.4f c: %.4f a: %.4f||time: %d'
-                         % (dt(), epoch, val_loss, val_acc1, val_acc2, val_acc3, since))
-
-            # save model
-            save_path = os.path.join(save_dir,
-                    'weights-%d-%d-[%.4f].pth'%(epoch,batch_cnt,val_acc1))
-            torch.save(model.state_dict(), save_path)
-            logging.info('saved model to %s' % (save_path))
-            logging.info('--' * 30)
-
diff --git a/utils/utils.py b/utils/utils.py
new file mode 100644
index 0000000..7552510
--- /dev/null
+++ b/utils/utils.py
@@ -0,0 +1,124 @@
+import torch
+import torch.nn as nn
+
+
+class LossRecord(object):
+    # running record of loss values, normalized by batch size
+    def __init__(self, batch_size):
+        self.rec_loss = 0
+        self.count = 0
+        self.batch_size = batch_size
+
+    def update(self, loss):
+        if isinstance(loss, list):
+            avg_loss = sum(loss)
+            avg_loss /= (len(loss)*self.batch_size)
+            self.rec_loss += avg_loss
+            self.count += 1
+        elif isinstance(loss, (int, float)):
+            self.rec_loss += loss/self.batch_size
+            self.count += 1
+
+    def get_val(self, init=False):
+        pop_loss = self.rec_loss / self.count
+        if init:
+            # reset the record after reading it
+            self.rec_loss = 0
+            self.count = 0
+        return pop_loss
+
+
+def weights_normal_init(model, dev=0.01):
+    if isinstance(model, list):
+        for m in model:
+            weights_normal_init(m, dev)
+    else:
+        for m in model.modules():
+            if isinstance(m, nn.Conv2d):
+                m.weight.data.normal_(0.0, dev)
+            elif isinstance(m, nn.Linear):
+                m.weight.data.normal_(0.0, dev)
+
+
+def clip_gradient(model, clip_norm):
+    """Computes the global gradient norm and rescales all gradients in place
+    so that the total norm does not exceed clip_norm."""
+    totalnorm = 0
+    for p in model.parameters():
+        if p.requires_grad and p.grad is not None:
+            modulenorm = p.grad.data.norm()
+            totalnorm += modulenorm ** 2
+    totalnorm = torch.sqrt(totalnorm).item()
+    norm = (clip_norm / max(totalnorm, clip_norm))
+    for p in model.parameters():
+        if p.requires_grad and p.grad is not None:
+            p.grad.mul_(norm)
+
+
+def Linear(in_features, out_features, bias=True):
+    """Linear layer with uniform(-0.1, 0.1) weight init (input: N x T x C)"""
+    m = nn.Linear(in_features, out_features, bias=bias)
+    m.weight.data.uniform_(-0.1, 0.1)
+    if bias:
+        m.bias.data.uniform_(-0.1, 0.1)
+    return m
+
+
+class convolution(nn.Module):
+    # conv + optional BN + ReLU block
+    def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
+        super(convolution, self).__init__()
+
+        pad = (k - 1) // 2
+        self.conv = nn.Conv2d(inp_dim, out_dim, (k, k), padding=(pad, pad), stride=(stride, stride), bias=not with_bn)
+        self.bn = nn.BatchNorm2d(out_dim) if with_bn else nn.Sequential()
+        self.relu = nn.ReLU(inplace=True)
+
+    def forward(self, x):
+        conv = self.conv(x)
+        bn = self.bn(conv)
+        relu = self.relu(bn)
+        return relu
+
+class fully_connected(nn.Module):
+    # linear + optional BN + ReLU block
+    def __init__(self, inp_dim, out_dim, with_bn=True):
+        super(fully_connected, self).__init__()
+        self.with_bn = with_bn
+
+        self.linear = nn.Linear(inp_dim, out_dim)
+        if self.with_bn:
+            self.bn = nn.BatchNorm1d(out_dim)
+        self.relu = 
nn.ReLU(inplace=True) + + def forward(self, x): + linear = self.linear(x) + bn = self.bn(linear) if self.with_bn else linear + relu = self.relu(bn) + return relu + +class residual(nn.Module): + def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True): + super(residual, self).__init__() + + self.conv1 = nn.Conv2d(inp_dim, out_dim, (3, 3), padding=(1, 1), stride=(stride, stride), bias=False) + self.bn1 = nn.BatchNorm2d(out_dim) + self.relu1 = nn.ReLU(inplace=True) + + self.conv2 = nn.Conv2d(out_dim, out_dim, (3, 3), padding=(1, 1), bias=False) + self.bn2 = nn.BatchNorm2d(out_dim) + + self.skip = nn.Sequential( + nn.Conv2d(inp_dim, out_dim, (1, 1), stride=(stride, stride), bias=False), + nn.BatchNorm2d(out_dim) + ) if stride != 1 or inp_dim != out_dim else nn.Sequential() + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + conv1 = self.conv1(x) + bn1 = self.bn1(conv1) + relu1 = self.relu1(bn1) + + conv2 = self.conv2(relu1) + bn2 = self.bn2(conv2) + + skip = self.skip(x) + return self.relu(bn2 + skip)
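+
+
+if __name__ == '__main__':
+    # Minimal smoke test -- a sketch for sanity-checking this module on its
+    # own. The shapes and values below are made up for illustration and are
+    # not used anywhere in the training pipeline.
+    rec = LossRecord(batch_size=4)
+    rec.update(0.5)              # a single batch loss value
+    rec.update([1.0, 2.0, 3.0])  # a list of accumulated batch losses
+    print('recorded loss: %.4f' % rec.get_val(init=True))
+
+    # residual block halves the spatial size when stride=2
+    x = torch.randn(2, 16, 32, 32)
+    block = residual(3, 16, 32, stride=2)
+    print('residual output size:', block(x).size())  # -> (2, 32, 16, 16)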