From 19f5118a36276ad7e8fe16c2e4deca8498e75a60 Mon Sep 17 00:00:00 2001
From: Isi <86603298+Isi-dev@users.noreply.github.com>
Date: Mon, 5 Aug 2024 20:15:16 +0100
Subject: [PATCH] Delete utils directory

---
 utils/__init__.py                               |   0
 utils/__pycache__/__init__.cpython-310.pyc      | Bin 190 -> 0 bytes
 utils/__pycache__/__init__.cpython-39.pyc       | Bin 134 -> 0 bytes
 utils/__pycache__/assign_cfg.cpython-310.pyc    | Bin 1443 -> 0 bytes
 utils/__pycache__/assign_cfg.cpython-39.pyc     | Bin 1661 -> 0 bytes
 utils/__pycache__/config.cpython-310.pyc        | Bin 6934 -> 0 bytes
 utils/__pycache__/config.cpython-39.pyc         | Bin 6798 -> 0 bytes
 utils/__pycache__/distributed.cpython-310.pyc   | Bin 13026 -> 0 bytes
 utils/__pycache__/distributed.cpython-39.pyc    | Bin 13630 -> 0 bytes
 utils/__pycache__/logging.cpython-310.pyc       | Bin 2608 -> 0 bytes
 utils/__pycache__/logging.cpython-39.pyc        | Bin 2522 -> 0 bytes
 utils/__pycache__/multi_port.cpython-310.pyc    | Bin 663 -> 0 bytes
 utils/__pycache__/multi_port.cpython-39.pyc     | Bin 605 -> 0 bytes
 utils/__pycache__/registry.cpython-310.pyc      | Bin 5564 -> 0 bytes
 utils/__pycache__/registry.cpython-39.pyc       | Bin 5584 -> 0 bytes
 .../registry_class.cpython-310.pyc              | Bin 837 -> 0 bytes
 utils/__pycache__/registry_class.cpython-39.pyc | Bin 781 -> 0 bytes
 utils/__pycache__/seed.cpython-310.pyc          | Bin 498 -> 0 bytes
 utils/__pycache__/seed.cpython-39.pyc           | Bin 442 -> 0 bytes
 utils/__pycache__/transforms.cpython-310.pyc    | Bin 15264 -> 0 bytes
 utils/__pycache__/transforms.cpython-39.pyc     | Bin 15998 -> 0 bytes
 utils/__pycache__/video_op.cpython-310.pyc      | Bin 9287 -> 0 bytes
 utils/__pycache__/video_op.cpython-39.pyc       | Bin 9445 -> 0 bytes
 utils/assign_cfg.py                             | 158 ----
 utils/config.py                                 | 488 ----
 utils/distributed.py                            | 863 ------
 utils/logging.py                                | 183 ----
 utils/mp4_to_gif.py                             |  34 -
 utils/multi_port.py                             |  20 -
 utils/optim/__init__.py                         |   7 -
 utils/optim/adafactor.py                        | 463 ----------
 utils/optim/lr_scheduler.py                     | 119 ---
 utils/registry.py                               | 337 -------
 utils/registry_class.py                         |  41 -
 utils/seed.py                                   |  24 -
 utils/transforms.py                             | 709 --------------
 utils/util.py                                   |  35 -
 utils/video_op.py                               | 721 ---------------
 38 files changed, 4202 deletions(-)
 delete mode 100644 utils/__init__.py
 delete mode 100644 utils/__pycache__/__init__.cpython-310.pyc
 delete mode 100644 utils/__pycache__/__init__.cpython-39.pyc
 delete mode 100644 utils/__pycache__/assign_cfg.cpython-310.pyc
 delete mode 100644 utils/__pycache__/assign_cfg.cpython-39.pyc
 delete mode 100644 utils/__pycache__/config.cpython-310.pyc
 delete mode 100644 utils/__pycache__/config.cpython-39.pyc
 delete mode 100644 utils/__pycache__/distributed.cpython-310.pyc
 delete mode 100644 utils/__pycache__/distributed.cpython-39.pyc
 delete mode 100644 utils/__pycache__/logging.cpython-310.pyc
 delete mode 100644 utils/__pycache__/logging.cpython-39.pyc
 delete mode 100644 utils/__pycache__/multi_port.cpython-310.pyc
 delete mode 100644 utils/__pycache__/multi_port.cpython-39.pyc
 delete mode 100644 utils/__pycache__/registry.cpython-310.pyc
 delete mode 100644 utils/__pycache__/registry.cpython-39.pyc
 delete mode 100644 utils/__pycache__/registry_class.cpython-310.pyc
 delete mode 100644 utils/__pycache__/registry_class.cpython-39.pyc
 delete mode 100644 utils/__pycache__/seed.cpython-310.pyc
 delete mode 100644 utils/__pycache__/seed.cpython-39.pyc
 delete mode 100644 utils/__pycache__/transforms.cpython-310.pyc
 delete mode 100644 utils/__pycache__/transforms.cpython-39.pyc
 delete mode 100644 utils/__pycache__/video_op.cpython-310.pyc
 delete mode 100644 utils/__pycache__/video_op.cpython-39.pyc
 delete mode 100644 utils/assign_cfg.py
 delete mode 100644 utils/config.py
 delete mode 100644 utils/distributed.py
 delete mode 100644 utils/logging.py
 delete mode 100644 utils/mp4_to_gif.py
 delete mode 100644 utils/multi_port.py
 delete mode 100644 utils/optim/__init__.py
 delete mode 100644 utils/optim/adafactor.py
 delete mode 100644 utils/optim/lr_scheduler.py
 delete mode 100644 utils/registry.py
 delete mode 100644 utils/registry_class.py
 delete mode 100644 utils/seed.py
 delete mode 100644 utils/transforms.py
 delete mode 100644 utils/util.py
 delete mode 100644 utils/video_op.py

diff --git a/utils/__init__.py b/utils/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/utils/__pycache__/__init__.cpython-310.pyc b/utils/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 52a43aa6b08ead9932948ff62645c984647db9be..0000000000000000000000000000000000000000
diff --git a/utils/__pycache__/__init__.cpython-39.pyc b/utils/__pycache__/__init__.cpython-39.pyc
deleted file mode 100644
index e0d4a3ec81a75a432c5c101a28d991935add1188..0000000000000000000000000000000000000000
diff --git a/utils/__pycache__/assign_cfg.cpython-310.pyc b/utils/__pycache__/assign_cfg.cpython-310.pyc
deleted file mode 100644
index 60b8b2294e90a30101b6f27b4ed198265812eec3..0000000000000000000000000000000000000000
diff --git a/utils/__pycache__/assign_cfg.cpython-39.pyc b/utils/__pycache__/assign_cfg.cpython-39.pyc
deleted file mode 100644
diff --git a/utils/__pycache__/config.cpython-310.pyc b/utils/__pycache__/config.cpython-310.pyc
deleted file mode 100644
diff --git a/utils/__pycache__/config.cpython-39.pyc b/utils/__pycache__/config.cpython-39.pyc
deleted file mode 100644
diff --git a/utils/__pycache__/distributed.cpython-310.pyc b/utils/__pycache__/distributed.cpython-310.pyc
deleted file mode 100644
index 1148ab6af6ccd6b4a4626b6b03392d5c454a36e2..0000000000000000000000000000000000000000
diff --git a/utils/__pycache__/distributed.cpython-39.pyc b/utils/__pycache__/distributed.cpython-39.pyc
deleted file mode 100644
diff --git a/utils/__pycache__/logging.cpython-310.pyc b/utils/__pycache__/logging.cpython-310.pyc
deleted file mode 100644
diff --git a/utils/__pycache__/logging.cpython-39.pyc b/utils/__pycache__/logging.cpython-39.pyc
deleted file mode 100644
index 1e3b71dc834b69ae83840174febe3eb2468451bf..0000000000000000000000000000000000000000
diff --git a/utils/__pycache__/multi_port.cpython-310.pyc b/utils/__pycache__/multi_port.cpython-310.pyc
deleted file mode 100644
index a38934394287ab79c1d35a8b65cc55287816203c..0000000000000000000000000000000000000000
diff --git a/utils/__pycache__/multi_port.cpython-39.pyc b/utils/__pycache__/multi_port.cpython-39.pyc
deleted file mode 100644
diff --git a/utils/__pycache__/registry.cpython-310.pyc b/utils/__pycache__/registry.cpython-310.pyc
deleted file mode 100644
diff --git a/utils/__pycache__/registry.cpython-39.pyc b/utils/__pycache__/registry.cpython-39.pyc
deleted file mode 100644
index cc83bf69ebc388ed310c31500e85ee492b196714..0000000000000000000000000000000000000000
diff --git a/utils/__pycache__/registry_class.cpython-310.pyc b/utils/__pycache__/registry_class.cpython-310.pyc
deleted file mode 100644
diff --git a/utils/__pycache__/registry_class.cpython-39.pyc b/utils/__pycache__/registry_class.cpython-39.pyc
deleted file mode 100644
diff --git a/utils/__pycache__/seed.cpython-310.pyc b/utils/__pycache__/seed.cpython-310.pyc
deleted file mode 100644
diff --git a/utils/__pycache__/seed.cpython-39.pyc b/utils/__pycache__/seed.cpython-39.pyc
deleted file mode 100644
index 5e0b3222c6264bb5cc9dccf395724f1378f79564..0000000000000000000000000000000000000000
diff --git a/utils/__pycache__/transforms.cpython-310.pyc b/utils/__pycache__/transforms.cpython-310.pyc
deleted file mode 100644
index 4576b9a153e8b548e94c52cc0782b745f67ea4be..0000000000000000000000000000000000000000
diff --git a/utils/__pycache__/transforms.cpython-39.pyc b/utils/__pycache__/transforms.cpython-39.pyc
deleted file mode 100644
diff --git a/utils/__pycache__/video_op.cpython-310.pyc b/utils/__pycache__/video_op.cpython-310.pyc
deleted file mode 100644
diff --git a/utils/__pycache__/video_op.cpython-39.pyc b/utils/__pycache__/video_op.cpython-39.pyc
deleted file mode 100644
index bf39bf1d80d9280ef78aa2a38ef1ae851b1b791a..0000000000000000000000000000000000000000
diff --git a/utils/assign_cfg.py b/utils/assign_cfg.py
deleted file mode 100644
index 74e0c61..0000000
--- a/utils/assign_cfg.py
+++ /dev/null
@@ -1,158 +0,0 @@
-import os, yaml
-from copy import deepcopy, copy
-
-
-# def get prior and ldm config
-def assign_prior_mudule_cfg(cfg):
-    '''
-    '''
-    #
-    prior_cfg = deepcopy(cfg)
-    vldm_cfg = deepcopy(cfg)
-
-    with open(cfg.prior_cfg, 'r') as f:
-        _cfg_update = yaml.load(f.read(), Loader=yaml.SafeLoader)
-        # _cfg_update = _cfg_update.cfg_dict
-        for k, v in _cfg_update.items():
-            if isinstance(v, dict) and k in cfg:
-                prior_cfg[k].update(v)
-            else:
-                prior_cfg[k] = v
-
-    with open(cfg.vldm_cfg, 'r') as f:
-        _cfg_update = yaml.load(f.read(), Loader=yaml.SafeLoader)
-        # _cfg_update = _cfg_update.cfg_dict
-        for k, v in _cfg_update.items():
-            if isinstance(v, dict) and k in cfg:
-                vldm_cfg[k].update(v)
-            else:
-                vldm_cfg[k] = v
-
-    return prior_cfg, vldm_cfg
-
-
-# def get prior and ldm config
-def assign_vldm_vsr_mudule_cfg(cfg):
-    '''
-    '''
-    #
-    vldm_cfg = deepcopy(cfg)
-    vsr_cfg = deepcopy(cfg)
-
-    with open(cfg.vldm_cfg, 'r') as f:
-        _cfg_update = yaml.load(f.read(), Loader=yaml.SafeLoader)
-        # _cfg_update = _cfg_update.cfg_dict
-        for k, v in _cfg_update.items():
-            if isinstance(v, dict) and k in cfg:
-                vldm_cfg[k].update(v)
-            else:
-                vldm_cfg[k] = v
-
-    with open(cfg.vsr_cfg, 'r') as f:
-        _cfg_update = yaml.load(f.read(), Loader=yaml.SafeLoader)
-        # _cfg_update = _cfg_update.cfg_dict
-        for k, v in _cfg_update.items():
-            if isinstance(v, dict) and k in cfg:
-                vsr_cfg[k].update(v)
-            else:
-                vsr_cfg[k] = v
-
-    return vldm_cfg, vsr_cfg
-
-
-# def get prior and ldm config
-def assign_signle_cfg(cfg, _cfg_update, tname):
-    '''
-    '''
-    #
-    vldm_cfg = deepcopy(cfg)
-    if os.path.exists(_cfg_update[tname]):
-        with open(_cfg_update[tname], 'r') as f:
-            _cfg_update = yaml.load(f.read(), Loader=yaml.SafeLoader)
-            # _cfg_update = _cfg_update.cfg_dict
-            for k, v in _cfg_update.items():
-                if isinstance(v, dict) and k in cfg:
-                    vldm_cfg[k].update(v)
-                else:
-                    vldm_cfg[k] = v
-    return vldm_cfg
\ No newline at end of file
diff --git a/utils/config.py b/utils/config.py
deleted file mode 100644
index d92b1df..0000000
--- a/utils/config.py
+++ /dev/null
@@ -1,488 +0,0 @@
-import os
-import yaml
-import json
-import copy
-import argparse
-
-from ..utils import logging
-# logger = logging.get_logger(__name__)
-
-class Config(object):
-    def __init__(self, load=True, cfg_dict=None, cfg_level=None):
-        self._level = "cfg" + ("." + cfg_level if cfg_level is not None else "")
-
-        current_directory = os.path.dirname(os.path.abspath(__file__))
-        parent_directory = os.path.dirname(current_directory)
-        self.config_file_loc = os.path.join(parent_directory, 'configs/UniAnimate_infer.yaml')
-
-        if load:
-            self.args = self._parse_args()
-            # logger.info("Loading config from {}.".format(self.args.cfg_file))
-            self.need_initialization = True
-            cfg_base = self._load_yaml(self.args) # self._initialize_cfg()
-            cfg_dict = self._load_yaml(self.args)
-            cfg_dict = self._merge_cfg_from_base(cfg_base, cfg_dict)
-            cfg_dict = self._update_from_args(cfg_dict)
-            self.cfg_dict = cfg_dict
-            self._update_dict(cfg_dict)
-
-    def _parse_args(self):
-        parser = argparse.ArgumentParser(
-            description="Argparser for configuring the codebase"
-        )
-        parser.add_argument(
-            "--cfg",
-            dest="cfg_file",
-            help="Path to the configuration file",
-            default=self.config_file_loc
-        )
-        parser.add_argument(
-            "--init_method",
-            help="Initialization method, includes TCP or shared file-system",
-            default="tcp://localhost:9999",
-            type=str,
-        )
-        parser.add_argument(
-            '--debug',
-            action='store_true',
-            default=False,
-            help='Output debug information'
-        )
-        parser.add_argument(
-            '--windows-standalone-build',
-            action='store_true',
-            default=False,
-            help='Indicates if the build is a standalone build for Windows'
-        )
-        parser.add_argument(
-            "opts",
-            help="Other configurations",
-            default=None,
-            nargs=argparse.REMAINDER
-        )
-        return parser.parse_args()
-
-
-    def _path_join(self, path_list):
-        path = ""
-        for p in path_list:
-            path += p + '/'
-        return path[:-1]
-
-    def _update_from_args(self, cfg_dict):
-        args = self.args
-        for var in vars(args):
-            cfg_dict[var] = getattr(args, var)
-        return cfg_dict
-
-    def _initialize_cfg(self):
-        if self.need_initialization:
-            self.need_initialization = False
-            if os.path.exists('./configs/base.yaml'):
-                with open("./configs/base.yaml", 'r') as f:
-                    cfg = yaml.load(f.read(), Loader=yaml.SafeLoader)
-            else:
-                with open(os.path.realpath(__file__).split('/')[-3] + "/configs/base.yaml", 'r') as f:
-                    cfg = yaml.load(f.read(), Loader=yaml.SafeLoader)
-        return cfg
-
-    def _load_yaml(self, args, file_name=""):
-        assert args.cfg_file is not None
-        if not file_name == "": # reading from base file
-            with open(file_name, 'r') as f:
-                cfg = yaml.load(f.read(), Loader=yaml.SafeLoader)
-        else:
-            if os.getcwd().split("/")[-1] == args.cfg_file.split("/")[0]:
-                args.cfg_file = args.cfg_file.replace(os.getcwd().split("/")[-1], "./")
-            with open(args.cfg_file, 'r') as f:
-                cfg = yaml.load(f.read(), Loader=yaml.SafeLoader)
-                file_name = args.cfg_file
-
-        if "_BASE_RUN" not in cfg.keys() and "_BASE_MODEL" not in cfg.keys() and "_BASE" not in cfg.keys():
-            # return cfg if the base file is being accessed
-            cfg = self._merge_cfg_from_command_update(args, cfg)
-            return cfg
-
-        if "_BASE" in cfg.keys():
-            if cfg["_BASE"][1] == '.':
-                prev_count = cfg["_BASE"].count('..')
-                cfg_base_file = self._path_join(file_name.split('/')[:(-1-cfg["_BASE"].count('..'))] + cfg["_BASE"].split('/')[prev_count:])
-            else:
-                cfg_base_file = cfg["_BASE"].replace(
-                    "./",
-                    args.cfg_file.replace(args.cfg_file.split('/')[-1], "")
-                )
-            cfg_base = self._load_yaml(args, cfg_base_file)
-            cfg = self._merge_cfg_from_base(cfg_base, cfg)
-        else:
-            if "_BASE_RUN" in cfg.keys():
-                if cfg["_BASE_RUN"][1] == '.':
-                    prev_count = cfg["_BASE_RUN"].count('..')
-                    cfg_base_file = self._path_join(file_name.split('/')[:(-1-prev_count)] + cfg["_BASE_RUN"].split('/')[prev_count:])
-                else:
-                    cfg_base_file = cfg["_BASE_RUN"].replace(
-                        "./",
-                        args.cfg_file.replace(args.cfg_file.split('/')[-1], "")
-                    )
-                cfg_base = self._load_yaml(args, cfg_base_file)
-                cfg = self._merge_cfg_from_base(cfg_base, cfg, preserve_base=True)
-            if "_BASE_MODEL" in cfg.keys():
-                if cfg["_BASE_MODEL"][1] == '.':
-                    prev_count = cfg["_BASE_MODEL"].count('..')
-                    cfg_base_file = self._path_join(file_name.split('/')[:(-1-cfg["_BASE_MODEL"].count('..'))] + cfg["_BASE_MODEL"].split('/')[prev_count:])
-                else:
-                    cfg_base_file = cfg["_BASE_MODEL"].replace(
-                        "./",
-                        args.cfg_file.replace(args.cfg_file.split('/')[-1], "")
-                    )
-                cfg_base = self._load_yaml(args, cfg_base_file)
-                cfg = self._merge_cfg_from_base(cfg_base, cfg)
-            cfg = self._merge_cfg_from_command(args, cfg)
-        return cfg
-
-    def _merge_cfg_from_base(self, cfg_base, cfg_new, preserve_base=False):
-        for k, v in cfg_new.items():
-            if k in cfg_base.keys():
-                if isinstance(v, dict):
-                    self._merge_cfg_from_base(cfg_base[k], v)
-                else:
-                    cfg_base[k] = v
-            else:
-                if "BASE" not in k or preserve_base:
-                    cfg_base[k] = v
-        return cfg_base
-
-    def _merge_cfg_from_command_update(self, args, cfg):
-        if len(args.opts) == 0:
-            return cfg
-
-        assert len(args.opts) % 2 == 0, 'Override list {} has odd length: {}.'.format(
-            args.opts, len(args.opts)
-        )
-        keys = args.opts[0::2]
-        vals = args.opts[1::2]
-
-        for key, val in zip(keys, vals):
-            cfg[key] = val
-
-        return cfg
-
-    def _merge_cfg_from_command(self, args, cfg):
-        assert len(args.opts) % 2 == 0, 'Override list {} has odd length: {}.'.format(
-            args.opts, len(args.opts)
-        )
-        keys = args.opts[0::2]
-        vals = args.opts[1::2]
-
-        # maximum supported depth 3
-        for idx, key in enumerate(keys):
-            key_split = key.split('.')
-                len(key_split)
-            )
-            assert key_split[0] in cfg.keys(), 'Non-existent key: {}.'.format(
-                key_split[0]
-            )
-            if len(key_split) == 2:
-                assert key_split[1] in cfg[key_split[0]].keys(), 'Non-existent key: {}.'.format(
-                    key
-                )
-            elif len(key_split) == 3:
-                assert key_split[1] in cfg[key_split[0]].keys(), 'Non-existent key: {}.'.format(
-                    key
-                )
-                assert key_split[2] in cfg[key_split[0]][key_split[1]].keys(), 'Non-existent key: {}.'.format(
-                    key
-                )
-            elif len(key_split) == 4:
-                assert key_split[1] in cfg[key_split[0]].keys(), 'Non-existent key: {}.'.format(
-                    key
-                )
-                assert key_split[2] in cfg[key_split[0]][key_split[1]].keys(), 'Non-existent key: {}.'.format(
-                    key
-                )
-                assert key_split[3] in cfg[key_split[0]][key_split[1]][key_split[2]].keys(), 'Non-existent key: {}.'.format(
-                    key
-                )
-            if len(key_split) == 1:
-                cfg[key_split[0]] = vals[idx]
-            elif len(key_split) == 2:
-                cfg[key_split[0]][key_split[1]] = vals[idx]
-            elif len(key_split) == 3:
-                cfg[key_split[0]][key_split[1]][key_split[2]] = vals[idx]
-            elif len(key_split) == 4:
-                cfg[key_split[0]][key_split[1]][key_split[2]][key_split[3]] = vals[idx]
-        return cfg
-
-    def _update_dict(self, cfg_dict):
-        def recur(key, elem):
-            if type(elem) is dict:
-                return key, Config(load=False, cfg_dict=elem, cfg_level=key)
-            else:
-                if type(elem) is str and elem[1:3] == "e-":
-                    elem = float(elem)
-                return key, elem
-        dic = dict(recur(k, v) for k, v in cfg_dict.items())
-        self.__dict__.update(dic)
-
-    def get_args(self):
-        return self.args
-
-    def __repr__(self):
-        return "{}\n".format(self.dump())
-
-    def dump(self):
-        return json.dumps(self.cfg_dict, indent=2)
-
-    def deep_copy(self):
-        return copy.deepcopy(self)
-
-# if __name__ == '__main__':
-#     # debug
-#     cfg = Config(load=True)
-#     print(cfg.DATA)
\ No newline at end of file
diff --git a/utils/distributed.py b/utils/distributed.py
deleted file mode 100644
index 284dbdb..0000000
--- a/utils/distributed.py
+++ /dev/null
@@ -1,863 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-
-import torch
-import torch.nn.functional as F
-import torch.distributed as dist
-import functools
-import logging
-import pickle
-import numpy as np
-from collections import OrderedDict
-from torch.autograd import Function
-
-__all__ = ['is_dist_initialized',
-           'get_world_size',
-           'get_rank',
-           'new_group',
-           'destroy_process_group',
-           'barrier',
-           'broadcast',
-           'all_reduce',
-           'reduce',
-           'gather',
-           'all_gather',
-           'reduce_dict',
-           'get_global_gloo_group',
-           'generalized_all_gather',
-           'generalized_gather',
-           'scatter',
-           'reduce_scatter',
-           'send',
-           'recv',
-           'isend',
-           'irecv',
-           'shared_random_seed',
-           'diff_all_gather',
-           'diff_all_reduce',
-           'diff_scatter',
-           'diff_copy',
-           'spherical_kmeans',
-           'sinkhorn']
-
-#-------------------------------- Distributed operations --------------------------------#
-
-def is_dist_initialized():
-    return dist.is_available() and dist.is_initialized()
-
-def get_world_size(group=None):
-    return dist.get_world_size(group) if is_dist_initialized() else 1
-
-def get_rank(group=None):
-    return dist.get_rank(group) if is_dist_initialized() else 0
-
-def new_group(ranks=None, **kwargs):
-    if is_dist_initialized():
-        return dist.new_group(ranks, **kwargs)
-    return None
-
-def destroy_process_group():
-    if is_dist_initialized():
-        dist.destroy_process_group()
-
-def barrier(group=None, **kwargs):
-    if get_world_size(group) > 1:
-        dist.barrier(group, **kwargs)
-
-def broadcast(tensor, src, group=None, **kwargs):
-    if get_world_size(group) > 1:
-        return dist.broadcast(tensor, src, group, **kwargs)
-
-def all_reduce(tensor, op=dist.ReduceOp.SUM, group=None, **kwargs):
-    if get_world_size(group) > 1:
-        return dist.all_reduce(tensor, op, group, **kwargs)
-
-def reduce(tensor, dst, op=dist.ReduceOp.SUM, group=None, **kwargs):
-    if get_world_size(group) > 1:
-        return dist.reduce(tensor, dst, op, group, **kwargs)
-
-def gather(tensor, dst=0, group=None, **kwargs):
-    rank = get_rank()  # global rank
-    world_size = get_world_size(group)
-    if world_size == 1:
-        return [tensor]
-    tensor_list = [torch.empty_like(tensor) for _ in range(world_size)] if rank == dst else None
-    dist.gather(tensor, tensor_list, dst, group, **kwargs)
-    return tensor_list
-
-def all_gather(tensor, uniform_size=True, group=None, **kwargs):
-    world_size = get_world_size(group)
-    if world_size == 1:
-        return [tensor]
-    assert tensor.is_contiguous(), 'ops.all_gather requires the tensor to be contiguous()'
-
-    if uniform_size:
-        tensor_list = [torch.empty_like(tensor) for _ in range(world_size)]
-        dist.all_gather(tensor_list, tensor, group, **kwargs)
-        return tensor_list
-    else:
-        # collect tensor shapes across GPUs
-        shape = tuple(tensor.shape)
-        shape_list = generalized_all_gather(shape, group)
-
-        # flatten the tensor
-        tensor = tensor.reshape(-1)
-        size = int(np.prod(shape))
-        size_list = [int(np.prod(u)) for u in shape_list]
-        max_size = max(size_list)
-
-        # pad to maximum size
-        if size != max_size:
-            padding = tensor.new_zeros(max_size - size)
-            tensor = torch.cat([tensor, padding], dim=0)
-
-        # all_gather
-        tensor_list = [torch.empty_like(tensor) for _ in range(world_size)]
-        dist.all_gather(tensor_list, tensor, group, **kwargs)
-
-        # reshape tensors
-        tensor_list = [t[:n].view(s) for t, n, s in zip(
-            tensor_list, size_list, shape_list)]
-        return tensor_list
-
-@torch.no_grad()
-def reduce_dict(input_dict, group=None, reduction='mean', **kwargs):
-    assert reduction in ['mean', 'sum']
-    world_size = get_world_size(group)
-    if world_size == 1:
-        return input_dict
-
-    # ensure that the orders of keys are consistent across processes
-    if isinstance(input_dict, OrderedDict):
-        keys = list(input_dict.keys())
-    else:
-        keys = sorted(input_dict.keys())
-    vals = [input_dict[key] for key in keys]
-    vals = torch.stack(vals, dim=0)
-    dist.reduce(vals, dst=0, group=group, **kwargs)
-    if dist.get_rank(group) == 0 and reduction == 'mean':
-        vals /= world_size
-    dist.broadcast(vals, src=0, group=group, **kwargs)
-    reduced_dict = type(input_dict)([
-        (key, val) for key, val in zip(keys, vals)])
-    return reduced_dict
-
-@functools.lru_cache()
-def get_global_gloo_group():
-    backend = dist.get_backend()
-    assert backend in ['gloo', 'nccl']
-    if backend == 'nccl':
-        return dist.new_group(backend='gloo')
-    else:
-        return dist.group.WORLD
-
-def _serialize_to_tensor(data, group):
-    backend = dist.get_backend(group)
-    assert backend in ['gloo', 'nccl']
-    device = torch.device('cpu' if backend == 'gloo' else 'cuda')
-
-    buffer = pickle.dumps(data)
-    if len(buffer) > 1024 ** 3:
-        logger = logging.getLogger(__name__)
-        logger.warning(
-            'Rank {} trying to all-gather {:.2f} GB of data on device '
-            '{}'.format(get_rank(), len(buffer) / (1024 ** 3), device))
-    storage = torch.ByteStorage.from_buffer(buffer)
-    tensor = torch.ByteTensor(storage).to(device=device)
-    return tensor
-
-def _pad_to_largest_tensor(tensor, group):
-    world_size = dist.get_world_size(group=group)
-    assert world_size >= 1, \
-        'gather/all_gather must be called from ranks within ' \
-        'the given group!'
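-    # exchange per-rank byte sizes first so every rank can pad its buffer to the largest payload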
-    local_size = torch.tensor(
-        [tensor.numel()], dtype=torch.int64, device=tensor.device)
-    size_list = [torch.zeros(
-        [1], dtype=torch.int64, device=tensor.device)
-        for _ in range(world_size)]
-
-    # gather tensors and compute the maximum size
-    dist.all_gather(size_list, local_size, group=group)
-    size_list = [int(size.item()) for size in size_list]
-    max_size = max(size_list)
-
-    # pad tensors to the same size
-    if local_size != max_size:
-        padding = torch.zeros(
-            (max_size - local_size, ),
-            dtype=torch.uint8, device=tensor.device)
-        tensor = torch.cat((tensor, padding), dim=0)
-    return size_list, tensor
-
-def generalized_all_gather(data, group=None):
-    if get_world_size(group) == 1:
-        return [data]
-    if group is None:
-        group = get_global_gloo_group()
-
-    tensor = _serialize_to_tensor(data, group)
-    size_list, tensor = _pad_to_largest_tensor(tensor, group)
-    max_size = max(size_list)
-
-    # receiving tensors from all ranks
-    tensor_list = [torch.empty(
-        (max_size, ), dtype=torch.uint8, device=tensor.device)
-        for _ in size_list]
-    dist.all_gather(tensor_list, tensor, group=group)
-
-    data_list = []
-    for size, tensor in zip(size_list, tensor_list):
-        buffer = tensor.cpu().numpy().tobytes()[:size]
-        data_list.append(pickle.loads(buffer))
-    return data_list
-
-def generalized_gather(data, dst=0, group=None):
-    world_size = get_world_size(group)
-    if world_size == 1:
-        return [data]
-    if group is None:
-        group = get_global_gloo_group()
-    rank = dist.get_rank()  # global rank
-
-    tensor = _serialize_to_tensor(data, group)
-    size_list, tensor = _pad_to_largest_tensor(tensor, group)
-
-    # receiving tensors from all ranks to dst
-    if rank == dst:
-        max_size = max(size_list)
-        tensor_list = [torch.empty(
-            (max_size, ), dtype=torch.uint8, device=tensor.device)
-            for _ in size_list]
-        dist.gather(tensor, tensor_list, dst=dst, group=group)
-
-        data_list = []
-        for size, tensor in zip(size_list, tensor_list):
-            buffer = tensor.cpu().numpy().tobytes()[:size]
-            data_list.append(pickle.loads(buffer))
-        return data_list
-    else:
-        dist.gather(tensor, [], dst=dst, group=group)
-        return []
-
-def scatter(data, scatter_list=None, src=0, group=None, **kwargs):
-    r"""NOTE: only supports CPU tensor communication.
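-    Wraps dist.scatter: rank `src` provides `scatter_list`, and each rank receives its chunk in `data`.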
-    """
-    if get_world_size(group) > 1:
-        return dist.scatter(data, scatter_list, src, group, **kwargs)
-
-def reduce_scatter(output, input_list, op=dist.ReduceOp.SUM, group=None, **kwargs):
-    if get_world_size(group) > 1:
-        return dist.reduce_scatter(output, input_list, op, group, **kwargs)
-
-def send(tensor, dst, group=None, **kwargs):
-    if get_world_size(group) > 1:
-        assert tensor.is_contiguous(), 'ops.send requires the tensor to be contiguous()'
-        return dist.send(tensor, dst, group, **kwargs)
-
-def recv(tensor, src=None, group=None, **kwargs):
-    if get_world_size(group) > 1:
-        assert tensor.is_contiguous(), 'ops.recv requires the tensor to be contiguous()'
-        return dist.recv(tensor, src, group, **kwargs)
-
-def isend(tensor, dst, group=None, **kwargs):
-    if get_world_size(group) > 1:
-        assert tensor.is_contiguous(), 'ops.isend requires the tensor to be contiguous()'
-        return dist.isend(tensor, dst, group, **kwargs)
-
-def irecv(tensor, src=None, group=None, **kwargs):
-    if get_world_size(group) > 1:
-        assert tensor.is_contiguous(), 'ops.irecv requires the tensor to be contiguous()'
-        return dist.irecv(tensor, src, group, **kwargs)
-
-def shared_random_seed(group=None):
-    seed = np.random.randint(2 ** 31)
-    all_seeds = generalized_all_gather(seed, group)
-    return all_seeds[0]
-
-#-------------------------------- Differentiable operations --------------------------------#
-
-def _all_gather(x):
-    if not (dist.is_available() and dist.is_initialized()) or dist.get_world_size() == 1:
-        return x
-    rank = dist.get_rank()
-    world_size = dist.get_world_size()
-    tensors = [torch.empty_like(x) for _ in range(world_size)]
-    tensors[rank] = x
-    dist.all_gather(tensors, x)
-    return torch.cat(tensors, dim=0).contiguous()
-
-def _all_reduce(x):
-    if not (dist.is_available() and dist.is_initialized()) or dist.get_world_size() == 1:
-        return x
-    dist.all_reduce(x)
-    return x
-
-def _split(x):
-    if not (dist.is_available() and dist.is_initialized()) or dist.get_world_size() == 1:
-        return x
-    rank = dist.get_rank()
-    world_size = dist.get_world_size()
-    return x.chunk(world_size, dim=0)[rank].contiguous()
-
-class DiffAllGather(Function):
-    r"""Differentiable all-gather.
-    """
-    @staticmethod
-    def symbolic(graph, input):
-        return _all_gather(input)
-
-    @staticmethod
-    def forward(ctx, input):
-        return _all_gather(input)
-
-    @staticmethod
-    def backward(ctx, grad_output):
-        return _split(grad_output)
-
-class DiffAllReduce(Function):
-    r"""Differentiable all-reduce.
-    """
-    @staticmethod
-    def symbolic(graph, input):
-        return _all_reduce(input)
-
-    @staticmethod
-    def forward(ctx, input):
-        return _all_reduce(input)
-
-    @staticmethod
-    def backward(ctx, grad_output):
-        return grad_output
-
-class DiffScatter(Function):
-    r"""Differentiable scatter.
-    """
-    @staticmethod
-    def symbolic(graph, input):
-        return _split(input)
-
-    @staticmethod
-    def forward(ctx, input):
-        return _split(input)
-
-    @staticmethod
-    def backward(ctx, grad_output):
-        return _all_gather(grad_output)
-
-class DiffCopy(Function):
-    r"""Differentiable copy that reduces all gradients during backward.
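-    Forward returns the input unchanged; backward all-reduces (sums) the gradient across ranks.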
-    """
-    @staticmethod
-    def symbolic(graph, input):
-        return input
-
-    @staticmethod
-    def forward(ctx, input):
-        return input
-
-    @staticmethod
-    def backward(ctx, grad_output):
-        return _all_reduce(grad_output)
-
-diff_all_gather = DiffAllGather.apply
-diff_all_reduce = DiffAllReduce.apply
-diff_scatter = DiffScatter.apply
-diff_copy = DiffCopy.apply
-
-#-------------------------------- Distributed algorithms --------------------------------#
-
-@torch.no_grad()
-def spherical_kmeans(feats, num_clusters, num_iters=10):
-    k, n, c = num_clusters, *feats.size()
-    ones = feats.new_ones(n, dtype=torch.long)
-
-    # distributed settings
-    rank = get_rank()
-    world_size = get_world_size()
-
-    # init clusters
-    rand_inds = torch.randperm(n)[:int(np.ceil(k / world_size))]
-    clusters = torch.cat(all_gather(feats[rand_inds]), dim=0)[:k]
-
-    # variables
-    new_clusters = feats.new_zeros(k, c)
-    counts = feats.new_zeros(k, dtype=torch.long)
-
-    # iterative Expectation-Maximization
-    for step in range(num_iters + 1):
-        # Expectation step
-        simmat = torch.mm(feats, clusters.t())
-        scores, assigns = simmat.max(dim=1)
-        if step == num_iters:
-            break
-
-        # Maximization step
-        new_clusters.zero_().scatter_add_(0, assigns.unsqueeze(1).repeat(1, c), feats)
-        all_reduce(new_clusters)
-
-        counts.zero_()
-        counts.index_add_(0, assigns, ones)
-        all_reduce(counts)
-
-        mask = (counts > 0)
-        clusters[mask] = new_clusters[mask] / counts[mask].view(-1, 1)
-        clusters = F.normalize(clusters, p=2, dim=1)
-    return clusters, assigns, scores
-
-@torch.no_grad()
-def sinkhorn(Q, eps=0.5, num_iters=3):
-    # normalize Q
-    Q = torch.exp(Q / eps).t()
-    sum_Q = Q.sum()
-    all_reduce(sum_Q)
-    Q /= sum_Q
-
-    # variables
-    n, m = Q.size()
-    u = Q.new_zeros(n)
-    r = Q.new_ones(n) / n
-    c = Q.new_ones(m) / (m * get_world_size())
-
-    # iterative update
-    cur_sum = Q.sum(dim=1)
-    all_reduce(cur_sum)
-    for i in range(num_iters):
-        u = cur_sum
-        Q *= (r / u).unsqueeze(1)
-        Q *= (c / Q.sum(dim=0)).unsqueeze(0)
-        cur_sum = Q.sum(dim=1)
-        all_reduce(cur_sum)
-    return (Q / Q.sum(dim=0, keepdim=True)).t().float()
diff --git a/utils/logging.py b/utils/logging.py
deleted file mode 100644
index f5d0758..0000000
--- a/utils/logging.py
+++ /dev/null
@@ -1,183 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-
-"""Logging."""
-
-import builtins
-import decimal
-import functools
-import logging
-import os
-import sys
-from ..lib import simplejson
-# from fvcore.common.file_io import PathManager
-
-from ..utils import distributed as du
-
-
-def _suppress_print():
-    """
-    Suppresses printing from the current process.
-    """
-
-    def print_pass(*objects, sep=" ", end="\n", file=sys.stdout, flush=False):
-        pass
-
-    builtins.print = print_pass
-
-
-# @functools.lru_cache(maxsize=None)
-# def _cached_log_stream(filename):
-#     return PathManager.open(filename, "a")
-
-
-def setup_logging(cfg, log_file):
-    """
-    Sets up the logging for multiple processes. Only enable the logging for the
-    master process, and suppress logging for the non-master processes.
-    """
-    if du.is_master_proc():
-        # Enable logging for the master process.
-        logging.root.handlers = []
-    else:
-        # Suppress logging for non-master processes.
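-        # (print() is replaced with a no-op; see _suppress_print above)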
-        _suppress_print()
-
-    logger = logging.getLogger()
-    logger.setLevel(logging.INFO)
-    logger.propagate = False
-    plain_formatter = logging.Formatter(
-        "[%(asctime)s][%(levelname)s] %(name)s: %(lineno)4d: %(message)s",
-        datefmt="%m/%d %H:%M:%S",
-    )
-
-    if du.is_master_proc():
-        ch = logging.StreamHandler(stream=sys.stdout)
-        ch.setLevel(logging.DEBUG)
-        ch.setFormatter(plain_formatter)
-        logger.addHandler(ch)
-
-    if log_file is not None and du.is_master_proc(du.get_world_size()):
-        filename = os.path.join(cfg.OUTPUT_DIR, log_file)
-        fh = logging.FileHandler(filename)
-        fh.setLevel(logging.DEBUG)
-        fh.setFormatter(plain_formatter)
-        logger.addHandler(fh)
-
-
-def get_logger(name):
-    """
-    Retrieve the logger with the specified name or, if name is None, return a
-    logger which is the root logger of the hierarchy.
-    Args:
-        name (string): name of the logger.
-    """
-    return logging.getLogger(name)
-
-
-def log_json_stats(stats):
-    """
-    Logs json stats.
-    Args:
-        stats (dict): a dictionary of statistical information to log.
-    """
-    stats = {
-        k: decimal.Decimal("{:.6f}".format(v)) if isinstance(v, float) else v
-        for k, v in stats.items()
-    }
-    json_stats = simplejson.dumps(stats, sort_keys=True, use_decimal=True)
-    logger = get_logger(__name__)
-    logger.info("{:s}".format(json_stats))
diff --git a/utils/mp4_to_gif.py b/utils/mp4_to_gif.py
deleted file mode 100644
index 2be4c04..0000000
--- a/utils/mp4_to_gif.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import os
-
-
-# source_mp4_dir = "outputs/UniAnimate_infer"
-# target_gif_dir = "outputs/UniAnimate_infer_gif"
-
-source_mp4_dir = "outputs/UniAnimate_infer_long"
-target_gif_dir = "outputs/UniAnimate_infer_long_gif"
-
-os.makedirs(target_gif_dir, exist_ok=True)
-for video in os.listdir(source_mp4_dir):
-    video_dir = os.path.join(source_mp4_dir, video)
-    gif_dir = os.path.join(target_gif_dir, video.replace(".mp4", ".gif"))
-    cmd = f'ffmpeg -i {video_dir} {gif_dir}'
-    os.system(cmd)
\ No newline at end of file
diff --git a/utils/multi_port.py b/utils/multi_port.py
deleted file mode 100644
index b39be00..0000000
--- a/utils/multi_port.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import socket
-from contextlib import closing
-
-def find_free_port():
-    """ https://stackoverflow.com/questions/1365265/on-localhost-how-do-i-pick-a-free-port-number """
-    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
-        s.bind(('', 0))
-        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-        return str(s.getsockname()[1])
\ No newline at end of file
diff --git a/utils/optim/__init__.py b/utils/optim/__init__.py
deleted file mode 100644
index a37e510..0000000
--- a/utils/optim/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .lr_scheduler import *
-from .adafactor import *
diff --git a/utils/optim/adafactor.py b/utils/optim/adafactor.py
deleted file mode 100644
index 8369553..0000000
--- a/utils/optim/adafactor.py
+++ /dev/null
@@ -1,463 +0,0 @@
-import math
-import torch
-from torch.optim import Optimizer
-from torch.optim.lr_scheduler import LambdaLR
-
-__all__ = ['Adafactor']
-
-class Adafactor(Optimizer):
-    """
-    AdaFactor PyTorch implementation that can be used as a drop-in replacement for Adam; original fairseq code:
-    https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py
-    Paper: *Adafactor: Adaptive Learning Rates with Sublinear Memory Cost* https://arxiv.org/abs/1804.04235 Note that
-    this optimizer internally adjusts the learning rate depending on the `scale_parameter`, `relative_step` and
-    `warmup_init` options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and
-    `relative_step=False`.
-    Arguments:
-        params (`Iterable[nn.parameter.Parameter]`):
-            Iterable of parameters to optimize or dictionaries defining parameter groups.
-        lr (`float`, *optional*):
-            The external learning rate.
-        eps (`Tuple[float, float]`, *optional*, defaults to (1e-30, 1e-3)):
-            Regularization constants for the squared gradient and the parameter scale, respectively
-        clip_threshold (`float`, *optional*, defaults to 1.0):
-            Threshold of the root mean square of the final gradient update
-        decay_rate (`float`, *optional*, defaults to -0.8):
-            Coefficient used to compute the running averages of the squared gradient
-        beta1 (`float`, *optional*):
-            Coefficient used for computing running averages of the gradient
-        weight_decay (`float`, *optional*, defaults to 0):
-            Weight decay (L2 penalty)
-        scale_parameter (`bool`, *optional*, defaults to `True`):
-            If True, the learning rate is scaled by the root mean square of the parameter
-        relative_step (`bool`, *optional*, defaults to `True`):
-            If True, a time-dependent learning rate is computed instead of using an external learning rate
-        warmup_init (`bool`, *optional*, defaults to `False`):
-            Time-dependent learning rate computation depends on whether warm-up initialization is being used
-    This implementation handles low-precision (FP16, bfloat16) values, but it has not been thoroughly tested.
-    Recommended T5 finetuning settings (https://discuss.huggingface.co/t/t5-finetuning-tips/684/3):
-        - Training without LR warmup or clip_threshold is not recommended.
-        - Use a scheduled LR warm-up to a fixed LR
-        - Use clip_threshold=1.0 (https://arxiv.org/abs/1804.04235)
-        - Disable relative updates
-        - Use scale_parameter=False
-        - Additional optimizer operations like gradient clipping should not be used alongside Adafactor
-    Example:
-    ```python
-    Adafactor(model.parameters(), scale_parameter=False, relative_step=False, warmup_init=False, lr=1e-3)
-    ```
-    Others reported the following combination to work well:
-    ```python
-    Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
-    ```
-    When using `lr=None` with [`Trainer`] you will most likely need to use the [`~optimization.AdafactorSchedule`]
-    scheduler as follows:
-    ```python
-    from transformers.optimization import Adafactor, AdafactorSchedule
-    optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
-    lr_scheduler = AdafactorSchedule(optimizer)
-    trainer = Trainer(..., optimizers=(optimizer, lr_scheduler))
-    ```
-    Usage:
-    ```python
-    # replace AdamW with Adafactor
-    optimizer = Adafactor(
-        model.parameters(),
-        lr=1e-3,
-        eps=(1e-30, 1e-3),
-        clip_threshold=1.0,
-        decay_rate=-0.8,
-        beta1=None,
-        weight_decay=0.0,
-        relative_step=False,
-        scale_parameter=False,
-        warmup_init=False,
-    )
-    ```"""
-
-    def __init__(
-        self,
-        params,
-        lr=None,
-        eps=(1e-30, 1e-3),
-        clip_threshold=1.0,
-        decay_rate=-0.8,
-        beta1=None,
-        weight_decay=0.0,
-        scale_parameter=True,
-        relative_step=True,
-        warmup_init=False,
-    ):
-        r"""require_version("torch>=1.5.0")  # add_ with alpha
-        """
-        if lr is not None and relative_step:
-            raise ValueError("Cannot combine manual `lr` and `relative_step=True` options")
-        if warmup_init and not relative_step:
-            raise ValueError("`warmup_init=True` requires `relative_step=True`")
-
-        defaults = dict(
-            lr=lr,
-            eps=eps,
-            clip_threshold=clip_threshold,
-            decay_rate=decay_rate,
-            beta1=beta1,
-            weight_decay=weight_decay,
-            scale_parameter=scale_parameter,
-            relative_step=relative_step,
-            warmup_init=warmup_init,
-        )
-        super().__init__(params, defaults)
-
-    @staticmethod
-    def _get_lr(param_group, param_state):
-        rel_step_sz = param_group["lr"]
-        if param_group["relative_step"]:
-            min_step = 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2
-            rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"]))
-        param_scale = 1.0
-        if param_group["scale_parameter"]:
-            param_scale = max(param_group["eps"][1], param_state["RMS"])
-        return param_scale * rel_step_sz
-
-    @staticmethod
-    def _get_options(param_group, param_shape):
-        factored = len(param_shape) >= 2
-        use_first_moment = param_group["beta1"] is not None
-        return factored, use_first_moment
-
-    @staticmethod
-    def _rms(tensor):
-        return tensor.norm(2) / (tensor.numel() ** 0.5)
-
-    @staticmethod
-    def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col):
-        # copy from fairseq's adafactor implementation:
-        # https://github.com/huggingface/transformers/blob/8395f14de6068012787d83989c3627c3df6a252b/src/transformers/optimization.py#L505
-        r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
-        c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
-        return torch.mul(r_factor, c_factor)
-
-    def step(self, closure=None):
-        """
-        Performs a single optimization step
-        Arguments:
-            closure (callable, optional): A closure that reevaluates the model
-                and returns the loss.
-        """
-        loss = None
-        if closure is not None:
-            loss = closure()
-
-        for group in self.param_groups:
-            for p in group["params"]:
-                if p.grad is None:
-                    continue
-                grad = p.grad.data
-                if grad.dtype in {torch.float16, torch.bfloat16}:
-                    grad = grad.float()
-                if grad.is_sparse:
-                    raise RuntimeError("Adafactor does not support sparse gradients.")
-
-                state = self.state[p]
-                grad_shape = grad.shape
-
-                factored, use_first_moment = self._get_options(group, grad_shape)
-                # State Initialization
-                if len(state) == 0:
-                    state["step"] = 0
-
-                    if use_first_moment:
-                        # Exponential moving average of gradient values
-                        state["exp_avg"] = torch.zeros_like(grad)
-                    if factored:
-                        state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad)
-                        state["exp_avg_sq_col"] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)
-                    else:
-                        state["exp_avg_sq"] = torch.zeros_like(grad)
-
-                    state["RMS"] = 0
-                else:
-                    if use_first_moment:
-                        state["exp_avg"] = state["exp_avg"].to(grad)
-                    if factored:
-                        state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad)
-                        state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad)
-                    else:
-                        state["exp_avg_sq"] = state["exp_avg_sq"].to(grad)
-
-                p_data_fp32 = p.data
-                if p.data.dtype in {torch.float16, torch.bfloat16}:
-                    p_data_fp32 = p_data_fp32.float()
-
-                state["step"] += 1
-                state["RMS"] = self._rms(p_data_fp32)
-                lr = self._get_lr(group, state)
-
-                beta2t = 1.0 - math.pow(state["step"], group["decay_rate"])
-                update = (grad**2) + group["eps"][0]
-                if factored:
-                    exp_avg_sq_row = state["exp_avg_sq_row"]
-                    exp_avg_sq_col = state["exp_avg_sq_col"]
-
-                    exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=(1.0 - beta2t))
-                    exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=(1.0 - beta2t))
-
-                    # Approximation of exponential moving average of square of gradient
-                    update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
-                    update.mul_(grad)
-                else:
-                    exp_avg_sq = state["exp_avg_sq"]
-
-                    exp_avg_sq.mul_(beta2t).add_(update, alpha=(1.0 - beta2t))
-                    update = exp_avg_sq.rsqrt().mul_(grad)
-
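-                # clamp the update so its RMS stays within clip_threshold, then scale by the learning rate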
-                update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0))
-                update.mul_(lr)
-
-                if use_first_moment:
-                    exp_avg = state["exp_avg"]
-                    exp_avg.mul_(group["beta1"]).add_(update, alpha=(1 - group["beta1"]))
-                    update = exp_avg
-
-                if group["weight_decay"] != 0:
-                    p_data_fp32.add_(p_data_fp32, alpha=(-group["weight_decay"] * lr))
-
-                p_data_fp32.add_(-update)
-
-                if p.data.dtype in {torch.float16, torch.bfloat16}:
-                    p.data.copy_(p_data_fp32)
-
-        return loss
diff --git a/utils/optim/lr_scheduler.py b/utils/optim/lr_scheduler.py
deleted file mode 100644
index 2349b9a..0000000
--- a/utils/optim/lr_scheduler.py
+++ /dev/null
@@ -1,119 +0,0 @@
-import math
-from torch.optim.lr_scheduler import _LRScheduler
-
-__all__ = ['AnnealingLR']
-
-class AnnealingLR(_LRScheduler):
-
-    def __init__(self, optimizer, base_lr, warmup_steps, total_steps, decay_mode='cosine', min_lr=0.0, last_step=-1):
-        assert decay_mode in ['linear', 'cosine', 'none']
-        self.optimizer = optimizer
-        self.base_lr = base_lr
-        self.warmup_steps = warmup_steps
-        self.total_steps = total_steps
-        self.decay_mode = decay_mode
-        self.min_lr = min_lr
-        self.current_step = last_step + 1
-        self.step(self.current_step)
-
-    def get_lr(self):
-        if self.warmup_steps > 0 and self.current_step <= self.warmup_steps:
-            return self.base_lr * self.current_step / self.warmup_steps
-        else:
-            ratio = (self.current_step - self.warmup_steps) / (self.total_steps - self.warmup_steps)
self.warmup_steps) - ratio = min(1.0, max(0.0, ratio)) - if self.decay_mode == 'linear': - return self.base_lr * (1 - ratio) - elif self.decay_mode == 'cosine': - return self.base_lr * (math.cos(math.pi * ratio) + 1.0) / 2.0 - else: - return self.base_lr - - def step(self, current_step=None): - if current_step is None: - current_step = self.current_step + 1 - self.current_step = current_step - new_lr = max(self.min_lr, self.get_lr()) - if isinstance(self.optimizer, list): - for o in self.optimizer: - for group in o.param_groups: - group['lr'] = new_lr - else: - for group in self.optimizer.param_groups: - group['lr'] = new_lr - - def state_dict(self): - return { - 'base_lr': self.base_lr, - 'warmup_steps': self.warmup_steps, - 'total_steps': self.total_steps, - 'decay_mode': self.decay_mode, - 'current_step': self.current_step} - - def load_state_dict(self, state_dict): - self.base_lr = state_dict['base_lr'] - self.warmup_steps = state_dict['warmup_steps'] - self.total_steps = state_dict['total_steps'] - self.decay_mode = state_dict['decay_mode'] - self.current_step = state_dict['current_step'] -======= -import math -from torch.optim.lr_scheduler import _LRScheduler - -__all__ = ['AnnealingLR'] - -class AnnealingLR(_LRScheduler): - - def __init__(self, optimizer, base_lr, warmup_steps, total_steps, decay_mode='cosine', min_lr=0.0, last_step=-1): - assert decay_mode in ['linear', 'cosine', 'none'] - self.optimizer = optimizer - self.base_lr = base_lr - self.warmup_steps = warmup_steps - self.total_steps = total_steps - self.decay_mode = decay_mode - self.min_lr = min_lr - self.current_step = last_step + 1 - self.step(self.current_step) - - def get_lr(self): - if self.warmup_steps > 0 and self.current_step <= self.warmup_steps: - return self.base_lr * self.current_step / self.warmup_steps - else: - ratio = (self.current_step - self.warmup_steps) / (self.total_steps - self.warmup_steps) - ratio = min(1.0, max(0.0, ratio)) - if self.decay_mode == 'linear': - return self.base_lr * (1 - ratio) - elif self.decay_mode == 'cosine': - return self.base_lr * (math.cos(math.pi * ratio) + 1.0) / 2.0 - else: - return self.base_lr - - def step(self, current_step=None): - if current_step is None: - current_step = self.current_step + 1 - self.current_step = current_step - new_lr = max(self.min_lr, self.get_lr()) - if isinstance(self.optimizer, list): - for o in self.optimizer: - for group in o.param_groups: - group['lr'] = new_lr - else: - for group in self.optimizer.param_groups: - group['lr'] = new_lr - - def state_dict(self): - return { - 'base_lr': self.base_lr, - 'warmup_steps': self.warmup_steps, - 'total_steps': self.total_steps, - 'decay_mode': self.decay_mode, - 'current_step': self.current_step} - - def load_state_dict(self, state_dict): - self.base_lr = state_dict['base_lr'] - self.warmup_steps = state_dict['warmup_steps'] - self.total_steps = state_dict['total_steps'] - self.decay_mode = state_dict['decay_mode'] - self.current_step = state_dict['current_step'] ->>>>>>> 626e7afc02230297b6f553675ea1c32c29971314 diff --git a/utils/registry.py b/utils/registry.py deleted file mode 100644 index fdaec06..0000000 --- a/utils/registry.py +++ /dev/null @@ -1,337 +0,0 @@ -<<<<<<< HEAD -# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved. - -# Registry class & build_from_config function partially modified from -# https://github.com/open-mmlab/mmcv/blob/master/mmcv/utils/registry.py -# Copyright 2018-2020 Open-MMLab. All rights reserved. 
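-# Minimal usage sketch (illustrative only; `opt` and the step counts are
-# hypothetical, not defined in this module):
-#   scheduler = AnnealingLR(opt, base_lr=1e-4, warmup_steps=1000,
-#                           total_steps=100000, decay_mode='cosine', min_lr=1e-6)
-#   for _ in range(100000):
-#       opt.step()
-#       scheduler.step()  # with no argument, advances current_step by one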
diff --git a/utils/registry.py b/utils/registry.py
deleted file mode 100644
index fdaec06..0000000
--- a/utils/registry.py
+++ /dev/null
@@ -1,337 +0,0 @@
-# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
-
-# Registry class & build_from_config function partially modified from
-# https://github.com/open-mmlab/mmcv/blob/master/mmcv/utils/registry.py
-# Copyright 2018-2020 Open-MMLab. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import copy
-import inspect
-import warnings
-
-
-def build_from_config(cfg, registry, **kwargs):
-    """ Default builder function.
-
-    Args:
-        cfg (dict): A dict of parameters passed to the target class or function.
-            Must contain the key 'type', indicating the target class or function name.
-        registry (Registry): A registry in which to search for the target class or function.
-        kwargs (dict, optional): Extra parameters not present in the config dict.
-
-    Returns:
-        The constructed class instance, or the return value of the invoked function.
-
-    Raises:
-        TypeError:
-        KeyError:
-        Exception:
-    """
-    if not isinstance(cfg, dict):
-        raise TypeError(f"config must be type dict, got {type(cfg)}")
-    if "type" not in cfg:
-        raise KeyError(f"config must contain key type, got {cfg}")
-    if not isinstance(registry, Registry):
-        raise TypeError(f"registry must be type Registry, got {type(registry)}")
-
-    cfg = copy.deepcopy(cfg)
-
-    req_type = cfg.pop("type")
-    req_type_entry = req_type
-    if isinstance(req_type, str):
-        req_type_entry = registry.get(req_type)
-        if req_type_entry is None:
-            try:
-                print(f"For Windows users, we explicitly import registry function {req_type} !!!")
-                from tools.inferences.inference_unianimate_entrance import inference_unianimate_entrance
-                from tools.inferences.inference_unianimate_long_entrance import inference_unianimate_long_entrance
-                # from tools.modules.diffusions.diffusion_ddim import DiffusionDDIM
-                # from tools.modules.diffusions.diffusion_ddim import DiffusionDDIMLong
-                # from tools.modules.autoencoder import AutoencoderKL
-                # from tools.modules.clip_embedder import FrozenOpenCLIPTextVisualEmbedder
-                # from tools.modules.unet.unet_unianimate import UNetSD_UniAnimate
-
-                req_type_entry = eval(req_type)
-            except Exception:
-                raise KeyError(f"{req_type} not found in {registry.name} registry")
-
-    if kwargs:
-        cfg.update(kwargs)
-
-    if inspect.isclass(req_type_entry):
-        try:
-            return req_type_entry(**cfg)
-        except Exception as e:
-            raise Exception(f"Failed to init class {req_type_entry}, with {e}")
-    elif inspect.isfunction(req_type_entry):
-        try:
-            return req_type_entry(**cfg)
-        except Exception as e:
-            raise Exception(f"Failed to invoke function {req_type_entry}, with {e}")
-    else:
-        raise TypeError(f"type must be str or class, got {type(req_type_entry)}")
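-# Illustrative config consumed by build_from_config (the type name is one of the
-# entries imported above; any remaining keys are hypothetical kwargs):
-#   cfg = {'type': 'inference_unianimate_entrance', ...}
-#   result = build_from_config(cfg, INFER_ENGINE)  # pops 'type', forwards the rest as kwargs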
-class Registry(object):
-    """ A registry that maps keys to classes or functions.
-
-    Example:
-        >>> MODELS = Registry('MODELS')
-        >>> @MODELS.register_class()
-        >>> class ResNet(object):
-        >>>     pass
-        >>> resnet = MODELS.build(dict(type="ResNet"))
-        >>>
-        >>> import torchvision
-        >>> @MODELS.register_function("InceptionV3")
-        >>> def get_inception_v3(pretrained=False, progress=True):
-        >>>     return torchvision.models.inception_v3(pretrained=pretrained, progress=progress)
-        >>> inception_v3 = MODELS.build(dict(type='InceptionV3', pretrained=True))
-
-    Args:
-        name (str): Registry name.
-        build_func (func, None): Instance construction function. Defaults to build_from_config.
-        allow_types (tuple): Indicates how instances may be constructed: by instantiating a
-            registered class or by invoking a registered function.
-    """
-
-    def __init__(self, name, build_func=None, allow_types=("class", "function")):
-        self.name = name
-        self.allow_types = allow_types
-        self.class_map = {}
-        self.func_map = {}
-        self.build_func = build_func or build_from_config
-
-    def get(self, req_type):
-        return self.class_map.get(req_type) or self.func_map.get(req_type)
-
-    def build(self, *args, **kwargs):
-        return self.build_func(*args, **kwargs, registry=self)
-
-    def register_class(self, name=None):
-        def _register(cls):
-            if not inspect.isclass(cls):
-                raise TypeError(f"Module must be type class, got {type(cls)}")
-            if "class" not in self.allow_types:
-                raise TypeError(f"Registry {self.name} only allows types {self.allow_types}, got class")
-            module_name = name or cls.__name__
-            if module_name in self.class_map:
-                warnings.warn(f"Class {module_name} already registered by {self.class_map[module_name]}, "
-                              f"will be replaced by {cls}")
-            self.class_map[module_name] = cls
-            return cls
-
-        return _register
-
-    def register_function(self, name=None):
-        def _register(func):
-            if not inspect.isfunction(func):
-                raise TypeError(f"Module must be type function, got {type(func)}")
-            if "function" not in self.allow_types:
-                raise TypeError(f"Registry {self.name} only allows types {self.allow_types}, got function")
-            func_name = name or func.__name__
-            if func_name in self.func_map:
-                warnings.warn(f"Function {func_name} already registered by {self.func_map[func_name]}, "
-                              f"will be replaced by {func}")
-            self.func_map[func_name] = func
-            return func
-
-        return _register
-
-    def _list(self):
-        keys = sorted(list(self.class_map.keys()) + list(self.func_map.keys()))
-        descriptions = []
-        for key in keys:
-            if key in self.class_map:
-                descriptions.append(f"{key}: {self.class_map[key]}")
-            else:
-                descriptions.append(f"{key}: {self.func_map[key]}")
-        return "\n".join(descriptions)
-
-    def __repr__(self):
-        description = self._list()
-        description = '\n'.join(['\t' + s for s in description.split('\n')])
-        return f"{self.__class__.__name__} [{self.name}], \n" + description
diff --git a/utils/registry_class.py b/utils/registry_class.py
deleted file mode 100644
index 3b52145..0000000
--- a/utils/registry_class.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from .registry import Registry, build_from_config
-
-def build_func(cfg, registry, **kwargs):
-    """
-    Default builder; when a list of dataset configs is passed, the concatenated dataset type is returned.
-    """
-    return build_from_config(cfg, registry, **kwargs)
-
-AUTO_ENCODER = Registry("AUTO_ENCODER", build_func=build_func)
-DATASETS = Registry("DATASETS", build_func=build_func)
-DIFFUSION = Registry("DIFFUSION", build_func=build_func)
-DISTRIBUTION = Registry("DISTRIBUTION", build_func=build_func)
-EMBEDDER = Registry("EMBEDDER", build_func=build_func)
-ENGINE = Registry("ENGINE", build_func=build_func)
-INFER_ENGINE = Registry("INFER_ENGINE", build_func=build_func)
-MODEL = Registry("MODEL", build_func=build_func)
-PRETRAIN = Registry("PRETRAIN", build_func=build_func)
-VISUAL = Registry("VISUAL", build_func=build_func)
-EMBEDMANAGER = Registry("EMBEDMANAGER", build_func=build_func)
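-# Usage note (illustrative; the decorated class is a hypothetical example):
-#   @MODEL.register_class()
-#   class UNetSD_UniAnimate: ...
-#   net = MODEL.build(dict(type='UNetSD_UniAnimate', ...))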
- """ - - def __init__(self, name, build_func=None, allow_types=("class", "function")): - self.name = name - self.allow_types = allow_types - self.class_map = {} - self.func_map = {} - self.build_func = build_func or build_from_config - - def get(self, req_type): - return self.class_map.get(req_type) or self.func_map.get(req_type) - - def build(self, *args, **kwargs): - return self.build_func(*args, **kwargs, registry=self) - - def register_class(self, name=None): - def _register(cls): - if not inspect.isclass(cls): - raise TypeError(f"Module must be type class, got {type(cls)}") - if "class" not in self.allow_types: - raise TypeError(f"Register {self.name} only allows type {self.allow_types}, got class") - module_name = name or cls.__name__ - if module_name in self.class_map: - warnings.warn(f"Class {module_name} already registered by {self.class_map[module_name]}, " - f"will be replaced by {cls}") - self.class_map[module_name] = cls - return cls - - return _register - - def register_function(self, name=None): - def _register(func): - if not inspect.isfunction(func): - raise TypeError(f"Registry must be type function, got {type(func)}") - if "function" not in self.allow_types: - raise TypeError(f"Registry {self.name} only allows type {self.allow_types}, got function") - func_name = name or func.__name__ - if func_name in self.class_map: - warnings.warn(f"Function {func_name} already registered by {self.func_map[func_name]}, " - f"will be replaced by {func}") - self.func_map[func_name] = func - return func - - return _register - - def _list(self): - keys = sorted(list(self.class_map.keys()) + list(self.func_map.keys())) - descriptions = [] - for key in keys: - if key in self.class_map: - descriptions.append(f"{key}: {self.class_map[key]}") - else: - descriptions.append( - f"{key}: ") - return "\n".join(descriptions) - - def __repr__(self): - description = self._list() - description = '\n'.join(['\t' + s for s in description.split('\n')]) - return f"{self.__class__.__name__} [{self.name}], \n" + description - - ->>>>>>> 626e7afc02230297b6f553675ea1c32c29971314 diff --git a/utils/registry_class.py b/utils/registry_class.py deleted file mode 100644 index 3b52145..0000000 --- a/utils/registry_class.py +++ /dev/null @@ -1,41 +0,0 @@ -<<<<<<< HEAD -from .registry import Registry, build_from_config - -def build_func(cfg, registry, **kwargs): - """ - Except for config, if passing a list of dataset config, then return the concat type of it - """ - return build_from_config(cfg, registry, **kwargs) - -AUTO_ENCODER = Registry("AUTO_ENCODER", build_func=build_func) -DATASETS = Registry("DATASETS", build_func=build_func) -DIFFUSION = Registry("DIFFUSION", build_func=build_func) -DISTRIBUTION = Registry("DISTRIBUTION", build_func=build_func) -EMBEDDER = Registry("EMBEDDER", build_func=build_func) -ENGINE = Registry("ENGINE", build_func=build_func) -INFER_ENGINE = Registry("INFER_ENGINE", build_func=build_func) -MODEL = Registry("MODEL", build_func=build_func) -PRETRAIN = Registry("PRETRAIN", build_func=build_func) -VISUAL = Registry("VISUAL", build_func=build_func) -EMBEDMANAGER = Registry("EMBEDMANAGER", build_func=build_func) -======= -from .registry import Registry, build_from_config - -def build_func(cfg, registry, **kwargs): - """ - Except for config, if passing a list of dataset config, then return the concat type of it - """ - return build_from_config(cfg, registry, **kwargs) - -AUTO_ENCODER = Registry("AUTO_ENCODER", build_func=build_func) -DATASETS = Registry("DATASETS", build_func=build_func) 
-DIFFUSION = Registry("DIFFUSION", build_func=build_func) -DISTRIBUTION = Registry("DISTRIBUTION", build_func=build_func) -EMBEDDER = Registry("EMBEDDER", build_func=build_func) -ENGINE = Registry("ENGINE", build_func=build_func) -INFER_ENGINE = Registry("INFER_ENGINE", build_func=build_func) -MODEL = Registry("MODEL", build_func=build_func) -PRETRAIN = Registry("PRETRAIN", build_func=build_func) -VISUAL = Registry("VISUAL", build_func=build_func) -EMBEDMANAGER = Registry("EMBEDMANAGER", build_func=build_func) ->>>>>>> 626e7afc02230297b6f553675ea1c32c29971314 diff --git a/utils/seed.py b/utils/seed.py deleted file mode 100644 index 93967ef..0000000 --- a/utils/seed.py +++ /dev/null @@ -1,24 +0,0 @@ -<<<<<<< HEAD -import torch -import random -import numpy as np - - -def setup_seed(seed): - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - np.random.seed(seed) - random.seed(seed) -======= -import torch -import random -import numpy as np - - -def setup_seed(seed): - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - np.random.seed(seed) - random.seed(seed) ->>>>>>> 626e7afc02230297b6f553675ea1c32c29971314 - torch.backends.cudnn.deterministic = True \ No newline at end of file diff --git a/utils/transforms.py b/utils/transforms.py deleted file mode 100644 index a397e86..0000000 --- a/utils/transforms.py +++ /dev/null @@ -1,709 +0,0 @@ -<<<<<<< HEAD -import torch -import torchvision.transforms.functional as F -import random -import math -import numpy as np -from PIL import Image, ImageFilter - -__all__ = ['Compose', 'Resize', 'Rescale', 'CenterCrop', 'CenterCropV2', 'CenterCropWide', 'RandomCrop', 'RandomCropV2', 'RandomHFlip',\ - 'GaussianBlur', 'ColorJitter', 'RandomGray', 'ToTensor', 'Normalize', "ResizeRandomCrop", "ExtractResizeRandomCrop", "ExtractResizeAssignCrop"] - - -class Compose(object): - - def __init__(self, transforms): - self.transforms = transforms - - def __getitem__(self, index): - if isinstance(index, slice): - return Compose(self.transforms[index]) - else: - return self.transforms[index] - - def __len__(self): - return len(self.transforms) - - def __call__(self, rgb): - for t in self.transforms: - rgb = t(rgb) - return rgb - -class Resize(object): - - def __init__(self, size=256): - if isinstance(size, int): - size = (size, size) - self.size = size - - def __call__(self, rgb): - if isinstance(rgb, list): - rgb = [u.resize(self.size, Image.BILINEAR) for u in rgb] - else: - rgb = rgb.resize(self.size, Image.BILINEAR) - return rgb - -class Rescale(object): - - def __init__(self, size=256, interpolation=Image.BILINEAR): - self.size = size - self.interpolation = interpolation - - def __call__(self, rgb): - w, h = rgb[0].size - scale = self.size / min(w, h) - out_w, out_h = int(round(w * scale)), int(round(h * scale)) - rgb = [u.resize((out_w, out_h), self.interpolation) for u in rgb] - return rgb - -class CenterCrop(object): - - def __init__(self, size=224): - self.size = size - - def __call__(self, rgb): - w, h = rgb[0].size - assert min(w, h) >= self.size - x1 = (w - self.size) // 2 - y1 = (h - self.size) // 2 - rgb = [u.crop((x1, y1, x1 + self.size, y1 + self.size)) for u in rgb] - return rgb - -class ResizeRandomCrop(object): - - def __init__(self, size=256, size_short=292): - self.size = size - # self.min_area = min_area - self.size_short = size_short - - def __call__(self, rgb): - - # consistent crop between rgb and m - while min(rgb[0].size) >= 2 * self.size_short: - rgb = [u.resize((u.width // 2, u.height // 2), resample=Image.BOX) for u in rgb] - 
scale = self.size_short / min(rgb[0].size) - rgb = [u.resize((round(scale * u.width), round(scale * u.height)), resample=Image.BICUBIC) for u in rgb] - out_w = self.size - out_h = self.size - w, h = rgb[0].size # (518, 292) - x1 = random.randint(0, w - out_w) - y1 = random.randint(0, h - out_h) - - rgb = [u.crop((x1, y1, x1 + out_w, y1 + out_h)) for u in rgb] - # rgb = [u.resize((self.size, self.size), Image.BILINEAR) for u in rgb] - # # center crop - # x1 = (img[0].width - self.size) // 2 - # y1 = (img[0].height - self.size) // 2 - # img = [u.crop((x1, y1, x1 + self.size, y1 + self.size)) for u in img] - return rgb - - - -class ExtractResizeRandomCrop(object): - - def __init__(self, size=256, size_short=292): - self.size = size - # self.min_area = min_area - self.size_short = size_short - - def __call__(self, rgb): - - # consistent crop between rgb and m - while min(rgb[0].size) >= 2 * self.size_short: - rgb = [u.resize((u.width // 2, u.height // 2), resample=Image.BOX) for u in rgb] - scale = self.size_short / min(rgb[0].size) - rgb = [u.resize((round(scale * u.width), round(scale * u.height)), resample=Image.BICUBIC) for u in rgb] - out_w = self.size - out_h = self.size - w, h = rgb[0].size # (518, 292) - x1 = random.randint(0, w - out_w) - y1 = random.randint(0, h - out_h) - - rgb = [u.crop((x1, y1, x1 + out_w, y1 + out_h)) for u in rgb] - wh = [x1, y1, x1 + out_w, y1 + out_h] - return rgb, wh - - -class ExtractResizeAssignCrop(object): - - def __init__(self, size=256, size_short=292): - self.size = size - # self.min_area = min_area - self.size_short = size_short - - def __call__(self, rgb, wh): - - # consistent crop between rgb and m - while min(rgb[0].size) >= 2 * self.size_short: - rgb = [u.resize((u.width // 2, u.height // 2), resample=Image.BOX) for u in rgb] - scale = self.size_short / min(rgb[0].size) - rgb = [u.resize((round(scale * u.width), round(scale * u.height)), resample=Image.BICUBIC) for u in rgb] - - rgb = [u.crop(wh) for u in rgb] - rgb = [u.resize((self.size, self.size), Image.BILINEAR) for u in rgb] - return rgb - -class CenterCropV2(object): - def __init__(self, size): - self.size = size - - def __call__(self, img): - # fast resize - while min(img[0].size) >= 2 * self.size: - img = [u.resize((u.width // 2, u.height // 2), resample=Image.BOX) for u in img] - scale = self.size / min(img[0].size) - img = [u.resize((round(scale * u.width), round(scale * u.height)), resample=Image.BICUBIC) for u in img] - - # center crop - x1 = (img[0].width - self.size) // 2 - y1 = (img[0].height - self.size) // 2 - img = [u.crop((x1, y1, x1 + self.size, y1 + self.size)) for u in img] - return img - - -class CenterCropWide(object): - def __init__(self, size, interpolation=Image.BOX): - self.size = size - self.interpolation = interpolation - - def __call__(self, img): - if isinstance(img, list): - scale = min(img[0].size[0]/self.size[0], img[0].size[1]/self.size[1]) - img = [u.resize((round(u.width // scale), round(u.height // scale)), resample=self.interpolation) for u in img] - - # center crop - x1 = (img[0].width - self.size[0]) // 2 - y1 = (img[0].height - self.size[1]) // 2 - img = [u.crop((x1, y1, x1 + self.size[0], y1 + self.size[1])) for u in img] - return img - else: - scale = min(img.size[0]/self.size[0], img.size[1]/self.size[1]) - img = img.resize((round(img.width // scale), round(img.height // scale)), resample=self.interpolation) - x1 = (img.width - self.size[0]) // 2 - y1 = (img.height - self.size[1]) // 2 - img = img.crop((x1, y1, x1 + self.size[0], y1 + self.size[1])) - 
return img - - - -class RandomCrop(object): - - def __init__(self, size=224, min_area=0.4): - self.size = size - self.min_area = min_area - - def __call__(self, rgb): - - # consistent crop between rgb and m - w, h = rgb[0].size - area = w * h - out_w, out_h = float('inf'), float('inf') - while out_w > w or out_h > h: - target_area = random.uniform(self.min_area, 1.0) * area - aspect_ratio = random.uniform(3. / 4., 4. / 3.) - out_w = int(round(math.sqrt(target_area * aspect_ratio))) - out_h = int(round(math.sqrt(target_area / aspect_ratio))) - x1 = random.randint(0, w - out_w) - y1 = random.randint(0, h - out_h) - - rgb = [u.crop((x1, y1, x1 + out_w, y1 + out_h)) for u in rgb] - rgb = [u.resize((self.size, self.size), Image.BILINEAR) for u in rgb] - - return rgb - -class RandomCropV2(object): - - def __init__(self, size=224, min_area=0.4, ratio=(3. / 4., 4. / 3.)): - if isinstance(size, (tuple, list)): - self.size = size - else: - self.size = (size, size) - self.min_area = min_area - self.ratio = ratio - - def _get_params(self, img): - width, height = img.size - area = height * width - - for _ in range(10): - target_area = random.uniform(self.min_area, 1.0) * area - log_ratio = (math.log(self.ratio[0]), math.log(self.ratio[1])) - aspect_ratio = math.exp(random.uniform(*log_ratio)) - - w = int(round(math.sqrt(target_area * aspect_ratio))) - h = int(round(math.sqrt(target_area / aspect_ratio))) - - if 0 < w <= width and 0 < h <= height: - i = random.randint(0, height - h) - j = random.randint(0, width - w) - return i, j, h, w - - # Fallback to central crop - in_ratio = float(width) / float(height) - if (in_ratio < min(self.ratio)): - w = width - h = int(round(w / min(self.ratio))) - elif (in_ratio > max(self.ratio)): - h = height - w = int(round(h * max(self.ratio))) - else: # whole image - w = width - h = height - i = (height - h) // 2 - j = (width - w) // 2 - return i, j, h, w - - def __call__(self, rgb): - i, j, h, w = self._get_params(rgb[0]) - rgb = [F.resized_crop(u, i, j, h, w, self.size) for u in rgb] - return rgb - -class RandomHFlip(object): - - def __init__(self, p=0.5): - self.p = p - - def __call__(self, rgb): - if random.random() < self.p: - rgb = [u.transpose(Image.FLIP_LEFT_RIGHT) for u in rgb] - return rgb - -class GaussianBlur(object): - - def __init__(self, sigmas=[0.1, 2.0], p=0.5): - self.sigmas = sigmas - self.p = p - - def __call__(self, rgb): - if random.random() < self.p: - sigma = random.uniform(*self.sigmas) - rgb = [u.filter(ImageFilter.GaussianBlur(radius=sigma)) for u in rgb] - return rgb - -class ColorJitter(object): - - def __init__(self, brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.5): - self.brightness = brightness - self.contrast = contrast - self.saturation = saturation - self.hue = hue - self.p = p - - def __call__(self, rgb): - if random.random() < self.p: - brightness, contrast, saturation, hue = self._random_params() - transforms = [ - lambda f: F.adjust_brightness(f, brightness), - lambda f: F.adjust_contrast(f, contrast), - lambda f: F.adjust_saturation(f, saturation), - lambda f: F.adjust_hue(f, hue)] - random.shuffle(transforms) - for t in transforms: - rgb = [t(u) for u in rgb] - - return rgb - - def _random_params(self): - brightness = random.uniform( - max(0, 1 - self.brightness), 1 + self.brightness) - contrast = random.uniform( - max(0, 1 - self.contrast), 1 + self.contrast) - saturation = random.uniform( - max(0, 1 - self.saturation), 1 + self.saturation) - hue = random.uniform(-self.hue, self.hue) - return brightness, contrast, 
saturation, hue
-
-class RandomGray(object):
-
-    def __init__(self, p=0.2):
-        self.p = p
-
-    def __call__(self, rgb):
-        if random.random() < self.p:
-            rgb = [u.convert('L').convert('RGB') for u in rgb]
-        return rgb
-
-class ToTensor(object):
-
-    def __call__(self, rgb):
-        if isinstance(rgb, list):
-            rgb = torch.stack([F.to_tensor(u) for u in rgb], dim=0)
-        else:
-            rgb = F.to_tensor(rgb)
-
-        return rgb
-
-class Normalize(object):
-
-    def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
-        self.mean = mean
-        self.std = std
-
-    def __call__(self, rgb):
-        rgb = rgb.clone()
-        rgb.clamp_(0, 1)
-        if not isinstance(self.mean, torch.Tensor):
-            self.mean = rgb.new_tensor(self.mean).view(-1)
-        if not isinstance(self.std, torch.Tensor):
-            self.std = rgb.new_tensor(self.std).view(-1)
-        if rgb.dim() == 4:
-            rgb.sub_(self.mean.view(1, -1, 1, 1)).div_(self.std.view(1, -1, 1, 1))
-        elif rgb.dim() == 3:
-            rgb.sub_(self.mean.view(-1, 1, 1)).div_(self.std.view(-1, 1, 1))
-        return rgb
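-# Typical pipeline composition (illustrative; sizes and statistics are example
-# values, not mandated by this module):
-#   transform = Compose([
-#       CenterCropWide((448, 256)),
-#       ToTensor(),
-#       Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-#   ])
-#   clip = transform(list_of_pil_frames)  # F x C x H x W tensor, roughly in [-1, 1]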
diff --git a/utils/util.py b/utils/util.py
deleted file mode 100644
index e93d55b..0000000
--- a/utils/util.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import torch
-
-def to_device(batch, device, non_blocking=False):
-    if isinstance(batch, (list, tuple)):
-        return type(batch)([
-            to_device(u, device, non_blocking)
-            for u in batch])
-    elif isinstance(batch, dict):
-        return type(batch)([
-            (k, to_device(v, device, non_blocking))
-            for k, v in batch.items()])
-    elif isinstance(batch, torch.Tensor) and batch.device != device:
-        batch = batch.to(device, non_blocking=non_blocking)
-    return batch
diff --git a/utils/video_op.py b/utils/video_op.py
deleted file mode 100644
index 03cacaa..0000000
--- a/utils/video_op.py
+++ /dev/null
@@ -1,721 +0,0 @@
-import os
-import os.path as osp
-import sys
-import cv2
-import glob
-import math
-import torch
-import gzip
-import copy
-import time
-import json
-import pickle
-import base64
-import imageio
-import hashlib
-import requests
-import binascii
-import zipfile
-# import skvideo.io
-import numpy as np
-from io import BytesIO
-import urllib.request
-import torch.nn.functional as F
-import torchvision.utils as tvutils
-from multiprocessing.pool import ThreadPool as Pool
-from einops import rearrange
-from PIL import Image, ImageDraw, ImageFont
-
-
-def gen_text_image(captions, text_size):
-    num_char = 38  # wrap captions at ~38 characters per rendered line
-    font_size = int(text_size / 20)
-    font = ImageFont.truetype('data/font/DejaVuSans.ttf', size=font_size)
-    text_image_list = []
-    for text in captions:
-        txt_img = Image.new("RGB", (text_size, text_size), color="white")
-        draw = ImageDraw.Draw(txt_img)
-        lines = "\n".join(text[start:start + num_char] for start in range(0, len(text), num_char))
-        draw.text((0, 0), lines, fill="black", font=font)
-        txt_img = np.array(txt_img)
-        text_image_list.append(txt_img)
-    text_images = np.stack(text_image_list, axis=0)
-    text_images = torch.from_numpy(text_images)
-    return text_images
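-# gen_text_image renders each caption onto a white text_size x text_size tile;
-# the save_* helpers below repeat these tiles along the frame axis and
-# concatenate them beside the video frames.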
-@torch.no_grad()
-def save_video_refimg_and_text(
-        local_path,
-        ref_frame,
-        gen_video,
-        captions,
-        mean=[0.5, 0.5, 0.5],
-        std=[0.5, 0.5, 0.5],
-        text_size=256,
-        nrow=4,
-        save_fps=8,
-        retry=5):
-    '''
-    gen_video: BxCxFxHxW
-    '''
-    nrow = max(int(gen_video.size(0) / 2), 1)  # note: recomputed from the batch size, overriding the nrow argument
-    vid_mean = torch.tensor(mean, device=gen_video.device).view(1, -1, 1, 1, 1)  # ncfhw
-    vid_std = torch.tensor(std, device=gen_video.device).view(1, -1, 1, 1, 1)  # ncfhw
-
-    text_images = gen_text_image(captions, text_size)  # Tensor 8x256x256x3
-    text_images = text_images.unsqueeze(1)  # Tensor 8x1x256x256x3
-    text_images = text_images.repeat_interleave(repeats=gen_video.size(2), dim=1)  # 8x16x256x256x3
-
-    ref_frame = ref_frame.unsqueeze(2)
-    ref_frame = ref_frame.mul_(vid_std).add_(vid_mean)
-    ref_frame = ref_frame.repeat_interleave(repeats=gen_video.size(2), dim=2)  # 8x16x256x256x3
-    ref_frame.clamp_(0, 1)
-    ref_frame = ref_frame * 255.0
-    ref_frame = rearrange(ref_frame, 'b c f h w -> b f h w c')
-
-    gen_video = gen_video.mul_(vid_std).add_(vid_mean)  # 8x3x16x256x384
-    gen_video.clamp_(0, 1)
-    gen_video = gen_video * 255.0
-
-    images = rearrange(gen_video, 'b c f h w -> b f h w c')
-    images = torch.cat([ref_frame, images, text_images], dim=3)
-
-    images = rearrange(images, '(r j) f h w c -> f (r h) (j w) c', r=nrow)
-    images = [(img.numpy()).astype('uint8') for img in images]
-
-    exception = None
-    for _ in [None] * retry:
-        try:
-            if len(images) == 1:
-                out_path = local_path + '.png'  # build the path locally so retries do not re-append the suffix
-                cv2.imwrite(out_path, images[0][:, :, ::-1], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
-            else:
-                out_path = local_path + '.mp4'
-                frame_dir = os.path.join(os.path.dirname(out_path), '%s_frames' % (os.path.basename(out_path)))
-                os.system(f'rm -rf {frame_dir}'); os.makedirs(frame_dir, exist_ok=True)
-                for fid, frame in enumerate(images):
-                    tpth = os.path.join(frame_dir, '%04d.png' % (fid + 1))
-                    cv2.imwrite(tpth, frame[:, :, ::-1], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
-                cmd = f'ffmpeg -y -f image2 -loglevel quiet -framerate {save_fps} -i {frame_dir}/%04d.png -vcodec libx264 -crf 17 -pix_fmt yuv420p {out_path}'
-                os.system(cmd); os.system(f'rm -rf {frame_dir}')
-            exception = None
-            break
-        except Exception as e:
-            exception = e
-            continue
-
-    if exception is not None:
-        raise exception
-
-
-@torch.no_grad()
-def save_i2vgen_video(
-        local_path,
-        image_id,
-        gen_video,
-        captions,
-        mean=[0.5, 0.5, 0.5],
-        std=[0.5, 0.5, 0.5],
-        text_size=256,
-        retry=5,
-        save_fps=8
-):
-    '''
-    Save both the generated video and the input conditions.
-    '''
-    vid_mean = torch.tensor(mean, device=gen_video.device).view(1, -1, 1, 1, 1)  # ncfhw
-    vid_std = torch.tensor(std, device=gen_video.device).view(1, -1, 1, 1, 1)  # ncfhw
-
-    text_images = gen_text_image(captions, text_size)  # Tensor 1x256x256x3
-    text_images = text_images.unsqueeze(1)  # Tensor 1x1x256x256x3
-    text_images = text_images.repeat_interleave(repeats=gen_video.size(2), dim=1)  # 1x16x256x256x3
-
-    image_id = image_id.unsqueeze(2)  # B, C, F, H, W
-    image_id = image_id.repeat_interleave(repeats=gen_video.size(2), dim=2)  # 1x3x32x256x448
-    image_id = image_id.mul_(vid_std).add_(vid_mean)  # 32x3x256x448
-    image_id.clamp_(0, 1)
-    image_id = image_id * 255.0
-    image_id = rearrange(image_id, 'b c f h w -> b f h w c')
-
-    gen_video = gen_video.mul_(vid_std).add_(vid_mean)  # 8x3x16x256x384
-    gen_video.clamp_(0, 1)
-    gen_video = gen_video * 255.0
-
-    images = rearrange(gen_video, 'b c f h w -> b f h w c')
-    images = torch.cat([image_id, images, text_images], dim=3)
-    images = images[0]
-    images = [(img.numpy()).astype('uint8') for img in images]
-
-    exception = None
-    for _ in [None] * retry:
-        try:
-            frame_dir = os.path.join(os.path.dirname(local_path), '%s_frames' % (os.path.basename(local_path)))
-            os.system(f'rm -rf {frame_dir}'); os.makedirs(frame_dir, exist_ok=True)
-            for fid, frame in enumerate(images):
-                tpth = os.path.join(frame_dir, '%04d.png' % (fid + 1))
-                cv2.imwrite(tpth, frame[:, :, ::-1], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
-            cmd = f'ffmpeg -y -f image2 -loglevel quiet -framerate {save_fps} -i {frame_dir}/%04d.png -vcodec libx264 -crf 17 -pix_fmt yuv420p {local_path}'
-            os.system(cmd); os.system(f'rm -rf {frame_dir}')
-            break
-        except Exception as e:
-            exception = e
-            continue
-
-    if exception is not None:
-        raise exception
-
-
-@torch.no_grad()
-def save_i2vgen_video_safe(
-        local_path,
-        gen_video,
-        captions,
-        mean=[0.5, 0.5, 0.5],
-        std=[0.5, 0.5, 0.5],
-        text_size=256,
-        retry=5,
-        save_fps=8
-):
-    '''
-    Save only the generated video (without the reference conditions) and run an
-    anomaly check on the last frame.
-    '''
-    vid_mean = torch.tensor(mean, device=gen_video.device).view(1, -1, 1, 1, 1)  # ncfhw
-    vid_std = torch.tensor(std, device=gen_video.device).view(1, -1, 1, 1, 1)  # ncfhw
-
-    gen_video = gen_video.mul_(vid_std).add_(vid_mean)  # 8x3x16x256x384
-    gen_video.clamp_(0, 1)
-    gen_video = gen_video * 255.0
-
-    images = rearrange(gen_video, 'b c f h w -> b f h w c')
-    images = images[0]
-    images = [(img.numpy()).astype('uint8') for img in images]
-    num_image = len(images)
-    exception = None
-    for _ in [None] * retry:
-        try:
-            if num_image == 1:
-                out_path = local_path + '.png'  # build the path locally so retries do not re-append the suffix
-                cv2.imwrite(out_path, images[0][:, :, ::-1], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
-            else:
-                writer = imageio.get_writer(local_path, fps=save_fps, codec='libx264', quality=8)
-                for fid, frame in enumerate(images):
-                    if fid == num_image - 1:  # known failure mode: skip a last frame collapsed to near-uniform gray
-                        ratio = (np.sum((frame >= 117) & (frame <= 137))) / (frame.size)
-                        if ratio > 0.4:
-                            continue
-                    writer.append_data(frame)
-                writer.close()
-            break
-        except Exception as e:
-            exception = e
-            continue
-
-    if exception is not None:
-        raise exception
-
-
-@torch.no_grad()
-def save_t2vhigen_video_safe(
-        local_path,
-        gen_video,
-        captions,
-        mean=[0.5, 0.5, 0.5],
-        std=[0.5, 0.5, 0.5],
-        text_size=256,
-        retry=5,
-        save_fps=8
-):
-    '''
-    Save only the generated video (without the reference conditions) and run an
-    anomaly check on the last frame.
-    '''
-    vid_mean = torch.tensor(mean, device=gen_video.device).view(1, -1, 1, 1, 1)  # ncfhw
-    vid_std = torch.tensor(std, device=gen_video.device).view(1, -1, 1, 1, 1)  # ncfhw
-
-    gen_video = gen_video.mul_(vid_std).add_(vid_mean)  # 8x3x16x256x384
-    gen_video.clamp_(0, 1)
-    gen_video = gen_video * 255.0
-
-    images = rearrange(gen_video, 'b c f h w -> b f h w c')
-    images = images[0]
-    images = [(img.numpy()).astype('uint8') for img in images]
-    num_image = len(images)
-    exception = None
-    for _ in [None] * retry:
-        try:
-            if num_image == 1:
-                out_path = local_path + '.png'  # build the path locally so retries do not re-append the suffix
-                cv2.imwrite(out_path, images[0][:, :, ::-1], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
-            else:
-                frame_dir = os.path.join(os.path.dirname(local_path), '%s_frames' % (os.path.basename(local_path)))
-                os.system(f'rm -rf {frame_dir}'); os.makedirs(frame_dir, exist_ok=True)
-                for fid, frame in enumerate(images):
-                    if fid == num_image - 1:  # known failure mode: skip a last frame collapsed to near-uniform gray
-                        ratio = (np.sum((frame >= 117) & (frame <= 137))) / (frame.size)
-                        if ratio > 0.4:
-                            continue
-                    tpth = os.path.join(frame_dir, '%04d.png' % (fid + 1))
-                    cv2.imwrite(tpth, frame[:, :, ::-1], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
-                cmd = f'ffmpeg -y -f image2 -loglevel quiet -framerate {save_fps} -i {frame_dir}/%04d.png -vcodec libx264 -crf 17 -pix_fmt yuv420p {local_path}'
-                os.system(cmd)
-                os.system(f'rm -rf {frame_dir}')
-            break
-        except Exception as e:
-            exception = e
-            continue
-
-    if exception is not None:
-        raise exception
-
-
-@torch.no_grad()
-def save_video_multiple_conditions_not_gif_horizontal_3col(local_path, video_tensor, model_kwargs, source_imgs,
-                                                           mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], nrow=8, retry=5, save_fps=8):
-    mean = torch.tensor(mean, device=video_tensor.device).view(1, -1, 1, 1, 1)  # ncfhw
-    std = torch.tensor(std, device=video_tensor.device).view(1, -1, 1, 1, 1)  # ncfhw
-    video_tensor = video_tensor.mul_(std).add_(mean)  # unnormalize back to [0, 1]
-    video_tensor.clamp_(0, 1)
-
-    b, c, n, h, w = video_tensor.shape
-    source_imgs = F.adaptive_avg_pool3d(source_imgs, (n, h, w))
-    source_imgs = source_imgs.cpu()
-
-    model_kwargs_channel3 = {}
-    for key, conditions in model_kwargs[0].items():
-        if conditions.size(1) == 1:
-            conditions = torch.cat([conditions, conditions, conditions], dim=1)
-            conditions = F.adaptive_avg_pool3d(conditions, (n, h, w))
-        elif conditions.size(1) == 2:
-            conditions = torch.cat([conditions, conditions[:, :1, ]], dim=1)
-            conditions = F.adaptive_avg_pool3d(conditions, (n, h, w))
-        elif conditions.size(1) == 3:
-            conditions = F.adaptive_avg_pool3d(conditions, (n, h, w))
-        elif conditions.size(1) == 4:  # a mask: 3 color channels plus 1 alpha channel
-            color = ((conditions[:, 0:3] + 1.) / 2.)
# .astype(np.float32) - alpha = conditions[:, 3:4] # .astype(np.float32) - conditions = color * alpha + 1.0 * (1.0 - alpha) - conditions = F.adaptive_avg_pool3d(conditions, (n, h, w)) - model_kwargs_channel3[key] = conditions.cpu() if conditions.is_cuda else conditions - - # filename = rand_name(suffix='.gif') - for _ in [None] * retry: - try: - vid_gif = rearrange(video_tensor, '(i j) c f h w -> c f (i h) (j w)', i = nrow) - - # cons_list = [rearrange(con, '(i j) c f h w -> c f (i h) (j w)', i = nrow) for _, con in model_kwargs_channel3.items()] - # vid_gif = torch.cat(cons_list + [vid_gif,], dim=3) #Uncomment this and previous line to compare output video with input pose frames - - vid_gif = vid_gif.permute(1,2,3,0) - - images = vid_gif * 255.0 - images = [(img.numpy()).astype('uint8') for img in images] - if len(images) == 1: - - local_path = local_path.replace('.mp4', '.png') - cv2.imwrite(local_path, images[0][:,:,::-1], [int(cv2.IMWRITE_JPEG_QUALITY), 100]) - # bucket.put_object_from_file(oss_key, local_path) - else: - - outputs = [] - for image_name in images: - x = Image.fromarray(image_name) - outputs.append(x) - from pathlib import Path - save_fmt = Path(local_path).suffix - - if save_fmt == ".mp4": - with imageio.get_writer(local_path, fps=save_fps) as writer: - for img in outputs: - img_array = np.array(img) # Convert PIL Image to numpy array - writer.append_data(img_array) - - elif save_fmt == ".gif": - outputs[0].save( - fp=local_path, - format="GIF", - append_images=outputs[1:], - save_all=True, - duration=(1 / save_fps * 1000), - loop=0, - ) - else: - raise ValueError("Unsupported file type. Use .mp4 or .gif.") - - # fourcc = cv2.VideoWriter_fourcc(*'mp4v') - # fps = save_fps - # image = images[0] - # media_writer = cv2.VideoWriter(local_path, fourcc, fps, (image.shape[1],image.shape[0])) - # for image_name in images: - # im = image_name[:,:,::-1] - # media_writer.write(im) - # media_writer.release() - - - exception = None - break - except Exception as e: - exception = e - continue - if exception is not None: - print('save video to {} failed, error: {}'.format(local_path, exception), flush=True) - -======= -import os -import os.path as osp -import sys -import cv2 -import glob -import math -import torch -import gzip -import copy -import time -import json -import pickle -import base64 -import imageio -import hashlib -import requests -import binascii -import zipfile -# import skvideo.io -import numpy as np -from io import BytesIO -import urllib.request -import torch.nn.functional as F -import torchvision.utils as tvutils -from multiprocessing.pool import ThreadPool as Pool -from einops import rearrange -from PIL import Image, ImageDraw, ImageFont - - -def gen_text_image(captions, text_size): - num_char = int(38 * (text_size / text_size)) - font_size = int(text_size / 20) - font = ImageFont.truetype('data/font/DejaVuSans.ttf', size=font_size) - text_image_list = [] - for text in captions: - txt_img = Image.new("RGB", (text_size, text_size), color="white") - draw = ImageDraw.Draw(txt_img) - lines = "\n".join(text[start:start + num_char] for start in range(0, len(text), num_char)) - draw.text((0, 0), lines, fill="black", font=font) - txt_img = np.array(txt_img) - text_image_list.append(txt_img) - text_images = np.stack(text_image_list, axis=0) - text_images = torch.from_numpy(text_images) - return text_images - -@torch.no_grad() -def save_video_refimg_and_text( - local_path, - ref_frame, - gen_video, - captions, - mean=[0.5, 0.5, 0.5], - std=[0.5, 0.5, 0.5], - text_size=256, - 
nrow=4, - save_fps=8, - retry=5): - ''' - gen_video: BxCxFxHxW - ''' - nrow = max(int(gen_video.size(0) / 2), 1) - vid_mean = torch.tensor(mean, device=gen_video.device).view(1, -1, 1, 1, 1) #ncfhw - vid_std = torch.tensor(std, device=gen_video.device).view(1, -1, 1, 1, 1) #ncfhw - - text_images = gen_text_image(captions, text_size) # Tensor 8x256x256x3 - text_images = text_images.unsqueeze(1) # Tensor 8x1x256x256x3 - text_images = text_images.repeat_interleave(repeats=gen_video.size(2), dim=1) # 8x16x256x256x3 - - ref_frame = ref_frame.unsqueeze(2) - ref_frame = ref_frame.mul_(vid_std).add_(vid_mean) - ref_frame = ref_frame.repeat_interleave(repeats=gen_video.size(2), dim=2) # 8x16x256x256x3 - ref_frame.clamp_(0, 1) - ref_frame = ref_frame * 255.0 - ref_frame = rearrange(ref_frame, 'b c f h w -> b f h w c') - - gen_video = gen_video.mul_(vid_std).add_(vid_mean) # 8x3x16x256x384 - gen_video.clamp_(0, 1) - gen_video = gen_video * 255.0 - - images = rearrange(gen_video, 'b c f h w -> b f h w c') - images = torch.cat([ref_frame, images, text_images], dim=3) - - images = rearrange(images, '(r j) f h w c -> f (r h) (j w) c', r=nrow) - images = [(img.numpy()).astype('uint8') for img in images] - - for _ in [None] * retry: - try: - if len(images) == 1: - local_path = local_path + '.png' - cv2.imwrite(local_path, images[0][:,:,::-1], [int(cv2.IMWRITE_JPEG_QUALITY), 100]) - else: - local_path = local_path + '.mp4' - frame_dir = os.path.join(os.path.dirname(local_path), '%s_frames' % (os.path.basename(local_path))) - os.system(f'rm -rf {frame_dir}'); os.makedirs(frame_dir, exist_ok=True) - for fid, frame in enumerate(images): - tpth = os.path.join(frame_dir, '%04d.png' % (fid+1)) - cv2.imwrite(tpth, frame[:,:,::-1], [int(cv2.IMWRITE_JPEG_QUALITY), 100]) - cmd = f'ffmpeg -y -f image2 -loglevel quiet -framerate {save_fps} -i {frame_dir}/%04d.png -vcodec libx264 -crf 17 -pix_fmt yuv420p {local_path}' - os.system(cmd); os.system(f'rm -rf {frame_dir}') - # os.system(f'rm -rf {local_path}') - exception = None - break - except Exception as e: - exception = e - continue - - -@torch.no_grad() -def save_i2vgen_video( - local_path, - image_id, - gen_video, - captions, - mean=[0.5, 0.5, 0.5], - std=[0.5, 0.5, 0.5], - text_size=256, - retry=5, - save_fps = 8 -): - ''' - Save both the generated video and the input conditions. 
- ''' - vid_mean = torch.tensor(mean, device=gen_video.device).view(1, -1, 1, 1, 1) #ncfhw - vid_std = torch.tensor(std, device=gen_video.device).view(1, -1, 1, 1, 1) #ncfhw - - text_images = gen_text_image(captions, text_size) # Tensor 1x256x256x3 - text_images = text_images.unsqueeze(1) # Tensor 1x1x256x256x3 - text_images = text_images.repeat_interleave(repeats=gen_video.size(2), dim=1) # 1x16x256x256x3 - - image_id = image_id.unsqueeze(2) # B, C, F, H, W - image_id = image_id.repeat_interleave(repeats=gen_video.size(2), dim=2) # 1x3x32x256x448 - image_id = image_id.mul_(vid_std).add_(vid_mean) # 32x3x256x448 - image_id.clamp_(0, 1) - image_id = image_id * 255.0 - image_id = rearrange(image_id, 'b c f h w -> b f h w c') - - gen_video = gen_video.mul_(vid_std).add_(vid_mean) # 8x3x16x256x384 - gen_video.clamp_(0, 1) - gen_video = gen_video * 255.0 - - images = rearrange(gen_video, 'b c f h w -> b f h w c') - images = torch.cat([image_id, images, text_images], dim=3) - images = images[0] - images = [(img.numpy()).astype('uint8') for img in images] - - exception = None - for _ in [None] * retry: - try: - frame_dir = os.path.join(os.path.dirname(local_path), '%s_frames' % (os.path.basename(local_path))) - os.system(f'rm -rf {frame_dir}'); os.makedirs(frame_dir, exist_ok=True) - for fid, frame in enumerate(images): - tpth = os.path.join(frame_dir, '%04d.png' % (fid+1)) - cv2.imwrite(tpth, frame[:,:,::-1], [int(cv2.IMWRITE_JPEG_QUALITY), 100]) - cmd = f'ffmpeg -y -f image2 -loglevel quiet -framerate {save_fps} -i {frame_dir}/%04d.png -vcodec libx264 -crf 17 -pix_fmt yuv420p {local_path}' - os.system(cmd); os.system(f'rm -rf {frame_dir}') - break - except Exception as e: - exception = e - continue - - if exception is not None: - raise exception - - -@torch.no_grad() -def save_i2vgen_video_safe( - local_path, - gen_video, - captions, - mean=[0.5, 0.5, 0.5], - std=[0.5, 0.5, 0.5], - text_size=256, - retry=5, - save_fps = 8 -): - ''' - Save only the generated video, do not save the related reference conditions, and at the same time perform anomaly detection on the last frame. - ''' - vid_mean = torch.tensor(mean, device=gen_video.device).view(1, -1, 1, 1, 1) #ncfhw - vid_std = torch.tensor(std, device=gen_video.device).view(1, -1, 1, 1, 1) #ncfhw - - gen_video = gen_video.mul_(vid_std).add_(vid_mean) # 8x3x16x256x384 - gen_video.clamp_(0, 1) - gen_video = gen_video * 255.0 - - images = rearrange(gen_video, 'b c f h w -> b f h w c') - images = images[0] - images = [(img.numpy()).astype('uint8') for img in images] - num_image = len(images) - exception = None - for _ in [None] * retry: - try: - if num_image == 1: - local_path = local_path + '.png' - cv2.imwrite(local_path, images[0][:,:,::-1], [int(cv2.IMWRITE_JPEG_QUALITY), 100]) - else: - writer = imageio.get_writer(local_path, fps=save_fps, codec='libx264', quality=8) - for fid, frame in enumerate(images): - if fid == num_image-1: # Fix known bugs. - ratio = (np.sum((frame >= 117) & (frame <= 137)))/(frame.size) - if ratio > 0.4: continue - writer.append_data(frame) - writer.close() - break - except Exception as e: - exception = e - continue - - if exception is not None: - raise exception - - -@torch.no_grad() -def save_t2vhigen_video_safe( - local_path, - gen_video, - captions, - mean=[0.5, 0.5, 0.5], - std=[0.5, 0.5, 0.5], - text_size=256, - retry=5, - save_fps = 8 -): - ''' - Save only the generated video, do not save the related reference conditions, and at the same time perform anomaly detection on the last frame. 
-@torch.no_grad()
-def save_t2vhigen_video_safe(
-    local_path,
-    gen_video,
-    captions,
-    mean=[0.5, 0.5, 0.5],
-    std=[0.5, 0.5, 0.5],
-    text_size=256,
-    retry=5,
-    save_fps=8
-):
-    '''
-    Save only the generated video, without the reference conditions, and run
-    an anomaly check on the last frame.
-    '''
-    vid_mean = torch.tensor(mean, device=gen_video.device).view(1, -1, 1, 1, 1)  # ncfhw
-    vid_std = torch.tensor(std, device=gen_video.device).view(1, -1, 1, 1, 1)  # ncfhw
-
-    gen_video = gen_video.mul_(vid_std).add_(vid_mean)  # de-normalize back to [0, 1]
-    gen_video.clamp_(0, 1)
-    gen_video = gen_video * 255.0
-
-    images = rearrange(gen_video, 'b c f h w -> b f h w c')
-    images = images[0]
-    images = [img.numpy().astype('uint8') for img in images]
-    num_image = len(images)
-    exception = None
-    for _ in range(retry):
-        try:
-            if num_image == 1:
-                save_path = local_path + '.png'  # separate name avoids re-appending the suffix on retry
-                cv2.imwrite(save_path, images[0][:, :, ::-1], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
-            else:
-                frame_dir = os.path.join(os.path.dirname(local_path), '%s_frames' % os.path.basename(local_path))
-                os.system(f'rm -rf {frame_dir}'); os.makedirs(frame_dir, exist_ok=True)
-                for fid, frame in enumerate(images):
-                    if fid == num_image - 1:  # skip a corrupted (washed-out) last frame
-                        ratio = np.sum((frame >= 117) & (frame <= 137)) / frame.size
-                        if ratio > 0.4:
-                            continue
-                    tpth = os.path.join(frame_dir, '%04d.png' % (fid + 1))
-                    cv2.imwrite(tpth, frame[:, :, ::-1], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
-                cmd = f'ffmpeg -y -f image2 -loglevel quiet -framerate {save_fps} -i {frame_dir}/%04d.png -vcodec libx264 -crf 17 -pix_fmt yuv420p {local_path}'
-                os.system(cmd)
-                os.system(f'rm -rf {frame_dir}')
-            exception = None  # clear any error left over from a failed earlier attempt
-            break
-        except Exception as e:
-            exception = e
-            continue
-
-    if exception is not None:
-        raise exception
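# --- Editor's note (not part of the original file) ---------------------------
# The savers above materialize frames in a '<name>_frames' folder and remove
# it with os.system('rm -rf ...'), which is Unix-only and leaves the folder
# behind if encoding throws. A portable sketch of the same pattern using the
# standard library; `write_frames_tmp` is a hypothetical name, `encode` stands
# for the ffmpeg step (e.g. the subprocess call sketched earlier), and cv2 is
# assumed imported at module level, as elsewhere in this file:
import os
import tempfile

def write_frames_tmp(images, encode):
    # the context manager deletes frame_dir even if encode() raises
    with tempfile.TemporaryDirectory(suffix='_frames') as frame_dir:
        for fid, frame in enumerate(images):
            cv2.imwrite(os.path.join(frame_dir, '%04d.png' % (fid + 1)),
                        frame[:, :, ::-1])  # RGB -> BGR for OpenCV
        encode(frame_dir)
# -----------------------------------------------------------------------------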
-@torch.no_grad()
-def save_video_multiple_conditions_not_gif_horizontal_3col(local_path, video_tensor, model_kwargs, source_imgs,
-                                                           mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5],
-                                                           nrow=8, retry=5, save_fps=8):
-    '''
-    Save the generated batch as an MP4/GIF preview; condition tensors are
-    normalized to 3 channels for optional side-by-side comparison.
-    '''
-    mean = torch.tensor(mean, device=video_tensor.device).view(1, -1, 1, 1, 1)  # ncfhw
-    std = torch.tensor(std, device=video_tensor.device).view(1, -1, 1, 1, 1)  # ncfhw
-    video_tensor = video_tensor.mul_(std).add_(mean)  # unnormalize back to [0, 1]
-    video_tensor.clamp_(0, 1)
-
-    b, c, n, h, w = video_tensor.shape
-    source_imgs = F.adaptive_avg_pool3d(source_imgs, (n, h, w))
-    source_imgs = source_imgs.cpu()
-
-    model_kwargs_channel3 = {}
-    for key, conditions in model_kwargs[0].items():
-        if conditions.size(1) == 1:  # grayscale -> RGB
-            conditions = torch.cat([conditions, conditions, conditions], dim=1)
-        elif conditions.size(1) == 2:
-            conditions = torch.cat([conditions, conditions[:, :1]], dim=1)
-        elif conditions.size(1) == 4:  # RGB + alpha, i.e. a mask
-            color = (conditions[:, 0:3] + 1.) / 2.
-            alpha = conditions[:, 3:4]
-            conditions = color * alpha + 1.0 * (1.0 - alpha)  # composite onto white
-        conditions = F.adaptive_avg_pool3d(conditions, (n, h, w))
-        model_kwargs_channel3[key] = conditions.cpu() if conditions.is_cuda else conditions
-
-    exception = None
-    for _ in range(retry):
-        try:
-            vid_gif = rearrange(video_tensor, '(i j) c f h w -> c f (i h) (j w)', i=nrow)
-
-            # cons_list = [rearrange(con, '(i j) c f h w -> c f (i h) (j w)', i=nrow) for _, con in model_kwargs_channel3.items()]
-            # vid_gif = torch.cat(cons_list + [vid_gif], dim=3)  # uncomment this and the previous line to compare the output video with the input pose frames
-
-            vid_gif = vid_gif.permute(1, 2, 3, 0)
-
-            images = vid_gif * 255.0
-            images = [img.numpy().astype('uint8') for img in images]
-            if len(images) == 1:
-                local_path = local_path.replace('.mp4', '.png')
-                cv2.imwrite(local_path, images[0][:, :, ::-1], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
-            else:
-                outputs = [Image.fromarray(frame) for frame in images]
-                from pathlib import Path
-                save_fmt = Path(local_path).suffix
-
-                if save_fmt == '.mp4':
-                    with imageio.get_writer(local_path, fps=save_fps) as writer:
-                        for img in outputs:
-                            writer.append_data(np.array(img))  # PIL Image back to a numpy array
-                elif save_fmt == '.gif':
-                    outputs[0].save(
-                        fp=local_path,
-                        format='GIF',
-                        append_images=outputs[1:],
-                        save_all=True,
-                        duration=(1 / save_fps * 1000),
-                        loop=0,
-                    )
-                else:
-                    raise ValueError('Unsupported file type. Use .mp4 or .gif.')
-
-            exception = None
-            break
-        except Exception as e:
-            exception = e
-            continue
-    if exception is not None:
-        print(f'save video to {local_path} failed, error: {exception}', flush=True)
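# --- Editor's note (not part of the original file) ---------------------------
# A hypothetical usage sketch for the saver above. Tensors follow the
# BxCxFxHxW layout used throughout this file; the 'dwpose' key, the second
# empty dict, and all shapes are illustrative assumptions, not values taken
# from this repository:
import torch

video = torch.rand(1, 3, 16, 256, 448) * 2 - 1   # fake model output in [-1, 1]
source = torch.rand(1, 3, 16, 256, 448)          # fake source frames
model_kwargs = [{'dwpose': torch.rand(1, 3, 16, 256, 448)}, {}]
save_video_multiple_conditions_not_gif_horizontal_3col(
    'sample.mp4', video, model_kwargs, source, nrow=1, save_fps=8)
# -----------------------------------------------------------------------------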