From 069bd2e7333f5acf4d02e90127c2a562b348d773 Mon Sep 17 00:00:00 2001
From: GitHub Action
Date: Mon, 9 Dec 2024 16:24:54 +0000
Subject: [PATCH] Update documentation

---
 _images/combined_libs.png | Bin 0 -> 27424 bytes
 _images/single_spec_libs.png | Bin 0 -> 49801 bytes
 _modules/index.html | 126 +
 _modules/pystellibs/basel.html | 248 +
 _modules/pystellibs/btsettl.html | 232 +
 _modules/pystellibs/elodie.html | 236 +
 _modules/pystellibs/ezunits.html | 133 +
 _modules/pystellibs/ezunits/pint.html | 1321 ++++
 _modules/pystellibs/helpers.html | 603 ++
 _modules/pystellibs/interpolator.html | 142 +
 .../pystellibs/interpolator/interpolator.html | 130 +
 _modules/pystellibs/interpolator/lejeune.html | 913 +++
 .../pystellibs/interpolator/ndlinear.html | 134 +
 _modules/pystellibs/kurucz.html | 210 +
 _modules/pystellibs/marcs.html | 329 +
 _modules/pystellibs/munari.html | 212 +
 _modules/pystellibs/pbar.html | 420 ++
 _modules/pystellibs/rauch.html | 217 +
 _modules/pystellibs/simpletable.html | 2956 +++++++++
 _modules/pystellibs/stellib.html | 973 +++
 _modules/pystellibs/tlusty.html | 220 +
 _sources/index.rst.txt | 121 +-
 _sources/modules.rst.txt | 7 +
 _sources/pystellibs.ezunits.rst.txt | 22 +
 _sources/pystellibs.future.rst.txt | 22 +
 _sources/pystellibs.interpolator.rst.txt | 38 +
 _sources/pystellibs.rst.txt | 135 +
 _static/basic.css | 307 +-
 _static/doctools.js | 377 +-
 _static/documentation_options.js | 9 +-
 _static/language_data.js | 106 +-
 _static/pygments.css | 144 +-
 _static/searchtools.js | 810 +--
 _static/sphinx_highlight.js | 144 +
 genindex.html | 961 ++-
 index.html | 157 +-
 modules.html | 438 ++
 objects.inv | Bin 273 -> 2050 bytes
 py-modindex.html | 233 +
 pystellibs.ezunits.html | 376 ++
 pystellibs.future.html | 135 +
 pystellibs.html | 5361 +++++++++++++++++
 pystellibs.interpolator.html | 305 +
 search.html | 57 +-
 searchindex.js | 2 +-
 45 files changed, 19056 insertions(+), 966 deletions(-)
 create mode 100644 _images/combined_libs.png
 create mode 100644 _images/single_spec_libs.png
 create mode 100644 _modules/index.html
 create mode 100644 _modules/pystellibs/basel.html
 create mode 100644 _modules/pystellibs/btsettl.html
 create mode 100644 _modules/pystellibs/elodie.html
 create mode 100644 _modules/pystellibs/ezunits.html
 create mode 100644 _modules/pystellibs/ezunits/pint.html
 create mode 100644 _modules/pystellibs/helpers.html
 create mode 100644 _modules/pystellibs/interpolator.html
 create mode 100644 _modules/pystellibs/interpolator/interpolator.html
 create mode 100644 _modules/pystellibs/interpolator/lejeune.html
 create mode 100644 _modules/pystellibs/interpolator/ndlinear.html
 create mode 100644 _modules/pystellibs/kurucz.html
 create mode 100644 _modules/pystellibs/marcs.html
 create mode 100644 _modules/pystellibs/munari.html
 create mode 100644 _modules/pystellibs/pbar.html
 create mode 100644 _modules/pystellibs/rauch.html
 create mode 100644 _modules/pystellibs/simpletable.html
 create mode 100644 _modules/pystellibs/stellib.html
 create mode 100644 _modules/pystellibs/tlusty.html
 create mode 100644 _sources/modules.rst.txt
 create mode 100644 _sources/pystellibs.ezunits.rst.txt
 create mode 100644 _sources/pystellibs.future.rst.txt
 create mode 100644 _sources/pystellibs.interpolator.rst.txt
 create mode 100644 _sources/pystellibs.rst.txt
 create mode 100644 _static/sphinx_highlight.js
 create mode 100644 modules.html
 create mode 100644 py-modindex.html
 create mode 100644 pystellibs.ezunits.html
 create mode 100644 pystellibs.future.html
 create mode 100644 pystellibs.html
 create mode 100644 pystellibs.interpolator.html

diff --git a/_images/combined_libs.png b/_images/combined_libs.png
new file mode 100644
index 0000000000000000000000000000000000000000..46514c8748b75cd09c7505c2deaff2e0c63a0113
GIT binary patch
literal 27424
[base85-encoded binary data for _images/combined_libs.png omitted]

[GIT binary patch for _images/single_spec_libs.png (new file, 49801 bytes) omitted]

[diff for _modules/index.html (new file, 126 lines of generated Sphinx HTML, page title "Overview: module code — pystellibs 1.0 documentation") omitted]
\ No newline at end of file
diff --git a/_modules/pystellibs/basel.html b/_modules/pystellibs/basel.html
new file mode 100644
index 0000000..e1e32e4
--- /dev/null
+++ b/_modules/pystellibs/basel.html
@@ -0,0 +1,248 @@
pystellibs.basel — pystellibs 1.0 documentation

Source code for pystellibs.basel

+""" BaSeL 2.2 library """
+import numpy as np
+from .simpletable import SimpleTable
+try:
+    from astropy.io import fits as pyfits
+except ImportError:
+    import pyfits
+
+from .stellib import Stellib
+from .config import libsdir
+
+
+
[docs]class BaSeL(Stellib): + """ BaSeL 2.2 library derived class + This library + Rauch is used in Pegase.2 + + The BaSeL stellar spectral energy distribution (SED) libraries are libraries + of theoretical stellar SEDs recalibrated using empirical photometric data. + Therefore, we call them semi-empirical libraries. + + The BaSeL 2.2 library was calibrated using photometric data from solar + metallicity stars. + + References + ---------- + * Lejeune, Cuisiner, and Buser, 1998 A&AS, 130, 65 + * can be downloaded http://www.astro.unibas.ch/BaSeL_files/BaSeL2_2.tar.gz + """ + def __init__(self, *args, **kwargs): + self.name = 'BaSeL 2.2' + self.source = libsdir + '/stellib_BaSeL_v2.2.grid.fits' + self._load_() + Stellib.__init__(self, *args, **kwargs) + + def _load_(self): + with pyfits.open(self.source) as f: + # load data + self._getWaveLength_(f) + self._getTGZ_(f) + self._getSpectra_(f) + self._getWaveLength_units(f) + + def _getWaveLength_(self, f): + self._wavelength = f[0].data[-1] + + def _getWaveLength_units(self, f): + self.wavelength_unit = 'angstrom' + + def _getTGZ_(self, f): + self.grid = SimpleTable(f[1].data) + self.grid.header.update(f[1].header.items()) + self.grid.header['NAME'] = 'TGZ' + +
[docs] def bbox(self, dlogT=0.05, dlogg=0.25): + """ Boundary of Basel 2.2 library + + Parameters + ---------- + dlogT: float + log-temperature tolerance before extrapolation limit + + dlogg: float + log-g tolerance before extrapolation limit + + Returns + ------- + bbox: ndarray + (logT, logg) edges of the bounding polygon + """ + bbox = [(3.301 - dlogT, 5.500 + dlogg), + (3.301 - dlogT, 3.500 - dlogg), + (3.544 - dlogT, 3.500 - dlogg), + (3.544 - dlogT, 1.000), + (3.477, 0.600 + dlogg), + (3.447 - dlogT, 0.600 + dlogg), + (3.398 - dlogT, 0.280 + dlogg), + (3.398 - dlogT, -1.020 - dlogg), + (3.398, -1.020 - dlogg), + (3.447, -1.020 - dlogg), + (3.505 + dlogT, -0.700 - dlogg), + (3.544 + dlogT, -0.510 - dlogg), + (3.574 + dlogT, -0.290 - dlogg), + (3.602 + dlogT, 0.000 - dlogg), + (3.778, 0.000 - dlogg), + (3.778 + dlogT, 0.000), + (3.875 + dlogT, 0.500), + (3.929 + dlogT, 1.000), + (3.954 + dlogT, 1.500), + (4.021 + dlogT, 2.000 - dlogg), + (4.146, 2.000 - dlogg), + (4.146 + dlogT, 2.000), + (4.279 + dlogT, 2.500), + (4.415 + dlogT, 3.000), + (4.491 + dlogT, 3.500), + (4.544 + dlogT, 4.000), + (4.602 + dlogT, 4.500), + (4.699 + dlogT, 5.000 - dlogg), + (4.699 + dlogT, 5.000 + dlogg), + (3.525 + dlogT, 5.000 + dlogg), + (3.525 + dlogT, 5.500 + dlogg), + (3.301 - dlogT, 5.500 + dlogg) ] + + return np.array(bbox)
+ + def _getSpectra_(self, f): + self.spectra = f[0].data[:-1] + + @property + def logg(self): + return self.grid['logg'] + + @property + def logT(self): + return self.grid['logT'] + + @property + def Teff(self): + return self.grid['Teff'] + + @property + def Z(self): + return self.grid['Z'] + + @property + def logZ(self): + return self.grid['logZ'] + + @property + def NHI(self): + return self.grid['NHI'] + + @property + def NHeI(self): + return self.grid['NHeI'] + + @property + def NHeII(self): + return self.grid['NHeII']
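A minimal usage sketch for the class above, assuming the grid file stellib_BaSeL_v2.2.grid.fits ships with the installed package and that BaSeL is re-exported at the package top level (otherwise import it from pystellibs.basel):

from pystellibs import BaSeL

osl = BaSeL()                          # reads stellib_BaSeL_v2.2.grid.fits from libsdir
print(osl.name, osl.wavelength_unit)   # 'BaSeL 2.2', 'angstrom'
print(len(osl.logT))                   # one (logT, logg, Z, ...) entry per spectrum

# bbox() returns the closed (logT, logg) polygon delimiting safe interpolation
poly = osl.bbox(dlogT=0.05, dlogg=0.25)
print(poly.shape)                      # (32, 2)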
\ No newline at end of file
diff --git a/_modules/pystellibs/btsettl.html b/_modules/pystellibs/btsettl.html
new file mode 100644
index 0000000..780ef25
--- /dev/null
+++ b/_modules/pystellibs/btsettl.html
@@ -0,0 +1,232 @@
pystellibs.btsettl — pystellibs 1.0 documentation

Source code for pystellibs.btsettl

+import numpy as np
+from .stellib import AtmosphereLib
+from .config import libsdir
+from .simpletable import SimpleTable
+try:
+    from astropy.io import fits as pyfits
+except ImportError:
+    import pyfits
+
+
+
[docs]class BTSettl(AtmosphereLib): + """ + BT-Settl Library + + References + ---------- + + Paper: Few refereed publications + Older Ref = http://adsabs.harvard.edu/abs/2000ApJ...539..366A + + Conference Proceedings: + http://adsabs.harvard.edu/abs/2016sf2a.conf..223A + http://adsabs.harvard.edu/abs/2012RSPTA.370.2765A + + Files available at: https://phoenix.ens-lyon.fr/Grids/BT-Settl/ + + Current Library: AGSS2009 Abundances (due to grid availability) + Spectra rebinned to match Kurucz, and custom 2 Ang medium resolution + """ + def __init__(self, medres=True, *args, **kwargs): + self.name = 'BTSettl' + if medres: + self.source = libsdir + '/bt-settl.medres.grid.fits' + else: + self.source = libsdir + '/bt-settl.lowres.grid.fits' + self._load_() + AtmosphereLib.__init__(self, *args, **kwargs) + + def _load_(self): + with pyfits.open(self.source) as f: + # load data + self._getWaveLength_(f) + self._getTGZ_(f) + self._getSpectra_(f) + self._getWaveLength_units(f) + + def _getWaveLength_units(self, f): + self.wavelength_unit = 'angstrom' + + def _getWaveLength_(self, f): + self._wavelength = f[0].data[-1] + + def _getTGZ_(self, f): + self.grid = SimpleTable(f[1].data) + self.grid.header.update(f[1].header.items()) + self.grid.header['NAME'] = 'TGZ' + + def _getSpectra_(self, f): + self.spectra = f[0].data[:-1] + +
[docs] def bbox(self, dlogT=0.05, dlogg=0.25): + """ Boundary of BT-Settl library + + Parameters + ---------- + dlogT: float + log-temperature tolerance before extrapolation limit + + dlogg: float + log-g tolerance before extrapolation limit + + Returns + ------- + bbox: ndarray + (logT, logg) edges of the bounding polygon + """ + bbox = [(3.41497 - dlogT, 6.0 + dlogg), + (3.41497 - dlogT, -0.5 - dlogg), + (3.84510 + dlogT, -0.5 - dlogg), + (4.07918 + dlogT, 0.0 - dlogg), + (4.17609 + dlogT, 0.5 - dlogg), + (4.30103 + dlogT, 1.0 - dlogg), + (4.39794 + dlogT, 1.5 - dlogg), + (4.47712 + dlogT, 2.0 - dlogg), + (4.60206 + dlogT, 2.5 - dlogg), + (4.60206 + dlogT, 3.0 - dlogg), + (4.69897 + dlogT, 3.5 - dlogg), + (4.84510 + dlogT, 4.0 - dlogg), + (4.84510 + dlogT, 4.5 + dlogg), + (4.00000 + dlogT, 4.5 + dlogg), + (4.00000 + dlogT, 5.0 + dlogg), + (3.69897 + dlogT, 5.0 + dlogg), + (3.69897 + dlogT, 5.5 + dlogg), + (3.60206 + dlogT, 5.5 + dlogg), + (3.60206 + dlogT, 6.0 + dlogg), + (3.41497 - dlogT, 6.0 + dlogg)] + + return np.array(bbox)
+ +
[docs] def get_interpolation_data(self): + """ interpolation needs alpha """ + return np.array([self.logT, self.logg, self.logZ]).T
+ + @property + def logT(self): + return self.grid['logT'] + + @property + def logg(self): + return self.grid['logg'] + + @property + def Teff(self): + return self.grid['Teff'] + + @property + def Z(self): + return self.grid['Z'] + + @property + def logZ(self): + return self.grid['logZ']
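A short sketch of how the interpolation data above can be inspected, assuming the bt-settl grid files are installed with the package and BTSettl is exported at the package top level:

from pystellibs import BTSettl

osl = BTSettl(medres=False)             # pick the low-resolution grid
data = osl.get_interpolation_data()     # columns: logT, logg, logZ
print(data.shape)                       # (number of spectra, 3)
print(data.min(axis=0))                 # lower corner of the parameter grid
print(data.max(axis=0))                 # upper corner of the parameter grid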
\ No newline at end of file
diff --git a/_modules/pystellibs/elodie.html b/_modules/pystellibs/elodie.html
new file mode 100644
index 0000000..75cfa7e
--- /dev/null
+++ b/_modules/pystellibs/elodie.html
@@ -0,0 +1,236 @@
pystellibs.elodie — pystellibs 1.0 documentation

Source code for pystellibs.elodie

+""" Elodie 3.1
+"""
+
+import numpy as np
+from .simpletable import SimpleTable
+try:
+    from astropy.io import fits as pyfits
+except ImportError:
+    import pyfits
+
+from .stellib import Stellib
+from .config import libsdir
+
+
+
[docs]class Elodie(Stellib): + """ Elodie 3.1 stellar library derived class """ + def __init__(self, *args, **kwargs): + self.name = 'ELODIE v3.1 (Prugniel et al 2007, astro-ph/703658)' + self.source = libsdir + '/stellib_ELODIE_3.1.fits' + self._load_() + Stellib.__init__(self, *args, **kwargs) + + def _load_(self): + with pyfits.open(self.source) as f: + # load data + self._getWaveLength_(f) + self._getTGZ_(f) + self._getSpectra_(f) + self._getWaveLength_units(f) + + def _getWaveLength_units(self, f): + self.wavelength_unit = 'angstrom' + + def _getWaveLength_(self, f): + self._wavelength = np.asarray(f[0].data[-1]) + + def _getTGZ_(self, f): + self.grid = SimpleTable(f[1].data) + self.grid.header.update(f[1].header.items()) + self.grid.header['NAME'] = 'TGZ' + + def _getSpectra_(self, f): + self.spectra = f[0].data[:-1] + +
[docs] def bbox(self, dlogT=0.05, dlogg=0.25): + """ Boundary of Elodie library + + Parameters + ---------- + dlogT: float + log-temperature tolerance before extrapolation limit + + dlogg: float + log-g tolerance before extrapolation limit + + Returns + ------- + bbox: ndarray + (logT, logg) edges of the bounding polygon + """ + bbox = [(3.301 - dlogT, 5.500 + dlogg), + (3.301 - dlogT, 3.500 - dlogg), + (3.544 - dlogT, 3.500 - dlogg), + (3.544 - dlogT, 1.000), + (3.477, 0.600 + dlogg), + (3.447 - dlogT, 0.600 + dlogg), + (3.398 - dlogT, 0.280 + dlogg), + (3.398 - dlogT, -1.020 - dlogg), + (3.398, -1.020 - dlogg), + (3.447, -1.020 - dlogg), + (3.505 + dlogT, -0.700 - dlogg), + (3.544 + dlogT, -0.510 - dlogg), + (3.574 + dlogT, -0.290 - dlogg), + (3.602 + dlogT, 0.000 - dlogg), + (3.778, 0.000 - dlogg), + (3.778 + dlogT, 0.000), + (3.875 + dlogT, 0.500), + (3.929 + dlogT, 1.000), + (3.954 + dlogT, 1.500), + (4.021 + dlogT, 2.000 - dlogg), + (4.146, 2.000 - dlogg), + (4.146 + dlogT, 2.000), + (4.279 + dlogT, 2.500), + (4.415 + dlogT, 3.000), + (4.491 + dlogT, 3.500), + (4.544 + dlogT, 4.000), + (4.602 + dlogT, 4.500), + (4.699 + dlogT, 5.000 - dlogg), + (4.699 + dlogT, 5.000 + dlogg), + (3.525 + dlogT, 5.000 + dlogg), + (3.525 + dlogT, 5.500 + dlogg), + (3.301 - dlogT, 5.500 + dlogg) ] + + return np.array(bbox)
+ + @property + def logg(self): + return self.grid['logg'] + + @property + def logT(self): + return self.grid['logT'] + + @property + def Teff(self): + return 10 ** self.logT + + @property + def Z(self): + return self.grid['Z'] + + @property + def NHI(self): + return self.grid['NHI'] + + @property + def NHeI(self): + return self.grid['NHeI'] + + @property + def NHeII(self): + return self.grid['NHeII'] + + @property + def logZ(self): + return np.log10(self.Z)
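The bbox polygon above is handy for checking whether a star lies inside the library's validity domain. The sketch below uses matplotlib's Path for the point-in-polygon test; both that choice and the package-level import are assumptions, and the Stellib base class may already provide an equivalent helper:

from matplotlib.path import Path
from pystellibs import Elodie

osl = Elodie()
polygon = Path(osl.bbox(dlogT=0.05, dlogg=0.25))

star = (3.76, 4.44)                     # (logT, logg) of a roughly solar-like star
print(polygon.contains_point(star))     # True when the point falls inside the coverage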
\ No newline at end of file
diff --git a/_modules/pystellibs/ezunits.html b/_modules/pystellibs/ezunits.html
new file mode 100644
index 0000000..853fdf9
--- /dev/null
+++ b/_modules/pystellibs/ezunits.html
@@ -0,0 +1,133 @@
pystellibs.ezunits — pystellibs 1.0 documentation

Source code for pystellibs.ezunits

+# -*- coding: utf-8 -*-
+"""
+    pint
+    ~~~~
+
+    Pint is a Python module/package to define, operate on, and manipulate
+    **physical quantities**: the product of a numerical value and a
+    unit of measurement. It allows arithmetic operations between them
+    and conversions from and to different units.
+
+    :copyright: (c) 2012 by Hernan E. Grecco.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from .pint import UnitRegistry, DimensionalityError, UnitsContainer, UndefinedUnitError, logger, __version__
+
+# load a default registry.
+## Example usage: unit['m * s **-1']
+unit = UnitRegistry()
+
+
+
[docs]def hasUnit(val): + return hasattr(val, 'units')
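A small sketch of the front-end defined above: the module-level `unit` registry parses unit expressions, and hasUnit() separates quantities from bare numbers. The 'hour'/'minute' conversion assumes those units are present in the shipped default_en.txt definitions:

from pystellibs.ezunits import unit, hasUnit

v = 3.0 * unit['m * s ** -1']           # a Quantity: magnitude 3.0, units meter / second
print(v.magnitude, v.units)
print(hasUnit(v), hasUnit(3.0))         # True False

t = 0.5 * unit['hour']
print(t.to('minute'))                   # 30.0 minute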
\ No newline at end of file
diff --git a/_modules/pystellibs/ezunits/pint.html b/_modules/pystellibs/ezunits/pint.html
new file mode 100644
index 0000000..7fe790c
--- /dev/null
+++ b/_modules/pystellibs/ezunits/pint.html
@@ -0,0 +1,1321 @@
pystellibs.ezunits.pint — pystellibs 1.0 documentation

Source code for pystellibs.ezunits.pint

+# -*- coding: utf-8 -*-
+"""
+    pint
+    ~~~~
+
+    Pint is a Python module/package to define, operate on, and manipulate
+    **physical quantities**: the product of a numerical value and a
+    unit of measurement. It allows arithmetic operations between them
+    and conversions from and to different units.
+
+    :copyright: 2012 by Hernan E. Grecco.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from __future__ import division, print_function, absolute_import
+
+__version__ = '0.1'
+
+import os
+import sys
+import copy
+import math
+import logging
+import operator
+import functools
+import itertools
+
+try:
+    from collections.abc import Iterable
+except ImportError:
+    from collections import Iterable
+
+from pkg_resources import resource_filename
+
+from io import BytesIO
+from numbers import Number
+import tokenize
+from tokenize import untokenize, NUMBER, STRING, NAME, OP
+
+logger = logging.getLogger(__name__)
+
+if hasattr(logging, 'NullHandler'):
+    logger.addHandler(logging.NullHandler())
+else:
+    class NullHandler(logging.Handler):
+        """
+        This handler does nothing. It's intended to be used to avoid the
+        "No handlers could be found for logger XXX" one-off warning. This is
+        important for library code, which may contain code to log events. If a user
+        of the library does not configure logging, the one-off warning might be
+        produced; to avoid this, the library developer simply needs to instantiate
+        a NullHandler and add it to the top-level logger of the library module or
+        package.
+        """
+        def handle(self, record):
+            pass
+
+        def emit(self, record):
+            pass
+
+        def createLock(self):
+            self.lock = None
+
+    logger.addHandler(NullHandler())
+
+if hasattr(functools, 'total_ordering'):
+    total_ordering = functools.total_ordering
+else:
+    def total_ordering(cls):
+        """Class decorator that fills in missing ordering methods"""
+        convert = {
+            '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
+                       ('__le__', lambda self, other: self < other or self == other),
+                       ('__ge__', lambda self, other: not self < other)],
+            '__le__': [('__ge__', lambda self, other: not self <= other or self == other),
+                       ('__lt__', lambda self, other: self <= other and not self == other),
+                       ('__gt__', lambda self, other: not self <= other)],
+            '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
+                       ('__ge__', lambda self, other: self > other or self == other),
+                       ('__le__', lambda self, other: not self > other)],
+            '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
+                       ('__gt__', lambda self, other: self >= other and not self == other),
+                       ('__lt__', lambda self, other: not self >= other)]
+        }
+        roots = set(dir(cls)) & set(convert)
+        if not roots:
+            raise ValueError('must define at least one ordering operation: < > <= >=')
+        root = max(roots)       # prefer __lt__ to __le__ to __gt__ to __ge__
+        for opname, opfunc in convert[root]:
+            if opname not in roots:
+                #print (opname, opfunc)
+                opfunc.__name__ = opname
+                opfunc.__doc__ = getattr(int, opname).__doc__
+                setattr(cls, opname, opfunc)
+        return cls
+
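The fallback above mirrors the standard-library decorator. A tiny self-contained illustration of what gets generated, using functools.total_ordering (available on any modern Python):

from functools import total_ordering

@total_ordering
class Version:
    # orderable as soon as __eq__ and one rich comparison are defined
    def __init__(self, number):
        self.number = number

    def __eq__(self, other):
        return self.number == other.number

    def __lt__(self, other):
        return self.number < other.number

print(Version(1) <= Version(2))   # True, __le__ was filled in
print(Version(3) >= Version(2))   # True, __ge__ was filled in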
+
+if sys.version < '3':
+    from io import open
+    from StringIO import StringIO
+    string_types = basestring
+    _tokenize = lambda input: tokenize.generate_tokens(StringIO(input).readline)
+else:
+    string_types = str
+    _tokenize = lambda input: tokenize.tokenize(BytesIO(input.encode('utf-8')).readline)
+
+PRETTY = '⁰¹²³⁴⁵⁶⁷⁸⁹·⁻'
+
+try:
+    import numpy as np
+    from numpy import ndarray
+
+    HAS_NUMPY = True
+    NUMERIC_TYPES = (Number, ndarray)
+
+    def _to_magnitude(value, force_ndarray=False):
+        if value is None:
+            return value
+        elif isinstance(value, (list, tuple)):
+            return np.asarray(value)
+        if force_ndarray:
+            return np.asarray(value)
+        return value
+
+except ImportError:
+
+    class ndarray(object):
+        pass
+
+    HAS_NUMPY = False
+    NUMERIC_TYPES = (Number, )
+
+    def _to_magnitude(value, force_ndarray=False):
+        return value
+
+
+def _eq(first, second):
+    """Comparison of scalars and arrays
+    """
+    out = first == second
+    if isinstance(out, Iterable):
+        if isinstance(out, ndarray):
+            return np.all(out)
+        else:
+            return all(out)
+    return out
+
+
+def _definitions_from_file(filename):
+    """Load definition from file.
+    """
+    with open(filename, encoding='utf-8') as fp:
+        for line in fp:
+            line = line.strip()
+            if not line or line.startswith('#'):
+                continue
+            try:
+                if ';' in line:
+                    [definition, modifiers] = line.split(';', 2)
+                    modifiers = (modifier.split('=') for modifier in modifiers.split(';'))
+                    modifiers = dict((key.strip(), float(value.strip())) for key, value in modifiers)
+                else:
+                    definition = line
+                    modifiers = {}
+                result = [res.strip() for res in definition.split('=')]
+                name, value, aliases = result[0], result[1], result[2:]
+            except Exception as ex:
+                logger.error("Exception: Cannot parse '{}' {}".format(line, ex))
+                continue
+            yield name, value, aliases, modifiers
+
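The parser above expects one definition per line in the form 'name = value = alias1 = alias2', optionally followed by '; key=value' modifiers. A self-contained sketch of that split logic on a made-up line (not copied from the shipped default_en.txt):

line = "hour = 60 * minute = h = hr"

definition = line.split(';', 1)[0]                           # this example carries no modifiers
fields = [field.strip() for field in definition.split('=')]
name, value, aliases = fields[0], fields[1], fields[2:]
print(name)      # hour
print(value)     # 60 * minute
print(aliases)   # ['h', 'hr']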
+
+def _solve_dependencies(dependencies):
+    """Solve a dependency graph.
+
+    :param dependencies: dependency dictionary. For each key, the value is
+                         an iterable indicating its dependencies.
+    :return: list of sets, each containing the keys of tasks whose dependencies are already satisfied and which can therefore be processed together
+
+    """
+    d = dict((key, set(dependencies[key])) for key in dependencies)
+    r = []
+    while d:
+        # values not in keys (items without dep)
+        t = set(i for v in d.values() for i in v) - set(d.keys())
+        # and keys without value (items without dep)
+        t.update(k for k, v in d.items() if not v)
+        # can be done right away
+        r.append(t)
+        # and cleaned up
+        d = dict(((k, v - t) for k, v in d.items() if v))
+    return r
+
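A worked example of the layered output this solver produces; _solve_dependencies is a private helper, so importing it directly is only for illustration:

from pystellibs.ezunits.pint import _solve_dependencies

deps = {'a': ['b', 'c'], 'b': ['c'], 'c': []}
print(_solve_dependencies(deps))
# [{'c'}, {'b'}, {'a'}] -- each set only depends on items in the earlier sets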
+
+class _Exception(Exception):
+
+    def __init__(self, internal):
+        self.internal = internal
+
+
+
[docs]class UndefinedUnitError(ValueError): + """Raised when the units are not defined in the unit registry. + """ + + def __init__(self, unit_names): + super(ValueError, self).__init__() + self.unit_names = unit_names + + def __str__(self): + if isinstance(self.unit_names, string_types): + return "'{}' is not defined in the unit registry".format(self.unit_names) + elif isinstance(self.unit_names, (list, tuple)) and len(self.unit_names) == 1: + return "'{}' is not defined in the unit registry".format(self.unit_names[0]) + elif isinstance(self.unit_names, set) and len(self.unit_names) == 1: + uname = list(self.unit_names)[0] + return "'{}' is not defined in the unit registry".format(uname) + else: + return '{} are not defined in the unit registry'.format(self.unit_names)
+ + +
[docs]class DimensionalityError(ValueError): + """Raised when trying to convert between incompatible units. + """ + + def __init__(self, units1, units2, dim1=None, dim2=None): + super(DimensionalityError, self).__init__() + self.units1 = units1 + self.units2 = units2 + self.dim1 = dim1 + self.dim2 = dim2 + + def __str__(self): + if self.dim1 or self.dim2: + dim1 = ' ({})'.format(self.dim1) + dim2 = ' ({})'.format(self.dim2) + else: + dim1 = '' + dim2 = '' + return "Cannot convert from '{}'{} to '{}'{}".format(self.units1, dim1, self.units2, dim2)
+ + +
[docs]class AliasDict(dict): + + def __init__(self, *args, **kwargs): + dict.__init__(self, *args, **kwargs) + self.preferred_alias = {} + +
[docs] def add_alias(self, key, value, preferred=False): + if value not in self: + raise IndexError("The aliased value '{}' is not present in the dictionary".format(value)) + self[key] = value = self.get_aliased(value) + if preferred: + self.preferred_alias[value] = key
+ +
[docs] def get_aliased(self, key): + value = self[key] + if isinstance(value, string_types): + return self.get_aliased(value) + return key
+ + +
[docs]class UnitsContainer(dict): + """The UnitsContainer stores the product of units and their respective + exponent and implements the corresponding operations + """ + + def __init__(self, *args, **kwargs): + dict.__init__(self, *args, **kwargs) + for key, value in self.items(): + if not isinstance(key, string_types): + raise TypeError('key must be a str, not {}'.format(type(key))) + if not isinstance(value, NUMERIC_TYPES): + raise TypeError('value must be a NUMERIC_TYPES, not {}'.format(type(value))) + if not isinstance(value, float): + self[key] = float(value) + + def __missing__(self, key): + return 0.0 + + def __setitem__(self, key, value): + if not isinstance(key, string_types): + raise TypeError('key must be a str, not {}'.format(type(key))) + if not isinstance(value, NUMERIC_TYPES): + raise TypeError('value must be a NUMERIC_TYPES, not {}'.format(type(value))) + dict.__setitem__(self, key, float(value)) + +
[docs] def add(self, key, value): + newval = self.__getitem__(key) + value + if newval: + self.__setitem__(key, newval) + else: + del self[key]
+ + def _formatter(self, product_sign=' * ', superscript_format=' ** {:n}', + as_ratio=True, single_denominator=False, short_form=False): + if not self: + return 'dimensionless' + + if as_ratio: + fun = abs + else: + fun = lambda x: x + + tmp_plus = [] + tmp_minus = [] + for key, value in sorted(self.items()): + if value == 1: + tmp_plus.append(key) + elif value > 1: + tmp_plus.append(key + superscript_format.format(value)) + elif value == -1: + tmp_minus.append(key) + else: + tmp_minus.append(key + superscript_format.format(fun(value))) + + if tmp_plus: + ret = product_sign.join(tmp_plus) + elif as_ratio: + ret = '1' + else: + ret = '' + + if tmp_minus: + if as_ratio: + ret += ' / ' + if single_denominator: + ret += ' / '.join(tmp_minus) + else: + ret += product_sign.join(tmp_minus) + else: + ret += product_sign.join(tmp_minus) + + return ret + + def __str__(self): + return self._formatter() + + def __repr__(self): + tmp = '{%s}' % ', '.join(["'{}': {}".format(key, value) for key, value in sorted(self.items())]) + return '<UnitsContainer({})>'.format(tmp) + + def __format__(self, spec): + if spec == '!s' or spec == '': + return str(self) + elif spec == '!r': + return repr(self) + elif spec == '!l': + tmp = self._formatter(r' \cdot ', '^[{:n}]', True, True).replace('[', '{').replace(']', '}') + if '/' in tmp: + return r'\frac{%s}' % tmp.replace(' / ', '}{') + elif spec == '!p': + pretty = '{}'.format(self).replace(' ** ', '').replace(' * ', PRETTY[10]).replace('-', PRETTY[11]).replace(' / ', '/') + for n in range(10): + pretty = pretty.replace(str(n), PRETTY[n]) + return pretty + else: + raise ValueError('{} is not a valid format for UnitsContainer'.format(spec)) + + def __copy__(self): + ret = self.__class__() + ret.update(self) + return ret + + def __imul__(self, other): + if not isinstance(other, self.__class__): + raise TypeError('Cannot multiply UnitsContainer by {}'.format(type(other))) + for key, value in other.items(): + self[key] += value + keys = [key for key, value in self.items() if value == 0] + for key in keys: + del self[key] + + return self + + def __mul__(self, other): + if not isinstance(other, self.__class__): + raise TypeError('Cannot multiply UnitsContainer by {}'.format(type(other))) + ret = copy.copy(self) + ret *= other + return ret + + __rmul__ = __mul__ + + def __ipow__(self, other): + if not isinstance(other, NUMERIC_TYPES): + raise TypeError('Cannot power UnitsContainer by {}'.format(type(other))) + for key, value in self.items(): + self[key] *= other + return self + + def __pow__(self, other): + if not isinstance(other, NUMERIC_TYPES): + raise TypeError('Cannot power UnitsContainer by {}'.format(type(other))) + ret = copy.copy(self) + ret **= other + return ret + + def __itruediv__(self, other): + if not isinstance(other, self.__class__): + raise TypeError('Cannot divide UnitsContainer by {}'.format(type(other))) + + for key, value in other.items(): + self[key] -= value + + keys = [key for key, value in self.items() if value == 0] + for key in keys: + del self[key] + + return self + + def __truediv__(self, other): + if not isinstance(other, self.__class__): + raise TypeError('Cannot divide UnitsContainer by {}'.format(type(other))) + + ret = copy.copy(self) + ret /= other + return ret + + def __rtruediv__(self, other): + if not isinstance(other, self.__class__) and other != 1: + raise TypeError('Cannot divide {} by UnitsContainer'.format(type(other))) + + ret = copy.copy(self) + ret **= -1 + return ret
+ + +
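A short sketch of the exponent bookkeeping UnitsContainer performs; the unit names are arbitrary strings as far as the container is concerned:

from pystellibs.ezunits.pint import UnitsContainer

speed = UnitsContainer({'meter': 1, 'second': -1})
time = UnitsContainer({'second': 1})

print(speed * time)         # meter            (the 'second' exponents cancel)
print(speed ** 2)           # meter ** 2 / second ** 2
print(format(speed, '!p'))  # meter/second     (pretty form)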
[docs]def converter_to_reference(scale, offset, log_base): + def _inner(value): + if log_base: + return log_base ** (value / scale + offset) + else: + return value * scale + offset + return _inner
+ + +
[docs]def converter_from_reference(scale, offset, log_base): + def _inner(value): + if log_base: + return (math.log10(value) / math.log10(log_base) - offset) / scale + else: + return (value - offset) / scale + return _inner
+ + +
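The two factories above are inverses of one another in the linear (log_base == 0) case. The Celsius/Kelvin numbers below are purely illustrative and not taken from the shipped unit definitions:

from pystellibs.ezunits.pint import converter_to_reference, converter_from_reference

to_kelvin = converter_to_reference(scale=1.0, offset=273.15, log_base=0)
from_kelvin = converter_from_reference(scale=1.0, offset=273.15, log_base=0)

print(to_kelvin(25.0))       # 298.15
print(from_kelvin(298.15))   # 25.0, round-trips back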
[docs]class UnitRegistry(object): + """The unit registry stores the definitions and relationships between + units. + + :param filename: path of the units definition file to load. + Empty to load the default definition file. + None to leave the UnitRegistry empty. + :param force_ndarray: convert any input, scalar or not to a numpy.ndarray. + """ + + #: Map unit name (string) to unit value (Quantity), and unit alias to canonical unit name + _UNITS = AliasDict() + + #: Map prefix name (string) to prefix value (float), and unit alias to canonical prefix name + _PREFIXES = AliasDict({'': 1}) + + #: Map suffix name (string) to canonical , and unit alias to canonical unit name + _SUFFIXES = AliasDict({'': None, 's': ''}) + + # Location of default file + # _DEFAULT_FILENAME = os.path.join(os.path.dirname(__file__), 'default_en.txt') + _DEFAULT_FILENAME = resource_filename('pystellibs', + os.path.join('ezunits','default_en.txt')) + + def __init__(self, filename='', force_ndarray=False): + self.Quantity = _build_quantity_class(self, force_ndarray) + self._definition_files = [] + if filename == '': + self.add_from_file(self._DEFAULT_FILENAME) + elif filename is not None: + self.add_from_file(filename) + + def __getattr__(self, item): + return self.Quantity(1, item) + + def __getitem__(self, item): + return self._parse_expression(item) + +
[docs] def add_unit(self, name, value, aliases=tuple(), **modifiers): + """Add unit to the registry. + """ + if not isinstance(value, self.Quantity): + value = self.Quantity(value, **modifiers) + + self._UNITS[name] = value + + for ndx, alias in enumerate(aliases): + if ' ' in alias: + logger.warn('Alias cannot contain a space ' + alias) + self._UNITS.add_alias(alias.strip(), name, not ndx)
+ +
[docs] def add_prefix(self, name, value, aliases=tuple()): + """Add prefix to the registry. + """ + + if not isinstance(value, NUMERIC_TYPES): + value = eval(value, {'__builtins__': None}, {}) + self._PREFIXES[name] = float(value) + + for ndx, alias in enumerate(aliases): + self._PREFIXES.add_alias(alias.strip(), name, not ndx)
+ +
[docs] def add_from_file(self, filename): + """Add units and prefixes defined in a definition text file. + """ + self._definition_files.append(filename) + pending = dict() + dependencies = dict() + conv = dict() + for name, value, aliases, modifiers in _definitions_from_file(filename): + try: + if name.endswith('-'): + # Prefix + self.add_prefix(name[:-1], value, [alias[:-1] for alias in aliases]) + continue + if '[' in value: + # Reference units, indicates dimensionality + value = value.strip('[]') + if value: + value = self.Quantity(None, UnitsContainer({value: 1})) + else: + value = self.Quantity(None, None) + + conv[name] = name + for alias in aliases: + conv[alias] = name + self.add_unit(name, value, aliases, **modifiers) + if modifiers: + self.add_unit('delta_' + name, value, tuple('delta_' + item for item in aliases)) + except UndefinedUnitError as ex: + pending[name] = (value, aliases) + dependencies[name] = ex.unit_names + except Exception as ex: + logger.error("Exception: Cannot add '{}' {}".format(name, ex)) + + dep2 = {} + for unit_name, deps in dependencies.items(): + dep2[unit_name] = set(conv[dep_name] for dep_name in deps) + + for unit_names in _solve_dependencies(dep2): + for unit_name in unit_names: + if not unit_name in self._UNITS: + self.add_unit(unit_name, *pending[unit_name])
+ +
[docs] def get_alias(self, name): + """Return the preferred alias for a unit + """ + candidates = list(self._parse_candidate(name)) + if not candidates: + raise UndefinedUnitError(name) + elif len(candidates) == 1: + prefix, unit_name, _ = candidates[0] + else: + logger.warning('Parsing {} yield multiple results. ' + 'Options are: {}'.format(name, candidates)) + prefix, unit_name, _ = candidates[0] + + return self._PREFIXES.preferred_alias.get(prefix, prefix) + self._UNITS.preferred_alias.get(unit_name, unit_name)
+ + def _to_canonical(self, candidate): + """Return the canonical name of a unit. + """ + + if candidate == 'dimensionless': + return '' + + try: + return self._UNITS.get_aliased(candidate) + except KeyError: + pass + + candidates = tuple(self._parse_candidate(candidate)) + if not candidates: + raise UndefinedUnitError(candidate) + elif len(candidates) == 1: + prefix, unit_name, _ = candidates[0] + else: + logger.warning('Parsing {} yield multiple results. ' + 'Options are: {}'.format(candidate, candidates)) + prefix, unit_name, _ = candidates[0] + + if prefix: + alias = self.get_alias(prefix + unit_name) + if prefix + unit_name == 'kilogram': + pass + self.add_unit(prefix + unit_name, self.Quantity(self._PREFIXES[prefix], unit_name), (alias, )) + return prefix + unit_name + + return unit_name + + def _parse_candidate(self, candidate): + """Parse a unit to identify prefix, suffix and unit name + by walking the list of prefix and suffix. + """ + + for suffix, prefix in itertools.product(self._SUFFIXES.keys(), self._PREFIXES.keys()): + if candidate.startswith(prefix) and candidate.endswith(suffix): + unit_name = candidate[len(prefix):] + if suffix: + unit_name = unit_name[:-len(suffix)] + if len(unit_name) == 1: + continue + if unit_name in self._UNITS: + yield (self._PREFIXES.get_aliased(prefix), + self._UNITS.get_aliased(unit_name), + self._SUFFIXES.get_aliased(suffix)) + + def _parse_candidate2(self, candidate): + """Parse a unit to identify prefix, suffix and unit name + by walking the list of units. + """ + for unit_name in self._UNITS: + if unit_name in candidate: + try: + [prefix, suffix] = candidate.split(unit_name) + if len(unit_name) == 1 and len(suffix) == 1: + continue + except ValueError: # too many values to unpack + continue + if prefix in self._PREFIXES and suffix in self._SUFFIXES: + yield (self._PREFIXES.get_aliased(prefix), + self._UNITS.get_aliased(unit_name), + self._SUFFIXES.get_aliased(suffix)) + + def _parse_expression(self, input): + """Parse expression mathematical units and return a quantity object. + """ + + if not input: + return self.Quantity(1) + + gen = _tokenize(input) + result = [] + unknown = set() + for toknum, tokval, _, _, _ in gen: + if toknum in (STRING, NAME): # replace NUMBER tokens + # TODO: Integrate math better, Replace eval + if tokval == 'pi': + result.append((toknum, str(math.pi))) + continue + try: + tokval = self._to_canonical(tokval) + except UndefinedUnitError as ex: + unknown.add(ex.unit_names) + if tokval: + result.extend([ + (NAME, 'Q_'), + (OP, '('), + (NUMBER, '1'), + (OP, ','), + (NAME, 'U_'), + (OP, '('), + (STRING, tokval), + (OP, '='), + (NUMBER, '1'), + (OP, ')'), + (OP, ')') + ]) + else: + result.extend([ + (NAME, 'Q_'), + (OP, '('), + (NUMBER, '1'), + (OP, ','), + (NAME, 'U_'), + (OP, '('), + (OP, ')'), + (OP, ')') + ]) + else: + result.append((toknum, tokval)) + + if unknown: + raise UndefinedUnitError(unknown) + + return eval(untokenize(result), {'__builtins__': None}, + {'REGISTRY': self._UNITS, + 'Q_': self.Quantity, + 'U_': UnitsContainer})
+ + +def _build_quantity_class(registry, force_ndarray): + """Create a Quantity Class. + """ + + @total_ordering + class _Quantity(object): + """Quantity object constituted by magnitude and units. + + :param value: value of the physical quantity to be created. + :type value: str, Quantity or any numeric type. + :param units: units of the physical quantity to be created. + :type units: UnitsContainer, str or Quantity. + """ + + _REGISTRY = registry + + def __reduce__(self): + return _build_quantity, (self.magnitude, self.units) + + def __new__(cls, value, units=None, offset=0, log_base=0): + if units is None: + if isinstance(value, string_types): + inst = cls._REGISTRY._parse_expression(value) + elif isinstance(value, cls): + inst = copy.copy(value) + else: + inst = object.__new__(cls) + inst._magnitude = _to_magnitude(value, force_ndarray) + inst._units = UnitsContainer() + elif isinstance(units, UnitsContainer): + inst = object.__new__(cls) + inst._magnitude = _to_magnitude(value, force_ndarray) + inst._units = units + elif isinstance(units, string_types): + inst = cls._REGISTRY._parse_expression(units) + inst._magnitude = _to_magnitude(value, force_ndarray) + elif isinstance(units, cls): + inst = copy.copy(units) + inst._magnitude = _to_magnitude(value, force_ndarray) + else: + raise TypeError('units must be of type str, Quantity or UnitsContainer; not {}.'.format(type(units))) + + return inst + + def __copy__(self): + return self.__class__(copy.copy(self._magnitude), copy.copy(self._units)) + + def __str__(self): + return '{} {}'.format(self._magnitude, self._units) + + def __repr__(self): + return "<Quantity({}, '{}')>".format(self._magnitude, self._units) + + def __format__(self, spec): + if not spec: + return str(self) + if '!' in spec: + fmt, conv = spec.split('!') + conv = '!' + conv + else: + fmt, conv = spec, '' + + if conv.endswith('~'): + _d = {} + for key, value in self.units.items(): + _d[self._REGISTRY.get_alias(key)] = value + units = UnitsContainer(_d) + conv = conv[:-1] + else: + units = self.units + + return format(self.magnitude, fmt) + ' ' + format(units, conv) + + @property + def magnitude(self): + """Quantity's magnitude. + """ + return self._magnitude + + @property + def units(self): + """Quantity's units. + + :rtype: UnitContainer + """ + return self._units + + @property + def unitless(self): + """Return true if the quantity does not have units. + """ + return not bool(self.convert_to_reference().units) + + @property + def dimensionless(self): + """Return true if the quantity is dimensionless. + """ + tmp = copy.copy(self).convert_to_reference() + + return not bool(tmp.dimensionality) + + @property + def dimensionality(self): + """Quantity's dimensionality (e.g. {length: 1, time: -1}) + """ + try: + return self._dimensionality + except AttributeError: + if self._magnitude is None: + return UnitsContainer(self.units) + + tmp = UnitsContainer() + for key, value in self.units.items(): + reg = self._REGISTRY._UNITS[key] + tmp = tmp * reg.dimensionality ** value + + self._dimensionality = tmp + + return self._dimensionality + + def ito(self, other=None): + """Inplace rescale to different units. + + :param other: destination units. + :type other: Quantity or str. 
+ """ + if isinstance(other, string_types): + other = self._REGISTRY._parse_expression(other) + + if self._units == other._units: + return self.__class__(self._magnitude, other) + + factor = self.__class__(1, self.units / other.units) + factor = factor.convert_to_reference() + + if not factor.unitless: + raise DimensionalityError(self.units, other.units, + self.dimensionality, other.dimensionality) + + self._magnitude *= factor.magnitude + self._units = copy.copy(other._units) + return self + + def to(self, other=None): + """Return Quantity rescaled to different units. + + :param other: destination units. + :type other: Quantity or str. + """ + ret = copy.copy(self) + ret.ito(other) + return ret + + def _convert_to_reference(self, input_units): + + factor = 1 + units = UnitsContainer() + for key, value in input_units.items(): + reg = self._REGISTRY._UNITS[key] + if reg._magnitude is None: + units.add(key, value) + else: + fac, uni = self._convert_to_reference(reg.units) + factor *= (reg._magnitude * fac) ** value + units *= uni ** value + + return factor, units + + def convert_to_reference(self): + """Return Quantity rescaled to reference units. + """ + + factor, units = self._convert_to_reference(self.units) + + return self.__class__(self.magnitude * factor, units) + + def convert_to_reference2(self): + """Return Quantity rescaled to reference units. + """ + + tmp = self.__class__(self.magnitude) + + for key, value in self.units.items(): + reg = self._REGISTRY._UNITS[key] + if reg._magnitude is None: + factor = self.__class__(1, key) ** value + else: + factor = reg.convert_to_reference() ** value + + tmp = tmp * factor + + return tmp + + # Mathematical operations + def __float__(self): + if self.dimensionless: + return float(self._magnitude) + raise DimensionalityError(self.units, 'dimensionless') + + def __complex__(self): + if self.dimensionless: + return complex(self._magnitude) + raise DimensionalityError(self.units, 'dimensionless') + + def iadd_sub(self, other, fun): + if isinstance(other, self.__class__): + if not self.dimensionality == other.dimensionality: + raise DimensionalityError(self.units, other.units, + self.dimensionality, other.dimensionality) + if self._units == other._units: + self._magnitude = fun(self._magnitude, other._magnitude) + else: + self._magnitude = fun(self._magnitude, other.to(self)._magnitude) + else: + if self.dimensionless: + self._magnitude = fun(self._magnitude, _to_magnitude(other, force_ndarray)) + else: + raise DimensionalityError(self.units, 'dimensionless') + + return self + + def add_sub(self, other, fun): + ret = copy.copy(self) + fun(ret, other) + return ret + + def __iadd__(self, other): + return self.iadd_sub(other, operator.iadd) + + def __add__(self, other): + return self.add_sub(other, operator.iadd) + + __radd__ = __add__ + + def __isub__(self, other): + return self.iadd_sub(other, operator.isub) + + def __sub__(self, other): + return self.add_sub(other, operator.isub) + + __rsub__ = __sub__ + + def __imul__(self, other): + if isinstance(other, self.__class__): + self._magnitude *= other._magnitude + self._units *= other._units + else: + self._magnitude *= _to_magnitude(other, force_ndarray) + + return self + + def __mul__(self, other): + if isinstance(other, self.__class__): + return self.__class__(self._magnitude * other._magnitude, self._units * other._units) + else: + return self.__class__(self._magnitude * other, self._units) + + __rmul__ = __mul__ + + def __itruediv__(self, other): + if isinstance(other, self.__class__): + 
self._magnitude /= other._magnitude + self._units /= other._units + else: + self._magnitude /= _to_magnitude(other, force_ndarray) + + return self + + def __truediv__(self, other): + ret = copy.copy(self) + ret /= other + return ret + + def __rtruediv__(self, other): + if isinstance(other, NUMERIC_TYPES): + return self.__class__(other / self._magnitude, 1 / self._units) + raise NotImplementedError + + def __ifloordiv__(self, other): + if isinstance(other, self.__class__): + self._magnitude //= other._magnitude + self._units /= other._units + else: + self._magnitude //= _to_magnitude(other, force_ndarray) + + return self + + def __floordiv__(self, other): + ret = copy.copy(self) + ret //= other + return ret + + def __div__(self, other): + ret = copy.copy(self) + ret /= other + return ret + + #__div__ = __floordiv__ + __idiv__ = __ifloordiv__ + + def __rfloordiv__(self, other): + if isinstance(other, self.__class__): + return self.__class__(other._magnitude // self._magnitude, other._units / self._units) + else: + return self.__class__(other // self._magnitude, 1.0 / self._units) + + def __ipow__(self, other): + self._magnitude **= _to_magnitude(other, force_ndarray) + self._units **= other + return self + + def __pow__(self, other): + ret = copy.copy(self) + ret **= other + return ret + + def __abs__(self): + return self.__class__(abs(self._magnitude), self._units) + + def __round__(self, ndigits=0): + return self.__class__(round(self._magnitude, ndigits=ndigits), self._units) + + def __pos__(self): + return self.__class__(operator.pos(self._magnitude), self._units) + + def __neg__(self): + return self.__class__(operator.neg(self._magnitude), self._units) + + def __eq__(self, other): + # This is class comparison by name is to bypass that + # each Quantity class is unique. + if other.__class__.__name__ != self.__class__.__name__: + return self.dimensionless and _eq(self.magnitude, other) + + if _eq(self._magnitude, 0) and _eq(other._magnitude, 0): + return self.dimensionality == other.dimensionality + + if self._units == other._units: + return _eq(self._magnitude, other._magnitude) + + try: + return _eq(self.to(other).magnitude, other._magnitude) + except DimensionalityError: + return False + + def __lt__(self, other): + if not isinstance(other, self.__class__): + if self.dimensionless: + return operator.lt(self.magnitude, other) + else: + raise ValueError('Cannot compare Quantity and {}'.format(type(other))) + + if self.units == other.units: + return operator.lt(self._magnitude, other._magnitude) + if self.dimensionality != other.dimensionality: + raise DimensionalityError(self.units, other.units, + self.dimensionality, other.dimensionality) + return operator.lt(self.convert_to_reference().magnitude, + self.convert_to_reference().magnitude) + + def __bool__(self): + return bool(self._magnitude) + + __nonzero__ = __bool__ + + # Experimental NumPy Support + + #: Dictionary mapping ufunc/attributes names to the units that they + #: require (conversion will be tried). + __require_units = {'cumprod': '', + 'arccos': '', 'arcsin': '', 'arctan': '', 'arctan2': '', + 'exp': '', 'expm1': '', + 'log': '', 'log10': '', 'log1p': '', 'log2': '', + 'sin': 'radian', 'cos': 'radian', 'tan': 'radian', + 'sinh': 'radian', 'cosh': 'radian', 'tanh': 'radian', + 'radians': 'degree', 'degrees': 'radian', + 'add': '', 'subtract': ''} + + #: Dictionary mapping ufunc/attributes names to the units that they + #: will set on output. 
+ __set_units = {'cos': '', 'sin': '', 'tan': '', + 'cosh': '', 'sinh': '', 'tanh': '', + 'arccos': 'radian', 'arcsin': 'radian', + 'arctan': 'radian', 'arctan2': 'radian', + 'arccosh': 'radian', 'arcsinh': 'radian', + 'arctanh': 'radian', + 'degrees': 'degree', 'radians': 'radian', + 'expm1': '', 'cumprod': ''} + + #: List of ufunc/attributes names in which units are copied from the + #: original. + __copy_units = 'clip compress conj conjugate copy cumsum diagonal flatten ' \ + 'max mean min ptp ravel repeat reshape round ' \ + 'squeeze std sum take trace transpose ' \ + 'ceil diff ediff1d floor hypot rint trapz ' \ + 'add subtract multiply'.split() + + #: Dictionary mapping ufunc/attributes names to the units that they will + #: set on output. The value is interpreded as the power to which the unit + #: will be raised. + __prod_units = {'var': 2, 'prod': 'size', 'true_divide': -1, 'divide': -1} + + def __numpy_method_wrap(self, func, *args, **kwargs): + """Convenience method to wrap on the fly numpy method taking + care of the units. + """ + if func.__name__ in self.__require_units: + try: + self.ito(self.__require_units[func.__name__]) + except: + raise ValueError('Quantity must be dimensionless.') + + value = func(*args, **kwargs) + + if func.__name__ in self.__copy_units: + return self.__class__(value, self._units) + + if func.__name__ in self.__prod_units: + tmp = self.__prod_units[func.__name__] + if tmp == 'size': + return self.__class__(value, self._units ** self._magnitude.size) + return self.__class__(value, self._units ** tmp) + + return value + + def __len__(self): + return len(self._magnitude) + + def __getattr__(self, item): + if item.startswith('__array_'): + if isinstance(self._magnitude, ndarray): + return getattr(self._magnitude, item) + else: + raise AttributeError('__array_* attributes are only taken from ndarray objects.') + try: + return functools.partial(self.__numpy_method_wrap, getattr(self._magnitude, item)) + except AttributeError: + raise AttributeError("Neither Quantity object nor its magnitude ({})" + "has attribute '{}'".format(self._magnitude, item)) + + def __getitem__(self, key): + try: + value = self._magnitude[key] + return self.__class__(value, self._units) + except TypeError: + raise TypeError("Neither Quantity object nor its magnitude ({})" + "supports indexing".format(self._magnitude)) + + def __setitem__(self, key, value): + try: + if isinstance(value, self.__class__): + factor = self.__class__(value.magnitude, value.units / self.units).convert_to_reference() + else: + factor = self.__class__(value, self._units ** (-1)).convert_to_reference() + + if isinstance(factor, self.__class__): + if not factor.dimensionless: + raise ValueError + self._magnitude[key] = factor.magnitude + else: + self._magnitude[key] = factor + + except TypeError: + raise TypeError("Neither Quantity object nor its magnitude ({})" + "supports indexing".format(self._magnitude)) + + def tolist(self): + units = self._units + return [self.__class__(value, units).tolist() if isinstance(value, list) else self.__class__(value, units) + for value in self._magnitude.tolist()] + + __array_priority__ = 21 + + def __array_prepare__(self, obj, context=None): + try: + uf, objs, huh = context + if uf.__name__ in self.__require_units: + try: + self.ito(self.__require_units[uf.__name__]) + except: + raise _Exception(ValueError) + return self.magnitude.__array_prepare__(obj, context) + except _Exception as ex: + raise ex.internal + except Exception as ex: + print(ex) + return 
self.magnitude.__array_prepare__(obj, context) + + def __array_wrap__(self, obj, context=None): + try: + uf, objs, huh = context + out = self.magnitude.__array_wrap__(obj, context) + if uf.__name__ in self.__set_units: + try: + out = self.__class__(out, self.__set_units[uf.__name__]) + except: + raise _Exception(ValueError) + elif uf.__name__ in self.__copy_units: + try: + out = self.__class__(out, self.units) + except: + raise _Exception(ValueError) + elif uf.__name__ in self.__prod_units: + tmp = self.__prod_units[uf.__name__] + if tmp == 'size': + out = self.__class__(out, self.units ** self._magnitude.size) + else: + out = self.__class__(out, self.units ** tmp) + return out + except _Exception as ex: + raise ex.internal + except Exception as ex: + print(ex) + return self.magnitude.__array_wrap__(obj, context) + + return _Quantity + + +_DEFAULT_REGISTRY = UnitRegistry() + + +def _build_quantity(value, units): + return _DEFAULT_REGISTRY.Quantity(value, units) +
\ No newline at end of file
diff --git a/_modules/pystellibs/helpers.html b/_modules/pystellibs/helpers.html
new file mode 100644
index 0000000..8e7162f
--- /dev/null
+++ b/_modules/pystellibs/helpers.html
@@ -0,0 +1,603 @@
pystellibs.helpers — pystellibs 1.0 documentation

Source code for pystellibs.helpers

+"""
+This is a first collection of tools making the design easier
+"""
+import sys
+from functools import partial, wraps, update_wrapper
+try:
+    from inspect import getargspec
+except ImportError:  # getargspec was removed in Python 3.11
+    from inspect import getfullargspec as getargspec
+from inspect import ismethod
+import warnings
+import numpy as np
+import itertools
+
+from .ezunits import unit, hasUnit
+
+
+__all__ = ['NameSpace', 'Pipe', 'Pipeable', 'Pipegroup', 'chunks',
+           'deprecated', 'generator', 'isNestedInstance', 'keywords_first',
+           'kfpartial', 'merge_records', 'missing_units_warning', 'nbytes',
+           'path_of_module', 'pretty_size_print', 'type_checker',
+           'val_in_unit']
+
+
+
[docs]class NameSpace(dict): + """A dict subclass that exposes its items as attributes. + """ + def __init__(self, name, *args, **kwargs): + self.__name__ = name + dict.__init__(self, *args, **kwargs) + + def __dir__(self): + return tuple(self) + + def __repr__(self): + names = ', '.join([k for k in dir(self) if k[0] != '_']) + return "{s.__name__:s}: {r:s}".format(s=self, r=names) + + def __getattribute__(self, name): + try: + return self[name] + except KeyError: + msg = "'{s.__name__:s}' has no attribute '{name:s}'" + raise AttributeError(msg.format(s=self, name=name)) + + def __setattr__(self, name, value): + self[name] = value + + def __delattr__(self, name): + del self[name] + + def __enter__(self): + return self + + def __exit__(self, *args, **kwargs): + return False
+ + +
[docs]def generator(func): + """ A dummy decorator that only makes code more readable. + It allows one to explicitly mark a function as a generator (yielding values) + and does nothing more than calling the initial function. + """ + @wraps(func) + def deco(*args, **kwargs): + return func(*args, **kwargs) + return deco
+ + +
[docs]def deprecated(func): + """ A dummy decorator that warns against using a deprecated function """ + @wraps(func) + def deco(*args, **kwargs): + txt = 'Function {0:s} is deprecated. You should avoid its usage' + warnings.warn(txt.format(func.__name__)) + return func(*args, **kwargs) + return deco
+ + +
[docs]@generator +def chunks(l, n): + """ Yield successive n-sized chunks from l. + + Parameters + ---------- + l: iterable + object to iterate over + + n: int + number of elements per slice + + Returns + ------- + chunk: tuple + n values from l + """ + it = iter(l) + while True: + chunk = tuple(itertools.islice(it, n)) + if chunk: + yield chunk + else: + return  # PEP 479: raising StopIteration inside a generator is an error since Python 3.7
+ + +
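A usage sketch for the helper above:

from pystellibs.helpers import chunks

print(list(chunks(range(7), 3)))   # [(0, 1, 2), (3, 4, 5), (6,)]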
[docs]def isNestedInstance(obj, cl): + """ Test for sub-class types + I could not find a universal test + + Parameters + ---------- + obj: object instance + object to test + + cl: Class + top level class to test + + returns + ------- + r: bool + True if obj is indeed an instance or subclass instance of cl + """ + tree = [ cl ] + if hasattr(cl, '__subclasses__'): + for k in cl.__subclasses__(): + if hasattr(k, '__subclasses__'): + tree += k.__subclasses__() + return issubclass(obj.__class__, tuple(tree))
+ + +
[docs]def type_checker(name, obj, tp): + """ Check a given type and raise a type error if not correct + + Parameters + ---------- + name: str + name of the variable to show in the exception text + + obj: object + object to check + + tp: type + expected type of obj + + Raises + ------ + :exc:TypeError: + raises a TypeError if object is not of the correct type of a subclass of it + """ + if not isNestedInstance(obj, tp): + txt = 'Expected "{0:s}" of type {1:s}, got {2:s} instead.' + raise TypeError(txt.format(name, str(tp.__name__), str(type(obj).__name__)))
+ + +
[docs]class Pipeable(object): + """ Decorator overloading | operator (__ror__) such that you can pipe + functions where the first argument is the variable on the left side of the + | operator. + This decorator allows you to use the decorated function normally and uses + the provided values when using in pipes. + + >>> import pylab as plt + >>> _p = Pipeable(plt.plot, color='red', linestyle='--') + >>> _p(range(10), 'o-') # works + >>> range(10) | _p # will plot a red dashed line + + """ + def __init__(self, func, *args, **kwargs): + self.func = func + self.args = args + self.kwargs = kwargs + + def __ror__(self, lhs): + return self.func(lhs, *self.args, **self.kwargs) + + def __call__(self, *args, **kwargs): + return self.func(*args, **kwargs) + + def __repr__(self): + return self.func.__repr__()
+ + +
[docs]class Pipe(object): + """ Decorator overloading | operator (__ror__) such that you can pipe + functions where the first argument is the variable on the left side of the + | operator. + The difference with Pipeable is that you cannot use decorated function + outside of pipes but you gain the possibility to update the calling + parameters + + Used with keywords_first make this a powerful Task + """ + def __init__(self, func, *args, **kwargs): + self.func = func + self.args = args + self.kwargs = kwargs + update_wrapper(self, func) + + def __or__(self, other): + if isinstance(other, Pipe): + return Pipegroup( (self, other), mode='multi' ) + + def __and__(self, other): + if isinstance(other, Pipe): + return Pipegroup( (self, other) ) + + def __ror__(self, other): + return self.func(other, *self.args, **self.kwargs) + + def __call__(self, *args, **kwargs): + return Pipeable(self.func, *args, **kwargs) + + def __repr__(self): + return 'Pipe: {}'.format(self.func.__repr__()) + + def __str__(self): + return '{}'.format(self.func.__name__)
+ + +
[docs]class Pipegroup(object): + def __init__(self, pipes, mode='sequential'): + self.pipes = list(pipes) + self.mode = mode + self.func = self + + def __len__(self): + return len(self.pipes) + +
[docs] def seq_call(self, val, *args, **kwargs): + r = self.pipes[0].func(val) + if len(self) > 1: + for pk in self.pipes[1:]: + r = pk.func(r) + return r
+ +
[docs] def append(self, other): + self.pipes.append(other)
+ + def __or__(self, other): + if isinstance(other, Pipe): + if self.mode in ['multi', '|']: + self.append(other) + return self + else: + return Pipegroup( (self, other), mode='multi') + + def __and__(self, other): + if isinstance(other, Pipe): + if self.mode in ['sequential', '&']: + self.append(other) + return self + else: + return Pipegroup( (self, other), mode='sequential') + +
[docs] def multi_call(self, vals, iter=True): + return [ pk.func(vals) for pk in self.pipes ]
+ + def __call__(self, val, *args, **kwargs): + mode = kwargs.get('mode', self.mode).lower() + if mode in ['sequential', '&']: + return self.seq_call(val, *args, **kwargs) + elif mode in ['multi', '|']: + return self.multi_call(val, *args, **kwargs) + else: + raise NotImplemented + + def __ror__(self, other): + return self(other) + + def __repr__(self): + txt = 'Pipegroup: mode={},\n\t | '.format(self.mode) + + if self.mode == 'sequential': + txt += ' & '.join([str(pk) for pk in self.pipes]) + else: + txt += '\n\t | '.join([str(pk) for pk in self.pipes]) + return txt + + def __str__(self): + if self.mode == 'sequential': + delim = ' & ' + else: + delim = ' | ' + return '({})'.format(delim.join([str(pk) for pk in self.pipes])).replace('Pipe: ', '')
+ + +
[docs]def keywords_first(f): + """ Decorator that enables to access any argument or keyword as a keyword """ + # http://code.activestate.com/recipes/577922/ (r2) + @wraps(f) + def wrapper(*a, **k): + a = list(a) + for idx, arg in enumerate(getargspec(f).args, -ismethod(f)): + if arg in k: + if idx < len(a): + a.insert(idx, k.pop(arg)) + else: + break + return f(*a, **k) + return wrapper
+ + +
[docs]def kfpartial(fun, *args, **kwargs): + """ Allows to create partial functions with arbitrary arguments/keywords """ + return partial(keywords_first(fun), *args, **kwargs)
+ + +def warning_on_one_line(message, category, filename, lineno, file=None, + line=None): + return " {0:s}:{1:d} {2:s}:{3:s}".format(filename, lineno, + category.__name__, str(message)) + + +
[docs]def missing_units_warning(name, defaultunit): + """ Warn if any unit is missing + + Parameters + ---------- + name: str + name of the variable + + defaultunit: str + default unit definition + + Raises + ------ + warning: warnings.warn + warn if units are assumed + """ + warnings.formatwarning = warning_on_one_line + msg = 'Variable {0:s} does not have explicit units. Assuming `{1:s}`\n' + # stacklevel makes the correct code reference + warnings.warn(msg.format(name, defaultunit), stacklevel=4)
+ + +
[docs]def val_in_unit(varname, value, defaultunit): + """ check units and convert to defaultunit or create the unit information + + Parameters + ---------- + varname: str + name of the variable + + value: value + value of the variable, which may be unitless + + defaultunit: str + default units is unitless + + Returns + ------- + quantity: ezunits.Quantity + value with units + + Example + ------- + >>> r = 0.5 + >>> print(val_in_unit('r', r, 'degree')) + # UserWarning: Variable r does not have explicit units. Assuming `degree` + <Quantity(0.5, 'degree')> + + >>> r = 0.5 * unit['degree'] + >>> print(val_in_unit('r', r, 'degree')) + <Quantity(0.5, 'degree')> + """ + if not hasUnit(value): + missing_units_warning(varname, defaultunit) + return value * unit[defaultunit] + else: + return value.to(defaultunit)
+ + +
[docs]def merge_records(lst): + """ generates a stack of records even with slightly different but compatible + dtypes + + Parameters + ---------- + + lst: sequence of np.recarray + sequence of individual records + + Returns + ------- + val: np.recarray + array of stacked records + Note: if lst is empty, an empty list is returned + """ + r = [] + for rk in lst: + r.append(rk.tolist()[0]) + names = rk.dtype.names + if len(r) > 0: + return np.rec.fromrecords(r, names=names) + else: + return []
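+
+# Example: stack two single-record arrays with compatible dtypes
+# (note that only the first record of each input array is kept).
+_rec_a = np.rec.fromrecords([(1, 2.5)], names='idx,flux')
+_rec_b = np.rec.fromrecords([(2, 3.5)], names='idx,flux')
+_stacked = merge_records([_rec_a, _rec_b])   # -> 2-row recarray ('idx', 'flux')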
+ + +
[docs]def path_of_module(mod=None): + """ returns the definition code path of a given module, object or function + If nothing is provided, the current frame will be queried, i.e., the + directory of the code calling this function. + + Parameters + ---------- + mod: module, class, function + object to find the definition of + if None, inspect.currentframe is used + + returns + ------- + path: str + path of the definition + """ + import os + import inspect + + if mod is None: + mod = inspect.currentframe() + return '/'.join(os.path.abspath(inspect.getfile(mod)).split('/')[:-1])
+ + +
[docs]def pretty_size_print(num_bytes): + """ + Output number of bytes in a human readable format + + Parameters + ---------- + num_bytes: int + number of bytes to convert + + returns + ------- + output: str + string representation of the size with appropriate unit scale + """ + if num_bytes is None: + return + + KiB = 1024 + MiB = KiB * KiB + GiB = KiB * MiB + TiB = KiB * GiB + PiB = KiB * TiB + EiB = KiB * PiB + ZiB = KiB * EiB + YiB = KiB * ZiB + + if num_bytes > YiB: + output = '%.3g YB' % (num_bytes / YiB) + elif num_bytes > ZiB: + output = '%.3g ZB' % (num_bytes / ZiB) + elif num_bytes > EiB: + output = '%.3g EB' % (num_bytes / EiB) + elif num_bytes > PiB: + output = '%.3g PB' % (num_bytes / PiB) + elif num_bytes > TiB: + output = '%.3g TB' % (num_bytes / TiB) + elif num_bytes > GiB: + output = '%.3g GB' % (num_bytes / GiB) + elif num_bytes > MiB: + output = '%.3g MB' % (num_bytes / MiB) + elif num_bytes > KiB: + output = '%.3g KB' % (num_bytes / KiB) + else: + output = '%.3g Bytes' % (num_bytes) + + return output
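+
+# Example: human-readable byte counts.
+_one_gib = pretty_size_print(1024 ** 3 + 1)   # -> '1 GB'
+# pretty_size_print(512) -> '512 Bytes' ; pretty_size_print(2048) -> '2 KB'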
+ + +
[docs]def nbytes(obj, pprint=False): + """ return the number of bytes of the object, which includes size of nested + structures + + Parameters + ---------- + obj: object + object to find the size of + + pprint: bool, optional (default=False) + if set, returns the result after calling pretty_size_print + + returns + ------- + num_bytes: int or str + total number of bytes or human readable corresponding string + """ + num_bytes = sum(k.nbytes if hasattr(k, 'nbytes') else sys.getsizeof(k) + for k in obj.__dict__.values()) + if pprint: + return pretty_size_print(num_bytes) + else: + return num_bytes
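+
+# Example sketch: in-memory footprint of an object's attributes
+# (``_Payload`` is an illustrative class; the exact figure is platform dependent).
+class _Payload(object):
+    def __init__(self):
+        self.data = np.zeros(1000)   # 8000 bytes
+        self.label = 'demo'
+
+_size = nbytes(_Payload())                    # -> 8000 + sys.getsizeof('demo')
+_size_str = nbytes(_Payload(), pprint=True)   # -> roughly '7.9 KB'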
+
+ +
+ +
+
+ +
+
+ + + + + Fork me on GitHub + + + + + + \ No newline at end of file diff --git a/_modules/pystellibs/interpolator.html b/_modules/pystellibs/interpolator.html new file mode 100644 index 0000000..ee98d15 --- /dev/null +++ b/_modules/pystellibs/interpolator.html @@ -0,0 +1,142 @@ + + + + + + + + pystellibs.interpolator — pystellibs 1.0 documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for pystellibs.interpolator

+from .interpolator import BaseInterpolator
+from .lejeune import LejeuneInterpolator
+from .ndlinear import NDLinearInterpolator
+
+
+
[docs]def find_interpolator(name, osl=None, **kwargs): + """ Find an interpolator from its name and + instantiate it if an osl was provided + + Parameters + ---------- + name: str + name of the interpolator + osl: Stellib instance, optional + library to work with + """ + mapping = {"lejeune": LejeuneInterpolator, + "ndlinear": NDLinearInterpolator, + "lejeuneinterpolator": LejeuneInterpolator, + "ndlinearinterpolator": NDLinearInterpolator} + + try: + cls = mapping.get(name.lower(), None) + if cls is not None: + if osl is not None: + return cls(osl, **kwargs) + else: + return cls + except AttributeError: + pass + + return None
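+
+# Example: resolve an interpolator class from its name; passing ``osl`` (a
+# Stellib instance) would return an instance instead of the class.
+_cls = find_interpolator("ndlinear")             # -> NDLinearInterpolator
+_cls = find_interpolator("LejeuneInterpolator")  # -> LejeuneInterpolator
+_cls = find_interpolator("unknown")              # -> None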
+
+ +
+ +
+
+ +
+
+ + + + + Fork me on GitHub + + + + + + \ No newline at end of file diff --git a/_modules/pystellibs/interpolator/interpolator.html b/_modules/pystellibs/interpolator/interpolator.html new file mode 100644 index 0000000..4f211da --- /dev/null +++ b/_modules/pystellibs/interpolator/interpolator.html @@ -0,0 +1,130 @@ + + + + + + + + pystellibs.interpolator.interpolator — pystellibs 1.0 documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for pystellibs.interpolator.interpolator

+""" Base interpolator: a dummy class is derived by the different interpolator schemes """
+
+
+
[docs]class BaseInterpolator(object): + """ Base class for interpolation + + It sets what can be expected as methods during the interpolation calls + """ + def __init__(self, osl, *args, **kwargs): + pass + +
[docs] def interp(self, aps, *args, **kwargs): + """ Interpolation over spectra """ + raise NotImplementedError()
+ +
[docs] def interp_other(self, aps, values, *args, **kwargs): + """ Interpolation over provided values """ + raise NotImplementedError()
+
+ +
+ +
+
+ +
+
+ + + + + Fork me on GitHub + + + + + + \ No newline at end of file diff --git a/_modules/pystellibs/interpolator/lejeune.html b/_modules/pystellibs/interpolator/lejeune.html new file mode 100644 index 0000000..4304541 --- /dev/null +++ b/_modules/pystellibs/interpolator/lejeune.html @@ -0,0 +1,913 @@ + + + + + + + + pystellibs.interpolator.lejeune — pystellibs 1.0 documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for pystellibs.interpolator.lejeune

+""" Lejeune interpolator is basically a linear interpolator for a Lejeune grid
+based spectral library.
+
+This is the simplest interpolator but most commonly used.
+
+It takes care of boundary conditions by imposing limits to extrapolation on a
+given grid.
+"""
+
+import numpy as np
+from .interpolator import BaseInterpolator
+
+
+def __det3x3__(a):
+    """ compute the 3x3 determinant of an array
+
+    Hard-coded equations are 8 times faster than np.linalg.det for a 3x3 matrix
+
+    Parameters
+    ----------
+    a: ndarray, shape=(9, ), dtype=float
+        flattened 3x3 matrix (e.g. obtained with ``m.ravel()``)
+
+    Returns
+    -------
+    val: float
+        determinant of a
+    """
+    val  = +a[0] * (a[4] * a[8] - a[7] * a[5])
+    val += -a[1] * (a[3] * a[8] - a[6] * a[5])
+    val += +a[2] * (a[3] * a[7] - a[6] * a[4])
+    return val
+
+
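+# Quick sanity check of __det3x3__ (note that it expects the *flattened* matrix):
+_m_demo = np.array([2., 0., 1.,
+                    1., 3., 0.,
+                    0., 1., 4.])
+_det_demo = __det3x3__(_m_demo)   # -> 25.0, equals np.linalg.det(_m_demo.reshape(3, 3))
+
+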
+def __interp__(T0, g0, T,g, dT_max=0.1, eps=1e-6):
+    """
+    Interpolation of the (T,g) grid at fixed Z
+
+    Translated from Pegase.2 fortran version
+    (this may not be pythonic though)
+
+    Note: preference is always given to the temperature over
+        the gravity when needed.
+
+    Parameters
+    ----------
+    T0: float
+        log(Teff) to obtain
+
+    g0: float
+        log(g) to obtain
+
+    T: float
+        log(Teff) of the grid
+
+    g: float
+        log(g) of the grid
+
+    dT_max: float, optional
+        If T2 (resp. T1) is too far from T compared to T1 (resp. T2), i2
+        (resp. i1) is not used.  (see below for namings)
+
+    eps: float
+        temperature sensitivity under which points are considered to have
+        the same temperature
+
+    Returns
+    -------
+    idx: ndarray, dtype=int, size=4
+        4 star indexes
+
+    w: ndarray, dtype=float, size=4
+        4 associated weights
+
+    .. note::
+        if index is -1, this means the point is rejected and the associated
+        weight is 0.
+
+
+    Naming conventions
+    ------------------
+
+    i1 = index of the star with temperature > T and gravity > g.
+    Among all such stars, one chooses the one minimizing
+    |Delta T|+kappa*|Delta g|.
+    If no star with temperature > T and gravity > g exists, i1 = -1
+
+    i2 = index of the star with temperature > T and gravity < g.
+
+    i3 = index of the star with temperature < T and gravity > g.
+
+    i4 = index of the star with temperature < T and gravity < g.
+
+    g
+    /|\
+    | i3  |
+    |     |  i1
+    | ----x------
+    |     |    i2
+    |  i4 |
+    |__________\ T
+                /
+    """
+    kappa  = 0.1
+
+    idx    = np.arange(len(g))
+    deltag = g - g0
+    deltaT = T - T0
+    dist   = kappa * abs(deltag) + abs(deltaT)
+
+    if dist.min() == 0:
+        return np.array((dist.argmin(),-1,-1,-1)), np.array((1.,0.,0.,0.))
+
+    # Looking for i_{1..4}
+    ind_dT = deltaT >= 0
+    ind_dg = deltag >= 0
+
+    # i1
+    ind = (ind_dT & ind_dg)
+    if True in ind:
+        i1  = idx[ind][dist[ind].argmin()]
+    else:
+        i1 = -1
+
+    # i2
+    ind = (ind_dT & ~ind_dg)
+    if True in ind:
+        i2  = idx[ind][dist[ind].argmin()]
+    else:
+        i2 = -1
+
+    # i3
+    ind = (~ind_dT & ind_dg)
+    if True in ind:
+        i3  = idx[ind][dist[ind].argmin()]
+    else:
+        i3 = -1
+
+    # i4
+    ind = (~ind_dT & ~ind_dg)
+    if True in ind:
+        i4  = idx[ind][dist[ind].argmin()]
+    else:
+        i4 = -1
+
+    # checking integrity
+    if ( (i1 < 0) & (i2 < 0) & (i3 < 0) & (i4 < 0) ):
+        raise ValueError("Interp. Error, could not find appropriate knots")
+
+    T1 = T[i1]
+    T2 = T[i2]
+    T3 = T[i3]
+    T4 = T[i4]
+    g1 = g[i1]
+    g2 = g[i2]
+    g3 = g[i3]
+    g4 = g[i4]
+
+    # If, T2 (resp. T1) is too far from T compared to T1
+    # (resp. T2), i2 (resp. i1) is not used.
+    # The same for i3 and i4.
+    if ( (i1 > 0) & (i2 > 0) ):
+        if (T1 < T2 - dT_max):
+            i2 = -1
+        elif (T2 < T1 - dT_max):
+            i1 = -1
+
+    if ( (i3 > 0) & (i4 > 0) ):
+        if (T3 > T4 + dT_max):
+            i4 = -1
+        elif (T4 > T3 + dT_max):
+            i3 = -1
+
+    if ( (i1 < 0) & (i2 < 0) & (i3 < 0) & (i4 < 0) ):
+        raise ValueError("Interp. Error, could not find appropriate knots")
+
+    # Interpolation in the (T, g) plane between the used points
+    # (at least 1, at most 4).
+    # Code "0110" means that i1 = i4 = 0, i2 /=0 and i3 /= 0.
+    #
+    # Note: preference is always given to the temperature over
+    #   the gravity when needed.
+    if (i1 < 0):
+        if (i2 < 0):
+            if (i3 < 0):
+                if (i4 < 0):
+                    #                   # 0000
+                    raise ValueError("Error")  # should not be possible
+                else:                   # 0001
+                    alpha1 = 0.
+                    alpha2 = 0.
+                    alpha3 = 0.
+                    alpha4 = 1.
+                # endif
+            elif (i4 < 0):              # 0010
+                alpha1 = 0.
+                alpha2 = 0.
+                alpha3 = 1.
+                alpha4 = 0.
+            else:                       # 0011
+                alpha1 = 0.
+                alpha2 = 0.
+                if ( abs(T3 - T4) < eps ):
+                    if (g3 == g4):
+                        alpha3 = 0.5
+                    else:
+                        alpha3 = (g0 - g4) / (g3 - g4)
+                    # endif
+                    alpha4 = 1. - alpha3
+                else:
+                    if (T3 > T4):
+                        alpha3 = 1.
+                        alpha4 = 0.
+                        i4 = -1
+                    else:
+                        alpha3 = 0.
+                        i3 = -1
+                        alpha4 = 1.
+                    # endif
+                # endif
+            # endif
+        elif (i3 < 0):
+            if (i4 < 0):
+                #                        # 0100
+                alpha1 = 0.
+                alpha2 = 1.
+                alpha3 = 0.
+                alpha4 = 0.
+            else:                        # 0101
+                alpha1 = 0.
+                if (T2 == T4):
+                    alpha2 = 0.5
+                else:
+                    alpha2 = (T0 - T4) / (T2 - T4)
+                # endif
+                alpha3 = 0.
+                alpha4 = 1. - alpha2
+            # endif
+        elif (i4 < 0):                   # 0110
+            alpha1 = 0.
+            if (T2 == T3):
+                alpha2 = 0.5
+            else:
+                alpha2 = (T0 - T3) / (T2 - T3)
+            # endif
+            alpha3 = 1. - alpha2
+            alpha4 = 0.
+        else:                            # 0111
+            # Assume that (T, g) is within the triangle i
+            # formed by the three points.
+
+            mat0 = np.asarray([
+                [ T2, T3, T4 ],
+                [ g2, g3, g4 ],
+                [ 1., 1., 1. ]  ])
+            mat2 = np.asarray([
+                [ T0, T3, T4 ],
+                [ g0, g3, g4 ],
+                [ 1., 1.,  1.]  ])
+            mat3 = np.asarray([
+                [ T2, T0, T4 ],
+                [ g2, g0, g4 ],
+                [ 1., 1.,  1.]  ])
+            mat4 = np.asarray([
+                [ T2, T3, T0 ],
+                [ g2, g3, g0 ],
+                [ 1., 1., 1. ]  ])
+            det0 = __det3x3__(mat0.ravel())
+            det2 = __det3x3__(mat2.ravel())
+            det3 = __det3x3__(mat3.ravel())
+            det4 = __det3x3__(mat4.ravel())
+            alpha1 = 0.
+            alpha2 = det2 / det0
+            alpha3 = det3 / det0
+            alpha4 = det4 / det0
+
+            # If (T, g) is outside the triangle formed
+            # by the three used points use only two points.
+            if ((alpha2 < 0.) | (alpha2 > 1. ) | (alpha3 < 0.) | (alpha3 > 1.) | 
+                (alpha4 < 0.) | (alpha4 > 1. ) ):
+                alpha1 = 0.
+                if (T2 == T3):
+                    alpha2 = 0.5
+                else:
+                    alpha2 = (T0 - T3) / (T2 - T3)
+                # endif
+                alpha3 = 1. - alpha2
+                alpha4 = 0.
+                i4 = -1
+            # endif
+        # endif
+    elif (i2 < 0):
+        if (i3 < 0):
+            if (i4 < 0):
+                #                      # 1000
+                alpha1 = 1.
+                alpha2 = 0.
+                alpha3 = 0.
+                alpha4 = 0.
+            else:                      # 1001
+                if (T1 == T4):
+                    alpha1 = 0.5
+                else:
+                    alpha1 = (T0 - T4) / (T1 - T4)
+                # endif
+                alpha2 = 0.
+                alpha3 = 0.
+                alpha4 = 1. - alpha1
+            # endif
+        elif (i4 < 0):                 # 1010
+            if (T1 == T3):
+                alpha1 = 0.5
+            else:
+                alpha1 = (T0 - T3) / (T1 - T3)
+            # endif
+            alpha2 = 0.
+            alpha3 = 1. - alpha1
+            alpha4 = 0.
+        else:                          # 1011
+            # Assume that (T, g) is within the triangle formed by the three points.
+            mat0 = np.asarray([
+                [ T1, T3, T4 ],
+                [ g1, g3, g4 ],
+                [ 1., 1.,  1.]  ])
+            mat1 = np.asarray([
+                [ T0, T3, T4 ],
+                [ g0, g3, g4 ],
+                [ 1., 1.,  1.]  ])
+            mat3 = np.asarray([
+                [ T1, T0, T4 ],
+                [ g1, g0, g4 ],
+                [ 1., 1.,  1.]  ])
+            mat4 = np.asarray([
+                [ T1, T3, T0 ],
+                [ g1, g3, g0 ],
+                [ 1., 1.,  1.]  ])
+            det0 = __det3x3__(mat0.ravel())
+            det1 = __det3x3__(mat1.ravel())
+            det3 = __det3x3__(mat3.ravel())
+            det4 = __det3x3__(mat4.ravel())
+            alpha1 = det1 / det0
+            alpha2 = 0.
+            alpha3 = det3 / det0
+            alpha4 = det4 / det0
+
+            # If (T, g) is outside the triangle formed by the three used points,
+            # use only two points.
+
+            if ((alpha1 < 0.) | (alpha1 > 1.) | (alpha3 < 0.) | (alpha3 > 1.) | (alpha4 < 0.) | (alpha4 > 1.) ):
+                if (T1 == T4):
+                    alpha1 = 0.5
+                else:
+                    alpha1 = (T0 - T4) / (T1 - T4)
+                # endif
+                alpha2 = 0.
+                alpha3 = 0.
+                i3 = -1
+                alpha4 = 1. - alpha1
+            # endif
+        # endif
+    elif (i3 < 0):
+        if (i4 < 0):
+            #                       # 1100
+            if (abs(T1 - T2) < eps):
+                if (g1 == g2):
+                    alpha1 = 0.5
+                else:
+                    alpha1 = (g0 - g2) / (g1 - g2)
+                # endif
+                alpha2 = 1. - alpha1
+            else:
+                if (T1 < T2):
+                    alpha1 = 1.
+                    alpha2 = 0.
+                    i2 = -1
+                else:
+                    alpha1 = 0.
+                    i1 = -1
+                    alpha2 = 1.
+                # endif
+            # endif
+            alpha3 = 0.
+            alpha4 = 0.
+        else:                       # 1101
+            # Assume that (T, g) is within the triangle formed by the three points.
+            mat0 = np.asarray([
+                [ T1, T2, T4 ],
+                [ g1, g2, g4 ],
+                [ 1., 1.,  1.]  ])
+            mat1 = np.asarray([
+                [ T0, T2, T4 ],
+                [ g0, g2, g4 ],
+                [ 1., 1.,  1.]  ])
+            mat2 = np.asarray([
+                [ T1, T0, T4 ],
+                [ g1, g0, g4 ],
+                [ 1., 1.,  1.]  ])
+            mat4 = np.asarray([
+                [ T1, T2, T0 ],
+                [ g1, g2, g0 ],
+                [ 1., 1.,  1. ]  ])
+            det0 = __det3x3__(mat0.ravel())
+            det1 = __det3x3__(mat1.ravel())
+            det2 = __det3x3__(mat2.ravel())
+            det4 = __det3x3__(mat4.ravel())
+            alpha1 = det1 / det0
+            alpha2 = det2 / det0
+            alpha3 = 0.
+            alpha4 = det4 / det0
+
+            # If (T, g) is outside the triangle formed by the three used points,
+            # use only two points.
+            if ((alpha1 < 0.) | (alpha1 > 1.) | (alpha2 < 0.) | (alpha2 > 1.) | (alpha4 < 0.) | (alpha4 > 1.) ):
+                if (T1 == T4):
+                    alpha1 = 0.5
+                else:
+                    alpha1 = (T0 - T4) / (T1 - T4)
+                # endif
+                alpha2 = 0.
+                i2 = -1
+                alpha3 = 0.
+                alpha4 = 1. - alpha1
+            # endif
+        # endif
+    elif (i4 < 0):
+        #                           # 1110
+        # Assume that (T, g) is within the triangle formed by the three points.
+        mat0 = np.asarray([
+            [ T1, T2, T3 ],
+            [ g1, g2, g3 ],
+            [ 1., 1.,  1.]  ])
+        mat1 = np.asarray([
+            [ T0, T2, T3 ],
+            [ g0, g2, g3 ],
+            [ 1., 1.,  1.]  ])
+        mat2 = np.asarray([
+            [ T1, T0, T3 ],
+            [ g1, g0, g3 ],
+            [ 1., 1.,  1.]  ])
+        mat3 = np.asarray([
+            [ T1, T2, T0 ],
+            [ g1, g2, g0 ],
+            [ 1., 1.,  1.]  ])
+        det0 = __det3x3__(mat0.ravel())
+        det1 = __det3x3__(mat1.ravel())
+        det2 = __det3x3__(mat2.ravel())
+        det3 = __det3x3__(mat3.ravel())
+        alpha1 = det1 / det0
+        alpha2 = det2 / det0
+        alpha3 = det3 / det0
+        alpha4 = 0.
+
+        # If (T, g) is outside the triangle formed by the three used points,
+        # use only two points.
+        if ((alpha1 < 0.) | (alpha1 > 1.) | (alpha2 < 0.) | (alpha2 > 1.) | (alpha3 < 0.) | (alpha3 > 1.) ):
+            alpha1 = 0.
+            i1 = -1
+            if (T2 == T3):
+                alpha2 = 0.5
+            else:
+                alpha2 = (T0 - T3) / (T2 - T3)
+            # endif
+            alpha3 = 1. - alpha2
+            alpha4 = 0.
+        # endif
+    # endif
+
+    # All four points used.
+
+    if ( (i3 >= 0) & (i4 >= 0) & (i1 >= 0) & (i2 >= 0) ):
+        if (T1 != T3):
+            alpha = (T0 - T3) / (T1 - T3)
+        else:
+            alpha = 0.5
+        # endif
+        if (T2 != T4):
+            beta = (T0 - T4) / (T2 - T4)
+        else:
+            beta = 0.5
+        # endif
+        gprim = alpha * g1 + (1 - alpha) * g3
+        gsec  = beta * g2  + (1 - beta ) * g4
+        if (gprim != gsec):
+            gamma = ( g0 - gsec ) / ( gprim - gsec )
+        else:
+            gamma = 0.5
+        # endif
+        alpha1 = alpha * gamma
+        alpha2 = beta * ( 1 - gamma )
+        alpha3 = ( 1 - alpha ) * gamma
+        alpha4 = (  1 - beta ) * ( 1 - gamma )
+    # endif
+    return np.asarray((i1, i2, i3, i4)), np.asarray((alpha1, alpha2, alpha3, alpha4))
+
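+# Example on a tiny (logT, logg) grid: the four returned indices are the grid
+# knots bracketing the requested point and the weights sum to 1
+# (illustrative snippet; the interpolator classes call __interp__ internally).
+_T_demo = np.array([3.5, 3.5, 3.6, 3.6])
+_g_demo = np.array([4.0, 4.5, 4.0, 4.5])
+_i_demo, _w_demo = __interp__(3.55, 4.2, _T_demo, _g_demo)
+# _i_demo -> array([3, 2, 1, 0]) ; _w_demo ~ [0.2, 0.3, 0.2, 0.3]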
+
+'''
+class LejeuneInterpolator(BaseInterpolator):
+    """ Interpolation for grid based on the Lejeune library definition
+
+    The interpolation is N-D linear in log-temperature, log-gravity, and linear
+    in metallicity Z. Preference is always given to the temperature over the
+    gravity when needed.
+
+    This version is translated from Pegase
+
+    Attributes
+    ----------
+
+    dT_max: float, optional
+        If, T2 (resp. T1) is too far from T compared to T1 (resp. T2), i2
+        (resp. i1) is not used.  (see below for namings)
+
+    eps: float
+        temperature sensitivity under which points are considered to have
+        the same temperature
+    """
+    def __init__(self, osl, dT_max=0.1, eps=1e-6, *args, **kwargs):
+        BaseInterpolator.__init__(self, osl, *args, **kwargs)
+        self.dlogT_max = dT_max
+        self.eps = eps
+        self.osl = osl
+        
+    def interp(self, aps, weights=None, **kwargs):
+        return self.interp_other(aps, self.osl.spectra, weights=weights, **kwargs)
+        
+    def interp_other(self, aps, other, weights=None, **kwargs):
+        # get osl data
+        osl_logT, osl_logg, osl_logZ = self.osl.get_interpolation_data().T[:3]
+        grid_logZ   = np.unique(osl_logZ)
+        if np.ndim(other) < 2:
+            values = np.atleast_2d([other]).T
+        else:
+            values = np.atleast_2d(other)
+        
+        # params
+        library_index = np.arange(len(osl_logT), dtype=int)
+        _aps = np.atleast_2d(aps)
+        if weights is None:
+            _weights = np.ones(len(_aps), dtype=float)
+        elif np.ndim(weights) == 0:
+            _weights = np.ones(len(_aps), dtype=float) * weights
+        else:
+            _weights = weights
+        
+        final_values = []
+        for current_aps, current_weight in zip(np.atleast_2d(aps), _weights):
+            logT, logg, logZ = current_aps
+            # logZ = np.log10(Z)
+        
+            # find Zsup and Zinf
+            where = np.searchsorted(grid_logZ, logZ)
+            if where >=0:
+                logZinf = grid_logZ[where]
+            else:
+                raise ValueError("Metallicity extrapolation")
+            if abs(logZinf - logZ) < 1e-4: 
+                # exact match no need to interpolate twice.
+                select = (abs(logZinf - osl_logZ) < 1e-4)
+                # call Pegase interpolation scheme
+                #   Interpolation of the (logT, logg) grid at fixed Z from pegase.2
+                #   it returns the knots'indices from the input data and their weights, resp.
+                #   the final result is then the weighted sum.
+                indices, alphas = __interp__(logT, logg, 
+                                             osl_logT[select], osl_logg[select], 
+                                             dT_max=self.dlogT_max, eps=self.eps)
+                # indices are for the selection
+                # if indices[k] = -1, then one corner is rejected
+                data_indices = library_index[select][indices[indices >= 0]]
+                data_weights = alphas[indices >= 0]
+                spectrum = np.sum(values[data_indices] * data_weights[:, None], axis=0)
+                # store the weighted sum * the input requested weight
+                final_values.append(spectrum * current_weight)
+            else:
+                logZsup = grid_logZ[where + 1]
+                # interpolate within each (therefore calling interp with Zinf, Zsup, resp.) 
+                # then linearly interpolate between logZ values.
+                inf_spectrum = self.interp_other((logT, logg, logZinf), values, weights=current_weight, **kwargs)
+                sup_spectrum = self.interp_other((logT, logg, logZsup), values, weights=current_weight, **kwargs)
+                spectrum = inf_spectrum * (logZ - logZinf) / (logZsup - logZinf) + sup_spectrum * (logZsup - logZ) / (logZsup - logZinf)
+                final_values.append(spectrum)
+        return np.squeeze(final_values)
+'''
+
+
+
[docs]class LejeuneInterpolator(BaseInterpolator): + """ Interpolation for grid based on the Lejeune library definition + + The interpolation is N-D linear in log-temperature, log-gravity, and linear + in metallicity Z. Preference is always given to the temperature over the + gravity when needed. + + This version is translated from Pegase + + Attributes + ---------- + + dT_max: float, optional + If, T2 (resp. T1) is too far from T compared to T1 (resp. T2), i2 + (resp. i1) is not used. (see below for namings) + + eps: float + temperature sensitivity under which points are considered to have + the same temperature + """ + def __init__(self, osl, dT_max=0.1, eps=1e-6, *args, **kwargs): + BaseInterpolator.__init__(self, osl, *args, **kwargs) + self.dlogT_max = dT_max + self.eps = eps + self.osl = osl + + def _osl_interp_weights(self, osl, T0, g0, Z0, dT_max=0.1, eps=1e-6): + """ Interpolation of the T,g grid + + Interpolate on the grid and returns star indices and associated weights, + and Z. + + 3 to 12 stars are returned. + It calls _interp_, but reduce the output to the relevant stars. + + Parameters + ---------- + T0: float + log(Teff) to obtain + + g0: float + log(g) to obtain + + Z0: float + metallicity to obtain + + dT_max: float, optional + If, T2 (resp. T1) is too far from T compared to T1 (resp. T2), i2 + (resp. i1) is not used. (see below for namings) + + eps: float + temperature sensitivity under which points are considered to have + the same temperature + + Returns + ------- + idx: ndarray, dtype=int, size=4 + 4 star indexes + + w: ndarray, dtype=float, size=4 + 4 associated weights + + Returns 3 to 12 star indexes and associated weights + """ + # interpolation must be by construction from logT, logg, Z + # logZ could be an alternative. + osl_logT, osl_logg, osl_logZ = self.osl.get_interpolation_data().T[:3] + _Z = 10 ** osl_logZ + _Zv = np.unique(_Z) + _T = np.asarray(osl_logT, dtype=np.double) + _g = np.asarray(osl_logg, dtype=np.double) + + bZ_m = True in (abs(_Zv - Z0) < 1e-28) # Z_match bool + r = np.where((_Zv < Z0))[0] + Z_inf = _Zv[r.max()] if len(r) > 0 else -1. + r = np.where((_Zv > Z0))[0] + Z_sup = _Zv[r.min()] if len(r) > 0 else -1. + + index = np.zeros(4 * 3) - 1 + weights = np.zeros(4 * 3) + Z = np.zeros(4 * 3) + + if (bZ_m): + ind = np.where((abs(_Z - Z0) < 1e-28)) + i, w = __interp__(T0, g0, _T[ind], _g[ind], dT_max, eps) + index[8:] = ind[0][i] + weights[8:] = np.squeeze(w) + Z[8:] = [Z0] * 4 + else: + if (Z_inf > 0.): + ind = np.where(_Z == Z_inf) + i, w = __interp__(T0, g0, _T[ind], _g[ind], dT_max, eps) + index[:4] = ind[0][i] + weights[:4] = np.squeeze(w) + Z[:4] = [Z_inf] * 4 + + if (Z_sup > 0.): + ind = np.where(_Z == Z_sup) + i, w = __interp__(T0, g0, _T[ind], _g[ind], dT_max, eps) + index[4:8] = ind[0][i] + weights[4:8] = np.squeeze(w) + Z[4:8] = [Z_sup] * 4 + + if ((Z_inf > 0.) & (Z_sup > 0.)): + if ( Z_sup - Z_inf ) > 0.: + fz = (Z0 - Z_inf) / ( Z_sup - Z_inf ) + weights[:4] *= fz + weights[4:8] *= ( 1. - fz ) + else: + weights[:8] *= 0.5 + + ind = np.where(weights > 0) + return index[ind].astype(int), weights[ind] # / (weights[ind].sum()) #, Z[ind] + + + def _interp_weights(self, aps, weights=None, **kwargs): + """ returns interpolation nodes and weights + + Parameters + ---------- + aps: ndarray + (logT, logg, logZ) sequence. 
+ Or appropriately defined similarly to self.osl.get_interpolation_data + weights: ndarray + optional weights of each ap vector to apply during the interpolation + + Returns + ------- + node_weights: array + osl grid node indices and interpolation weights + """ + _aps = np.atleast_2d(aps) + + if weights is None: + _weights = np.ones(len(_aps), dtype=float) + elif np.ndim(weights) == 0: + _weights = np.ones(len(_aps), dtype=float) * weights + else: + _weights = weights + + node_weights = [] + for s, current_weight in zip(_aps, _weights): + logT, logg, logZ = s[:3] + Z = 10 ** logZ + current_nodes = np.array(self._osl_interp_weights(self.osl, logT, logg, Z, **kwargs)).T + current_nodes[:, 1] *= current_weight + node_weights.append(current_nodes) + + return node_weights + + def _evaluate_from_weights(self, r, other): + """ Evaluate the interpolation from interpolation nodes and weights + + Basically do a weighted sum on the grid using the interpolation weights + + Parameters + ---------- + node_weights: array + osl grid node indices and interpolation weights + result of interp_weights + + other: array + values to interpolate + + Returns + ------- + interpolated: ndarray (size(node_weights), ) + interpolated values + """ + if np.ndim(other) < 2: + values = np.atleast_2d([other]).T + else: + values = np.atleast_2d(other) + interpolated = [(((values[rk[:, 0].astype(int)].T) * rk[:, 1])).sum(1) for rk in r] + return np.squeeze(interpolated) + +
[docs] def interp(self, aps, weights=None, **kwargs): + """ + Interpolate spectra + + Parameters + ---------- + aps: ndarray + (logT, logg, logZ) sequence. + Or appropriately defined similarly to self.osl.get_interpolation_data + weights: ndarray + optional weights of each ap vector to apply during the interpolation + + Returns + ------- + s0: ndarray (len(aps), len(l0)) + interpolated spectra + """ + s0 = self.interp_other(aps, self.osl.spectra, weights=weights, **kwargs) + return s0
+ +
[docs] def interp_other(self, aps, other, weights=None, **kwargs): + """ Interpolate other grid values + + Basically do a weighted sum on the grid using the interpolation weights + + Parameters + ---------- + aps: ndarray + (logT, logg, logZ) sequence. + Or appropriately defined similarly to self.osl.get_interpolation_data + weights: ndarray + optional weights of each ap vector to apply during the interpolation + + Returns + ------- + interpolated: ndarray (size(node_weights), ) + interpolated values + """ + r = self._interp_weights(aps, weights, **kwargs) + interpolated = self._evaluate_from_weights(r, other) + return interpolated
+
+ +
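+# Usage sketch, assuming the Kurucz grid file shipped with pystellibs is
+# installed locally (wrapped in a function so nothing runs at import time).
+def _example_lejeune_interp():
+    from ..kurucz import Kurucz
+    osl = Kurucz()
+    interp = LejeuneInterpolator(osl)
+    aps = [np.log10(5777.), 4.44, np.log10(0.02)]   # (logT, logg, logZ), ~solar
+    return interp.interp(aps)    # spectrum sampled on osl.wavelength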
+ +
+
+ +
+
+ + + + + Fork me on GitHub + + + + + + \ No newline at end of file diff --git a/_modules/pystellibs/interpolator/ndlinear.html b/_modules/pystellibs/interpolator/ndlinear.html new file mode 100644 index 0000000..f1bfc73 --- /dev/null +++ b/_modules/pystellibs/interpolator/ndlinear.html @@ -0,0 +1,134 @@ + + + + + + + + pystellibs.interpolator.ndlinear — pystellibs 1.0 documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for pystellibs.interpolator.ndlinear

+"""
+N-D linear interpolation
+"""
+from .interpolator import BaseInterpolator
+from scipy.interpolate import LinearNDInterpolator
+
+
+
[docs]class NDLinearInterpolator(BaseInterpolator): + def __init__(self, osl, *args, **kwargs): + BaseInterpolator.__init__(self, osl, *args, **kwargs) + data = osl.get_interpolation_data() + values = osl.spectra + self.func = LinearNDInterpolator(data, values, **kwargs) + +
[docs] def interp(self, aps, weights=1., **kwargs): + """ Interpolate spectra """ + return self.func(aps) * weights
+ +
[docs] def interp_other(self, aps, values, **kwargs): + """ Interpolate on other values """ + f = LinearNDInterpolator(self.func.tri, values) + return f(aps)
+
+ +
+ +
+
+ +
+
+ + + + + Fork me on GitHub + + + + + + \ No newline at end of file diff --git a/_modules/pystellibs/kurucz.html b/_modules/pystellibs/kurucz.html new file mode 100644 index 0000000..bd69e57 --- /dev/null +++ b/_modules/pystellibs/kurucz.html @@ -0,0 +1,210 @@ + + + + + + + + pystellibs.kurucz — pystellibs 1.0 documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for pystellibs.kurucz

+import numpy as np
+from .stellib import AtmosphereLib
+from .config import libsdir
+from .simpletable import SimpleTable
+try:
+    from astropy.io import fits as pyfits
+except ImportError:
+    import pyfits
+
+
+
[docs]class Kurucz(AtmosphereLib): + """ + The stellar atmosphere models by Castelli and Kurucz 2004 or ATLAS9 + + * LTE + * PP + * line blanketing + """ + def __init__(self, *args, **kwargs): + self.name = 'Kurucz 2004' + self.source = libsdir + '/kurucz2004.grid.fits' + self._load_() + AtmosphereLib.__init__(self, *args, **kwargs) + + def _load_(self): + with pyfits.open(self.source) as f: + # load data + self._getWaveLength_(f) + self._getTGZ_(f) + self._getSpectra_(f) + self._getWaveLength_units(f) + + def _getWaveLength_units(self, f): + self.wavelength_unit = 'angstrom' + + def _getWaveLength_(self, f): + self._wavelength = f[0].data[-1] + + def _getTGZ_(self, f): + self.grid = SimpleTable(f[1].data) + self.grid.header.update(f[1].header.items()) + self.grid.header['NAME'] = 'TGZ' + + def _getSpectra_(self, f): + self.spectra = f[0].data[:-1] + +
[docs] def bbox(self, dlogT=0.05, dlogg=0.25): + """ Boundary of Kurucz 2004 library + + Parameters + ---------- + dlogT: float + log-temperature tolerance before extrapolation limit + + dlogg: float + log-g tolerance before extrapolation limit + + Returns + ------- + bbox: ndarray + (logT, logg) edges of the bounding polygon + """ + bbox = [(3.54406 - dlogT, 5.000 + dlogg), + (3.55403 - dlogT, 0.000 - dlogg), + (3.778, 0.000 - dlogg), + (3.778 + dlogT, 0.000), + (3.875 + dlogT, 0.500), + (3.929 + dlogT, 1.000), + (3.954 + dlogT, 1.500), + (4.146, 2.000 - dlogg), + (4.146 + dlogT, 2.000), + (4.279 + dlogT, 2.500), + (4.415 + dlogT, 3.000), + (4.491 + dlogT, 3.500), + (4.591 + dlogT, 4.000), + (4.689 + dlogT, 4.500), + (4.699 + dlogT, 5.000 + dlogg), + (3.544 - dlogT, 5.000 + dlogg) ] + + return np.array(bbox)
+ + @property + def logT(self): + return self.grid['logT'] + + @property + def logg(self): + return self.grid['logg'] + + @property + def Teff(self): + return self.grid['Teff'] + + @property + def Z(self): + return self.grid['Z'] + + @property + def logZ(self): + return self.grid['logz']
+
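+# Sketch: draw the (logT, logg) coverage polygon returned by bbox()
+# (assumes the kurucz2004.grid.fits data file is installed and matplotlib is available).
+def _plot_kurucz_bbox():
+    import matplotlib.pyplot as plt
+    box = Kurucz().bbox()
+    plt.plot(box[:, 0], box[:, 1], 'k-')
+    plt.xlabel('log Teff')
+    plt.ylabel('log g')
+    plt.gca().invert_xaxis()   # HR-diagram style axes
+    plt.gca().invert_yaxis()
+    plt.show()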
+ +
+ +
+
+ +
+
+ + + + + Fork me on GitHub + + + + + + \ No newline at end of file diff --git a/_modules/pystellibs/marcs.html b/_modules/pystellibs/marcs.html new file mode 100644 index 0000000..942aa17 --- /dev/null +++ b/_modules/pystellibs/marcs.html @@ -0,0 +1,329 @@ + + + + + + + + pystellibs.marcs — pystellibs 1.0 documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for pystellibs.marcs

+import numpy as np
+from .stellib import AtmosphereLib
+from .config import libsdir
+from .simpletable import SimpleTable
+try:
+    from astropy.io import fits as pyfits
+except ImportError:
+    import pyfits
+
+
+
[docs]class Marcs(AtmosphereLib): + """ + MARCS stellar atmosphere models + + Gustafsson et al 2008. + + http://marcs.astro.uu.se/ + """ + def __init__(self, *args, **kwargs): + self.name = 'MARCS 2008' + self.source = libsdir + '/marcs.grid.fits' + self._load_() + AtmosphereLib.__init__(self, *args, **kwargs) + + def _load_(self): + with pyfits.open(self.source) as f: + # load data + self._getWaveLength_(f) + self._getTGZ_(f) + self._getSpectra_(f) + self._getWaveLength_units(f) + + def _getWaveLength_units(self, f): + self.wavelength_unit = 'angstrom' + + def _getWaveLength_(self, f): + self._wavelength = f[0].data[-1] + + def _getTGZ_(self, f): + self.grid = SimpleTable(f[1].data) + self.grid.header.update(f[1].header.items()) + self.grid.header['NAME'] = 'TGZ' + + def _getSpectra_(self, f): + self.spectra = f[0].data[:-1] + +
[docs] def bbox(self, dlogT=0.05, dlogg=0.25): + """ Boundary of MARCS library + + Parameters + ---------- + dlogT: float + log-temperature tolerance before extrapolation limit + + dlogg: float + log-g tolerance before extrapolation limit + + Returns + ------- + bbox: ndarray + (logT, logg) edges of the bounding polygon + """ + bbox = [(3.39794 - dlogT, 5.500 + dlogg), + (3.39794 - dlogT, 3.000 - dlogg), + (3.47700 - dlogT, 3.000 - dlogg), + (3.47700 - dlogT, 0.000 - dlogg), + (3.51853 - dlogT, 0.000 - dlogg), + (3.51853 - dlogT, -0.500 - dlogg), + (3.62903 - dlogT, -0.5000 - dlogg), + (3.62903 - dlogT, 0.0000 - dlogg), + (3.720, 0.000 - dlogg), + (3.778 + dlogT, 0.500), + (3.829 + dlogT, 1.000), + (3.860 + dlogT, 1.500), + (3.906, 2.000 - dlogg), + (3.906 + dlogT, 2.000), + (3.906 + dlogT, 2.500), + (3.906 + dlogT, 3.000), + (3.906 + dlogT, 3.500), + (3.906 + dlogT, 4.000), + (3.906 + dlogT, 4.500), + (3.906 + dlogT, 5.000 + dlogg), + (3.591 + dlogT, 5.000 + dlogg), + (3.591 + dlogT, 5.500 + dlogg) + ] + + return np.array(bbox)
+ +
[docs] def get_interpolation_data(self): + """ interpolation needs alpha """ + return np.array([self.logT, self.logg, self.logZ, self.alpha]).T
+ + @property + def logT(self): + return np.log10(self.grid['teff']) + + @property + def logg(self): + return self.grid['logg'] + + @property + def Teff(self): + return self.grid['teff'] + + @property + def Z(self): + return 10 ** self.logZ + + @property + def logZ(self): + return self.grid['logz'] + + @property + def alpha(self): + return self.grid['alpha'] + +
[docs] def generate_stellar_spectrum(self, logT, logg, logL, Z, alpha=0., + raise_extrapolation=True, **kwargs): + """ Generates individual spectrum for the given stars APs and the + stellar library + + Returns NaN spectra if the boundary conditions are not met (no extrapolation) + + Parameters + ---------- + logT: float + temperature + + logg: float + log-gravity + + logL: float + log-luminosity + + Z: float + metallicity + + alpha: float + alpha element + + raise_extrapolation: bool + if set throw error on extrapolation + + null: value + value of the flux when extrapolation and raise_extrapolation is not set + + returns + ------- + s0: ndarray, shape=(len(stars), len(l0)) + array of spectra, one per input star + Spectrum in ergs/s/AA or ergs/s/AA/Lsun + """ + null_value = kwargs.pop('null', np.nan) + + # weights to apply during the interpolation (note that radii must be in cm) + weights = self.get_weights(logT, logg, logL) + logZ = np.log10(Z) + + l0 = self.wavelength + + # check boundary conditions, keep the data but do not compute the sed + # if outside + if not self.points_inside(np.atleast_2d([logT, logg]))[0]: + if raise_extrapolation: + raise RuntimeError('Outside library interpolation range') + else: + return l0, np.full(len(self.wavelength), null_value) + + aps = logT, logg, logZ, alpha + spec = self.interpolator.interp(aps) * weights + + return spec
+ +
[docs] def generate_individual_spectra(self, stars, nthreads=0, **kwargs): + """ Generates individual spectra for the given stars and stellar library + + Returns NaN spectra if the boundary conditions are not met (no extrapolation) + + Parameters + ---------- + stars: Table + contains at least (logT, logg, logL, Z) of the considered stars + + returns + ------- + l0: ndarray, ndim=1 + wavelength definition of the spectra + wavelength in AA + + s0: ndarray, shape=(len(stars), len(l0)) + array of spectra, one per input star + Spectrum in ergs/s/AA or ergs/s/AA/Lsun + """ + null_value = kwargs.pop('null', np.nan) + ndata = len(stars) + logT, logg, logL, Z = stars['logT'], stars['logg'], stars['logL'], stars['Z'] + try: + alpha = stars['alpha'] + except: + alpha = np.zeros_like(logT) + + # weights to apply during the interpolation (note that radii must be in cm) + weights = self.get_weights(logT, logg, logL) + + # check boundary conditions, keep the data but do not compute the sed + # if outside + bound = self.points_inside(np.array([logT, logg]).T) + specs = np.empty((ndata, len(self._wavelength)), dtype=float) + specs[~bound] = np.full(len(self.wavelength), null_value) + + logZ = np.log10(Z) + aps = np.array([logT, logg, logZ, alpha]).T + s = self.interpolator.interp(aps[bound]) * weights[bound, None] + specs[bound] = s + + l0 = self.wavelength + specs = specs * self.flux_units + + return l0, specs
+
+ +
+ +
+
+ +
+
+ + + + + Fork me on GitHub + + + + + + \ No newline at end of file diff --git a/_modules/pystellibs/munari.html b/_modules/pystellibs/munari.html new file mode 100644 index 0000000..df6bd82 --- /dev/null +++ b/_modules/pystellibs/munari.html @@ -0,0 +1,212 @@ + + + + + + + + pystellibs.munari — pystellibs 1.0 documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for pystellibs.munari

+import numpy as np
+from .stellib import AtmosphereLib
+from .config import libsdir
+from .simpletable import SimpleTable
+try:
+    from astropy.io import fits as pyfits
+except ImportError:
+    import pyfits
+
+
+
[docs]class Munari(AtmosphereLib): + """ + ATLAS9 stellar atmospheres providing higher res than Kurucz + medium resolution (1 Ang/pix) in optical (2500-10500 Ang) + + References + ---------- + + Paper: Munari et al. 2005 A&A 442 1127 + http://adsabs.harvard.edu/abs/2005A%26A...442.1127M + + Files available at: http://archives.pd.astro.it/2500-10500/ + """ + def __init__(self, *args, **kwargs): + self.name = 'Munari' + self.source = libsdir + '/atlas9-munari.hires.grid.fits' + self._load_() + AtmosphereLib.__init__(self, *args, **kwargs) + + def _load_(self): + with pyfits.open(self.source) as f: + # load data + self._getWaveLength_(f) + self._getTGZ_(f) + self._getSpectra_(f) + self._getWaveLength_units(f) + + def _getWaveLength_units(self, f): + self.wavelength_unit = 'angstrom' + + def _getWaveLength_(self, f): + self._wavelength = f[0].data[-1] + + def _getTGZ_(self, f): + self.grid = SimpleTable(f[1].data) + self.grid.header.update(f[1].header.items()) + self.grid.header['NAME'] = 'TGZ' + + def _getSpectra_(self, f): + self.spectra = f[0].data[:-1] + +
[docs] def bbox(self, dlogT=0.05, dlogg=0.25): + """ Boundary of Munari library + + Parameters + ---------- + dlogT: float + log-temperature tolerance before extrapolation limit + + dlogg: float + log-g tolerance before extrapolation limit + + Returns + ------- + bbox: ndarray + (logT, logg) edges of the bounding polygon + """ + bbox = [(3.54407 - dlogT, 5.0 + dlogg), + (3.54407 - dlogT, 0.0 - dlogg), + (3.77815 + dlogT, 0.0 - dlogg), + (3.87506 + dlogT, 0.5 - dlogg), + (3.91645 + dlogT, 1.0 - dlogg), + (3.95424 + dlogT, 1.5 - dlogg), + (3.98900 + dlogT, 2.0 - dlogg), + (3.98900 + dlogT, 5.0 + dlogg), + (3.54407 - dlogT, 5.0 + dlogg)] + + return np.array(bbox)
+ +
[docs] def get_interpolation_data(self): + """ interpolation data: (logT, logg, logZ); this grid has no alpha dimension """ + return np.array([self.logT, self.logg, self.logZ]).T
+ + @property + def logT(self): + return self.grid['logT'] + + @property + def logg(self): + return self.grid['logg'] + + @property + def Teff(self): + return self.grid['Teff'] + + @property + def Z(self): + return self.grid['Z'] + + @property + def logZ(self): + return self.grid['logZ']
+
+ +
+ +
+
+ +
+
+ + + + + Fork me on GitHub + + + + + + \ No newline at end of file diff --git a/_modules/pystellibs/pbar.html b/_modules/pystellibs/pbar.html new file mode 100644 index 0000000..9c31882 --- /dev/null +++ b/_modules/pystellibs/pbar.html @@ -0,0 +1,420 @@ + + + + + + + + pystellibs.pbar — pystellibs 1.0 documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for pystellibs.pbar

+"""
+Simple progressbar
+==================
+
+This package implements a single progress bar class that can be used to decorate
+an iterator, a function or even standalone.
+
+The format of the meter is flexible and can display, along with the progress
+bar itself, the running time, an ETA, and the iteration rate.
+
+An example is::
+    description    [----------] k/n  10% [time: 00:00:00, eta: 00:00:00, 2.7 iters/sec]
+"""
+import time as _time
+import sys
+import signal
+from array import array
+
+try:
+    from fcntl import ioctl
+    import termios
+except ImportError:
+    pass
+
+
+__all__ = ['Pbar']
+
+
+
[docs]class Pbar(object): + """ + make a progress string in a shape of:: + + [----------] k/n 10% [time: 00:00:00, eta: 00:00:00, 2.7 iters/sec] + + + Attributes + ---------- + + time: bool, optional (default: True) + if set, add the runtime information + + eta: bool, optional (default: True) + if set, add an estimated time to completion + + rate: bool, optional (default: True) + if set, add the rate information + + length: int, optional (default: None) + number of characters showing the progress meter itself + if None, the meter will adapt to the buffer width + + TODO: make it variable with the buffer length + + keep: bool, optional (default: True) + If not set, deletes its traces from screen after completion + + file: buffer + the buffer to write into + + mininterval: float (default: 0.5) + minimum time in seconds between two updates of the meter + + miniters: int, optional (default: 1) + minimum iteration number between two updates of the meter + + units: str, optional (default: 'iters') + unit of the iteration + """ + def __init__(self, maxval=None, desc=None, time=True, eta=True, rate=True, length=None, + file=None, keep=True, mininterval=0.5, miniters=1, units='iters', **kwargs): + self.time = time + self.eta = eta + self.rate = rate + self.desc = desc or '' + self.units = units + self.file = file or sys.stdout + self._last_print_len = 0 + self.keep = keep + self.mininterval = mininterval + self.miniters = miniters + self._auto_width = True + self.length = 10 + if length is not None: + self.length = length + self._auto_width = False + #backward compatibility + self._start_t = _time.time() + self._maxval = maxval + if 'txt' in kwargs: + self.desc = kwargs['txt'] + + def _buffer_width(self): + """ returns the width of the buffer when available """ + try: + self.handle_resize(None, None) + signal.signal(signal.SIGWINCH, self.handle_resize) + self._auto_width = True + except: + self.term_width = 79 + self._auto_width = False + + return self.term_width + +
[docs] def handle_resize(self, signum, frame): + h, w = array('h', ioctl(self.file, termios.TIOCGWINSZ, '\0' * 8))[:2] + self.term_width = w
+ +
[docs] @staticmethod + def format_interval(t): + """ make a human readable time interval decomposed into days, hours, + minutes and seconds + + Parameters + ---------- + t: int + interval in seconds + + Returns + ------- + txt: str + string representing the interval + (format: <days>d <hrs>:<min>:<sec>) + """ + mins, s = divmod(int(t), 60) + h, m = divmod(mins, 60) + d, h = divmod(h, 24) + + txt = '{m:02d}:{s:02d}' + if h: + txt = '{h:02d}:' + txt + if d: + txt = '{d:d}d ' + txt + return txt.format(d=d, h=h, m=m, s=s)
+ +
[docs] def build_str_meter(self, n, total, elapsed): + """ + make a progress string in a shape of:: + + [----------] k/n 10% [time: 00:00:00, eta: 00:00:00, 2.7 iters/sec] + + Parameters + ---------- + n: int + number of finished iterations + + total: int + total number of iterations, or None + + elapsed: int + number of seconds passed since start + + Returns + ------- + txt: str + string representing the meter + """ + if n > total: + total = None + + vals = {'n': n} + vals['elapsed'] = self.format_interval(elapsed) + vals['rate'] = '{0:5.2f}'.format((n / elapsed)) if elapsed else '?' + vals['units'] = self.units + + if not total: + txt = '{n:d}' + else: + txt = '|{bar:s}| {n:d}/{total:d} {percent:s}' + + if self.time or self.eta or self.rate: + txt += ' [' + info = [] + if self.time: + info.append('time: {elapsed:s}') + if self.eta and total: + info.append('eta: {left:s}') + if self.rate: + info.append('{rate:s} {units:s}/sec') + txt += ', '.join(info) + ']' + + if not total: + return txt.format(**vals) + + frac = float(n) / total + bar_length = int(frac * self.length) + vals['bar'] = '#' * bar_length + '-' * (self.length - bar_length) + vals['percent'] = '{0:3.0%}'.format(frac) + vals['left'] = self.format_interval(elapsed / n * (total - n)) if n else '?' + vals['total'] = total + + if self._auto_width: + full_length = self._buffer_width() + current_length = len(txt.format(**vals)) + new_length = full_length - current_length + self.length - 1 - len(self.desc) + frac = float(n) / total + bar_length = int(frac * new_length) + vals['bar'] = '#' * bar_length + '-' * (new_length - bar_length) + + return txt.format(**vals)
+ +
[docs] def print_status(self, s): + """ print a status s on the last file line and clean the rest of the line + + Parameters + ---------- + s: str + message to write + """ + self.file.write('\r' + s + ' ' * max(self._last_print_len - len(s), 0)) + self.file.flush() + self._last_print_len = len(s)
+ +
[docs] def iterover(self, iterable, total=None): + """ + Get an iterable object, and return an iterator which acts exactly like the + iterable, but prints a progress meter and updates it every time a value is + requested. + + Parameters + ---------- + iterable: generator or iterable object + object to iter over. + + total: int, optional + the number of iterations is assumed to be the length of the + iterator. But sometimes the iterable has no associated length or + its length is not the actual number of future iterations. In this + case, total can be set to define the number of iterations. + + Returns + ------- + gen: generator + pass the values from the initial iterator + """ + if total is None: + try: + total = len(iterable) + except TypeError: + total = self._maxval + + prefix = '{0:s}:'.format(self.desc) if self.desc else '' + + self.print_status(prefix + self.build_str_meter(0, total, 0)) + last_print_n = 0 + + start_t = last_print_t = _time.time() + + n = 0 + for obj in iterable: + yield obj + n += 1 + if n - last_print_n >= self.miniters: + cur_t = _time.time() + if cur_t - last_print_t >= self.mininterval: + self.print_status(prefix + self.build_str_meter(n, total, cur_t - start_t)) + last_print_n = n + last_print_t = cur_t + + if not self.keep: + self.print_status('') + sys.stdout.write('\r') + else: + if last_print_n < n: + cur_t = _time.time() + self.print_status(prefix + self.build_str_meter(n, total, cur_t - start_t)) + self.file.write('\n')
+ + def __enter__(self): + return self + + def __exit__(self, *args, **kwargs): + return False + +
[docs] def update(self, n, desc=None, total=None): + """ Kept for backward compatibility and the decorator feature + + Parameters + ---------- + n: int + force iteration number n + + desc: str + update description string + + total: int + update the total number of iterations + """ + if total is None: + total = self._maxval + if desc is not None: + self.desc = desc + prefix = '{0:s}:'.format(self.desc) if self.desc else '' + cur_t = _time.time() + self.print_status(prefix + self.build_str_meter(n, total, cur_t - self._start_t))
+ +
[docs] def decorator(self, func): + """ Provide a function decorator allowing for counting calls and rates + """ + self._deco_iter = 0 + self.desc = func.__name__ + + def deco(*args, **kwargs): + # start the time at the first call + if self._deco_iter == 0: + self._start_t = _time.time() + self.update(self._deco_iter) + self._deco_iter += 1 + return func(*args, **kwargs) + + return deco
+
+ +
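+# Example: wrap an iterable with a progress meter (runs only when the module
+# is executed directly, not on import).
+if __name__ == '__main__':
+    _pb = Pbar(desc='demo', units='items')
+    for _ in _pb.iterover(range(200)):
+        _time.sleep(0.01)
+    print(Pbar.format_interval(3661))   # -> '01:01:01'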
+ +
+
+ +
+
+ + + + + Fork me on GitHub + + + + + + \ No newline at end of file diff --git a/_modules/pystellibs/rauch.html b/_modules/pystellibs/rauch.html new file mode 100644 index 0000000..33cfa18 --- /dev/null +++ b/_modules/pystellibs/rauch.html @@ -0,0 +1,217 @@ + + + + + + + + pystellibs.rauch — pystellibs 1.0 documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for pystellibs.rauch

+""" Rauch White Dwarfs stellar atmospheres """
+import numpy as np
+from .simpletable import SimpleTable
+try:
+    from astropy.io import fits as pyfits
+except ImportError:
+    import pyfits
+
+from .stellib import Stellib
+from .config import libsdir
+
+
+
[docs]class Rauch(Stellib): + """ + Rauch White Dwarfs stellar atmospheres + + References + ---------- + + Rauch, T.; Werner, K.; Bohlin, R.; Kruk, J. W., "The virtual observatory service + TheoSSA: Establishing a database of synthetic stellar flux standards. I. NLTE + spectral analysis of the DA-type white dwarf G191-B2B" + """ + def __init__(self, *args, **kwargs): + self.name = 'Rauch' + self.source = libsdir + '/stellib_Rauch.grid.fits' + self._load_() + Stellib.__init__(self, *args, **kwargs) + + def _load_(self): + with pyfits.open(self.source) as f: + # load data + self._getWaveLength_(f) + self._getTGZ_(f) + self._getSpectra_(f) + self._getWaveLength_units(f) + + def _getWaveLength_units(self, f): + self.wavelength_unit = 'angstrom' + + def _getWaveLength_(self, f): + self._wavelength = f[0].data[-1] + + def _getTGZ_(self, f): + self.grid = SimpleTable(f[1].data) + self.grid.header.update(f[1].header.items()) + self.grid.header['NAME'] = 'TGZ' + + def _getSpectra_(self, f): + self.spectra = f[0].data[:-1] + +
[docs] def bbox(self, dlogT=0.05, dlogg=0.25): + """ Boundary of Rauch library + + Parameters + ---------- + dlogT: float + log-temperature tolerance before extrapolation limit + + dlogg: float + log-g tolerance before extrapolation limit + + Returns + ------- + bbox: ndarray + (logT, logg) edges of the bounding polygon + """ + bbox = [(4.700 - dlogT, 8.000 + dlogg), + (4.700 - dlogT, 5.000 - dlogg), + (5.000 + dlogT, 5.000 - dlogg), + (5.280 + dlogT, 6.000 - dlogg), + (5.280 + dlogT, 8.000 + dlogg), + (4.700 - dlogT, 8.000 + dlogg) ] + + return np.array(bbox)
+ + @property + def logT(self): + return self.grid['logT'] + + @property + def logg(self): + return self.grid['logg'] + + @property + def Teff(self): + return 10 ** self.grid['logT'] + + @property + def Z(self): + return self.grid['Z'] + + @property + def logZ(self): + return np.log10(self.Z) + + @property + def NHI(self): + return self.grid['NHI'] + + @property + def NHeI(self): + return self.grid['NHeI'] + + @property + def NHeII(self): + return self.grid['NHeII']
+
+ +
+ +
+
+ +
+
+ + + + + Fork me on GitHub + + + + + + \ No newline at end of file diff --git a/_modules/pystellibs/simpletable.html b/_modules/pystellibs/simpletable.html new file mode 100644 index 0000000..699a4ac --- /dev/null +++ b/_modules/pystellibs/simpletable.html @@ -0,0 +1,2956 @@ + + + + + + + + pystellibs.simpletable — pystellibs 1.0 documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for pystellibs.simpletable

+""" This file implements a Table class
+    that is designed to be the basis of any format
+
+Requirements
+------------
+
+* FITS format:
+    * astropy:
+        provides a replacement for pyfits
+        pyfits can still be used instead but astropy is now the default
+
+* HDF5 format:
+    * pytables
+
+RuntimeError will be raised when writing to a format whose associated
+package is missing.
+
+
+.. code-block:: python
+
+    >>> t = SimpleTable('path/mytable.csv')
+    # get a subset of columns only
+    >>> s = t.get('M_* logTe logLo U B V I J K')
+    # set some aliases
+    >>> t.set_alias('logT', 'logTe')
+    >>> t.set_alias('logL', 'logLLo')
+    # make a query on one or multiple column
+    >>> q = s.selectWhere('logT logL', '(J > 2) & (10 ** logT > 5000)')
+    # q is also a table object
+    >>> q.plot('logT', 'logL', ',')
+    # makes a simple plot
+    >>> s.write('newtable.fits')
+    # export the initial subtable to a new file
+"""
+from __future__ import (absolute_import, division, print_function)
+
+__version__ = '3.0'
+__all__ = ['AstroHelpers', 'AstroTable', 'SimpleTable', 'stats']
+
+import sys
+import math
+from copy import deepcopy
+import re
+import itertools
+from functools import wraps, partial
+import numpy as np
+from numpy import deg2rad, rad2deg, sin, cos, sqrt, arcsin, arctan2
+from numpy.lib import recfunctions
+import types
+
+try:
+    from astropy.io import fits as pyfits
+except ImportError:
+    import pyfits
+except:
+    pyfits = None
+
+try:
+    import tables
+except ImportError:
+    tables = None
+
+try:
+    import pandas as _pd
+except ImportError:
+    _pd = None
+
+
+# ==============================================================================
+# Python 3 compatibility behavior
+# ==============================================================================
+# remap some python 2 built-ins on to py3k behavior or equivalent
+# Most of them become generators
+import operator
+
+PY3 = sys.version_info[0] > 2
+
+if PY3:
+    iteritems = operator.methodcaller('items')
+    itervalues = operator.methodcaller('values')
+    basestring = (str, bytes)
+else:
+    range = xrange
+    from itertools import izip as zip
+    iteritems = operator.methodcaller('iteritems')
+    itervalues = operator.methodcaller('itervalues')
+    basestring = (str, unicode)
+
+
+# ==============================================================================
+# Specials -- special functions
+# ==============================================================================
+
+def pretty_size_print(num_bytes):
+    """
+    Output number of bytes in a human readable format
+
+    Parameters
+    ----------
+    num_bytes: int
+        number of bytes to convert
+
+    returns
+    -------
+    output: str
+        string representation of the size with appropriate unit scale
+    """
+    if num_bytes is None:
+        return
+
+    KiB = 1024
+    MiB = KiB * KiB
+    GiB = KiB * MiB
+    TiB = KiB * GiB
+    PiB = KiB * TiB
+    EiB = KiB * PiB
+    ZiB = KiB * EiB
+    YiB = KiB * ZiB
+
+    if num_bytes > YiB:
+        output = '%.3g YB' % (num_bytes / YiB)
+    elif num_bytes > ZiB:
+        output = '%.3g ZB' % (num_bytes / ZiB)
+    elif num_bytes > EiB:
+        output = '%.3g EB' % (num_bytes / EiB)
+    elif num_bytes > PiB:
+        output = '%.3g PB' % (num_bytes / PiB)
+    elif num_bytes > TiB:
+        output = '%.3g TB' % (num_bytes / TiB)
+    elif num_bytes > GiB:
+        output = '%.3g GB' % (num_bytes / GiB)
+    elif num_bytes > MiB:
+        output = '%.3g MB' % (num_bytes / MiB)
+    elif num_bytes > KiB:
+        output = '%.3g KB' % (num_bytes / KiB)
+    else:
+        output = '%.3g Bytes' % (num_bytes)
+
+    return output
+
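+# Illustrative usage sketch (added for clarity, not part of the original
+# module): pretty_size_print scales a raw byte count by powers of 1024.
+#
+#   >>> pretty_size_print(2048)
+#   '2 KB'
+#   >>> pretty_size_print(3 * 1024 ** 3)
+#   '3 GB'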
+
+def _fits_read_header(hdr):
+    """
+    Convert pyfits header into dictionary with relevant values
+
+    Parameters
+    ----------
+
+    hdr: pyfits.Header
+        fits unit
+
+    Returns
+    -------
+    header: dict
+        header dictionary
+
+    alias: dict
+        aliases
+
+    units: dict
+        units
+
+    comments: dict
+        comments/description of keywords
+    """
+    header = {}
+    alias = {}
+    units = {}
+    comments = {}
+
+    # generic cards
+    genTerms = ['XTENSION', 'BITPIX', 'NAXIS', 'NAXIS1',
+                'NAXIS2', 'PCOUNT', 'GCOUNT', 'TFIELDS',
+                'EXTNAME']
+    fieldTerms = ['TTYPE', 'TFORM', 'TUNIT', 'ALIAS']
+
+    # read col comments
+    for k, name, comment in hdr.ascard['TTYPE*']:
+        comments[name] = comment
+        u = hdr.get(k.replace('TYPE', 'UNIT'), None)
+        if u is not None:
+            units[name] = u
+
+    for k, val, _ in hdr.ascard['ALIAS*']:
+        al, orig = val.split('=')
+        alias[al] = orig
+
+    # other specific keywords: COMMENT, HISTORY
+    header_comments = []
+    header_history = []
+    for k, v in hdr.items():
+        if (k not in genTerms) and (k[:5] not in fieldTerms):
+            if (k == 'COMMENT'):
+                header_comments.append(v)
+            elif (k == 'HISTORY'):
+                header_history.append(v)
+            else:
+                header[k] = v
+
+    # COMMENT, HISTORY polish
+    if len(header_comments) > 0:
+        header['COMMENT'] = '\n'.join(header_comments)
+    if len(header_history) > 0:
+        header['HISTORY'] = '\n'.join(header_history)
+
+    if 'EXTNAME' in hdr:
+        header['NAME'] = hdr['EXTNAME']
+
+    return header, alias, units, comments
+
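+# Illustrative usage sketch (not part of the original module); the FITS file
+# name below is hypothetical and the unpacking follows the return order above:
+#
+#   >>> hdr = pyfits.getheader('mytable.fits', 1)
+#   >>> header, alias, units, comments = _fits_read_header(hdr)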
+
+def _fits_generate_header(tab):
+    """ Generate the corresponding fits Header that contains all necessary info
+
+    Parameters
+    ----------
+
+    tab: SimpleTable instance
+        table
+
+    Returns
+    -------
+    hdr: pyfits.Header
+        header instance
+    """
+    # get column cards
+
+    cards = []
+
+    # names units and comments
+    for e, k in enumerate(tab.keys()):
+        cards.append(('TTYPE{0:d}'.format(e + 1), k, tab._desc.get(k, '')))
+        u = tab._units.get(k, '')
+        if u not in ['', 'None', None]:
+            cards.append(('TUNIT{0:d}'.format(e + 1), tab._units.get(k, ''),
+                          'unit of {0:s}'.format(k)))
+
+    # add aliases
+    for e, v in enumerate(tab._aliases.items()):
+        cards.append( ('ALIAS{0:d}'.format(e + 1), '='.join(v), '') )
+
+    if tab.header['NAME'] not in ['', 'None', None, 'No Name']:
+        cards.append(('EXTNAME', tab.header['NAME'], ''))
+
+    hdr = pyfits.Header(cards)
+
+    for k, v in tab.header.items():
+        if (v not in ['', 'None', None]) & (k != 'NAME'):
+            if (k != 'COMMENT') & (k != 'HISTORY'):
+                hdr.update(k, v)
+            else:
+                txt = v.split('\n')
+                for j in txt:
+                    if k == 'COMMENT':
+                        hdr.add_comment(j)
+                    elif k == 'HISTORY':
+                        hdr.add_history(j)
+    return hdr
+
+
+def _fits_writeto(filename, data, header=None, output_verify='exception',
+                  clobber=False, checksum=False):
+    """
+    Create a new FITS file using the supplied data/header.
+    Patched version of pyfits to correctly include provided header
+
+    Parameters
+    ----------
+    filename : file path, file object, or file like object
+        File to write to.  If opened, must be opened in a writeable binary
+        mode such as 'wb' or 'ab+'.
+
+    data : array, record array, or groups data object
+        data to write to the new file
+
+    header : `Header` object, optional
+        the header associated with ``data``. If `None`, a header
+        of the appropriate type is created for the supplied data. This
+        argument is optional.
+
+    output_verify : str
+        Output verification option.  Must be one of ``"fix"``, ``"silentfix"``,
+        ``"ignore"``, ``"warn"``, or ``"exception"``.  May also be any
+        combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``,
+        ``+warn``, or ``+exception" (e.g. ``"fix+warn"``).  See :ref:`verify`
+        for more info.
+
+    clobber : bool, optional
+        If `True`, and if filename already exists, it will overwrite
+        the file.  Default is `False`.
+
+    checksum : bool, optional
+        If `True`, adds both ``DATASUM`` and ``CHECKSUM`` cards to the
+        headers of all HDU's written to the file
+    """
+
+    hdu = pyfits.convenience._makehdu(data, header)
+    hdu.header.update(header.cards)
+    if hdu.is_image and not isinstance(hdu, pyfits.PrimaryHDU):
+        hdu = pyfits.PrimaryHDU(data, header=header)
+    hdu.writeto(filename, clobber=clobber, output_verify=output_verify,
+                checksum=checksum)
+
+
+def _fits_append(filename, data, header=None, checksum=False, verify=True,
+                 **kwargs):
+    """
+    Append the header/data to FITS file if filename exists, create if not.
+
+    If only ``data`` is supplied, a minimal header is created.
+    Patched version of pyfits to correctly include provided header
+
+    Parameters
+    ----------
+    filename : file path, file object, or file like object
+        File to write to.  If opened, must be opened for update (rb+) unless it
+        is a new file, then it must be opened for append (ab+).  A file or
+        `~gzip.GzipFile` object opened for update will be closed after return.
+
+    data : array, table, or group data object
+        the new data used for appending
+
+    header : `Header` object, optional
+        The header associated with ``data``.  If `None`, an appropriate header
+        will be created for the data object supplied.
+
+    checksum : bool, optional
+        When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the header
+        of the HDU when written to the file.
+
+    verify : bool, optional
+        When `True`, the existing FITS file will be read in to verify it for
+        correctness before appending.  When `False`, content is simply appended
+        to the end of the file.  Setting ``verify`` to `False` can be much
+        faster.
+
+    kwargs
+        Any additional keyword arguments to be passed to
+        `astropy.io.fits.open`.
+    """
+
+    name, closed, noexist_or_empty = pyfits.convenience._stat_filename_or_fileobj(filename)
+
+    if noexist_or_empty:
+        #
+        # The input file or file like object either doesn't exist or is
+        # empty.  Use the writeto convenience function to write the
+        # output to the empty object.
+        #
+        _fits_writeto(filename, data, header, checksum=checksum, **kwargs)
+    else:
+        hdu = pyfits.convenience._makehdu(data, header)
+        hdu.header.update(header.cards)
+
+        if isinstance(hdu, pyfits.PrimaryHDU):
+            hdu = pyfits.ImageHDU(data, header)
+
+        if verify or not closed:
+            f = pyfits.convenience.fitsopen(filename, mode='append')
+            f.append(hdu)
+
+            # Set a flag in the HDU so that only this HDU gets a checksum when
+            # writing the file.
+            hdu._output_checksum = checksum
+            f.close(closed=closed)
+        else:
+            f = pyfits.convenience._File(filename, mode='append')
+            hdu._output_checksum = checksum
+            hdu._writeto(f)
+            f.close()
+
+
+def _ascii_read_header(fname, comments='#', delimiter=None, commentedHeader=True,
+                       *args, **kwargs):
+    """
+    Read ASCII/CSV header
+
+    Parameters
+    ----------
+    fname: str or stream
+        File, filename, or generator to read.
+        Note that generators should return byte strings for Python 3k.
+
+    comments: str, optional
+        The character used to indicate the start of a comment;
+        default: '#'.
+
+    delimiter: str, optional
+        The string used to separate values.  By default, this is any
+        whitespace.
+
+    commentedHeader: bool, optional
+        if set, the last line of the header is expected to be the column titles
+
+    Returns
+    -------
+    nlines: int
+        number of lines from the header
+
+    header: dict
+        header dictionary
+
+    alias: dict
+        aliases
+
+    units: dict
+        units
+
+    comments: dict
+        comments/description of keywords
+
+    names: sequence
+        sequence or str, first data line after header, expected to be the column
+        names.
+    """
+    if hasattr(fname, 'read'):
+        stream = fname
+    else:
+        stream = open(fname, 'r')
+
+    header = {}
+    alias = {}
+    units = {}
+    desc = {}
+
+    def parseStrNone(v):
+        """ robust parse """
+        _v = v.split()
+        if (len(_v) == 0):
+            return None
+        else:
+            _v = ' '.join(_v)
+            if (_v.lower()) == 'none' or (_v.lower() == 'null'):
+                return None
+            else:
+                return _v
+
+    done = False
+    oldline = None
+    lasthdr = None
+    nlines = 0
+    header.setdefault('COMMENT', '')
+    header.setdefault('HISTORY', '')
+    while done is False:
+        line = stream.readline()[:-1]  # getting rid of '\n'
+        nlines += 1
+        if (line[0] == comments):  # header part
+            if (len(line) > 2):
+                if line[1] == comments:  # column meta data
+                    # column meta is expected to start with ##
+                    k = line[2:].split('\t')
+                    colname = k[0].strip()
+                    colunit = None
+                    colcomm = None
+                    if len(k) > 1:
+                        colunit = parseStrNone(k[1])
+                    if len(k) > 2:
+                        colcomm = parseStrNone(k[2])
+
+                    if colunit is not None:
+                        units[colname] = colunit
+                    if colcomm is not None:
+                        desc[colname] = colcomm
+                else:
+                    # header is expected as "# key \t value"
+                    k = line[1:].split('\t')
+                    if len(k) > 1:
+                        key = k[0].strip()  # remove trailing spaces
+                        val = ' '.join(k[1:]).strip()
+
+                        if key in ('', None, 'None', 'NONE', 'COMMENT'):
+                            header['COMMENT'] = header['COMMENT'] + '\n' + val
+                        elif key in ('HISTORY', ):
+                            header['HISTORY'] = header['HISTORY'] + '\n' + val
+                        elif 'alias' in key.lower():
+                            # take care of aliases
+                            al, orig = val.split('=')
+                            alias[al] = orig
+                        else:
+                            header[key] = val
+                        lasthdr = key
+                    else:
+                        header['COMMENT'] = header['COMMENT'] + '\n' + line[1:]
+        else:
+            done = True
+            if commentedHeader and (oldline is not None):
+                names = oldline.split(delimiter)
+                nlines -= 1
+                if lasthdr == names[0]:
+                    header.pop(lasthdr)
+            else:
+                names = line.split(delimiter)
+        oldline = line[1:]
+
+    if not hasattr(fname, 'read'):
+        stream.close()
+    else:
+        stream.seek(stream.tell() - len(line))
+        nlines = 0  # make sure the value is set to the current position
+
+    return nlines, header, units, desc, alias, names
+
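+# Sketch of the header layout this parser accepts (an assumed example, not
+# taken from the original documentation); fields on a header line are
+# tab-separated, '##' lines carry column metadata, and the last commented
+# line before the data holds the column names:
+#
+#   # NAME<TAB>my catalog
+#   # alias<TAB>logT=logTe
+#   ## logTe<TAB>K<TAB>effective temperature (log)
+#   # logTe logL
+#   3.76 0.25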
+
+def _hdf5_write_data(filename, data, tablename=None, mode='w', append=False,
+                     header={}, units={}, comments={}, aliases={}, **kwargs):
+    """ Write table into HDF format
+
+    Parameters
+    ----------
+    filename : file path, or tables.File instance
+        File to write to.  If opened, must be opened and writable (mode='w' or 'a')
+
+    data: recarray
+        data to write to the new file
+
+    tablename: str
+        path of the node including table's name
+
+    mode: str
+        in ('w', 'a') mode to open the file
+
+    append: bool
+        if set, append data to an existing table
+
+    header: dict
+        table header
+
+    units: dict
+        dictionary of units
+
+    aliases: dict
+        aliases
+
+    comments: dict
+        comments/description of keywords
+
+    .. note::
+        other keywords are forwarded to :func:`tables.openFile`
+    """
+
+    if hasattr(filename, 'read'):
+        raise Exception("HDF backend does not implement stream")
+
+    if append is True:
+        mode = 'a'
+    silent = kwargs.pop('silent', False)
+
+    if isinstance(filename, tables.File):
+        if (filename.mode != mode) & (mode != 'r'):
+            raise tables.FileModeError('The file is already opened in a different mode')
+        hd5 = filename
+    else:
+        hd5 = tables.openFile(filename, mode=mode)
+
+    # check table name and path
+    tablename = tablename or header.get('NAME', None)
+    if tablename in ('', None, 'Noname', 'None'):
+        tablename = '/data'
+
+    w = tablename.split('/')
+    where = '/'.join(w[:-1])
+    name = w[-1]
+    if where in ('', None):
+        where = '/'
+    if where[0] != '/':
+        where = '/' + where
+
+    if append:
+        try:
+            t = hd5.getNode(where + name)
+            t.append(data.astype(t.description._v_dtype))
+            t.flush()
+        except tables.NoSuchNodeError:
+            if not silent:
+                print(("Warning: Table {0} does not exists.  \n A new table will be created").format(where + name))
+            append = False
+
+    if not append:
+        t = hd5.createTable(where, name, data, **kwargs)
+
+        # update header
+        for k, v in header.items():
+            if (k == 'FILTERS') & (float(t.attrs['VERSION']) >= 2.0):
+                t.attrs[k.lower()] = v
+            else:
+                t.attrs[k] = v
+        if 'TITLE' not in header:
+            t.attrs['TITLE'] = name
+
+        # add column descriptions and units
+        for e, colname in enumerate(data.dtype.names):
+            _u = units.get(colname, None)
+            _d = comments.get(colname, None)
+            if _u is not None:
+                t.attrs['FIELD_{0:d}_UNIT'.format(e)] = _u
+            if _d is not None:
+                t.attrs['FIELD_{0:d}_DESC'.format(e)] = _d
+
+        # add aliases
+        for i, (k, v) in enumerate(aliases.items()):
+            t.attrs['ALIAS{0:d}'.format(i)] = '{0:s}={1:s}'.format(k, v)
+
+        t.flush()
+
+    if not isinstance(filename, tables.File):
+        hd5.flush()
+        hd5.close()
+
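+# Illustrative usage sketch (not part of the original module, relies on the
+# legacy pytables API used above); file and table names are hypothetical:
+#
+#   >>> rec = np.rec.fromarrays([[1, 2], [3.0, 4.0]], names='a,b')
+#   >>> _hdf5_write_data('demo.hd5', rec, tablename='/data',
+#   ...                  header={'NAME': 'demo'}, units={'b': 'mag'})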
+
+def _hdf5_read_data(filename, tablename=None, silent=False, *args, **kwargs):
+    """ Generate the corresponding ascii Header that contains all necessary info
+
+    Parameters
+    ----------
+    filename: str
+        file to read from
+
+    tablename: str
+        node containing the table
+
+    silent: bool
+        skip verbose messages
+
+    Returns
+    -------
+    hdr: dict
+        header dictionary
+
+    aliases: dict
+        aliases
+
+    units: dict
+        units per column
+
+    desc: dict
+        column descriptions
+
+    data: recarray
+        table data
+    """
+    source = tables.openFile(filename, *args, **kwargs)
+
+    if tablename is None:
+        node = source.listNodes('/')[0]
+        tablename = node.name
+    else:
+        if tablename[0] != '/':
+            node = source.getNode('/' + tablename)
+        else:
+            node = source.getNode(tablename)
+    if not silent:
+        print("\tLoading table: {0}".format(tablename))
+
+    hdr = {}
+    aliases = {}
+
+    # read header
+    exclude = ['NROWS', 'VERSION', 'CLASS', 'EXTNAME', 'TITLE']
+    for k in node.attrs._v_attrnames:
+        if (k not in exclude):
+            if (k[:5] != 'FIELD') & (k[:5] != 'ALIAS'):
+                hdr[k] = node.attrs[k]
+            elif k[:5] == 'ALIAS':
+                c0, c1 = node.attrs[k].split('=')
+                aliases[c0] = c1
+
+    empty_name = ['', 'None', 'Noname', None]
+    if node.attrs['TITLE'] not in empty_name:
+        hdr['NAME'] = node.attrs['TITLE']
+    else:
+        hdr['NAME'] = '{0:s}/{1:s}'.format(filename, node.name)
+
+    # read column meta
+    units = {}
+    desc = {}
+
+    for (k, colname) in enumerate(node.colnames):
+        _u = getattr(node.attrs, 'FIELD_{0:d}_UNIT'.format(k), None)
+        _d = getattr(node.attrs, 'FIELD_{0:d}_DESC'.format(k), None)
+        if _u is not None:
+            units[colname] = _u
+        if _d is not None:
+            desc[colname] = _d
+
+    data = node[:]
+
+    source.close()
+
+    return hdr, aliases, units, desc, data
+
+
+def _ascii_generate_header(tab, comments='#', delimiter=' ',
+                           commentedHeader=True):
+    """ Generate the corresponding ascii Header that contains all necessary info
+
+    Parameters
+    ----------
+
+    tab: SimpleTable instance
+        table
+
+    comments: str
+        string to prepend header lines
+
+    delimiter: str, optional
+        The string used to separate values.  By default, this is any
+        whitespace.
+
+    commentedHeader: bool, optional
+        if set, the last line of the header is expected to be the column titles
+
+    Returns
+    -------
+    hdr: str
+        string that will be written at the beginning of the file
+    """
+    hdr = []
+
+    if comments is None:
+        comments = ''
+
+    # table header
+    length = max(map(len, tab.header.keys()))
+    fmt = '{{0:s}} {{1:{0:d}s}}\t{{2:s}}'.format(length)
+    for k, v in tab.header.items():
+        for vk in v.split('\n'):
+            if len(vk) > 0:
+                hdr.append(fmt.format(comments, k.upper(), vk.strip()))
+
+    # column metadata
+    hdr.append(comments)  # add empty line
+    length = max(map(len, tab.keys()))
+    fmt = '{{0:s}}{{0:s}} {{1:{0:d}s}}\t{{2:s}}\t{{3:s}}'.format(length)
+    for colname in tab.keys():
+        unit = tab._units.get(colname, 'None')
+        desc = tab._desc.get(colname, 'None')
+        hdr.append(fmt.format(comments, colname, unit, desc))
+
+    # aliases
+    if len(tab._aliases) > 0:
+        hdr.append(comments)  # add empty line
+        for k, v in tab._aliases.items():
+            hdr.append('{0:s} alias\t{1:s}={2:s}'.format(comments, k, v))
+
+    # column names
+    hdr.append(comments)
+    if commentedHeader:
+        hdr.append('{0:s} {1:s}'.format(comments, delimiter.join(tab.keys())))
+    else:
+        hdr.append('{0:s}'.format(delimiter.join(tab.keys())))
+
+    return '\n'.join(hdr)
+
+
+def _latex_writeto(filename, tab, comments='%'):
+    """ Write the data into a latex table format
+
+    Parameters
+    ----------
+    filename: str
+        file or unit to write into
+
+    tab: SimpleTable instance
+        table
+
+    comments: str
+        string to prepend header lines
+
+    """
+    txt = "\\begin{table}\n\\begin{center}\n"
+
+    # add caption
+    tabname = tab.header.get('NAME', None)
+    if tabname not in ['', None, 'None']:
+        txt += "\\caption{{{0:s}}}\n".format(tabname)
+
+    # tabular
+    txt += '\\begin{{tabular}}{{{0:s}}}\n'.format('c' * tab.ncols)
+    txt += tab.pprint(delim=' & ', fields='MAG*', headerChar='', endline='\\\\\n', all=True, ret=True)
+    txt += '\\end{tabular}\n'
+
+    # end table
+    txt += "\\end{center}\n"
+
+    # add notes if any
+    if len(tab._desc) > 0:
+        txt += '\% notes \n\\begin{scriptsize}\n'
+        for e, (k, v) in enumerate(tab._desc.items()):
+            if v not in (None, 'None', 'none', ''):
+                txt += '{0:d} {1:s}: {2:s} \\\\\n'.format(e, k, v)
+        txt += '\\end{scriptsize}\n'
+    txt += "\\end{table}\n"
+    if hasattr(filename, 'write'):
+        filename.write(txt)
+    else:
+        with open(filename, 'w') as unit:
+            unit.write(txt)
+
+
+def _convert_dict_to_structured_ndarray(data):
+    """convert_dict_to_structured_ndarray
+
+    Parameters
+    ----------
+
+    data: dictionary like object
+        data structure which provides iteritems and itervalues
+
+    returns
+    -------
+    tab: structured ndarray
+        structured numpy array
+    """
+    newdtype = []
+    try:
+        for key, dk in iteritems(data):
+            _dk = np.asarray(dk)
+            dtype = _dk.dtype
+            # unknown type is converted to text
+            if dtype.type == np.object_:
+                if len(data) == 0:
+                    longest = 0
+                else:
+                    longest = len(max(_dk, key=len))
+                    _dk = _dk.astype('|%iS' % longest)
+            if _dk.ndim > 1:
+                newdtype.append((str(key), _dk.dtype, (_dk.shape[1],)))
+            else:
+                newdtype.append((str(key), _dk.dtype))
+        tab = np.rec.fromarrays(itervalues(data), dtype=newdtype)
+    except AttributeError:  # not a dict
+        # hope it's a tuple ((key, value),) pairs.
+        from itertools import tee
+        d1, d2 = tee(data)
+        for key, dk in d1:
+            _dk = np.asarray(dk)
+            dtype = _dk.dtype
+            # unknown type is converted to text
+            if dtype.type == np.object_:
+                if len(data) == 0:
+                    longest = 0
+                else:
+                    longest = len(max(_dk, key=len))
+                    _dk = _dk.astype('|%iS' % longest)
+            if _dk.ndim > 1:
+                newdtype.append((str(key), _dk.dtype, (_dk.shape[1],)))
+            else:
+                newdtype.append((str(key), _dk.dtype))
+        tab = np.rec.fromarrays((dk for (_, dk) in d2), dtype=newdtype)
+
+    return tab
+
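+# Illustrative sketch (not part of the original module): a column-oriented
+# dict becomes a structured array whose field names are the dict keys.
+#
+#   >>> tab = _convert_dict_to_structured_ndarray({'a': [1, 2, 3],
+#   ...                                            'b': [0.1, 0.2, 0.3]})
+#   >>> tab.dtype.names
+#   ('a', 'b')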
+
+def __indent__(rows, header=None, units=None, headerChar='-',
+               delim=' | ', endline='\n', **kwargs):
+    """Indents a table by column.
+
+    Parameters
+    ----------
+    rows: sequences of rows
+        one sequence per row.
+
+    header: sequence of str
+        row consists of the columns' names
+
+    units: sequence of str
+        Sequence of units
+
+    headerChar: char
+        Character to be used for the row separator line
+
+    delim: char
+        The column delimiter.
+
+    returns
+    -------
+    txt: str
+        string representation of rows
+    """
+    length_data = list(map(max, zip(*[list(map(len, k)) for k in rows])))
+    length = length_data[:]
+
+    if (header is not None):
+        length_header = list(map(len, header))
+        length = list(map(max, zip(length_data, length_header)))
+
+    if (units is not None):
+        length_units = list(map(len, units))
+        length = list(map(max, zip(length, length_units)))
+
+    if headerChar not in (None, '', ' '):
+        rowSeparator = headerChar * (sum(length) + len(delim) * (len(length) - 1)) + endline
+    else:
+        rowSeparator = ''
+
+    # make the format
+    fmt = ['{{{0:d}:{1:d}s}}'.format(k, l) for (k, l) in enumerate(length)]
+    fmt = delim.join(fmt) + endline
+    # write the string
+    txt = rowSeparator
+    if header is not None:
+        txt += fmt.format(*header)  # + endline
+        txt += rowSeparator
+    if units is not None:
+        txt += fmt.format(*units)  # + endline
+        txt += rowSeparator
+    for r in rows:
+        txt += fmt.format(*r)  # + endline
+    txt += rowSeparator
+    return txt
+
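+# Illustrative sketch (not part of the original module): __indent__ pads each
+# column to its widest entry and frames the block with 'headerChar' rules.
+#
+#   >>> txt = __indent__([['1', '10.5'], ['20', '3.2']], header=['id', 'flux'])
+#   >>> print(txt)   # columns aligned and separated by ' | '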
+
+def pprint_rec_entry(data, num=0, keys=None):
+        """ print one line with key and values properly to be readable
+
+        Parameters
+        ----------
+        data: recarray
+            data to extract entry from
+
+        num: int, slice
+            indice selection
+
+        keys: sequence or str
+            if str, can be a regular expression
+            if sequence, the sequence of keys to print
+        """
+        if (keys is None) or (keys == '*'):
+            _keys = data.dtype.names
+        elif type(keys) in basestring:
+            _keys = [k for k in data.dtype.names if (re.match(keys, k) is not None)]
+        else:
+            _keys = keys
+
+        length = max(map(len, _keys))
+        fmt = '{{0:{0:d}s}}: {{1}}'.format(length)
+        data = data[num]
+
+        for k in _keys:
+            print(fmt.format(k, data[k]))
+
+
+def pprint_rec_array(data, idx=None, fields=None, ret=False, all=False,
+                     headerChar='-', delim=' | ', endline='\n' ):
+        """ Pretty print the table content
+            you can select the table parts to display using idx to
+            select the rows and fields to only display some columns
+            (ret is only for internal use)
+
+        Parameters
+        ----------
+        data: array
+            array to show
+
+        idx: sequence, slice
+            sub selection to print
+
+        fields: str, sequence
+            if str can be a regular expression, and/or list of fields separated
+            by spaces or commas
+
+        ret: bool
+            if set return the string representation instead of printing the result
+
+        all: bool
+            if set, force to show all rows
+
+        headerChar: char
+            Character to be used for the row separator line
+
+        delim: char
+            The column delimiter.
+        """
+        if (fields is None) or (fields == '*'):
+            _keys = data.dtype.names
+        elif type(fields) in basestring:
+            if ',' in fields:
+                _fields = fields.split(',')
+            elif ' ' in fields:
+                _fields = fields.split()
+            else:
+                _fields = [fields]
+            lbls = data.dtype.names
+            _keys = []
+            for _fk in _fields:
+                _keys += [k for k in lbls if (re.match(_fk, k) is not None)]
+        else:
+            lbls = data.dtype.names
+            _keys = []
+            for _fk in _fields:
+                _keys += [k for k in lbls if (re.match(_fk, k) is not None)]
+
+        nfields = len(_keys)
+        nrows = len(data)
+        fields = list(_keys)
+
+        if idx is None:
+            if (nrows < 10) or (all is True):
+                rows = [ [ str(data[k][rk]) for k in _keys ] for rk in range(nrows)]
+            else:
+                _idx = range(6)
+                rows = [ [ str(data[k][rk]) for k in _keys ] for rk in range(5) ]
+                rows += [ ['...' for k in range(nfields) ] ]
+                rows += [ [ str(data[k][rk]) for k in fields ] for rk in range(-5, 0)]
+        elif isinstance(idx, slice):
+            _idx = range(idx.start, idx.stop, idx.step or 1)
+            rows = [ [ str(data[k][rk]) for k in fields ] for rk in _idx]
+        else:
+            rows = [ [ str(data[k][rk]) for k in fields ] for rk in idx]
+
+        out = __indent__(rows, header=_keys, units=None, delim=delim,
+                         headerChar=headerChar, endline=endline)
+        if ret is True:
+            return out
+        else:
+            print(out)
+
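+# Illustrative sketch (not part of the original module): pretty print a
+# recarray, keeping only the columns that match a regular expression.
+#
+#   >>> arr = np.rec.fromarrays([[1, 2, 3], [4.0, 5.0, 6.0]], names='a,b')
+#   >>> pprint_rec_array(arr, fields='a')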
+
+def elementwise(func):
+    """
+    Quick and dirty elementwise function decorator: it provides a simple way
+    to apply a function either to a single element or to a sequence of elements
+    """
+    @wraps(func)
+    def wrapper(it, **kwargs):
+        if hasattr(it, '__iter__') & (type(it) not in basestring):
+            _f = partial(func, **kwargs)
+            return map(_f, it)
+        else:
+            return func(it, **kwargs)
+    return wrapper
+
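+# Illustrative sketch (not part of the original module): the decorator lets a
+# scalar function transparently accept sequences (a map object is returned in
+# that case).
+#
+#   >>> @elementwise
+#   ... def square(x):
+#   ...     return x * x
+#   >>> square(3)
+#   9
+#   >>> list(square([1, 2, 3]))
+#   [1, 4, 9]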
+
+
[docs]class AstroHelpers(object): + """ Helpers related to astronomy data """ + +
[docs] @staticmethod + @elementwise + def hms2deg(_str, delim=':'): + """ Convert hex coordinates into degrees + + Parameters + ---------- + str: string or sequence + string to convert + + delimiter: str + character delimiting the fields + + Returns + ------- + deg: float + angle in degrees + """ + if _str[0] == '-': + neg = -1 + _str = _str[1:] + else: + neg = 1 + _str = _str.split(delim) + return neg * ((((float(_str[-1]) / 60. + + float(_str[1])) / 60. + + float(_str[0])) / 24. * 360.))
+ +
[docs] @staticmethod + @elementwise + def deg2dms(val, delim=':'): + """ Convert degrees into hex coordinates + Parameters + ---------- + deg: float + angle in degrees + + delimiter: str + character delimiting the fields + + Returns + ------- + str: string or sequence + string to convert + """ + if val < 0: + sign = -1 + else: + sign = 1 + d = int( sign * val ) + m = int( (sign * val - d) * 60. ) + s = (( sign * val - d) * 60. - m) * 60. + return '{0}{1}{2}{3}{4}'.format( sign * d, delim, m, delim, s)
+ +
[docs] @staticmethod + @elementwise + def deg2hms(val, delim=':'): + """ Convert degrees into hex coordinates + + Parameters + ---------- + deg: float + angle in degrees + + delimiter: str + character delimiting the fields + + Returns + ------- + str: string or sequence + string to convert + """ + if val < 0: + sign = -1 + else: + sign = 1 + h = int( sign * val / 45. * 3.) # * 24 / 360 + m = int( (sign * val / 45. * 3. - h) * 60. ) + s = (( sign * val / 45. * 3. - h) * 60. - m) * 60. + return '{0}{1}{2}{3}{4}'.format( sign * h, delim, m, delim, s)
+ +
[docs] @staticmethod + @elementwise + def dms2deg(_str, delim=':'): + """ Convert hex coordinates into degrees + Parameters + ---------- + str: string or sequence + string to convert + + delimiter: str + character delimiting the fields + + Returns + ------- + deg: float + angle in degrees + """ + if _str[0] == '-': + neg = -1 + _str = _str[1:] + else: + neg = 1 + _str = _str.split(delim) + return (neg * ((float(_str[-1]) / 60. + float(_str[1])) / 60. + float(_str[0])))
+ +
[docs] @staticmethod + @elementwise + def euler(ai_in, bi_in, select, b1950=False, dtype='f8'): + """ + Transform between Galactic, celestial, and ecliptic coordinates. + Celestial coordinates (RA, Dec) should be given in equinox J2000 + unless the b1950 is True. + + select From To | select From To + ---------------------------------------------------------------------- + 1 RA-Dec (2000) Galactic | 4 Ecliptic RA-Dec + 2 Galactic RA-DEC | 5 Ecliptic Galactic + 3 RA-Dec Ecliptic | 6 Galactic Ecliptic + + Parameters + ---------- + + long_in: float, or sequence + Input Longitude in DEGREES, scalar or vector. + + lat_in: float, or sequence + Latitude in DEGREES + + select: int + Integer from 1 to 6 specifying type of coordinate transformation. + + b1950: bool + set equinox set to 1950 + + + Returns + ------- + long_out: float, seq + Output Longitude in DEGREES + + lat_out: float, seq + Output Latitude in DEGREES + + + REVISION HISTORY: + Written W. Landsman, February 1987 + Adapted from Fortran by Daryl Yentis NRL + Converted to IDL V5.0 W. Landsman September 1997 + Made J2000 the default, added /FK4 keyword W. Landsman December 1998 + Add option to specify SELECT as a keyword W. Landsman March 2003 + Converted from IDL to numerical Python: Erin Sheldon, NYU, 2008-07-02 + """ + + # Make a copy as an array. ndmin=1 to avoid messed up scalar arrays + ai = np.array(ai_in, ndmin=1, copy=True, dtype=dtype) + bi = np.array(bi_in, ndmin=1, copy=True, dtype=dtype) + + PI = math.pi + # HALFPI = PI / 2.0 + D2R = PI / 180.0 + R2D = 1.0 / D2R + + twopi = 2.0 * PI + fourpi = 4.0 * PI + + # J2000 coordinate conversions are based on the following constants + # (see the Hipparcos explanatory supplement). + # eps = 23.4392911111d Obliquity of the ecliptic + # alphaG = 192.85948d Right Ascension of Galactic North Pole + # deltaG = 27.12825d Declination of Galactic North Pole + # lomega = 32.93192d Galactic longitude of celestial equator + # alphaE = 180.02322d Ecliptic longitude of Galactic North Pole + # deltaE = 29.811438523d Ecliptic latitude of Galactic North Pole + # Eomega = 6.3839743d Galactic longitude of ecliptic equator + # Parameters for all the different conversions + if b1950: + # equinox = '(B1950)' + psi = np.array([ 0.57595865315, 4.9261918136, + 0.00000000000, 0.0000000000, + 0.11129056012, 4.7005372834], dtype=dtype) + stheta = np.array([ 0.88781538514, -0.88781538514, + 0.39788119938, -0.39788119938, + 0.86766174755, -0.86766174755], dtype=dtype) + ctheta = np.array([ 0.46019978478, 0.46019978478, + 0.91743694670, 0.91743694670, + 0.49715499774, 0.49715499774], dtype=dtype) + phi = np.array([ 4.9261918136, 0.57595865315, + 0.0000000000, 0.00000000000, + 4.7005372834, 0.11129056012], dtype=dtype) + else: + # equinox = '(J2000)' + psi = np.array([ 0.57477043300, 4.9368292465, + 0.00000000000, 0.0000000000, + 0.11142137093, 4.71279419371], dtype=dtype) + stheta = np.array([ 0.88998808748, -0.88998808748, + 0.39777715593, -0.39777715593, + 0.86766622025, -0.86766622025], dtype=dtype) + ctheta = np.array([ 0.45598377618, 0.45598377618, + 0.91748206207, 0.91748206207, + 0.49714719172, 0.49714719172], dtype=dtype) + phi = np.array([ 4.9368292465, 0.57477043300, + 0.0000000000, 0.00000000000, + 4.71279419371, 0.11142137093], dtype=dtype) + + # zero offset + i = select - 1 + a = ai * D2R - phi[i] + + b = bi * D2R + sb = sin(b) + cb = cos(b) + cbsa = cb * sin(a) + b = -stheta[i] * cbsa + ctheta[i] * sb + w, = np.where(b > 1.0) + if w.size > 0: + b[w] = 1.0 + bo = arcsin(b) * R2D + a = arctan2( ctheta[i] 
* cbsa + stheta[i] * sb, cb * cos(a) ) + ao = ( (a + psi[i] + fourpi) % twopi) * R2D + return ao, bo
+ +
[docs] @staticmethod + def sphdist(ra1, dec1, ra2, dec2): + """measures the spherical distance between 2 points + + Parameters + ---------- + ra1: float or sequence + first right ascensions in degrees + + dec1: float or sequence + first declination in degrees + ra2: float or sequence + second right ascensions in degrees + dec2: float or sequence + first declination in degrees + + Returns + ------- + Outputs: float or sequence + returns a distance in degrees + """ + dec1_r = deg2rad(dec1) + dec2_r = deg2rad(dec2) + return 2. * rad2deg(arcsin(sqrt((sin((dec1_r - dec2_r) / 2)) ** 2 + + cos(dec1_r) * cos(dec2_r) * ( + sin((deg2rad(ra1 - ra2)) / 2)) ** + 2)))
+ +
[docs] @staticmethod + def conesearch(ra0, dec0, ra, dec, r, outtype=0): + """ Perform a cone search on a table + + Parameters + ---------- + ra0: ndarray[ndim=1, dtype=float] + column name to use as RA source in degrees + + dec0: ndarray[ndim=1, dtype=float] + column name to use as DEC source in degrees + + ra: float + ra to look for (in degree) + + dec: float + ra to look for (in degree) + + r: float + distance in degrees + + outtype: int + type of outputs + 0 -- minimal, indices of matching coordinates + 1 -- indices and distances of matching coordinates + 2 -- full, boolean filter and distances + + Returns + ------- + t: tuple + if outtype is 0: + only return indices from ra0, dec0 + elif outtype is 1: + return indices from ra0, dec0 and distances + elif outtype is 2: + return conditional vector and distance to all ra0, dec0 + """ + @elementwise + def getDist( pk ): + """ get spherical distance between 2 points """ + return AstroHelpers.sphdist(pk[0], pk[1], ra, dec) + + dist = np.array(list(getDist(zip(ra0, dec0)))) + v = (dist <= r) + + if outtype == 0: + return np.ravel(np.where(v)) + elif outtype == 1: + return np.ravel(np.where(v)), dist[v] + else: + return v, dist
+ + +# ============================================================================== +# SimpleTable -- provides table manipulations with limited storage formats +# ============================================================================== +
[docs]class SimpleTable(object): + """ Table class that is designed to be the basis of any format wrapping + around numpy recarrays + + Attributes + ---------- + + fname: str or object + if str, the file to read from. This may be limited to the format + currently handled automatically. If the format is not correctly handled, + you can try by providing an object.__ + + if object with a structure like dict, ndarray, or recarray-like + the data will be encapsulated into a Table + + caseless: bool + if set, column names will be caseless during operations + + aliases: dict + set of column aliases (can be defined later :func:`set_alias`) + + units: dict + set of column units (can be defined later :func:`set_unit`) + + desc: dict + set of column description or comments (can be defined later :func:`set_comment`) + + header: dict + key, value pair corresponding to the attributes of the table + """ + + def __init__(self, fname, *args, **kwargs): + + dtype = kwargs.pop('dtype', None) + self.caseless = kwargs.get('caseless', False) + self._aliases = kwargs.get('aliases', {}) + self._units = kwargs.get('units', {}) + self._desc = kwargs.get('desc', {}) + + if (isinstance(fname, (dict, tuple, list, types.GeneratorType))) or (dtype in [dict, 'dict']): + try: + self.header = fname.pop('header', {}) + except (AttributeError, TypeError): + self.header = kwargs.pop('header', {}) + self.data = _convert_dict_to_structured_ndarray(fname) + elif (type(fname) in (str,)) or (dtype is not None): + if (type(fname) in (str,)): + extension = fname.split('.')[-1] + else: + extension = None + if (extension == 'csv') or dtype == 'csv': + kwargs.setdefault('delimiter', ',') + commentedHeader = kwargs.pop('commentedHeader', False) + n, header, units, comments, aliases, names = _ascii_read_header(fname, commentedHeader=commentedHeader, **kwargs) + kwargs.setdefault('names', names) + if _pd is not None: # pandas is faster + kwargs.setdefault('comment', '#') + kwargs.setdefault('as_recarray', True) + kwargs.setdefault('skiprows', n) + self.data = _pd.read_csv(fname, *args, **kwargs) + else: + kwargs.setdefault('skip_header', n) + kwargs.setdefault('comments', '#') + self.data = np.recfromcsv(fname, *args, **kwargs) + self.header = header + self._units.update(**units) + self._desc.update(**comments) + self._aliases.update(**aliases) + kwargs.setdefault('names', True) + elif (extension in ('tsv', 'dat', 'txt')) or dtype in ('tsv', 'dat', 'txt'): + commentedHeader = kwargs.pop('commentedHeader', True) + n, header, units, comments, aliases, names = _ascii_read_header(fname, commentedHeader=commentedHeader, **kwargs) + kwargs.setdefault('names', names) + if _pd is not None: # pandas is faster + kwargs.setdefault('delimiter', '\s+') + kwargs.setdefault('comment', '#') + kwargs.setdefault('as_recarray', True) + self.data = _pd.read_csv(fname, *args, **kwargs) + else: + kwargs.setdefault('delimiter', None) + kwargs.setdefault('comments', '#') + kwargs.setdefault('skip_header', n) + self.data = np.recfromtxt(fname, *args, **kwargs) + self.header = header + self._units.update(**units) + self._desc.update(**comments) + self._aliases.update(**aliases) + elif (extension == 'fits') or dtype == 'fits': + if pyfits is None: + raise RuntimeError('Cannot read this format, Astropy or pyfits not found') + if ('extname' not in kwargs) and ('ext' not in kwargs) and (len(args) == 0): + args = (1, ) + self.data = np.array(pyfits.getdata(fname, *args, **kwargs)) + header, aliases, units, comments = _fits_read_header(pyfits.getheader(fname, *args, 
**kwargs)) + self.header = header + self._desc.update(**comments) + self._units.update(**units) + self._aliases.update(**aliases) + elif (extension in ('hdf5', 'hd5', 'hdf')) or dtype in (extension in ('hdf5', 'hd5', 'hdf')): + if tables is None: + raise RuntimeError('Cannot read this format, pytables not found') + hdr, aliases, units, desc, data = _hdf5_read_data(fname, *args, **kwargs) + self.data = data + self.header = hdr + self._units.update(**units) + self._desc.update(**desc) + self._aliases.update(**aliases) + else: + raise Exception('Format {0:s} not handled'.format(extension)) + elif type(fname) == np.ndarray: + self.data = fname + self.header = {} + elif type(fname) == pyfits.FITS_rec: + self.data = np.array(fname) + self.header = {} + elif type(fname) == SimpleTable: + cp = kwargs.pop('copy', True) + if cp: + self.data = deepcopy(fname.data) + self.header = deepcopy(fname.header) + self._aliases = deepcopy(fname._aliases) + self._units = deepcopy(fname._units) + self._desc = deepcopy(fname._desc) + else: + self.data = fname.data + self.header = fname.header + self._aliases = fname._aliases + self._units = fname._units + self._desc = fname._desc + elif hasattr(fname, 'dtype'): + self.data = np.array(fname) + self.header = {} + else: + raise Exception('Type {0!s:s} not handled'.format(type(fname))) + if 'NAME' not in self.header: + if type(fname) not in basestring: + self.header['NAME'] = 'No Name' + else: + self.header['NAME'] = fname + +
[docs] def pprint_entry(self, num, keys=None): + """ print one line with key and values properly to be readable + + Parameters + ---------- + num: int, slice + indice selection + + keys: sequence or str + if str, can be a regular expression + if sequence, the sequence of keys to print + """ + if (keys is None) or (keys == '*'): + _keys = self.keys() + elif type(keys) in basestring: + _keys = [k for k in (self.keys() + tuple(self._aliases.keys())) + if (re.match(keys, k) is not None)] + else: + _keys = keys + + length = max(map(len, _keys)) + fmt = '{{0:{0:d}s}}: {{1}}'.format(length) + data = self[num] + + for k in _keys: + print(fmt.format(k, data[self.resolve_alias(k)]))
+ +
[docs] def pprint(self, idx=None, fields=None, ret=False, all=False, + full_match=False, headerChar='-', delim=' | ', endline='\n', + **kwargs): + """ Pretty print the table content + you can select the table parts to display using idx to + select the rows and fields to only display some columns + (ret is only for insternal use) + + Parameters + ---------- + + idx: sequence, slide + sub selection to print + + fields: str, sequence + if str can be a regular expression, and/or list of fields separated + by spaces or commas + + ret: bool + if set return the string representation instead of printing the result + + all: bool + if set, force to show all rows + + headerChar: char + Character to be used for the row separator line + + delim: char + The column delimiter. + """ + if full_match is True: + fn = re.fullmatch + else: + fn = re.match + + if (fields is None) or (fields == '*'): + _keys = self.keys() + elif type(fields) in basestring: + if ',' in fields: + _fields = fields.split(',') + elif ' ' in fields: + _fields = fields.split() + else: + _fields = [fields] + lbls = self.keys() + tuple(self._aliases.keys()) + _keys = [] + for _fk in _fields: + _keys += [k for k in lbls if (fn(_fk, k) is not None)] + else: + lbls = self.keys() + tuple(self._aliases.keys()) + _keys = [] + for _fk in _fields: + _keys += [k for k in lbls if (fn(_fk, k) is not None)] + + nfields = len(_keys) + + fields = list(map( self.resolve_alias, _keys )) + + if idx is None: + if (self.nrows < 10) or all: + rows = [ [ str(self[k][rk]) for k in _keys ] for rk in range(self.nrows)] + else: + _idx = range(6) + rows = [ [ str(self[k][rk]) for k in _keys ] for rk in range(5) ] + if nfields > 1: + rows += [ ['...' for k in range(nfields) ] ] + else: + rows += [ ['...' for k in range(nfields) ] ] + rows += [ [ str(self[k][rk]) for k in fields ] for rk in range(-5, 0)] + elif isinstance(idx, slice): + _idx = range(idx.start, idx.stop, idx.step or 1) + rows = [ [ str(self[k][rk]) for k in fields ] for rk in _idx] + else: + rows = [ [ str(self[k][rk]) for k in fields ] for rk in idx] + + if len(self._units) == 0: + units = None + else: + units = [ '(' + str( self._units.get(k, None) or '') + ')' for k in fields ] + + out = __indent__(rows, header=_keys, units=units, delim=delim, + headerChar=headerChar, endline=endline) + if ret is True: + return out + else: + print(out)
+ +
[docs] def write(self, fname, **kwargs): + """ write table into file + + Parameters + ---------- + fname: str + filename to export the table into + + .. note:: + additional keywords are forwarded to the corresponding libraries + :func:`pyfits.writeto` or :func:`pyfits.append` + :func:`np.savetxt` + """ + extension = kwargs.pop('extension', None) + if extension is None: + extension = fname.split('.')[-1] + if (extension == 'csv'): + comments = kwargs.pop('comments', '#') + delimiter = kwargs.pop('delimiter', ',') + commentedHeader = kwargs.pop('commentedHeader', False) + hdr = _ascii_generate_header(self, comments=comments, delimiter=delimiter, + commentedHeader=commentedHeader) + header = kwargs.pop('header', hdr) + np.savetxt(fname, self.data, delimiter=delimiter, header=header, + comments='', **kwargs) + elif (extension in ['txt', 'dat']): + comments = kwargs.pop('comments', '#') + delimiter = kwargs.pop('delimiter', ' ') + commentedHeader = kwargs.pop('commentedHeader', True) + hdr = _ascii_generate_header(self, comments=comments, delimiter=delimiter, + commentedHeader=commentedHeader) + header = kwargs.pop('header', hdr) + np.savetxt(fname, self.data, delimiter=delimiter, header=header, + comments='', **kwargs) + elif (extension == 'fits'): + hdr0 = kwargs.pop('header', None) + append = kwargs.pop('append', False) + hdr = _fits_generate_header(self) + if hdr0 is not None: + hdr.update(**hdr0) + if append: + _fits_append(fname, self.data, hdr, **kwargs) + else: + # patched version to correctly include the header + _fits_writeto(fname, self.data, hdr, **kwargs) + elif (extension in ('hdf', 'hdf5', 'hd5')): + _hdf5_write_data(fname, self.data, header=self.header, + units=self._units, comments=self._desc, + aliases=self._aliases, **kwargs) + else: + raise Exception('Format {0:s} not handled'.format(extension))
+ +
[docs] def set_alias(self, alias, colname): + """ + Define an alias to a column + + Parameters + ---------- + alias: str + The new alias of the column + + colname: str + The column being aliased + """ + if (colname not in self.keys()): + raise KeyError("Column {0:s} does not exist".format(colname)) + self._aliases[alias] = colname
+ +
[docs] def reverse_alias(self, colname): + """ + Return aliases of a given column. + + Given a colname, return a sequence of aliases associated to this column + Aliases are defined by using .define_alias() + """ + _colname = self.resolve_alias(colname) + if (_colname not in self.keys()): + raise KeyError("Column {0:s} does not exist".format(colname)) + + return tuple([ k for (k, v) in self._aliases.iteritems() if (v == _colname) ])
+ +
[docs] def resolve_alias(self, colname): + """ + Return the name of an aliased column. + + Given an alias, return the column name it aliases. This + function is a no-op if the alias is a column name itself. + + Aliases are defined by using .define_alias() + """ + # User aliases + if hasattr(colname, '__iter__') & (type(colname) not in basestring): + return [ self.resolve_alias(k) for k in colname ] + else: + if self.caseless is True: + maps = dict( [ (k.lower(), v) for k, v in self._aliases.items() ] ) + maps.update( (k.lower(), k) for k in self.keys() ) + return maps.get(colname.lower(), colname) + else: + return self._aliases.get(colname, colname)
+ +
[docs] def set_unit(self, colname, unit): + """ Set the unit of a column referenced by its name + + Parameters + ---------- + colname: str + column name or registered alias + + unit: str + unit description + """ + if isinstance(unit, basestring) and isinstance(colname, basestring): + self._units[self.resolve_alias(colname)] = str(unit) + else: + for k, v in zip(colname, unit): + self._units[self.resolve_alias(k)] = str(v)
+ +
[docs] def set_comment(self, colname, comment): + """ Set the comment of a column referenced by its name + + Parameters + ---------- + colname: str + column name or registered alias + + comment: str + column description + """ + if isinstance(comment, basestring) and isinstance(colname, basestring): + self._desc[self.resolve_alias(colname)] = str(comment) + else: + for k, v in zip(colname, comment): + self._desc[self.resolve_alias(k)] = str(v)
+ +
[docs] def keys(self, regexp=None, full_match=False): + """ + Return the data column names or a subset of it + + Parameters + ---------- + regexp: str + pattern to filter the keys with + + full_match: bool + if set, use :func:`re.fullmatch` instead of :func:`re.match` + + Try to apply the pattern at the start of the string, returning + a match object, or None if no match was found. + + returns + ------- + seq: sequence + sequence of keys + """ + if (regexp is None) or (regexp == '*'): + return self.colnames + elif type(regexp) in basestring: + if full_match is True: + fn = re.fullmatch + else: + fn = re.match + + if regexp.count(',') > 0: + _re = regexp.split(',') + elif regexp.count(' ') > 0: + _re = regexp.split() + else: + _re = [regexp] + + lbls = self.colnames + tuple(self._aliases.keys()) + _keys = [] + for _rk in _re: + _keys += [k for k in lbls if (fn(_rk, k) is not None)] + + return _keys + elif hasattr(regexp, '__iter__'): + _keys = [] + for k in regexp: + _keys += self.keys(k) + return _keys + else: + raise ValueError('Unexpected type {0} for regexp'.format(type(regexp)))
+ + @property + def name(self): + """ name of the table given by the Header['NAME'] attribute """ + return self.header.get('NAME', None) + + @property + def colnames(self): + """ Sequence of column names """ + return self.data.dtype.names + + @property + def ncols(self): + """ number of columns """ + return len(self.colnames) + + @property + def nrows(self): + """ number of lines """ + return len(self.data) + + @property + def nbytes(self): + """ number of bytes of the object """ + n = sum(k.nbytes if hasattr(k, 'nbytes') else sys.getsizeof(k) + for k in self.__dict__.values()) + return n + + def __len__(self): + """ number of lines """ + return self.nrows + + @property + def shape(self): + """ shape of the data """ + return self.data.shape + + @property + def dtype(self): + """ dtype of the data """ + return self.data.dtype + + def __getitem__(self, v): + return np.asarray(self.data.__getitem__(self.resolve_alias(v))) + +
[docs] def get(self, v, full_match=False): + """ returns a table from columns given as v + + this function is equivalent to :func:`__getitem__` but preserve the + Table format and associated properties (units, description, header) + + Parameters + ---------- + v: str + pattern to filter the keys with + + full_match: bool + if set, use :func:`re.fullmatch` instead of :func:`re.match` + + """ + new_keys = self.keys(v) + t = self.__class__(self[new_keys]) + t.header.update(**self.header) + t._aliases.update((k, v) for (k, v) in self._aliases.items() if v in new_keys) + t._units.update((k, v) for (k, v) in self._units.items() if v in new_keys) + t._desc.update((k, v) for (k, v) in self._desc.items() if v in new_keys) + return t
+ + def __setitem__(self, k, v): + if k in self: + return self.data.__setitem__(self.resolve_alias(k), v) + else: + object.__setitem__(self, k, v) + + def __getattr__(self, k): + try: + return self.data.__getitem__(self.resolve_alias(k)) + except: + return object.__getattribute__(self, k) + + def __iter__(self): + newtab = self.select('*', [0]) + for d in self.data: + newtab.data[0] = d + yield newtab + # return self.data.__iter__() + +
[docs] def iterkeys(self): + """ Iterator over the columns of the table """ + for k in self.colnames: + yield k
+ +
[docs] def itervalues(self): + """ Iterator over the lines of the table """ + for l in self.data: + yield l
+ +
[docs] def info(self): + s = "\nTable: {name:s}\n nrows={s.nrows:d}, ncols={s.ncols:d}, mem={size:s}" + s = s.format(name=self.header.get('NAME', 'Noname'), s=self, + size=pretty_size_print(self.nbytes)) + + s += '\n\nHeader:\n' + vals = list(self.header.items()) + length = max(map(len, self.header.keys())) + fmt = '\t{{0:{0:d}s}} {{1}}\n'.format(length) + for k, v in vals: + s += fmt.format(k, v) + + vals = [(k, self._units.get(k, ''), self._desc.get(k, '')) + for k in self.colnames] + lengths = [(len(k), len(self._units.get(k, '')), len(self._desc.get(k, ''))) + for k in self.colnames] + lengths = list(map(max, (zip(*lengths)))) + + s += '\nColumns:\n' + + fmt = '\t{{0:{0:d}s}} {{1:{1:d}s}} {{2:{2:d}s}}\n'.format(*(k + 1 for k in lengths)) + for k, u, c in vals: + s += fmt.format(k, u, c) + + print(s) + + if len(self._aliases) > 0: + print("\nTable contains alias(es):") + for k, v in self._aliases.items(): + print('\t{0:s} --> {1:s}'.format(k, v))
+ + def __repr__(self): + s = object.__repr__(self) + s += "\nTable: {name:s}\n nrows={s.nrows:d}, ncols={s.ncols:d}, mem={size:s}" + return s.format(name=self.header.get('NAME', 'Noname'), s=self, + size=pretty_size_print(self.nbytes)) + + def __getslice__(self, i, j): + return self.data.__getslice__(i, j) + + def __contains__(self, k): + return (k in self.colnames) or (k in self._aliases) + + def __array__(self): + return self.data + + def __call__(self, *args, **kwargs): + if (len(args) > 0) or (len(kwargs) > 0): + return self.evalexpr(*args, **kwargs) + else: + return self.info() + +
[docs] def sort(self, keys, copy=False): + """ + Sort the table inplace according to one or more keys. This operates on + the existing table (and does not return a new table). + + Parameters + ---------- + + keys: str or seq(str) + The key(s) to order by + + copy: bool + if set returns a sorted copy instead of working inplace + """ + if not hasattr(keys, '__iter__'): + keys = [keys] + + if copy is False: + self.data.sort(order=keys) + else: + t = self.__class__(self, copy=True) + t.sort(keys, copy=False) + return t
+ +
[docs] def match(self, r2, key): + """ Returns the indices at which the tables match + matching uses 2 columns that are compared in values + + Parameters + ---------- + r2: Table + second table to use + + key: str + fields used for comparison. + + Returns + ------- + indexes: tuple + tuple of both indices list where the two columns match. + """ + return np.where( np.equal.outer( self[key], r2[key] ) )
+ + ''' + def stack(self, r, defaults=None): + """ + Superposes arrays fields by fields inplace + + Parameters + ---------- + r: Table + """ + if not hasattr(r, 'data'): + raise AttributeError('r should be a Table object') + self.data = recfunctions.stack_arrays([self.data, r.data], defaults, + usemask=False, asrecarray=True) + ''' + +
[docs] def stack(self, r, *args, **kwargs): + """ + Superposes arrays fields by fields inplace + + t.stack(t1, t2, t3, default=None, inplace=True) + + Parameters + ---------- + r: Table + """ + if not hasattr(r, 'data'): + raise AttributeError('r should be a Table object') + defaults = kwargs.get('defaults', None) + inplace = kwargs.get('inplace', False) + + data = [self.data, r.data] + [k.data for k in args] + sdata = recfunctions.stack_arrays(data, defaults, usemask=False, + asrecarray=True) + + if inplace: + self.data = sdata + else: + t = self.__class__(self) + t.data = sdata + return t
+ +
[docs] def join_by(self, r2, key, jointype='inner', r1postfix='1', r2postfix='2', + defaults=None, asrecarray=False, asTable=True): + """ + Join arrays `r1` and `r2` on key `key`. + + The key should be either a string or a sequence of string corresponding + to the fields used to join the array. + An exception is raised if the `key` field cannot be found in the two input + arrays. + Neither `r1` nor `r2` should have any duplicates along `key`: the presence + of duplicates will make the output quite unreliable. Note that duplicates + are not looked for by the algorithm. + + Parameters + ---------- + key: str or seq(str) + corresponding to the fields used for comparison. + + r2: Table + Table to join with + + jointype: str in {'inner', 'outer', 'leftouter'} + * 'inner' : returns the elements common to both r1 and r2. + * 'outer' : returns the common elements as well as the elements of r1 not in r2 and the elements of not in r2. + * 'leftouter' : returns the common elements and the elements of r1 not in r2. + + r1postfix: str + String appended to the names of the fields of r1 that are present in r2 + + r2postfix: str + String appended to the names of the fields of r2 that are present in r1 + + defaults: dict + Dictionary mapping field names to the corresponding default values. + + Returns + ------- + tab: Table + joined table + + .. note:: + + * The output is sorted along the key. + + * A temporary array is formed by dropping the fields not in the key + for the two arrays and concatenating the result. This array is + then sorted, and the common entries selected. The output is + constructed by filling the fields with the selected entries. + Matching is not preserved if there are some duplicates... + """ + arr = recfunctions.join_by(key, self.data, r2.data, jointype=jointype, + r1postfix=r1postfix, r2postfix=r2postfix, + defaults=defaults, usemask=False, + asrecarray=True) + + return SimpleTable(arr)
+ + @property + def empty_row(self): + """ Return an empty row array respecting the table format """ + return np.rec.recarray(shape=(1,), dtype=self.data.dtype) + +
[docs] def add_column(self, name, data, dtype=None, unit=None, description=None): + """ + Add one or multiple columns to the table + + Parameters + ---------- + name: str or sequence(str) + The name(s) of the column(s) to add + + data: ndarray, or sequence of ndarray + The column data, or sequence of columns + + dtype: dtype + numpy dtype for the data to add + + unit: str + The unit of the values in the column + + description: str + A description of the content of the column + """ + + _data = np.array(data, dtype=dtype) + dtype = _data.dtype + + # unknown type is converted to text + if dtype.type == np.object_: + if len(data) == 0: + longest = 0 + else: + longest = len(max(data, key=len)) + _data = np.asarray(data, dtype='|%iS' % longest) + + dtype = _data.dtype + + if len(self.data.dtype) > 0: + # existing data in the table + self.data = recfunctions.append_fields(self.data, name, _data, + dtypes=dtype, usemask=False, + asrecarray=True) + else: + if _data.ndim > 1: + newdtype = (str(name), _data.dtype, (_data.shape[1],)) + else: + newdtype = (str(name), _data.dtype) + self.data = np.array(_data, dtype=[newdtype]) + + if unit is not None: + self.set_unit(name, unit) + + if description is not None: + self.set_comment(name, description)
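+
+# A minimal usage sketch (hypothetical column name and unit), assuming ``t``
+# is a SimpleTable with a 'logT' column:
+#
+#     >>> t.add_column('Teff', 10 ** t['logT'], unit='K', description='effective temperature')
+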
+ +
[docs] def append_row(self, iterable): + """ + Append one row in this table. + + see also: :func:`stack` + + Parameters + ---------- + iterable: iterable + line to add + """ + if (len(iterable) != self.ncols): + raise AttributeError('Expecting as many items as columns') + r = self.empty_row + for k, v in enumerate(iterable): + r[0][k] = v + self.stack(r)
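+
+# A minimal usage sketch, assuming ``t`` is a SimpleTable with exactly three
+# columns (the values below are hypothetical):
+#
+#     >>> t.append_row((3.76, 4.44, 0.02))   # one value per column, in column order
+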
+ +
[docs] def remove_columns(self, names): + """ + Remove several columns from the table + + Parameters + ---------- + names: sequence + A list containing the names of the columns to remove + """ + self.pop_columns(names)
+ +
[docs] def pop_columns(self, names): + """ + Pop several columns from the table + + Parameters + ---------- + + names: sequence + A list containing the names of the columns to remove + + Returns + ------- + + values: tuple + list of columns + """ + + if not hasattr(names, '__iter__') or type(names) in basestring: + names = [names] + + p = [self[k] for k in names] + + _names = set([ self.resolve_alias(k) for k in names ]) + self.data = recfunctions.drop_fields(self.data, _names) + for k in names: + self._aliases.pop(k, None) + self._units.pop(k, None) + self._desc.pop(k, None) + + return p
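+
+# A minimal usage sketch (hypothetical column names):
+#
+#     >>> dropped = t.pop_columns(['flag', 'comment'])   # removes both columns and returns their data
+#     >>> t.remove_columns(['tmp'])                      # removal only, nothing returned
+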
+ +
[docs] def find_duplicate(self, index_only=False, values_only=False): + """Find duplicated entries in the table, return a list of duplicated + elements. At this time it only works when 2 lines are *the same entry*, not when + 2 lines have *the same values* + """ + dup = [] + idd = [] + for i in range(len(self.data)): + if (self.data[i] in self.data[i + 1:]): + if (self.data[i] not in dup): + dup.append(self.data[i]) + idd.append(i) + if index_only: + return idd + elif values_only: + return dup + else: + return zip(idd, dup)
+ +
[docs] def evalexpr(self, expr, exprvars=None, dtype=float): + """ evaluate expression based on the data and external variables + all np function can be used (log, exp, pi...) + + Parameters + ---------- + expr: str + expression to evaluate on the table + includes mathematical operations and attribute names + + exprvars: dictionary, optional + A dictionary that replaces the local operands in current frame. + + dtype: dtype definition + dtype of the output array + + Returns + ------- + out : NumPy array + array of the result + """ + _globals = {} + for k in ( list(self.colnames) + list(self._aliases.keys()) ): + if k in expr: + _globals[k] = self[k] + + if exprvars is not None: + if (not (hasattr(exprvars, 'keys') & hasattr(exprvars, '__getitem__' ))): + raise AttributeError("Expecting a dictionary-like as condvars") + for k, v in ( exprvars.items() ): + _globals[k] = v + + # evaluate expression, to obtain the final filter + r = np.empty( self.nrows, dtype=dtype) + r[:] = eval(expr, _globals, np.__dict__) + + return r
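+
+# A minimal usage sketch, assuming hypothetical columns 'logT' and 'logL':
+#
+#     >>> teff = t.evalexpr('10 ** logT')                          # numpy functions/operators available
+#     >>> lum = t.evalexpr('logL + offset', exprvars={'offset': 0.4})
+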
+ +
[docs] def where(self, condition, condvars=None, *args, **kwargs): + """ Read table data fulfilling the given `condition`. + Only the rows fulfilling the `condition` are included in the result. + + Parameters + ---------- + condition: str + expression to evaluate on the table + includes mathematical operations and attribute names + + condvars: dictionary, optional + A dictionary that replaces the local operands in current frame. + + Returns + ------- + out: ndarray/ tuple of ndarrays + result equivalent to :func:`np.where` + + """ + ind = np.where(self.evalexpr(condition, condvars, dtype=bool ), *args, **kwargs) + return ind
+ +
[docs] def select(self, fields, indices=None, **kwargs): + """ + Select only a few fields in the table + + Parameters + ---------- + fields: str or sequence + fields to keep in the resulting table + + indices: sequence or slice + extract only on these indices + + returns + ------- + tab: SimpleTable instance + resulting table + """ + _fields = self.keys(fields) + + if fields == '*': + if indices is None: + return self + else: + tab = self.__class__(self[indices]) + for k in self.__dict__.keys(): + if k not in ('data', ): + setattr(tab, k, deepcopy(self.__dict__[k])) + return tab + else: + d = {} + for k in _fields: + _k = self.resolve_alias(k) + if indices is not None: + d[k] = self[_k][indices] + else: + d[k] = self[_k] + d['header'] = deepcopy(self.header) + tab = self.__class__(d) + for k in self.__dict__.keys(): + if k not in ('data', ): + setattr(tab, k, deepcopy(self.__dict__[k])) + return tab
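+
+# A minimal usage sketch combining where() and select() (hypothetical column
+# names and threshold):
+#
+#     >>> ind = t.where('logT > 4.0')                    # row indices matching the expression
+#     >>> hot = t.select(['logT', 'logg'], indices=ind)  # sub-table with only these fields
+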
+ +
[docs] def selectWhere(self, fields, condition, condvars=None, **kwargs): + """ Read table data fulfilling the given `condition`. + Only the rows fulfilling the `condition` are included in the result. + + Parameters + ---------- + fields: str or sequence + fields to keep in the resulting table + + condition: str + expression to evaluate on the table + includes mathematical operations and attribute names + + condvars: dictionary, optional + A dictionary that replaces the local operands in current frame. + + Returns + ------- + tab: SimpleTable instance + resulting table + """ + if condition in [True, 'True', None]: + ind = None + else: + ind = self.where(condition, condvars, **kwargs) + + tab = self.select(fields, indices=ind) + + return tab
+ +
[docs] def groupby(self, key): + """ + Create an iterator which returns (key, sub-table) grouped by each value + of key(value) + + Parameters + ---------- + key: str + expression or pattern to filter the keys with + + Returns + ------- + key: str or sequence + group key + + tab: SimpleTable instance + sub-table of the group + header, aliases and column metadata are preserved (linked to the + master table). + """ + _key = self.keys(key) + getter = operator.itemgetter(*_key) + + for k, grp in itertools.groupby(self.data, getter): + t = self.__class__(np.dstack(grp)) + t.header = self.header + t._aliases = self._aliases + t._units = self._units + t._desc = self._desc + yield (k, t)
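+
+# A minimal usage sketch, assuming a hypothetical 'Z' column with repeated values;
+# like itertools.groupby, it groups consecutive rows, so sort on the key first:
+#
+#     >>> t.sort('Z')
+#     >>> for z, grp in t.groupby('Z'):
+#     ...     print(z, grp.nrows)
+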
+ +
[docs] def stats(self, fn=None, fields=None, fill=None): + """ Make statistics on columns of a table + + Parameters + ---------- + fn: callable or sequence of callables + functions to apply to each column + default: (stats.mean, stats.std, stats.min, stats.max, stats.has_nan) + + fields: str or sequence + any key or key expression to subselect columns + default is all columns + + fill: value + value when not applicable + default np.nan + + returns + ------- + tab: Table instance + collection of statistics, one column per function in fn and one line + per column in the table + """ + from collections import OrderedDict + + if fn is None: + fn = (stats.mean, stats.std, + stats.min, stats.max, + stats.has_nan) + elif not hasattr(fn, '__iter__'): + fn = (fn, ) + + d = OrderedDict() + d.setdefault('FIELD', []) + for k in fn: + d.setdefault(k.__name__, []) + + if fields is None: + fields = self.colnames + else: + fields = self.keys(fields) + + if fill is None: + fill = np.nan + + for k in fields: + d['FIELD'].append(k) + for fnk in fn: + try: + val = fnk(self[k]) + except: + val = fill + d[fnk.__name__].append(val) + + return self.__class__(d, dtype=dict)
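+
+# A minimal usage sketch:
+#
+#     >>> summary = t.stats()   # mean, std, min, max and has_nan for every column
+#     >>> summary['mean']       # one entry per column of t
+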
+ + # method aliases + remove_column = remove_columns + + # deprecated methods + addCol = add_column + addLine = append_row + setComment = set_comment + setUnit = set_unit + delCol = remove_columns
+ + +
[docs]class AstroTable(SimpleTable): + """ + Derived from the Table, this class adds implementations of common astro + tools, especially conesearch + """ + def __init__(self, *args, **kwargs): + super(self.__class__, self).__init__(*args, **kwargs) + self._ra_name, self._dec_name = self.__autoRADEC__() + if (len(args) > 0): + if isinstance(args[0], AstroTable): + self._ra_name = args[0]._ra_name + self._dec_name = args[0]._dec_name + self._ra_name = kwargs.get('ra_name', self._ra_name) + self._dec_name = kwargs.get('dec_name', self._dec_name) + + def __autoRADEC__(self): + """ Tries to identify the columns containing RA and DEC coordinates """ + if 'ra' in self: + ra_name = 'ra' + elif 'RA' in self: + ra_name = 'RA' + else: + ra_name = None + if 'dec' in self: + dec_name = 'dec' + elif 'DEC' in self: + dec_name = 'DEC' + else: + dec_name = None + return ra_name, dec_name + +
[docs] def set_RA(self, val): + """ Set the column that defines RA coordinates """ + assert(val in self), 'column name {} not found in the table'.format(val) + self._ra_name = val
+ +
[docs] def set_DEC(self, val): + """ Set the column that defines DEC coordinates """ + assert(val in self), 'column name {} not found in the table'.format(val) + self._dec_name = val
+ +
[docs] def get_RA(self, degree=True): + """ Returns RA, converted from hexa/sexa into degrees """ + if self._ra_name is None: + return None + if (not degree) or (self.dtype[self._ra_name].kind != 'S'): + return self[self._ra_name] + else: + if (len(str(self[0][self._ra_name]).split(':')) == 3): + return np.asarray(AstroHelpers.hms2deg(self[self._ra_name], + delim=':')) + elif (len(str(self[0][self._ra_name]).split(' ')) == 3): + return np.asarray(AstroHelpers.hms2deg(self[self._ra_name], + delim=' ')) + else: + raise Exception('RA Format not understood')
+ +
[docs] def get_DEC(self, degree=True): + """ Returns DEC, converted from hexa/sexa into degrees """ + if self._dec_name is None: + return None + if (not degree) or (self.dtype[self._dec_name].kind != 'S'): + return self[self._dec_name] + else: + if (len(str(self[0][self._dec_name]).split(':')) == 3): + return np.asarray(AstroHelpers.dms2deg(self[self._dec_name], + delim=':')) + elif (len(str(self[0][self._dec_name]).split(' ')) == 3): + return np.asarray(AstroHelpers.dms2deg(self[self._dec_name], + delim=' ')) + else: + raise Exception('DEC Format not understood')
+ +
[docs] def info(self): + s = "\nTable: {name:s}\n nrows={s.nrows:d}, ncols={s.ncols:d}, mem={size:s}" + s = s.format(name=self.header.get('NAME', 'Noname'), s=self, + size=pretty_size_print(self.nbytes)) + + s += '\n\nHeader:\n' + vals = list(self.header.items()) + length = max(map(len, self.header.keys())) + fmt = '\t{{0:{0:d}s}} {{1}}\n'.format(length) + for k, v in vals: + s += fmt.format(k, v) + + vals = [(k, self._units.get(k, ''), self._desc.get(k, '')) + for k in self.colnames] + lengths = [(len(k), len(self._units.get(k, '')), len(self._desc.get(k, ''))) + for k in self.colnames] + lengths = list(map(max, (zip(*lengths)))) + + if (self._ra_name is not None) & (self._dec_name is not None): + s += "\nPosition coordinate columns: {0}, {1}\n".format(self._ra_name, + self._dec_name) + + s += '\nColumns:\n' + + fmt = '\t{{0:{0:d}s}} {{1:{1:d}s}} {{2:{2:d}s}}\n'.format(*(k + 1 for k in lengths)) + for k, u, c in vals: + s += fmt.format(k, u, c) + + print(s) + + if len(self._aliases) > 0: + print("\nTable contains alias(es):") + for k, v in self._aliases.items(): + print('\t{0:s} --> {1:s}'.format(k, v))
+ +
[docs] def coneSearch(self, ra, dec, r, outtype=0): + """ Perform a cone search on a table + + The source coordinates (ra0, dec0) are taken from the table columns set as + RA and DEC (in degrees); they are not arguments of this method. + + Parameters + ---------- + ra: float + ra to look for (in degree) + + dec: float + dec to look for (in degree) + + r: float + distance in degrees + + outtype: int + type of outputs + 0 -- minimal, indices of matching coordinates + 1 -- indices and distances of matching coordinates + 2 -- full, boolean filter and distances + + Returns + ------- + t: tuple + if outtype is 0: + only return indices from ra0, dec0 + elif outtype is 1: + return indices from ra0, dec0 and distances + elif outtype is 2: + return conditional vector and distance to all ra0, dec0 + """ + if (self._ra_name is None) or (self._dec_name is None): + raise AttributeError('Coordinate columns not set.') + + ra0 = self.get_RA() + dec0 = self.get_DEC() + return AstroHelpers.conesearch(ra0, dec0, ra, dec, r, outtype=outtype)
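+
+# A minimal usage sketch, assuming ``t`` is an AstroTable whose RA/DEC columns were
+# detected automatically (or set via set_RA / set_DEC); coordinates are hypothetical:
+#
+#     >>> idx = t.coneSearch(10.68, 41.27, 0.5)                   # indices within 0.5 degree
+#     >>> idx, dist = t.coneSearch(10.68, 41.27, 0.5, outtype=1)  # also return the distances
+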
+ +
[docs] def zoneSearch(self, ramin, ramax, decmin, decmax, outtype=0): + """ Perform a zone search on a table, i.e., a rectangular selection + Parameters + ---------- + ramin: float + minimal value of RA + + ramax: float + maximal value of RA + + decmin: float + minimal value of DEC + + decmax: float + maximal value of DEC + + outtype: int + type of outputs + 0 or 1 -- minimal, indices of matching coordinates + 2 -- full, boolean filter and distances + + Returns + ------- + r: sequence + indices or conditional sequence of matching values + """ + + assert( (self._ra_name is not None) & (self._dec_name is not None) ), 'Coordinate columns not set.' + + ra0 = self.get_RA() + dec0 = self.get_DEC() + ind = (ra0 >= ramin) & (ra0 <= ramax) & (dec0 >= decmin) & (dec0 <= decmax) + if outtype <= 2: + return ind + else: + return np.where(ind)
+ +
[docs] def where(self, condition=None, condvars=None, cone=None, zone=None, **kwargs): + """ Read table data fulfilling the given `condition`. + Only the rows fulfilling the `condition` are included in the result. + + Parameters + ---------- + condition: str + expression to evaluate on the table + includes mathematical operations and attribute names + + condvars: dictionary, optional + A dictionary that replaces the local operands in current frame. + + Returns + ------- + out: ndarray/ tuple of ndarrays + result equivalent to :func:`np.where` + """ + if cone is not None: + if len(cone) != 3: + raise ValueError('Expecting cone keywords as a triplet (ra, dec, r)') + if zone is not None: + if len(zone) != 4: + raise ValueError('Expecting zone keywords as a tuple of 4 elements (ramin, ramax, decmin, decmax)') + + if condition is not None: + ind = super(self.__class__, self).where(condition, **kwargs) + if ind is None: + if (cone is None) & (zone is None): + return None + else: + ind = True + + blobs = [] + if (cone is not None): + ra, dec, r = cone + _ind, d = self.coneSearch(ra, dec, r, outtype=1) + ind = ind & _ind.astype(bool) + blobs.append(d) + if (zone is not None): + _ind = self.zoneSearch(zone[0], zone[1], zone[2], zone[3], outtype=1) + ind = ind & _ind + elif (cone is not None) and (zone is not None): # cone + zone + ra, dec, r = cone + ind, d = self.coneSearch(ra, dec, r, outtype=2) + ind = ind & self.zoneSearch(zone[0], zone[1], zone[2], zone[3], outtype=2) + d = d[ind] + ind = np.where(ind) + blobs.append(d) + + return ind, blobs[0]
+ +
[docs] def selectWhere(self, fields, condition=None, condvars=None, cone=None, zone=None, **kwargs): + """ Read table data fulfilling the given `condition`. + Only the rows fulfilling the `condition` are included in the result. + conesearch is also possible through the keyword cone formatted as (ra, dec, r) + zonesearch is also possible through the keyword zone formatted as (ramin, ramax, decmin, decmax) + + Combination of multiple selections is also available. + """ + ind, blobs = self.where(condition, condvars, cone, zone, **kwargs) + tab = self.select(fields, indices=ind) + + if cone is not None: + tab.add_column('separation', np.asarray(blobs), unit='degree') + + if self._ra_name in tab: + tab.set_RA(self._ra_name) + + if self._dec_name in tab: + tab.set_DEC(self._dec_name) + + return tab
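+
+# A minimal usage sketch combining an expression with a cone selection
+# (hypothetical values and column names):
+#
+#     >>> sub = t.selectWhere('*', 'logg > 4.0', cone=(10.68, 41.27, 0.5))
+#     >>> sub['separation']   # column added automatically for cone searches, in degrees
+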
+ + +
[docs]class stats(object): +
[docs] @classmethod + def has_nan(s, v): + return (True in np.isnan(v))
+ +
[docs] @classmethod + def mean(s, v): + return np.nanmean(v)
+ +
[docs] @classmethod + def max(s, v): + return np.nanmax(v)
+ +
[docs] @classmethod + def min(s, v): + return np.nanmin(v)
+ +
[docs] @classmethod + def std(s, v): + return np.nanstd(v)
+ +
[docs] @classmethod + def var(s, v): + return np.var(v)
+ +
[docs] @classmethod + def p16(s, v): + try: + return np.nanpercentile(v, 16) + except AttributeError: + return np.percentile(v, 16)
+ +
[docs] @classmethod + def p84(s, v): + try: + return np.nanpercentile(v, 84) + except AttributeError: + return np.percentile(v, 84)
+ +
[docs] @classmethod + def p50(s, v): + try: + return np.nanmedian(v) + except AttributeError: + return np.percentile(v, 50)
+ + +# ============================================================================= +# Adding some plotting functions +# ============================================================================= + +try: + import pylab as plt + + def plot_function(tab, fn, *args, **kwargs): + """ Generate a plotting method of tab from a given function + + Parameters + ---------- + tab: SimpleTable instance + table instance + + fn: str or callable + if str, will try a function in matplotlib + if callable, calls the function directly + + xname: str + expecting a column name from the table + + yname: str, optional + if provided, another column to use for the plot + + onlywhere: sequence or str, optional + if provided, selects only data with this condition + the condition can be a ndarray slice or a string. + When a string is given, the evaluation calls :func:`SimpleTable.where` + + ax: matplotlib.Axes instance + if provided make sure it uses the axis to do the plots if a mpl + function is used. + + Returns + ------- + r: object + anything returned by the called function + """ + if not hasattr(fn, '__call__'): + ax = kwargs.pop('ax', None) + if ax is None: + ax = plt.gca() + _fn = getattr(ax, fn, None) + if _fn is None: + raise AttributeError('function neither callable or found in matplotlib') + else: + _fn = fn + + onlywhere = kwargs.pop('onlywhere', None) + if type(onlywhere) in basestring: + select = tab.where(onlywhere) + else: + select = onlywhere + + _args = () + for a in args: + if (hasattr(a, '__iter__')): + try: + b = tab[a] + if select is not None: + b = b.compress(select) + if (len(b.dtype) > 1): + b = list((b[k] for k in b.dtype.names)) + _args += (b, ) + except Exception as e: + print(e) + _args += (a, ) + else: + _args += (a, ) + + return _fn(*_args, **kwargs) + + def attached_function(fn, doc=None, errorlevel=0): + """ eclare a function as a method to the class table""" + + def _fn(self, *args, **kwargs): + try: + return plot_function(self, fn, *args, **kwargs) + except Exception as e: + if errorlevel < 1: + pass + else: + raise e + + if doc is not None: + _fn.__doc__ = doc + + return _fn + + SimpleTable.plot_function = plot_function + SimpleTable.plot = attached_function('plot', plt.plot.__doc__) + SimpleTable.hist = attached_function('hist', plt.hist.__doc__) + SimpleTable.hist2d = attached_function('hist2d', plt.hist2d.__doc__) + SimpleTable.hexbin = attached_function('hexbin', plt.hexbin.__doc__) + SimpleTable.scatter = attached_function('scatter', plt.scatter.__doc__) + + # newer version of matplotlib + if hasattr(plt, 'violinplot'): + SimpleTable.violinplot = attached_function('violinplot', plt.violinplot.__doc__) + if hasattr(plt, 'boxplot'): + SimpleTable.boxplot = attached_function('boxplot', plt.boxplot.__doc__) + +except Exception as e: + print(e) +
+ +
+ +
+
+ +
+
+ + + + + Fork me on GitHub + + + + + + \ No newline at end of file diff --git a/_modules/pystellibs/stellib.html b/_modules/pystellibs/stellib.html new file mode 100644 index 0000000..4840949 --- /dev/null +++ b/_modules/pystellibs/stellib.html @@ -0,0 +1,973 @@ + + + + + + + + pystellibs.stellib — pystellibs 1.0 documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for pystellibs.stellib

+"""
+Stellar library class
+
+Intent to implement a generic module to manage stellar library from various
+sources.
+
+The interpolation is implemented from the pegase.2 fortran converted algorithm.
+(this may not be super pythonic though)
+
+.. note::
+
+    a cython version is available for speed up and should be used transparently when available
+    (run make once)
+"""
+import numpy as np
+from scipy.interpolate import interp1d
+from itertools import groupby
+
+from .ezunits import unit, hasUnit
+from .helpers import nbytes, isNestedInstance
+from .future import Path
+from .interpolator import NDLinearInterpolator, find_interpolator
+
+
+lsun = 3.839e+26   # in W (Watts)
+sig_stefan = 5.67037321 * 1e-8  # W * m**-2 * K**-4
+rsun = 6.955e8  # in meters
+_default_interpolator = find_interpolator('lejeune')
+
+
+def _drop_units(q):
+    """ Drop the unit definition silently """
+    try:
+        return q.magnitude
+    except:
+        return q
+
+
+
[docs]class Stellib(object): + """ Basic stellar library class + + Attributes + ---------- + interpolator: interpolator.BaseInterpolator + interpolator to use, default LejeuneInterpolator + """ + def __init__(self, *args, **kwargs): + """ Constructor """ + self.interpolator = find_interpolator( + kwargs.pop('interpolator', None), + osl=self) + if self.interpolator is None: + self.interpolator = _default_interpolator(self) + if not hasattr(self, 'wavelength_unit'): + self.wavelength_unit = None + self._dlogT = 0.5 + self._dlogg = 0.5 + +
[docs] def set_default_extrapolation_bounds(self, dlogT=None, dlogg=None): + if dlogT is not None: + self._dlogT = dlogT + if dlogg is not None: + self._dlogg = dlogg
+ +
[docs] def get_interpolation_data(self): + """ Default interpolation """ + return np.array([self.logT, self.logg, self.logZ]).T
+ + @property + def wavelength(self): + l = np.copy(self._wavelength) + if self.wavelength_unit is not None: + return l * unit[self.wavelength_unit] + else: + return l + + @property + def flux_units(self): + if self.wavelength_unit is not None: + return unit['erg/s/' + self.wavelength_unit] + else: + return 1. + + def _load_(self): + """ Load the library """ + raise NotImplementedError + + @property + def nbytes(self): + """ return the number of bytes of the object """ + return nbytes(self) + +
[docs] def plot_boundary(self, ax=None, dlogT=0., dlogg=0., **kwargs): + """ + Parameters + ---------- + + dlogT: float + margin in logT (see get_boundaries) + + dlogg: float + margin in logg (see get_boundaries) + + .. see also:: + :func:`matplotlib.plot` + For additional kwargs + """ + import matplotlib.patches as patches + from pylab import gca + if ax is None: + ax = gca() + p = self.get_boundaries(dlogT=dlogT, dlogg=dlogg) + ax.add_patch(patches.PathPatch(p, **kwargs)) + return p
+ + def __add__(self, other): + if not isNestedInstance(other, Stellib): + raise ValueError('expecting a Stellib object, got {0}'.format(type(other))) + + return CompositeStellib([self, other]) + + def __repr__(self): + return "{0:s}, ({1:s})\n{2:s}".format(self.name, nbytes(self, pprint=True), + object.__repr__(self)) + +
[docs] def get_weights(self, logT, logg, logL, weights=None): + """ Returns the proper weights for the interpolation + + in spectra libraries the default is to have Lbol=1 normalization + + Parameters + ---------- + logT: float or ndarray + log-temperatures log(T/K) + logg: float or ndarray + log-gravity log(g) + logL: float or ndarray + bolometric luminosity (log (L/Lsun)) + """ + # weights to apply during the interpolation (note that radii must be in cm) + # Stellar library models are given in cm^-2 ( 4 pi R) + # Compute radii of each point using log(T) and log(L) + L = 10 ** logL * unit['lsun'].to("ergs/s").magnitude + if weights is not None: + weights *= L + else: + weights = L + return weights
+ +
[docs] def generate_stellar_spectrum(self, logT, logg, logL, Z, + raise_extrapolation=True, **kwargs): + """ Generates individual spectrum for the given stars APs and the + stellar library + + Returns NaN spectra if the boundary conditions are not met (no extrapolation) + + Parameters + ---------- + logT: float + temperature + + logg: float + log-gravity + + logL: float + log-luminosity + + Z: float + metallicity + + raise_extrapolation: bool + if set throw error on extrapolation + + null: value + value of the flux when extrapolation and raise_extrapolation is not set + + returns + ------- + s0: ndarray, shape=(len(stars), len(l0)) + array of spectra, one per input star + Spectrum in ergs/s/AA or ergs/s/AA/Lsun + """ + null_value = kwargs.pop('null', np.nan) + + # weights to apply during the interpolation (note that radii must be in cm) + weights = self.get_weights(logT, logg, logL) + logZ = np.log10(Z) + + l0 = self.wavelength + + # check boundary conditions, keep the data but do not compute the sed + # if outside + if not self.points_inside(np.atleast_2d([logT, logg]))[0]: + if raise_extrapolation: + raise RuntimeError('Outside library interpolation range') + else: + return l0, np.full(len(self.wavelength), null_value) + + aps = logT, logg, logZ + spec = self.interpolator.interp(aps) * weights + + return spec
+ + +
[docs] def generate_individual_values(self, stars, values, **kwargs): + """ Generates individual spectra for the given stars and stellar library + + Returns NaN spectra if the boundary conditions are not met (no extrapolation) + + Parameters + ---------- + stars: Table + contains at least (logT, logg, logL, Z) of the considered stars + + values: sequence or attribute name + value to interpolate + + dlogT: float + margin in logT + + dlogg: float + margin in logg + + returns + ------- + values: sequence + value to interpolate + """ + _values = np.atleast_1d(getattr(self, values, values)) + + null_value = kwargs.pop('null', np.nan) + dlogT = kwargs.pop('dlogT', self._dlogT) + dlogg = kwargs.pop('dlogg', self._dlogg) + ndata = len(stars) + logT, logg, logL, Z = stars['logT'], stars['logg'], stars['logL'], stars['Z'] + + # weights to apply during the interpolation (note that radii must be in cm) + weights = self.get_weights(logT, logg, logL) + + # check boundary conditions, keep the data but do not compute the sed + # if outside + bound = self.points_inside(np.array([logT, logg]).T, + dlogT=dlogT, dlogg=dlogg) + if np.ndim(_values) == 1: + specs = np.empty(ndata, dtype=float) + else: + specs = np.empty((ndata, _values.shape[1]), dtype=float) + + specs[~bound] = null_value + logZ = np.log10(Z) + aps = np.array([logT, logg, logZ]).T + s = self.interpolator.interp_other(aps[bound], _values) * weights[bound] + specs[bound] = np.squeeze(s) + + return specs
+ +
[docs] def generate_individual_spectra(self, stars, **kwargs): + """ Generates individual spectra for the given stars and stellar library + + Returns NaN spectra if the boundary conditions are not met (no extrapolation) + + Parameters + ---------- + stars: Table + contains at least (logT, logg, logL, Z) of the considered stars + + dlogT: float + margin in logT + + dlogg: float + margin in logg + + returns + ------- + l0: ndarray, ndim=1 + wavelength definition of the spectra + wavelength in AA + + s0: ndarray, shape=(len(stars), len(l0)) + array of spectra, one per input star + Spectrum in ergs/s/AA or lsun/AA + """ + null_value = kwargs.pop('null', np.nan) + dlogT = kwargs.pop('dlogT', self._dlogT) + dlogg = kwargs.pop('dlogg', self._dlogg) + logT, logg, logL, Z = stars['logT'], stars['logg'], stars['logL'], stars['Z'] + ndata = len(logT) + + # weights to apply during the interpolation (note that radii must be in cm) + weights = self.get_weights(logT, logg, logL) + + # check boundary conditions, keep the data but do not compute the sed + # if outside + bound = self.points_inside(np.array([logT, logg]).T, + dlogT=dlogT, dlogg=dlogg) + specs = np.empty((ndata, len(self._wavelength)), dtype=float) + specs[~bound] = np.full(len(self.wavelength), null_value) + + logZ = np.log10(Z) + aps = np.array([logT, logg, logZ]).T + s = self.interpolator.interp(aps[bound]) * weights[bound, None] + specs[bound] = s + + l0 = self.wavelength + specs = specs * self.flux_units + + return l0, specs
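+
+# A minimal usage sketch, assuming ``osl`` is a Stellib instance (e.g. BaSeL())
+# and ``stars`` is a table-like object with 'logT', 'logg', 'logL' and 'Z' columns:
+#
+#     >>> wave, specs = osl.generate_individual_spectra(stars)
+#     >>> specs[0]   # spectrum of the first star; stars outside the grid get NaN rows
+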
+ +
[docs] def points_inside(self, xypoints, dlogT=0.1, dlogg=0.5): + """ + Returns whether each point is inside the polygon defined by the boundary of the library + + Parameters + ---------- + xypoints: sequence + a sequence of N (logT, logg) pairs. + + dlogT: float + margin in logT + + dlogg: float + margin in logg + + returns + ------- + r: ndarray(dtype=bool) + a boolean ndarray, True for points inside the polygon. + A point on the boundary may be treated as inside or outside. + """ + p = self.get_boundaries(dlogT=dlogT, dlogg=dlogg) + return p.contains_points(xypoints)
+ +
[docs] def get_radius(self, logl, logt): + """ Returns the radius of a star given its luminosity and temperature + + Assuming a black body, it comes: + + .. math:: + + R ^ 2 = L / ( 4 \pi \sigma T ^ 4 ), + + with: + + * L, luminosity in W, + * pi, 3.141592... + * sig, Stefan constant in W * m**-2 * K**-4 + * T, temperature in K + + Parameters + ---------- + logl: ndarray[float, ndim=1] + log luminosities from the isochrones, in Lsun + + logt: ndarray[float, ndim=1] + log temperatures from the isochrones, in K + + returns + ------- + radii: ndarray[float, ndim=1] + array of radii in m (SI units) + """ + return np.sqrt( (10 ** logl) * lsun / (4.0 * np.pi * sig_stefan * ((10 ** logt) ** 4)) )
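+
+# A quick sanity check of the formula with solar values (logL = 0, Teff = 5772 K),
+# assuming ``osl`` is any Stellib instance:
+#
+#     >>> osl.get_radius(0., np.log10(5772.))   # ~ 6.96e8 m, close to rsun defined above
+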
+ +
[docs] def get_boundaries(self, dlogT=0.1, dlogg=0.3, **kwargs): + """ Returns the closed boundary polygon around the stellar library with + given margins + + Parameters + ---------- + s: Stellib + Stellar library object + + dlogT: float + margin in logT + + dlogg: float + margin in logg + + returns + ------- + b: ndarray[float, ndim=2] + closed boundary edge points: [logT, logg] + + .. note:: + + as computing the boundary could take time, it is saved in the object + and only recomputed when parameters are updated + """ + # if bbox is defined then assumes it is more precise and use it instead. + if dlogT is None: + dlogT = 0.1 + if dlogg is None: + dlogg = 0.3 + if hasattr(self, 'bbox'): + return Path(self.bbox(dlogT, dlogg)) + + if getattr(self, '_bound', None) is not None: + # check if recomputing is needed + if ((self._bound[1] - dlogT) < 1e-3) and (abs(self._bound[2] - dlogg) < 1e-3): + return self._bound[0] + + leftb = [(np.max(self.logT[self.logg == k]) + dlogT, k ) for k in np.unique(self.logg)] + leftb += [(leftb[-1][1], leftb[-1][0] + dlogg)] + leftb = [(leftb[0][1], leftb[0][0] - dlogg)] + leftb + + rightb = [(np.min(self.logT[self.logg == k]) - dlogT, k) for k in np.unique(self.logg)[::-1]] + rightb += [(rightb[-1][1], rightb[-1][0] - dlogg)] + rightb = [(rightb[0][1], rightb[0][0] + dlogg)] + rightb + + b = leftb + rightb + b += [b[0]] + + self._bound = (Path(np.array(b)), dlogT, dlogg) + return self._bound[0]
+ + +
[docs]class AtmosphereLib(Stellib): + """ + Almost identical to a spectral library. The difference lies into the units + of the input libraries. + """ +
[docs] def get_weights(self, logT, logg, logL, weights=None): + """ Returns the proper weights for the interpolation + Stellar atmospheres are normalized to Radius = 1 + + Parameters + ---------- + logT: float or ndarray + log-temperatures log(T/K) + logg: float or ndarray + log-gravity log(g) + logL: float or ndarray + bolometric luminosity (log (L/Lsun)) + """ + # weights to apply during the interpolation (note that radii must be in cm) + # Stellar library models are given in cm^-2 ( 4 pi R) + # Compute radii of each point using log(T) and log(L) + radii = self.get_radius(logL, logT) + if weights is not None: + weights *= 4. * np.pi * (radii * 1e2) ** 2 + else: + weights = 4. * np.pi * (radii * 1e2) ** 2 + return weights
+ + +
[docs]class CompositeStellib(Stellib): + """ Generates an object from the union of multiple individual libraries """ + def __init__(self, osllist, *args, **kwargs): + self._olist = osllist + self._dlogT = 0.5 + self._dlogg = 0.5 + + @property + def name(self): + return ' + '.join([sl.name for sl in self._olist]) + +
[docs] def set_default_extrapolation_bounds(self, dlogT=None, dlogg=None): + if dlogT is not None: + self._dlogT = dlogT + if dlogg is not None: + self._dlogg = dlogg + for oslk in self._olist: + oslk.set_default_extrapolation_bounds(dlogT, dlogg)
+ + def __add__(self, other): + """ Adding a library after """ + if not isNestedInstance(other, Stellib): + raise ValueError('expecting a Stellib object, got {0}'.format(type(other))) + + lst = [k for k in self._olist] + [other] + return CompositeStellib(lst) + + def __radd__(self, other): + """ Adding a library before """ + if not isNestedInstance(other, Stellib): + raise ValueError('expecting a Stellib object, got {0}'.format(type(other))) + + lst = [other] + [k for k in self._olist] + return CompositeStellib(lst) + + @property + def wavelength(self): + """ return a common wavelength sampling to all libraries. This can be + used to reinterpolate any spectrum onto a common definition """ + # check units + has_units = [hasUnit(osl.wavelength) for osl in self._olist] + test = sum(has_units) + if (test == 0): + return np.unique(np.asarray([ osl._wavelength for osl in self._olist ])) + + # which library sets the units + common_unit = self._olist[0].wavelength_unit + libset_unit = self._olist[0].name + # if some libraries do not have units... Should not happen often! + if (test < len(self._olist)): + for k, osl in enumerate(self._olist): + common_unit = osl.wavelength_unit + libset_unit = osl.name + if common_unit is not None: + break + print("Warning: Some libraries do not have units. Assuming consistency with {0:s}".format(libset_unit)) + + wave = [] + for osl in self._olist: + wave.append(osl.wavelength.to(common_unit).magnitude) + return np.unique(np.array(wave)) * unit[common_unit] + + @property + def _wavelength(self): + return _drop_units(self.wavelength) + + @property + def source(self): + return ' + '.join([k.name for k in self._olist]) + + @property + def logT(self): + return np.hstack([osl.logT for osl in self._olist]) + + @property + def logg(self): + return np.hstack([osl.logg for osl in self._olist]) + + @property + def Teff(self): + return np.hstack([osl.Teff for osl in self._olist]) + + @property + def Z(self): + return np.hstack([osl.Z for osl in self._olist]) + + @property + def logZ(self): + return np.hstack([osl.logZ for osl in self._olist]) + + @property + def flux_units(self): + return self._olist[0].flux_units + +
[docs] def which_osl(self, xypoints, **kwargs): + """ + Returns the library indice that contains each point in xypoints + + The decision is made from a two step search: + + * first, each point is checked against the strict boundary of each + library (i.e., dlogT = 0, dlogg = 0). + * second, if points are not found in strict mode, the boundary is + relaxed and a new search is made. + + Each point is associated to the first library matching the above conditions. + + Parameters + ---------- + xypoints: sequence + a sequence of N logg, logT pairs. + + dlogT: float + margin in logT + + dlogg: float + margin in logg + + returns + ------- + res: ndarray(dtype=int) + a ndarray, 0 meaning no library covers the point, and 1, ... n, for the n-th library + """ + dlogT = kwargs.pop('dlogT', self._dlogT) + dlogg = kwargs.pop('dlogg', self._dlogg) + + xy = np.atleast_2d(xypoints) + + # check that all points are in the full boundary area + # MF: testing why points_inside does not agree on all computers... + # as we do not keep individual results, no need to store then all + # first, collapse directly + + # res_temp = np.zeros((len(xy),len(self._olist))) + # for ek,ok in enumerate(self._olist): + # res_temp[:, ek] = ok.points_inside(xy, dlogT=dlogT, dlogg=dlogg).astype(int) + res_temp = np.zeros(len(xy), dtype=int) + for ek, ok in enumerate(self._olist): + res_temp += ok.points_inside(xy, dlogT=dlogT, dlogg=dlogg).astype(int) + + ind = res_temp > 0 + res = np.zeros(len(xy), dtype=int) + res[ind] = 1 + res = res - 1 + + # res = self.points_inside(xy, dlogT=dlogT, dlogg=dlogg).astype(int) - 1 + # if res == -1: invalid point, res == 0: proceed + + if max(res) < 0: + # DEBUG: should generate an exeception in further functions + # TODO: get rid and replace + return res + # return res + + # Strict mode + # =========== + # Not extrapolation allowed >> dlogT = 0, dlogg = 0 + # 0 is used to flag points without a matching library yet + # libraries are then indexed from 1 to n + # -1 means point outside the compound library + for ek, ok in enumerate(self._olist): + if 0 in res: + ind = np.atleast_1d(np.squeeze(np.where(res == 0))) + r = ok.points_inside(xy[ind], dlogT=0., dlogg=0.) + res[ind[r]] = ek + 1 + + # Relaxed mode + # ============ + # In this case we accept some flexibility in the boundary limits, + # which allows limited extrapolation ranges. + # this only affects points not already matched + if 0 in res: + for ek, ok in enumerate(self._olist): + if 0 in res: + ind = np.atleast_1d(np.squeeze(np.where(res == 0))) + r = ok.points_inside(xy[ind], dlogT=dlogT, dlogg=dlogg) + res[ind[r]] = ek + 1 + return res
+ + def __repr__(self): + return "CompositeStellib, {0}\n{1}".format(object.__repr__(self), '\n'.join([k.name for k in self._olist])) + +
[docs] def get_boundaries(self, **kwargs): + """ Returns the closed boundary polygon around the stellar library with + given margins + + Parameters + ---------- + s: Stellib + Stellar library object + + dlogT: float + margin in logT + + dlogg: float + margin in logg + + returns + ------- + b: ndarray[float, ndim=2] + (closed) boundary points: [logg, Teff] (or [Teff, logg] is swap is True) + + .. note:: + + as computing the boundary could take time, it is saved in the object + and only recomputed when parameters are updated + """ + dlogT = kwargs.pop('dlogT', self._dlogT) + dlogg = kwargs.pop('dlogg', self._dlogg) + + if getattr(self, '_bound', None) is not None: + if ((self._bound[1] - dlogT) < 1e-3) and (abs(self._bound[2] - dlogg) < 1e-3): + return self._bound[0] + + b = [osl.get_boundaries(dlogT=dlogT, dlogg=dlogg, **kwargs) for osl in self._olist] + self._bound = (Path.make_compound_path(*b), dlogT, dlogg) + return self._bound[0]
+ +
[docs] def generate_stellar_spectrum(self, logT, logg, logL, Z, + raise_extrapolation=True, **kwargs): + """ Generates individual spectrum for the given stars APs and the + stellar library + + Returns NaN spectra if the boundary conditions are not met (no extrapolation) + + Parameters + ---------- + logT: float + temperature + + logg: float + log-gravity + + logL: float + log-luminosity + + Z: float + metallicity + + raise_extrapolation: bool + if set throw error on extrapolation + + null: value + value of the flux when extrapolation and raise_extrapolation is not set + + returns + ------- + s0: ndarray, shape=(len(stars), len(l0)) + array of spectra, one per input star + Spectrum in ergs/s/AA or ergs/s/AA/Lsun + """ + try: + bounds = kwargs.pop('bounds', None) + dlogT = bounds.get('dlogT', self._dlogT) + dlogg = bounds.get('dlogg', self._dlogg) + except: + dlogT = None + dlogg = None + + osl_index = self.which_osl(np.atleast_2d([logT, logg]), dlogT=dlogT, dlogg=dlogg)[0] + osl = self._olist[osl_index - 1] + specs = osl.generate_stellar_spectrum(logT, logg, logL, Z, + raise_extrapolation, + **kwargs) + specs = self.reinterpolate_spectra(osl.wavelength, specs, fill_value=0., bounds_error=False) + return specs
+ +
[docs] def reinterpolate_spectra(self, l0, specs, **kwargs): + """ One-dimensional linear interpolation onto the common wavelength. + + Returns the one-dimensional interpolated spectrum + + Parameters + ---------- + l0 : 1-D sequence of floats (with units or not) + wavelength of the spectrum to interpolate + + specs : 1-D sequence of floats + spectrum to reinterpolate + + left : float, optional + Value to return for `x < xp[0]`, default is `fp[0]`. + + right : float, optional + Value to return for `x > xp[-1]`, default is `fp[-1]`. + + period : None or float, optional + A period for the x-coordinates. This parameter allows the proper + interpolation of angular x-coordinates. Parameters `left` and `right` + are ignored if `period` is specified. + + Returns + ------- + spec : ndarray + The interpolated values + """ + # TODO: proper reinterpolation that conserves energy... but makes a new + # resolution + wave = self.wavelength + try: + wave = wave.to(l0.unit) + except: + wave = _drop_units(wave) + func = interp1d(_drop_units(l0), _drop_units(specs), **kwargs) + f = func(_drop_units(wave)) + return f
+ +
[docs] def generate_individual_spectra(self, stars, **kwargs): + """ Generates individual spectra for the given stars and stellar library + + Returns NaN spectra if the boundary conditions are not met (no extrapolation) + + Parameters + ---------- + stars: Table + contains at least (logT, logg, logL, Z) of the considered stars + + returns + ------- + l0: ndarray, ndim=1 + wavelength definition of the spectra + wavelength in AA + + s0: ndarray, shape=(len(stars), len(l0)) + array of spectra, one per input star + Spectrum in ergs/s/AA or ergs/s/AA/Lsun + """ + null_value = kwargs.pop('null', np.nan) + try: + bounds = kwargs.pop('bounds', None) + dlogT = bounds.get('dlogT', self._dlogT) + dlogg = bounds.get('dlogg', self._dlogg) + except: + dlogT = None + dlogg = None + + ndata = len(stars) + logT, logg, logL, Z = stars['logT'], stars['logg'], stars['logL'], stars['Z'] + + # find which library per star + osl_index = self.which_osl(list(zip(logT, logg)), dlogT=dlogT, dlogg=dlogg) + + # group calculations per library + groups = [(osl_i, [k[0] for k in grp]) + for osl_i, grp in groupby(enumerate(osl_index), lambda x:x[1])] + + # Do the actual interpolation, avoiding exptrapolations + spectra = np.empty( (ndata, len(self.wavelength)), dtype=float ) + + for osl_i, idx_group in groups: + ind = np.array(idx_group) + if osl_i > 0: + _osl = self._olist[osl_i - 1] + lambdas, specs = _osl.generate_individual_spectra(stars[ind]) + spectra[ind] = self.reinterpolate_spectra( + _drop_units(lambdas), + _drop_units(specs), + bounds_error=False, + fill_value=0., + **kwargs + ) + else: + spectra[ind] = null_value + + l0 = self.wavelength + spectra = spectra * self.flux_units + + return l0, spectra, osl_index
+ +
[docs] def generate_individual_values(self, stars, values, **kwargs): + """ Generates individual spectra for the given stars and stellar library + + Returns NaN spectra if the boundary conditions are not met (no extrapolation) + + Parameters + ---------- + stars: Table + contains at least (logT, logg, logL, Z) of the considered stars + + values: sequence or attribute name + value to interpolate + + returns + ------- + values: sequence + value to interpolate + """ + null_value = kwargs.pop('null', np.nan) + try: + bounds = kwargs.pop('bounds', None) + dlogT = bounds.get('dlogT', self._dlogT) + dlogg = bounds.get('dlogg', self._dlogg) + except: + dlogT = None + dlogg = None + + ndata = len(stars) + logT, logg, logL, Z = stars['logT'], stars['logg'], stars['logL'], stars['Z'] + + # find which library per star + osl_index = self.which_osl(list(zip(logT, logg)), dlogT=dlogT, dlogg=dlogg) + + # group calculations per library + groups = [(osl_i, [k[0] for k in grp]) + for osl_i, grp in groupby(enumerate(osl_index), lambda x:x[1])] + + # Do the actual interpolation, avoiding exptrapolations + spectra = np.empty(ndata, dtype=float ) + + for osl_i, idx_group in groups: + ind = np.array(idx_group) + if osl_i > 0: + _osl = self._olist[osl_i - 1] + spectra[ind] = _osl.generate_individual_values(stars[ind], values, **kwargs) + else: + spectra[ind] = null_value + + return spectra, osl_index
+
+ +
+ +
+
+ +
+
+ + + + + Fork me on GitHub + + + + + + \ No newline at end of file diff --git a/_modules/pystellibs/tlusty.html b/_modules/pystellibs/tlusty.html new file mode 100644 index 0000000..747e922 --- /dev/null +++ b/_modules/pystellibs/tlusty.html @@ -0,0 +1,220 @@ + + + + + + + + pystellibs.tlusty — pystellibs 1.0 documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for pystellibs.tlusty

+import numpy as np
+from .stellib import AtmosphereLib
+from .config import libsdir
+from .simpletable import SimpleTable
+try:
+    from astropy.io import fits as pyfits
+except ImportError:
+    import pyfits
+
+
+
[docs]class Tlusty(AtmosphereLib): + """ + Tlusty O and B stellar atmospheres + + * NLTE + * Parallel Planes + * line blanketing + + References + ---------- + Hubeny 1988 for initial reference + Lanz, T., & Hubeny, I. (2003) for more recent (NL TE) developments + + * **OSTAR2002 Grid**: O-type stars, 27500 K <= Teff <= 55000 K + * Reference: Lanz & Hubeny (2003) + + * **BSTAR2006 Grid**: Early B-type stars, 15000 K <= Teff <= 30000 K + * Reference: Lanz & Hubeny (2007) + + files are available at: http://nova.astro.umd.edu/Tlusty2002/database/ + + O and B stars rebinned to nearly 20,000 frequency points (for CLOUDY usage) + http://nova.astro.umd.edu/Tlusty2002/database/obstar_merged_3d.ascii.gz + """ + def __init__(self, *args, **kwargs): + self.name = 'Tlusty' + self.source = libsdir + '/tlusty.lowres.grid.fits' + self._load_() + AtmosphereLib.__init__(self, *args, **kwargs) + + def _load_(self): + with pyfits.open(self.source) as f: + # load data + self._getWaveLength_(f) + self._getTGZ_(f) + self._getSpectra_(f) + self._getWaveLength_units(f) + + def _getWaveLength_units(self, f): + self.wavelength_unit = 'angstrom' + + def _getWaveLength_(self, f): + self._wavelength = f[0].data[-1] + + def _getTGZ_(self, f): + self.grid = SimpleTable(f[1].data) + self.grid.header.update(f[1].header.items()) + self.grid.header['NAME'] = 'TGZ' + + def _getSpectra_(self, f): + self.spectra = f[0].data[:-1] + +
[docs] def bbox(self, dlogT=0.05, dlogg=0.25): + """ Boundary of Tlusty library + + Parameters + ---------- + dlogT: float + log-temperature tolerance before extrapolation limit + + dlogg: float + log-g tolerance before extrapolation limit + + Returns + ------- + bbox: ndarray + (logT, logg) edges of the bounding polygon + """ + bbox = [(4.176 - dlogT, 4.749 + dlogg), + (4.176 - dlogT, 1.750 - dlogg), + (4.176 + dlogT, 1.750 - dlogg), + (4.255 + dlogT, 2.000 - dlogg), + (4.447 + dlogT, 2.750 - dlogg), + (4.478 + dlogT, 3.000 - dlogg), + (4.544 + dlogT, 3.250 - dlogg), + (4.740 + dlogT, 4.000 - dlogg), + (4.740 + dlogT, 4.749 + dlogg), + (4.176 - dlogT, 4.749 + dlogg) ] + + return np.array(bbox)
+ + @property + def logT(self): + return self.grid['logT'] + + @property + def logg(self): + return self.grid['logg'] + + @property + def Teff(self): + return self.grid['Teff'] + + @property + def Z(self): + return self.grid['Z'] + + @property + def logZ(self): + return np.log10(self.Z)
+
+ +
+ +
+
+ +
+
+ + + + + Fork me on GitHub + + + + + + \ No newline at end of file diff --git a/_sources/index.rst.txt b/_sources/index.rst.txt index 68ae867..22089cc 100644 --- a/_sources/index.rst.txt +++ b/_sources/index.rst.txt @@ -1,15 +1,125 @@ -.. No Errors Test Project documentation master file, created by - sphinx-quickstart on Fri Aug 30 17:07:56 2019. +.. pystellibs documentation master file, created by + sphinx-quickstart on Mon Oct 17 14:28:28 2016. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. -Welcome to No Errors Test Project's documentation! -================================================== +pystellibs - making synthetic spectra +====================================== + +This is a set of tools to compute synthetic spectra in a simple way, ideal to +integrate in larger projects. + +This package provides a common interface to compute a single or a collection of +spectra using spectral libraries or atmospheres. + +Package main content +~~~~~~~~~~~~~~~~~~~~ + +The main package articulates around 2 parts: + +* :class:`pystellibs.stellib.Stellib` a stellar spectra library, commonly normalized to + bolometric luminosities +* :class:`pystellibs.stellib.AtmosphereLib` a stellar atmosphere library, commonly normalized to + stellar radii. + +The interpolation within libraries is left as a separated component. + +This package provides already multiple input libraries: + +* :class:`pystellibs.basel.BaSeL`: BaSeL 2.2, ~ Atlas 9 empirically recalibrated (Leujeune et al 1998) +* :class:`pystellibs.rauch.Rauch`: a White dwarf library +* :class:`pystellibs.kurucz.Kurucz`: Castelli and Kurucz 2004 or ATLAS9 +* :class:`pystellibs.tlusty.Tlusty`: NLTE O, B stars [Lanz, T., & Hubeny, I. (2003)] +* :class:`pystellibs.elodie.Elodie`: version 3.1, high resolution optical library. +* :class:`pystellibs.munari.Munari`: extended ATLAS9 stellar atmospheres (Munari et al. 2005 A&A 442 1127) +* :class:`pystellibs.btsettl.BTSettl`: BT-Settl Library (Allard, Hauschildt and Schweitzer 2000) + + +Future libraries to be included: + +* XSL: Xshooter spectral library +* PHOENIX +* MARCS + +References (TBU) +---------------- + +* Leujeune et al 1998: http://cdsads.u-strasbg.fr/abs/1998yCat..41300065L + + +API +~~~ + +The API has been reduced to minimum to make it easy to use. Mostly 2 functions: + +* :func:`Stellib.generate_stellar_spectrum` + that computes one spectrum for one set of stellar parameters +* :func:`Stellib.generate_individual_spectra` + that computes a spectrum for each of many sets of paramaters + +Contents: .. toctree:: :maxdepth: 2 - :caption: Hello World! + modules + + + +Quick Start +~~~~~~~~~~~ + +* This example shows how to use one of 2 libraries to compute a spectrum + +.. code-block:: python + + import pylab as plt + from pystellibs import BaSeL, Kurucz + + # load 2 libraries + basel = BaSeL() + kurucz = Kurucz() + ap = (4., 3.5, 0., 0.02) + sb = basel.generate_stellar_spectrum(*ap) + sk = kurucz.generate_stellar_spectrum(*ap) + plt.figure() + plt.loglog(osl._wavelength, sb, label='BaSel') + plt.loglog(osl._wavelength, sk, label='Kurucz') + plt.legend(frameon=False, loc='lower right') + plt.xlabel("Wavelength [{0}]".format(basel.wavelength_unit)) + plt.ylabel("Flux [{0}]".format(basel.flux_units)) + plt.xlim(800, 5e4) + plt.ylim(1e25, 5e30) + plt.tight_layout() + +.. image:: single_spec_libs.png + :scale: 70 % + +* Combining multiple libraries (with priority): the following example combines + BaSeL with white-dwarf models. + +.. 
code-block:: python + + from pystellibs import BaSeL, Rauch + + # Combine 2 libraries by priority order + lib = BaSeL() + Rauch() + + for osl in lib._olist: + l = plt.plot(osl.logT, osl.logg, 'o')[0] + osl.plot_boundary(color=l.get_color(), dlogT=0.1, dlogg=0.3, alpha=0.3, + label=osl.name) + + plt.xlim(5.6, 2.8) + plt.ylim(8.5, -2) + plt.xlabel('log T$_{eff}$') + plt.ylabel('log g') + plt.tight_layout() + + plt.legend(frameon=False, loc='upper left') + +.. image:: combined_libs.png + :scale: 70 % Indices and tables @@ -18,3 +128,4 @@ Indices and tables * :ref:`genindex` * :ref:`modindex` * :ref:`search` + diff --git a/_sources/modules.rst.txt b/_sources/modules.rst.txt new file mode 100644 index 0000000..97cd888 --- /dev/null +++ b/_sources/modules.rst.txt @@ -0,0 +1,7 @@ +pystellibs +========== + +.. toctree:: + :maxdepth: 4 + + pystellibs diff --git a/_sources/pystellibs.ezunits.rst.txt b/_sources/pystellibs.ezunits.rst.txt new file mode 100644 index 0000000..c625969 --- /dev/null +++ b/_sources/pystellibs.ezunits.rst.txt @@ -0,0 +1,22 @@ +pystellibs.ezunits package +========================== + +Submodules +---------- + +pystellibs.ezunits.pint module +------------------------------ + +.. automodule:: pystellibs.ezunits.pint + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pystellibs.ezunits + :members: + :undoc-members: + :show-inheritance: diff --git a/_sources/pystellibs.future.rst.txt b/_sources/pystellibs.future.rst.txt new file mode 100644 index 0000000..4e903d6 --- /dev/null +++ b/_sources/pystellibs.future.rst.txt @@ -0,0 +1,22 @@ +pystellibs.future package +========================= + +Submodules +---------- + +pystellibs.future.path module +----------------------------- + +.. automodule:: pystellibs.future.path + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pystellibs.future + :members: + :undoc-members: + :show-inheritance: diff --git a/_sources/pystellibs.interpolator.rst.txt b/_sources/pystellibs.interpolator.rst.txt new file mode 100644 index 0000000..45ebf2b --- /dev/null +++ b/_sources/pystellibs.interpolator.rst.txt @@ -0,0 +1,38 @@ +pystellibs.interpolator package +=============================== + +Submodules +---------- + +pystellibs.interpolator.interpolator module +------------------------------------------- + +.. automodule:: pystellibs.interpolator.interpolator + :members: + :undoc-members: + :show-inheritance: + +pystellibs.interpolator.lejeune module +-------------------------------------- + +.. automodule:: pystellibs.interpolator.lejeune + :members: + :undoc-members: + :show-inheritance: + +pystellibs.interpolator.ndlinear module +--------------------------------------- + +.. automodule:: pystellibs.interpolator.ndlinear + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pystellibs.interpolator + :members: + :undoc-members: + :show-inheritance: diff --git a/_sources/pystellibs.rst.txt b/_sources/pystellibs.rst.txt new file mode 100644 index 0000000..b06b0c0 --- /dev/null +++ b/_sources/pystellibs.rst.txt @@ -0,0 +1,135 @@ +pystellibs package +================== + +Subpackages +----------- + +.. toctree:: + + pystellibs.ezunits + pystellibs.future + pystellibs.interpolator + +Submodules +---------- + +pystellibs\.basel module +------------------------ + +.. 
automodule:: pystellibs.basel + :members: + :undoc-members: + :show-inheritance: + +pystellibs\.btsettl module +-------------------------- + +.. automodule:: pystellibs.btsettl + :members: + :undoc-members: + :show-inheritance: + +pystellibs\.config module +------------------------- + +.. automodule:: pystellibs.config + :members: + :undoc-members: + :show-inheritance: + +pystellibs\.elodie module +------------------------- + +.. automodule:: pystellibs.elodie + :members: + :undoc-members: + :show-inheritance: + +pystellibs\.ezmap module +------------------------ + +.. automodule:: pystellibs.ezmap + :members: + :undoc-members: + :show-inheritance: + +pystellibs\.helpers module +-------------------------- + +.. automodule:: pystellibs.helpers + :members: + :undoc-members: + :show-inheritance: + +pystellibs\.kurucz module +------------------------- + +.. automodule:: pystellibs.kurucz + :members: + :undoc-members: + :show-inheritance: + +pystellibs\.marcs module +------------------------ + +.. automodule:: pystellibs.marcs + :members: + :undoc-members: + :show-inheritance: + +pystellibs\.munari module +------------------------- + +.. automodule:: pystellibs.munari + :members: + :undoc-members: + :show-inheritance: + +pystellibs\.pbar module +----------------------- + +.. automodule:: pystellibs.pbar + :members: + :undoc-members: + :show-inheritance: + +pystellibs\.rauch module +------------------------ + +.. automodule:: pystellibs.rauch + :members: + :undoc-members: + :show-inheritance: + +pystellibs\.simpletable module +------------------------------ + +.. automodule:: pystellibs.simpletable + :members: + :undoc-members: + :show-inheritance: + +pystellibs\.stellib module +-------------------------- + +.. automodule:: pystellibs.stellib + :members: + :undoc-members: + :show-inheritance: + +pystellibs\.tlusty module +------------------------- + +.. automodule:: pystellibs.tlusty + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pystellibs + :members: + :undoc-members: + :show-inheritance: diff --git a/_static/basic.css b/_static/basic.css index 0119285..7577acb 100644 --- a/_static/basic.css +++ b/_static/basic.css @@ -4,7 +4,7 @@ * * Sphinx stylesheet -- basic theme. * - * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. 
* */ @@ -15,6 +15,12 @@ div.clearer { clear: both; } +div.section::after { + display: block; + content: ''; + clear: left; +} + /* -- relbar ---------------------------------------------------------------- */ div.related { @@ -124,7 +130,7 @@ ul.search li a { font-weight: bold; } -ul.search li div.context { +ul.search li p.context { color: #888; margin: 2px 0 0 30px; text-align: left; @@ -216,7 +222,7 @@ table.modindextable td { /* -- general body styles --------------------------------------------------- */ div.body { - min-width: 450px; + min-width: 360px; max-width: 800px; } @@ -231,16 +237,6 @@ a.headerlink { visibility: hidden; } -a.brackets:before, -span.brackets > a:before{ - content: "["; -} - -a.brackets:after, -span.brackets > a:after { - content: "]"; -} - h1:hover > a.headerlink, h2:hover > a.headerlink, h3:hover > a.headerlink, @@ -271,25 +267,25 @@ p.rubric { font-weight: bold; } -img.align-left, .figure.align-left, object.align-left { +img.align-left, figure.align-left, .figure.align-left, object.align-left { clear: left; float: left; margin-right: 1em; } -img.align-right, .figure.align-right, object.align-right { +img.align-right, figure.align-right, .figure.align-right, object.align-right { clear: right; float: right; margin-left: 1em; } -img.align-center, .figure.align-center, object.align-center { +img.align-center, figure.align-center, .figure.align-center, object.align-center { display: block; margin-left: auto; margin-right: auto; } -img.align-default, .figure.align-default { +img.align-default, figure.align-default, .figure.align-default { display: block; margin-left: auto; margin-right: auto; @@ -313,24 +309,35 @@ img.align-default, .figure.align-default { /* -- sidebars -------------------------------------------------------------- */ -div.sidebar { +div.sidebar, +aside.sidebar { margin: 0 0 0.5em 1em; border: 1px solid #ddb; - padding: 7px 7px 0 7px; + padding: 7px; background-color: #ffe; width: 40%; float: right; + clear: right; + overflow-x: auto; } p.sidebar-title { font-weight: bold; } +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + /* -- topics ---------------------------------------------------------------- */ +nav.contents, +aside.topic, div.topic { border: 1px solid #ccc; - padding: 7px 7px 0 7px; + padding: 7px; margin: 10px 0 10px 0; } @@ -352,10 +359,6 @@ div.admonition dt { font-weight: bold; } -div.admonition dl { - margin-bottom: 0; -} - p.admonition-title { margin: 0px 10px 5px 0px; font-weight: bold; @@ -366,9 +369,34 @@ div.body p.centered { margin-top: 25px; } +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + /* -- tables ---------------------------------------------------------------- */ table.docutils { + margin-top: 10px; + margin-bottom: 10px; border: 0; border-collapse: collapse; } @@ -398,10 +426,6 @@ table.docutils td, table.docutils th { border-bottom: 1px solid #aaa; } -table.footnote td, table.footnote th { - border: 0 !important; -} - th { text-align: left; padding-right: 5px; @@ -416,32 +440,34 @@ table.citation td { border-bottom: none; } -th > p:first-child, -td 
> p:first-child { +th > :first-child, +td > :first-child { margin-top: 0px; } -th > p:last-child, -td > p:last-child { +th > :last-child, +td > :last-child { margin-bottom: 0px; } /* -- figures --------------------------------------------------------------- */ -div.figure { +div.figure, figure { margin: 0.5em; padding: 0.5em; } -div.figure p.caption { +div.figure p.caption, figcaption { padding: 0.3em; } -div.figure p.caption span.caption-number { +div.figure p.caption span.caption-number, +figcaption span.caption-number { font-style: italic; } -div.figure p.caption span.caption-text { +div.figure p.caption span.caption-text, +figcaption span.caption-text { } /* -- field list styles ----------------------------------------------------- */ @@ -468,10 +494,71 @@ table.field-list td, table.field-list th { /* -- hlist styles ---------------------------------------------------------- */ +table.hlist { + margin: 1em 0; +} + table.hlist td { vertical-align: top; } +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + /* -- other body styles ----------------------------------------------------- */ @@ -495,26 +582,53 @@ ol.upperroman { list-style: upper-roman; } -li > p:first-child { +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { margin-top: 0px; } -li > p:last-child { +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { margin-bottom: 0px; } -dl.footnote > dt, -dl.citation > dt { - float: left; +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; } -dl.footnote > dd, -dl.citation > dd { - margin-bottom: 0em; +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; } -dl.footnote > dd:after, -dl.citation > dd:after { +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { content: ""; clear: both; } @@ -531,10 +645,6 @@ dl.field-list > dt { padding-right: 5px; } -dl.field-list > dt:after { - content: ":"; -} - dl.field-list > dd { padding-left: 0.5em; margin-top: 0em; @@ -546,7 +656,7 @@ dl { margin-bottom: 15px; } -dd > p:first-child { +dd > :first-child { margin-top: 0px; } @@ -560,6 +670,11 @@ dd { margin-left: 30px; } +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + dt:target, span.highlighted { 
background-color: #fbe54e; } @@ -573,14 +688,6 @@ dl.glossary dt { font-size: 1.1em; } -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - .versionmodified { font-style: italic; } @@ -621,8 +728,9 @@ dl.glossary dt { .classifier:before { font-style: normal; - margin: 0.5em; + margin: 0 0.5em; content: ":"; + display: inline-block; } abbr, acronym { @@ -637,29 +745,69 @@ pre { overflow-y: hidden; /* fixes display issues on Chrome browsers */ } +pre, div[class*="highlight-"] { + clear: both; +} + span.pre { -moz-hyphens: none; -ms-hyphens: none; -webkit-hyphens: none; hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; } td.linenos pre { - padding: 5px 0px; border: 0; background-color: transparent; color: #aaa; } table.highlighttable { - margin-left: 0.5em; + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; } table.highlighttable td { - padding: 0 0.5em 0 0.5em; + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; } div.code-block-caption { + margin-top: 1em; padding: 2px 5px; font-size: small; } @@ -668,12 +816,14 @@ div.code-block-caption code { background-color: transparent; } -div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - -div.doctest > div.highlight span.gp { /* gp: Generic.Prompt */ - user-select: none; +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ } div.code-block-caption span.caption-number { @@ -685,21 +835,7 @@ div.code-block-caption span.caption-text { } div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; + margin: 1em 0; } code.xref, a code { @@ -740,8 +876,7 @@ span.eqno { } span.eqno a.headerlink { - position: relative; - left: 0px; + position: absolute; z-index: 1; } diff --git a/_static/doctools.js b/_static/doctools.js index daccd20..d06a71d 100644 --- a/_static/doctools.js +++ b/_static/doctools.js @@ -2,314 +2,155 @@ * doctools.js * ~~~~~~~~~~~ * - * Sphinx JavaScript utilities for all documentation. + * Base JavaScript utilities for all Sphinx HTML documentation. * - * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. 
* */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - */ -jQuery.urldecode = function(x) { - return decodeURIComponent(x).replace(/\+/g, ' '); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); } - return result; }; -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. - */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - var bbox = node.parentElement.getBBox(); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. 
- */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} - /** * Small JavaScript module for the documentation. */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { - this.initOnKeyListeners(); - } +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); }, /** * i18n support */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, - LOCALE : 'unknown', + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", // gettext and ngettext don't access this so that the functions // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated === 'undefined') - return string; - return (typeof translated === 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated === 'undefined') - return (n == 1) ? singular : plural; - return translated[Documentation.PLURALEXPR(n)]; + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } }, - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? singular : plural; }, - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). 
- appendTo(this); - }); + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; }, /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 + * helper function to focus on search bar */ - fixFirefoxAnchorBug : function() { - if (document.location.hash && $.browser.mozilla) - window.setTimeout(function() { - document.location.href += ''; - }, 10); + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); }, /** - * highlight the search words provided in the url in the text + * Initialise the domain index toggle buttons */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('') - .appendTo($('#searchbox')); - } - }, - - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) === 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, + }; - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this === '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); }, - initOnKeyListeners: function() { - $(document).keydown(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box or textarea - if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT' - && !event.altKey && !event.ctrlKey && !event.metaKey && 
!event.shiftKey) { - switch (event.keyCode) { - case 37: // left - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); } + break; } } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } }); - } + }, }; // quick alias for translations -_ = Documentation.gettext; +const _ = Documentation.gettext; -$(document).ready(function() { - Documentation.init(); -}); +_ready(Documentation.init); diff --git a/_static/documentation_options.js b/_static/documentation_options.js index 4790c4d..a7f754b 100644 --- a/_static/documentation_options.js +++ b/_static/documentation_options.js @@ -1,11 +1,14 @@ var DOCUMENTATION_OPTIONS = { URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '', - LANGUAGE: 'None', + VERSION: '1.0', + LANGUAGE: 'en', COLLAPSE_INDEX: false, BUILDER: 'html', FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', HAS_SOURCE: true, SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: false + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, }; \ No newline at end of file diff --git a/_static/language_data.js b/_static/language_data.js index d2b4ee9..250f566 100644 --- a/_static/language_data.js +++ b/_static/language_data.js @@ -5,15 +5,16 @@ * This script contains the language-specific data used by searchtools.js, * namely the list of stopwords, stemmer, scorer and splitter. * - * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. 
* */ -var stopwords = ["a","and","are","as","at","be","but","by","for","if","in","into","is","it","near","no","not","of","on","or","such","that","the","their","then","there","these","they","this","to","was","will","with"]; +var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; -/* Non-minified version JS is _stemmer.js if file is provided */ +/* Non-minified version is copied as a separate JS file, is available */ + /** * Porter Stemmer */ @@ -196,102 +197,3 @@ var Stemmer = function() { } } - - - - -var splitChars = (function() { - var result = {}; - var singles = [96, 180, 187, 191, 215, 247, 749, 885, 903, 907, 909, 930, 1014, 1648, - 1748, 1809, 2416, 2473, 2481, 2526, 2601, 2609, 2612, 2615, 2653, 2702, - 2706, 2729, 2737, 2740, 2857, 2865, 2868, 2910, 2928, 2948, 2961, 2971, - 2973, 3085, 3089, 3113, 3124, 3213, 3217, 3241, 3252, 3295, 3341, 3345, - 3369, 3506, 3516, 3633, 3715, 3721, 3736, 3744, 3748, 3750, 3756, 3761, - 3781, 3912, 4239, 4347, 4681, 4695, 4697, 4745, 4785, 4799, 4801, 4823, - 4881, 5760, 5901, 5997, 6313, 7405, 8024, 8026, 8028, 8030, 8117, 8125, - 8133, 8181, 8468, 8485, 8487, 8489, 8494, 8527, 11311, 11359, 11687, 11695, - 11703, 11711, 11719, 11727, 11735, 12448, 12539, 43010, 43014, 43019, 43587, - 43696, 43713, 64286, 64297, 64311, 64317, 64319, 64322, 64325, 65141]; - var i, j, start, end; - for (i = 0; i < singles.length; i++) { - result[singles[i]] = true; - } - var ranges = [[0, 47], [58, 64], [91, 94], [123, 169], [171, 177], [182, 184], [706, 709], - [722, 735], [741, 747], [751, 879], [888, 889], [894, 901], [1154, 1161], - [1318, 1328], [1367, 1368], [1370, 1376], [1416, 1487], [1515, 1519], [1523, 1568], - [1611, 1631], [1642, 1645], [1750, 1764], [1767, 1773], [1789, 1790], [1792, 1807], - [1840, 1868], [1958, 1968], [1970, 1983], [2027, 2035], [2038, 2041], [2043, 2047], - [2070, 2073], [2075, 2083], [2085, 2087], [2089, 2307], [2362, 2364], [2366, 2383], - [2385, 2391], [2402, 2405], [2419, 2424], [2432, 2436], [2445, 2446], [2449, 2450], - [2483, 2485], [2490, 2492], [2494, 2509], [2511, 2523], [2530, 2533], [2546, 2547], - [2554, 2564], [2571, 2574], [2577, 2578], [2618, 2648], [2655, 2661], [2672, 2673], - [2677, 2692], [2746, 2748], [2750, 2767], [2769, 2783], [2786, 2789], [2800, 2820], - [2829, 2830], [2833, 2834], [2874, 2876], [2878, 2907], [2914, 2917], [2930, 2946], - [2955, 2957], [2966, 2968], [2976, 2978], [2981, 2983], [2987, 2989], [3002, 3023], - [3025, 3045], [3059, 3076], [3130, 3132], [3134, 3159], [3162, 3167], [3170, 3173], - [3184, 3191], [3199, 3204], [3258, 3260], [3262, 3293], [3298, 3301], [3312, 3332], - [3386, 3388], [3390, 3423], [3426, 3429], [3446, 3449], [3456, 3460], [3479, 3481], - [3518, 3519], [3527, 3584], [3636, 3647], [3655, 3663], [3674, 3712], [3717, 3718], - [3723, 3724], [3726, 3731], [3752, 3753], [3764, 3772], [3774, 3775], [3783, 3791], - [3802, 3803], [3806, 3839], [3841, 3871], [3892, 3903], [3949, 3975], [3980, 4095], - [4139, 4158], [4170, 4175], [4182, 4185], [4190, 4192], [4194, 4196], [4199, 4205], - [4209, 4212], [4226, 4237], [4250, 4255], [4294, 4303], [4349, 4351], [4686, 4687], - [4702, 4703], [4750, 4751], [4790, 4791], [4806, 4807], [4886, 4887], [4955, 4968], - [4989, 4991], [5008, 5023], [5109, 5120], [5741, 5742], [5787, 5791], [5867, 5869], - [5873, 5887], [5906, 5919], [5938, 5951], [5970, 
5983], [6001, 6015], [6068, 6102], - [6104, 6107], [6109, 6111], [6122, 6127], [6138, 6159], [6170, 6175], [6264, 6271], - [6315, 6319], [6390, 6399], [6429, 6469], [6510, 6511], [6517, 6527], [6572, 6592], - [6600, 6607], [6619, 6655], [6679, 6687], [6741, 6783], [6794, 6799], [6810, 6822], - [6824, 6916], [6964, 6980], [6988, 6991], [7002, 7042], [7073, 7085], [7098, 7167], - [7204, 7231], [7242, 7244], [7294, 7400], [7410, 7423], [7616, 7679], [7958, 7959], - [7966, 7967], [8006, 8007], [8014, 8015], [8062, 8063], [8127, 8129], [8141, 8143], - [8148, 8149], [8156, 8159], [8173, 8177], [8189, 8303], [8306, 8307], [8314, 8318], - [8330, 8335], [8341, 8449], [8451, 8454], [8456, 8457], [8470, 8472], [8478, 8483], - [8506, 8507], [8512, 8516], [8522, 8525], [8586, 9311], [9372, 9449], [9472, 10101], - [10132, 11263], [11493, 11498], [11503, 11516], [11518, 11519], [11558, 11567], - [11622, 11630], [11632, 11647], [11671, 11679], [11743, 11822], [11824, 12292], - [12296, 12320], [12330, 12336], [12342, 12343], [12349, 12352], [12439, 12444], - [12544, 12548], [12590, 12592], [12687, 12689], [12694, 12703], [12728, 12783], - [12800, 12831], [12842, 12880], [12896, 12927], [12938, 12976], [12992, 13311], - [19894, 19967], [40908, 40959], [42125, 42191], [42238, 42239], [42509, 42511], - [42540, 42559], [42592, 42593], [42607, 42622], [42648, 42655], [42736, 42774], - [42784, 42785], [42889, 42890], [42893, 43002], [43043, 43055], [43062, 43071], - [43124, 43137], [43188, 43215], [43226, 43249], [43256, 43258], [43260, 43263], - [43302, 43311], [43335, 43359], [43389, 43395], [43443, 43470], [43482, 43519], - [43561, 43583], [43596, 43599], [43610, 43615], [43639, 43641], [43643, 43647], - [43698, 43700], [43703, 43704], [43710, 43711], [43715, 43738], [43742, 43967], - [44003, 44015], [44026, 44031], [55204, 55215], [55239, 55242], [55292, 55295], - [57344, 63743], [64046, 64047], [64110, 64111], [64218, 64255], [64263, 64274], - [64280, 64284], [64434, 64466], [64830, 64847], [64912, 64913], [64968, 65007], - [65020, 65135], [65277, 65295], [65306, 65312], [65339, 65344], [65371, 65381], - [65471, 65473], [65480, 65481], [65488, 65489], [65496, 65497]]; - for (i = 0; i < ranges.length; i++) { - start = ranges[i][0]; - end = ranges[i][1]; - for (j = start; j <= end; j++) { - result[j] = true; - } - } - return result; -})(); - -function splitQuery(query) { - var result = []; - var start = -1; - for (var i = 0; i < query.length; i++) { - if (splitChars[query.charCodeAt(i)]) { - if (start !== -1) { - result.push(query.slice(start, i)); - start = -1; - } - } else if (start === -1) { - start = i; - } - } - if (start !== -1) { - result.push(query.slice(start)); - } - return result; -} - - diff --git a/_static/pygments.css b/_static/pygments.css index dd6621d..0d49244 100644 --- a/_static/pygments.css +++ b/_static/pygments.css @@ -1,77 +1,75 @@ +pre { line-height: 125%; } +td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } .highlight .hll { background-color: #ffffcc } -.highlight { background: #f8f8f8; } -.highlight .c { color: #8f5902; font-style: italic } /* Comment */ -.highlight .err { color: #a40000; border: 1px solid 
#ef2929 } /* Error */ -.highlight .g { color: #000000 } /* Generic */ -.highlight .k { color: #004461; font-weight: bold } /* Keyword */ -.highlight .l { color: #000000 } /* Literal */ -.highlight .n { color: #000000 } /* Name */ -.highlight .o { color: #582800 } /* Operator */ -.highlight .x { color: #000000 } /* Other */ -.highlight .p { color: #000000; font-weight: bold } /* Punctuation */ -.highlight .ch { color: #8f5902; font-style: italic } /* Comment.Hashbang */ -.highlight .cm { color: #8f5902; font-style: italic } /* Comment.Multiline */ -.highlight .cp { color: #8f5902 } /* Comment.Preproc */ -.highlight .cpf { color: #8f5902; font-style: italic } /* Comment.PreprocFile */ -.highlight .c1 { color: #8f5902; font-style: italic } /* Comment.Single */ -.highlight .cs { color: #8f5902; font-style: italic } /* Comment.Special */ -.highlight .gd { color: #a40000 } /* Generic.Deleted */ -.highlight .ge { color: #000000; font-style: italic } /* Generic.Emph */ -.highlight .gr { color: #ef2929 } /* Generic.Error */ +.highlight { background: #eeffcc; } +.highlight .c { color: #408090; font-style: italic } /* Comment */ +.highlight .err { border: 1px solid #FF0000 } /* Error */ +.highlight .k { color: #007020; font-weight: bold } /* Keyword */ +.highlight .o { color: #666666 } /* Operator */ +.highlight .ch { color: #408090; font-style: italic } /* Comment.Hashbang */ +.highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #007020 } /* Comment.Preproc */ +.highlight .cpf { color: #408090; font-style: italic } /* Comment.PreprocFile */ +.highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ +.highlight .gd { color: #A00000 } /* Generic.Deleted */ +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +.highlight .gr { color: #FF0000 } /* Generic.Error */ .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ .highlight .gi { color: #00A000 } /* Generic.Inserted */ -.highlight .go { color: #888888 } /* Generic.Output */ -.highlight .gp { color: #745334 } /* Generic.Prompt */ -.highlight .gs { color: #000000; font-weight: bold } /* Generic.Strong */ +.highlight .go { color: #333333 } /* Generic.Output */ +.highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ -.highlight .gt { color: #a40000; font-weight: bold } /* Generic.Traceback */ -.highlight .kc { color: #004461; font-weight: bold } /* Keyword.Constant */ -.highlight .kd { color: #004461; font-weight: bold } /* Keyword.Declaration */ -.highlight .kn { color: #004461; font-weight: bold } /* Keyword.Namespace */ -.highlight .kp { color: #004461; font-weight: bold } /* Keyword.Pseudo */ -.highlight .kr { color: #004461; font-weight: bold } /* Keyword.Reserved */ -.highlight .kt { color: #004461; font-weight: bold } /* Keyword.Type */ -.highlight .ld { color: #000000 } /* Literal.Date */ -.highlight .m { color: #990000 } /* Literal.Number */ -.highlight .s { color: #4e9a06 } /* Literal.String */ -.highlight .na { color: #c4a000 } /* Name.Attribute */ -.highlight .nb { color: #004461 } /* Name.Builtin */ -.highlight .nc { color: #000000 } /* Name.Class */ -.highlight .no { color: #000000 } /* Name.Constant */ -.highlight .nd { color: 
#888888 } /* Name.Decorator */ -.highlight .ni { color: #ce5c00 } /* Name.Entity */ -.highlight .ne { color: #cc0000; font-weight: bold } /* Name.Exception */ -.highlight .nf { color: #000000 } /* Name.Function */ -.highlight .nl { color: #f57900 } /* Name.Label */ -.highlight .nn { color: #000000 } /* Name.Namespace */ -.highlight .nx { color: #000000 } /* Name.Other */ -.highlight .py { color: #000000 } /* Name.Property */ -.highlight .nt { color: #004461; font-weight: bold } /* Name.Tag */ -.highlight .nv { color: #000000 } /* Name.Variable */ -.highlight .ow { color: #004461; font-weight: bold } /* Operator.Word */ -.highlight .w { color: #f8f8f8; text-decoration: underline } /* Text.Whitespace */ -.highlight .mb { color: #990000 } /* Literal.Number.Bin */ -.highlight .mf { color: #990000 } /* Literal.Number.Float */ -.highlight .mh { color: #990000 } /* Literal.Number.Hex */ -.highlight .mi { color: #990000 } /* Literal.Number.Integer */ -.highlight .mo { color: #990000 } /* Literal.Number.Oct */ -.highlight .sa { color: #4e9a06 } /* Literal.String.Affix */ -.highlight .sb { color: #4e9a06 } /* Literal.String.Backtick */ -.highlight .sc { color: #4e9a06 } /* Literal.String.Char */ -.highlight .dl { color: #4e9a06 } /* Literal.String.Delimiter */ -.highlight .sd { color: #8f5902; font-style: italic } /* Literal.String.Doc */ -.highlight .s2 { color: #4e9a06 } /* Literal.String.Double */ -.highlight .se { color: #4e9a06 } /* Literal.String.Escape */ -.highlight .sh { color: #4e9a06 } /* Literal.String.Heredoc */ -.highlight .si { color: #4e9a06 } /* Literal.String.Interpol */ -.highlight .sx { color: #4e9a06 } /* Literal.String.Other */ -.highlight .sr { color: #4e9a06 } /* Literal.String.Regex */ -.highlight .s1 { color: #4e9a06 } /* Literal.String.Single */ -.highlight .ss { color: #4e9a06 } /* Literal.String.Symbol */ -.highlight .bp { color: #3465a4 } /* Name.Builtin.Pseudo */ -.highlight .fm { color: #000000 } /* Name.Function.Magic */ -.highlight .vc { color: #000000 } /* Name.Variable.Class */ -.highlight .vg { color: #000000 } /* Name.Variable.Global */ -.highlight .vi { color: #000000 } /* Name.Variable.Instance */ -.highlight .vm { color: #000000 } /* Name.Variable.Magic */ -.highlight .il { color: #990000 } /* Literal.Number.Integer.Long */ \ No newline at end of file +.highlight .gt { color: #0044DD } /* Generic.Traceback */ +.highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #007020 } /* Keyword.Pseudo */ +.highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #902000 } /* Keyword.Type */ +.highlight .m { color: #208050 } /* Literal.Number */ +.highlight .s { color: #4070a0 } /* Literal.String */ +.highlight .na { color: #4070a0 } /* Name.Attribute */ +.highlight .nb { color: #007020 } /* Name.Builtin */ +.highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */ +.highlight .no { color: #60add5 } /* Name.Constant */ +.highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ +.highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ +.highlight .ne { color: #007020 } /* Name.Exception */ +.highlight .nf { color: #06287e } /* Name.Function */ +.highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ +.highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ 
+.highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #bb60d5 } /* Name.Variable */ +.highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ +.highlight .w { color: #bbbbbb } /* Text.Whitespace */ +.highlight .mb { color: #208050 } /* Literal.Number.Bin */ +.highlight .mf { color: #208050 } /* Literal.Number.Float */ +.highlight .mh { color: #208050 } /* Literal.Number.Hex */ +.highlight .mi { color: #208050 } /* Literal.Number.Integer */ +.highlight .mo { color: #208050 } /* Literal.Number.Oct */ +.highlight .sa { color: #4070a0 } /* Literal.String.Affix */ +.highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ +.highlight .sc { color: #4070a0 } /* Literal.String.Char */ +.highlight .dl { color: #4070a0 } /* Literal.String.Delimiter */ +.highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #4070a0 } /* Literal.String.Double */ +.highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ +.highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ +.highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ +.highlight .sx { color: #c65d09 } /* Literal.String.Other */ +.highlight .sr { color: #235388 } /* Literal.String.Regex */ +.highlight .s1 { color: #4070a0 } /* Literal.String.Single */ +.highlight .ss { color: #517918 } /* Literal.String.Symbol */ +.highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #06287e } /* Name.Function.Magic */ +.highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ +.highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ +.highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ +.highlight .vm { color: #bb60d5 } /* Name.Variable.Magic */ +.highlight .il { color: #208050 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/_static/searchtools.js b/_static/searchtools.js index d11b33a..97d56a7 100644 --- a/_static/searchtools.js +++ b/_static/searchtools.js @@ -4,22 +4,24 @@ * * Sphinx JavaScript utilities for the full-text search. * - * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ +"use strict"; -if (!Scorer) { - /** - * Simple result scoring code. - */ +/** + * Simple result scoring code. + */ +if (typeof Scorer === "undefined") { var Scorer = { // Implement the following function to further tweak the score for each result - // The function takes a result array [filename, title, anchor, descr, score] + // The function takes a result array [docname, title, anchor, descr, score, filename] // and returns the new score. /* - score: function(result) { - return result[4]; + score: result => { + const [docname, title, anchor, descr, score, filename] = result + return score }, */ @@ -28,9 +30,11 @@ if (!Scorer) { // or matches in the last dotted part of the object name objPartialMatch: 6, // Additive scores depending on the priority of the object - objPrio: {0: 15, // used to be importantResults - 1: 5, // used to be objectResults - 2: -5}, // used to be unimportantResults + objPrio: { + 0: 15, // used to be importantResults + 1: 5, // used to be objectResults + 2: -5, // used to be unimportantResults + }, // Used when the priority is not in the mapping. 
objPrioDefault: 0, @@ -39,442 +43,495 @@ if (!Scorer) { partialTitle: 7, // query found in terms term: 5, - partialTerm: 2 + partialTerm: 2, }; } -if (!splitQuery) { - function splitQuery(query) { - return query.split(/\s+/); +const _removeChildren = (element) => { + while (element && element.lastChild) element.removeChild(element.lastChild); +}; + +/** + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping + */ +const _escapeRegExp = (string) => + string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string + +const _displayItem = (item, searchTerms) => { + const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; + const docUrlRoot = DOCUMENTATION_OPTIONS.URL_ROOT; + const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; + const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; + const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; + + const [docName, title, anchor, descr, score, _filename] = item; + + let listItem = document.createElement("li"); + let requestUrl; + let linkUrl; + if (docBuilder === "dirhtml") { + // dirhtml builder + let dirname = docName + "/"; + if (dirname.match(/\/index\/$/)) + dirname = dirname.substring(0, dirname.length - 6); + else if (dirname === "index/") dirname = ""; + requestUrl = docUrlRoot + dirname; + linkUrl = requestUrl; + } else { + // normal html builders + requestUrl = docUrlRoot + docName + docFileSuffix; + linkUrl = docName + docLinkSuffix; + } + let linkEl = listItem.appendChild(document.createElement("a")); + linkEl.href = linkUrl + anchor; + linkEl.dataset.score = score; + linkEl.innerHTML = title; + if (descr) + listItem.appendChild(document.createElement("span")).innerHTML = + " (" + descr + ")"; + else if (showSearchSummary) + fetch(requestUrl) + .then((responseData) => responseData.text()) + .then((data) => { + if (data) + listItem.appendChild( + Search.makeSearchSummary(data, searchTerms) + ); + }); + Search.output.appendChild(listItem); +}; +const _finishSearch = (resultCount) => { + Search.stopPulse(); + Search.title.innerText = _("Search Results"); + if (!resultCount) + Search.status.innerText = Documentation.gettext( + "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." + ); + else + Search.status.innerText = _( + `Search finished, found ${resultCount} page(s) matching the search query.` + ); +}; +const _displayNextItem = ( + results, + resultCount, + searchTerms +) => { + // results left, load the summary and display it + // this is intended to be dynamic (don't sub resultsCount) + if (results.length) { + _displayItem(results.pop(), searchTerms); + setTimeout( + () => _displayNextItem(results, resultCount, searchTerms), + 5 + ); } + // search finished, update title and status message + else _finishSearch(resultCount); +}; + +/** + * Default splitQuery function. Can be overridden in ``sphinx.search`` with a + * custom function per language. + * + * The regular expression works by splitting the string on consecutive characters + * that are not Unicode letters, numbers, underscores, or emoji characters. + * This is the same as ``\W+`` in Python, preserving the surrogate pair area. 
+ */ +if (typeof splitQuery === "undefined") { + var splitQuery = (query) => query + .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) + .filter(term => term) // remove remaining empty strings } /** * Search Module */ -var Search = { - - _index : null, - _queued_query : null, - _pulse_status : -1, - - htmlToText : function(htmlString) { - var htmlElement = document.createElement('span'); - htmlElement.innerHTML = htmlString; - $(htmlElement).find('.headerlink').remove(); - docContent = $(htmlElement).find('[role=main]')[0]; - if(docContent === undefined) { - console.warn("Content block not found. Sphinx search tries to obtain it " + - "via '[role=main]'. Could you check your theme or template."); - return ""; - } - return docContent.textContent || docContent.innerText; +const Search = { + _index: null, + _queued_query: null, + _pulse_status: -1, + + htmlToText: (htmlString) => { + const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); + htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() }); + const docContent = htmlElement.querySelector('[role="main"]'); + if (docContent !== undefined) return docContent.textContent; + console.warn( + "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template." + ); + return ""; }, - init : function() { - var params = $.getQueryParameters(); - if (params.q) { - var query = params.q[0]; - $('input[name="q"]')[0].value = query; - this.performSearch(query); - } + init: () => { + const query = new URLSearchParams(window.location.search).get("q"); + document + .querySelectorAll('input[name="q"]') + .forEach((el) => (el.value = query)); + if (query) Search.performSearch(query); }, - loadIndex : function(url) { - $.ajax({type: "GET", url: url, data: null, - dataType: "script", cache: true, - complete: function(jqxhr, textstatus) { - if (textstatus != "success") { - document.getElementById("searchindexloader").src = url; - } - }}); - }, + loadIndex: (url) => + (document.body.appendChild(document.createElement("script")).src = url), - setIndex : function(index) { - var q; - this._index = index; - if ((q = this._queued_query) !== null) { - this._queued_query = null; - Search.query(q); + setIndex: (index) => { + Search._index = index; + if (Search._queued_query !== null) { + const query = Search._queued_query; + Search._queued_query = null; + Search.query(query); } }, - hasIndex : function() { - return this._index !== null; - }, + hasIndex: () => Search._index !== null, - deferQuery : function(query) { - this._queued_query = query; - }, + deferQuery: (query) => (Search._queued_query = query), - stopPulse : function() { - this._pulse_status = 0; - }, + stopPulse: () => (Search._pulse_status = -1), - startPulse : function() { - if (this._pulse_status >= 0) - return; - function pulse() { - var i; + startPulse: () => { + if (Search._pulse_status >= 0) return; + + const pulse = () => { Search._pulse_status = (Search._pulse_status + 1) % 4; - var dotString = ''; - for (i = 0; i < Search._pulse_status; i++) - dotString += '.'; - Search.dots.text(dotString); - if (Search._pulse_status > -1) - window.setTimeout(pulse, 500); - } + Search.dots.innerText = ".".repeat(Search._pulse_status); + if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); + }; pulse(); }, /** * perform a search for something (or wait until index is loaded) */ - performSearch : function(query) { + performSearch: (query) => { // create the required interface elements - this.out = 
$('#search-results');
-    this.title = $('<h2>' + _('Searching') + '</h2>').appendTo(this.out);
-    this.dots = $('<span></span>').appendTo(this.title);
-    this.status = $('<p class="search-summary">&nbsp;</p>').appendTo(this.out);
-    this.output = $('