From 07baddb23c51e7fa98fd795816d3cc199d1e7ce4 Mon Sep 17 00:00:00 2001 From: Pedro Arduino Date: Thu, 13 Jun 2024 17:29:19 -0700 Subject: [PATCH] cleaning repository --- ..gitignore.un~ | Bin 990 -> 0 bytes .gitignore~ | 0 source/.case_1.rst.un~ | Bin 7587 -> 0 bytes source/.case_2.rst.swp | Bin 45056 -> 0 bytes source/.case_2.rst.un~ | Bin 4506 -> 0 bytes source/.case_5.rst.un~ | Bin 3188 -> 0 bytes source/.case_r.rst.un~ | Bin 3376 -> 0 bytes source/case_1.rst~ | 343 ------------------- source/case_2.rst~ | 723 ----------------------------------------- source/case_5.rst~ | 611 ---------------------------------- source/case_r.rst~ | 8 - 11 files changed, 1685 deletions(-) delete mode 100644 ..gitignore.un~ delete mode 100644 .gitignore~ delete mode 100644 source/.case_1.rst.un~ delete mode 100644 source/.case_2.rst.swp delete mode 100644 source/.case_2.rst.un~ delete mode 100644 source/.case_5.rst.un~ delete mode 100644 source/.case_r.rst.un~ delete mode 100644 source/case_1.rst~ delete mode 100644 source/case_2.rst~ delete mode 100644 source/case_5.rst~ delete mode 100644 source/case_r.rst~ diff --git a/..gitignore.un~ b/..gitignore.un~ deleted file mode 100644 index 1079c2ec771d48c8283caa5f9c4905d984cd4c46..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 990 zcmWH`%$*;a=aT=FfysqyrFOgX`StN3CokFe&bvHSMC@wzc~{;_QFkZi3#z6J3=G^r z%&VW5pOPA%o1apelUfWEU#&b lm;s|S6+!@*k2s+D2pn-NKr`8Z7?dkP0gsw9O&g!D0sv0uF}45z diff --git a/.gitignore~ b/.gitignore~ deleted file mode 100644 index e69de29..0000000 diff --git a/source/.case_1.rst.un~ b/source/.case_1.rst.un~ deleted file mode 100644 index 50ee0efb4b096aa5a058ba59ac13aaba24c76fb9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7587 zcmeI1O>5LZ7{}8WUuvthpdhIDR4ZQU;z18Ch){abON*_wtp`oBlg-p@vLxAU7f*fy zp@`r?dREYzCvToTRQwF$O;8ZL>GPkRxKpCduUa{b!PWW=_`DZwHg% zC#(0y_?vep$Lp8pKdw!E9a}s-SFJu2vlBy?+`&_?KlraKtLKhoSx;s?K~7k!M zrJAqb$8jG 
z>BrGN>)wX$(5rJ~d`LauDk?wO)TK}71|R z$z2eP2^!O4dGOOiZwtZiYYhRP100Ntq_8#|&>fLMUW5Xy zkW||b`Te@f_a7VUgLQ^)HOl$}NcB3_n1JP=F$^H1v|eCJJ&{+=n4V@~8sN|67=N15 zuUzmMFh&D*#OV;)3SRI+p2obMXYm@Glz$QfAw@@z3-m~r3u37wa?6?6i!5Se2C?lT zA?f4;x7dQas%Q(5HcCN~0n!y&=bQE$ZQ_!>>*)_o-%~;bkR{{(hWC;aWL9^|LUH_7C nCdzBNj&PB zbNk-zo?QtEs#2b=`p500dMw&qo-FZsLaFq{`0SFt-f`c< zOU<}d?WXmHeqZc!e0KcU)Q;(+~!aiCG@t|s+Pd*VdW zPU2Q&W#Vw!InrI6*ne>D@UA_37H&Rr@Wjz_)jt<^(nfuv-RQO|$wVb?CkxZ%R=cxt zk(+lmDX>X_i~^4@?UX_O$uyMV3Puy6xgJ|CIvPr z@K1vRo#vxU_tDgcgWcd^9*^e#ez3>?{zd-FFmQ+e{V4ud!Qc7ckK}&^93lhd@fffU z-s68ig6}T|TPR5Pe*w&aJN@s=_^#{s_}_fp{A^NSlLDI**rdQF1vV+LNr6oYY*Jv8 z0-F@rq`)QxHYxBBQ6Od!|If(l30i!!|1TTB_pd0Gz6ZVlJ_w3T916nsFf^J zJxZ*^-L`6>LRJ#Xq|;ET^+tWXTTj)(ax}MsJ4Upo+|MPoMkVd6lalOK;`*|#wbrBC zyOdO0kE%&CsaKUJ?dOLzxf)d(tIb9`HLB1EmA49MT2#^zhT7?(HK!aMh*z7+L#6P9 zRVk)jl}=KP7Af6Iwl=jz<8-+dH&@o7ZlS1hG~bBgN~O`QcOu#%deG_|38U3+yQ2oD z^-81FY^cAg{D2B^M9y?F=fFbSbTv_egF$KjYQspybx(N+3hm3$^H&mjGor(~m5y@YkqR`h-$`|Ql!}E(7PXs6MU51d zTn)?7$$AxcA?jevW#wjfi&b1q#MWfL1aTu+H_*=4Ol7pw)wBjvHLDoSb0fRaRX8`Br6p zBF09BBx-}2oTDBEq@sVP*X}l@)(~A9>mrWhdbqoqz=<@(xYS73Ka||qWhB7(v-FbV zJ4a?m&_?_%gL?IjVLcX~U|O z7oY9MQYWe)W*9p*h9zPc|8bMfCM)r`I3{YZw>!zIRZ~rqBvkE|={x$=k$1zo59PZU z8RFz(?0VG6Zu&URB(q6OFI{{WLJBGXfjtRZs^tpRSXLht5YiNtRtXeCQE zCoF9|g#p?L%V!mA)43B-*|)QVTjG-^Uw_Hw*sY1U<@++1cZuZin#TcDl@-*%E{2Ai2d2gv#` zH=|TUA_+bEk3mDl=^UH)jJ=sLT6D>{V|L9ansuCzuo>BSpK*~?)9$MJ4_2sl=`Zm{ zquXhA8L*6ILavJ?pTw$h8(Zs8}danVv3eQwU-;}Z)>5=tu_QGNBUB{Y_oKJ z5Lk`GDK;XkmVOjvdHHHtZW{2JPQu`3XhLS#!;*JUp`m@Y=Hd9exK{%~~#9J~k zAeGL~_=BWD!t|?#{4O1DH5O~hYIGoJS6XRPlck&aO&kS5C;^Au!aJ7X4*lZjSX}Wm zM?dmPo@(ODKY0`})>%uC6nav$a9te)nZ_YHSUaWk?a`vxV#TH{SR!)DRp1%;D=CY4 zxrN?9^=6|RyUlg+=gnMNY&6bh`cx*ny(BGA<~CXh^UcLeNp$?gjpIA7ncUeMXKQO~ zWjEKfd0@PfmK&|*iE7-5Cl*_cHT;7o=zYBYn2p3L0#T!dEAn%cAH zn%hif&PlC84#XW(NZ=}xFEiknTpCWa`#IIAmwsH(%)S~>R8HjD>_0$7)NJ{8oA|$f zRb)Q4@vZ)LTE7MZD<#)+h$)7tx-751L*KM68FwybF#&6%j{Oev8Ra2Sk7-x|I5FzVW&yzXr%#~X{pB4Kex8uja+uaH4k`+X(otWq6AMG+heXJ1 
zx3kh{%|r)VOr{KX){ph%LaDTGgHP$;!IRHJiJA|+bjUAgY^W?a=lPkE*8h*bqEz~2 zp!L6&#`+%V_vUAl0-F@rq`)QxHYu=4flUf*Qecw;n-tijz$OJYDezB)0*BbqsMXja zI(l_As-2GTR$6N)my?G)3w{&94> z$*F-6M%F%3%2ck+8aARDbqsPzwbcl9E=RWh|6}^{TKw*{{+EybJ*@rT1>OoSfOT*d zoB_WIj)Eg#2J8Xb!8Y(T@F?&|@S)2~rN0F4#2?@t;O*dca2q%VZUR?=t>DA>27Cy- z4qt%Rg4cjWAm4!N!9H*$*b43>u;At3T5t`h;4`oYVlV-o1s)3?1MbCt;7j0(;1|K8 zz$3xm5WMgSa5s1__)YLe@CL9KTnA>rG#CTFN3g+<<|%u?4ZHt#(qfBP=ThyivzJqkdb?%isK8~Da2e#l&EZto z=YS2xg%=T&E-Tp zC$?wgMqkc}D$9Xwa#&@tL02jn;>kA2w6i0<1=Y$A?&63J+iBWMFl+&S1!zkZm)nk9uP^KN96*h4i;UmJP{p(VPmD7I|RX?HU_) z(Oi-I*H*7c59~d_A)xi zhGYjSoSOH^*!xDlrZ7@&_xnRJ4FoQj4b1HrrnRCFDA9CiRw;R96tnzJt=RpicG84;Wj(6H}RS&eYLZXp5 zj^|M5t5;rGqFq3oPZw;J^~9hn*!(wVJv=*&1giDLJb5*p<#juz-F~O< z7ng!UrYYLAV#f!27hBg0wu|11*YL1i>8#fLO_U+?*0|oL>Ae#bhR5}LacD_~C(xAB zN^s_^M@{?{R^#*dvf1C5zm|y}$-d}^?wmezYqT#KnI5Y*%guD6$?wrICFD!tVSjD# zVU?wawf}z*wd3Dvita|0B3Syums+4{iWc;920A;Om!>7x)^u7yK=F4Qu#UgLgqs(e$07>x2GeP8`{i z4Hni|VDzl6GhLlyd@>Pt8qKs~?qSObK3kQQI!ZaiyQli#_lX?~TM;&`%$y~&Q(8xt z4rAWNYrppj;>V3IxsS`KiqY%VV)zj;Ds9*yk|b~{vPSVu{y0I<#y!6&>Wn|GT%%Z= znB}UBPBCC$OB80HX0i-mu5=DkXh_~w+}bclJi>S@jb4y$j}Ec2#W$w%+@xNQw?{Xo z^>r3WFI}3CvvN4ZST`7(cF_I zq$_HfBo_{K@`{EE%*pH70;=bQltA(ha((y+>so0k)WMl0R$gmMdS zo?4|pSomIze!OU^GRp_K^O7%f6=uGrZp}oiHxg3Im88~W8R311SgWkYt+T9BO(?i9 z1C*HA!ukjSCoeG+!&;O#j?g8Dn~N=mHMnUj z7$lfZ;W_%j89-ous1&RsHigJ+;FVmT+U6fPGRweX1cprZ`B)QWTUbPDEuyFyxEn2& z*q5HNF!C~-&zZ{@L^eXwMwLm>;%b=w;M`W)KHFP+G9dbM#(K-hBAv7^)42#!=wD|$ zmEI;UZwy)L;ZGvH zdf8NYns5ex)s|eW`uaIXYK#R+^*sMc;T(WNXp1Rrk>(#p^4^JRdTzKPvMuj*Soo2@ zG4%U3WgXoS?bJW_xr6n=p55WsrT*Vh(i4S33qOXIn!gxou%lNJ)hSO;_I_Xd+e21^ z>0UW^MAs6W())e!Zx2}wrh4Vj3ozH7-i3>P^EJqOlWxoZxa9ZH3&#ns@+IBTCse$B z9hrEuk654KFx z>bc93ej7x`N(hnhd*KHXbXoYF`8U#iV zd7-RJ3)yQgG2_l3TA*3KwDvr)_Rk6^J$HcM@}h8O!`O22+ zP4U`?aNC-Q_q?8Y-7uq}wpKN5THLBx#oJvXhC*nJ*Srex4`L-ZIqoV)hlwt1rzn%h z((@L_Xr?aQt2fy0I-$B=x;rnm$rXxS@WpPh)m7@%b>~Iunpf`nM&{47zJt472YP{e 
z1Qj?VqT4E0#H>XZF2SR+g4i8Ao{a()cM@?~4l#3}1@b$nSfF@G99oY%^x#O|x=zg9SHg>fV5WrZ67q2pzFv66i9@|7Gk~I-Nk03>5Z&TsU&Gk>>X+)0kGMDJcZ-ZrUGq@I91D*jM4&H`MpbLH#%z~Xj=N5bmyTDx_2J!{?FW3kE8oUq4 z58yfAY2bVG!56^C!N-95Xsoj?3|LGrv7*wqyQY-5S4o zJIqHZ`>tlK$_CGn0`f^MB?%XE32g;1B%RaQ``T4(y{odX54JFwychgBafJPrrSj@R z2lF<@fJ$Y4wu=>x$l_gnL~yxS)h?CC23h#rNqyRvsw=0{^wzOG<=s=ZvQ1MBb2NkH zWTd{(BqXxa9xWR6NJIg}Tpn;HA#bv&CG|iBvhKB7T&7+rkP40yM}N)HcrpjkNP^H#_i5R%6S3jZ?!Enb!oiVo@2Xe{?G=G ziM@KQ4GkFJr;H4pB_q>AIp)IIBDVPMbgRjb3jOS;bM(+sXCvC&|1tYO`r{%6{#0W< z>zk#e&O_cgvz1D8dU+ThhAx>$XlN`~hCO5g%@fB-7ymvBTa)q8wwcU_LW7?e7$f|Zq`A21u%ZaU=JW;zx`v*)eXR>7r<3fV%btIQeVrVql z8ac7-V&&PfY!VCKYQ^h?!ncM>sVoV_$y=#tUuJ!I)sPlhCL%{W>DUZ#NphIf>5)UG zyktQaqlx}9vjRk_jY*`$B9Wd7AEQt~s)_#fPM$`Vj#8(KBCL?qDz?vWyM<~_>t#=& zB>i{0a_;s%*2k0NI=rL(RiACvb9Gl2^_Shks2@j{>@S~->#uyvT<=HLXTAIieWq$> z9e>e`q6tV8ZHuNxi;aF@4cF7wO8TLXwMx1dK1r>e^r>XJb^j@>r$a$g`FUFgX?~1U>WO?u$Wo7(>T!RVrLo|{VDRz^;cA#FOV~9ont^({T>a} zq1ctH3x|&xjZKxMfq0KH*M#=L>CB|={6SzO#$k}v3thQ90u(pKjoM=uRy zyS$uqMz&Pvx0p`dtFTJar}wZGut9Q)CWF(r_VJj@f<5uje?ffh*^L-0h1=mj&PjMN z@@xm^1*zGmx7d1zthcZ$sDFp~cWY4?htz|GptE114A^jsR#aQ=PnH9D?**?0EpR3H4r~2)fYV?< z_|L5IUk~%h~&6M@$E-vplqe+BLW zYhWAr4(s_p2j{_la0B=uYxs|WJHd0ojX>-B&$5R940tzq7kC9o!A;;w@Obbz@HN)- z{~G*0xC7h<4uJh&D|i(62E_R&knjJSK?)9lXMs<$w*LgU3%nb=3%m^64vvDoU>o=W zYyJ;|*MkbU8ax490lvbz|I6Uh;EiAoJQaM2b^jN^d%#^_fH$nO87=IQvqH=Unq8*r zU-Ht?!8DzA9e+m>;nIbBT)me}qj1iK`37v`Q@D{cECWW=21$D(e6YHOay7i8zE7y> zU{ZgFLi^J(Y#dc`6P~hl7s=@iC(PZlc*YHtgGNA~O{uSVWU9P#1}^Kgq0Y$Cmi=*E z_CrZa=SlbyIkzo{%rBjZUXr}DZv-ai?F^bjJj9_otp4iNgRK_oGb$5`Lx1V|?JMi6 zU6!-ohzc2`m7d-joyh9qw!I=2*(Qun^>Y9EHBbTvKxSaK^q&xYKW%x5& zFFAcVr~{>Z!Jh7jc(~=#&s94-CcSaGkW0v#o4Yw=DZ6JnBZo~j7jOvV=(tR89H>IB zZ-(9zf%*(~g{sqlq&C>CcDbhk?xfQ~&+>K2{cPGYmG3^sYG<@KQnV5?7pXoQTc5D7 z!bHZR?X6JH`%mHxDLg$u;k2HoFP=#KN}G2WGdW;98 zMk2LvCdQ4d%u9l`fq5a{3^dX6`>ONBz72Xg?HpD#moHkf$TPg)P_hUxMy1Z-kL}BU zNZUQUbWa$o{?DY2=q`O73oFUsx(4x(N9lLME+-J_VHG^-WN!F=b*;vhA zm@JM#D`Q_|CUY1K^}5G=5>mK4R`SJ#Hxo^f$@MfUj;{ui&ev=&!rc55{qO6w 
z;@89ZPe_IL@Q|wY=$@-E7pHF3oLE(LO8MOStXfm-^7Qr+^mM;r%U8{oX}wNUmXhNS?wZ#xvwdglo2P|F?7nN4JOw@|= z<_kVP!HH&Q4Th0RV@q*Np|3@xJ{`qb8~f^uJF?a$TMqHOooN&E!42SQpuK;s{b#^-unjyDe1x_Bdx7@(e;2$NTnYX!>-`Jh#b77c z3LXu9hxPuOz-vJlBw!Xi6a1Jp{^x=A`2PaD4x9nn>)#G82j60i{~7RZ@EhQjAO$x9 z#R5DQJO+H3_5SNY7rYR>02~L3|NlB`{C9#kgGKNwK<_H}F6;ZxfscXLf&^R(#=uWl zc&C`sjgu^1Q*EkrS@+9=#dg`aYKGEHSJQsu`OG z$!I&Ny$#UpzDn@fN;x96{*_q~*Q5r{>i~N@eZm}Eb(N^2qCEHMIN!E2k}k#V;&c}> zUu0uZRkdEAvaH8zZ2`N{DmiStwMT+&2Md@;oe6iq4UY;t`-s%eo*6#f^>DV+U%?^ z=vbDZ&PYt6o6Ewxj|YgUXENWQ3+_(_F6{n%E{iZCOvNOu!QSZ*l+4I~dILxod{;2R z?qjkq`Dg5(zopsi$gmq>BHovUSYw=+l<8t?cE#K{@w!pKqyqHab_(&Gg$%b6YA(^A zW1d|2@X!itt)?L@K~c!d*V-?bHoEPXfE(Q=b;x^I=3Xpplp0R(0^uT0WV~{bXWZ!6 z@BuTXY;@O~dN#Ukip>!6WnCemm^m|ML=cB9;LVbD78Z=LU})#xxIMC%dvwx>kBgz2 z5AMVJ6;kEpxv_*~*PIWBap!FmT%Rltd$3(e*<1IIWb5wUecPNXBcH=FeC6F8{IzF% zLc(w5^H56{%guFtlkE_np!_qkX6AqUZ9m*Fn|66?bk$Xk6}LuPe~uqt_$=pToKWme z8#^ji3j$&Nodc{VXYi<>-svx#e7>JB%q^LD?QlNiPQf#wMd9e?!r4n9+un)rIH=3< z(+78Fi&z(bGISOogf^Q8Tx6QP(E{t1ZK=lopl=3J?=%i)yDR(R=hKc}8Z#fX)WAy7 ze!qs+N%aw*pL1`>ZC}B&a<}biu`;_2LI-hrQ<~1jC27P}shQ7GhW?IC@1Vl6Y_M zMS{zSze1Pd=0wv7dUQyNqvXi*o43^wje>oUpRp$-cZ|# zIW1#$ZnPYN&`YkNIl2;mkUw^SC83?Y5Ke`O`hDQZic#`l0yoiw7qv)SIB{xW%EytE zM28+v4)dmWu9JEd4I`Adm$`F&^4{>;W}^OfepQ(F$`Q*#e+(Agq2cP16JN{kd~%ZN zcIY2ep6o4ZL!wHI8Hl&5Nww_DhMjWnSkI{jH|yw#TJe+_f69$k;f2~})D8v;Vm6U2 z3||-~J4XpQo-Z&w5>C)ku%>^V4`-pcV@^0Y$hIYXcmaY7`2`sjDhMN)Dbu10+{sVj zm^BMX&Ix#&J$iB0ic+rX;0T>Kip`e*=6SXifh?@B#4e!TZ5mz{`O4_z$vvKLDNso&tUWe2=yJcfo%E9|j)+cY{A; z?fyRSN8s(?55ap_v)={o0IT3Mcp(@AqhJI)1(d*jtlfVC64vHbf%SF;X#E|7V_*;1 z4Yq+0vp>n&djgDuf27Ud1oFqf2fPEk9cVrNDj=G?5^R2c4iw1d8orcH_D)&J$w6Y9 zq|e*Ao9q)u&*n84xH0H(n&a6k6`f{tDFL|a5^;3<56&IlwPz2r&4Rr?&j~?z^r$2d zquWP}S(K5DZi^PWpm3wU&~eTLu?22+Zk~(hv7wBHmVdVxI~NwN&f5+ppAab7K}Yad4t`FE>&MD6qq9{oxqrc=M5BcFWN% z?yM8)oE@FPAXF2wb3ulv_w$^w5sl0rnjiJ_P`;{ic+ZcTKsE%Bh6PYC({p}qIM^cu z+quZ4+{uKZ1m;5jHYMufjwetkZT}1JXyV}8?r^GPwuZLB#$$6lRXsGH4=*I*Pw#+p 
zZy^=;6vPSQrSJSjo+wBanqub-z)xA=tSYVp%)&__`OTA3j-;a3Ni-0iM58n~!kfMG68)hS zHcQy~a!P84-q_&~`LkFCI`J5HqwZ^q5KSp@_gS#DIEKF#qnEF?2! z)9%_7q7!24Y>c{9C|?=eN!1Rwk^fkWH{ogUaH35p&ctdclT8JcR^VWh&}~-HQgiK4 zM+JD4Blm8yXhd%(8|?{{{?|EX(p_yyu7-LYfEr3VwDk1oWLtDo6a2c0dZ1(~!1v%-upmy{}V)LVs9y znjp%uQsv!5BVpAi2J=&c9?VZyHj^}v*lq|oT_Y^}B!2QrU-5fs;|@g@^`sF85e`22 zfThu;dtI_^q0?B9YzxXDon{(CcyFu;etSQzjm!2*+RSK}y};9xF!!!Y^#F{JXvJw3 zilFOZPvsCO@`AZIt&8EPU?C+>A_&x?9rf)M+MBJuqZMAWiicjWI4^ zsUKbd8VYc^O8Ahmh(u0CDNhJxkB*}GypfsKWOmkM+m%8H!{t`S1yb0&>qCHRg~y_E ztczU;vs!0*q*N$b0`jx4+LhHg)O9xKQB;|!!xX=VuBT1NeM5frilDw$xW;Himu7=w(r1$H8ji(=IZ<+PK-dle! zzW(n6uLY;U3Gi(2Oz<#p@8$TugAap00C$2l@FGwK_hC2qJoszyr{FE%ji3Qu0FHwR zpz{Ks2!4p|;447q1$+eD4c-gh4*n3l0o(~*4rFU+f(BRwF}Mx<1Y5(8K{!i5F$S*y ziZSSbaGv0^`2JMzzp+7l8^|v4HXvVv4mbslgC~P0fgiEv`yr73!9C!wz&f}VYy(@s zSFlHX6s&^^xEfpmet>=Av*1mj25tgl;6CgXI$Pia;J1LzAY2A>U>ZCTdo_PNU6_yF&WH4=cbG zD}Mo%%a&5v909rm1PXVuHs!bBl!rcD*Ay4c>K_SP(S94b zSQ8aSn(@^!4xcOfL~OOag<;Lro}%;X@ojeU`$+Dfo|09^|0(>?wUyJpZsXQ$h>k!# zGot1ftEY$YuS_jK!OS3%X&V;g^0qvu$`d2BYE2Z?AGzC~8LNcvXN%_Q&)^`$KN?)8=_LHga}sb#!cOvRsU*T;++85uLX>5l&wn z<+16}v9qO6tp4}vn7yUC%JGv;_CzZU3_zNQnFZ{n-8w^4$J6^`GqQX{mNP}RbC2Zc zu~n;{rg96TBO9LHfX)16_G_0k%Uq|(7RckX&K}Uou@`+nQsbfzXidNaxGMB$*z;L1 z6*DXCEDz$$$B}*)tk8t`2;W>==oAtYy9{3hCs$90tb` zsWR~IP|>vJa^XB=Kf)dR!F*%YHF#z%pRM<&Bd5YHtEzI#yG>45%t>~C zE1k+AfZ^T3`8;f3XAgS+`C4O{L%1u+2rou%GTn1LDYWir!D>mf{$leuc@9qZU% zhA>&kNc7JUKa|Af@g2GA^q=z~F+z!)$d10e&2pk@>UJ(_+1FGh<)TWqD)QGKMedq{ z*2alUwvHZPX4dHNI@+>lcegtq@rlV7QRZU}2vk*sLB7|p;z4%|d6*C+qEQ+3`UtYi zbA;)IZgI;}1`}4+8QGV-vnGo+_d<25R0w(BjtPgQQGJeyBKM3`JdR*u?L(FAW7=)7 zI`Pt`w8j}yD97ei6ee4D4u7GZ1--uHGb7^D`Q6HcM44nqw?Gi3@KYBO#Hj$@b0hDC zVR>!M8-yOfcbC}x4R!MWZ_-=S_oKr5yT_TBQ~XYNrwnKH&DIthtqalNmBtdRmAYsA zlRg9e8|Ff>ztLRB<93C!_Xd12j&TMu*>NWF;hsZ=&-OU4p+N>$*a*`y#CaI_y>|DA z`}@;+q1X}O54jV=iNG3j-m%B;#{YCK3^|7r1|Kuds=c$UGoB+1YdB*|5$ORkX4$jZ zhNq?*g*lh>fghXhj=!?>*q|0HxNu=XasOKu7IbrAfoN&FAMw#U7V<|Kb 
zW(l${Atk5(3?y=EWF%cMgl4I-FCn=mr-Afgsq6BA`dPB=CgrOY5=azakkW(|Nob^MiAe z%I;-f<_}q0v2p1rO7piH@utt&2wa1+? ra3!(xr1f0(R4}>jixk#M&f$V}j`YT>vS%}YLuWP8dmr3;Z%Y3UuUE`) diff --git a/source/.case_2.rst.un~ b/source/.case_2.rst.un~ deleted file mode 100644 index 01f9cdc1e74b9eef3afc73a476bd545a60787b36..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4506 zcmeH~J4*vW6oqFe@d=7piB%d)(IznoXk}w3f(As;paw#G6jAVjh0j6*{sarL5ya9) z1wqtO5VWxs+cZ`ddhV=Aj*u)CPB#O0_wEkNUe3pzJu{Y_KN?*aTzpeH)ia>a#$)kf z=U)F|G5k3+GJI9=x>=x{Z{vk^z6XkA`POGDefKVUc*9o=TPpSh#V@Vt~d#q?> uV7p~(ftG}@ai__0Kv+Jry8{3TY$pY_XM;`qPVx@&KOm|ew|z_Ue)jNb!(Ur-MVDvCje`khPE*ZDQb%lVk&I>fl3SnSdiFQyCN~O zu(5yy%m^_tly_IBSV*vW^GVLxjuTt&)4OC-?W7yYTK7fB^2)-|#oqqS`{Crd^m;n_ zI6irL9DgmZ-)%3By0;HP$cFfdJi0lwc|AorYoZ;U7YlS(iu%1?Aa$mMTl}&L4JHVF z6%AZiP@o;9XN*IL&oZR18+#C?il@VGt4Yu-pZ`%U|?l2m_&U@mpeg z)vDK2%~*9iywd~J^7K0%dOX@prtb)x8 wxaCc5_|(nhXKsJhNz-d|Krobecp9!wW=hz~;8tL6a#|-FhWx}$)P`@@-bgmii>tqq%BDkiLFEtI@yAtll}!7TpeBf zFGLU*aTWgnbyeT@n!7$*)Y9UX2QPPdcb6RR^S+nl-JM{)vRr)@@u$pbV`Y9WHF5n~ zx}QE0i8)&l3~jZs6T~! z+qu}3ID~kgq#A)LQ!IOGUkgKVo{in@>%$x>ya$Ck6>ea^8gImVl^R7WY^ z7boxLC~mHV_>^I#G%a-%_&&n7B+uorC{P4^Cnykg;78(Cx$L#ondDieJ}ZjguwC5h zWx(`jQG^(|uCX3Sb(uukMoRjyZZIB8jEPQdOSMULY9QE32o7DPRD5VLThEKYAw7NA zPvG+O%vn6wcFsTA^@oqEv=EU5sAALtSA>Wql1xyH-Y7oY~6WdHyG diff --git a/source/case_1.rst~ b/source/case_1.rst~ deleted file mode 100644 index a0488fb..0000000 --- a/source/case_1.rst~ +++ /dev/null @@ -1,343 +0,0 @@ -.. _case_1: - -QuoFEM - Settlements -==================== - -Author: Kendra Mutch ---------------------- - -Introduction ------------- - -The goal of this project is to quantify settlement, parameters impacting settlement, and observe how uncertainty in input parameters impacts the ultimate settlement of a cohesive soil. These calculations are performed through use of the SimCenter QuoFEM tool. For more details on settlement calculations, the user is encounged to read :cite:`Holtz2011`. 
- -Project Description -------------------- - -Soil settlement is characterized by a change in the effective stress of soil, often driven by either a change in the ground water table, placement of fill/surchage load, or dissipation of excess pore water pressure. While a minimal amount of settlement is expected and may not prove hazardous, larger magnitudes of settlment, or differential settlement, can be detrimental to the integretity and functionality of a super-structure. Settlement of cohesive soil is especially hazardous, as the small pore space in fine grained soil restricts water from draining quickly through the voids. As a result, cohesive soil may continue to settle for a long period of time following the placement of a structure. Granular soil exhibits a significantly lower settlement hazard, as water tends to drain rapidly through the large pore space in the soil, meaning, much of the settlement of coarse grained soil is complete before construction ends. This project focuses on the hazard pertaining to the settlement of cohesive soil. - -When computing settlement, it is important to consider uncertainty and not accept a single predicted value as completely true to reality, as in-situ testing, lab testing, and various models used to determine soil paramters all contain uncertainty. Additionally, soil may differ vastly throughout a project site, with only a few samples taken to represent the whole site. This project uses the program QuoFEM to integrate standard settlement equations with uncertainty quantifiction tools. - -The example problems in this project will utilize the scenario, soil profile, and paramters depicted below (modified from S. Kramer CESG-562 class notes): - -**Scenario:** -*A site adjacent to San Francisco Bay is underlain by San Francisco Bay Mud. The site is to be readied for development by placement of 5ft of fill material, and the ultimamte settlement of the fill is of interest. 
The site conditions, shown below, indicate the presence of a crust of desiccated Bay Mud with thickness, h1, which is not expected to consolidate noticeably. The clay is underlain by a dense gravel, which will also not consolidate.* - -.. figure:: ./images/case1_settlementProblem.png - :scale: 45 % - :align: center - - Fig. 1. Problem statement. - - - - -.. list-table:: Soil Profile Parameters - :widths: 25 25 50 - :header-rows: 1 - - * - Parameter - - Mean Value - - Coefficient of Variation (%) - * - h1 - - 3 ft - - 5 - * - h2 - - 25 ft - - 5 - * - Cc - - 0.75 - - 20 - * - eo - - 1.54 - - 7 - * - Cr - - 0.05 - - 20 - * - change in pre-consol pres. - - 200 psf - - 50 - * - k - - 10E-6 (cm/sec) - - 200 - * - unit weight of fill - - 130 pcf - - 7 - * - height of fill - - 5 ft - - 2 - - -Solution Strategy ------------------ -The magnitude of settlement can be predicted using conventional consolidation theory, as outlined in the equations below: - -#. If soil is normally consolidated, σp' = σo': - - .. math:: - H_{ult} = \frac{C_c}{1+e_o}log(\frac{σ_f'}{σ_o'})H_o - - -#. If soil is over consolidated, σp' > σo' and σo' + Δσ' < or = σp': - - .. math:: - H_{ult} = \frac{C_r}{1+e_o}log(\frac{σ_f'}{σ_o'})H_o - - -#. If soil is over consolidated, σp' > σo' and σo' + Δσ' > σp': - - .. math:: - H_{ult} = \frac{C_r}{1+e_o}log(\frac{σ_p'}{σ_o'})H_o + \frac{C_c}{1+e_o}log(\frac{σ_f'}{σ_p'})H_o - -Where: - - - :math:`H_{ult}` = Ultimate Settlement - - :math:`C_c` = Commpression Index - - :math:`e_o` = Void Ratio - - :math:`C_r` = Recompression Index - - :math:`σ_f'` = Final Vertical Effective Stress - - :math:`σ_o'` = Initial Vertical Effective Stress - - :math:`σ_p'` = Preconsolidation Pressure - - :math:`Δσ'` = Change in Vertical Effective Stress - - :math:`H_o` = Thickness of Compressible Layer - -For an accurate evaluation of ultimate settlement, it is recommended to subdivide the compressible layer into sublayers. 
These equations should be applied to each sublayer using corresponding estimations of initial and final effective stress, as well as material properties, particularly preconsolidation pressure. - -Though these equations provide a starting point for predicting settlement, they don't capture uncertainty. To account for uncertainty, methods such as Forward Propagation, Sensitivity Analysis, and Parameter Calibration integrate standard equations with uncertainty quantification. - -Forward Propagation allows us to determine how uncertainty in soil parameters translates to uncertainty in ultimate settlement. This analysis method enables us to understand the effect of compounding uncertainty. - -Sensitivity Analysis allows us to determine which input parameters impact the resulting ultimate settlement most. Sensitivity Analysis may be performed in both Python and QuoFEM. A Python script performing Sensitivity Analysis may be found here. This script produces a **tornado diagram** (as depicted below), a visual representation of the change in magnitude of settlement resulting from the application of uncertainty to a single variable at a time. These results indicate that, for the given example and material properties, the compression index (Cc), unit weight of the fill (gamma_fill), and preconsolidation pressure are the most relevant parameters. - -Finally, Parameter Calibration, allows one to determine an unknown soil paramter, given a value (or set of values) of ultimate settlement. Two examples of parameter calibartion are discussed in the **Example Applications** section. One example utilizes Bayesian Calibration, while another example utilizes Deterministic Calibration. - -.. figure:: ./images/case1_TornadoDiagram.png - :scale: 50 % - :align: center - - Fig. 2. Tornado diagram. - - -SimCenter Tool Used -------------------- -In this project we use the SimCenter tool QuoFEM. 
QouFEM allows the integration of the finite element method and hazard compuatations with uncertainty quantification tools. Although the tool was originally developed for finite element applications, it can also be utilized with other solution methods. In this project, the settlement calculations are implemented in a simple Python script that propagates settlement evaluations through sublayers to determine the ultimate surface settlement. This python script can be easily uploaded in QuoFEM instead of specifying a FEM application. - -There are five different tabs in QuoFEM; four input tabs and one results tab. The four input tabs are outlined below: - - * **UQ tab** - The UQ tab allows one to select the analysis method (Forward Propagation, Bayesian - Callibration, Sensitivity Analysis, etc.). Additionally, one can specify a statistics model and the number - of samples to run. - - * **FEM tab** - The FEM is where a python script is input, and a finite element method (such as Openseas) may - be selected. - - * **RV tab** - The RV tab allows you define random variables and apply desired uncertainty and statistic distributions - (normal distribution, uniform distribution etc.) to each variable. - - * **EDP tab** - The EDP tab allows one to define quantities of interest to compute (i.e., ultimate settlement). - - .. figure:: ./images/case1_InputResultsTabs.png - :align: center - - Fig. 3. QuoFEM interface. - - - -After entering parameters in the input tabs, one may choose run the project on their machine by simply clicking **Run** or to run the project in the cloud by selecting **Run at Design Safe**. When choosing to run a project in the cloud, one must login to Design Safe and specify a maximum run time. To ensure that the project does not expire while waiting in the queue, select a run time of at least 10 hours. - -The results tab contains both a **Summary** page and a **Data Values** page. The **Summary** page contains a brief -outline of the values computed. 
The **Data Values** page contains a more comprehensive set of results and figures. There are various features within the **Data Values** page of the **Results** tab which may aid in analysis. Below is information about navigating the **Data Values** page to extract desired information: - - * **To View a Scatterplot of a Parameter vs. Run Number** - left click once on any column. - - * **To View a Cumulative Frequency Distribution for a Variable** - First left click once on the column for the - variable that you want to view a cumulative frequency distribution for. Then right click once on the same - column. - - * **To View a Histogram for a Variable** - After following the steps to display a cumulative frequency - distribution, left click on the same column once more to display the histogram. - - * **To View a Scatterplot of One Variable vs. Another Variable** - Right click once on one of the variables. - This defines which variable will be on the x-axis. Then, left click once on the variable which you want - plotted on the y-axis. - - * **To Export the Data Table** - Select the Save Table icon above the data, and choose a location for saving - the table as a .csv file. - - -Example Applications --------------------- - -The following sections utilize the settlement scenario to demonstrate the various capabilities of QuoFEM in incorporating uncertainty quantification into the analysis. These capabilities include the propagation of uncertainty, deterministic and Bayesian calibration, as well as sensitivity analysis. - -Example One - Forward Propagation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -#. Open QuoFEM. By default, the **UQ method** is **Forward Propagation** and the **UQ Engine** is **Dakota**. In this example, we will use these defaults. Specify a **Sample Number** of 200 and a **Seed Number** of 949. Ensure the **Parellel Execution** and the **Save Working dirs** boxes are checked. - -#. Select the **FEM** tab. From the drop down menu, select **Python**. 
Navigate to the location of the **Input Script** and the **Parameters Script**. Both Python scripts are available at the below links: - - * *settlement.py* - * *params.py* - -#. Select the **RV** tab. Enter the random variables (listed in the table in the problem description). Select **Normal Distribution** for each random variable, and enter the mean and standard deviation. The standard deviation must be calculated for each variable from the given coefficient of variation. The below table shows values which should be input for each random variable. - - .. list-table:: Random Variables - :widths: 25 25 50 50 - :header-rows: 1 - - * - Variable Name - - Distribution - - Mean Value - - Standard Deviation - * - h1 - - Normal Distribution - - 3 - - 0.15 - * - h2 - - Normal Distribution - - 25 - - 1.25 - * - Cc - - Normal Distribution - - 0.75 - - 0.15 - * - Cr - - Normal Distribution - - 0.05 - - 0.01 - * - eo - - Normal Distribution - - 1.54 - - 0.1078 - * - Δσ' - - Normal Distribution - - 200 - - 100 - * - k - - Normal Distribution - - 0.000001 - - 0.000002 - * - unit weight of fill - - Normal Distribution - - 130 - - 9.1 - * - height of fill - - Normal Distribution - - 5 - - 0.1 -#. In the **EDP** tab, specify the variable of interest as **Settlement** and assign it a **Length** of **1**. - -#. Run the example either on your machine or in the cloud. For running in the cloud, see the **SimCenter Tool Used** section for additional details. - -The results for Forward Propagation are outlined below: - -.. figure:: ./images/case1_ForwardPropagationResults.png - :align: center - - Fig. 4. Forward propagation results. - - -The results indicate that, given the mean parameters and standard deviation, a total settlement of 1.31 inches is expected with a standard deviation of 0.88 inches (CoV = 0.66). The corresponding histogram, based on Latin Hypercube Sampling (LHS), along with the associated normal distribution curve, is shown in the figure below: - -.. 
figure:: ./images/case1_propagation_Normalized_Settl_histogram.png - :scale: 40% - :align: center - - Fig. 5. QuoFEM propagation histogram. - - -Example Two - Sensitivity Analysis -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. In the UQ tab, select **Sensitivity Analysis** as the **UQ Method**. From the **UQ Engine** drop down, select **SimCenterUQ**. In the Method drop down, select **Monte Carlo**. For the **Number of samples**, enter 500, and for the **Seed Number**, enter 106. - -#. Select the **FEM** tab. From the **FEM** drop down, select **Python**. Locate the file path for the **Input Script** and the **Paramters Script**. Both Python scripts are available at the below links. - - * *Input Script.py* - * *Parameters Script.py* - -#. In the **RV** tab, enter the same random variables as the Forward Propagation example. - -#. In the **EDP** tab, use the same inputs as the Forward Propagation example. - -#. Choose to run the example either on your machine in the cloud. For running in the cloud, see the **SimCenter Tool Used** section for additional details. - -The results for the Sensitivity Analysis in QuoFEM are outlined below. Uncertainty in preconsolidation pressure and compression index translate to the greatest uncertainty in the predicted settlement. These findings are consistent with the results shown in the tornado diagram. - -.. figure:: ./images/case1_Sensitivity2.png - :scale: 60 % - :align: center - - Fig. 6. QuoFEM sensitivity results. - -.. figure:: ./images/case1_Sensitivity.png - :scale: 100 % - :align: center - - Fig. 7. QuoFEM sensitivity results - most relevant parameters. - - -Example Three - Parameter Calibration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Two parameter calibration strategies available in QuoFEM are explored: i) Deterministic calibration and ii) Bayesian calibration. In both cases, parameters are identified to match assumed field settlement data at several locations, with an average total settlement of 0.88 inches. 
- - -Deterministic Calibration -^^^^^^^^^^^^^^^^^^^^^^^^^ - -Two deterministic calibration methods are used: i)NL2SOL and ii)OPT++GaussNewton). - -When testing the two different deterministic calibration algorithms supported in QuoFEM, we found that they provided vastly different results. This indicates that there are multiple combinations of the compression index (Cc) and preconsolidation pressure that can be considered optimal. To further explore this issue, the figure below shows a settlement field for varying values of Cc and preconsolidation pressure. It is evident that the settlement field is nonlinear due to the logarithmic nature of the solution equation. Additionally, when examining points with constant settlement (e.g., 0.88 or 1.316 inches), the red lines indicate that multiple combinations of compression index (Cc) and preconsolidation pressure yield the same settlement with the black dots representing two solutions obtained using the two deterministic calibration methoods in QuoFEM. Clearly, for this scenario, deterministic calibration cannot identify a single optimal value, making Bayesian calibration necessary. - -.. figure:: ./images/case1_SettlementField.png - :scale: 80% - :align: center - - Fig. 8. Settlement field as a function of Cc and Precon pressure. - -Bayesian Calibration -^^^^^^^^^^^^^^^^^^^^ - -This is a classic scenario where Bayesian methods can be preferred instead of deterministic methods - Bayesian methods show that there is not just one best parameter value but several values are almost equally good. This issue frequently arises when we have many parameters to be calibrated with not much data. A single best parameter value is usually unidentifiable in such cases - - - -#. Open QuoFEM. In the **UQ** tab, change the **UQ method** to **Bayesain Callibration** and change the **UQ Engine** to **UCSD-UQ**. For the model, select **Non-hierarchical**. Enter a **Sample** number of 500 and **Seed** number of 85. 
For the **Calibration Data File**, navigate to **data_2.txt**. This text file may be downloaded at the below link: - - * *data_2.txt* - -#. In the **FEM** tab, navigate to the location of the **Input Script** and **Parameter Script**. The Bayesian Calibration Python scripts may be downloaded at the below links: - - * *Settlement_2.py* - * *params.py* - -#. In the **RV** tab, enter the same random variables as the Forward Propagation example. - -#. In the EDP tab, add two variables of interset. The first variable is **settlement** with a **Length** of **1**, and the second variable is a **dummy** variable with a **Length** of 1. - -#. Choose to run the example either on your machine in the cloud. For running in the cloud, see the **SimCenter Tool Used** section for additional details. - -The results for Bayesian Calibration are outlined below: - -.. figure:: ./images/case1_BayesianResults1.png - -.. figure:: ./images/case1_BayesianResults2.png - :align: center - - Fig. 9. QuoFEM Bayesian calibration results. - -The figure shows Cc and Precon pressure are the most relevant parameters. - -A more in-depth analysis using prior and posterior distributions reveals that the posterior distributions from the Bayesian calibration process result in more accurate and less uncertain settlement estimations. The figure below illustrates these distributions. - -.. figure:: ./images/case1_calibration_PriorPost.png - :scale: 70% - :align: center - - Fig. 10. Prior and posterior distributions from Bayesian calibration. - - -Remarks -------- -By accounting for uncertainty in settlement, chances of highly underpredicting or overpredicting settlement are reduced. diff --git a/source/case_2.rst~ b/source/case_2.rst~ deleted file mode 100644 index cf1bac5..0000000 --- a/source/case_2.rst~ +++ /dev/null @@ -1,723 +0,0 @@ -.. 
_case_2: - -EEUQ - Transfer Function -======================== - -Author: Erick Martinez ----------------------- - -Introduction ------------- - -This page introduces the fundamental concept of transfer functions and their application in site response analysis. -The project also examines the impact of uncertainty in input variables during this process, utilizing the SimCenter tool EE-UQ. -Special attention is given to soil amplification factors. For more detailed information on transfer functions, users are encouraged to read :cite:`Kramer1996`. - - -.. This page describes the basic concept of transfer functions and their use in a site response analysis. Along with this, the uncertainty in this process will be investigated using EE-UQ, a SimCenter tool. For more details, the user is encouraged to read :cite:`Kramer1996`. - -A Jupyter Notebook for this example can be found within `DesignSafe PRJ-4604 `_. - - -.. note:: - This example was prepared on a Mac system. Differences in the UI between Mac and other systems are possible, but should not affect the outcome. - - -Problem Description -------------------- - -A transfer function acts as a filter that can amplify or de-amplify an incoming wave from a medium to produce the output signal in another medium. To simplify the idea of a transfer function, a spring-mass system can be used. As a motion is applied on the mass connected to a spring; a responsive outgoing wave will then be propagated through the mass and the spring. This outgoing motion will be a composite factor of the stiffness and elastic damping forces found within the spring-mass system. - -.. seealso:: - For more information, visit the `Free Vibrations of a Spring-Mass-Damper System `_. - - - -This can be applied to earth systems in the form of ground motions. An example of this is an earthquake motion acting on a rock layer at a certain depth. This motion is then transferred through the soil profile and is reflected as a different motion at the surface. 
In order to determine the influence of a soil profile on the motion, three major components are required: thickness of layer (H), shear wave velocity (Vs), and damping ratio. - -It is important to ensure that this difference in motion is accounted for. When a structure is constructed, it has a specific resonance. Understanding this resonance is important because if an earthquake motion has similar peaks in frequency, causing amplification of the motion, the structure could have a significant and potentially disastrous behavior. While the incoming motion at the rock might have a different natural resonance, the motion at the surface might match that of the structure. The design of earthquake resistant structures relies on the accurate determination of transfer functions in sites. An example of this is shown below - where a motion was amplified from 0.03g to 0.15g due to the presence of soft clay. - - -.. figure:: ./images/case2_Resonance_Building_Example_TF.png - :scale: 40% - :align: center - - Fig. 1. Mexico City Earthquake Amplification. - - -As with any engineering properties, there will always be the presence of uncertainty. A layer might have differential thicknesses in certain regions, causing the height to be non-uniform. Shear wave velocity can change very quickly depending on depth and composition of the materials within the layer. Damping can also be affected by changes in stratigraphy and composition. To account for this, uncertainty must be incorporated into a transfer function analysis. This inclusion will aid in the accuracy and reliability of site response analyses. - - -Solution Strategy ------------------ - -Fourier Transform -^^^^^^^^^^^^^^^^^ - -In earth systems, this relationship between incoming and outgoing wave can be evaluated through mathematically converting an input motion, typically an acceleration-time history, to a Fourier series using the Fast Fourier Transform (FFT). 
In the Fourier space, the motion is then multiplied by the transfer function, resulting in the outgoing Fourier motion. This can then be converted back into various plots, such as acceleration-time history and spectral acceleration vs. period, that allow for analysis of the outgoing motion. An analysis of this ground motion can provide frequencies of interest where ground accelerations would be highest/lowest, which can aid in site response analysis and planning. - -.. figure:: ./images/case2_TF_Rock_to_Soil1.png - :scale: 40% - :align: center - - Fig. 2. Transfer Function from Rock to Soil. - - - - -Transfer Function Equation -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To calculate a transfer function, the equation below can be used. In a single layer soil profile, it is assumed that the impedance contrast between layers is zero. Using a tool like EE-UQ can help provide the ratio between the input and output motion and provides the uncertainty in those motions and variables. - - -.. figure:: ./images/case2_TF_Equation.png - :scale: 40% - :align: center - - Eqn. 1. Transfer Function Equation [Kramer, 1996] - - -.. note:: - This equation changes based on the soil profile. Having multiple soil layers can lead to an impedance contrast. The equation also changes if the damping ratio is assumed to be zero. Kramer (1996) provides more information for the different instances. - -Example -^^^^^^^ - -A typical transfer function would look similar to the one provided below. In the figure below, there are various peaks of natural resonance for the transfer function, which is where the motion will have the greatest amplification/de-amplification. - -.. figure:: ./images/case2_TF_Nat_Freqs.png - :scale: 60% - :align: center - - Fig. 3. Transfer Function from Rock to Soil. - - -SimCenter Tool Used -------------------- - - -To understand transfer functions, there are many tools available. One of these tools is the SimCenter Transfer Function Tool (TFT). 
This tool introduces users to transfer functions by providing the output motion at a site given the motion, thickness of layers, shear wave velocities, and damping ratio. TFT allows for easy analysis of amplification/de-amplification of ground motions based on specific sites. - -The Earthquake Engineering with Uncertainty Quantification Application (EE-UQ) is a SimCenter research application that also allows for site response predictions due to earthquake loading. In addition to basic transfer function quantification, it allows for the analysis of uncertainty in the predictions based on the uncertainty found within the input model, motion, etc. This workflow application allows the user to run analyses in the background and provides a simple user interface that facilitates its use. - - - -Example Application -------------------- - -Soil Profile -^^^^^^^^^^^^ - -In this example, we will analyze the amplification/deamplification effects of a ground motion caused by its propagation through the soil layer. The 10 meter soil layer has a shear wave velocity (Vs) of 500 m/s and a damping ratio of 3%. - - -.. figure:: ./images/case2_CESG599_TF_image1.png - :scale: 50 % - :align: center - - Fig. 4. Soil Profile & Material Properties. - -Uncertainty -^^^^^^^^^^^^ - -Because of the presence of uncertainty in the soil properties, the transfer function will include uncertainty in its effects. Normal distribution values for each variable (H, Vs, damping) will be provided. This uncertainty will be quantified through multiple runs in EE-UQ and expressed as ratios of mean velocity and acceleration, along with standard deviation and skewness. - -The following normal distribution figures represent the uncertainty within each variable. - - - -.. figure:: ./images/case2_Combined_RV_1.png - :scale: 50 % - :align: center - - Fig. 5. Uncertainty in Each Variable (H, Vs, Damping). 
- -Motion -^^^^^^ - -An earthquake motion will be applied to a rock, located at the bottom of a one-dimensional soil profile. The motion is shown below as an acceleration time history as well as a Fourier amplitude spectra (FAS). - - -.. figure:: ./images/case2_Input_Motion_TF.png - :scale: 40 % - :align: center - - Fig. 6. Input Ground Motion. - -Pre-Workflow Python Script -^^^^^^^^^^^^^^^^^^^^^^^^^^ -To complete a transfer function analysis in EE-UQ various Python files had to be generated. The following script calculates the transfer function of a soil layer and applies it to a given acceleration record. - -.. raw:: html - -
- Click to expand the full Transfer Function Example code -

-
-.. code-block:: python
-
-    # ############################################################################################################
-    # Title: Transfer Function Calculation
-    # Description: This script calculates the transfer function of a soil layer and applies it to a given acceleration record.
-    # Author: Pedro Arduino
-    # UW Computational Geotechnical Group
-    # Date: 2024
-    # All Rights Reserved
-    # ############################################################################################################
-
-    # %%
-    import numpy as np
-    import json
-    import matplotlib.pyplot as plt
-    from numpy.fft import fft, ifft
-    from scipy import integrate
-    from respSpectra import resp_spectra
-
class TFunctionClass:
    """
    Transfer-function calculator for a uniform damped soil layer on rigid
    rock.

    The class reads (or synthesizes) an acceleration record in units of g,
    computes its FFT, multiplies it by the soil-layer transfer function and
    recovers the surface motion, from which peak amplification ratios and
    natural frequencies can be extracted.
    """

    def __init__(self, damping, H, Vs):
        """
        Parameters
        ----------
        damping : float
            Soil damping ratio in percent (e.g. 5.0 for 5%).
        H : float
            Layer thickness in meters.
        Vs : float
            Shear wave velocity in m/s.
        """
        # Results populated later by the record/calculate methods
        self.m_freq = None       # frequency vector [Hz]
        self.m_time = None       # time vector [s]
        self.m_acc = None        # input acceleration [g]
        self.m_absFft = None     # |FFT| of the input motion
        self.m_absSoilTF = None  # |soil transfer function|
        self.m_absIFft = None    # |FFT| of the surface motion
        self.m_accT = None       # surface acceleration [g]

        self.m_vel = None
        self.m_disp = None
        self.m_velT = None
        self.m_dispT = None

        # Time step of the record; 0.0 means "not set yet" — set_freq()
        # falls back to a default in that case. (Previously left undefined
        # until a record was loaded, risking an AttributeError.)
        self.m_dt = 0.0

        # Soil layer parameters
        self.m_damping = damping / 100.0  # damping from percentage to fraction
        self.m_H = H
        self.m_Vs = Vs

    def calculateResponse(self):
        """Compute the input FFT, the soil TF, and the surface response."""
        # Fourier amplitude spectrum of the input motion.
        # (Dead preallocations via np.complex_/np.float_ were removed —
        # those aliases no longer exist in NumPy 2.0.)
        fas = fft(self.m_acc)
        self.m_absFft = np.abs(fas)

        # Transfer function of the soil layer
        SoilTF = self.calcSoilTf()
        self.m_absSoilTF = np.abs(SoilTF)

        # Surface response = input spectrum filtered by the transfer function
        ifas = fas * SoilTF
        self.m_absIFft = np.abs(ifas)
        accT = ifft(ifas)
        self.m_accT = accT.real  # imaginary part is numerical noise

    def calcSoilTf(self):
        """
        Transfer function of a uniform damped soil layer on rigid rock:

                                 1
            H(f) = ---------------------------------------
                    cos( 2*pi*f*H / (Vs * (1 + i*damping)) )

        Returns a list of complex TF values, one per entry of ``m_freq``.
        """
        tf = []

        if self.m_freq is None:
            print("Frequency vector is not defined")
        else:
            # Complex shear wave velocity including material damping;
            # loop-invariant, so hoisted out of the loop. (An unused
            # intermediate 'kstar' was removed.)
            Vsstar = self.m_Vs + self.m_damping * self.m_Vs * 1j
            for f in self.m_freq:
                tf.append(1.0 / np.cos(2.0 * np.pi * f * self.m_H / Vsstar))

        return tf

    def calculate_nat_freq(self):
        """
        Locate peaks of |soil TF| by detecting where the slope changes
        from positive to non-positive (a local maximum).

        Returns
        -------
        (N_freq, N_freqVal) : list, list
            Frequencies [Hz] and TF amplitudes at the detected peaks.
        """
        n_pt = len(self.m_freq)
        N_freq = []
        N_freqVal = []
        dfreq = self.m_freq[-1] / n_pt

        TF_tan = 1.0  # slope over the previous frequency interval
        for i in range(1, len(self.m_freq)):
            TF_tan1 = (self.m_absSoilTF[i] - self.m_absSoilTF[i - 1]) / dfreq
            if TF_tan1 * TF_tan <= 0 and TF_tan > 0:
                N_freq.append(self.m_freq[i])
                N_freqVal.append(self.m_absSoilTF[i])
            TF_tan = TF_tan1

        return N_freq, N_freqVal

    def calculate_ratio(self):
        """
        Integrate input and surface accelerations to velocities and
        displacements, then return the peak amplification ratios.

        Returns
        -------
        (ratioA, ratioV) : float, float
            Peak |surface| / peak |input| ratio for acceleration and
            velocity respectively.
        """
        grav = 9.81  # m/s2 — converts acceleration from g to m/s2
        dT = self.m_time[1] - self.m_time[0]
        accAux = np.asarray(self.m_acc) * grav
        # cumulative_trapezoid replaces cumtrapz, which was removed in
        # SciPy 1.14.
        self.m_vel = integrate.cumulative_trapezoid(accAux, dx=dT)
        self.m_disp = integrate.cumulative_trapezoid(self.m_vel, dx=dT)

        self.m_velT = integrate.cumulative_trapezoid(self.m_accT * grav, dx=dT)
        self.m_dispT = integrate.cumulative_trapezoid(self.m_velT, dx=dT)

        # Peak absolute values: max(abs(.)) rather than abs(max(.)) so a
        # record whose largest excursion is negative is handled correctly.
        ratioA = np.max(np.abs(self.m_accT)) / np.max(np.abs(self.m_acc))
        ratioV = np.max(np.abs(self.m_velT)) / np.max(np.abs(self.m_vel))

        return ratioA, ratioV

    def sin_record(self, f):
        """Generate a 0.4 g sinusoidal record of frequency ``f`` [Hz]."""
        n_points = 2000
        self.m_dt = 0.02
        # (A redundant placeholder list that was immediately overwritten
        # has been removed.)
        self.m_acc = [0.4 * np.sin(2 * f * np.pi * s * self.m_dt)
                      for s in range(n_points)]
        self.set_time()
        self.set_freq()

    def sweep_record(self):
        """Generate a linear frequency-sweep record (8000 pts, dt=0.002 s)."""
        n_points = 8000
        self.m_dt = 0.002
        self.m_acc = [0] * n_points
        self.m_time = [0] * n_points

        for i in range(n_points):
            time = i * self.m_dt
            self.m_time[i] = time
            self.m_acc[i] = np.sin(25.0 * time + 150.0 * (time * time / 2.0) / 16.0)

        self.set_freq()

    def load_file(self, file_name):
        """
        Load a SimCenter event JSON file and read its first time series.

        Expected structure (based on the fields read below):
        ``{"Events": [{"pattern": [...], "timeSeries": [{"data": [...],
        "dT": ...}], "units": {"acc": "g" | "m/s2" | "cm/s2" | ...}}]}``
        """
        self.m_filename = file_name

        try:
            with open(file_name, 'r') as file:
                # Read file contents into a JSON object
                jsonObj = json.load(file)
        except FileNotFoundError as e:
            print(f"Cannot read file {file_name}: {e}")
            return

        events = jsonObj.get("Events", [])

        if events:
            patterns = events[0].get("pattern", [])
            timeseries = events[0].get("timeSeries", [])
            # Pattern metadata is read for completeness but not used yet.
            pattern_type = patterns[0].get("type", "")
            tsname = patterns[0].get("timeSeries", "")

            # Conversion factor so that m_acc ends up in units of g
            units = events[0].get("units", {})
            acc_unit = 1.0
            acc_type = units.get("acc", "")
            if acc_type == "g":
                acc_unit = 1.0
            elif acc_type == "m/s2":
                acc_unit = 1.0 / 9.81
            elif acc_type in ["cm/s2", "gal", "Gal"]:
                acc_unit = 1.0 / 981.0

            timeseries_data = timeseries[0].get("data", [])
            dT = timeseries[0].get("dT", 0.0)
            self.read_GM(timeseries_data, dT, acc_unit)

    def read_GM(self, acc_TH, dT, acc_unit):
        """Store an acceleration history (scaled to g) and build time/freq vectors."""
        self.m_dt = dT
        self.m_acc = [a * acc_unit for a in acc_TH]

        # Pad to an odd number of points — presumably so the FFT layout is
        # symmetric about a single Nyquist bin; TODO confirm intent.
        if len(self.m_acc) % 2 == 0:
            self.m_acc.append(0.0)
        self.m_acc = np.array(self.m_acc)  # Convert to numpy array

        self.set_time()
        self.set_freq()

    def set_freq(self):
        """Build the frequency vector ``m_freq`` [Hz] matching the FFT bins."""
        if self.m_dt == 0:
            # No time step available: fall back to a default.
            self.m_dt = 0.005
            # int(): np.zeros rejects a float length (1/0.005*10 == 2000.0).
            nfreq = int(1 / self.m_dt * 10)
        else:
            nfreq = len(self.m_acc)

        sample_freq = 1.0 / self.m_dt
        self.nyquist_freq = sample_freq / 2.0
        # Bin i corresponds to frequency i * fs / N.
        self.m_freq = np.arange(nfreq) * sample_freq / len(self.m_acc)
        self.nyquist_index = int(len(self.m_freq) / 2)

    def set_time(self):
        """Build the time vector ``m_time`` [s] matching ``m_acc``."""
        self.m_time = np.arange(len(self.m_acc)) * self.m_dt

    def plot_acc(self):
        """Plot input vs. surface acceleration time histories."""
        plt.figure()
        plt.plot(self.m_time, self.m_acc, 'b-', label='input')
        plt.plot(self.m_time, self.m_accT, 'r-', label='output')
        plt.xlabel('Time [sec]')
        plt.ylabel('Acc [g]')
        plt.legend()
        plt.show()

    def plot_fft(self):
        """Plot Fourier amplitude spectra of input and output up to Nyquist."""
        plt.figure()
        plt.plot(self.m_freq[:self.nyquist_index], self.m_absFft[:self.nyquist_index], 'b-', label='input')
        plt.plot(self.m_freq[:self.nyquist_index], self.m_absIFft[:self.nyquist_index], 'r-', label='output')
        plt.xlabel('Frequency [Hz]')
        plt.ylabel('Fourier Amplitude')
        plt.legend()
        plt.show()

    def plot_tf(self):
        """Plot the soil-layer transfer function amplitude up to Nyquist."""
        plt.figure()
        plt.plot(self.m_freq[:self.nyquist_index], self.m_absSoilTF[:self.nyquist_index], 'b-')
        plt.xlabel('Frequency [Hz]')
        plt.ylabel('TF')
        plt.show()

    def plot_spectra(self):
        """
        Plot response spectra of the input and surface motions.

        NOTE(review): this calls resp_spectra(time, acc, damping) and expects
        a (periods, psa) pair; the standalone resp_spectra script in this
        repository has signature (a, time, nstep) returning a 4-tuple —
        confirm which version the imported respSpectra module provides.
        """
        accAux = np.asarray(self.m_acc) * 9.81
        accTAux = np.asarray(self.m_accT) * 9.81
        periods, psa = resp_spectra(self.m_time, accAux, 0.05)
        periodsT, psaT = resp_spectra(self.m_time, accTAux, 0.05)

        plt.figure()
        plt.plot(periods, psa, 'b-', label='input')
        plt.plot(periodsT, psaT, 'r-', label='output')
        plt.xlabel('Periods [s]')
        plt.ylabel('PSA [cm/s2]')
        plt.legend()
        plt.show()
-
def main():
    """Demo run: a 0.5 Hz sinusoid through a 20 m, Vs = 200 m/s, 5%-damped layer."""
    # Soil layer parameters: damping [%], thickness [m], shear wave velocity [m/s]
    damping = 5.0
    H = 20.0
    Vs = 200.0

    tfc = TFunctionClass(damping, H, Vs)

    # Build a sinusoidal input record and propagate it through the layer
    f = 0.5  # frequency in Hz
    tfc.sin_record(f)
    tfc.calculateResponse()

    # Report peak amplification ratios
    ratioA, ratioV = tfc.calculate_ratio()
    print(f"Acceleration Ratio: {ratioA}")
    print(f"Velocity Ratio: {ratioV}")

    # Diagnostic plots: time history, FFT, transfer function, spectra
    tfc.plot_acc()
    tfc.plot_fft()
    tfc.plot_tf()
    tfc.plot_spectra()

if __name__ == "__main__":
    main()
-
-.. raw:: html
-
-    
-
- - -.. raw:: html - -

- - -This script performs post-processing by building response spectra from acceleration time history. - -.. raw:: html - -
- Click to expand the full Response Spectra Python code -

-
-.. code-block:: python
-
-    #########################################################
-    #
-    # Postprocessing python script
-    #
-    # Copyright: UW Computational Mechanics Group
-    #            Pedro Arduino
-    #
-    # Participants: Alborz Ghofrani
-    #               Long Chen
-    #
-    #-------------------------------------------------------
-
-    import numpy as np
-
-
def resp_spectra(a, time, nstep):
    """
    Build elastic response spectra from an acceleration time history.

    Parameters
    ----------
    a : array_like
        Acceleration time history; must contain at least ``nstep`` samples.
    time : float
        Total duration of the record in seconds.
    nstep : int
        Number of time steps in the record.

    Returns
    -------
    p : ndarray
        Considered periods [s], 100 values log-spaced from 1e-3 to 10.
    umax, vmax, amax : ndarray
        Spectral displacement, pseudo-velocity, and pseudo-acceleration
        at each period (5% damping).
    """
    # add initial zero value to acceleration
    a = np.insert(a, 0, 0)
    # number of periods at which spectral values are to be computed
    nperiod = 100
    # define range of considered periods by power of 10
    minpower = -3.0
    maxpower = 1.0
    # create vector of considered periods
    p = np.logspace(minpower, maxpower, nperiod)
    # incremental circular frequency
    dw = 2.0 * np.pi / time
    # vector of circular frequencies; integer-based arange guarantees
    # nstep+2 elements (the float-step form np.arange(0, (nstep+1)*dw, dw)
    # could gain or lose an element to rounding)
    w = np.arange(nstep + 2) * dw
    # fast Fourier transform of acceleration
    afft = np.fft.fft(a)
    # arbitrary stiffness value; m and c below are scaled to produce each
    # target period, so its magnitude cancels out of the response ratio
    k = 1000.0
    # damping ratio
    damp = 0.05
    umax = np.zeros(nperiod)
    vmax = np.zeros(nperiod)
    amax = np.zeros(nperiod)
    # loop to compute spectral values at each period
    for j in range(nperiod):
        # compute mass and dashpot coeff to produce desired periods
        m = ((p[j] / (2 * np.pi)) ** 2) * k
        c = 2 * damp * (k * m) ** 0.5
        h = np.zeros(nstep + 2, dtype=complex)
        # SDOF transfer function over the lower half of the spectrum
        for i in range(0, int(nstep / 2 + 1)):
            h[i] = 1. / (-m * w[i] * w[i] + 1j * c * w[i] + k)
            # mirror image for the conjugate-symmetric upper half
            h[nstep + 1 - i] = np.conj(h[i])

        # compute displacement in frequency domain using the transfer function
        qfft = -m * afft
        u = np.zeros(nstep + 1, dtype=complex)
        for i in range(0, nstep + 1):
            u[i] = h[i] * qfft[i]

        # compute displacement in time domain (ignore imaginary part)
        utime = np.real(np.fft.ifft(u))

        # spectral displacement, velocity, and acceleration
        umax[j] = np.max(np.abs(utime))
        vmax[j] = (2 * np.pi / p[j]) * umax[j]
        amax[j] = (2 * np.pi / p[j]) * vmax[j]

    return p, umax, vmax, amax
-
-.. raw:: html
-
-    
-
- -.. raw:: html - -

- -Workflow in EE-UQ -^^^^^^^^^^^^^^^^^ - -The procedure for performing a transfer function analysis is shown below. - -A forward propagation problem will be performed. The UQ engine to be used is Dakota with parallel execution and saved working directories. The Latin Hypercube Sampling (LHS) method will be used with 10 samples and a seed of 913. The UQ tab should look similar to the one below. - - -.. figure:: ./images/case2_UQTab_Workflow_TF.png - :scale: 30 % - :align: center - - Fig. 7. Uncertainty Quantification. - -The General Information (GI) tab will not be utilized in this example since no structure will be used. - -For the simulation (SIM tab), the input script will be loaded using a CustomPy Model. Along with this, the number of response nodes will be 1 with a spatial dimension of 2. Each node will have 3 degrees of freedom (DOF) and the profile will have damping ratio of 2%. The centroid node value will be 1. - - -.. figure:: ./images/case2_SimTab_TF.png - :scale: 30 % - :align: center - - Fig. 8. Simulations. - -In the Event (EVT) tab, a Multiple SimCenter load generator will be used. The motion of interest will be uploaded here as a JSON file and will have a factor of 1. - -In the Finite Element Modeling (FEM) tab, select a CustomPy-Simulation. - -In the Engineering Demand Parameter (EDP) tab, select a user defined generator. The response parameters will be the ratio of acceleration spectra and velocity spectra from the propagation from rock to the soil. - - -.. figure:: ./images/case2_EDPTab_Workflow_TF.png - :scale: 30 % - :align: center - - Fig. 9. Engineering Demand Parameters. - - -The Random Variables (RV) tab is where the values of H, Vs, and damping are implemented in the analysis. The values seen above are to be input here. A normal distribution will be used for all of these variables. - - -.. figure:: ./images/case2_RVTab_Workflow_TF.png - :scale: 30 % - :align: center - - Fig. 10. Random Variables. 
- - -The user can opt for running the analysis on their local device or in DesignSafe. - - -Results -^^^^^^^ -When the run is completed, the mean values of ratioA and ratioV, as well as uncertainty values,should be provided. These values show the ratio of average amplification/de-amplification in acceleration in velocity of the ground motion at the rock and the motion at the surface. The positive value of the ratio shows amplification occurred due to the propagation of the motion through the soil layer. - - -.. figure:: ./images/case2_Results_Workflow_TF.png - :scale: 30 % - :align: center - - Fig. 11. Results - - -Because the input variables (H, Vs, damping, motions) each have uncertainty, that uncertainty is carried on to the transfer function analysis. EE-UQ allows for uncertainty quantification which allows for an analysis of which variables might be most important or what the "worst-case scenario" could be when designing. The normalized normal distribution for the acceleration and velocity amplification ratios are shown below. - - -.. figure:: ./images/case2_Normalized_RatioA_histogram.png - :scale: 70 % - :align: center - - Fig. 12. Normalized Acceleration Amplification Factor Histogram - -.. figure:: ./images/case2_Normalized_RatioV_histogram.png - :scale: 70 % - :align: center - - Fig. 13. Normalized Velocity Amplification Factor Histogram - - -Due to the infinite possibilities of variability the three main variables (H, Vs, Damping) can have, we see that the normal distribution is not well suited for this analysis, specifically. EE-UQ allows for other methods of uncertainty quantification. Below is a Gaussian Mixture Model. This method is effective in measuring the probability of certain subpopulations within a larger population. - - -.. figure:: ./images/case2_Gaussian_Mixture_RatioA_histogram.png - :scale: 70 % - :align: center - - Fig. 14. Gaussian Mixture Model - Acceleration Amplification Ratio. - - -.. 
figure:: ./images/case2_Gaussian_Mixture_RatioV_histogram.png - :scale: 70 % - :align: center - - Fig. 15. Gaussian Mixture Model - Velocity Amplification Ratio. - -.. note:: - This situation is specific only to this example; normal distributions could very well suit another example. - - - -By extrapolating the values from EE-UQ, the shape of the transfer function can be determined. The natural frequencies of the first 4 peaks in the transfer function are also shown below. - - -.. figure:: ./images/case2_TF_Nat_Freqs.png - :scale: 70 % - :align: center - - Fig. 16. Transfer Function. - - - - -.. raw:: html - -
- -.. table:: Table 1. Natural Frequencies in the Transfer Function - :widths: auto - - +------------+---------------------------------------------+ - | Peak | Amplification Factor at Natural Frequencies | - +============+=============================================+ - | 1 | 20.49 | - +------------+---------------------------------------------+ - | 2 | 7.03 | - +------------+---------------------------------------------+ - | 3 | 4.20 | - +------------+---------------------------------------------+ - | 4 | 2.98 | - +------------+---------------------------------------------+ - -.. raw:: html - -
- - - -With the transfer function plotted, the input motion can be transformed using the transfer function to reflect the motion at the surface. The figure below reflects the large amplification that occurred. The value of the highest acceleration increased from ~0.4g in the rock to ~1.25g in the soil. This amplification is also reflected in other frequencies. - - -.. figure:: ./images/case2_Full_Results_TF.png - :scale: 40 % - :align: center - - Fig. 17. Amplification of Ground Motion. - -The spectral acceleration spectra can be also determined for each of the motions. These spectra can be used to determine if a structure will be affected by the amplification. A single story structure (~0.1 second period) might be at risk due to this amplification. Any periods with a large amplification ratios should be further analyzed to ensure the safety of the structure and site. - - -.. figure:: ./images/case2_SpectralAcc_Results_TF.png - :scale: 50 % - :align: center - - Fig. 18. Amplification in Spectral Acceleration. - - -Remarks -------- -I'd like to thank everyone at SimCenter, specifically Sang-ri Yi, Frank McKenna, Jinyan Zhao, Aakash Bangalore Satish, and Barbaros Cetiner, for all of their effort and assistance they provided during the entire quarter. Navigating these tools and creating examples for them would've been a lot more stressful without their help. - -Transfer function is one of my favorite topics in geotechnical engineering. I'd really like to continue working with site response and performance based design so being able to create this example along with my class was great. - -Finally, I'd like to thank Prof. Arduino who made all of this possible. His determination and motivation was contagious throughout the academic quarter. There are many great professors but there is only one Pedro Arduino. diff --git a/source/case_5.rst~ b/source/case_5.rst~ deleted file mode 100644 index 33d9910..0000000 --- a/source/case_5.rst~ +++ /dev/null @@ -1,611 +0,0 @@ -.. 
_case_5: - -R2D - Liquefaction -================== - -Author: Morgan Sanger ---------------------- - -Introduction ------------- - -This page describes basic concepts of geospatial liquefaction hazard modeling within the umbrella of SimCenter tools. - - -Problem Description ------------------- - -Coseismic soil liquefaction is a phenomenon in which the strength and stiffness of a soil is reduced by earthquake shaking. Resilient communities and infrastructure networks, like lifelines or transportation systems, must be built to withstand and respond to hazards posed by coseismic soil liquefaction. Ideally, these predictions could be made: - -* quickly, in near-real-time after an event; - -* at high resolution, consistent with the scale of individual assets; and - -* at map-scale, across the regional extent affected by large earthquakes. - - -Common liquefaction models in practice require in-situ testing which cannot be continuously performed across large areas, thus presenting the need for “geospatial” liquefaction models. Prior tests of such models (e.g., :cite:`Zhu2017`) have shown both promising potential and severe shortcomings in predicting subsurface conditions with few geospatial predictors. There is a need to advance geospatial liquefaction modeling by integrating geotechnical data, liquefaction mechanics, artificial intelligence (AI), and many geospatial predictor variables to provide reliable regional liquefaction predictions for any earthquake event. 
When integrated with regional hazard assessment capabilities, geospatial liquefaction models will provide value throughout the life of infrastructure projects - from initial desk studies to refined project-specific hazard analyses - and will unlock insights beyond conventional practice, with opportunity to: - -* prescribe event-specific emergency response and evacuation routes immediately after an earthquake, - -* evaluate network reliability and infrastructure network resiliency using structural databases or other asset inventories, and - -* understand the impacts of earthquake events on vulnerable communities using population demographic data. - -Solution Strategy ------------------ - -The state-of-practice geospatial liquefaction model is the :cite:`RB2020` model (updated version of the :cite:`Zhu2017` model), which uses logistic regression to predict probability of liquefaction based on five (5) geospatial variables and trained on a database of liquefaction case histories. - -In this problem, another modeling solution strategy is proposed, according to :cite:`Sanger2024`. The :cite:`Sanger2024` approach parses the problem into that which is empirical and best predicted by AI (the relationship between geospatial variables and subsurface traits) and that which is best predicted by mechanics (liquefaction response, conditioned on those traits). In this approach, the subsurface traits are characterized at point locations using available cone penetration testing (CPT) data. The liquefaction response at each CPT location is computed across a range of magnitude-scale peak ground accelerations (PGAM7.5) using state-of-practice liquefaction manifestation models (e.g., liquefaction potential index, LPI), thereby retaining the knowledge of liquefaction mechanics developed over the last 50+ years. The relationship between manifestation index and PGAM7.5 is represented as a functional form (Eqn 1) with two curve-fitting parameters: A and B (Fig 1). 
Therefore, the liquefaction response (i.e., A and B) at each CPT location becomes target variables of supervised learning AI models. - -.. math:: - MI = \left\{ \begin{array}{ll} - 0, & \text{PGA}_{M7.5} < 0.1g \\ - \arctan(B \cdot (\text{PGA}_{M7.5} - \frac{A}{B})^2) \cdot 100, & \text{PGA}_{M7.5} \geq 0.1g - \end{array} \right. - -**Eqn 1.** Manifestation index as a function of A, B, and PGAM7.5. - - -.. figure:: ./images/case5_manifestationcurve.png - :scale: 65 % - :align: center - :figclass: align-center - - **Fig 1.** Example manifestation curve of LPI vs. PGAM7.5 for a single CPT. - -The AI model is trained to predict liquefaction response using a set of 37 geospatial predictor variables that serve as proxies for liquefaction. These predictor variables include metrics of surface topography and roughness, distance to and elevation above water bodies, and information about geology, geomorphology, and hydrology. By applying the trained AI model to these predictor datasets, the A and B parameters are predicted geospatially at 100-meter resolution. A crucial step in this approach is geostatistically updating the AI predictions via regression kriging near field measurements. The effect is that predictions near known subsurface conditions have lower model uncertainty, while predictions farther away rely on the AI model and have higher uncertainty. This method effectively pre-computes liquefaction response for all possible ground motions based on AI-predicted subsurface conditions. These predictions are stored as mapped parameters, ready for use with specific earthquake data, such as a PGAM7.5 raster in R2D. - -Model predictions were then tested against the leading geospatial model :cite:`RB2020` in three case-history events using receiver operating characteristic and area under the curve analyses (Fig 2). The :cite:`Sanger2024` AI model (before kriging) performed significantly better than :cite:`RB2020` and was further improved by kriging (Fig 3). - -.. 
figure:: ./images/case5_sanger2024-roc.png - :scale: 100 % - :align: center - :figclass: align-center - - **Fig 2.** Receiver operator characteristic curves and area under the curve (AUC) analyses comparing :cite:`RB2020` (“R&B”), and the :cite:`Sanger2024` *before* regression kriging (“LPI”). - -.. figure:: ./images/case5_zhu2017.png - :scale: 100 % - :align: center - :figclass: align-center - - **(a)** - -.. figure:: ./images/case5_sanger2024-ai.png - :scale: 100 % - :align: center - :figclass: align-center - - **(b)** - -.. figure:: ./images/case5_sanger2024-krig.png - :scale: 100 % - :align: center - :figclass: align-center - - **(c)** - - **Fig 3.** Comparison between **(a)** Rashidian & Baise (2020), and this model **(b)** before and **(c)** after regression kriging for the Feb. 2011 M6.1 Christchurch event. - - -SimCenter Tool Used -------------------- - -The presented problem can be solved using SimCenter's Regional Resilience Determination `R2D `_ Tool. A substantially complete description of the tool is provided in the `R2D Documentation `_. - -The updated :cite:`Zhu2017` model (:cite:`RB2020`) is implemented in the R2D tool (version 4.2.0), whereas the :cite:`Sanger2024` model is not yet implemented in the R2D tool. In this project, the :cite:`Sanger2024` model was implemented in the R2D tool using the `applications.py` file. - - -Example Application -------------------- - -This example demonstrates the application of the :cite:`Zhu2017` liquefaction-induced ground failure model (really the :cite:`RB2020` model) currently implemented in R2D, and the implementation of the next-generation :cite:`Sanger2024` model described in the **Solution Strategy**. - -.. note:: - In R2D, ground failure models are considered intermediate results that are accessible only through the Earthquake Event Generator tool, and they cannot be executed within the damage and loss assessment tools. 
Future development in R2D should consider (1) implementing ground failure models with other earthquake hazard source options (e.g., Shakemap Earthquake Scenario), (2) extending the implementation of the ground failure modeling beyond California, and (3) incorporating ground failure models into the damage and loss assessment tools. - -Zhu et al. (2017) -................. - -**WORKFLOW** - -The workflow for the :cite:`Zhu2017` model in the Earthquake Event Generator tool in R2D is as follows: - -**1. Define Analysis Grid**: Define the analysis grid for the study area. Here, an area of downtown San Francisco is selected for the analysis. The grid is defined with a resolution of approximately 100 meters, the true model resolution of the :cite:`Sanger2024` model. - -.. figure:: ./images/case5_EQGen1.png - :width: 800px - :align: center - :figclass: align-center - -.. raw:: html - -
- -**2. Forecast Rupture Scenarios**: Large events (>M7) are forecasted for the study area to demonstrate the model performance under extreme conditions. - -.. figure:: ./images/case5_EQGen2.png - :width: 800px - :align: center - :figclass: align-center - -.. raw:: html - -
- -**3. Select Earthquake Event**: Select an earthquake event scenario. Here, the M8 N. San Andreas rupture event scenario is selected. - -.. figure:: ./images/case5_EQGen3.png - :width: 800px - :align: center - :figclass: align-center - -.. raw:: html - -
- -**4. Select Intensity Measures**: PGA and PGV are selected as the intensity measures for the analysis, both required for the :cite:`Zhu2017` model. - -.. figure:: ./images/case5_EQGen4.png - :width: 800px - :align: center - :figclass: align-center - -.. raw:: html - -
- -**5. Select Ground Failure Model**: Select the ground failure model. Here, the :cite:`Zhu2017` model is selected. - -.. figure:: ./images/case5_EQGen5.png - :width: 800px - :align: center - :figclass: align-center - -.. raw:: html - -
- -**6. Run Hazard Simulation**. - -.. figure:: ./images/case5_EQGen6.png - :width: 800px - :align: center - :figclass: align-center - -.. raw:: html - -
- -**7. View Results**: The results of the :cite:`Zhu2017` model for the probability of liquefaction are shown in Fig 4. - - - -**CODE** - -The following code snippet shows the implementation of the :cite:`Zhu2017` model in the R2D tool using the `applications.py` file. - -.. raw:: html - -
- Click to expand the full ZhuEtal2017 code -

-
-.. code-block:: python
-    :linenos:
-
-    # Zhu et al. (2017) code
-    -----------------------------------------------------------
-    class ZhuEtal2017(Liquefaction):
-        """
-        A map-based procedure to quantify liquefaction at a given location using logistic models by Zhu et al. (2017). Two models are provided:
-
-        1. For distance to coast < cutoff, **prob_liq** = f(**pgv**, **vs30**, **precip**, **dist_coast**, **dist_river**)
-        2. For distance to coast >= cutoff, **prob_liq** = f(**pgv**, **vs30**, **precip**, **dist_coast**, **dist_river**, **gw_depth**)
-        
-        Parameters
-        ----------
-        From upstream PBEE:
-        pgv: float, np.ndarray or list
-            [cm/s] peak ground velocity
-        mag: float, np.ndarray or list
-            moment magnitude
-        pga: float, np.ndarray or list
-            [g] peak ground acceleration, only to check threshold where prob_liq(pga<0.1g)=0
-        stations: list
-            a list of dict containing the site infomation. Keys in the dict are 'ID',
-            'lon', 'lat', 'vs30', 'z1pt0', 'z2pt5', 'vsInferred', 'rRup', 'rJB', 'rX'
-            
-        Geotechnical/geologic:
-        vs30: float, np.ndarray or list
-            [m/s] time-averaged shear wave velocity in the upper 30-meters
-        precip: float, np.ndarray or list
-            [mm] mean annual precipitation
-        dist_coast: float, np.ndarray or list
-            [km] distance to nearest coast
-        dist_river: float, np.ndarray or list
-            [km] distance to nearest river
-        dist_water: float, np.ndarray or list
-            [km] distance to nearest river, lake, or coast
-        gw_depth: float, np.ndarray or list
-            [m] groundwater table depth
-            
-        Fixed:
-        # dist_water_cutoff: float, optional
-        #     [km] distance to water cutoff for switching between global and coastal model, default = 20 km
-
-        Returns
-        -------
-        prob_liq : float, np.ndarray
-            probability for liquefaciton
-        liq_susc_val : str, np.ndarray
-            liquefaction susceptibility category value
-        
-        References
-        ----------
-        .. [1] Zhu, J., Baise, L.G., and Thompson, E.M., 2017, An Updated Geospatial Liquefaction Model for Global Application, Bulletin of the Seismological Society of America, vol. 107, no. 3, pp. 1365-1385.
-        
-        """
-        def __init__(self, parameters, stations) -> None:
-            self.stations = stations
-            self.parameters = parameters
-            self.dist_to_water = None #(km)
-            self.dist_to_river = None #(km)
-            self.dist_to_coast = None #(km)
-            self.gw_depth = None #(m)
-            self.precip = None # (mm)
-            self.vs30 = None #(m/s)
-            self.interpolate_spatial_parameters(parameters)
-
-        def interpolate_spatial_parameters(self, parameters):
-            # site coordinate in CRS 4326
-            lat_station = [site['lat'] for site in self.stations]
-            lon_station = [site['lon'] for site in self.stations]
-            # dist_to_water 
-            if parameters["DistWater"] == "Defined (\"distWater\") in Site File (.csv)":
-                self.dist_to_water = np.array([site['distWater'] for site in self.stations])
-            else:
-                self.dist_to_water = sampleRaster(parameters["DistWater"], parameters["inputCRS"],\
-                        lon_station, lat_station)
-            # dist_to_river
-            if parameters["DistRiver"] == "Defined (\"distRiver\") in Site File (.csv)":
-                self.dist_to_river = np.array([site['distRiver'] for site in self.stations])
-            else:
-                self.dist_to_river = sampleRaster(parameters["DistRiver"], parameters["inputCRS"],\
-                        lon_station, lat_station)
-            # dist_to_coast
-            if parameters["DistCoast"] == "Defined (\"distCoast\") in Site File (.csv)":
-                self.dist_to_coast = np.array([site['distCoast'] for site in self.stations])
-            else:
-                self.dist_to_coast = sampleRaster(parameters["DistCoast"], parameters["inputCRS"],\
-                        lon_station, lat_station)
-            # gw_water
-            if parameters["GwDepth"] == "Defined (\"gwDepth\") in Site File (.csv)":
-                self.gw_depth = np.array([site['gwDepth'] for site in self.stations])
-            else:
-                self.gw_depth = sampleRaster(parameters["GwDepth"], parameters["inputCRS"],\
-                        lon_station, lat_station)
-            # precipitation 
-            if parameters["Precipitation"] == "Defined (\"precipitation\") in Site File (.csv)":
-                self.precip = np.array([site['precipitation'] for site in self.stations])
-            else:
-                self.precip = sampleRaster(parameters["Precipitation"], parameters["inputCRS"],\
-                        lon_station, lat_station)
-            self.vs30 = np.array([site['vs30'] for site in self.stations])
-            print("Sampling finished")
-        
-        def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys):
-            if ('PGA' in im_list) and ('PGV' in im_list):
-                num_stations = len(self.stations)
-                num_scenarios = len(eq_data)
-                PGV_col_id = [i for i, x in enumerate(im_list) if x == 'PGV'][0]
-                PGA_col_id = [i for i, x in enumerate(im_list) if x == 'PGA'][0]
-                for scenario_id in range(num_scenarios):
-                    num_rlzs = ln_im_data[scenario_id].shape[2]
-                    im_data_scen = np.zeros([num_stations,\
-                                        len(im_list)+len(output_keys), num_rlzs])
-                    im_data_scen[:,0:len(im_list),:] = ln_im_data[scenario_id]
-                    for rlz_id in range(num_rlzs):
-                        pgv = np.exp(ln_im_data[scenario_id][:,PGV_col_id,rlz_id])
-                        pga = np.exp(ln_im_data[scenario_id][:,PGA_col_id,rlz_id])
-                        mag = float(eq_data[scenario_id][0])
-                        model_output = self.model(pgv, pga, mag)
-                        for i, key in enumerate(output_keys):
-                            im_data_scen[:,len(im_list)+i,rlz_id] = model_output[key]
-                    ln_im_data[scenario_id] = im_data_scen
-                im_list = im_list + output_keys
-                additional_output = dict()
-                for key in additional_output_keys:
-                    item = getattr(self, key, None)
-                    if item is None:
-                        warnings.warn(f"Additional output {key} is not avaliable in the liquefaction trigging model 'ZhuEtal2017'.")
-                    else:
-                        additional_output.update({key:item})
-            else:
-                sys.exit(f"At least one of 'PGA' and 'PGV' is missing in the selected intensity measures and the liquefaction trigging model 'ZhuEtal2017' can not be computed.")
-                # print(f"At least one of 'PGA' and 'PGV' is missing in the selected intensity measures and the liquefaction trigging model 'ZhuEtal2017' can not be computed."\
-                #       , file=sys.stderr)
-                # sys.stderr.write("test")
-                # sys.exit(-1)
-            return ln_im_data, eq_data, im_list, additional_output
-        
-        def model(self, pgv, pga, mag):
-            """Model"""
-            # zero prob_liq
-            zero_prob_liq = 1e-5 # decimal
-            
-            # distance cutoff for model
-            model_transition = 20 # km
-
-            # initialize arrays
-            x_logistic = np.empty(pgv.shape)
-            prob_liq = np.empty(pgv.shape)
-            liq_susc_val = np.ones(pgv.shape)*-99
-            liq_susc = np.empty(pgv.shape, dtype=int)
-            
-            # magnitude correction, from Baise & Rashidian (2020) and Allstadt et al. (2022)
-            pgv_mag = pgv/(1+np.exp(-2*(mag-6)))
-            pga_mag = pga/(10**2.24/mag**2.56)
-
-            # find where dist_water <= cutoff for model of 20 km
-            # coastal model
-            ind_coastal = self.dist_to_water<=model_transition
-            # global model
-            # ind_global = list(set(list(range(pgv.shape[0]))).difference(set(ind_coastal)))
-            ind_global = ~(self.dist_to_water<=model_transition)
-
-            # set cap of precip to 1700 mm
-            self.precip[self.precip>1700] = 1700
-
-            # x = b0 + b1*var1 + ...
-            # if len(ind_global) > 0:
-            # liquefaction susceptbility value, disregard pgv term
-            liq_susc_val[ind_global] = \
-                8.801 + \
-                -1.918   * np.log(self.vs30[ind_global]) + \
-                5.408e-4 * self.precip[ind_global] + \
-                -0.2054  * self.dist_to_water[ind_global] + \
-                -0.0333  * self.gw_depth[ind_global]
-            # liquefaction susceptbility value, disregard pgv term
-            liq_susc_val[ind_coastal] = \
-                12.435 + \
-                -2.615   * np.log(self.vs30[ind_coastal]) + \
-                5.556e-4 * self.precip[ind_coastal] + \
-                -0.0287  * np.sqrt(self.dist_to_coast[ind_coastal]) + \
-                0.0666   * self.dist_to_river[ind_coastal] + \
-                -0.0369  * self.dist_to_river[ind_coastal]*np.sqrt(self.dist_to_coast[ind_coastal])
-            # catch nan values
-            liq_susc_val[np.isnan(liq_susc_val)] = -99.
-            # x-term for logistic model = liq susc val + pgv term
-            x_logistic[ind_global] = liq_susc_val[ind_global] + 0.334*np.log(pgv_mag[ind_global])
-            # x-term for logistic model = liq susc val + pgv term
-            x_logistic[ind_coastal] = liq_susc_val[ind_coastal] + 0.301*np.log(pgv_mag[ind_coastal])
-
-            # probability of liquefaction
-            prob_liq = 1/(1+np.exp(-x_logistic)) # decimal
-            prob_liq = np.maximum(prob_liq,zero_prob_liq) # set prob to > "0" to avoid 0% in log
-
-            # for pgv_mag < 3 cm/s, set prob to "0"
-            prob_liq[pgv_mag<3] = zero_prob_liq
-            # for pga_mag < 0.1 g, set prob to "0"
-            prob_liq[pga_mag<0.1] = zero_prob_liq
-            # for vs30 > 620 m/s, set prob to "0"
-            prob_liq[self.vs30>620] = zero_prob_liq
-
-            # calculate sigma_mu
-            sigma_mu = (np.exp(0.25)-1) * prob_liq
-
-            # determine liquefaction susceptibility category
-            liq_susc[liq_susc_val>-1.15]  = liq_susc_enum['very_high'].value
-            liq_susc[liq_susc_val<=-1.15] = liq_susc_enum['high'].value
-            liq_susc[liq_susc_val<=-1.95] = liq_susc_enum['moderate'].value
-            liq_susc[liq_susc_val<=-3.15] = liq_susc_enum['low'].value
-            liq_susc[liq_susc_val<=-3.20] = liq_susc_enum['very_low'].value
-            liq_susc[liq_susc_val<=-38.1] = liq_susc_enum['none'].value
-
-            # liq_susc[prob_liq==zero_prob_liq] = 'none'
-            
-            return {"liq_prob":prob_liq, "liq_susc":liq_susc}
-
-
-.. raw:: html
-
-    
-
- - -.. raw:: html - -

- - - -**RESULTS** - -.. figure:: ./images/case5_zhu_pliq.png - :scale: 75 % - :align: center - :figclass: align-center - - **Fig 4.** Results of the Zhu et al. (2017) model for probability of liquefaction given the selected M8 N. San Andreas rupture event scenario. - - - -Sanger et al. (2024) -..................... -**WORKFLOW** - -The workflow for the :cite:`Sanger2024` model follows the same steps as the :cite:`Zhu2017` model, with the exception of the referenced geospatial parameters. The :cite:`Sanger2024` model needs only the LPI A and LPI B parameters, implemented here within the DistWater and DistCoast parameters in the R2D tool because the author did not have access to the user-interface code for the R2D tool. - -.. figure:: ./images/case5_EQGen10.png - :width: 800px - :align: center - :figclass: align-center - - -**CODE** - -In this example, the :cite:`Sanger2024` model is implemented in the R2D tool using the `applications.py` file, overwriting the `ZhuEtal2017` class for quick integration with the R2D user-interface. - -.. raw:: html - -
- Click to expand the full Sanger2024 code -

-
-.. code-block:: python
-    :linenos:
-
-    # Sanger et al. (2024) code
-    -----------------------------------------------------------
-    class ZhuEtal2017(Liquefaction):
-    """
-    A map-based procedure to quantify liquefaction at a given location Sanger et al. (2024). 
-
-    Parameters
-    ----------
-    From upstream PBEE:
-    mag: float, np.ndarray or list
-        moment magnitude
-    pga: float, np.ndarray or list
-        [g] peak ground acceleration, only to check threshold where prob_liq(pga<0.1g)=0
-    stations: list
-        a list of dict containing the site infomation. Keys in the dict are 'ID',
-        'lon', 'lat', 'vs30', 'z1pt0', 'z2pt5', 'vsInferred', 'rRup', 'rJB', 'rX'
-        
-    Geotechnical:
-    LPI A: float, np.ndarray or list
-    LPI B: float, np.ndarray or list
-
-    Returns
-    -------
-    prob_liq : float, np.ndarray
-        probability for liquefaciton (surface manifestation)
-    
-    """
-    def __init__(self, parameters, stations) -> None:
-        self.stations = stations
-        self.parameters = parameters
-        self.LPI_A = None #
-        self.LPI_B = None #
-        self.interpolate_spatial_parameters(parameters)
-
-    def interpolate_spatial_parameters(self, parameters):
-        # site coordinate in CRS 4326
-        lat_station = [site['lat'] for site in self.stations]
-        lon_station = [site['lon'] for site in self.stations]
-        # LPI_A
-        self.LPI_A = sampleRaster(parameters["DistWater"], parameters["inputCRS"],\
-                    lon_station, lat_station)
-        # LPI_B
-        self.LPI_B = sampleRaster(parameters["DistCoast"], parameters["inputCRS"],\
-                    lon_station, lat_station)
-        print("Sampling finished")
-    
-    def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys):
-        if ('PGA' in im_list):
-            num_stations = len(self.stations)
-            num_scenarios = len(eq_data)
-            PGA_col_id = [i for i, x in enumerate(im_list) if x == 'PGA'][0]
-            for scenario_id in range(num_scenarios):
-                num_rlzs = ln_im_data[scenario_id].shape[2]
-                im_data_scen = np.zeros([num_stations,\
-                                    len(im_list)+len(output_keys), num_rlzs])
-                im_data_scen[:,0:len(im_list),:] = ln_im_data[scenario_id]
-                for rlz_id in range(num_rlzs):
-                    pga = np.exp(ln_im_data[scenario_id][:,PGA_col_id,rlz_id])
-                    mag = float(eq_data[scenario_id][0])
-                    model_output = self.model(pga, mag)
-                    for i, key in enumerate(output_keys):
-                        im_data_scen[:,len(im_list)+i,rlz_id] = model_output[key]
-                ln_im_data[scenario_id] = im_data_scen
-            im_list = im_list + output_keys
-            additional_output = dict()
-            for key in additional_output_keys:
-                item = getattr(self, key, None)
-                if item is None:
-                    warnings.warn(f"Additional output {key} is not avaliable in the liquefaction trigging model 'SangerEtal2024'.")
-                else:
-                    additional_output.update({key:item})
-        else:
-            sys.exit(f"'PGA' is missing in the selected intensity measures and the liquefaction trigging model 'SangerEtal2024' can not be computed.")
-        return ln_im_data, eq_data, im_list, additional_output
-    
-    def model(self, pga, mag):
-        """Model"""
-        # magnitude correction, according to MSF Correction (SAND) Function according to Idriss and Boulanger (2008) 
-        MSF = 6.9*np.exp(-mag/4) - 0.058
-        if MSF > 1.8:
-            MSF = 1.8
-        pga_mag = pga / MSF  
-
-        # Geospatial LPI A and LPI B
-        # Initialize an array for the calculated MI
-        LPI = np.zeros_like(pga_mag)
-    
-        # Calculate MI for each element of the arrays
-        mask_low = pga_mag < 0.1
-        mask_high = pga_mag >= 0.1
-        LPI[mask_low] = 0
-        LPI[mask_high] = self.LPI_A[mask_high] * np.arctan(self.LPI_B[mask_high] * (pga_mag[mask_high] - self.LPI_A[mask_high] / self.LPI_B[mask_high]) ** 2) * 100
-        
-        from scipy.stats import norm
-
-        # Probability of liquefaction (manifestation at the surface) according to Geyin et al. (2020) (minor manifestation)
-        LPI_beta= 1.774
-        LPI_theta= 4.095 
-        prob_liq = norm.cdf(np.log(LPI/LPI_theta)/LPI_beta)
-        
-        return {"liq_prob":prob_liq, "liq_susc":LPI}
-
-.. raw:: html
-
-    
-
- - -.. raw:: html - -

- - -**RESULTS** - -.. figure:: ./images/case5_sanger_lpi.png - :scale: 75 % - :align: center - :figclass: align-center - - **Fig 5.** Results of the Sanger et al. (2024) model for liquefaction potential index given the selected M8 N. San Andreas rupture event scenario. - -.. figure:: ./images/case5_sanger_pliq.png - :scale: 75 % - :align: center - :figclass: align-center - - **Fig 6.** Results of the Sanger et al. (2024) model for probability of liquefaction given the selected M8 N. San Andreas rupture event scenario, using the Geyin and Maurer (2020) fragility function to map LPI to probability of surface manifestation (minor/all). - - -Remarks -------- - -.. note:: - Preliminary visualization of the results can be accomplished in the R2D VIZ tab using Graduated Symbols. However, the author recommends exporting the results to a GIS software for more detailed visualization and analysis. - .. figure:: ./images/case5_EQGen7.png - :width: 600px - :align: center - :figclass: align-center - - -.. note:: - Note the division spacing issue that arises when the number of divisions is greater than 10. This has been alerted to the developers. - - .. figure:: ./images/case5_EQGen11.png - :width: 600px - :align: center - :figclass: align-center - -.. bibliography:: - :filter: cited diff --git a/source/case_r.rst~ b/source/case_r.rst~ deleted file mode 100644 index 640be98..0000000 --- a/source/case_r.rst~ +++ /dev/null @@ -1,8 +0,0 @@ - -.. _case_r: - - -References -========== - -.. bibliography:: references.bib