diff --git a/lorenz96_compute_secondary_instabilities.py b/lorenz96_compute_secondary_instabilities.py
index 9eefb2c..34f8712 100755
--- a/lorenz96_compute_secondary_instabilities.py
+++ b/lorenz96_compute_secondary_instabilities.py
@@ -1,4 +1,4 @@
- #!/home/user/anaconda3/bin/python
+#!/home/user/anaconda3/bin/python
 import numpy as np
 import os
 import l96
@@ -7,24 +7,22 @@
 from itertools import product
-
-# these are our constants
-paraL96_2lay = {'F1' : 10,
+paraL96_2lay = {'F1' : 64,
                 'F2' : 0,
                 'b' : 10,
                 'c' : 10,
                 'h' : 1,
-                'dimX': 36,
+                'dimX': 20,
                 'dimY' : 10,
                 'RescaledY' : False,
                 'expname' : 'secondaryinstabilities_2layer',
-                'time' : np.arange(0,500,0.1),
+                'time' : np.arange(0,50,0.01),
                 'spinup' : 100,
                 '2lay' : True,
                 'integrator': 'classic'
                 }
-paraL96_1lay = {'F1' : 10,
+paraL96_1lay = {'F1' : 8,
                 'F2' : 0,
                 'b' : 10,
                 'c' : 10,
@@ -33,7 +31,7 @@
                 'dimY' : 10,
                 'RescaledY' : False,
                 'expname' : 'secondaryinstabilities_1layer',
-                'time' : np.arange(0,500,0.05),
+                'time' : np.arange(0,200,0.1),
                 'spinup' : 100,
                 '2lay' : False,
                 'integrator': 'classic'
@@ -41,10 +39,13 @@
 testzeroclv=True
-steplengthforsecondorder = np.arange(0,400,1)
-hs=[1.0]#, 0.5] # , 0.0625, 0.125 , 0.25 , 0.5 , 1. ]
+
+hs=[ 0.0 , 0.0625, 0.125 , 0.25 , 0.5 , 1. ]
+experiments = [paraL96_2lay,paraL96_1lay]
+
+steplengthforsecondorder = np.arange(0,100,1)
 precision='float64'
-for paraL96,h in product([paraL96_1lay],hs):
+for paraL96,h in product(experiments ,hs):
     if not paraL96['2lay'] and not h == 1.0: print("1 lay only with h = 1.");break
     savename=paraL96['expname']+"_h_"+str(h)
     spinup = paraL96['spinup']
@@ -79,13 +80,14 @@
     for tn, (ts,te) in enumerate(zip(t[0:-2],t[1:-1])):
         print(tn)
         contracted_CLVs[tn,:,:]=1/2*contract_func(invCLV[tn,:,:],CLV[tn,:,:])
+        if tn % 1000: np.memmap.flush(contracted_CLVs)
     np.memmap.flush(contracted_CLVs)
     for tn, (ts,te) in enumerate(zip(t[0:-steplengthforsecondorder.max()-1],t[1:-steplengthforsecondorder.max()])):
         dtau=te -ts
-        print(tn)
+        print(tn,paraL96['h'],paraL96['2lay'])
         for n_ind, n_step in enumerate(steplengthforsecondorder):
             if n_ind == 0: solution[tn,n_ind,:,:] = 0
             else:
@@ -107,4 +109,10 @@
     np.memmap.flush(full_solution)
     np.memmap.flush(growth)
     print("Saveing results in folder "+savename+".")
-
+    del contracted_CLVs
+    del solution
+    del full_solution
+    del growth
+    del CLV
+    del lyaploc_clv
+    del invCLV
diff --git a/lorenz96_compute_secondary_instabilities_v2.py b/lorenz96_compute_secondary_instabilities_v2.py
new file mode 100644
index 0000000..fc0b72b
--- /dev/null
+++ b/lorenz96_compute_secondary_instabilities_v2.py
@@ -0,0 +1,112 @@
+ #!/home/user/anaconda3/bin/python
+import numpy as np
+import os
+import l96
+from scipy import triu
+import scipy.linalg as linalg
+from itertools import product
+
+
+
+# these are our constants
+paraL96_2lay = {'F1' : 10,
+                'F2' : 0,
+                'b' : 10,
+                'c' : 10,
+                'h' : 1,
+                'dimX': 36,
+                'dimY' : 10,
+                'RescaledY' : False,
+                'expname' : 'secondaryinstabilities_2layer',
+                'time' : np.arange(0,500,0.1),
+                'spinup' : 100,
+                '2lay' : True,
+                'integrator': 'classic'
+                }
+
+paraL96_1lay = {'F1' : 10,
+                'F2' : 0,
+                'b' : 10,
+                'c' : 10,
+                'h' : 1,
+                'dimX': 44,
+                'dimY' : 10,
+                'RescaledY' : False,
+                'expname' : 'secondaryinstabilities_1layer',
+                'time' : np.arange(0,500,0.05),
+                'spinup' : 100,
+                '2lay' : False,
+                'integrator': 'classic'
+                }
+
+
+testzeroclv=True
+steplengthforsecondorder = np.arange(0,100,1)
+hs=[1.0]#, 0.5] # , 0.0625, 0.125 , 0.25 , 0.5 , 1. ]
+precision='float64'
+for paraL96,h in product([paraL96_1lay],hs):
+    if not paraL96['2lay'] and not h == 1.0: print("1 lay only with h = 1.");break
+    savename=paraL96['expname']+"_h_"+str(h)
+    spinup = paraL96['spinup']
+    paraL96=np.load(savename+"/paraL96.npy")
+    paraL96=paraL96[()]
+    # M number exponents
+    if paraL96['2lay']:
+        contract_func = lambda x,y: l96.contract_hess_l96_2layer_v2(x,y,paraL96)
+        M = paraL96['dimX'] + paraL96['dimX']*paraL96['dimY'] # -1 full spectrum
+        dimN = paraL96['dimX'] + paraL96['dimX']*paraL96['dimY'] # -1 full spectrum
+    else:
+        contract_func = l96.contract_hess_l96_1layer_v2
+        M = paraL96['dimX']
+        dimN = paraL96['dimX']
+
+    np.save(savename+'/steplengthforsecondorder',steplengthforsecondorder)
+
+
+    t = paraL96['time']
+    CLV = np.memmap(savename+'/CLV.dat',mode='r',shape=(len(t),dimN,M),dtype='float64', order = 'C')
+    lyaploc_clv = np.memmap(savename+'/lyaploc_clv',mode='r',shape=(len(t),M),dtype='float64', order = 'C')
+    invCLV = np.memmap(savename+'/invCLV.dat',mode='r',shape=(len(t),dimN,M),dtype='float64', order = 'C')
+
+
+
+    lensteparray = len(steplengthforsecondorder)
+    contracted_CLVs = np.memmap(savename+'/contracted_clvs.dat',mode='w+',shape=(len(t),dimN,M),dtype=precision, order = 'C') #(final_clv,init_clv)
+    solution = np.memmap(savename+'/solution.dat',mode='w+',shape=(len(t),lensteparray,dimN,M),dtype=precision, order = 'C')
+    full_solution = np.memmap(savename+'/full_solution.dat',mode='w+',shape=(len(t),lensteparray,dimN,M),dtype=precision, order = 'C')
+    #normalized_solution = np.memmap(savename+'/normalized_solution.dat',mode='w+',shape=(len(t),lensteparray,dimN,M),dtype=precision, order = 'C')
+    #growth = np.memmap(savename+'/growth.dat',mode='w+',shape=(len(t),lensteparray,M),dtype=precision, order = 'C')
+    for tn, (ts,te) in enumerate(zip(t[0:-2],t[1:-1])):
+        print(tn)
+        contracted_CLVs[tn,:,:]=1/2*contract_func(invCLV[tn,:,:],CLV[tn,:,:])
+
+    np.memmap.flush(contracted_CLVs)
+
+
+    for tn, (ts,te) in enumerate(zip(t[0:-steplengthforsecondorder.max()-1],t[1:-steplengthforsecondorder.max()])):
+        dtau=te -ts
+        print(tn)
+        for n_ind, n_step in enumerate(steplengthforsecondorder):
+            dummy = np.exp(dtau*np.sum(lyaploc_clv[tn:tn+n_step,:], axis = 0))
+            if n_ind == 0: solution[tn,n_ind,:,:] = 0
+            else:
+                solution[tn,n_ind,:,:] = (solution[tn,n_ind-1,:,:] +
+                    dtau * np.einsum('i,ij,j->ij',np.reciprocal(dummy),contracted_CLVs[tn+n_step,:,:],
+                    dummy**2.0,order='C'))
+        for n_ind, n_step in enumerate(steplengthforsecondorder):
+            if n_ind == 0: continue
+            else:
+                full_solution[tn,n_ind,:,:] = np.einsum('ki,ij,i->kj',CLV[tn+n_step,:,:],solution[tn,n_ind,:,:],np.exp(dtau*np.sum(lyaploc_clv[tn:tn+n_step,:], axis = 0)),order='C')
+                solution[tn,n_ind,:,:] = np.einsum('ij,i->ij',solution[tn,n_ind,:,:],np.exp(dtau*np.sum(lyaploc_clv[tn:tn+n_step,:], axis = 0)),order='C')
+                #np.multiply(solution[tn,n_ind,:,:] ,np.exp(np.sum(dtau*lyaploc_clv[tn:tn+n_step,:,np.newaxis], axis = 0)))
+                #full_solution[tn,n_ind,:,:] = np.matmul(CLV[tn+n_step,:,:],solution[tn,n_step,:,:])
+                #growth[tn,n_ind,:]=np.log(np.linalg.norm(full_solution[tn,n_ind,:,:],axis=0))/(n_step*dtau)
+
+        if tn % 1 == 0:
+            np.memmap.flush(solution)
+            np.memmap.flush(contracted_CLVs)
+            #np.memmap.flush(normalized_solution)
+            np.memmap.flush(full_solution)
+            #np.memmap.flush(growth)
+    print("Saveing results in folder "+savename+".")
+
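Both versions of this script accumulate the same second-order term step by step along the trajectory; the einsum lines are a vectorised form of the recursion sketched below for a single start time. This is only a reading aid: plain arrays and illustrative names stand in for the memmap-backed fields and the l96 helpers, and it is not part of the patch.

import numpy as np

def second_order_coefficients(contracted, lyap_loc, dtau, nsteps):
    # contracted[n] : (M, M) contracted Hessian 1/2 * C^{-1} H(C, C) at step n (CLV basis)
    # lyap_loc[n]   : (M,) local Lyapunov exponents at step n
    M = contracted.shape[-1]
    sol = np.zeros((nsteps, M, M))
    for n in range(1, nsteps):
        g = np.exp(dtau * lyap_loc[:n].sum(axis=0))   # linear growth of every CLV over n steps
        # same update as the np.einsum('i,ij,j->ij', 1/g, contracted[n], g**2) line above
        sol[n] = sol[n - 1] + dtau * (1.0 / g)[:, None] * contracted[n] * (g ** 2)[None, :]
    # the 'ki,ij,i->kj' einsum then projects back to phase space: full[n] = CLV[n] @ (g[:, None] * sol[n])
    return sol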
diff --git a/lorenz96_definterun.py b/lorenz96_definterun.py
index b60dbf4..9bf4761 100755
--- a/lorenz96_definterun.py
+++ b/lorenz96_definterun.py
@@ -1,7 +1,8 @@
 #!/home/user/anaconda3/bin/python
 import numpy as np
 from l96 import *
-
+from numpy import linalg
+import os
 # these are our constants
 paraL96 = {'F1' : 64,
            'F2' : 0,
@@ -17,10 +18,11 @@
 M = paraL96['dimX'] + paraL96['dimX']*paraL96['dimY'] # -1 full spectrum
 dimN = paraL96['dimX'] + paraL96['dimX']*paraL96['dimY'] # -1 full spectrum
 integrator = 'classic'
-t = np.arange(0,200,0.01)
+t = np.arange(0,50,0.01)
 spinup = 100;
 #setup L96
-hs=[ 0. , 0.0625, 0.125 , 0.25 , 0.5 , 1. ]
+#hs=[ 0. , 0.0625, 0.125 , 0.25 , 0.5 , 1. ]
+hs=[ 0. , 0.0625, 0.25 , 1. ]
 CLV = np.zeros((len(t),dimN,M,len(hs)))
 BLV = np.zeros((len(t),dimN,M,len(hs)))
 R = np.zeros((len(t),dimN,M,len(hs)))
@@ -67,7 +69,7 @@
     print("\nBackwards Steps ...")
     imax = tn
-    res=triu(np.random.rand(M,M))
+    res=np.triu(np.random.rand(M,M))
     CLV[imax,:,:,count]=np.matmul(BLV[imax,:,:,count],res)
     res=res/np.linalg.norm(res,axis=0,keepdims=True)
     lyaploc_clv[imax,:,count]=np.log(1/np.abs(np.linalg.norm(CLV[imax,:,:,count],axis=0)))/np.abs(te-ts)
diff --git a/lorenz96_secondary_instabilities.py b/lorenz96_secondary_instabilities.py
index c9c340a..21de5ae 100755
--- a/lorenz96_secondary_instabilities.py
+++ b/lorenz96_secondary_instabilities.py
@@ -11,16 +11,16 @@
 #warnings.simplefilter("ignore", DeprecationWarning)
 # these are our constants
-paraL96_2lay = {'F1' : 10,
+paraL96_2lay = {'F1' : 64,
                 'F2' : 0,
                 'b' : 10,
                 'c' : 10,
                 'h' : 1,
-                'dimX': 36,
+                'dimX': 20,
                 'dimY' : 10,
                 'RescaledY' : False,
                 'expname' : 'secondaryinstabilities_2layer',
-                'time' : np.arange(0,500,0.1),
+                'time' : np.arange(0,50,0.01),
                 'spinup' : 100,
                 '2lay' : True,
                 'integrator': 'classic'
@@ -31,11 +31,11 @@
                 'b' : 10,
                 'c' : 10,
                 'h' : 1,
-                'dimX': 5,
+                'dimX': 44,
                 'dimY' : 10,
                 'RescaledY' : False,
                 'expname' : 'secondaryinstabilities_1layer',
-                'time' : np.arange(0,100,0.1),
+                'time' : np.arange(0,200,0.1),
                 'spinup' : 100,
                 '2lay' : False,
                 'integrator': 'classic'
@@ -44,8 +44,8 @@
 testzeroclv=True
-hs=[ 1.0 ] # , 0.0625, 0.125 , 0.25 , 0.5 , 1. ]
-experiments = [paraL96_1lay]
+hs=[ 0.0 , 0.0625, 0.125 , 0.25 , 0.5 , 1. ]
+experiments = [paraL96_2lay,paraL96_1lay]
 
 for paraL96,h in product(experiments,hs):
     if not paraL96['2lay'] and not h == 1.0: print("1 lay only with h = 1.");break
@@ -87,12 +87,12 @@
     print("\nExperiment is the following:")
     for key in paraL96.keys(): print(key+' : '+str(paraL96[key]))
     if paraL96['2lay']: L96,L96Jac,L96JacV,L96JacFull,dimN = l96.setupL96_2layer(paraL96)
-    else: L96,L96Jac,L96JacV,L96JacFull,dimN = l96.setupL96(paraL96)
+    else: L96,L96Jac,L96JacV,L96JacFull,dimN,_ = l96.setupL96(paraL96)
 
     field = l96.GinelliForward(dimN,M,tendfunc = L96, jacfunc = L96Jac, jacVfunc = L96JacV,jacfull=L96JacFull, integrator=paraL96['integrator'])
-    field.rtol = 1e-8
-    field.atol = 1e-8
+    field.rtol = None
+    field.atol = None
 
     # initialize fields
     print("\nInitialize ...")
@@ -178,9 +178,9 @@
     length=np.zeros((corrs.shape[0],M))
     for n,c in enumerate(corrs):
         d = (np.logical_and(np.abs(tendcorr[:])>c,np.abs(tendcorr[:])<1.01))
         if np.sum(d) > 1:
             length[n,:] = np.sum(d)
-            lowest[n,:] = np.average(lyaploc_clv[:,:],weights = d, axis = 0)
+            lowest[n,:] = np.average(lyaploc_clv,weights = d, axis = 0)
         else:
             lowest[n,:] = 0
     maskcorr = (np.logical_and(np.abs(tendcorr[:])>0.95,np.abs(tendcorr[:])<1.01))
diff --git a/lorenz96_test_secondary_instabilities.py b/lorenz96_test_secondary_instabilities.py
index 45e4b5a..2ca7429 100755
--- a/lorenz96_test_secondary_instabilities.py
+++ b/lorenz96_test_secondary_instabilities.py
@@ -15,26 +15,28 @@
 scalar = lambda x,y : np.sum(x*y, dtype = precision )
 norm = lambda x : np.sqrt(scalar(x,x), dtype = precision )
-normalize = lambda x : x/norm(x)
+def normalize(x):
+    n = norm(x)
+    if n == 0: return 0.0
+    return x/norm(x)
 corr = lambda x,y: scalar(normalize(x),normalize(y))
 
-# these are our constants
-paraL96_2lay = {'F1' : 10,
+paraL96_2lay = {'F1' : 64,
                 'F2' : 0,
                 'b' : 10,
                 'c' : 10,
                 'h' : 1,
-                'dimX': 36,
+                'dimX': 20,
                 'dimY' : 10,
                 'RescaledY' : False,
                 'expname' : 'secondaryinstabilities_2layer',
-                'time' : np.arange(0,500,0.1),
+                'time' : np.arange(0,50,0.01),
                 'spinup' : 100,
                 '2lay' : True,
                 'integrator': 'classic'
                 }
-paraL96_1lay = {'F1' : 10,
+paraL96_1lay = {'F1' : 8,
                 'F2' : 0,
                 'b' : 10,
                 'c' : 10,
@@ -43,26 +45,26 @@
                 'dimY' : 10,
                 'RescaledY' : False,
                 'expname' : 'secondaryinstabilities_1layer',
-                'time' : np.arange(0,500,0.05),
+                'time' : np.arange(0,200,0.1),
                 'spinup' : 100,
                 '2lay' : False,
                 'integrator': 'classic'
                 }
-
 testzeroclv=True
-hs=[1.0] #, 0.5] # , 0.0625, 0.125 , 0.25 , 0.5 , 1. ]
-experiments = [paraL96_1lay]#,paraL96_2lay]#,paraL96_1lay]
-integrator = 'classic'
+hs=[ 0.0 , 0.0625, 0.125 , 0.25 , 0.5 , 1. ]
+experiments = [paraL96_2lay,paraL96_1lay]
+
+testzeroclv=True
 
 # first test clv
-epsilons=10.0**np.arange(0,2.1,0.2,dtype = precision)
+epsilons=10.0**np.arange(-5,1.1,0.1,dtype = precision)
 intsteps=np.arange(0,100,1)
-CLVs=[1,2,3,4,5] # ,13, 30]#np.arange(1,2,1)
-timeintervall = range(200,600,100)
+CLVs=[1,5,10,20,30,40]#np.arange(1,2,1)
+timeintervall = range(500,1500,50)
 for paraL96,h in product(experiments ,hs):
     if not paraL96['2lay'] and not h == 1.0: print("1 lay only with h = 1.");break
     savename=paraL96['expname']+"_h_"+str(h)
@@ -94,99 +96,100 @@
     normerrorrel_1st = []
     normerrorrel_2nd = []
     normnonlin =[]
+    missing_terms = []
 
     for clv in CLVs:
-        correlation.append(np.memmap(savename+'/correlation_only_1_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
-        correlationv2.append(np.memmap(savename+'/correlation_1and2_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
-        correlationv3.append(np.memmap(savename+'/correlation_only_2_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
+        #correlation.append(np.memmap(savename+'/correlation_only_1_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
+        #correlationv2.append(np.memmap(savename+'/correlation_1and2_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
+        #correlationv3.append(np.memmap(savename+'/correlation_only_2_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
         normerror.append(np.memmap(savename+'/normerror_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
         normerror_1st.append(np.memmap(savename+'/normerror_1st_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
-        normerrorrel_1st.append(np.memmap(savename+'/normerrorrel_1st_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
-        normerrorrel_2nd.append(np.memmap(savename+'/normerrorrel_2nd_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
-        normerrorrel.append(np.memmap(savename+'/normerrorrel_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
-        normnonlin.append(np.memmap(savename+'/normnonlin_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
-        realgrowth.append(np.memmap(savename+'/realgrowth_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
-
+        #normerrorrel_1st.append(np.memmap(savename+'/normerrorrel_1st_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
+        #normerrorrel_2nd.append(np.memmap(savename+'/normerrorrel_2nd_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
+        #normerrorrel.append(np.memmap(savename+'/normerrorrel_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
+        #normnonlin.append(np.memmap(savename+'/normnonlin_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
+        #realgrowth.append(np.memmap(savename+'/realgrowth_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
+        missing_terms.append(np.memmap(savename+'/missing_terms_clv'+str(clv)+'.dat',mode='w+',shape=(len(timeintervall),len(epsilons),len(intsteps)-1),dtype=precision,order = 'C'))
+
     CLV = np.memmap(savename+'/CLV.dat',mode='r',shape=(len(paraL96['time']),dimN,M),dtype='float64',order = 'C')
     lyaploc_clv = np.memmap(savename+'/lyaploc_clv',mode='r',shape=(len(paraL96['time']),M),dtype='float64',order = 'C')
     trajectory = np.memmap(savename+'/trajectory.dat',mode='r',shape=(len(paraL96['time']),dimN),dtype='float64',order = 'C')
     full_solution = np.memmap(savename+'/full_solution.dat',mode='r',shape=(len(paraL96['time']),len(steplengthforsecondorder),dimN,M),dtype='float64',order = 'C')
 
     if paraL96['2lay']: L96,L96Jac,L96JacV,L96JacFull,dimN = l96.setupL96_2layer(paraL96)
-    else: L96,L96Jac,L96JacV,L96JacFull,dimN = l96.setupL96(paraL96)
+    else: L96,L96Jac,L96JacV,L96JacFull,dimN,HessMat = l96.setupL96(paraL96)
 
     field = l96.GinelliForward(dimN,M,tendfunc = L96, jacfunc = L96Jac, jacVfunc = L96JacV,jacfull=L96JacFull, integrator=paraL96['integrator'])
-    field.rtol = 1e-8
-    field.atol = 1e-8
+    field.rtol = None
+    field.atol = None
 
     for i,tn in enumerate(timeintervall):
         print(tn)
         for en,epsilon in enumerate(epsilons):
             print(' eps: '+str(epsilon))
             for nc, clv in enumerate(CLVs):
-                field.x['back']=trajectory[tn,:]+epsilon*CLV[tn,:,clv-1]
+                field.x['back']=trajectory[tn,:]+epsilon*CLV[tn,:,clv-1] #+ epsilon**2.0*CLV[tn,:,clv-1] + epsilon**3.0*CLV[tn,:,clv-1]
                 #print(0,np.log10(epsilon),np.log10(np.linalg.norm(epsilon*CLV[tn,:,clv-1])))
-
+                stepsize=0
                 for step, (a,b) in enumerate(zip(intsteps[0:-1],intsteps[1:])):
-                    stepsize=b
-                    #print('g')
-                    for counting in np.arange(0,b-a):
-                        field.integrate_back(dtau,dt =dt)
-                    #print('counting')
                     nonlinear_prediction = field.x['back'] - trajectory[tn+stepsize,:]
                     firstorder_prediction = CLV[tn+stepsize,:,clv-1]*np.exp(dtau*np.memmap.sum(lyaploc_clv[tn:tn+stepsize,clv-1]))*epsilon
-                    secondorder_prediction = full_solution[tn,stepsize,:,clv-1]*epsilon**2.0
-                    secondorder_prediction_wo2e = full_solution[tn,stepsize,:,clv-1]
+                    secondorder_prediction = full_solution[tn,stepsize,:,clv-1]*epsilon**2.0 #+firstorder_prediction*epsilon
+                    #secondorder_prediction_wo2e = full_solution[tn,stepsize,:,clv-1] #+ firstorder_prediction/epsilon
+
+                    firstandsecondorder_prediction = (secondorder_prediction + firstorder_prediction)
+
+                    missing_terms[nc][i,en,step] = stepsize *dtau*norm(1/2*(2*np.einsum('j,ijk,k->i',firstorder_prediction,HessMat(0,np.zeros(5)),secondorder_prediction,order ='C',optimize=True)
+                        +np.einsum('j,ijk,k->i',secondorder_prediction,HessMat(0,np.zeros(5)),secondorder_prediction,order ='C',optimize=True)))
-                    firstandsecondorder_prediction = (full_solution[tn,stepsize,:,clv-1]*epsilon**2.0+
-                        epsilon*CLV[tn+stepsize,:,clv-1]*
-                        np.exp(dtau*np.memmap.sum(lyaploc_clv[tn:tn+stepsize,clv-1])))
-                    firstandsecondorder_prediction_woe = (full_solution[tn,stepsize,:,clv-1]*epsilon
-                        +CLV[tn+stepsize,:,clv-1]
-                        *np.exp(dtau*np.memmap.sum(lyaploc_clv[tn:tn+stepsize,clv-1])))
+                    #firstandsecondorder_prediction_woe = (firstandsecondorder_prediction)/epsilon
                     #print(step+1,np.log10(epsilon),np.log10(np.linalg.norm(nonlinear_prediction - firstorder_prediction)),np.log10(np.linalg.norm(nonlinear_prediction - firstandsecondorder_prediction)))
-                    correlation[nc][i,en,step] = corr(nonlinear_prediction,firstorder_prediction)
+                    #correlation[nc][i,en,step] = corr(nonlinear_prediction,firstorder_prediction)
-                    correlationv2[nc][i,en,step] = corr(nonlinear_prediction,firstandsecondorder_prediction_woe)
+                    #correlationv2[nc][i,en,step] = corr(nonlinear_prediction,firstandsecondorder_prediction_woe)
                         #(scalar(nonlinear_prediction,firstorder_prediction) +
                         #epsilon*scalar(nonlinear_prediction,secondorder_prediction_wo2e))/(
                         # norm(nonlinear_prediction)*norm(firstandsecondorder_prediction_woe))
-                    correlationv3[nc][i,en,step] = corr(nonlinear_prediction,secondorder_prediction_wo2e)
+                    #correlationv3[nc][i,en,step] = corr(nonlinear_prediction,secondorder_prediction_wo2e)
-                    realgrowth[nc][i,en,step] = np.log(norm(nonlinear_prediction)/epsilon)/(dtau*(stepsize))
+                    #if stepsize==0: realgrowth[nc][i,en,step] = 0
+                    #else: realgrowth[nc][i,en,step] = np.log(norm(nonlinear_prediction)/epsilon)/(dtau*(stepsize))
                     normerror[nc][i,en,step] = norm(nonlinear_prediction - firstandsecondorder_prediction)
                     normerror_1st[nc][i,en,step] = norm(nonlinear_prediction - firstorder_prediction)
-                    normerrorrel[nc][i,en,step] = norm(nonlinear_prediction - firstandsecondorder_prediction)/norm(nonlinear_prediction)
+                    #normerrorrel[nc][i,en,step] = norm(nonlinear_prediction - firstandsecondorder_prediction)/norm(nonlinear_prediction)
-                    normerrorrel_1st[nc][i,en,step] = norm(nonlinear_prediction - firstorder_prediction)/norm(nonlinear_prediction)
+                    #normerrorrel_1st[nc][i,en,step] = norm(nonlinear_prediction - firstorder_prediction)/norm(nonlinear_prediction)
-                    normerrorrel_2nd[nc][i,en,step] = norm(nonlinear_prediction - secondorder_prediction)/norm(nonlinear_prediction)
+                    #normerrorrel_2nd[nc][i,en,step] = norm(nonlinear_prediction - secondorder_prediction)/norm(nonlinear_prediction)
-                    normnonlin[nc][i,en,step] = norm(nonlinear_prediction)
+                    #normnonlin[nc][i,en,step] = norm(nonlinear_prediction)
+                    stepsize=b
+                    for counting in np.arange(0,b-a):
+                        field.integrate_back(dtau,dt =dt)
         if i % 50 == 0:
             for nc, clv in enumerate(CLVs):
-                np.memmap.flush(realgrowth[nc])
-                np.memmap.flush(correlation[nc])
-                np.memmap.flush(correlationv2[nc])
-                np.memmap.flush(correlationv3[nc])
+                #np.memmap.flush(realgrowth[nc])
+                #np.memmap.flush(correlation[nc])
+                #np.memmap.flush(correlationv2[nc])
+                #np.memmap.flush(correlationv3[nc])
                 np.memmap.flush(normerror[nc])
                 np.memmap.flush(normerror_1st[nc])
             print("flushed")
     for nc, clv in enumerate(CLVs):
-        np.memmap.flush(realgrowth[nc])
-        np.memmap.flush(correlation[nc])
-        np.memmap.flush(correlationv2[nc])
-        np.memmap.flush(correlationv3[nc])
+        #np.memmap.flush(realgrowth[nc])
+        #np.memmap.flush(correlation[nc])
+        #np.memmap.flush(correlationv2[nc])
+        #np.memmap.flush(correlationv3[nc])
        np.memmap.flush(normerror[nc])
        np.memmap.flush(normerror_1st[nc])
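Inside the nested loops above, the script compares the nonlinearly evolved perturbation against the first-order (tangent-linear) prediction and the first-plus-second-order prediction. A condensed sketch of that comparison follows; the helper integrate_nonlinear is a hypothetical stand-in for field.integrate_back over the window, and all names are illustrative rather than part of the patch.

import numpy as np

def prediction_errors(x0, clv_end, growth, second_order, epsilon, integrate_nonlinear, reference):
    # x0           : unperturbed state at the start of the window
    # clv_end      : CLV j at the end of the window
    # growth       : exp(dtau * sum of the local exponents of CLV j over the window)
    # second_order : full_solution column for CLV j (already projected to phase space)
    # reference    : unperturbed trajectory at the end of the window
    x_pert = integrate_nonlinear(x0 + epsilon * clv_end)       # nonlinear evolution of the perturbed state
    nonlinear = x_pert - reference                             # actual perturbation after the window
    first = epsilon * clv_end * growth                         # first-order prediction
    first_and_second = first + epsilon**2 * second_order       # add the second-order correction
    return np.linalg.norm(nonlinear - first), np.linalg.norm(nonlinear - first_and_second)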
diff --git a/lorenz96_tmp.py b/lorenz96_tmp.py
new file mode 100644
index 0000000..2711882
--- /dev/null
+++ b/lorenz96_tmp.py
@@ -0,0 +1,171 @@
+#!/home/user/anaconda3/bin/python
+import numpy as np
+import os
+import l96
+from scipy import triu
+import scipy.linalg as linalg
+from itertools import product
+import matplotlib.pyplot as plt
+
+
+# these are our constants
+paraL96_2lay = {'F1' : 10,
+                'F2' : 0,
+                'b' : 10,
+                'c' : 10,
+                'h' : 1,
+                'dimX': 36,
+                'dimY' : 10,
+                'RescaledY' : False,
+                'expname' : 'secondaryinstabilities_2layer',
+                'time' : np.arange(0,2000,0.1),
+                'spinup' : 100,
+                '2lay' : True
+                }
+
+paraL96_1lay = {'F1' : 10,
+                'F2' : 0,
+                'b' : 10,
+                'c' : 10,
+                'h' : 1,
+                'dimX': 44,
+                'dimY' : 10,
+                'RescaledY' : False,
+                'expname' : 'secondaryinstabilities_1layer',
+                'time' : np.arange(0,1000,0.1),
+                'spinup' : 100,
+                '2lay' : False
+                }
+
+
+testzeroclv=True
+
+hs=[ 1. ] # , 0.0625, 0.125 , 0.25 , 0.5 , 1. ]
+experiments = [paraL96_2lay]
+
+for paraL96,h in product(experiments,hs):
+    if not paraL96['2lay'] and not h == 1.0: print("1 lay only with h = 1.");break
+    # M number exponents
+    if paraL96['2lay']:
+        M = paraL96['dimX'] + paraL96['dimX']*paraL96['dimY'] # -1 full spectrum
+        dimN = paraL96['dimX'] + paraL96['dimX']*paraL96['dimY'] # -1 full spectrum
+    else:
+        M = paraL96['dimX']
+        dimN = paraL96['dimX']
+
+    integrator = 'classic'
+    t = paraL96['time']
+    dt = np.mean(np.diff(t))
+
+    savename=paraL96['expname']+"_h_"+str(h)
+    spinup = paraL96['spinup']
+
+    #setup L96
+
+
+
+    if not os.path.exists(savename): os.mkdir(savename)
+    CLV = np.memmap(savename+'/CLV.dat',mode='w+',shape=(len(t),dimN,M),dtype='float64')
+    BLV = np.memmap(savename+'/BLV.dat',mode='w+',shape=(len(t),dimN,M),dtype='float64')
+    R = np.memmap(savename+'/R.dat',mode='w+',shape=(len(t),dimN,M),dtype='float64')
+    lyapmean_blv = np.memmap(savename+'/lyapmean_blv.dat',mode='w+',shape=(M),dtype='float64')
+    lyapmean_clv = np.memmap(savename+'/lyapmean_clv.dat',mode='w+',shape=(M),dtype='float64')
+    lyaploc_clv = np.memmap(savename+'/lyaploc_clv',mode='w+',shape=(len(t),M),dtype='float64')
+    lyaploc_blv = np.memmap(savename+'/lyaploc_blv',mode='w+',shape=(len(t)-1,M),dtype='float64')
+    np.save(savename+'/t',t)
+    trajectory = np.memmap(savename+'/trajectory.dat',mode='w+',shape=(len(t),dimN),dtype='float64')
+    if testzeroclv: tendency = np.memmap(savename+'/tendency.dat',mode='w+',shape=(len(t),dimN),dtype='float64')
+    if testzeroclv: tendcorr = np.memmap(savename+'/tendcorr.dat',mode='w+',shape=(len(t)),dtype='float64')
+
+
+    # Compute the exponents
+
+    paraL96['h']=h
+    print("\nExperiment is the following:")
+    for key in paraL96.keys(): print(key+' : '+str(paraL96[key]))
+    if paraL96['2lay']: L96,L96Jac,L96JacV,L96JacFull,dimN = l96.setupL96_2layer(paraL96)
+    else: L96,L96Jac,L96JacV,L96JacFull,dimN = l96.setupL96(paraL96)
+    field = l96.GinelliForward(dimN,M,tendfunc = L96, jacfunc = None, jacVfunc = L96JacV,jacfull=L96JacFull, integrator=integrator)
+    # initialize fields
+    print("\nInitialize ...")
+    field.init_back('random',0.1)
+    field.init_lin('random',0.1)
+    field.restoreQ()
+    # spinup
+    print("\nSpinup ...")
+
+    field.integrate_back(spinup)
+    field.step_t = 0.0
+
+    BLV[0,:,:]=field.x['lin']
+    print("\nQR Steps ...")
+    # Do QR step
+    for tn, (ts,te) in enumerate(zip(t[0:-1],t[1:])):
+        if testzeroclv: tendency[tn,:] = L96(ts,field.x['back'])
+        trajectory[tn,:]=field.x['back']
+        field.qr_step(te-ts,dt=dt)
+        R[tn,:,:]=field.R
+        BLV[tn+1,:,:]=field.x['lin']
+        print(te)
+        lyaploc_blv[tn,:]=field.lyap
+        if tn % 1 == 0:
+            np.memmap.flush(BLV)
+            np.memmap.flush(R)
+            np.memmap.flush(lyaploc_blv)
+            if testzeroclv: np.memmap.flush(tendency)
+    lyapmean_blv[:]=np.mean(lyaploc_blv[int(tn/2):,:],axis=0)
+
+    # Do Backwards steps
+
+    print("\nBackwards Steps ...")
+    imax = tn
+    res=triu(np.random.rand(M,M))
+    CLV[imax,:,:]=np.matmul(BLV[imax,:,:],res)
+    res=res/np.linalg.norm(res,axis=0,keepdims=True)
+    lyaploc_clv[imax,:]=np.log(1/np.abs(np.linalg.norm(CLV[imax,:,:],axis=0)))/np.abs(te-ts)
+
+    if testzeroclv: minloc=np.argmin(np.abs(lyapmean_blv))
+
+    for tn, (ts,te) in enumerate(zip(t[-2:0:-1],t[-1:0:-1])):
+        n = imax-tn
+        if n==0: break
+        res=linalg.solve(R[n-1,0:M,:],res)
+        CLV[n-1,:,:]=np.matmul(BLV[n-1,:,:],res)
+        res=res/np.linalg.norm(res,axis=0,keepdims=True)
+        lyaploc_clv[n-1,:]=np.log(1/np.abs(np.linalg.norm(CLV[n-1,:,:],axis=0)))/(te-ts)
+        CLV[n-1,:,:]= CLV[n-1,:,:]/np.linalg.norm(CLV[n-1,:,:],axis=0,keepdims=True)
+
+        if testzeroclv:
+            tendcorr[n-1]=np.sum(np.multiply(CLV[n-1,:,minloc],tendency[n-1,:]))/np.sqrt(np.sum(np.multiply(tendency[n-1,:],tendency[n-1,:])))
+
+        if tn % 100 == 0:
+            np.memmap.flush(R)
+            np.memmap.flush(CLV)
+            np.memmap.flush(lyaploc_clv)
+            np.memmap.flush(tendcorr)
+    lyapmean_clv[:]=np.mean(lyaploc_clv[int(tn/2):,:],axis=0)
+
+
+
+    print("Saveing results in folder "+savename+".")
+    np.save(savename+"/paraL96",paraL96)
+    np.save(savename+"/h",h)
+
+    invCLV = np.memmap(savename+'/invCLV.dat',mode='w+',shape=(len(t),dimN,M),dtype='float64')
+
+    for tn, (ts,te) in enumerate(zip(t[0:-1],t[1:])):
+        invCLV[tn,:,:]=np.linalg.inv(CLV[tn,:,:])
+
+    corrs=np.arange(0.01,1,0.1)
+    lowest=np.zeros((corrs.shape[0],M))
+    length=np.zeros((corrs.shape[0],M))
+    for n,c in enumerate(corrs):
+        d = (np.logical_and(np.abs(tendcorr[:])>c,np.abs(tendcorr[:])<1.01))
+        if np.sum(d) > 1:
+            length[n,:] = np.sum(d)
+            lowest[n,:] = np.average(lyaploc_clv[:,:],weights = d, axis = 0)
+        else:
+            lowest[n,:] = 0
+
+    maskcorr = (np.logical_and(np.abs(tendcorr[:])>0.95,np.abs(tendcorr[:])<1.01))
+    np.save(savename+"/maskcorr",maskcorr )
+
diff --git a/lorenz96_tmp2.py b/lorenz96_tmp2.py
new file mode 100644
index 0000000..93b62de
--- /dev/null
+++ b/lorenz96_tmp2.py
@@ -0,0 +1,171 @@
+#!/home/user/anaconda3/bin/python
+import numpy as np
+import os
+import l96
+from scipy import triu
+import scipy.linalg as linalg
+from itertools import product
+import matplotlib.pyplot as plt
+
+
+# these are our constants
+paraL96_2lay = {'F1' : 10,
+                'F2' : 0,
+                'b' : 10,
+                'c' : 10,
+                'h' : 1,
+                'dimX': 36,
+                'dimY' : 10,
+                'RescaledY' : False,
+                'expname' : 'secondaryinstabilities_2layer',
+                'time' : np.arange(0,2000,0.1),
+                'spinup' : 100,
+                '2lay' : True
+                }
+
+paraL96_1lay = {'F1' : 10,
+                'F2' : 0,
+                'b' : 10,
+                'c' : 10,
+                'h' : 1,
+                'dimX': 44,
+                'dimY' : 10,
+                'RescaledY' : False,
+                'expname' : 'secondaryinstabilities_1layer',
+                'time' : np.arange(0,1000,0.1),
+                'spinup' : 100,
+                '2lay' : False
+                }
+
+
+testzeroclv=True
+
+hs=[ 1. ] # , 0.0625, 0.125 , 0.25 , 0.5 , 1. ]
+experiments = [paraL96_1lay]
+
+for paraL96,h in product(experiments,hs):
+    if not paraL96['2lay'] and not h == 1.0: print("1 lay only with h = 1.");break
+    # M number exponents
+    if paraL96['2lay']:
+        M = paraL96['dimX'] + paraL96['dimX']*paraL96['dimY'] # -1 full spectrum
+        dimN = paraL96['dimX'] + paraL96['dimX']*paraL96['dimY'] # -1 full spectrum
+    else:
+        M = paraL96['dimX']
+        dimN = paraL96['dimX']
+
+    integrator = 'classic'
+    t = paraL96['time']
+    dt = np.mean(np.diff(t))
+
+    savename=paraL96['expname']+"_h_"+str(h)
+    spinup = paraL96['spinup']
+
+    #setup L96
+
+
+
+    if not os.path.exists(savename): os.mkdir(savename)
+    CLV = np.memmap(savename+'/CLV.dat',mode='w+',shape=(len(t),dimN,M),dtype='float64')
+    BLV = np.memmap(savename+'/BLV.dat',mode='w+',shape=(len(t),dimN,M),dtype='float64')
+    R = np.memmap(savename+'/R.dat',mode='w+',shape=(len(t),dimN,M),dtype='float64')
+    lyapmean_blv = np.memmap(savename+'/lyapmean_blv.dat',mode='w+',shape=(M),dtype='float64')
+    lyapmean_clv = np.memmap(savename+'/lyapmean_clv.dat',mode='w+',shape=(M),dtype='float64')
+    lyaploc_clv = np.memmap(savename+'/lyaploc_clv',mode='w+',shape=(len(t),M),dtype='float64')
+    lyaploc_blv = np.memmap(savename+'/lyaploc_blv',mode='w+',shape=(len(t)-1,M),dtype='float64')
+    np.save(savename+'/t',t)
+    trajectory = np.memmap(savename+'/trajectory.dat',mode='w+',shape=(len(t),dimN),dtype='float64')
+    if testzeroclv: tendency = np.memmap(savename+'/tendency.dat',mode='w+',shape=(len(t),dimN),dtype='float64')
+    if testzeroclv: tendcorr = np.memmap(savename+'/tendcorr.dat',mode='w+',shape=(len(t)),dtype='float64')
+
+
+    # Compute the exponents
+
+    paraL96['h']=h
+    print("\nExperiment is the following:")
+    for key in paraL96.keys(): print(key+' : '+str(paraL96[key]))
+    if paraL96['2lay']: L96,L96Jac,L96JacV,L96JacFull,dimN = l96.setupL96_2layer(paraL96)
+    else: L96,L96Jac,L96JacV,L96JacFull,dimN = l96.setupL96(paraL96)
+    field = l96.GinelliForward(dimN,M,tendfunc = L96, jacfunc = None, jacVfunc = L96JacV,jacfull=L96JacFull, integrator=integrator)
+    # initialize fields
+    print("\nInitialize ...")
+    field.init_back('random',0.1)
+    field.init_lin('random',0.1)
+    field.restoreQ()
+    # spinup
+    print("\nSpinup ...")
+
+    field.integrate_back(spinup)
+    field.step_t = 0.0
+
+    BLV[0,:,:]=field.x['lin']
+    print("\nQR Steps ...")
+    # Do QR step
+    for tn, (ts,te) in enumerate(zip(t[0:-1],t[1:])):
+        if testzeroclv: tendency[tn,:] = L96(ts,field.x['back'])
+        trajectory[tn,:]=field.x['back']
+        field.qr_step(te-ts,dt=dt)
+        R[tn,:,:]=field.R
+        BLV[tn+1,:,:]=field.x['lin']
+        print(te)
+        lyaploc_blv[tn,:]=field.lyap
+        if tn % 1 == 0:
+            np.memmap.flush(BLV)
+            np.memmap.flush(R)
+            np.memmap.flush(lyaploc_blv)
+            if testzeroclv: np.memmap.flush(tendency)
+    lyapmean_blv[:]=np.mean(lyaploc_blv[int(tn/2):,:],axis=0)
+
+    # Do Backwards steps
+
+    print("\nBackwards Steps ...")
+    imax = tn
+    res=triu(np.random.rand(M,M))
+    CLV[imax,:,:]=np.matmul(BLV[imax,:,:],res)
+    res=res/np.linalg.norm(res,axis=0,keepdims=True)
+    lyaploc_clv[imax,:]=np.log(1/np.abs(np.linalg.norm(CLV[imax,:,:],axis=0)))/np.abs(te-ts)
+
+    if testzeroclv: minloc=np.argmin(np.abs(lyapmean_blv))
+
+    for tn, (ts,te) in enumerate(zip(t[-2:0:-1],t[-1:0:-1])):
+        n = imax-tn
+        if n==0: break
+        res=linalg.solve(R[n-1,0:M,:],res)
+        CLV[n-1,:,:]=np.matmul(BLV[n-1,:,:],res)
+        res=res/np.linalg.norm(res,axis=0,keepdims=True)
+        lyaploc_clv[n-1,:]=np.log(1/np.abs(np.linalg.norm(CLV[n-1,:,:],axis=0)))/(te-ts)
+        CLV[n-1,:,:]= CLV[n-1,:,:]/np.linalg.norm(CLV[n-1,:,:],axis=0,keepdims=True)
+
+        if testzeroclv:
+            tendcorr[n-1]=np.sum(np.multiply(CLV[n-1,:,minloc],tendency[n-1,:]))/np.sqrt(np.sum(np.multiply(tendency[n-1,:],tendency[n-1,:])))
+
+        if tn % 100 == 0:
+            np.memmap.flush(R)
+            np.memmap.flush(CLV)
+            np.memmap.flush(lyaploc_clv)
+            np.memmap.flush(tendcorr)
+    lyapmean_clv[:]=np.mean(lyaploc_clv[int(tn/2):,:],axis=0)
+
+
+
+    print("Saveing results in folder "+savename+".")
+    np.save(savename+"/paraL96",paraL96)
+    np.save(savename+"/h",h)
+
+    invCLV = np.memmap(savename+'/invCLV.dat',mode='w+',shape=(len(t),dimN,M),dtype='float64')
+
+    for tn, (ts,te) in enumerate(zip(t[0:-1],t[1:])):
+        invCLV[tn,:,:]=np.linalg.inv(CLV[tn,:,:])
+
+    corrs=np.arange(0.01,1,0.1)
+    lowest=np.zeros((corrs.shape[0],M))
+    length=np.zeros((corrs.shape[0],M))
+    for n,c in enumerate(corrs):
+        d = (np.logical_and(np.abs(tendcorr[:])>c,np.abs(tendcorr[:])<1.01))
+        if np.sum(d) > 1:
+            length[n,:] = np.sum(d)
+            lowest[n,:] = np.average(lyaploc_clv[:,:],weights = d, axis = 0)
+        else:
+            lowest[n,:] = 0
+
+    maskcorr = (np.logical_and(np.abs(tendcorr[:])>0.95,np.abs(tendcorr[:])<1.01))
+    np.save(savename+"/maskcorr",maskcorr )
+
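After the forward QR sweep, both lorenz96_tmp scripts recover the CLVs and their local exponents by iterating backwards through the stored R factors. A sketch of that backward pass, with plain arrays instead of memmaps and a constant step dt assumed (illustrative only, not part of the patch):

import numpy as np
import scipy.linalg as linalg

def backward_clvs(R_seq, BLV_seq, dt):
    # R_seq[n], BLV_seq[n] : (M, M) factors written out during the forward QR steps
    T, M, _ = BLV_seq.shape
    CLV_seq = np.zeros_like(BLV_seq)
    lyap_seq = np.zeros((T, M))
    res = np.triu(np.random.rand(M, M))                     # random upper-triangular seed
    for n in range(T - 1, 0, -1):
        res = linalg.solve(R_seq[n - 1], res)               # pull the coefficients one step back
        CLV_seq[n - 1] = BLV_seq[n - 1] @ res               # expand in the backward Lyapunov basis
        res = res / np.linalg.norm(res, axis=0, keepdims=True)
        norms = np.linalg.norm(CLV_seq[n - 1], axis=0)
        lyap_seq[n - 1] = np.log(1.0 / np.abs(norms)) / dt  # local exponent from the column shrinkage
        CLV_seq[n - 1] = CLV_seq[n - 1] / norms             # store unit-norm CLVs
    return CLV_seq, lyap_seq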