Merge pull request #115 from abacusorg/power_spec
Functionality for Xi_rppi from Pk
lgarrison authored Nov 15, 2023
2 parents b76af8f + 682687e commit 12a974a
Showing 15 changed files with 648 additions and 468 deletions.
317 changes: 237 additions & 80 deletions abacusnbody/analysis/power_spectrum.py

Large diffs are not rendered by default.
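Since the power_spectrum.py diff is not rendered, here is a rough, self-contained NumPy sketch of the quantity named in the PR title: the correlation function xi(rp, pi) obtained from a power spectrum, by inverse-Fourier-transforming a 3D P(k) mesh and binning the result in transverse (rp) and line-of-sight (pi) separation. The grid size, box size, bin edges, and toy P(k) are all made up, and this is not the interface actually added in power_spectrum.py.

```python
import numpy as np
from scipy.fft import irfftn

# Illustrative grid and box; these numbers are made up.
nmesh, lbox = 128, 1000.0   # cells per side, box size in Mpc/h

# Wavenumbers on the rfft mesh.
kx = np.fft.fftfreq(nmesh, d=lbox / nmesh) * 2 * np.pi
kz = np.fft.rfftfreq(nmesh, d=lbox / nmesh) * 2 * np.pi
kmag = np.sqrt(kx[:, None, None]**2 + kx[None, :, None]**2 + kz[None, None, :]**2)

# Toy isotropic P(k); a measured 3D power spectrum mesh would go here instead.
pk3d = np.exp(-0.5 * ((kmag - 0.1) / 0.05)**2)

# xi(r) is the inverse Fourier transform of P(k); with P(k) carrying units of
# volume, xi = irfftn(P) * nmesh**3 / lbox**3 (normalization conventions vary).
xi3d = irfftn(pk3d, s=(nmesh,) * 3, workers=-1) * nmesh**3 / lbox**3

# Signed separations along each axis, then transverse and line-of-sight distances.
r1d = np.fft.fftfreq(nmesh, d=1.0 / nmesh) * (lbox / nmesh)
rp = np.sqrt(r1d[:, None]**2 + r1d[None, :]**2)   # shape (nmesh, nmesh)
rpi = np.abs(r1d)                                 # shape (nmesh,)

rp_bins = np.linspace(0.0, 80.0, 11)
pi_bins = np.linspace(0.0, 80.0, 11)

# Average xi over the mesh cells falling in each (rp, pi) bin.
rp_full = np.broadcast_to(rp[:, :, None], xi3d.shape).ravel()
pi_full = np.broadcast_to(rpi[None, None, :], xi3d.shape).ravel()
xi_sum, _, _ = np.histogram2d(rp_full, pi_full, bins=[rp_bins, pi_bins], weights=xi3d.ravel())
counts, _, _ = np.histogram2d(rp_full, pi_full, bins=[rp_bins, pi_bins])
xi_rppi = np.divide(xi_sum, counts, out=np.zeros_like(xi_sum), where=counts > 0)
print(xi_rppi.shape)   # (10, 10)
```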

125 changes: 125 additions & 0 deletions abacusnbody/analysis/shear.py
@@ -0,0 +1,125 @@
import time
import gc

import numpy as np
import numpy.linalg as la
import numba
from scipy.fft import irfftn, rfftn
from scipy.ndimage import gaussian_filter

"""
Code still under construction. Originally written by Boryana Hadzhiyska for the ancient: https://arxiv.org/abs/1512.03402.
"""

def smooth_density(D, R, N_dim, Lbox):
# cell size
cell = Lbox/N_dim
    # smoothing scale, converted from box-length units to cell units
R /= cell
D_smooth = gaussian_filter(D, R)
return D_smooth

# tophat
@numba.njit
def Wth(ksq, r):
k = np.sqrt(ksq)
w = 3*(np.sin(k*r)-k*r*np.cos(k*r))/(k*r)**3
return w

# gaussian
@numba.njit
def Wg(k, r):
return np.exp(-k*r*r/2.)

@numba.njit(parallel=False, fastmath=True) # parallel=True gives seg fault
def get_tidal(dfour, karr, N_dim, R, dtype=np.float32):

# initialize array
tfour = np.zeros((N_dim, N_dim, N_dim//2 + 1, 6), dtype=np.complex64)


# computing tidal tensor
for a in range(N_dim):
for b in range(N_dim):
for c in numba.prange(N_dim//2 + 1):
                if a * b * c == 0:
                    # skip modes where any wavenumber index is zero (this includes k = 0, so we never divide by ksq = 0)
                    continue

ksq = dtype(karr[a]**2 + karr[b]**2 + karr[c]**2)
dok2 = dfour[a, b, c]/ksq

# smoothed density Gauss fourier
#dksmo[a, b, c] = Wg(ksq)*dfour[a, b, c]
# smoothed density TH fourier
#dkth[a, b, c] = Wth(ksq)*dfour[a, b, c]
# 0,0 is 0; 0,1 is 1; 0,2 is 2; 1,1 is 3; 1,2 is 4; 2,2 is 5
tfour[a, b, c, 0] = karr[a]*karr[a]*dok2
tfour[a, b, c, 3] = karr[b]*karr[b]*dok2
tfour[a, b, c, 5] = karr[c]*karr[c]*dok2
tfour[a, b, c, 1] = karr[a]*karr[b]*dok2
tfour[a, b, c, 2] = karr[a]*karr[c]*dok2
tfour[a, b, c, 4] = karr[b]*karr[c]*dok2
if R is not None:
tfour[a, b, c, :] *= Wth(ksq, R)
return tfour

@numba.njit(parallel=False, fastmath=True)
def get_shear_nb(tidr, N_dim):
shear = np.zeros(shape=(N_dim, N_dim, N_dim), dtype=np.float32)
tensor = np.zeros((3, 3), dtype=np.float32)
for a in range(N_dim):
for b in range(N_dim):
for c in range(N_dim):
t = tidr[a, b, c, :]
tensor[0, 0] = t[0]
tensor[0, 1] = t[1]
tensor[0, 2] = t[2]
tensor[1, 0] = t[1]
tensor[1, 1] = t[3]
tensor[1, 2] = t[4]
tensor[2, 0] = t[2]
tensor[2, 1] = t[4]
tensor[2, 2] = t[5]
evals = la.eigvals(tensor)
l1 = evals[0]
l2 = evals[1]
l3 = evals[2]
shear[a, b, c] = np.sqrt(0.5*((l2-l1)**2 + (l3-l1)**2 + (l3-l2)**2))
return shear

def get_shear(dsmo, N_dim, Lbox, R=None, dtype=np.float32):
# user can also pass string
if isinstance(dsmo, str):
dsmo = np.load(dsmo)

# fourier transform the density field
dsmo = dsmo.astype(dtype)
dfour = rfftn(dsmo, overwrite_x=True, workers=-1)
del dsmo
gc.collect()

# k values
karr = np.fft.fftfreq(N_dim, d=Lbox/(2*np.pi*N_dim)).astype(dtype)

# compute fourier tidal
start = time.time()
tfour = get_tidal(dfour, karr, N_dim, R)
del dfour
gc.collect()
print("finished fourier tidal, took time", time.time() - start)

# compute real tidal
start = time.time()
tidr = irfftn(tfour, axes = (0, 1, 2), workers=-1).real
del tfour
gc.collect()
print("finished tidal, took time", time.time() - start)

# compute shear
start = time.time()
shear = get_shear_nb(tidr, N_dim)
del tidr
gc.collect()
print("finished shear, took time", time.time() - start)

return shear
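For orientation, a minimal usage sketch of the new module follows (the import path matches the prepare_sim.py change below). get_tidal builds the Fourier-space tidal tensor T_ij(k) = k_i k_j delta(k) / k^2, and get_shear_nb combines the eigenvalues of the resulting 3x3 tensor in each cell into the shear field. The grid size, box size, smoothing scale, and the random density field here are all made up.

```python
import numpy as np
from abacusnbody.analysis.shear import smooth_density, get_shear

# Made-up parameters: mesh size, box size (Mpc/h), smoothing scale (Mpc/h).
N_dim, Lbox, R = 64, 500.0, 2.0

# Stand-in density contrast field; in practice this would come from a particle
# deposit, e.g. the tsc_parallel mesh imported in prepare_sim.py.
rng = np.random.default_rng(42)
delta = rng.standard_normal((N_dim, N_dim, N_dim)).astype(np.float32)

# Gaussian pre-smoothing (R is converted to cell units inside smooth_density).
delta_smooth = smooth_density(delta, R, N_dim, Lbox)

# Shear field sqrt(0.5 * [(l2-l1)^2 + (l3-l1)^2 + (l3-l2)^2]) from the eigenvalues
# of the tidal tensor; passing R also applies a top-hat filter in Fourier space.
shear = get_shear(delta_smooth, N_dim, Lbox, R=R)
print(shear.shape, shear.dtype)   # (64, 64, 64) float32
```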
38 changes: 23 additions & 15 deletions abacusnbody/analysis/tpcf_corrfunc.py
@@ -38,24 +38,32 @@ def tpcf_multipole(s_mu_tcpf_result, mu_bins, order=0):
     Examples
     --------
     For demonstration purposes we create a randomly distributed set of points within a
-    periodic cube of length 250 Mpc/h.
-    >>> Npts = 100
-    >>> Lbox = 250.
-    >>> x = np.random.uniform(0, Lbox, Npts)
-    >>> y = np.random.uniform(0, Lbox, Npts)
-    >>> z = np.random.uniform(0, Lbox, Npts)
+    periodic cube of length 250 Mpc/h.::
+
+        >>> Npts = 100
+        >>> Lbox = 250.
+        >>> x = np.random.uniform(0, Lbox, Npts)
+        >>> y = np.random.uniform(0, Lbox, Npts)
+        >>> z = np.random.uniform(0, Lbox, Npts)
+
     We transform our *x, y, z* points into the array shape used by the pair-counter by
     taking the transpose of the result of `numpy.vstack`. This boilerplate transformation
-    is used throughout the `~halotools.mock_observables` sub-package:
-    >>> sample1 = np.vstack((x,y,z)).T
+    is used throughout the `~halotools.mock_observables` sub-package: ::
+
+        >>> sample1 = np.vstack((x,y,z)).T
+
     First, we calculate the correlation function using
-    `~halotools.mock_observables.s_mu_tpcf`.
-    >>> from halotools.mock_observables import s_mu_tpcf
-    >>> s_bins = np.linspace(0.01, 25, 10)
-    >>> mu_bins = np.linspace(0, 1, 15)
-    >>> xi_s_mu = s_mu_tpcf(sample1, s_bins, mu_bins, period=Lbox)
-    Then, we can calculate the quadrupole of the correlation function:
-    >>> xi_2 = tpcf_multipole(xi_s_mu, mu_bins, order=2)
+    `~halotools.mock_observables.s_mu_tpcf`.::
+
+        >>> from halotools.mock_observables import s_mu_tpcf
+        >>> s_bins = np.linspace(0.01, 25, 10)
+        >>> mu_bins = np.linspace(0, 1, 15)
+        >>> xi_s_mu = s_mu_tpcf(sample1, s_bins, mu_bins, period=Lbox)
+
+    Then, we can calculate the quadrupole of the correlation function: ::
+
+        >>> xi_2 = tpcf_multipole(xi_s_mu, mu_bins, order=2)
+
     """

     # process inputs
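For context on what tpcf_multipole computes: the multipole of order ell is xi_ell(s) = (2*ell + 1)/2 times the integral over mu in [-1, 1] of xi(s, mu) * L_ell(mu), with L_ell the Legendre polynomial. The helper below approximates that integral with a midpoint rule over the mu bins; it is a sketch of the definition rather than halotools' exact implementation, and the function name is made up.

```python
import numpy as np
from scipy.special import legendre

def xi_multipole_sketch(xi_s_mu, mu_bins, order=2):
    """Midpoint-rule estimate of the order-ell multipole of xi(s, mu),
    assuming xi(s, mu) is sampled on mu in [0, 1] and is even in mu,
    so the [-1, 0] half contributes a factor of 2 for even ell."""
    mu_mid = 0.5 * (mu_bins[1:] + mu_bins[:-1])   # mu bin centers
    dmu = np.diff(mu_bins)                        # mu bin widths
    L_ell = legendre(order)(mu_mid)               # Legendre polynomial at bin centers
    # (2*ell + 1)/2 * 2 = (2*ell + 1) after folding mu -> -mu
    return (2 * order + 1) * np.sum(xi_s_mu * L_ell * dmu, axis=1)
```

With the arrays from the doctest above, xi_multipole_sketch(xi_s_mu, mu_bins, order=2) should closely match the xi_2 returned by tpcf_multipole for even multipoles.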
6 changes: 3 additions & 3 deletions abacusnbody/hod/abacus_hod.py
@@ -42,7 +42,7 @@ class AbacusHOD:
     """
     A highly efficient multi-tracer HOD code for the AbacusSummit simulations.
     """
-    def __init__(self, sim_params, HOD_params, clustering_params = None, chunk=-1, n_chunks=1, skip_staging=False): # TESTING
+    def __init__(self, sim_params, HOD_params, clustering_params = None, chunk=-1, n_chunks=1, skip_staging=False):
         """
         Loads simulation. The ``sim_params`` dictionary specifies which simulation
         volume to load. The ``HOD_params`` specifies the HOD parameters and tracer
@@ -1109,8 +1109,8 @@ def apply_zcv_xi(self, mock_dict, config, load_presaved=False):
         r_bins = np.linspace(0., 200., 201)
         pk_rsd_tr_fns = [save_z_dir / f"power{rsd_str}_tr_tr_nmesh{config['zcv_params']['nmesh']:d}.asdf"] # TODO: same as other (could check that we have this if presaved)
         power_cv_tr_fn = save_z_dir / f"power{rsd_str}_ZCV_tr_nmesh{config['zcv_params']['nmesh']:d}.asdf" # TODO: should be an output (could check that we have this if presaved; run_zcv too)
-        r_binc, binned_poles_zcv, Npoles = pk_to_xi(power_cv_tr_fn, self.lbox, r_bins, poles=config['power_params']['poles'], key='P_k3D_tr_tr_zcv')
-        r_binc, binned_poles, Npoles = pk_to_xi(pk_rsd_tr_fns[0], self.lbox, r_bins, poles=config['power_params']['poles'], key='P_k3D_tr_tr')
+        r_binc, binned_poles_zcv, Npoles = pk_to_xi(asdf.open(power_cv_tr_fn)['data']['P_k3D_tr_tr_zcv'], self.lbox, r_bins, poles=config['power_params']['poles'])
+        r_binc, binned_poles, Npoles = pk_to_xi(asdf.open(pk_rsd_tr_fns[0])['data']['P_k3D_tr_tr'], self.lbox, r_bins, poles=config['power_params']['poles'])
         zcv_dict['Xi_tr_tr_ell_zcv'] = binned_poles_zcv
         zcv_dict['Xi_tr_tr_ell'] = binned_poles
         zcv_dict['Np_tr_tr_ell'] = Npoles
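The hunk above switches pk_to_xi from taking a file name plus an ASDF key to taking the 3D power spectrum array directly. A standalone sketch of the new call pattern is below; the file name, box size, and pole list are illustrative, and the import path for pk_to_xi is an assumption (the import is not shown in this diff).

```python
import asdf
import numpy as np
from abacusnbody.analysis.power_spectrum import pk_to_xi   # assumed import path

power_fn = "power_tr_tr_nmesh576.asdf"   # illustrative file name
lbox = 2000.0                            # illustrative box size in Mpc/h
r_bins = np.linspace(0.0, 200.0, 201)

with asdf.open(power_fn) as af:
    # key follows the hunk above: the 3D tracer auto power spectrum mesh
    p3d = af['data']['P_k3D_tr_tr']
    r_binc, binned_poles, Npoles = pk_to_xi(p3d, lbox, r_bins, poles=[0, 2, 4])
    # binned_poles corresponds to the 'Xi_tr_tr_ell' entry stored in zcv_dict above
```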
2 changes: 1 addition & 1 deletion abacusnbody/hod/prepare_sim.py
@@ -26,7 +26,7 @@
 from abacusnbody.data.compaso_halo_catalog import CompaSOHaloCatalog
 from abacusnbody.data.read_abacus import read_asdf

-from .shear import smooth_density, get_shear
+from ..analysis.shear import smooth_density, get_shear
 from ..analysis.tsc import tsc_parallel

 DEFAULTS = {}