diff --git a/environment.yml b/environment.yml index a0bf77e0b..d59899717 100644 --- a/environment.yml +++ b/environment.yml @@ -35,4 +35,4 @@ dependencies: - pip: - ctapipe_io_lst~=0.18.2 - ctaplot~=0.6.2 - - pyirf~=0.6.0 + - pyirf==0.7 diff --git a/lstchain/high_level/__init__.py b/lstchain/high_level/__init__.py index f7d662e87..564130b5a 100644 --- a/lstchain/high_level/__init__.py +++ b/lstchain/high_level/__init__.py @@ -13,16 +13,30 @@ get_timing_params, set_expected_pos_to_reco_altaz, ) +from .interpolate import ( + check_in_delaunay_triangle, + compare_irfs, + get_nearest_az_node, + interp_params, + interpolate_irf, + load_irf_grid, +) __all__ = [ "add_icrs_position_params", - 'analyze_on_off', - 'analyze_wobble', + "analyze_on_off", + "analyze_wobble", + "check_in_delaunay_triangle", + "compare_irfs", "create_event_list", "create_hdu_index_hdu", "create_obs_index_hdu", + "get_nearest_az_node", "get_pointing_params", "get_timing_params", + "interp_params", + "interpolate_irf", + "load_irf_grid", "set_expected_pos_to_reco_altaz", - 'setup_logging', + "setup_logging", ] diff --git a/lstchain/high_level/hdu_table.py b/lstchain/high_level/hdu_table.py index 95cbac346..4784b80ef 100644 --- a/lstchain/high_level/hdu_table.py +++ b/lstchain/high_level/hdu_table.py @@ -3,23 +3,24 @@ import astropy.units as u import numpy as np -from astropy.coordinates import SkyCoord, AltAz +from astropy.coordinates import AltAz, SkyCoord from astropy.coordinates.erfa_astrom import ErfaAstromInterpolator, erfa_astrom from astropy.io import fits -from astropy.table import Table, QTable +from astropy.table import QTable, Table from astropy.time import Time from lstchain.__init__ import __version__ -from lstchain.reco.utils import location, camera_to_altaz +from lstchain.reco.utils import camera_to_altaz, location __all__ = [ "add_icrs_position_params", "create_event_list", "create_hdu_index_hdu", "create_obs_index_hdu", - "get_pointing_params", "get_timing_params", "set_expected_pos_to_reco_altaz", + "get_pointing_params", + "get_timing_params", ] log = logging.getLogger(__name__) @@ -235,9 +236,8 @@ def create_hdu_index_hdu( def get_timing_params(data, epoch=LST_EPOCH): """ - Get event lists and retrieve some timing parameters for the DL3 event list - as a dict - + Retrieve some timing parameters for the DL3 event list + as a dict. 
""" time_utc = Time(data["dragon_time"], format="unix", scale="utc") t_start_iso = time_utc[0].to_value("iso", "date_hms") @@ -342,8 +342,8 @@ def set_expected_pos_to_reco_altaz(data): def create_event_list( - data, run_number, source_name, source_pos, effective_time, elapsed_time, - epoch=LST_EPOCH, + data, run_number, source_name, source_pos, + effective_time, elapsed_time, data_pars, epoch=LST_EPOCH, ): """ Create the event_list BinTableHDUs from the given data @@ -362,6 +362,7 @@ def create_event_list( Float elapsed_time: Total elapsed time of triggered events of the run Float + data_pars: Returns ------- @@ -491,6 +492,9 @@ def create_event_list( ) pnt_header["TIMEREF"] = ev_header["TIMEREF"] + pnt_header["MEAN_ZEN"] = str(data_pars["ZEN_PNT"]) + pnt_header["MEAN_AZ"] = str(data_pars["AZ_PNT"]) + pnt_header["B_DELTA"] = str(data_pars["B_DELTA"]) # Create HDUs event = fits.BinTableHDU(event_table, header=ev_header, name="EVENTS") diff --git a/lstchain/high_level/interpolate.py b/lstchain/high_level/interpolate.py new file mode 100644 index 000000000..3a6b27dca --- /dev/null +++ b/lstchain/high_level/interpolate.py @@ -0,0 +1,620 @@ +import numpy as np +import logging +import astropy.units as u +from astropy.table import QTable, Table +from astropy.io import fits +from astropy.time import Time + +from lstchain.__init__ import __version__ + +from pyirf.io.gadf import ( + create_aeff2d_hdu, + create_energy_dispersion_hdu, + create_psf_table_hdu, +) +from pyirf.interpolation import ( + interpolate_effective_area_per_energy_and_fov, + interpolate_energy_dispersion, + interpolate_psf_table, + # interpolate_rad_max, +) +from scipy.spatial import Delaunay, distance, QhullError +from scipy.interpolate import griddata + +log = logging.getLogger(__name__) + + +def interp_params(params_list, data): + """ + From a given list of angular parameters, to be used for interpolation, + take values from a given data dict. + + Parameters + ---------- + params_list: List of basic angular parameters to get interpolation + parameters information . + 'list' + data: dict containing the required basic angular parameters. + 'dict' + + Returns + ------- + mc_pars: List of interpolation parameters + 'list' + """ + mc_pars = [] + if "ZEN_PNT" in params_list: + mc_pars.append(np.cos(u.Quantity(data["ZEN_PNT"], "deg").to_value(u.rad))) + + if "B_DELTA" in params_list: + mc_pars.append(np.sin(u.Quantity(data["B_DELTA"], "deg").to_value(u.rad))) + + if "AZ_PNT" in params_list: + mc_pars.append(u.Quantity(data["AZ_PNT"], "deg").to_value(u.rad)) + + return mc_pars + + +def check_in_delaunay_triangle(irfs, data_params, use_nearest_irf_node=False): + """ + From a given list of IRFs as grid points used for interpolation, retrieve + the Delaunay triangulation list of IRFs, where the simplex includes the + target points in data_params. + + If the target point does not exist inside the simplex, the IRF + corresponding to the nearest grid point to the target value is used. This + is also achieved if use_nearest_irf_node is passed as True. + + Fetching the nearest IRF node is also performed by checking the azimuth + values, as there might be more than a single node, with the same position + in the interpolation parameter space. + + If the list of given IRFs are not enough for calculating the Delaunay + triangulation, an empty list is returned. + + Parameters + ---------- + irfs: List of IRFs to check for Delaunay triangulation. 
+        'List'
+    data_params: Dict of arrays of range of parameters of the observed data
+        in the event list, to check for interpolation.
+        'Dict'
+    use_nearest_irf_node: Boolean if we need only the nearest IRF node.
+        'Bool'
+
+    Returns
+    -------
+    irf_list: Revised list of IRFs after the checks.
+        'List'
+    """
+    # Exclude AZ_PNT as target interpolation parameter
+    # For the interpolation parameters only
+    data_pars_sel = [d for d in data_params.keys() if d != "AZ_PNT"]
+    az_idx = list(data_params.keys()).index("AZ_PNT")
+
+    new_irfs = []
+    mc_params_sel = np.empty((len(irfs), len(data_pars_sel)))
+    mc_params_full = np.empty((len(irfs), len(data_params)))
+
+    for i, irf in enumerate(irfs):
+        f = fits.open(irf)[1].header
+
+        mc_pars_sel = interp_params(data_pars_sel, f)
+        mc_params_sel[i, :] = np.array(mc_pars_sel)
+
+        mc_pars_full = interp_params(data_params, f)
+        mc_params_full[i, :] = np.array(mc_pars_full)
+
+    data_val_sel = interp_params(data_pars_sel, data_params)
+    data_val_full = interp_params(data_params, data_params)
+
+    try:
+        tri = Delaunay(mc_params_sel)
+    except QhullError:
+        print("Not enough grid values for Delaunay triangulation")
+        # Fetch the nearest IRF node
+        index = distance.cdist([data_val_sel], mc_params_sel).argmin()
+        index = get_nearest_az_node(
+            mc_params_sel, index, mc_params_full, data_val_full, az_idx
+        )
+        return [irfs[index]]
+
+    target_in_simplex = tri.find_simplex(data_val_sel)
+
+    if not use_nearest_irf_node:
+        if target_in_simplex == -1:
+            # The target values are not contained in any Delaunay triangle
+            # formed by the parameters of the list of IRFs provided.
+            # So just include the IRF with the closest parameter values
+            # to the target values
+            index = distance.cdist([data_val_sel], mc_params_sel).argmin()
+            print("Target value is outside interpolation. Using the nearest IRF.")
+            index = get_nearest_az_node(
+                mc_params_sel, index, mc_params_full, data_val_full, az_idx
+            )
+            new_irfs.append(irfs[index])
+        else:
+            # Just select the IRFs that are needed for the Delaunay triangulation
+            for i in tri.simplices[target_in_simplex]:
+                i_sel = get_nearest_az_node(
+                    mc_params_sel, i, mc_params_full, data_val_full, az_idx
+                )
+                new_irfs.append(irfs[i_sel])
+    else:
+        index = distance.cdist([data_val_sel], mc_params_sel).argmin()
+        print("Using the nearest IRF.")
+        index = get_nearest_az_node(
+            mc_params_sel, index, mc_params_full, data_val_full, az_idx
+        )
+        new_irfs.append(irfs[index])
+
+    return new_irfs
+
+
+def get_nearest_az_node(
+    irf_params_sel, index, irf_params_full, target_params_full, az_idx
+):
+    """
+    Check whether a given IRF node overlaps with another node in the
+    interpolation parameter space and, if so, select the node nearest
+    to the target based on the azimuth angle.
+
+    All interp_params objects are assumed to be generated with the same
+    IRF list.
+
+    Parameters
+    ----------
+    irf_params_sel: interp_params object for the interpolation parameters-based
+        information.
+        'dict'
+    index: index of the list of IRF node information to compare.
+        'int'
+    irf_params_full: interp_params object for all parameters information.
+        'dict'
+    target_params_full: interp_params object for all parameters information of
+        the target.
+        'dict'
+    az_idx: index of azimuth information (AZ_PNT) in the main dict information
+        used for the target.
+        'int'
+
+    Returns
+    -------
+    idx: index of the IRF node information with the azimuth value closest
+        to that of the target.
+        'int'
+    """
+    # Remove the numerical variations by shortening the precision of values
+    irf_params_short = np.around(irf_params_sel, 3)
+
+    # Find the unique set of nodes, to compare and choose the indices
+    irf_unique, irf_num = np.unique(irf_params_short, axis=0, return_counts=True)
+    idx = np.flatnonzero((irf_unique == irf_params_short[index]).all(1))
+
+    if irf_num[idx] > 1:
+        # Only using the overlapping nodes
+        idx_list = np.flatnonzero(
+            (irf_params_short == irf_params_short[index]).all(1)
+        )
+        # Finding the shortest distance with respect to target azimuth
+        diff = np.abs(
+            irf_params_full[idx_list, az_idx] - target_params_full[az_idx]
+        )
+        idx = idx_list[np.where(diff == diff.min())[0]][0]
+    else:
+        # if there are no overlapping nodes
+        idx = index
+    return idx
+
+
+def compare_irfs(irfs):
+    """
+    Compare the given list of IRFs with data binning and relevant
+    metadata values.
+
+    Parameters
+    ----------
+    irfs: List of IRFs to compare
+        'List'
+
+    Returns
+    -------
+    : Boolean indicating whether the given list of IRFs is comparable
+    """
+    bin_bool = False
+    meta_bool = True
+
+    params = []
+
+    # Collect the list of metadata and column names to compare the values/data
+    select_meta = ["HDUCLAS3", "INSTRUME", "G_OFFSET"]
+    try:
+        h = Table.read(irfs[0], hdu="EFFECTIVE AREA")
+        h_meta = h.meta
+        cols = h.columns[:-1]
+
+        energy_dependent_gh = "GH_EFF" in h_meta
+        point_like = h_meta["HDUCLAS3"] == "POINT-LIKE"
+        energy_dependent_theta = "TH_CONT" in h_meta
+        source_dep = "AL_CUT" in h_meta
+
+        if energy_dependent_gh:
+            select_meta.append("GH_EFF")
+        else:
+            select_meta.append("GH_CUT")
+
+        if point_like:
+            if energy_dependent_theta:
+                select_meta.append("TH_CONT")
+            else:
+                if source_dep:
+                    select_meta.append("AL_CUT")
+                else:
+                    select_meta.append("RAD_MAX")
+    except KeyError:
+        print(f"Effective Area is not present in {irfs[0]}")
+
+    # Comparing the metadata information
+    for k in select_meta:
+        meta = [Table.read(i, hdu="ENERGY DISPERSION").meta[k] for i in irfs]
+        m = np.unique(meta)
+
+        if len(m) != 1:
+            meta_bool *= False
+
+    print(f"The metadata are{' ' if meta_bool else ' not '}comparable")
+    for irf in irfs:
+        e_table = Table.read(irf, hdu="ENERGY DISPERSION")
+
+        for col in cols:
+            params.append(e_table[col].quantity[0])
+
+    # Comparing other parameter columns in IRFs
+    for i in np.arange(len(cols)):
+        a = [params[len(cols) * j + i] for j in np.arange(len(irfs))]
+        a2, a_ind = np.unique(a, return_index=True)
+
+        if len(a2) == len(params[i]):
+            if (a2[np.argsort(a_ind)] == params[i].value).all():
+                bin_bool = True
+    print(
+        "The other parameter axes data " f"are{' ' if bin_bool else ' not '}comparable"
+    )
+
+    return bin_bool * meta_bool
+
+
+def load_irf_grid(irfs, extname, interp_col, gadf_irf=True):
+    """
+    From a given list of IRFs, load the list of IRF data values that can be
+    interpolated. For GH_CUTS, which is not GADF-approved, the HDU is stored
+    differently and so requires a different way of loading the data.
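+
+    A minimal usage sketch (the file names here are hypothetical):
+
+    >>> aeff_list = load_irf_grid(
+    ...     ["irf_node_a.fits.gz", "irf_node_b.fits.gz"],
+    ...     extname="EFFECTIVE AREA",
+    ...     interp_col="EFFAREA",
+    ... )  # stacked along the file axis: shape (2, ...)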
+
+    Parameters
+    ----------
+    irfs: List of IRFs to use to interpolate
+        List
+    extname: Name of the IRF to be extracted
+        Str
+    interp_col: Name of the column whose values are to be interpolated
+        Str
+    gadf_irf: IRF being as per GADF standard or custom
+        Bool
+
+    Returns
+    -------
+    irf_list: List of columns of the IRF from each file
+        'numpy.stack'
+    """
+    irf_list = []
+    for irf in irfs:
+        if gadf_irf:
+            irf_list.append(QTable.read(irf, hdu=extname)[interp_col][0].T)
+        else:
+            irf_list.append(QTable.read(irf, hdu=extname)[interp_col])
+    return np.stack(irf_list)
+
+
+def interpolate_gh_cuts(
+    gh_cuts,
+    grid_points,
+    target_point,
+    method="linear",
+):
+    """
+    Interpolates a grid of GH_CUTS tables to a target point.
+    Wrapper around scipy.interpolate.griddata [1].
+
+    Parameters
+    ----------
+    gh_cuts: numpy.ndarray, shape=(N, M, ...)
+        Gammaness-cuts for all combinations of grid-points, energy and
+        fov_offset.
+        Shape (N:n_grid_points, M:n_energy_bins, n_fov_offset_bins)
+    grid_points: numpy.ndarray, shape=(N, O)
+        Array of the N O-dimensional morphing parameter values corresponding
+        to the N input templates.
+    target_point: numpy.ndarray, shape=(O)
+        Value for which the interpolation is performed (target point)
+    method: 'linear', 'nearest', 'cubic'
+        Interpolation method for scipy.interpolate.griddata [1].
+        Defaults to 'linear'.
+
+    Returns
+    -------
+    gh_cuts_interp: numpy.ndarray, shape=(1, M, ...)
+        Gammaness-cuts for the target grid-point,
+        shape (1, M:n_energy_bins, n_fov_offset_bins)
+    """
+
+    return griddata(grid_points, gh_cuts, target_point, method=method)
+
+
+def interpolate_rad_max(
+    rad_max,
+    grid_points,
+    target_point,
+    method="linear",
+):
+    """
+    Interpolates a grid of RAD_MAX tables to a target point.
+    Wrapper around scipy.interpolate.griddata [1].
+
+    Parameters
+    ----------
+    rad_max: numpy.ndarray, shape=(N, M, ...)
+        Theta-cuts for all combinations of grid-points, energy and
+        fov_offset.
+        Shape (N:n_grid_points, M:n_energy_bins, n_fov_offset_bins)
+    grid_points: numpy.ndarray, shape=(N, O)
+        Array of the N O-dimensional morphing parameter values corresponding
+        to the N input templates.
+    target_point: numpy.ndarray, shape=(O)
+        Value for which the interpolation is performed (target point)
+    method: 'linear', 'nearest', 'cubic'
+        Interpolation method for scipy.interpolate.griddata [1].
+        Defaults to 'linear'.
+
+    Returns
+    -------
+    rad_max_interp: numpy.ndarray, shape=(1, M, ...)
+        Theta-cuts for the target grid-point,
+        shape (1, M:n_energy_bins, n_fov_offset_bins)
+    """
+
+    return griddata(grid_points, rad_max, target_point, method=method)
+
+
+def interpolate_irf(irfs, data_pars, interp_method="linear"):
+    """
+    Using pyirf functions with a list of IRFs and parameters to compare with
+    the data, interpolate over a selected interpolation parameter space.
+
+    Currently, for a single telescope, only cos(zenith) and sin(delta) are
+    used as the interpolation parameters for the IRFs; the contribution from
+    the azimuth direction (of the shower) is not included, as it becomes
+    significant only when considering a stereo system of telescopes. Hence,
+    the selection criteria are applied only to the selected interpolation
+    parameters.
+
+    Along with IRFs, energy-dependent theta and gammaness cuts, RAD_MAX and
+    GH_CUTS respectively, are also interpolated if they exist in the list of
+    IRFs provided.
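+
+    A sketch of the expected target dict, with made-up example values
+    (the keys follow the ones written into the DL3 headers):
+
+    >>> import astropy.units as u
+    >>> data_pars = {
+    ...     "ZEN_PNT": 30 * u.deg,   # mean zenith of the observed data
+    ...     "B_DELTA": 70 * u.deg,   # angle between shower axis and B field
+    ...     "AZ_PNT": 120 * u.deg,   # mean azimuth; kept out of the grid
+    ... }
+    >>> # irf_interp = interpolate_irf(irf_list, data_pars, "linear")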
+
+    Parameters
+    ----------
+    irfs: List of IRFs to use to interpolate
+        List
+    data_pars: Dict of arrays of range of parameters of the observed data
+        in the event list, to check for interpolation.
+        'Dict'
+    interp_method: Method of interpolation to be used by
+        scipy.interpolate.griddata. Values can be "linear", "nearest", "cubic"
+        'Str'
+
+    Returns
+    -------
+    irf_interp: Final interpolated IRF
+        'astropy.io.fits'
+    """
+
+    # Exclude AZ_PNT as target interpolation parameter
+    # For the interpolation parameters only
+    params_sel = [d for d in data_pars.keys() if d != "AZ_PNT"]
+    n_grid = len(irfs)
+    irf_pars_sel = np.empty((n_grid, len(params_sel)))
+    interp_pars_sel = list()
+
+    extra_keys = [
+        "TELESCOP",
+        "INSTRUME",
+        "FOVALIGN",
+        "GH_CUT",
+        "G_OFFSET",
+        "RAD_MAX",
+        "B_TOTAL",
+        "B_INC",
+    ]
+    main_headers = fits.open(irfs[0])[1].header
+
+    point_like = main_headers["HDUCLAS3"] == "POINT-LIKE"
+
+    # Update headers to be added to the final IRFs
+    extra_headers = dict()
+
+    for k in extra_keys:
+        if k in main_headers:
+            if main_headers.comments[k]:
+                extra_headers[k] = (main_headers[k], main_headers.comments[k])
+            else:
+                extra_headers[k] = main_headers[k]
+
+    for par in data_pars.keys():
+        extra_headers[par] = (data_pars[par].to_value(u.deg), "deg")
+
+    for i in np.arange(n_grid):
+        f = fits.open(irfs[i])[1].header
+        mc_pars_sel = interp_params(params_sel, f)
+        irf_pars_sel[i, :] = np.array(mc_pars_sel)
+
+    interp_pars_sel = interp_params(params_sel, data_pars)
+
+    # Keep interp_pars as a tuple to keep the right dimensions in interpolation
+    interp_pars_sel = tuple(interp_pars_sel)
+    irf_interp = fits.HDUList(
+        [
+            fits.PrimaryHDU(),
+        ]
+    )
+
+    # Read select IRFs, for which interpolation is supported, into lists and
+    # extract the necessary columns
+    hdus_interp = fits.open(irfs[0])
+
+    try:
+        hdus_interp["EFFECTIVE AREA"]
+        effarea_list = load_irf_grid(
+            irfs, extname="EFFECTIVE AREA", interp_col="EFFAREA"
+        )
+
+        temp_irf = QTable.read(irfs[0], hdu="EFFECTIVE AREA")
+        e_true = np.append(temp_irf["ENERG_LO"][0], temp_irf["ENERG_HI"][0][-1])
+        fov_off = np.append(temp_irf["THETA_LO"][0], temp_irf["THETA_HI"][0][-1])
+
+        aeff_interp = interpolate_effective_area_per_energy_and_fov(
+            effarea_list, irf_pars_sel, interp_pars_sel, method=interp_method
+        )
+
+        aeff_hdu_interp = create_aeff2d_hdu(
+            aeff_interp.T,
+            true_energy_bins=e_true,
+            fov_offset_bins=fov_off,
+            point_like=point_like,
+            extname="EFFECTIVE AREA",
+            **extra_headers,
+        )
+
+        irf_interp.append(aeff_hdu_interp)
+
+    except KeyError:
+        log.error("Effective Area not present for IRF interpolation")
+
+    try:
+        hdus_interp["ENERGY DISPERSION"]
+        edisp_list = load_irf_grid(
+            irfs, extname="ENERGY DISPERSION", interp_col="MATRIX"
+        )
+        temp_irf = QTable.read(irfs[0], hdu="ENERGY DISPERSION")
+
+        # Check the units as well
+        e_true = np.append(temp_irf["ENERG_LO"][0], temp_irf["ENERG_HI"][0][-1])
+        e_migra = np.append(temp_irf["MIGRA_LO"][0], temp_irf["MIGRA_HI"][0][-1])
+        fov_off = np.append(temp_irf["THETA_LO"][0], temp_irf["THETA_HI"][0][-1])
+
+        edisp_interp = interpolate_energy_dispersion(
+            e_migra, edisp_list, irf_pars_sel, interp_pars_sel, quantile_resolution=1e-3
+        )
+
+        edisp_hdu_interp = create_energy_dispersion_hdu(
+            edisp_interp,
+            true_energy_bins=e_true,
+            migration_bins=e_migra,
+            fov_offset_bins=fov_off,
+            point_like=point_like,
+            extname="ENERGY DISPERSION",
+            **extra_headers,
+        )
+
+        irf_interp.append(edisp_hdu_interp)
+
+    except KeyError:
+
log.error("Energy Dispersion not present for IRF interpolation") + + try: + hdus_interp["GH_CUTS"] + gh_cuts_list = load_irf_grid( + irfs, extname="GH_CUTS", interp_col="cut", gadf_irf=False + ) + + temp_irf = QTable.read(irfs[0], hdu="GH_CUTS") + + gh_cut_interp = interpolate_gh_cuts( + gh_cuts_list, irf_pars_sel, interp_pars_sel, method=interp_method + ) + + gh_header = fits.Header() + gh_header["CREATOR"] = f"lstchain v{__version__}" + gh_header["DATE"] = Time.now().utc.iso + + for k, v in extra_headers.items(): + gh_header[k] = v + + temp_irf["cut"] = gh_cut_interp + + gh_cut_hdu_interp = fits.BinTableHDU(temp_irf, header=gh_header, name="GH_CUTS") + + irf_interp.append(gh_cut_hdu_interp) + + except KeyError: + log.error("GH CUTS not present for IRF interpolation") + + try: + hdus_interp["RAD_MAX"] + radmax_list = load_irf_grid(irfs, extname="RAD_MAX", interp_col="RAD_MAX") + temp_irf = QTable.read(irfs[0], hdu="RAD_MAX") + + rad_max_interp = interpolate_rad_max( + radmax_list, irf_pars_sel, interp_pars_sel, method=interp_method + ) + + temp_irf["RAD_MAX"] = rad_max_interp.T[np.newaxis, ...] * u.deg + + radmax_header = fits.Header() + radmax_header["CREATOR"] = f"lstchain v{__version__}" + radmax_header["DATE"] = Time.now().utc.iso + + for k, v in extra_headers.items(): + radmax_header[k] = v + + rad_max_hdu_interp = fits.BinTableHDU( + temp_irf, header=radmax_header, name="RAD_MAX" + ) + irf_interp.append(rad_max_hdu_interp) + + except KeyError: + log.error("RAD_MAX not present for IRF interpolation") + + if not point_like: + try: + hdus_interp["PSF"] + psf_list = load_irf_grid(irfs, extname="PSF", interp_col="RPSF") + temp_irf = QTable.read(irfs[0], hdu="PSF") + + e_true = np.append(temp_irf["ENERG_LO"][0], temp_irf["ENERG_HI"][0][-1]) + src_bins = np.append(temp_irf["RAD_LO"][0], temp_irf["RAD_HI"][0][-1]) + fov_off = np.append(temp_irf["THETA_LO"][0], temp_irf["THETA_HI"][0][-1]) + + psf_interp = interpolate_psf_table( + src_bins, + psf_list, + irf_pars_sel, + interp_pars_sel, + quantile_resolution=1e-3, + ) + psf_hdu_interp = create_psf_table_hdu( + psf_interp, + true_energy=e_true, + source_offset_bins=src_bins, + fov_offset_bins=fov_off, + extname="PSF", + **extra_headers, + ) + + irf_interp.append(psf_hdu_interp) + except KeyError: + log.error("PSF HDU not present for IRF interpolation") + + return irf_interp diff --git a/lstchain/high_level/tests/test_hdus.py b/lstchain/high_level/tests/test_hdus.py index 0d07d83ed..1ae9927ef 100644 --- a/lstchain/high_level/tests/test_hdus.py +++ b/lstchain/high_level/tests/test_hdus.py @@ -18,7 +18,7 @@ def dl3_file(tmp_dl3_path, observed_dl2_file, simulated_irf_file): from astropy.coordinates import SkyCoord from astropy.io import fits - events = read_data_dl2_to_QTable(observed_dl2_file) + events, data_pars = read_data_dl2_to_QTable(observed_dl2_file) t_eff, t_tot = get_effective_time(events) events = events[events["intensity"] > 200] source_pos = SkyCoord(ra=83.633, dec=22.01, unit="deg") @@ -31,6 +31,7 @@ def dl3_file(tmp_dl3_path, observed_dl2_file, simulated_irf_file): source_pos=source_pos, effective_time=t_eff.value, elapsed_time=t_tot.value, + data_pars=data_pars ) name = observed_dl2_file.name diff --git a/lstchain/high_level/tests/test_interpolate.py b/lstchain/high_level/tests/test_interpolate.py new file mode 100644 index 000000000..204fcd668 --- /dev/null +++ b/lstchain/high_level/tests/test_interpolate.py @@ -0,0 +1,382 @@ +import astropy.units as u +import numpy as np +from astropy.io import fits +from astropy.table import 
Table
+
+
+def test_load_irf_grid(simulated_irf_file):
+    from lstchain.high_level.interpolate import load_irf_grid
+
+    aeff_list = load_irf_grid([simulated_irf_file], "EFFECTIVE AREA", "EFFAREA")
+
+    assert aeff_list.shape == (1, 23, 8)
+
+
+def test_interp_irf(simulated_irf_file, simulated_dl2_file):
+    from lstchain.high_level.interpolate import interpolate_irf
+    from lstchain.scripts.tests.test_lstchain_scripts import run_program
+
+    # Create other IRFs with different zenith and azimuth parameters
+    # Global cuts
+
+    irf_file_g_2 = simulated_irf_file.parent / "irf_interp_0.fits.gz"
+    irf_file_g_3 = simulated_irf_file.parent / "irf_interp_1.fits.gz"
+    irf_file_g_final = simulated_irf_file.parent / "irf_interp_final.fits.gz"
+
+    hdus_2_g = [
+        fits.PrimaryHDU(),
+    ]
+    hdus_3_g = [
+        fits.PrimaryHDU(),
+    ]
+
+    # En-dep, point-like cuts IRF
+    irf_file_en_1 = simulated_dl2_file.parent / "en_dep_cut_irf_1.fits.gz"
+    irf_file_en_2 = simulated_dl2_file.parent / "en_dep_cut_irf_2.fits.gz"
+    irf_file_en_3 = simulated_dl2_file.parent / "en_dep_cut_irf_3.fits.gz"
+    irf_file_en_final = simulated_dl2_file.parent / "interp_en_dep_cut_irf.fits.gz"
+
+    run_program(
+        "lstchain_create_irf_files",
+        "--input-gamma-dl2",
+        simulated_dl2_file,
+        "--output-irf-file",
+        irf_file_en_1,
+        "--energy-dependent-gh",
+        "--energy-dependent-theta",
+        "--point-like",
+        "--gh-efficiency=0.8",
+        "--theta-containment=0.8",
+    )
+
+    hdus_2_en = [
+        fits.PrimaryHDU(),
+    ]
+    hdus_3_en = [
+        fits.PrimaryHDU(),
+    ]
+
+    for irf in [simulated_irf_file, irf_file_en_1]:
+        # Change the effective area for different angular pointings
+        aeff_1 = Table.read(irf, hdu="EFFECTIVE AREA")
+        aeff_1_meta = fits.open(irf)["EFFECTIVE AREA"].header
+        aeff_2 = aeff_1.copy()
+        aeff_2_meta = aeff_1_meta.copy()
+
+        zen_1 = u.Quantity(aeff_1_meta["ZEN_PNT"], "deg").to_value(u.rad)
+        az_1 = u.Quantity(aeff_1_meta["AZ_PNT"], "deg").to_value(u.rad)
+        del_1 = u.Quantity(aeff_1_meta["B_DELTA"], "deg").to_value(u.rad)
+
+        zen_2 = 2 * zen_1
+        az_2 = 2 * az_1
+        del_2 = 1.2 * del_1
+
+        factor_zd = np.cos(zen_2) / np.cos(zen_1)
+        factor_del = np.sin(del_2) / np.sin(del_1)
+
+        aeff_1["EFFAREA"][0] *= factor_zd
+        aeff_2["EFFAREA"][0] *= factor_zd * factor_del
+
+        aeff_1_meta["ZEN_PNT"] = (zen_2 * 180 / np.pi, "deg")
+        aeff_1_meta["AZ_PNT"] = (az_1 * 180 / np.pi, "deg")
+        aeff_1_meta["B_DELTA"] = (del_1 * 180 / np.pi, "deg")
+        aeff_2_meta["ZEN_PNT"] = (zen_2 * 180 / np.pi, "deg")
+        aeff_2_meta["AZ_PNT"] = (az_2 * 180 / np.pi, "deg")
+        aeff_2_meta["B_DELTA"] = (del_2 * 180 / np.pi, "deg")
+
+        aeff_hdu_1 = fits.BinTableHDU(
+            aeff_1, header=aeff_1_meta, name="EFFECTIVE AREA"
+        )
+        aeff_hdu_2 = fits.BinTableHDU(
+            aeff_2, header=aeff_2_meta, name="EFFECTIVE AREA"
+        )
+
+        # Change the energy migration for different angular pointings
+        edisp_1 = Table.read(irf, hdu="ENERGY DISPERSION")
+        edisp_1_meta = fits.open(irf)["ENERGY DISPERSION"].header
+        edisp_2 = edisp_1.copy()
+        edisp_2_meta = edisp_1_meta.copy()
+
+        edisp_1["MATRIX"][0] *= factor_zd
+        edisp_2["MATRIX"][0] *= factor_zd * factor_del
+
+        edisp_1_meta["ZEN_PNT"] = (zen_2 * 180 / np.pi, "deg")
+        edisp_1_meta["AZ_PNT"] = (az_1 * 180 / np.pi, "deg")
+        edisp_1_meta["B_DELTA"] = (del_1 * 180 / np.pi, "deg")
+        edisp_2_meta["ZEN_PNT"] = (zen_2 * 180 / np.pi, "deg")
+        edisp_2_meta["AZ_PNT"] = (az_2 * 180 / np.pi, "deg")
+        edisp_2_meta["B_DELTA"] = (del_2 * 180 / np.pi, "deg")
+
+        edisp_hdu_1 = fits.BinTableHDU(
+            edisp_1, header=edisp_1_meta, name="ENERGY DISPERSION"
+        )
+        edisp_hdu_2 = fits.BinTableHDU(
+            edisp_2,
header=edisp_2_meta, name="ENERGY DISPERSION" + ) + + if "en_dep" in irf.name: + # For GH CUTS, apply the factors for cuts, lower than the max value + gh_1 = Table.read(irf, hdu="GH_CUTS") + gh_1_meta = fits.open(irf)["GH_CUTS"].header + gh_2 = gh_1.copy() + gh_2_meta = gh_1_meta.copy() + + mask = gh_1["cut"] < gh_1["cut"].max() + + gh_1["cut"][mask] *= factor_zd + gh_2["cut"][mask] *= factor_zd * factor_del + + gh_1_meta["ZEN_PNT"] = (zen_2 * 180 / np.pi, "deg") + gh_1_meta["AZ_PNT"] = (az_1 * 180 / np.pi, "deg") + gh_1_meta["B_DELTA"] = (del_1 * 180 / np.pi, "deg") + gh_2_meta["ZEN_PNT"] = (zen_2 * 180 / np.pi, "deg") + gh_2_meta["AZ_PNT"] = (az_2 * 180 / np.pi, "deg") + gh_2_meta["B_DELTA"] = (del_2 * 180 / np.pi, "deg") + + gh_hdu_1 = fits.BinTableHDU(gh_1, header=gh_1_meta, name="GH_CUTS") + gh_hdu_2 = fits.BinTableHDU(gh_2, header=gh_2_meta, name="GH_CUTS") + + # For RAD_MAX CUTS, apply the factors for cuts, lower than the max value + th_1 = Table.read(irf, hdu="RAD_MAX") + th_1_meta = fits.open(irf)["RAD_MAX"].header + th_2 = th_1.copy() + th_2_meta = th_1_meta.copy() + + mask = th_1["RAD_MAX"] < th_1["RAD_MAX"].max() + + th_1["RAD_MAX"][mask] *= factor_zd + th_2["RAD_MAX"][mask] *= factor_zd * factor_del + + th_1_meta["ZEN_PNT"] = (zen_2 * 180 / np.pi, "deg") + th_1_meta["AZ_PNT"] = (az_1 * 180 / np.pi, "deg") + th_1_meta["B_DELTA"] = (del_1 * 180 / np.pi, "deg") + th_2_meta["ZEN_PNT"] = (zen_2 * 180 / np.pi, "deg") + th_2_meta["AZ_PNT"] = (az_2 * 180 / np.pi, "deg") + th_2_meta["B_DELTA"] = (del_2 * 180 / np.pi, "deg") + + th_hdu_1 = fits.BinTableHDU(th_1, header=th_1_meta, name="RAD_MAX") + th_hdu_2 = fits.BinTableHDU(th_2, header=th_2_meta, name="RAD_MAX") + + hdus_2_en.append(aeff_hdu_1) + hdus_2_en.append(edisp_hdu_1) + hdus_2_en.append(gh_hdu_1) + hdus_2_en.append(th_hdu_1) + + hdus_3_en.append(aeff_hdu_2) + hdus_3_en.append(edisp_hdu_2) + hdus_3_en.append(gh_hdu_2) + hdus_3_en.append(th_hdu_2) + else: + hdus_2_g.append(aeff_hdu_1) + hdus_2_g.append(edisp_hdu_1) + + hdus_3_g.append(aeff_hdu_2) + hdus_3_g.append(edisp_hdu_2) + + fits.HDUList(hdus_2_g).writeto(irf_file_g_2, overwrite=True) + fits.HDUList(hdus_3_g).writeto(irf_file_g_3, overwrite=True) + fits.HDUList(hdus_2_en).writeto(irf_file_en_2, overwrite=True) + fits.HDUList(hdus_3_en).writeto(irf_file_en_3, overwrite=True) + + irfs_g = [simulated_irf_file, irf_file_g_2, irf_file_g_3] + irfs_en = [irf_file_en_1, irf_file_en_2, irf_file_en_3] + data_pars = { + "ZEN_PNT": 30 * u.deg, + "B_DELTA": (del_1 * 0.8 * u.rad).to(u.deg), + "AZ_PNT": 120 * u.deg + } + + hdu_g = interpolate_irf(irfs_g, data_pars) + hdu_g.writeto(irf_file_g_final, overwrite=True) + + hdu_en = interpolate_irf(irfs_en, data_pars) + hdu_en.writeto(irf_file_en_final, overwrite=True) + + assert hdu_g[1].header["ZEN_PNT"] == 30 + assert irf_file_g_2.exists() + assert irf_file_g_3.exists() + assert irf_file_g_final.exists() + + assert hdu_en[1].header["ZEN_PNT"] == 30 + assert irf_file_en_2.exists() + assert irf_file_en_3.exists() + assert irf_file_en_final.exists() + + +def test_compare_irfs( + simulated_irf_file, simulated_srcdep_irf_file, simulated_dl2_file, + simulated_srcdep_dl2_file +): + from lstchain.high_level.interpolate import compare_irfs + from lstchain.scripts.tests.test_lstchain_scripts import run_program + + # Create IRF with different global cuts for comparison + irf_file_2 = simulated_dl2_file.parent / "diff_cut_irf.fits.gz" + + run_program( + "lstchain_create_irf_files", + "--input-gamma-dl2", + simulated_dl2_file, + "--input-proton-dl2", + 
simulated_dl2_file, + "--input-electron-dl2", + simulated_dl2_file, + "--output-irf-file", + irf_file_2, + "--global-gh-cut=0.7", + ) + # IRFs with same and different efficiency values for energy-dependent cuts, + # and same direction pointing values. + irf_file_en_1 = simulated_dl2_file.parent / "en_dep_cut_irf_1.fits.gz" + irf_file_en_1_cut2 = simulated_dl2_file.parent / "en_dep_diff_cut_irf_1.fits.gz" + + run_program( + "lstchain_create_irf_files", + "--input-gamma-dl2", + simulated_dl2_file, + "--output-irf-file", + irf_file_en_1_cut2, + "--energy-dependent-gh", + "--energy-dependent-theta", + "--point-like", + "--gh-efficiency=0.7", + "--theta-containment=0.7", + ) + + # IRFs with same efficiency values for energy-dependent cuts, + # and different direction pointing values. + irf_file_en_2 = simulated_dl2_file.parent / "en_dep_cut_irf_2.fits.gz" + + # Src-dep IRF with different global alpha cut + srcdep_irf_file_2 = simulated_srcdep_dl2_file.parent / "irf_srcdep_2.fits.gz" + run_program( + "lstchain_create_irf_files", + "--input-gamma-dl2", + simulated_srcdep_dl2_file, + "--output-irf-file", + srcdep_irf_file_2, + "--point-like", + "--source-dep", + "--global-alpha-cut=11" + ) + + irfs_diff_global_cuts = [simulated_irf_file, irf_file_2] + irfs_same_file = [simulated_irf_file, simulated_irf_file] + irfs_same_en_dep_cuts_diff_dir = [irf_file_en_1, irf_file_en_2] + irfs_diff_en_dep_cuts = [irf_file_en_1, irf_file_en_1_cut2] + irfs_srcdep_diff_global_cuts = [simulated_srcdep_irf_file, srcdep_irf_file_2] + + assert compare_irfs(irfs_diff_global_cuts) == 0 + assert compare_irfs(irfs_same_file) + assert compare_irfs(irfs_diff_en_dep_cuts) == 0 + assert compare_irfs(irfs_same_en_dep_cuts_diff_dir) + assert compare_irfs(irfs_srcdep_diff_global_cuts) == 0 + + +def test_check_delaunay_triangles(simulated_irf_file): + from lstchain.high_level.interpolate import check_in_delaunay_triangle + + irf_file_3 = simulated_irf_file.parent / "irf_interp_0.fits.gz" + irf_file_4 = simulated_irf_file.parent / "irf_interp_1.fits.gz" + irf_file_5 = simulated_irf_file.parent / "irf_interp_final.fits.gz" + + irfs = [simulated_irf_file, irf_file_3, irf_file_4, irf_file_5] + + # Check on target being inside or outside Delaunay simplex + data_pars = {"ZEN_PNT": 25 * u.deg, "B_DELTA": 45 * u.deg, "AZ_PNT": 100 * u.deg} + data_pars2 = {"ZEN_PNT": 58 * u.deg, "B_DELTA": 70 * u.deg, "AZ_PNT": 200 * u.deg} + + new_irfs = check_in_delaunay_triangle(irfs, data_pars) + new_irfs2 = check_in_delaunay_triangle(irfs, data_pars2) + new_irfs3 = check_in_delaunay_triangle( + irfs, data_pars, use_nearest_irf_node=True + ) + new_irfs4 = check_in_delaunay_triangle([irfs[0]], data_pars) + + t3 = Table.read(new_irfs3[0], hdu=1).meta + + assert len(new_irfs) == 3 + assert len(new_irfs2) == 1 + assert t3["ZEN_PNT"] == 20 + assert len(new_irfs4) == 1 + + +def test_get_nearest_az_node(): + from scipy.spatial import distance + + from lstchain.high_level.interpolate import get_nearest_az_node, interp_params + + # 2 coincident nodes in IRF interpolation grid (cos zenith, sin delta) + data_0 = { + "ZEN_PNT": 10 * u.deg, + "B_DELTA": 50.3607 * u.deg, + "AZ_PNT": 102.199 * u.deg, + } + data_1 = { + "ZEN_PNT": 10 * u.deg, + "B_DELTA": 50.3607 * u.deg, + "AZ_PNT": 248.117 * u.deg, + } + # Target node, with same b_delta (sin delta) value as the above nodes + # AZ_PNT and ZEN_PNT are random close values, not physical. 
+ data_target = { + "ZEN_PNT": 12 * u.deg, + "B_DELTA": 50.3607 * u.deg, + "AZ_PNT": 150 * u.deg, + } + params_list = ["ZEN_PNT", "B_DELTA"] + params_list_full = ["ZEN_PNT", "B_DELTA", "AZ_PNT"] + az_idx = list(data_target.keys()).index("AZ_PNT") + + # Use interp_params to have appropriate lists for the test + # Without AZ_PNT, for IRF interpolation space + check_params_0 = interp_params(params_list, data_0) + check_params_1 = interp_params(params_list, data_1) + check_params_target = interp_params(params_list, data_target) + check_params = np.array([check_params_0, check_params_1]) + + # With AZ_PNT + check_params_0_full = interp_params(params_list_full, data_0) + check_params_1_full = interp_params(params_list_full, data_1) + check_params_target_full = interp_params(params_list_full, data_target) + check_params_full = np.array([check_params_0_full, check_params_1_full]) + + # Get the distance between the target and given nodes, in IRF interpolation grid + index = distance.cdist([check_params_target], check_params).argmin() + + # Fetch the index of node, closest to the target + index2 = get_nearest_az_node( + check_params, index, check_params_full, check_params_target_full, az_idx + ) + + # Closest node + mc_closest = check_params_full[index2] + + # Values to compare + mc_closest_az = round(mc_closest[-1], 3) + az_check = round(102.199 * np.pi / 180, 3) + + assert mc_closest_az == az_check + + +def test_interpolate_gh_cuts(): + from lstchain.high_level.interpolate import interpolate_gh_cuts + + # Similar function as interpolate_rad_max, hence no need for extra test + # linear test case + gh_cuts_1 = np.array([[0, 0], [0.1, 0], [0.2, 0.1], [0.3, 0.2]]) + gh_cuts_2 = 2 * gh_cuts_1 + gh_cut = np.array([gh_cuts_1, gh_cuts_2]) + + grid_points = np.array([[0], [0.1]]) + target_point = np.array([0.05]) + + interp = interpolate_gh_cuts( + gh_cuts=gh_cut, + grid_points=grid_points, + target_point=target_point, + method="linear", + ) + + assert interp.shape == (1, *gh_cuts_1.shape) + assert np.allclose(interp, 1.5 * gh_cuts_1) diff --git a/lstchain/io/io.py b/lstchain/io/io.py index 66a1ebdf9..ecd065575 100644 --- a/lstchain/io/io.py +++ b/lstchain/io/io.py @@ -24,6 +24,7 @@ from pyirf.simulations import SimulatedEventsInfo +from lstchain.reco.utils import get_geomagnetic_delta from .lstcontainers import ( ExtraMCInfo, MetaData, @@ -848,7 +849,8 @@ def write_calibration_data(writer, mon_index, mon_event, new_ped=False, new_ff=F def read_mc_dl2_to_QTable(filename): """ Read MC DL2 files from lstchain and convert into pyirf internal format - - astropy.table.QTable + - astropy.table.QTable. + Also include simulation information necessary for some functions. 
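+
+    A usage sketch (the DL2 file name here is hypothetical); the extra dict
+    carries the geomagnetic-field information used for IRF interpolation:
+
+    >>> events, simu_info, extra = read_mc_dl2_to_QTable("dl2_gamma.h5")
+    >>> "GEOMAG_DELTA" in extra
+    True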
Parameters ---------- @@ -856,7 +858,9 @@ def read_mc_dl2_to_QTable(filename): Returns ------- - `astropy.table.QTable`, `pyirf.simulations.SimulatedEventsInfo` + events: `astropy.table.QTable` + pyirf_simu_info: `pyirf.simulations.SimulatedEventsInfo` + extra_data: 'Dict' """ # mapping @@ -887,13 +891,28 @@ def read_mc_dl2_to_QTable(filename): unit_mapping['alpha'] = u.deg simu_info = read_simu_info_merged_hdf5(filename) + + # Temporary addition here, but can be included in the pyirf.simulations + # class of SimulatedEventsInfo + extra_data = {} + extra_data["GEOMAG_TOTAL"] = simu_info.prod_site_B_total + extra_data["GEOMAG_DEC"] = simu_info.prod_site_B_declination + extra_data["GEOMAG_INC"] = simu_info.prod_site_B_inclination + + extra_data["GEOMAG_DELTA"] = get_geomagnetic_delta( + zen = np.pi/2 - simu_info.min_alt.to_value(u.rad), + az = simu_info.min_az.to_value(u.rad), + geomag_dec = simu_info.prod_site_B_declination.to_value(u.rad), + geomag_inc = simu_info.prod_site_B_inclination.to_value(u.rad) + ) * u.rad + pyirf_simu_info = SimulatedEventsInfo( n_showers=simu_info.num_showers * simu_info.shower_reuse, energy_min=simu_info.energy_range_min, energy_max=simu_info.energy_range_max, max_impact=simu_info.max_scatter_range, spectral_index=simu_info.spectral_index, - viewcone=simu_info.max_viewcone_radius, + viewcone=simu_info.max_viewcone_radius ) events = pd.read_hdf(filename, key=dl2_params_lstcam_key) @@ -909,12 +928,14 @@ def read_mc_dl2_to_QTable(filename): for k, v in unit_mapping.items(): events[k] *= v - return events, pyirf_simu_info + return events, pyirf_simu_info, extra_data def read_data_dl2_to_QTable(filename, srcdep_pos=None): """ - Read data DL2 files from lstchain and return QTable format + Read data DL2 files from lstchain and return QTable format, along with + a dict of target parameters for IRF interpolation + Parameters ---------- filename: path to the lstchain DL2 file @@ -922,7 +943,8 @@ def read_data_dl2_to_QTable(filename, srcdep_pos=None): Returns ------- - `astropy.table.QTable` + data: `astropy.table.QTable` + data_params: 'Dict' of target interpolation parameters """ # Mapping @@ -953,14 +975,26 @@ def read_data_dl2_to_QTable(filename, srcdep_pos=None): data = pd.concat([data, data_srcdep], axis=1) data = data.rename(columns=name_mapping) - data = QTable.from_pandas(data) # Make the columns as Quantity for k, v in unit_mapping.items(): data[k] *= v - return data + # Create dict of target parameters for IRF interpolation + data_params = {} + + zen = np.pi / 2 * u.rad - data["pointing_alt"].mean().to(u.rad) + az = data["pointing_az"].mean().to(u.rad) + if az < 0: + az += 2*np.pi * u.rad + b_delta = u.Quantity(get_geomagnetic_delta(zen=zen, az=az)) + + data_params["ZEN_PNT"] = round(zen.to_value(u.deg), 5) * u.deg + data_params["AZ_PNT"] = round(az.to_value(u.deg), 5) * u.deg + data_params["B_DELTA"] = round(b_delta.to_value(u.deg), 5) * u.deg + + return data, data_params def read_dl2_params(t_filename, columns_to_read=None): @@ -1146,7 +1180,7 @@ def check_mc_type(filename): """ simu_info = read_simu_info_merged_hdf5(filename) - + min_viewcone = simu_info.min_viewcone_radius.value max_viewcone = simu_info.max_viewcone_radius.value @@ -1155,7 +1189,7 @@ def check_mc_type(filename): elif min_viewcone == 0.0: mc_type = 'diffuse' - + elif (max_viewcone - min_viewcone) < 0.1: mc_type = 'ring_wobble' diff --git a/lstchain/reco/tests/test_utils.py b/lstchain/reco/tests/test_utils.py index 650f07c87..7d42a12fb 100644 --- a/lstchain/reco/tests/test_utils.py +++ 
b/lstchain/reco/tests/test_utils.py
@@ -210,7 +210,7 @@ def test_get_geomagnetic_delta():
     # this is just a regression test assuming the values were correct when
     # first implementing the function
     inc = get_geomagnetic_delta(zen=20 * u.deg, az=0 * u.deg, time=GEOM_MAG_REFERENCE_TIME)
-    assert u.isclose(inc, 0.57002008 * u.rad)
+    assert u.isclose(inc, 1.26667967 * u.rad)
 
     inc = get_geomagnetic_delta(zen=50 * u.deg, az=20 * u.deg, time=GEOM_MAG_REFERENCE_TIME)
-    assert u.isclose(inc, 0.20785624 * u.rad)
+    assert u.isclose(inc, 1.73389012 * u.rad)
diff --git a/lstchain/reco/utils.py b/lstchain/reco/utils.py
index 0a8e11c9e..1023eb5e5 100644
--- a/lstchain/reco/utils.py
+++ b/lstchain/reco/utils.py
@@ -44,7 +44,7 @@
     "rotate",
     "sky_to_camera",
     "source_dx_dy",
-    "source_side",
+    "source_side"
 ]
 
 # position of the LST1
@@ -54,15 +54,16 @@
 # Geomagnetic parameters for the LST1 as per
 # https://www.ngdc.noaa.gov/geomag/calculators/magcalc.shtml?#igrfwmm and
-# using IGRF model on date TIME_MC = 2020-06-29
-GEOM_MAG_REFERENCE_TIME = Time("2020-06-29", format="iso")
-GEOMAG_DEC = (-5.0674 * u.deg).to(u.rad)
-GEOMAG_INC = (37.4531 * u.deg).to(u.rad)
-GEOMAG_TOTAL = 38.7305 * u.uT
+# using the IGRF model on date TIME_MC = 2021-11-29, at an elevation of
+# 10 km a.s.l., around where the particle shower is at its peak
+GEOM_MAG_REFERENCE_TIME = Time("2021-11-29", format="iso")
+GEOMAG_DEC = (-4.8443 * u.deg).to(u.rad)
+GEOMAG_INC = (37.3663 * u.deg).to(u.rad)
+GEOMAG_TOTAL = 38.5896 * u.uT
 
-DELTA_DEC = (0.1656 * u.deg / u.yr).to(u.rad / u.year)
-DELTA_INC = (-0.0698 * u.deg / u.yr).to(u.rad / u.year)
-DELTA_TOTAL = 0.009 * u.uT / u.yr
+DELTA_DEC = (0.1653 * u.deg / u.yr).to(u.rad / u.year)
+DELTA_INC = (-0.0700 * u.deg / u.yr).to(u.rad / u.year)
+DELTA_TOTAL = 0.0089 * u.uT / u.yr
 
 log = logging.getLogger(__name__)
@@ -693,7 +694,6 @@ def get_effective_time(events):
     return t_eff, t_elapsed
 
-
 def get_geomagnetic_field_orientation(time=None):
     '''
     Linearly extrapolate the geomagnetic field parameters from the
@@ -752,8 +752,8 @@ def get_geomagnetic_delta(zen, az, geomag_dec=None, geomag_inc=None, time=None):
         geomag_dec, geomag_inc = get_geomagnetic_field_orientation(time)
 
     term = (
-        (np.sin(geomag_inc) * np.cos(zen)) +
-        (np.cos(geomag_inc) * np.sin(zen) * np.cos(az + geomag_dec))
+        (np.sin(geomag_inc) * np.cos(zen)) -
+        (np.cos(geomag_inc) * np.sin(zen) * np.cos(az - geomag_dec))
     )
     delta = np.arccos(term)
diff --git a/lstchain/scripts/tests/test_lstchain_scripts.py b/lstchain/scripts/tests/test_lstchain_scripts.py
index 4d11e14b5..9395f52cd 100644
--- a/lstchain/scripts/tests/test_lstchain_scripts.py
+++ b/lstchain/scripts/tests/test_lstchain_scripts.py
@@ -472,9 +472,10 @@ def test_read_mc_dl2_to_QTable(simulated_dl2_file):
     from lstchain.io.io import read_mc_dl2_to_QTable
     import astropy.units as u
 
-    events, sim_info = read_mc_dl2_to_QTable(simulated_dl2_file)
+    events, sim_info, simu_geomag = read_mc_dl2_to_QTable(simulated_dl2_file)
     assert "true_energy" in events.colnames
     assert sim_info.energy_max == 330 * u.TeV
+    assert "GEOMAG_DELTA" in simu_geomag
 
 
 @pytest.mark.private_data
@@ -484,8 +485,10 @@ def test_read_data_dl2_to_QTable(temp_dir_observed_files, observed_dl1_files):
     real_data_dl2_file = temp_dir_observed_files / (
         observed_dl1_files["dl1_file1"].name.replace("dl1", "dl2")
     )
-    events = read_data_dl2_to_QTable(real_data_dl2_file)
+    events, data_pars = read_data_dl2_to_QTable(real_data_dl2_file)
+
     assert "gh_score" in events.colnames
+    assert "B_DELTA" in data_pars
 
 
 @pytest.mark.private_data
diff --git a/lstchain/tools/lstchain_create_dl3_file.py b/lstchain/tools/lstchain_create_dl3_file.py
index 1387decab..0a37a7eb6 100644
--- a/lstchain/tools/lstchain_create_dl3_file.py
+++ b/lstchain/tools/lstchain_create_dl3_file.py
@@ -6,19 +6,30 @@
 The default values are written in the EventSelector and DL3Cuts Component
 and also given in some example configs in docs/examples/
 
-For the cuts on gammaness, the Tool looks at the IRF provided, to either use
-global cuts, based on the header value of the global gammaness cut, GH_CUT,
-present in each HDU, or energy-dependent cuts, based on the GH_CUTS HDU.
+To use the IRF interpolation methods, and so obtain an IRF with a sky
+pointing the same as, or close (in the interpolation parameter space) to,
+that of the data provided, one has to provide:
+1. the path to the IRFs,
+2. a glob search pattern for selecting the IRFs to be used, and
+3. a final interpolated IRF file name.
+
+If, instead of using IRF interpolation, one only needs to attach the IRF
+node nearest to the given data in the interpolation space, one has to pass
+the use-nearest-irf-node flag.
+
+For the cuts on gammaness, the Tool looks at the IRF provided, or at the
+final interpolated/selected IRF, to either use global cuts, based on the
+header value of the global gammaness cut, GH_CUT, present in each HDU, or
+energy-dependent cuts, based on the GH_CUTS HDU.
 
 To use a separate config file for providing the selection parameters,
 copy and append the relevant example config files, into a custom config file.
 
-For source-dependent analysis, a source-dep flag should be activated.
+For source-dependent analysis, a source-dep flag should be passed.
 Similarly to the cuts on gammaness, the global alpha cut values are provided
-from AL_CUT stored in the HDU header.
-The alpha cut is already applied on this step, and all survived events with
-each assumed source position (on and off) are saved after the gammaness and
-alpha cut.
+from AL_CUT stored in the HDU header. The alpha cut is already applied at
+this step, and all surviving events, with each assumed source position
+(on and off), are saved after the gammaness and alpha cuts.
 To adapt to the high-level analysis used by gammapy, assumed source position
 (on and off) is set as a reco source position just as a trick to obtain
 survived events easily.
@@ -27,6 +38,7 @@ from astropy.coordinates import SkyCoord from astropy.io import fits from astropy.table import vstack, QTable +import astropy.units as u from ctapipe.core import ( Provenance, Tool, @@ -42,7 +54,10 @@ ) from lstchain.high_level import ( add_icrs_position_params, + check_in_delaunay_triangle, + compare_irfs, create_event_list, + interpolate_irf, set_expected_pos_to_reco_altaz, ) from lstchain.paths import ( @@ -63,7 +78,7 @@ class DataReductionFITSWriter(Tool): > lstchain_create_dl3_file -d /path/to/DL2_data_file.h5 -o /path/to/DL3/file/ - --input-irf /path/to/irf.fits.gz + -i /path/to/irf/ --source-name Crab --source-ra 83.633deg --source-dec 22.01deg @@ -72,7 +87,7 @@ class DataReductionFITSWriter(Tool): > lstchain_create_dl3_file -d /path/to/DL2_data_file.h5 -o /path/to/DL3/file/ - --input-irf /path/to/irf.fits.gz + -i /path/to/irf/ --source-name Crab --source-ra 83.633deg --source-dec 22.01deg @@ -83,53 +98,99 @@ class DataReductionFITSWriter(Tool): > lstchain_create_dl3_file -d /path/to/DL2_data_file.h5 -o /path/to/DL3/file/ - --input-irf /path/to/irf.fits.gz + --input-irf-path /path/to/irf/ + --irf-file-pattern "irf*.fits.gz" + --final-irf-file final_interp_irf.fits.gz --source-name Crab --source-ra 83.633deg --source-dec 22.01deg - --global-gh-cut 0.9 --overwrite Or generate source-dependent DL3 files > lstchain_create_dl3_file -d /path/to/DL2_data_file.h5 -o /path/to/DL3/file/ - --input-irf /path/to/irf.fits.gz + --input-irf-path /path/to/irf + --irf-file-pattern "irf.fits.gz" --source-name Crab --source-dep --overwrite + + Or use a list of IRFs for including interpolated IRF: + > lstchain_create_dl3_file + -d /path/to/DL2_data_file.h5 + -o /path/to/DL3/file/ + -i /path/to/irf/ + -p "irf*.fits.gz" + -f final_interp_irf.fits.gz + --interp-method linear + --source-name Crab + --source-ra 83.633deg + --source-dec 22.01deg + --overwrite + + Or use a list of IRFs for including only the nearest IRF: + > lstchain_create_dl3_file + -d /path/to/DL2_data_file.h5 + -o /path/to/DL3/file/ + -i /path/to/irf/ + -p "irf*.fits.gz" + -f final_interp_irf.fits.gz + --use-nearest-irf-node + --source-name Crab + --source-ra 83.633deg + --source-dec 22.01deg + --overwrite + """ input_dl2 = traits.Path( help="Input data DL2 file", exists=True, directory_ok=False, - file_ok=True + file_ok=True, ).tag(config=True) output_dl3_path = traits.Path( help="DL3 output filedir", directory_ok=True, - file_ok=False + file_ok=False, ).tag(config=True) - input_irf = traits.Path( - help="Compressed FITS file of IRFs", + input_irf_path = traits.Path( + help="Path for compressed FITS file of IRFs", exists=True, + directory_ok=True, + file_ok=False, + ).tag(config=True) + + irf_file_pattern = traits.Unicode( + help="IRF file pattern to search in the given IRF files path", + default_value="*irf*.fits.gz", + ).tag(config=True) + + final_irf_file = traits.Path( + help="Final IRF file included with DL3 file", directory_ok=False, file_ok=True, + default_value="final_irf.fits.gz", ).tag(config=True) source_name = traits.Unicode( - help="Name of Source" + help="Name of Source", ).tag(config=True) source_ra = traits.Unicode( - help="RA position of the source" + help="RA position of the source", ).tag(config=True) source_dec = traits.Unicode( - help="DEC position of the source" + help="DEC position of the source", + ).tag(config=True) + + interp_method = traits.Unicode( + help="Interpolation method to be used, when required", + default_value="linear", ).tag(config=True) overwrite = traits.Bool( @@ -142,6 +203,11 @@ 
class DataReductionFITSWriter(Tool):
+    use_nearest_irf_node = traits.Bool(
+        help="If True, only look for the nearest IRF node to the data. No interpolation",
+        default_value=False,
+    ).tag(config=True)
+
     gzip = traits.Bool(
         help="If True, the DL3 file will be gzipped",
         default_value=False,
@@ -152,8 +218,10 @@ class DataReductionFITSWriter(Tool):
     aliases = {
         ("d", "input-dl2"): "DataReductionFITSWriter.input_dl2",
         ("o", "output-dl3-path"): "DataReductionFITSWriter.output_dl3_path",
-        "input-irf": "DataReductionFITSWriter.input_irf",
-        "global-gh-cut": "DL3Cuts.global_gh_cut",
+        ("i", "input-irf-path"): "DataReductionFITSWriter.input_irf_path",
+        ("p", "irf-file-pattern"): "DataReductionFITSWriter.irf_file_pattern",
+        ("f", "final-irf-file"): "DataReductionFITSWriter.final_irf_file",
+        "interp-method": "DataReductionFITSWriter.interp_method",
         "source-name": "DataReductionFITSWriter.source_name",
         "source-ra": "DataReductionFITSWriter.source_ra",
         "source-dec": "DataReductionFITSWriter.source_dec",
@@ -168,6 +236,10 @@ class DataReductionFITSWriter(Tool):
             {"DataReductionFITSWriter": {"source_dep": True}},
             "source-dependent analysis if True",
         ),
+        "use-nearest-irf-node": (
+            {"DataReductionFITSWriter": {"use_nearest_irf_node": True}},
+            "Only use the closest IRF, if True",
+        ),
         "gzip": (
             {"DataReductionFITSWriter": {"gzip": True}},
             "gzip the DL3 files if True",
@@ -194,6 +266,50 @@ def setup(self):
                 f"Output file {self.output_file} already exists,"
                 " use --overwrite to overwrite"
             )
+
+        self.final_irf_output = self.output_dl3_path.absolute() / str(self.final_irf_file.name)
+
+        if self.final_irf_output.exists():
+            if self.overwrite:
+                self.log.warning(f"Overwriting {self.final_irf_output}")
+                self.final_irf_output.unlink()
+            else:
+                raise ToolConfigurationError(
+                    f"Final IRF file {self.final_irf_output} already exists,"
+                    " use --overwrite to overwrite"
+                )
+
+        if self.input_irf_path:
+            self.irf_list = sorted(
+                self.input_irf_path.glob(self.irf_file_pattern)
+            )
+
+            if len(self.irf_list) > 1:
+                self.use_irf_interpolation = True
+                # Compare the IRFs for their metadata and cuts
+                if not compare_irfs(self.irf_list):
+                    raise ToolConfigurationError(
+                        f"IRF files in {self.input_irf_path} with pattern "
+                        f"{self.irf_file_pattern} are not similar and cannot"
+                        " be used for interpolation. Use a different list of IRFs."
+                    )
+
+            elif len(self.irf_list) == 1:
+                self.use_irf_interpolation = False
+                self.log.info(
+                    f"Only a single IRF {self.irf_list[0]} provided."
+                    " No interpolation possible"
+                )
+                self.irf_final_hdu = fits.open(self.irf_list[0])
+                self.irf_final_hdu.writeto(
+                    self.final_irf_output, overwrite=self.overwrite
+                )
+            else:
+                raise ToolConfigurationError(
+                    f"No IRF files found in {self.input_irf_path} with "
+                    f"pattern {self.irf_file_pattern}. Use different parameters"
+                )
+
         if not (self.source_ra or self.source_dec):
             self.source_pos = SkyCoord.from_name(self.source_name)
         elif bool(self.source_ra) != bool(self.source_dec):
@@ -205,30 +321,66 @@ def setup(self):
 
         self.log.debug(f"Output DL3 file: {self.output_file}")
 
+    def interp_irfs(self):
+        """
+        Get the optimal number of IRFs necessary for interpolation.
+        If the target parameter is not inside a simplex formed by
+        the given list of IRFs, use the nearest grid point.
+ """ + + self.irf_list = check_in_delaunay_triangle( + self.irf_list, self.data_params + ) + + if len(self.irf_list) > 1: + self.log.info( + f"Paths of IRFs used for interpolation: {self.irf_list}" + ) + self.irf_final_hdu = interpolate_irf( + self.irf_list, self.data_params, self.interp_method + ) + self.irf_final_hdu.writeto( + self.final_irf_output, overwrite=self.overwrite + ) + else: + self.irf_final_hdu = fits.open(self.irf_list[0]) + self.irf_final_hdu.writeto( + self.final_irf_output, overwrite=self.overwrite + ) + self.log.info( + f"Nearest IRF {self.irf_list[0]} is used without interpolation" + ) + + def check_energy_dependent_cuts(self): + """ + Check if the final IRF has energy-dependent gammaness cuts or not. + """ try: - with fits.open(self.input_irf) as hdul: + with fits.open(self.final_irf_output) as hdul: self.use_energy_dependent_gh_cuts = ( "GH_CUT" not in hdul["EFFECTIVE AREA"].header ) - except: + except KeyError: raise ToolConfigurationError( - f"{self.input_irf} does not have EFFECTIVE AREA HDU, " + f"{self.final_irf_output} does not have EFFECTIVE AREA HDU, " " to check for global cut information in the Header value" ) if self.source_dep: - with fits.open(self.input_irf) as hdul: + with fits.open(self.final_irf_output) as hdul: self.use_energy_dependent_alpha_cuts = ( "AL_CUT" not in hdul["EFFECTIVE AREA"].header ) - + def apply_srcindep_gh_cut(self): - ''' apply gammaness cut ''' + """ + Apply gammaness cut. + """ self.data = self.event_sel.filter_cut(self.data) if self.use_energy_dependent_gh_cuts: self.energy_dependent_gh_cuts = QTable.read( - self.input_irf, hdu="GH_CUTS" + self.final_irf_output, hdu="GH_CUTS" ) self.data = self.cuts.apply_energy_dependent_gh_cuts( @@ -239,25 +391,27 @@ def apply_srcindep_gh_cut(self): f"{self.energy_dependent_gh_cuts.meta['GH_EFF']}" ) else: - with fits.open(self.input_irf) as hdul: + with fits.open(self.final_irf_output) as hdul: self.cuts.global_gh_cut = hdul[1].header["GH_CUT"] self.data = self.cuts.apply_global_gh_cut(self.data) self.log.info(f"Using global G/H cut of {self.cuts.global_gh_cut}") def apply_srcdep_gh_alpha_cut(self): - ''' apply gammaness and alpha cut for source-dependent analysis ''' + """ + Apply gammaness and alpha cut for source-dependent analysis. 
+ """ srcdep_assumed_positions = get_srcdep_assumed_positions(self.input_dl2) for i, srcdep_pos in enumerate(srcdep_assumed_positions): - data_temp = read_data_dl2_to_QTable( + data_temp, _ = read_data_dl2_to_QTable( self.input_dl2, srcdep_pos=srcdep_pos ) data_temp = self.event_sel.filter_cut(data_temp) - + if self.use_energy_dependent_gh_cuts: self.energy_dependent_gh_cuts = QTable.read( - self.input_irf, hdu="GH_CUTS" + self.final_irf_output, hdu="GH_CUTS" ) data_temp = self.cuts.apply_energy_dependent_gh_cuts( @@ -268,13 +422,13 @@ def apply_srcdep_gh_alpha_cut(self): f"{self.energy_dependent_gh_cuts.meta['GH_EFF']}" ) else: - with fits.open(self.input_irf) as hdul: + with fits.open(self.final_irf_output) as hdul: self.cuts.global_gh_cut = hdul[1].header["GH_CUT"] data_temp = self.cuts.apply_global_gh_cut(data_temp) - + if self.use_energy_dependent_alpha_cuts: self.energy_dependent_alpha_cuts = QTable.read( - self.input_irf, hdu="AL_CUTS" + self.final_irf_output, hdu="AL_CUTS" ) data_temp = self.cuts.apply_energy_dependent_alpha_cuts( data_temp, self.energy_dependent_alpha_cuts @@ -284,7 +438,7 @@ def apply_srcdep_gh_alpha_cut(self): f'{self.energy_dependent_alpha_cuts.meta["AL_CONT"]}' ) else: - with fits.open(self.input_irf) as hdul: + with fits.open(self.final_irf_output) as hdul: self.cuts.global_alpha_cut = hdul[1].header["AL_CUT"] data_temp = self.cuts.apply_global_alpha_cut(data_temp) @@ -299,9 +453,19 @@ def apply_srcdep_gh_alpha_cut(self): def start(self): if not self.source_dep: - self.data = read_data_dl2_to_QTable(self.input_dl2) + self.data, self.data_params = read_data_dl2_to_QTable(self.input_dl2) else: - self.data = read_data_dl2_to_QTable(self.input_dl2, 'on') + self.data, self.data_params = read_data_dl2_to_QTable(self.input_dl2, "on") + + if self.use_irf_interpolation: + if not self.use_nearest_irf_node: + self.interp_irfs() + else: + self.final_irf_output = check_in_delaunay_triangle( + self.irf_list, self.data_params, self.use_nearest_irf_node + )[0] + self.check_energy_dependent_cuts() + self.effective_time, self.elapsed_time = get_effective_time(self.data) self.run_number = run_info_from_filename(self.input_dl2)[1] @@ -320,20 +484,48 @@ def start(self): source_pos=self.source_pos, effective_time=self.effective_time.value, elapsed_time=self.elapsed_time.value, + data_pars=self.data_params, ) + self.log.info(f"Target parameters for interpolation: {self.data_params}") self.hdulist = fits.HDUList( [fits.PrimaryHDU(), self.events, self.gti, self.pointing] ) - irf = fits.open(self.input_irf) + self.irf_final_hdu = fits.open(self.final_irf_output) + self.log.info("Adding IRF HDUs") + self.mc_params = dict() - for irf_hdu in irf[1:]: + h = self.irf_final_hdu[1].header + + for p in self.data_params.keys(): + self.mc_params[p] = u.Quantity(h[p], "deg") + + mc_gamma_offset = u.Quantity( + h["G_OFFSET"], + "deg" + ) + + self.log.info(f"Gamma offset for MC is {mc_gamma_offset:.3f}") + self.log.info( + f"Zenith pointing of MC at {self.mc_params['ZEN_PNT']:.3f}" + ) + self.log.info( + f"Azimuth pointing of MC at {self.mc_params['AZ_PNT']:.3f}" + ) + self.log.info( + f"Geomagnetic delta for the MC is {self.mc_params['B_DELTA']:.3f}" + ) + + for irf_hdu in self.irf_final_hdu[1:]: self.hdulist.append(irf_hdu) def finish(self): + self.hdulist.writeto(self.output_file, overwrite=self.overwrite) + if len(self.irf_list) > 1: + Provenance().add_output_file(self.final_irf_output) Provenance().add_output_file(self.output_file) diff --git a/lstchain/tools/lstchain_create_irf_files.py 
diff --git a/lstchain/tools/lstchain_create_irf_files.py b/lstchain/tools/lstchain_create_irf_files.py
index 686f8d8a5..8b793a811 100644
--- a/lstchain/tools/lstchain_create_irf_files.py
+++ b/lstchain/tools/lstchain_create_irf_files.py
@@ -263,11 +263,11 @@ def setup(self):
         )

         filename = self.output_irf_file.name
-        if not (filename.endswith('.fits') or filename.endswith('.fits.gz')):
+        if not (filename.endswith(".fits") or filename.endswith(".fits.gz")):
             raise ValueError(
-                f"{filename} is not a correct compressed FITS file name"
-                "(use .fits or .fits.gz)."
-            )
+                f"{filename} is not a correct compressed FITS file name. "
+                "Use .fits or .fits.gz."
+            )

         if self.input_proton_dl2 and self.input_electron_dl2 is not Undefined:
             self.only_gamma_irf = False
@@ -311,7 +311,11 @@ def start(self):
         for particle_type, p in self.mc_particle.items():
             self.log.info(f"Simulated {particle_type.title()} Events:")
-            p["events"], p["simulation_info"] = read_mc_dl2_to_QTable(p["file"])
+            (
+                p["events"],
+                p["simulation_info"],
+                p["geomag_params"],
+            ) = read_mc_dl2_to_QTable(p["file"])
             p["mc_type"] = check_mc_type(p["file"])
@@ -331,6 +335,13 @@ def start(self):
                 p["simulated_spectrum"],
             )
+            p["ZEN_PNT"] = round(
+                90 - p["events"]["pointing_alt"][0].to_value(u.deg), 5
+            )
+            p["AZ_PNT"] = round(
+                p["events"]["pointing_az"][0].to_value(u.deg), 5
+            )
+
             if not self.source_dep:
                 for prefix in ("true", "reco"):
                     k = f"{prefix}_source_fov_offset"
@@ -357,12 +368,17 @@ def start(self):
         self.log.debug(p["simulation_info"])

         gammas = self.mc_particle["gamma"]["events"]
+        geomag_params = self.mc_particle["gamma"]["geomag_params"]
+        self.log.info(geomag_params)

         # Binning of parameters used in IRFs
         true_energy_bins = self.data_bin.true_energy_bins()
         reco_energy_bins = self.data_bin.reco_energy_bins()
         migration_bins = self.data_bin.energy_migration_bins()
         source_offset_bins = self.data_bin.source_offset_bins()
+        mean_fov_offset = round(
+            gammas["true_source_fov_offset"].mean().to_value(), 4
+        )

         gammas = self.event_sel.filter_cut(gammas)
         gammas = self.cuts.allowed_tels_filter(gammas)
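The ZEN_PNT and AZ_PNT values stored above come straight from the pointing of the first simulated event; a minimal sketch of that conversion (the pointing values are hypothetical):

import astropy.units as u

# Hypothetical pointing of the first MC event
pointing_alt = 70.0 * u.deg
pointing_az = 180.0 * u.deg

zen_pnt = round(90 - pointing_alt.to_value(u.deg), 5)  # -> 20.0, written as ZEN_PNT
az_pnt = round(pointing_az.to_value(u.deg), 5)         # -> 180.0, written as AZ_PNT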
self.mc_particle["electron"]["events"], ] ) + # Check common parameters of the MCs used + for par in ["ZEN_PNT", "AZ_PNT"]: + k = [ + self.mc_particle["gamma"][par], + self.mc_particle["proton"][par], + self.mc_particle["electron"][par], + ] + if len(set(k)) != 1: + raise ToolConfigurationError( + "MCs of different " + par + " used." + "Use MCs with same zenith pointing." + ) + if self.energy_dependent_gh: background = self.cuts.apply_energy_dependent_gh_cuts( background, self.gh_cuts_gamma @@ -476,6 +510,36 @@ def start(self): "INSTRUME": "LST-" + " ".join(map(str, self.cuts.allowed_tels)), "FOVALIGN": "RADEC", } + + extra_headers["ZEN_PNT"] = ( + self.mc_particle["gamma"]["ZEN_PNT"], + "deg" + ) + extra_headers["AZ_PNT"] = ( + self.mc_particle["gamma"]["AZ_PNT"], + "deg" + ) + extra_headers["G_OFFSET"] = ( + mean_fov_offset, + "deg" + ) + extra_headers["B_TOTAL"] = ( + geomag_params["GEOMAG_TOTAL"].to_value(u.uT), + "uT", + ) + extra_headers["B_INC"] = ( + geomag_params["GEOMAG_INC"].to_value(u.rad), + "rad", + ) + extra_headers["B_DEC"] = ( + geomag_params["GEOMAG_DEC"].to_value(u.rad), + "rad", + ) + extra_headers["B_DELTA"] = ( + geomag_params["GEOMAG_DELTA"].to_value(u.deg), + "deg", + ) + if self.point_like: self.log.info("Generating point_like IRF HDUs") else: @@ -488,7 +552,7 @@ def start(self): else: extra_headers["GH_EFF"] = ( self.cuts.gh_efficiency, - "gamma/hadron efficiency" + "gamma/hadron efficiency", ) if self.point_like: @@ -496,7 +560,7 @@ def start(self): if self.energy_dependent_theta: extra_headers["TH_CONT"] = ( self.cuts.theta_containment, - "Theta containment region in percentage" + "Theta containment region in percentage", ) else: extra_headers["RAD_MAX"] = ( @@ -513,14 +577,14 @@ def start(self): if self.energy_dependent_alpha: extra_headers["AL_CONT"] = ( self.cuts.alpha_containment, - "Alpha containment region in percentage" + "Alpha containment region in percentage", ) else: extra_headers["AL_CUT"] = ( self.cuts.global_alpha_cut, 'deg' ) - + # Write HDUs self.hdus = [fits.PrimaryHDU(), ] @@ -529,14 +593,14 @@ def start(self): self.effective_area = effective_area_per_energy( gammas, self.mc_particle["gamma"]["simulation_info"], - true_energy_bins, + true_energy_bins=true_energy_bins, ) self.hdus.append( create_aeff2d_hdu( # add one dimension for single FOV offset - self.effective_area[..., np.newaxis], - true_energy_bins, - fov_offset_bins, + effective_area=self.effective_area[..., np.newaxis], + true_energy_bins=true_energy_bins, + fov_offset_bins=fov_offset_bins, point_like=self.point_like, extname="EFFECTIVE AREA", **extra_headers, @@ -546,14 +610,14 @@ def start(self): self.effective_area = effective_area_per_energy_and_fov( gammas, self.mc_particle["gamma"]["simulation_info"], - true_energy_bins, - fov_offset_bins, + true_energy_bins=true_energy_bins, + fov_offset_bins=fov_offset_bins, ) self.hdus.append( create_aeff2d_hdu( - self.effective_area, - true_energy_bins, - fov_offset_bins, + effective_area=self.effective_area, + true_energy_bins=true_energy_bins, + fov_offset_bins=fov_offset_bins, point_like=self.point_like, extname="EFFECTIVE AREA", **extra_headers, @@ -563,16 +627,16 @@ def start(self): self.log.info("Effective Area HDU created") self.edisp = energy_dispersion( gammas, - true_energy_bins, - fov_offset_bins, - migration_bins, + true_energy_bins=true_energy_bins, + fov_offset_bins=fov_offset_bins, + migration_bins=migration_bins, ) self.hdus.append( create_energy_dispersion_hdu( self.edisp, - true_energy_bins, - migration_bins, - 
@@ -488,7 +552,7 @@ def start(self):
             else:
                 extra_headers["GH_EFF"] = (
                     self.cuts.gh_efficiency,
-                    "gamma/hadron efficiency"
+                    "gamma/hadron efficiency",
                 )

         if self.point_like:
@@ -496,7 +560,7 @@ def start(self):
             if self.energy_dependent_theta:
                 extra_headers["TH_CONT"] = (
                     self.cuts.theta_containment,
-                    "Theta containment region in percentage"
+                    "Theta containment region in percentage",
                 )
             else:
                 extra_headers["RAD_MAX"] = (
@@ -513,14 +577,14 @@ def start(self):
             if self.energy_dependent_alpha:
                 extra_headers["AL_CONT"] = (
                     self.cuts.alpha_containment,
-                    "Alpha containment region in percentage"
+                    "Alpha containment region in percentage",
                 )
             else:
                 extra_headers["AL_CUT"] = (
                     self.cuts.global_alpha_cut, 'deg'
                 )
-
+
         # Write HDUs
         self.hdus = [fits.PrimaryHDU(), ]
@@ -529,14 +593,14 @@ def start(self):
             self.effective_area = effective_area_per_energy(
                 gammas,
                 self.mc_particle["gamma"]["simulation_info"],
-                true_energy_bins,
+                true_energy_bins=true_energy_bins,
             )
             self.hdus.append(
                 create_aeff2d_hdu(
                     # add one dimension for single FOV offset
-                    self.effective_area[..., np.newaxis],
-                    true_energy_bins,
-                    fov_offset_bins,
+                    effective_area=self.effective_area[..., np.newaxis],
+                    true_energy_bins=true_energy_bins,
+                    fov_offset_bins=fov_offset_bins,
                     point_like=self.point_like,
                     extname="EFFECTIVE AREA",
                     **extra_headers,
@@ -546,14 +610,14 @@ def start(self):
             self.effective_area = effective_area_per_energy_and_fov(
                 gammas,
                 self.mc_particle["gamma"]["simulation_info"],
-                true_energy_bins,
-                fov_offset_bins,
+                true_energy_bins=true_energy_bins,
+                fov_offset_bins=fov_offset_bins,
             )
             self.hdus.append(
                 create_aeff2d_hdu(
-                    self.effective_area,
-                    true_energy_bins,
-                    fov_offset_bins,
+                    effective_area=self.effective_area,
+                    true_energy_bins=true_energy_bins,
+                    fov_offset_bins=fov_offset_bins,
                     point_like=self.point_like,
                     extname="EFFECTIVE AREA",
                     **extra_headers,
@@ -563,16 +627,16 @@ def start(self):
         self.log.info("Effective Area HDU created")
         self.edisp = energy_dispersion(
             gammas,
-            true_energy_bins,
-            fov_offset_bins,
-            migration_bins,
+            true_energy_bins=true_energy_bins,
+            fov_offset_bins=fov_offset_bins,
+            migration_bins=migration_bins,
         )
         self.hdus.append(
             create_energy_dispersion_hdu(
                 self.edisp,
-                true_energy_bins,
-                migration_bins,
-                fov_offset_bins,
+                true_energy_bins=true_energy_bins,
+                migration_bins=migration_bins,
+                fov_offset_bins=fov_offset_bins,
                 point_like=self.point_like,
                 extname="ENERGY DISPERSION",
                 **extra_headers,
@@ -590,8 +654,8 @@ def start(self):
         self.hdus.append(
             create_background_2d_hdu(
                 self.background.T,
-                reco_energy_bins,
-                background_offset_bins,
+                reco_energy_bins=reco_energy_bins,
+                fov_offset_bins=background_offset_bins,
                 extname="BACKGROUND",
                 **extra_headers,
             )
@@ -608,9 +672,9 @@ def start(self):
         self.hdus.append(
             create_psf_table_hdu(
                 self.psf,
-                true_energy_bins,
-                source_offset_bins,
-                fov_offset_bins,
+                true_energy_bins=true_energy_bins,
+                source_offset_bins=source_offset_bins,
+                fov_offset_bins=fov_offset_bins,
                 extname="PSF",
                 **extra_headers,
             )
@@ -649,10 +713,10 @@ def start(self):
         alpha_header = fits.Header()
         alpha_header["CREATOR"] = f"lstchain v{__version__}"
         alpha_header["DATE"] = Time.now().utc.iso
-
+
         for k, v in extra_headers.items():
             alpha_header[k] = v
-
+
         self.hdus.append(
             fits.BinTableHDU(
-                self.alpha_cuts, header=gh_header, name="AL_CUTS"
+                self.alpha_cuts, header=alpha_header, name="AL_CUTS"
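The HDUs assembled above can be checked like any GADF-style IRF product; a minimal sketch (the file name is hypothetical, and the GH_CUTS HDU is only present when energy-dependent cuts are enabled):

from astropy.io import fits
from astropy.table import QTable

# Hypothetical output of IRFFITSWriter
with fits.open("irf.fits.gz") as hdul:
    hdul.info()  # EFFECTIVE AREA, ENERGY DISPERSION, GH_CUTS, ...

gh_cuts = QTable.read("irf.fits.gz", hdu="GH_CUTS")
print(gh_cuts.meta["GH_EFF"])  # gamma/hadron efficiency behind the cuts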
diff --git a/lstchain/tools/tests/test_tools.py b/lstchain/tools/tests/test_tools.py
index 597c4149d..770a111ab 100644
--- a/lstchain/tools/tests/test_tools.py
+++ b/lstchain/tools/tests/test_tools.py
@@ -4,6 +4,7 @@
 from astropy.io import fits
 import numpy as np

+
 def test_create_irf_full_enclosure(temp_dir_observed_files, simulated_dl2_file):
     """
-    Generating full enclosure IRF file from a test DL2 files
+    Generating a full-enclosure IRF file from test DL2 files
@@ -94,7 +95,7 @@ def test_create_irf_point_like_srcdep(
     from lstchain.tools.lstchain_create_irf_files import IRFFITSWriter

     irf_file = temp_dir_observed_srcdep_files / "irf.fits.gz"
-
+
     assert (
         run_tool(
             IRFFITSWriter(),
@@ -106,8 +107,8 @@ def test_create_irf_point_like_srcdep(
                 "--overwrite",
             ],
             cwd=temp_dir_observed_srcdep_files,
-        )
-        == 0
+        )
+        == 0
     )

     with fits.open(irf_file) as hdul:
@@ -161,7 +162,7 @@ def test_create_irf_point_like_srcdep_energy_dependent_cuts(
     from astropy.table import QTable

     irf_file = temp_dir_observed_srcdep_files / "irf_edep.fits.gz"
-
+
     assert (
         run_tool(
             IRFFITSWriter(),
@@ -178,13 +179,13 @@ def test_create_irf_point_like_srcdep_energy_dependent_cuts(
         )
         == 0
     )
-
+
     gh_cuts = QTable.read(irf_file, hdu="GH_CUTS")
     assert isinstance(gh_cuts.meta["GH_EFF"], float)
-
+
     al_cuts = QTable.read(irf_file, hdu="AL_CUTS")
     assert isinstance(al_cuts.meta["AL_CONT"], float)
-
+

 @pytest.mark.private_data
 def test_create_dl3_energy_dependent_cuts(
     temp_dir_observed_files, observed_dl2_file
 ):
@@ -208,7 +209,9 @@ def test_create_dl3_energy_dependent_cuts(
         argv=[
             f"--input-dl2={observed_dl2_file}",
             f"--output-dl3-path={temp_dir_observed_files}",
-            f"--input-irf={irf_file}",
+            f"--input-irf-path={temp_dir_observed_files}",
+            "--irf-file-pattern=pnt_irf.fits.gz",
+            "--final-irf-file=final_pnt_irf.fits.gz",
             "--source-name=Crab",
             "--source-ra=83.633deg",
             "--source-dec=22.01deg",
@@ -237,7 +240,38 @@ def test_create_dl3(temp_dir_observed_files, observed_dl2_file, simulated_irf_fi
         argv=[
             f"--input-dl2={observed_dl2_file}",
             f"--output-dl3-path={temp_dir_observed_files}",
-            f"--input-irf={simulated_irf_file}",
+            f"--input-irf-path={simulated_irf_file.parent}",
+            f"--irf-file-pattern={simulated_irf_file.name}",
+            f"--final-irf-file={simulated_irf_file.name}",
             "--source-name=Crab",
             "--source-ra=83.633deg",
             "--source-dec=22.01deg",
             "--overwrite",
         ],
         cwd=temp_dir_observed_files,
     )
     == 0
 )

+@pytest.mark.private_data
+def test_create_dl3_irf_interp(
+    temp_dir_observed_files, observed_dl2_file, simulated_dl2_file
+):
+    """
+    Generating a DL3 file from test DL2 files using the IRF interpolation
+    function, to get either the interpolated IRF or the nearest IRF node
+    """
+    from lstchain.tools.lstchain_create_dl3_file import DataReductionFITSWriter
+
+    assert (
+        run_tool(
+            DataReductionFITSWriter(),
+            argv=[
+                f"--input-dl2={observed_dl2_file}",
+                f"--output-dl3-path={temp_dir_observed_files}",
+                f"--input-irf-path={simulated_dl2_file.parent}",
+                "--irf-file-pattern=en_dep_cut*gz",
+                "--final-irf-file=final_irf_interp_1.fits.gz",
+                "--source-name=Crab",
+                "--source-ra=83.633deg",
+                "--source-dec=22.01deg",
+                "--overwrite",
+            ],
+            cwd=temp_dir_observed_files,
+        )
+        == 0
+    )
@@ -248,6 +282,26 @@ def test_create_dl3(temp_dir_observed_files, observed_dl2_file, simulated_irf_fi
         == 0
     )

+    assert (
+        run_tool(
+            DataReductionFITSWriter(),
+            argv=[
+                f"--input-dl2={observed_dl2_file}",
+                f"--output-dl3-path={temp_dir_observed_files}",
+                f"--input-irf-path={simulated_dl2_file.parent}",
+                "--irf-file-pattern=en_dep_cut*gz",
+                "--final-irf-file=final_irf_interp_2.fits.gz",
+                "--source-name=Crab",
+                "--source-ra=83.633deg",
+                "--source-dec=22.01deg",
+                "--overwrite",
+                "--use-nearest-irf-node",
+            ],
+            cwd=temp_dir_observed_files,
+        )
+        == 0
+    )
+

 @pytest.mark.private_data
 def test_create_dl3_with_config(temp_dir_observed_files, observed_dl2_file):
@@ -257,7 +311,6 @@ def test_create_dl3_with_config(temp_dir_observed_files, observed_dl2_file):
     """
     from lstchain.tools.lstchain_create_dl3_file import DataReductionFITSWriter

-    irf_file = temp_dir_observed_files / "fe_irf.fits.gz"
     config_file = os.path.join(os.getcwd(), "docs/examples/dl3_tool_config.json")

     assert (
@@ -266,7 +319,8 @@ def test_create_dl3_with_config(temp_dir_observed_files, observed_dl2_file):
         argv=[
             f"--input-dl2={observed_dl2_file}",
             f"--output-dl3-path={temp_dir_observed_files}",
-            f"--input-irf={irf_file}",
+            f"--input-irf-path={temp_dir_observed_files}",
+            "--irf-file-pattern=fe_irf.fits.gz",
             "--source-name=Crab",
             "--source-ra=83.633deg",
             "--source-dec=22.01deg",
@@ -281,13 +335,14 @@ def test_create_dl3_with_config(temp_dir_observed_files, observed_dl2_file):
 @pytest.mark.private_data
 def test_create_srcdep_dl3(
-    temp_dir_observed_srcdep_files, observed_srcdep_dl2_file, simulated_srcdep_irf_file
+    temp_dir_observed_srcdep_files, observed_srcdep_dl2_file,
+    simulated_srcdep_irf_file
 ):
     """
-    Generating a source-dependent DL3 file from a test DL2 files and test IRF file
+    Generating a source-dependent DL3 file from test DL2 files and a test IRF file
     """
     from lstchain.tools.lstchain_create_dl3_file import DataReductionFITSWriter
-    from lstchain.paths import dl2_to_dl3_filename
+    from lstchain.paths import dl2_to_dl3_filename

     assert (
         run_tool(
@@ -295,7 +350,9 @@ def test_create_srcdep_dl3(
             argv=[
                 f"--input-dl2={observed_srcdep_dl2_file}",
                 f"--output-dl3-path={temp_dir_observed_srcdep_files}",
-                f"--input-irf={simulated_srcdep_irf_file}",
+                f"--input-irf-path={simulated_srcdep_irf_file.parent}",
+                f"--irf-file-pattern={simulated_srcdep_irf_file.name}",
+                f"--final-irf-file={simulated_srcdep_irf_file.name}",
                 "--source-name=Crab",
                 "--source-ra=83.633deg",
                 "--source-dec=22.01deg",
@@ -316,6 +373,7 @@ def test_create_srcdep_dl3(
     np.testing.assert_allclose(ra, 83.63, atol=1e-2)
     np.testing.assert_allclose(dec, 22.01, atol=1e-2)

+
 @pytest.mark.private_data
 def test_create_srcdep_dl3_energy_dependent_cuts(
     temp_dir_observed_srcdep_files, observed_srcdep_dl2_file
@@ -334,7 +392,8 @@ def test_create_srcdep_dl3_energy_dependent_cuts(
         argv=[
             f"--input-dl2={observed_srcdep_dl2_file}",
             f"--output-dl3-path={temp_dir_observed_srcdep_files}",
-            f"--input-irf={irf_file}",
+            f"--input-irf-path={irf_file.parent}",
+            f"--irf-file-pattern={irf_file.name}",
             "--source-name=Crab",
             "--source-ra=83.633deg",
             "--source-dec=22.01deg",
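The new CLI flags exercised in these tests wrap the high-level interpolation API; a minimal sketch of calling it directly (the file names, target values, and the "linear" method are assumptions for illustration, though the call shape matches the tool's own usage above):

from lstchain.high_level import check_in_delaunay_triangle, interpolate_irf

# Hypothetical IRF nodes produced at different MC pointings
irf_list = [
    "irf_zen10.fits.gz", "irf_zen23.fits.gz",
    "irf_zen32.fits.gz", "irf_zen43.fits.gz",
]
target = {"ZEN_PNT": 28.0, "AZ_PNT": 102.2, "B_DELTA": 72.5}

nodes = check_in_delaunay_triangle(irf_list, target)
if len(nodes) > 1:
    # Target lies inside a simplex of the IRF grid: interpolate
    hdus = interpolate_irf(nodes, target, "linear")
    hdus.writeto("irf_interp.fits.gz", overwrite=True)
else:
    # Outside the grid: a single nearest node is returned
    print(f"Falling back to nearest IRF node: {nodes[0]}")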
f"--irf-file-pattern={irf_file.name}", "--source-name=Crab", "--source-ra=83.633deg", "--source-dec=22.01deg", diff --git a/setup.py b/setup.py index b0b61ab89..9c44d07e0 100644 --- a/setup.py +++ b/setup.py @@ -54,7 +54,7 @@ def find_scripts(script_dir, prefix): 'numpy<1.22.0a0', 'pandas', 'protobuf~=3.20.0', - 'pyirf~=0.6.0', + 'pyirf==0.7', 'scipy', 'seaborn', 'scikit-learn~=1.0',