diff --git a/.github/workflows/continuous-integration.yaml b/.github/workflows/continuous-integration.yaml
index 1639000..4424731 100644
--- a/.github/workflows/continuous-integration.yaml
+++ b/.github/workflows/continuous-integration.yaml
@@ -9,7 +9,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ['3.9']
+        python-version: ['3.11']
 
     steps:
     - uses: conda-incubator/setup-miniconda@v2
diff --git a/augur/_version.py b/augur/_version.py
index d3ec452..493f741 100644
--- a/augur/_version.py
+++ b/augur/_version.py
@@ -1 +1 @@
-__version__ = "0.2.0"
+__version__ = "0.3.0"
diff --git a/augur/analyze.py b/augur/analyze.py
index 1d228db..ba3e0aa 100644
--- a/augur/analyze.py
+++ b/augur/analyze.py
@@ -41,6 +41,7 @@ def __init__(self, config, likelihood=None, tools=None, req_params=None):
         self.lk = likelihood  # Just to save some typing
         self.tools = tools
         self.req_params = req_params
+        self.data_fid = self.lk.get_data_vector()
 
         _config = parse_config(config)  # Load full config
         # Get the fiducial cosmological parameters
@@ -128,7 +129,9 @@ def f(self, x, labels, pars_fid, sys_fid):
             self.tools.reset()
             self.lk.reset()
             cosmo = ccl.Cosmology(**_pars)
-            self.lk.update(ParamsMap(_sys_pars))
+            pmap = ParamsMap(_sys_pars)
+            self.lk.update(pmap)
+            self.tools.update(pmap)
             self.tools.prepare(cosmo)
             f_out = self.lk.compute_theory_vector(self.tools)
         elif x.ndim == 2:
@@ -146,8 +149,10 @@ def f(self, x, labels, pars_fid, sys_fid):
                         raise ValueError(f'Parameter name {labels[j]} not recognized')
                 self.tools.reset()
                 self.lk.reset()
-                self.lk.update(ParamsMap(_sys_pars))
+                pmap = ParamsMap(_sys_pars)
+                self.lk.update(pmap)
                 cosmo = ccl.Cosmology(**_pars)
+                self.tools.update(pmap)
                 self.tools.prepare(cosmo)
                 f_out.append(self.lk.compute_theory_vector(self.tools))
         return np.array(f_out)
@@ -186,7 +191,7 @@ def get_fisher_matrix(self, method='5pt_stencil'):
         else:
             return self.Fij
 
-    def compute_fisher_bias(self):
+    def get_fisher_bias(self):
         # Compute Fisher bias following the generalized Amara formalism
         # More details in Bianca's thesis and the note here:
         # https://github.com/LSSTDESC/augur/blob/note_bianca/note/main.tex
@@ -195,8 +200,14 @@
         # They should have the same ells as the original data-vector
         # and the same length
         import os
+
+        if self.derivatives is None:
+            self.get_derivatives()
+
+        if self.Fij is None:
+            self.get_fisher_matrix()
+
         _calculate_biased_cls = True
-        _cls_fid = self.lk.measured_data_vector  # Get the fiducial data vector
 
         # Try to read the biased data vector
         if 'biased_dv' in self.config['fisher_bias']:
@@ -216,7 +227,7 @@
                    raise ValueError('The length of the provided Cls should be equal \
                                      to the length of the data-vector')
                _calculate_biased_cls = False
-               self.biased_cls = biased_cls['dv_sys'] - _cls_fid
+               self.biased_cls = biased_cls['dv_sys'] - self.data_fid
 
        # If there's no biased data vector, calculate it
        if _calculate_biased_cls:
@@ -240,13 +251,8 @@
            else:
                raise ValueError('bias_params is required if no biased_dv file is passed')
 
-           self.biased_cls = self.f(_x_here, _labels_here, _pars_here, _sys_here) - _cls_fid
+           self.biased_cls = self.f(_x_here, _labels_here, _pars_here, _sys_here) - self.data_fid
 
-       if self.derivatives is None:
-           self.get_derivatives()
        Bj = np.einsum('l, lm, jm', self.biased_cls, self.lk.inv_cov, self.derivatives)
-
-       if self.Fij is None:
-           self.get_fisher_matrix()
        bi = np.einsum('ij, j', np.linalg.inv(self.Fij), Bj)
        self.bi = bi
diff --git a/augur/generate.py b/augur/generate.py
index 4c8bc8b..cd95c4c 100644
--- a/augur/generate.py
+++ b/augur/generate.py
@@ -231,8 +231,16 @@ def generate_sacc_and_stats(config):
                    ells_here = ells[ells < ell_max]
                else:
                    ells_here = ells
-               for ell in ells_here:
-                   S.add_data_point(key, (tr1, tr2), 0.0, ell=ell, error=1e30)
+               # Trying to add bandpower windows
+               ells_aux = np.arange(0, np.max(ells_here)+1)
+               wgt = np.zeros((len(ells_aux), len(ells_here)))
+               for i in range(len(ells_here)):
+                   in_win = (ells_aux > ell_edges[i]) & (ells_aux < ell_edges[i+1])
+                   wgt[in_win, i] = 1.0
+               win = sacc.BandpowerWindow(ells_aux, wgt)
+               S.add_ell_cl(key, tr1, tr2,
+                            ells_here, np.zeros(len(ells_here)), window=win)
+
                # Now create TwoPoint objects for firecrown
                _aux_stat = TwoPoint(source0=sources[tr1], source1=sources[tr2],
                                     sacc_data_type=key)
@@ -242,7 +250,7 @@ def generate_sacc_and_stats(config):
     return S, cosmo, stats, sys_params
 
 
-def generate(config, return_all_outputs=False, write_sacc=True, force_read=True):
+def generate(config, return_all_outputs=False, write_sacc=True):
     """
     Generate likelihood object and sacc file with fiducial cosmology
 
@@ -257,26 +265,26 @@ def generate(config, return_all_outputs=False, write_sacc=True, force_read=True):
         likelihood object.
     write_sacc : bool
         If `True` it writes a sacc file with fiducial data vector.
-    force_read : bool
-        If `True` it repopulates the likelihood data vector with the contents of the Sacc file
-        generated here. Note: For high-condition covariances where the Cholesky decomposition
-        fails, setting force_read to `True` may result in a `LinAlgError`.
 
     Returns:
     -------
-    noise : float
-        Noise power for Cls for that particular tracer. That is 1/nbar for
-        number counts and e**2/nbar for weak lensing tracer.
-
-    Note:
-    -----
-    The input number_densities are #/arcmin.
-    The output units are in steradian.
+    lk : firecrown.likelihood.ConstGaussian
+        Likelihood object, only returned if `return_all_outputs` is True.
+    S : sacc.Sacc
+        Sacc object with fake data vector and covariance. It is only returned if
+        `return_all_outputs` is True.
+    tools : firecrown.modeling.ModelingTools
+        Modeling tools, only returned if `return_all_outputs` is True.
+    sys_params : dict
+        Dictionary containing the modeling systematic parameters. It is only returned if
+        `return_all_outputs` is True.
+ """ config = parse_config(config) # Generate placeholders S, cosmo, stats, sys_params = generate_sacc_and_stats(config) + # Generate likelihood object lk = ConstGaussian(statistics=stats) # Pass the correct binning/tracers @@ -293,6 +301,7 @@ def generate(config, return_all_outputs=False, write_sacc=True, force_read=True) cosmo.compute_nonlin_power() tools = ModelingTools(pt_calculator=pt_calculator) lk.update(sys_params) + tools.update(sys_params) tools.prepare(cosmo) # Run the likelihood (to get the theory) lk.compute_loglike(tools) @@ -306,7 +315,8 @@ def generate(config, return_all_outputs=False, write_sacc=True, force_read=True) st = st.statistic st.ready = False S.add_ell_cl(st.sacc_data_type, st.sacc_tracers[0], st.sacc_tracers[1], - st.ell_or_theta_, st.predicted_statistic_) + st.ell_or_theta_, st.get_theory_vector(), + window=st.theory_window_function) if config['cov_options']['cov_type'] == 'gaus_internal': fsky = config['cov_options']['fsky'] cov = get_gaus_cov(S, lk, cosmo, fsky, config) @@ -368,9 +378,7 @@ def generate(config, return_all_outputs=False, write_sacc=True, force_read=True) print(config['fiducial_sacc_path']) S.save_fits(config['fiducial_sacc_path'], overwrite=True) # Update covariance and inverse -- TODO need to update cholesky!! - if force_read: - # Hacky way to overwrite... - lk.read(S) - lk.measured_data_vector = lk.get_data_vector() + lk = ConstGaussian(statistics=stats) + lk.read(S) if return_all_outputs: return lk, S, tools, sys_params diff --git a/augur/tests/test_analyze.py b/augur/tests/test_analyze.py index c6d1080..5badba1 100644 --- a/augur/tests/test_analyze.py +++ b/augur/tests/test_analyze.py @@ -5,4 +5,4 @@ def test_analyze(): fish = Analyze('./examples/config_test.yml') fish.get_derivatives() fish.get_fisher_matrix() - fish.compute_fisher_bias() + fish.get_fisher_bias() diff --git a/environment.yml b/environment.yml index a3bafba..f284945 100644 --- a/environment.yml +++ b/environment.yml @@ -2,12 +2,11 @@ name: forecasting channels: - conda-forge dependencies: - - firecrown + - firecrown>=1.7.1 + - flake8 - healpy - jinja2 + - numdifftools - numpydoc - - pip - - flake8 - - pip: - - tjpcov - - qp + - qp-prob + - tjpcov diff --git a/pylintrc b/pylintrc new file mode 100644 index 0000000..0e8627d --- /dev/null +++ b/pylintrc @@ -0,0 +1,71 @@ +[MAIN] + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=0 + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.9 + +# Discover python modules and packages in the file system subtree. +recursive=yes + +# Add custom pylint plugins +# load-plugins=pylint_plugins.duplicate_code + +[MESSAGES CONTROL] + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable=c-extension-no-member +disable=too-few-public-methods + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=any + +# Naming style matching correct attribute names. +attr-naming-style=any + +# Naming style matching correct variable names. 
+variable-naming-style=any
+
+# Naming style matching correct module level constants names.
+const-naming-style=any
+
+# Naming style matching correct method names.
+method-naming-style=any
+
+# Naming style matching correct function names.
+function-naming-style=any
+
+[STRING]
+
+# This flag controls whether inconsistent-quotes generates a warning when the
+# character used as a quote delimiter is used inconsistently within a module.
+check-quote-consistency=yes
+
+[TYPECHECK]
+
+# Tells whether to warn about missing members when the owner of the attribute
+# is inferred to be None.
+ignore-none=false
+
+[FORMAT]
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=LF
+
+[DESIGN]
+max-args=20
+max-attributes=20
diff --git a/setup.cfg b/setup.cfg
index fbcdfb7..891b9ec 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -17,7 +17,7 @@ install_requires =
     jinja2
     healpy
     numpydoc
-    qp
+    qp-prob
 
 [options.packages.find]
 exclude =
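
For orientation (not part of the patch itself): a minimal sketch of how the updated API can be driven after this change, in the spirit of the updated augur/tests/test_analyze.py. The import paths from augur.generate and augur.analyze, and passing the example YAML path to generate(), are assumptions; the calls themselves (generate without force_read, Analyze, get_derivatives, get_fisher_matrix, the renamed get_fisher_bias, and the Fij/bi attributes) are taken from the diff above.

# Usage sketch under the assumptions stated above.
from augur.generate import generate   # assumed import path (module changed in this patch)
from augur.analyze import Analyze     # assumed import path

# generate() no longer accepts force_read; it now rebuilds a fresh ConstGaussian
# and reads the newly written sacc file before returning.
lk, S, tools, sys_params = generate('./examples/config_test.yml',
                                    return_all_outputs=True)

# Fisher forecast: compute_fisher_bias() is renamed to get_fisher_bias(), which
# now computes the derivatives and Fisher matrix itself if they are missing.
fish = Analyze('./examples/config_test.yml')
fish.get_derivatives()
fish.get_fisher_matrix()
fish.get_fisher_bias()
print(fish.Fij)  # Fisher matrix
print(fish.bi)   # Fisher-bias estimate for each parameter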