diff --git a/.github/codecov.yml b/.github/codecov.yml index 7e551e678..e00ce3d69 100644 --- a/.github/codecov.yml +++ b/.github/codecov.yml @@ -1,6 +1,2 @@ -flag_management: - default_rules: - carryforward: true - github_checks: annotations: false diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml index 6a56e06f8..a36a93839 100644 --- a/.github/workflows/msrv.yml +++ b/.github/workflows/msrv.yml @@ -19,5 +19,5 @@ jobs: - name: Run check run: | # enable the MSRV - rustup default 1.70.0 + rustup default 1.80.1 cargo check --all-features --all-targets diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index 6d8fe1fa8..ad304ee9e 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -11,7 +11,7 @@ jobs: strategy: matrix: - python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 61907ae49..a4ee50157 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -117,6 +117,7 @@ jobs: 3.9 3.11 3.12 + 3.13 3.10 - name: Install dependencies run: | @@ -379,6 +380,7 @@ jobs: 3.9 3.11 3.12 + 3.13 3.10 - name: Build wheels uses: PyO3/maturin-action@v1 @@ -429,6 +431,7 @@ jobs: 3.9 3.11 3.12 + 3.13 3.10 architecture: ${{ matrix.target }} - name: Build wheels diff --git a/.readthedocs.yml b/.readthedocs.yml index d8317c145..1cc0a04a8 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -8,7 +8,10 @@ build: os: ubuntu-22.04 tools: python: "3.10" - rust: "1.70" + commands: + - export RUST_WITHOUT=rust-docs,rustfmt + - asdf install rust latest + - asdf global rust latest python: install: diff --git a/CHANGELOG.md b/CHANGELOG.md index e8f234924..8b7e9d1df 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### 
Changed +- the macro `channel!` now accepts a channel specification that is of the + format `factor * (pid, ..) + ...` - Python API: dropped top-level Python interface layer - Python API: renamed `lumi` to `channel` in PyO3 Python interface. This concerns 1) the argument names of `convolute_with_one` and similar functions; @@ -33,12 +35,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - by default `pineappl plot` no longer shows a channel breakdown in the panel with absolute PDF predictions. However, this feature can be enabled with via a new array added at the start of the script +- raised MSRV to 1.80.1 +- changed the order of elements in `Grid::fill` of the parameter `ntuple` to + reflect the ordering of `kinematics` given to `Grid::new` ### Removed - Python API: removed `pineappl.grid.Grid.create()` and `pineappl.fk_table.FkTable.from_grid()` methods; use the constructors of the respective class instead +- removed the constructor `Grid::with_subgrid_type` ## [0.8.6] - 18/10/2024 @@ -106,8 +112,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - added `PidBasis::charge_conjugate` and `PidBasis::guess` - added `Grid::set_pid_basis` method - added `Grid::subgrids` and `Grid::subgrids_mut` methods -- added new switch `conv_fun_uncert_from` to subcommand `plot` to allow - choosing with convolution function uncertainty should be plotted +- added new switch `--conv-fun-uncert-from` to subcommand `plot` to allow + choosing which convolution function uncertainty should be plotted ### Changed diff --git a/Cargo.lock b/Cargo.lock index c375b37a7..12f49682d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -139,6 +139,9 @@ name = "bitflags" version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +dependencies = [ + "serde", +] [[package]] name = "block-buffer" @@ -206,7 +209,7 @@ version = "4.4.7" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", "syn", @@ -581,6 +584,12 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = "hermit-abi" version = "0.3.4" @@ -693,16 +702,6 @@ version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" -[[package]] -name = "lock_api" -version = "0.4.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" -dependencies = [ - "autocfg", - "scopeguard", -] - [[package]] name = "log" version = "0.4.20" @@ -847,9 +846,9 @@ dependencies = [ [[package]] name = "numpy" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec170733ca37175f5d75a5bea5911d6ff45d2cd52849ce98b685394e4f2f37f4" +checksum = "cf314fca279e6e6ac2126a4ff98f26d88aa4ad06bc68fb6ae5cf4bd706758311" dependencies = [ "libc", "ndarray", @@ -872,29 +871,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" -[[package]] -name = "parking_lot" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.9" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-targets 0.48.5", -] - [[package]] name = "percent-encoding" version = "2.3.1" @@ -948,7 +924,28 @@ dependencies = [ [[package]] name = "pineappl" -version = "0.8.2" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041fcf611eb0c41f1f6562b498fabdd1319c8d4572fd137ac244ca4e73d999c" +dependencies = [ + "anyhow", + "arrayvec", + "bincode", + "bitflags 2.4.2", + "enum_dispatch", + "float-cmp", + "git-version", + "itertools", + "lz4_flex", + "ndarray", + "rustc-hash", + "serde", + "thiserror", +] + +[[package]] +name = "pineappl" +version = "1.0.0-alpha1" dependencies = [ "anyhow", "arrayvec", @@ -963,6 +960,7 @@ dependencies = [ "ndarray", "ndarray-npy", "num-complex", + "pineappl 0.8.3", "rand", "rand_pcg", "rustc-hash", @@ -973,7 +971,7 @@ dependencies = [ [[package]] name = "pineappl_applgrid" -version = "0.8.2" +version = "1.0.0-alpha1" dependencies = [ "cc", "cxx", @@ -984,15 +982,15 @@ dependencies = [ [[package]] name = "pineappl_capi" -version = "0.8.2" +version = "1.0.0-alpha1" dependencies = [ "itertools", - "pineappl", + "pineappl 1.0.0-alpha1", ] [[package]] name = "pineappl_cli" -version = "0.8.2" +version = "1.0.0-alpha1" dependencies = [ "anyhow", "assert_cmd", @@ -1010,7 +1008,7 @@ dependencies = [ "managed-lhapdf", "ndarray", "ndarray-npy", - "pineappl", + "pineappl 1.0.0-alpha1", "pineappl_applgrid", "pineappl_fastnlo", "predicates", @@ -1023,7 +1021,7 @@ dependencies = [ [[package]] name = "pineappl_fastnlo" -version = "0.8.2" +version = "1.0.0-alpha1" dependencies = [ "cxx", "cxx-build", @@ -1033,12 +1031,12 @@ dependencies = [ [[package]] name = "pineappl_py" -version = "0.8.2" +version = "1.0.0-alpha1" dependencies = [ "itertools", "ndarray", "numpy", - "pineappl", + "pineappl 
1.0.0-alpha1", "pyo3", ] @@ -1099,9 +1097,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.78" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "7c3a7fc5db1e57d5a779a352c8cdb57b29aa4c40cc69c3a68a7fedc815fbf2f9" dependencies = [ "unicode-ident", ] @@ -1121,15 +1119,15 @@ dependencies = [ [[package]] name = "pyo3" -version = "0.21.2" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e00b96a521718e08e03b1a622f01c8a8deb50719335de3f60b3b3950f069d8" +checksum = "3d922163ba1f79c04bc49073ba7b32fd5a8d3b76a87c955921234b8e77333c51" dependencies = [ "cfg-if", "indoc", "libc", "memoffset", - "parking_lot", + "once_cell", "portable-atomic", "pyo3-build-config", "pyo3-ffi", @@ -1139,9 +1137,9 @@ dependencies = [ [[package]] name = "pyo3-build-config" -version = "0.21.2" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7883df5835fafdad87c0d888b266c8ec0f4c9ca48a5bed6bbb592e8dedee1b50" +checksum = "bc38c5feeb496c8321091edf3d63e9a6829eab4b863b4a6a65f26f3e9cc6b179" dependencies = [ "once_cell", "target-lexicon", @@ -1149,9 +1147,9 @@ dependencies = [ [[package]] name = "pyo3-ffi" -version = "0.21.2" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01be5843dc60b916ab4dad1dca6d20b9b4e6ddc8e15f50c47fe6d85f1fb97403" +checksum = "94845622d88ae274d2729fcefc850e63d7a3ddff5e3ce11bd88486db9f1d357d" dependencies = [ "libc", "pyo3-build-config", @@ -1159,9 +1157,9 @@ dependencies = [ [[package]] name = "pyo3-macros" -version = "0.21.2" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77b34069fc0682e11b31dbd10321cbf94808394c56fd996796ce45217dfac53c" +checksum = "e655aad15e09b94ffdb3ce3d217acf652e26bbc37697ef012f5e5e348c716e5e" dependencies = [ 
"proc-macro2", "pyo3-macros-backend", @@ -1171,11 +1169,11 @@ dependencies = [ [[package]] name = "pyo3-macros-backend" -version = "0.21.2" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08260721f32db5e1a5beae69a55553f56b99bd0e1c3e6e0a5e8851a9d0f5a85c" +checksum = "ae1e3f09eecd94618f60a455a23def79f79eba4dc561a97324bf9ac8c6df30ce" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "pyo3-build-config", "quote", @@ -1383,12 +1381,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - [[package]] name = "scratch" version = "1.0.7" @@ -1448,12 +1440,6 @@ dependencies = [ "digest", ] -[[package]] -name = "smallvec" -version = "1.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" - [[package]] name = "spin" version = "0.9.8" @@ -1480,9 +1466,9 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "syn" -version = "2.0.48" +version = "2.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +checksum = "83540f837a8afc019423a8edb95b52a8effe46957ee402287f4292fae35be021" dependencies = [ "proc-macro2", "quote", @@ -1502,9 +1488,9 @@ dependencies = [ [[package]] name = "target-lexicon" -version = "0.12.13" +version = "0.12.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69758bda2e78f098e4ccb393021a0963bb3442eac05f135c30f61b7370bbafae" +checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" [[package]] name = "tempfile" @@ -1939,7 +1925,7 @@ dependencies = [ [[package]] name = "xtask" -version = "0.8.2" +version = "1.0.0-alpha1" dependencies = [ 
"anyhow", "clap", diff --git a/Cargo.toml b/Cargo.toml index 5e3d24451..9fecccb9d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,8 +21,8 @@ edition = "2021" keywords = ["high-energy-physics", "physics"] license = "GPL-3.0-or-later" repository = "https://github.com/NNPDF/pineappl" -rust-version = "1.70.0" -version = "0.8.2" +rust-version = "1.80.1" +version = "1.0.0-alpha1" [workspace.lints.clippy] all = { level = "warn", priority = -1 } @@ -44,3 +44,8 @@ unsafe-op-in-unsafe-fn = "deny" codegen-units = 1 lto = true strip = "debuginfo" + +[profile.test-opt] +inherits = "test" +opt-level = 1 +debug = false diff --git a/README.md b/README.md index ff54d9ba3..e18744739 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ [![codecov](https://codecov.io/gh/NNPDF/pineappl/branch/master/graph/badge.svg)](https://codecov.io/gh/NNPDF/pineappl) [![Documentation](https://docs.rs/pineappl/badge.svg)](https://docs.rs/pineappl) [![crates.io](https://img.shields.io/crates/v/pineappl.svg)](https://crates.io/crates/pineappl) -[![MSRV](https://img.shields.io/badge/Rust-1.70+-lightgray.svg)](docs/installation.md) +[![MSRV](https://img.shields.io/badge/Rust-1.80+-lightgray.svg)](docs/installation.md) # What is PineAPPL? diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 000000000..e117ecbe2 --- /dev/null +++ b/clippy.toml @@ -0,0 +1,2 @@ +doc-valid-idents = ["APPLgrid", "PineAPPL", ".."] +too-many-arguments-threshold = 12 diff --git a/docs/installation.md b/docs/installation.md index a8c7c05ae..59d60bb38 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -251,7 +251,7 @@ already installed, make sure it is recent enough: cargo --version -This should show a version that is at least 1.70.0. If you do not have `cargo` +This should show a version that is at least 1.80.1. If you do not have `cargo` or if it is too old, go to and follow the instructions there. 
diff --git a/examples/.gitignore b/examples/.gitignore index 9a7a8727e..8173dfeb4 100644 --- a/examples/.gitignore +++ b/examples/.gitignore @@ -3,7 +3,9 @@ fortran/dyaa fortran/test +fortran/test_v1 fortran/lhapdf_example +fortran/lhapdf_example_v1 **/*.o **/*.mod diff --git a/examples/cpp/Makefile b/examples/cpp/Makefile index 61866d496..39498abd5 100644 --- a/examples/cpp/Makefile +++ b/examples/cpp/Makefile @@ -5,10 +5,12 @@ LHAPDF_DEPS != pkg-config --cflags --libs lhapdf PROGRAMS = \ fill-grid \ + fill-grid-v1 \ fill-custom-grid \ advanced-convolution \ advanced-filling \ convolve-grid \ + convolve-grid-v1 \ deprecated \ display-channels \ display-orders \ @@ -29,6 +31,9 @@ advanced-filling: advanced-filling.cpp convolve-grid: convolve-grid.cpp $(CXX) $(CXXFLAGS) $< $(LHAPDF_DEPS) $(PINEAPPL_DEPS) -o $@ +convolve-grid-v1: convolve-grid-v1.cpp + $(CXX) $(CXXFLAGS) $< $(LHAPDF_DEPS) $(PINEAPPL_DEPS) -o $@ + deprecated: deprecated.cpp $(CXX) $(CXXFLAGS) $< $(LHAPDF_DEPS) $(PINEAPPL_DEPS) -o $@ @@ -44,6 +49,9 @@ fill-custom-grid: fill-grid.cpp fill-grid: fill-grid.cpp $(CXX) $(CXXFLAGS) $< $(PINEAPPL_DEPS) -o $@ +fill-grid-v1: fill-grid-v1.cpp + $(CXX) $(CXXFLAGS) $< $(PINEAPPL_DEPS) -o $@ + merge-grids: merge-grids.cpp $(CXX) $(CXXFLAGS) $< $(PINEAPPL_DEPS) -o $@ diff --git a/examples/cpp/convolve-grid-v1.cpp b/examples/cpp/convolve-grid-v1.cpp new file mode 100644 index 000000000..6bb643f6b --- /dev/null +++ b/examples/cpp/convolve-grid-v1.cpp @@ -0,0 +1,124 @@ +//////////////////////////////////////////////////////////////////////////// +// Exactly the same as `convolve-grid.cpp` but using the generalized +// convolution: `pineappl_grid_convolve`. +// +// TODO: Make it such that it does not exactly copy `convolve-grid.cpp`, +// perhaps showing as an example something with 3 Convolutions. 
+//////////////////////////////////////////////////////////////////////////// +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +int main(int argc, char* argv[]) { + std::string filename = "drell-yan-rap-ll-v1.pineappl.lz4"; + std::string pdfset = "NNPDF31_nlo_as_0118_luxqed"; + + switch (argc) { + case 3: + pdfset = argv[2]; + // fall through + case 2: + filename = argv[1]; + case 1: + break; + + default: + std::cout << "Usage: " << argv[0] << " [grid] [pdf]\n"; + } + + // disable LHAPDF banners to guarantee deterministic output + LHAPDF::setVerbosity(0); + + // read the grid from a file + auto* grid = pineappl_grid_read(filename.c_str()); + + auto* pdf = LHAPDF::mkPDF(pdfset, 0); + + // define callables for the PDFs and alphas + auto xfx = [](int32_t id, double x, double q2, void* pdf) { + return static_cast (pdf)->xfxQ2(id, x, q2); + }; + auto alphas = [](double q2, void* pdf) { + return static_cast (pdf)->alphasQ2(q2); + }; + + // how many bins does this grid have? + std::size_t bins = pineappl_grid_bin_count(grid); + + // how many dimensions does each bin have? + std::size_t dims = pineappl_grid_bin_dimensions(grid); + + // allocate a vector holding the left and right bin limits for each dimension + std::vector bin_limits(2 * bins * dims); + + for (std::size_t dim = 0; dim != dims; ++dim) { + pineappl_grid_bin_limits_left(grid, dim, &bin_limits.at((2 * dim + 0) * bins)); + pineappl_grid_bin_limits_right(grid, dim, &bin_limits.at((2 * dim + 1) * bins)); + } + + // allocate a vector holding the differential cross sections + std::vector dxsec(bins); + + auto order_mask = nullptr; + auto channel_mask = nullptr; + double xir = 1.0; + double xif = 1.0; + + // perform the convolution of `grid` with the PDFs given as `xfx` and the strong coupling in + // `alphas` and the extra parameter `pdf`, which is passed to `xfx` and `alphas` as the last + // parameter. 
The integer `2212` is the PDG MC id for a proton and signals and `xfx` is the PDF + // of a proton. In this case we assume that both initial state hadrons' PDFs can derived from + // that of a proton. If this isn't the case, for instance for a proton-lead collision, both PDFs + // must be given separately and the function `pineappl_grid_convolve_with_two` must be used. + // The parameters `order_mask` and `channel_mask` can be used to select specific orders and + // channels, respectively. Using `xir` and `xif` the renormalization and factorization scales + // can be varied around its central values, respectively. + std::vector mu_scales = { xir, xif, 1.0 }; + using LambdaType = double(*)(int32_t, double, double, void *); + LambdaType xfxs[] = { xfx, xfx}; + pineappl_grid_convolve(grid, xfxs, alphas, pdf, order_mask, channel_mask, nullptr, 1, + mu_scales.data(), dxsec.data()); + + std::vector normalizations(bins); + + // read out the bin normalizations, which is usually the size of each bin + pineappl_grid_bin_normalizations(grid, normalizations.data()); + + // print table header + std::cout << "idx"; + for (std::size_t dim = 0; dim != dims; ++dim) { + std::cout << " left right"; + } + std::cout << " dsig/dx dx\n"; + std::cout << "---"; + for (std::size_t dim = 0; dim != dims; ++dim) { + std::cout << " ----dim #" << dim << "---"; + } + std::cout << " ------------ ------\n"; + + for (std::size_t bin = 0; bin != bins; ++bin) { + // print the bin index + std::cout << std::setw(3) << bin << ' '; + + for (std::size_t dim = 0; dim != dims; ++dim) { + double left_limit = bin_limits.at((2 * dim + 0) * bins + bin); + double right_limit = bin_limits.at((2 * dim + 1) * bins + bin); + + // print the left and right bin limit for each dimension + std::cout << std::setw(6) << left_limit << ' ' << std::setw(6) << right_limit << ' '; + } + + // print the result together with the normalization + std::cout << std::scientific << dxsec.at(bin) << std::defaultfloat << ' ' + << 
std::setw(6) << normalizations.at(bin) << '\n'; + } + + pineappl_grid_delete(grid); +} diff --git a/examples/cpp/fill-grid-v1.cpp b/examples/cpp/fill-grid-v1.cpp new file mode 100644 index 000000000..32cc11545 --- /dev/null +++ b/examples/cpp/fill-grid-v1.cpp @@ -0,0 +1,252 @@ +//////////////////////////////////////////////////////////////////////////// +// Exactly the same as `fill-grid.cpp` but using the generalization features +// introduced by v1. This in particular concerns the following functions: +// +// - pineappl_add_channel +// - pineappl_grid_new2 +// - pineappl_grid_fill2 +// +// TODO: Make it such that it does not exactly copy `fill-grid.cpp`, perhaps +// showing as an example something with 3 Convolutions. +//////////////////////////////////////////////////////////////////////////// +#include +#include + +#include +#include +#include +#include +#include +#include + +double int_photo(double s, double t, double u) { + double alpha0 = 1.0 / 137.03599911; + return alpha0 * alpha0 / 2.0 / s * (t / u + u / t); +} + +struct Psp2to2 { + double s; + double t; + double u; + double x1; + double x2; + double jacobian; +}; + +Psp2to2 hadronic_pspgen(std::mt19937& rng, double mmin, double mmax) { + using std::acos; + using std::log; + using std::pow; + + double smin = mmin * mmin; + double smax = mmax * mmax; + + double r1 = std::generate_canonical(rng); + double r2 = std::generate_canonical(rng); + double r3 = std::generate_canonical(rng); + + double tau0 = smin / smax; + double tau = pow(tau0, r1); + double y = pow(tau, 1.0 - r2); + double x1 = y; + double x2 = tau / y; + double s = tau * smax; + + double jacobian = tau * log(tau0) * log(tau0) * r1; + + // theta integration (in the CMS) + double cos_theta = 2.0 * r3 - 1.0; + jacobian *= 2.0; + + double t = -0.5 * s * (1.0 - cos_theta); + double u = -0.5 * s * (1.0 + cos_theta); + + // phi integration + jacobian *= 2.0 * acos(-1.0); + + return { s, t, u, x1, x2, jacobian }; +} + +void fill_grid(pineappl_grid* 
grid, std::size_t calls) { + using std::acosh; + using std::fabs; + using std::log; + using std::sqrt; + + auto rng = std::mt19937(); + + // in GeV^2 pbarn + double hbarc2 = 389379372.1; + + for (std::size_t i = 0; i != calls; ++i) { + // generate a phase-space point + auto tmp = hadronic_pspgen(rng, 10.0, 7000.0); + auto s = tmp.s; + auto t = tmp.t; + auto u = tmp.u; + auto x1 = tmp.x1; + auto x2 = tmp.x2; + auto jacobian = tmp.jacobian; + + double ptl = sqrt((t * u / s)); + double mll = sqrt(s); + double yll = 0.5 * log(x1 / x2); + double ylp = fabs(yll + acosh(0.5 * mll / ptl)); + double ylm = fabs(yll - acosh(0.5 * mll / ptl)); + + jacobian *= hbarc2 / calls; + + // cuts for LO for the invariant-mass slice containing the + // Z-peak from CMSDY2D11 + if ((ptl < 14.0) || (fabs(yll) > 2.4) || (ylp > 2.4) + || (ylm > 2.4) || (mll < 60.0) || (mll > 120.0)) + { + continue; + } + + auto weight = jacobian * int_photo(s, t, u); + double q2 = 90.0 * 90.0; + std::size_t order = 0; + std::size_t channel = 0; + + // Values of the kinematic variables + std::vector ntuples = {q2, x1, x2}; + + // fill the LO `weight` into `grid` for parton fractions `x1` and `x2`, and the (squared) + // renormalization/factorization scale `q2`. The parameters `order` and `channel` are + // indices defined from the arrays `orders` and `channel` used in creating the grid. 
In this + // case they are both `0` and denote the order #0 (leading order) and the channel #0 + // (photon-photon channel), respectively + pineappl_grid_fill2(grid, order, fabs(yll), channel, ntuples.data(), weight); + } +} + +int main() { + // --- + // Create all channels + + // this object will contain all channels (initial states) that we define + auto* channels = pineappl_lumi_new(); + + // Specify the dimension of the channel, ie the number of convolutions required + std::size_t nb_convolutions = 2; + + // photon-photon initial state, where `22` is the photon (PDG MC ids) + int32_t pids1[] = { 22, 22 }; + + // factor that each channel is multiplied with when convoluting with PDFs + double factors1[] = { 1.0 }; + + // define the channel #0 + pineappl_channels_add(channels, 1, nb_convolutions, pids1, factors1); + + // create another channel, which we won't fill, however + + // this channel is the down-type-antidown-type quark channel; here we combine down-antidown, + // strange-antistrange and bottom-antibottom into a single channel, which is often done if the + // CKM matrix is taken to be diagonal + int32_t pids2[] = { 1, -1, 3, -3, 5, -5 }; + + // for each pair of particle ids we need to give a factor; in case of a non-diagonal CKM matrix + // we could factor out the CKM matrix elements in this array and still treat the down-type + // contributions in a single channel. 
In this case, however, all factors are `1.0`, for which we + // can also pass `nullptr` + + // define the channel #1 + pineappl_channels_add(channels, 3, nb_convolutions, pids2, nullptr); + + // --- + // Specify the perturbative orders that will be filled into the grid + + // in this example we only fill the LO, which has the exponents + // - 0 of alphas, + // - 2 of alpha (electroweak coupling), + // - 0 of log (xiR^2) (renormalization scale logarithm) and + // - 0 of log (xiF^2) (factorization scale logarithm) + std::vector orders = { + 0, 2, 0, 0, 0, // order #0: LO + 1, 2, 0, 0, 0, // order #1: NLO QCD + 1, 2, 0, 1, 0 // order #2: NLO QCD factorization log + }; + + // --- + // Specify the bin limits + + // Similar to many Monte Carlo integrators PineAPPL supports only one-dimensional differential + // distributions, and only one distribution for each grid. However, one can generate multiple + // grids to support multiple distributions, and since every n-dimensional distribution can be + // written as a one-dimensional one (by using the bin index as a new binning variable, for + // instance), this isn't a limitation. + + // we bin the rapidity of the final-state lepton pair from 0 to 2.4 in steps of 0.1 + std::vector bins = { + 0.0, + 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, + 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4 + }; + + // --- + // Construct the objects that are needed to fill the Grid + + // First we define the types of convolutions required by the involved initial-/final-state + // hadrons. Then we add the corresponding PID of each of the hadrons, and finally define the + // Basis onto which the partons are mapped. + PidBasis pid_basis = Evol; + int32_t pdg_ids[2] = { 2212, 2212}; + ConvType h1 = UnpolPDF; + ConvType h2 = UnpolPDF; + ConvType convolution_types[2] = { h1, h2 }; + + // Define the kinematics required for this process. 
In the following example we have ONE + // single scale and two momentum fractions (corresponding to the two initial-state hadrons). + // The format of the kinematics is: { type, value }. + Kinematics scales = { Scale, 0 }; + Kinematics x1 = { X, 0 }; + Kinematics x2 = { X, 1 }; + Kinematics kinematics[3] = { scales, x1, x2 }; + + // Define the specificities of the interpolations for each of the kinematic variables. + ReweightMeth scales_reweight = NoReweight; // Reweighting method + ReweightMeth moment_reweight = ApplGridX; + Map scales_mapping = ApplGridH0; // Mapping method + Map moment_mapping = ApplGridF2; + InterpMeth interpolation_meth = Lagrange; + InterpTuples interpolations[3] = { + { 1e2, 1e8, 40, 3, scales_reweight, scales_mapping, interpolation_meth }, // Interpolation fo `scales` + { 2e-7, 1.0, 50, 3, moment_reweight, moment_mapping, interpolation_meth }, // Interpolation fo `x1` + { 2e-7, 1.0, 50, 3, moment_reweight, moment_mapping, interpolation_meth }, // Interpolation fo `x2` + }; + + // Define the unphysical scale objecs + size_t mu_scales[] = { 1, 1, 0 }; + + // --- + // Create the grid using the previously set information about orders, bins and channels + + // create a new grid with the previously defined channels, 3 perturbative orders defined by the + // exponents in `orders`, 24 bins given as the 25 limits in `bins` and potential extra + // parameters in `keyval`. + auto* grid = pineappl_grid_new2(pid_basis, channels, orders.size() / 5, orders.data(), bins.size() - 1, + bins.data(), nb_convolutions, convolution_types, pdg_ids, kinematics, interpolations, mu_scales); + + // now we no longer need `keyval` and `lumi` + pineappl_lumi_delete(channels); + + // --- + // Fill the grid with phase-space points + fill_grid(grid, 10000000); + + std::string filename = "drell-yan-rap-ll-v1.pineappl"; + + // --- + // Write the grid to disk - the filename can be anything ... 
+ pineappl_grid_write(grid, filename.c_str()); + + // but if it has an `.lz4` suffix ... + filename.append(".lz4"); + // the grid is automatically LZ4 compressed + pineappl_grid_write(grid, filename.c_str()); + + // destroy the object + pineappl_grid_delete(grid); +} diff --git a/examples/cpp/output b/examples/cpp/output index dd0c7936b..2e742d59a 100644 --- a/examples/cpp/output +++ b/examples/cpp/output @@ -37,6 +37,32 @@ idx p-p c#0 l#0 p-p~ c#0 l# p-d c#0 l#0 p-d dx 22 1.967518e-02 1.967518e-02 1.967518e-02 1.967518e-02 0.1 23 5.565306e-03 5.565306e-03 5.565306e-03 5.565306e-03 0.1 idx left right dsig/dx dx +--- ----dim #0--- ------------ ------ + 0 0 0.1 5.263109e-01 0.1 + 1 0.1 0.2 5.254908e-01 0.1 + 2 0.2 0.3 5.246824e-01 0.1 + 3 0.3 0.4 5.188340e-01 0.1 + 4 0.4 0.5 5.175482e-01 0.1 + 5 0.5 0.6 5.008841e-01 0.1 + 6 0.6 0.7 4.905325e-01 0.1 + 7 0.7 0.8 4.675734e-01 0.1 + 8 0.8 0.9 4.393159e-01 0.1 + 9 0.9 1 3.992921e-01 0.1 + 10 1 1.1 3.706801e-01 0.1 + 11 1.1 1.2 3.264717e-01 0.1 + 12 1.2 1.3 2.849345e-01 0.1 + 13 1.3 1.4 2.486723e-01 0.1 + 14 1.4 1.5 2.110419e-01 0.1 + 15 1.5 1.6 1.797439e-01 0.1 + 16 1.6 1.7 1.471492e-01 0.1 + 17 1.7 1.8 1.205566e-01 0.1 + 18 1.8 1.9 9.491625e-02 0.1 + 19 1.9 2 7.255720e-02 0.1 + 20 2 2.1 5.056967e-02 0.1 + 21 2.1 2.2 3.491788e-02 0.1 + 22 2.2 2.3 1.967518e-02 0.1 + 23 2.3 2.4 5.565306e-03 0.1 +idx left right dsig/dx dx --- ----dim #0--- ------------ ------ 0 0 0.1 5.263109e-01 0.1 1 0.1 0.2 5.254908e-01 0.1 diff --git a/examples/fortran/Makefile b/examples/fortran/Makefile index c3765d2aa..bed36838a 100644 --- a/examples/fortran/Makefile +++ b/examples/fortran/Makefile @@ -6,12 +6,14 @@ LHAPDF_LIBS != pkg-config lhapdf --libs %.o: %.f90 $(FC) $(FFLAGS) -c $< -all: pineappl.o dyaa.o test.o +all: pineappl.o dyaa.o test.o test_v1.o $(FC) $(FFLAGS) dyaa.o pineappl.o $(PINEAPPL_LIBS) -o dyaa $(FC) $(FFLAGS) test.o pineappl.o $(PINEAPPL_LIBS) -o test + $(FC) $(FFLAGS) test_v1.o pineappl.o $(PINEAPPL_LIBS) -o test_v1 
-lhapdf_example: pineappl.o lhapdf_example.o +lhapdf_examples: pineappl.o lhapdf_example.o lhapdf_example_v1.o $(FC) $(FFLAGS) lhapdf_example.o pineappl.o $(LHAPDF_LIBS) $(PINEAPPL_LIBS) -o lhapdf_example + $(FC) $(FFLAGS) lhapdf_example_v1.o pineappl.o $(LHAPDF_LIBS) $(PINEAPPL_LIBS) -o lhapdf_example_v1 clean: - rm -f *.o *.mod dyaa test lhapdf_example + rm -f *.o *.mod dyaa test test_v1 lhapdf_example lhapdf_example_v1 diff --git a/examples/fortran/lhapdf_example.f90 b/examples/fortran/lhapdf_example.f90 index 969fcc487..1c7061ebe 100644 --- a/examples/fortran/lhapdf_example.f90 +++ b/examples/fortran/lhapdf_example.f90 @@ -10,13 +10,13 @@ program lhapdf_example type(pineappl_lumi) :: lumi type(pineappl_keyval) :: key_vals - procedure (pineappl_xfx), pointer :: xfx - procedure (pineappl_alphas), pointer :: alphas + type(pineappl_xfx) :: xfx + type(pineappl_alphas) :: alphas integer, target :: flags(2) lumi = pineappl_lumi_new() - call pineappl_lumi_add(lumi, 2, [0, 0, 1, -1, 2, -2], [1.0_dp, 1.0_dp, 1.0_dp]) + call pineappl_lumi_add(lumi, 3, [0, 0, 1, -1, 2, -2], [1.0_dp, 1.0_dp, 1.0_dp]) key_vals = pineappl_keyval_new() grid = pineappl_grid_new(lumi, 1, [2, 0, 0, 0], 2, [0.0_dp, 1.0_dp, 2.0_dp], key_vals) @@ -28,19 +28,19 @@ program lhapdf_example call lhapdf_initpdfset_byname(1, "nCTEQ15FullNuc_208_82") ! calling pineappl_grid_convolve without any flags - xfx => xfx_test1 - alphas => alphas_test1 + xfx = pineappl_xfx(xfx_test1) + alphas = pineappl_alphas(alphas_test1) write(*, *) "first pineappl_grid_convolve_with_one: " write(*, *) pineappl_grid_convolve_with_one(grid, 2212, xfx, alphas, & - [.true., .true.], [.true., .true.], 1.0_dp, 1.0_dp) + [.true.], [.true.], 1.0_dp, 1.0_dp) ! 
calling pineappl_grid_convolve with two integer flags that are used in xfx_test2 and alphas_test2 to determine the set and member indices - xfx => xfx_test2 - alphas => alphas_test2 + xfx = pineappl_xfx(xfx_test2) + alphas = pineappl_alphas(alphas_test2) flags = [1, 0] write(*, *) "second pineappl_grid_convolve_with_one: " write(*, *) pineappl_grid_convolve_with_one(grid, 2212, xfx, alphas, & - [.true., .true.], [.true., .true.], 1.0_dp, 1.0_dp, c_loc(flags(1))) + [.true.], [.true.], 1.0_dp, 1.0_dp, c_loc(flags(1))) contains ! Passing a Fortran procedure to C needs the iso_c_binding @@ -99,7 +99,7 @@ function alphas_test2(q2, state) bind(c) call c_f_pointer(state, flags, [2]) - call lhapdf_alphasq2(0, 0, q2, alphas_test2) + call lhapdf_alphasq2(flags(1), flags(2), q2, alphas_test2) end function end program lhapdf_example diff --git a/examples/fortran/lhapdf_example_v1.f90 b/examples/fortran/lhapdf_example_v1.f90 new file mode 100644 index 000000000..a61f74e4f --- /dev/null +++ b/examples/fortran/lhapdf_example_v1.f90 @@ -0,0 +1,132 @@ +program lhapdf_example + use iso_c_binding + use pineappl + + implicit none + + integer, parameter :: dp = kind(0.0d0) + + type(pineappl_grid) :: grid + type(pineappl_lumi) :: channels + type(pineappl_kinematics) :: kinematics(3) + type(pineappl_interp_tuples) :: interpolations(3) + + type(pineappl_xfx) :: xfx(2) + type(pineappl_alphas) :: alphas + + integer(kind(pineappl_reweight_meth)) :: q2_reweight + integer(kind(pineappl_reweight_meth)) :: x_reweight + integer(kind(pineappl_map)) :: q2_mapping + integer(kind(pineappl_map)) :: x_mapping + integer(kind(pineappl_interp_meth)) :: interpolation_meth + + integer, target :: flags(2) + + channels = pineappl_channels_new() + call pineappl_channels_add(channels, 3, 2, [0, 0, 1, -1, 2, -2], [1.0_dp, 1.0_dp, 1.0_dp]) + + kinematics = [& + pineappl_kinematics(pineappl_scale, 0), & + pineappl_kinematics(pineappl_x, 0), & + pineappl_kinematics(pineappl_x, 1) & + ] + + q2_reweight = 
pineappl_no_reweight + x_reweight = pineappl_applgrid_x + q2_mapping = pineappl_applgrid_h0 + x_mapping = pineappl_applgrid_f2 + interpolation_meth = pineappl_lagrange + interpolations = [ & + pineappl_interp_tuples(1e2_dp, 1e8_dp, 40, 3, q2_reweight, q2_mapping, interpolation_meth), & + pineappl_interp_tuples(2e-7_dp, 1.0_dp, 50, 3, x_reweight, x_mapping, interpolation_meth), & + pineappl_interp_tuples(2e-7_dp, 1.0_dp, 50, 3, x_reweight, x_mapping, interpolation_meth) & + ] + + grid = pineappl_grid_new2(pineappl_pdg, channels, 1, [2_1, 0_1, 0_1, 0_1, 0_1], 2, & + [0.0_dp, 1.0_dp, 2.0_dp], 2, [pineappl_unpol_pdf, pineappl_unpol_pdf], [2212, 2212], kinematics, interpolations, [1, 1, 0]) + + call pineappl_grid_fill_all2(grid, 0, 0.5_dp, [100.0_dp, 0.5_dp, 0.5_dp], [0.5_dp, 0.5_dp, 0.5_dp]) + call pineappl_grid_fill_all2(grid, 0, 1.5_dp, [100.0_dp, 0.5_dp, 0.5_dp], [1.5_dp, 1.5_dp, 1.5_dp]) + + call lhapdf_initpdfset_byname(0, "nCTEQ15_1_1") + ! call lhapdf_initpdfset_byname(0, "nCTEQ15FullNuc_208_82") + call lhapdf_initpdfset_byname(1, "nCTEQ15FullNuc_208_82") + + ! write(*, *) "xfx_test1: ", xfx_test1(0, 0.5_dp, 100.0_dp, c_null_ptr) + + ! calling pineappl_grid_convolve without any flags + xfx = pineappl_xfx(xfx_test1) + alphas = pineappl_alphas(alphas_test1) + write(*, *) "first pineappl_grid_convolve: " + write(*, *) pineappl_grid_convolve(grid, [xfx, xfx], alphas, & + [.true.], [.true.], [0, 1], 1, [1.0_dp, 1.0_dp, 1.0_dp]) + + ! calling pineappl_grid_convolve with two integer flags that are used in xfx_test2 and alphas_test2 to determine the set and member indices + xfx = pineappl_xfx(xfx_test2) + alphas = pineappl_alphas(alphas_test2) + flags = [1, 0] + write(*, *) "second pineappl_grid_convolve: " + write(*, *) pineappl_grid_convolve(grid, [xfx, xfx], alphas, & + [.true.], [.true.], [0, 1], 1, [1.0_dp, 1.0_dp, 1.0_dp], c_loc(flags(1))) +contains + + ! 
Passing a Fortran procedure to C needs the iso_c_binding + function xfx_test1(pdg_id, x, q2, state) bind(c) + use iso_c_binding + + implicit none + + integer(c_int32_t), value, intent(in) :: pdg_id + real(c_double), value, intent(in) :: x, q2 + type(c_ptr), value, intent(in) :: state + real(c_double) :: xfx_test1 + + call lhapdf_xfxq2(0, 0, pdg_id, x, q2, xfx_test1) + end function + + function xfx_test2(pdg_id, x, q2, state) bind(c) + use iso_c_binding + + implicit none + + integer(c_int32_t), value, intent(in) :: pdg_id + real(c_double), value, intent(in) :: x, q2 + type(c_ptr), value, intent(in) :: state + real(c_double) :: xfx_test2 + + integer, pointer :: flags(:) + + call c_f_pointer(state, flags, [2]) + + call lhapdf_xfxq2(flags(1), flags(2), pdg_id, x, q2, xfx_test2) + end function + + function alphas_test1(q2, state) bind(c) + use iso_c_binding + + implicit none + + real(c_double), value, intent(in) :: q2 + type(c_ptr), value, intent(in) :: state + real(c_double) :: alphas_test1 + + call lhapdf_alphasq2(0, 0, q2, alphas_test1) + end function + + function alphas_test2(q2, state) bind(c) + use iso_c_binding + + implicit none + + real(c_double), value, intent(in) :: q2 + type(c_ptr), value, intent(in) :: state + real(c_double) :: alphas_test2 + + integer, pointer :: flags(:) + + call c_f_pointer(state, flags, [2]) + + call lhapdf_alphasq2(flags(1), flags(2), q2, alphas_test2) + end function + +end program lhapdf_example diff --git a/examples/fortran/pineappl.f90 b/examples/fortran/pineappl.f90 index 470ec23c3..cdd881750 100644 --- a/examples/fortran/pineappl.f90 +++ b/examples/fortran/pineappl.f90 @@ -1,5 +1,6 @@ module pineappl - use iso_c_binding, only: c_null_ptr, c_ptr + use iso_c_binding + use iso_fortran_env implicit none @@ -17,8 +18,78 @@ module pineappl type (c_ptr) :: ptr = c_null_ptr end type + + ! As a workaround for typing Fortran enums, we define the name of the enum as the last enum value. This way, variables can be declared as, e.g. 
for pineappl_conv_type, integer(kind(pineappl_conv_type)). The compiler doesn't check that a value is from the right enum, but it clarifies the code for the user. + + enum, bind(c) ! :: pineappl_conv_type + enumerator :: pineappl_unpol_pdf + enumerator :: pineappl_pol_pdf + enumerator :: pineappl_unpol_ff + enumerator :: pineappl_pol_ff + + enumerator :: pineappl_conv_type + end enum + + enum, bind(c) ! :: pineappl_interp_meth + enumerator :: pineappl_lagrange + + enumerator :: pineappl_interp_meth + end enum + + enum, bind(c) ! :: pineappl_map + enumerator :: pineappl_applgrid_f2 + enumerator :: pineappl_applgrid_h0 + + enumerator :: pineappl_map + end enum + + enum, bind(c) ! :: pineappl_pid_basis + enumerator :: pineappl_pdg + enumerator :: pineappl_evol + + enumerator :: pineappl_pid_basis + end enum + + enum, bind(c) ! :: pineappl_reweight_meth + enumerator :: pineappl_applgrid_x + enumerator :: pineappl_no_reweight + + enumerator :: pineappl_reweight_meth + end enum + + enum, bind(c) ! :: pineappl_kinematics_tag + enumerator :: pineappl_scale + enumerator :: pineappl_x + + enumerator :: pineappl_kinematics_tag + end enum + + ! The Kinematics struct is a tuple-like struct in the Pineappl Rust code, which is realized as a C union. 
Fortran does not support unions, but fortunately the union is only for storing ints, so we just use an integer variable for `index` + type, bind(c) :: pineappl_kinematics + integer(kind(pineappl_kinematics_tag)) :: tag + integer(c_size_t) :: index + end type + + type, bind(c) :: pineappl_interp_tuples + real(c_double) :: node_min + real(c_double) :: node_max + integer(c_size_t) :: nb_nodes + integer(c_size_t) :: interp_degree + integer(kind(pineappl_reweight_meth)) :: reweighting_method + integer(kind(pineappl_map)) :: mapping + integer(kind(pineappl_interp_meth)) :: interpolation_method + end type + + type :: pineappl_xfx + procedure (pineappl_xfx_proc), pointer, nopass :: proc + end type + + type :: pineappl_alphas + procedure (pineappl_alphas_proc), pointer, nopass :: proc + end type + abstract interface - function pineappl_xfx(pdg_id, x, q2, state) bind(c) + function pineappl_xfx_proc(pdg_id, x, q2, state) bind(c) use iso_c_binding implicit none @@ -26,17 +97,17 @@ function pineappl_xfx(pdg_id, x, q2, state) bind(c) integer(c_int32_t), value, intent(in) :: pdg_id real(c_double), value, intent(in) :: x, q2 type (c_ptr), value, intent(in) :: state - real(c_double) :: pineappl_xfx + real(c_double) :: pineappl_xfx_proc end function - function pineappl_alphas(q2, state) bind(c) + function pineappl_alphas_proc(q2, state) bind(c) use iso_c_binding implicit none real(c_double), value, intent(in) :: q2 type (c_ptr), value, intent(in) :: state - real(c_double) :: pineappl_alphas + real(c_double) :: pineappl_alphas_proc end function end interface @@ -50,6 +121,20 @@ function strlen(s) bind(c, name="strlen") integer (c_size_t) :: strlen end function strlen + subroutine channels_add(lumi, combinations, nb_combinations, pdg_id_combinations, factors) & + bind(c, name = 'pineappl_channels_add') + + use iso_c_binding + type (c_ptr), value :: lumi + integer (c_size_t), value :: combinations, nb_combinations + integer (c_int32_t) :: pdg_id_combinations(*) + real (c_double) :: 
factors(*) + end subroutine + + type (c_ptr) function channels_new() bind(c, name = 'pineappl_channels_new') + use iso_c_binding + end function + integer (c_size_t) function grid_bin_count(grid) bind(c, name = 'pineappl_grid_bin_count') use iso_c_binding type (c_ptr), value :: grid @@ -85,6 +170,20 @@ type (c_ptr) function grid_clone(grid) bind(c, name = 'pineappl_grid_clone') type (c_ptr), value :: grid end function + subroutine grid_convolve(grid, xfxs, alphas, state, order_mask, channel_mask, & + bin_indices, nb_scales, mu_scales, results) & + bind(c, name = 'pineappl_grid_convolve') + + use iso_c_binding + type (c_ptr), value :: grid, state + type (c_funptr) :: xfxs(*) + type (c_funptr), value :: alphas + logical (c_bool) :: order_mask(*), channel_mask(*) + integer (c_size_t) :: bin_indices(*) + integer (c_size_t), value :: nb_scales + real (c_double) :: mu_scales(*), results(*) + end subroutine + subroutine grid_convolve_with_one(grid, pdg_id, xfx, alphas, state, order_mask, lumi_mask, xi_ren, xi_fac, results) & bind(c, name = 'pineappl_grid_convolve_with_one') use iso_c_binding @@ -125,6 +224,14 @@ subroutine grid_fill(grid, x1, x2, q2, order, observable, lumi, weight) bind(c, integer (c_size_t), value :: order, lumi end subroutine + subroutine grid_fill2(grid, order, observable, channel, ntuple, weight) bind(c, name = 'pineappl_grid_fill2') + use iso_c_binding + type (c_ptr), value :: grid + integer (c_size_t), value :: order, channel + real (c_double), value :: observable, weight + real (c_double) :: ntuple(*) + end subroutine + subroutine grid_fill_all(grid, x1, x2, q2, order, observable, weights) bind(c, name = 'pineappl_grid_fill_all') use iso_c_binding type (c_ptr), value :: grid @@ -133,6 +240,15 @@ subroutine grid_fill_all(grid, x1, x2, q2, order, observable, weights) bind(c, n integer (c_size_t), value :: order end subroutine + subroutine grid_fill_all2(grid, order, observable, ntuple, weights) bind(c, name = 'pineappl_grid_fill_all2') + use 
iso_c_binding + type (c_ptr), value :: grid + integer (c_size_t), value :: order + real (c_double), value :: observable + real (c_double) :: ntuple(*) + real (c_double) :: weights(*) + end subroutine + subroutine grid_fill_array(grid, x1, x2, q2, orders, observables, lumis, weights, size) & bind(c, name = 'pineappl_grid_fill_array') use iso_c_binding @@ -142,6 +258,15 @@ subroutine grid_fill_array(grid, x1, x2, q2, orders, observables, lumis, weights integer (c_size_t), value :: size end subroutine + subroutine grid_fill_array2(grid, orders, observables, ntuples, lumis, weights, size) & + bind(c, name = 'pineappl_grid_fill_array2') + use iso_c_binding + type (c_ptr), value :: grid + integer (c_size_t) :: orders(*), lumis(*) + real (c_double) :: observables(*), ntuples(*), weights(*) + integer (c_size_t), value :: size + end subroutine + function grid_key_value(grid, key) bind(c, name = 'pineappl_grid_key_value') use iso_c_binding type (c_ptr), value :: grid @@ -175,6 +300,23 @@ type (c_ptr) function grid_new(lumi, orders, order_params, bins, bin_limits, key real (c_double) :: bin_limits(*) end function + type (c_ptr) function grid_new2(pid_basis, channels, orders, order_params, bins, bin_limits, nb_convolutions, & + convolution_types, pdg_ids, kinematics, interpolations, mu_scales) bind(c, name = 'pineappl_grid_new2') + use iso_c_binding + import ! 
so we can use pineappl_kinematics and pineappl_interp_tuples + + integer (c_int32_t), value :: pid_basis + type (c_ptr), value :: channels + integer (c_int32_t) :: convolution_types(*) + integer (c_size_t), value :: orders, bins, nb_convolutions + integer (c_int8_t) :: order_params(*) + real (c_double) :: bin_limits(*) + integer (c_int32_t) :: pdg_ids(*) + type (pineappl_kinematics) :: kinematics(*) + type (pineappl_interp_tuples) :: interpolations(*) + integer (c_size_t) :: mu_scales(*) + end function + subroutine grid_optimize(grid) bind(c, name = 'pineappl_grid_optimize') use iso_c_binding type (c_ptr), value :: grid @@ -339,6 +481,14 @@ subroutine lumi_entry(lumi, entry, pdg_ids, factors) bind(c, name = 'pineappl_lu real (c_double) :: factors(*) end subroutine + subroutine channels_entry(lumi, entry, pdg_ids, factors) bind(c, name = 'pineappl_channels_entry') + use iso_c_binding + type (c_ptr), value :: lumi + integer (c_size_t), value :: entry + integer (c_int32_t) :: pdg_ids(*) + real (c_double) :: factors(*) + end subroutine + type (c_ptr) function lumi_new() bind(c, name = 'pineappl_lumi_new') use iso_c_binding end function @@ -374,6 +524,12 @@ function c_f_string(c_str) result(f_str) end do end function + type (pineappl_lumi) function pineappl_channels_new() + implicit none + + pineappl_channels_new = pineappl_lumi(channels_new()) + end function + integer function pineappl_grid_bin_count(grid) implicit none @@ -444,9 +600,8 @@ function pineappl_grid_convolve_with_one(grid, pdg_id, xfx, alphas, order_mask, type (pineappl_grid), intent(in) :: grid integer, intent(in) :: pdg_id - ! 
no pointer attribute here, see https://community.intel.com/t5/Intel-Fortran-Compiler/Segfault-when-passing-procedure-pointer-to-function-but-not-when/m-p/939797 - procedure (pineappl_xfx) :: xfx - procedure (pineappl_alphas) :: alphas + type (pineappl_xfx) :: xfx + type (pineappl_alphas) :: alphas logical, intent(in) :: order_mask(:), lumi_mask(:) real (dp), intent(in) :: xi_ren, xi_fac real (dp), allocatable :: res(:) @@ -456,10 +611,10 @@ function pineappl_grid_convolve_with_one(grid, pdg_id, xfx, alphas, order_mask, allocate(res(pineappl_grid_bin_count(grid))) - if (.not. c_associated(c_funloc(xfx))) then + if (.not. c_associated(c_funloc(xfx%proc))) then error stop "xfx is null" end if - if (.not. c_associated(c_funloc(alphas))) then + if (.not. c_associated(c_funloc(alphas%proc))) then error stop "alphas is null" end if @@ -469,7 +624,7 @@ function pineappl_grid_convolve_with_one(grid, pdg_id, xfx, alphas, order_mask, state_ = c_null_ptr end if - call grid_convolve_with_one(grid%ptr, pdg_id, c_funloc(xfx), c_funloc(alphas), state_, & + call grid_convolve_with_one(grid%ptr, pdg_id, c_funloc(xfx%proc), c_funloc(alphas%proc), state_, & [(logical(order_mask(i), c_bool), i = 1, size(order_mask))], & [(logical(lumi_mask(i), c_bool), i = 1, size(lumi_mask))], & xi_ren, xi_fac, res) @@ -483,8 +638,8 @@ function pineappl_grid_convolve_with_two(grid, pdg_id1, xfx1, pdg_id2, xfx2, alp type (pineappl_grid), intent(in) :: grid integer, intent(in) :: pdg_id1, pdg_id2 - procedure (pineappl_xfx) :: xfx1, xfx2 - procedure (pineappl_alphas) :: alphas + type (pineappl_xfx) :: xfx1, xfx2 + type (pineappl_alphas) :: alphas logical, intent(in) :: order_mask(:), lumi_mask(:) real (dp), intent(in) :: xi_ren, xi_fac real (dp), allocatable :: res(:) @@ -494,13 +649,13 @@ function pineappl_grid_convolve_with_two(grid, pdg_id1, xfx1, pdg_id2, xfx2, alp allocate(res(pineappl_grid_bin_count(grid))) - if (.not. c_associated(c_funloc(xfx1))) then + if (.not. 
c_associated(c_funloc(xfx1%proc))) then error stop "xfx1 is null" end if - if (.not. c_associated(c_funloc(xfx2))) then + if (.not. c_associated(c_funloc(xfx2%proc))) then error stop "xfx1 is null" end if - if (.not. c_associated(c_funloc(alphas))) then + if (.not. c_associated(c_funloc(alphas%proc))) then error stop "alphas is null" end if @@ -510,12 +665,63 @@ function pineappl_grid_convolve_with_two(grid, pdg_id1, xfx1, pdg_id2, xfx2, alp state_ = c_null_ptr end if - call grid_convolve_with_two(grid%ptr, pdg_id1, c_funloc(xfx1), pdg_id2, c_funloc(xfx2), c_funloc(alphas), state_, & - [(logical(order_mask(i), c_bool), i = 1, size(order_mask))], & + call grid_convolve_with_two(grid%ptr, pdg_id1, c_funloc(xfx1%proc), pdg_id2, c_funloc(xfx2%proc), & + c_funloc(alphas%proc), state_, [(logical(order_mask(i), c_bool), i = 1, size(order_mask))], & [(logical(lumi_mask(i), c_bool), i = 1, size(lumi_mask))], & xi_ren, xi_fac, res) end function + function pineappl_grid_convolve(grid, xfxs, alphas, order_mask, channel_mask, bin_indices, & + nb_scales, mu_scales, state) result(res) + + use iso_c_binding + + implicit none + + type (pineappl_grid), intent(in) :: grid + type (pineappl_xfx) :: xfxs(:) + type (pineappl_alphas) :: alphas + logical, intent(in) :: order_mask(:), channel_mask(:) + integer, intent(in) :: bin_indices(:), nb_scales + real (dp), intent(in) :: mu_scales(:) + type (c_ptr), optional, intent(in) :: state + real (dp), allocatable :: res(:) + + integer :: i + type (c_ptr) :: state_ + + allocate(res(size(bin_indices))) + + do i = 1, size(xfxs) + if (.not. c_associated(c_funloc(xfxs(i)%proc))) then + error stop "at least one proc is null in xfxs" + end if + end do + if (.not. 
c_associated(c_funloc(alphas%proc))) then + error stop "alphas%proc is null" + end if + + if (present(state)) then + state_ = state + else + state_ = c_null_ptr + end if + + call grid_convolve( & + grid%ptr, & + [(c_funloc(xfxs(i)%proc), i = 1, size(xfxs))], & + c_funloc(alphas%proc), & + state_, & + [(logical(order_mask(i), c_bool), i = 1, size(order_mask))], & + [(logical(channel_mask(i), c_bool), i = 1, size(channel_mask))], & + [(int(bin_indices, c_size_t), i = 1, size(bin_indices))], & + int(nb_scales, c_size_t), & + mu_scales, & + res & + ) + + end function + subroutine pineappl_grid_delete(grid) implicit none @@ -536,6 +742,18 @@ subroutine pineappl_grid_fill(grid, x1, x2, q2, order, observable, lumi, weight) call grid_fill(grid%ptr, x1, x2, q2, int(order, c_size_t), observable, int(lumi, c_size_t), weight) end subroutine + subroutine pineappl_grid_fill2(grid, order, observable, channel, ntuple, weight) + use iso_c_binding + + implicit none + + type (pineappl_grid), intent(in) :: grid + real (dp), intent(in) :: observable, ntuple(*), weight + integer, intent(in) :: order, channel + + call grid_fill2(grid%ptr, int(order, c_size_t), observable, int(channel, c_size_t), ntuple, weight) + end subroutine + subroutine pineappl_grid_fill_all(grid, x1, x2, q2, order, observable, weights) use iso_c_binding @@ -548,6 +766,19 @@ subroutine pineappl_grid_fill_all(grid, x1, x2, q2, order, observable, weights) call grid_fill_all(grid%ptr, x1, x2, q2, int(order, c_size_t), observable, weights) end subroutine + subroutine pineappl_grid_fill_all2(grid, order, observable, ntuple, weights) + use iso_c_binding + + implicit none + + type (pineappl_grid), intent(in) :: grid + integer, intent(in) :: order + real (dp), intent(in) :: observable, weights(*) + real (dp), dimension(*), intent(in) :: ntuple + + call grid_fill_all2(grid%ptr, int(order, c_size_t), observable, ntuple, weights) + end subroutine + subroutine pineappl_grid_fill_array(grid, x1, x2, q2, orders, observables, 
lumis, weights) use iso_c_binding @@ -562,6 +793,20 @@ subroutine pineappl_grid_fill_array(grid, x1, x2, q2, orders, observables, lumis observables, [(int(lumis(i), c_size_t), i = 1, size(lumis))], weights, int(size(orders), c_size_t)) end subroutine + subroutine pineappl_grid_fill_array2(grid, orders, observables, ntuples, lumis, weights) + use iso_c_binding + + implicit none + + type (pineappl_grid), intent(in) :: grid + real (dp), intent(in) :: observables(*), ntuples(*), weights(*) + integer, intent(in) :: orders(:), lumis(:) + integer (c_size_t) :: i + + call grid_fill_array2(grid%ptr, [(int(orders(i), c_size_t), i = 1, size(orders))], & + observables, ntuples, [(int(lumis(i), c_size_t), i = 1, size(lumis))], weights, int(size(orders), c_size_t)) + end subroutine + function pineappl_grid_key_value(grid, key) result(res) use iso_c_binding @@ -617,6 +862,40 @@ type (pineappl_grid) function pineappl_grid_new(lumi, orders, order_params, bins order_params, int(bins, c_size_t), bin_limits, key_vals%ptr)) end function + type (pineappl_grid) function pineappl_grid_new2(pid_basis, channels, orders, order_params, & + bins, bin_limits, nb_convolutions, convolution_types, pdg_ids, kinematics, & + interpolations, mu_scales) + implicit none + + integer(kind(pineappl_pid_basis)), intent(in) :: pid_basis + type (pineappl_lumi), intent(in) :: channels + integer, intent(in) :: orders, bins, nb_convolutions + integer(int8), dimension(5 * orders), intent(in) :: order_params + real (dp), dimension(bins + 1), intent(in) :: bin_limits + integer(kind(pineappl_conv_type)), dimension(nb_convolutions), intent(in) :: convolution_types + integer, dimension(nb_convolutions), intent(in) :: pdg_ids + type (pineappl_kinematics), dimension(nb_convolutions + 1), intent(in), target :: kinematics + type (pineappl_interp_tuples), dimension(nb_convolutions + 1), intent(in) :: interpolations + integer, dimension(3) :: mu_scales + + integer :: i + + pineappl_grid_new2 = pineappl_grid(grid_new2(& + 
pid_basis, & + channels%ptr, & + int(orders, c_size_t), & + order_params, & + int(bins, c_size_t), & + bin_limits, & + int(nb_convolutions, c_size_t), & + convolution_types, & + pdg_ids, & + kinematics, & + interpolations, & + [(int(mu_scales(i), c_size_t), i = 1, size(mu_scales))]) & + ) + end function + subroutine pineappl_grid_optimize(grid) implicit none @@ -856,6 +1135,19 @@ subroutine pineappl_lumi_add(lumi, combinations, pdg_id_pairs, factors) call lumi_add(lumi%ptr, int(combinations, c_size_t), pdg_id_pairs, factors) end subroutine + subroutine pineappl_channels_add(channels, combinations, nb_combinations, pdg_id_combinations, factors) + use iso_c_binding + + implicit none + + type (pineappl_lumi), intent(in) :: channels + integer, intent(in) :: combinations, nb_combinations + integer, dimension(2 * combinations), intent(in) :: pdg_id_combinations + real (dp), dimension(combinations), intent(in) :: factors + + call channels_add(channels%ptr, int(combinations, c_size_t), int(nb_combinations, c_size_t), pdg_id_combinations, factors) + end subroutine + integer function pineappl_lumi_combinations(lumi, entry) use iso_c_binding @@ -898,6 +1190,19 @@ subroutine pineappl_lumi_entry(lumi, entry, pdg_ids, factors) call lumi_entry(lumi%ptr, int(entry, c_size_t), pdg_ids, factors) end subroutine + subroutine pineappl_channels_entry(lumi, entry, pdg_ids, factors) + use iso_c_binding + + implicit none + + type (pineappl_lumi), intent(in) :: lumi + integer, intent(in) :: entry + integer, intent(out) :: pdg_ids(*) + real (dp), intent(out) :: factors(*) + + call channels_entry(lumi%ptr, int(entry, c_size_t), pdg_ids, factors) + end subroutine + type (pineappl_lumi) function pineappl_lumi_new() implicit none diff --git a/examples/fortran/test.f90 b/examples/fortran/test.f90 index be7ad1426..37bd19fbd 100644 --- a/examples/fortran/test.f90 +++ b/examples/fortran/test.f90 @@ -14,8 +14,8 @@ program test_pineappl character(len=:), allocatable :: string - procedure 
(pineappl_xfx), pointer :: xfx1, xfx2 - procedure (pineappl_alphas), pointer :: alphas + type(pineappl_xfx) :: xfx1, xfx2 + type(pineappl_alphas) :: alphas lumi = pineappl_lumi_new() call pineappl_lumi_add(lumi, 2, [0, 0, 1, -1], [1.0_dp, 1.0_dp]) @@ -72,6 +72,16 @@ program test_pineappl lumi2 = pineappl_grid_lumi(grid) + if (pineappl_lumi_count(lumi2) /= 1) then + write(*, *) "pineappl_lumi_count(): ", pineappl_lumi_count(lumi2) + error stop "error: pineappl_lumi_count" + end if + + if (pineappl_lumi_combinations(lumi2, 0) /= 2) then + write(*, *) "pineappl_lumi_combinations(): ", pineappl_lumi_combinations(lumi2, 0) + error stop "error: pineappl_lumi_combinations" + end if + grid2 = pineappl_grid_new(lumi, 1, [2, 0, 0, 0], 1, [2.0_dp, 3.0_dp], key_vals) call pineappl_grid_merge_and_delete(grid, grid2) @@ -132,9 +142,9 @@ program test_pineappl error stop "error: pineappl_keyval_string" end if - xfx1 => xfx1_test - xfx2 => xfx2_test - alphas => alphas_test + xfx1 = pineappl_xfx(xfx1_test) + xfx2 = pineappl_xfx(xfx2_test) + alphas = pineappl_alphas(alphas_test) result = pineappl_grid_convolve_with_one(grid, 2212, xfx1, alphas, & [.true., .true.], [.true., .true.], 1.0_dp, 1.0_dp) diff --git a/examples/fortran/test_v1.f90 b/examples/fortran/test_v1.f90 new file mode 100644 index 000000000..2e93036d8 --- /dev/null +++ b/examples/fortran/test_v1.f90 @@ -0,0 +1,227 @@ +program test_pineappl + use pineappl + use iso_c_binding + + implicit none + + integer, parameter :: dp = kind(0.0d0) + + type(pineappl_lumi) :: channels, channels2 + type(pineappl_grid) :: grid, grid2 + type(pineappl_kinematics) :: kinematics(3) + type(pineappl_interp_tuples) :: interpolations(3) + + real(dp), allocatable :: result(:), bin_limits_left(:), bin_limits_right(:), bin_normalizations(:) + + integer(kind(pineappl_reweight_meth)) :: q2_reweight + integer(kind(pineappl_reweight_meth)) :: x_reweight + integer(kind(pineappl_map)) :: q2_mapping + integer(kind(pineappl_map)) :: x_mapping + 
integer(kind(pineappl_interp_meth)) :: interpolation_meth + + type (pineappl_xfx) :: xfx1, xfx2 + type (pineappl_alphas) :: alphas + + channels = pineappl_channels_new() + call pineappl_channels_add(channels, 3, 2, [0, 0, 1, -1, 2, -2], [1.0_dp, 1.0_dp, 1.0_dp]) + + if (pineappl_lumi_count(channels) /= 1) then + write(*, *) "pineappl_lumi_count(): ", pineappl_lumi_count(channels) + error stop "error: pineappl_lumi_count" + end if + + if (pineappl_lumi_combinations(channels, 0) /= 3) then + write(*, *) "pineappl_lumi_combinations(): ", pineappl_lumi_combinations(channels, 0) + error stop "error: pineappl_lumi_combinations" + end if + + kinematics = [& + pineappl_kinematics(pineappl_scale, 0), & + pineappl_kinematics(pineappl_x, 0), & + pineappl_kinematics(pineappl_x, 1) & + ] + + q2_reweight = pineappl_no_reweight + x_reweight = pineappl_applgrid_x + q2_mapping = pineappl_applgrid_h0 + x_mapping = pineappl_applgrid_f2 + interpolation_meth = pineappl_lagrange + interpolations = [ & + pineappl_interp_tuples(1e2_dp, 1e8_dp, 40, 3, q2_reweight, q2_mapping, interpolation_meth), & + pineappl_interp_tuples(2e-7_dp, 1.0_dp, 50, 3, x_reweight, x_mapping, interpolation_meth), & + pineappl_interp_tuples(2e-7_dp, 1.0_dp, 50, 3, x_reweight, x_mapping, interpolation_meth) & + ] + grid = pineappl_grid_new2(pineappl_pdg, channels, 1, [2_1, 0_1, 0_1, 0_1, 0_1], 2, [0.0_dp, 1.0_dp, 2.0_dp], & + 2, [pineappl_unpol_pdf, pineappl_unpol_pdf], [2212, 2212], kinematics, interpolations, [1, 1, 0]) + + if (pineappl_grid_order_count(grid) /= 1) then + write(*, *) "pineappl_grid_order_count(): ", pineappl_grid_order_count(grid) + error stop "error: pineappl_grid_order_count" + end if + + if (any(pineappl_grid_order_params(grid) /= [2, 0, 0, 0])) then + write(*, *) "pineappl_grid_order_params(): ", pineappl_grid_order_params(grid) + error stop "error: pineappl_grid_order_params" + end if + + call pineappl_grid_fill2(grid, 0, 0.5_dp, 0, [100.0_dp, 0.5_dp, 0.5_dp], 14.0_dp) + call 
pineappl_grid_fill_all2(grid, 0, 0.5_dp, [100.0_dp, 0.5_dp, 0.5_dp], [15.0_dp, 16.0_dp]) + call pineappl_grid_fill_array2(grid, [0, 0], [1.5_dp, 1.5_dp], & + [100.0_dp, 0.4_dp, 0.6_dp, 110.0_dp, 0.6_dp, 0.4_dp], [0, 0], [20.0_dp, 21.0_dp]) + + if (pineappl_grid_bin_count(grid) /= 2) then + write(*, *) "pineappl_grid_bin_count(): ", pineappl_grid_bin_count(grid) + error stop "error: pineappl_grid_bin_count" + end if + + if (pineappl_grid_bin_dimensions(grid) /= 1) then + write(*, *) "pineappl_grid_bin_dimensions(): ", pineappl_grid_bin_dimensions(grid) + error stop "error: pineappl_grid_bin_dimensions" + end if + + bin_limits_left = pineappl_grid_bin_limits_left(grid, 0) + if (any(abs(bin_limits_left - [0.0_dp, 1.0_dp]) > 1e-10)) then + write(*, *) "pineappl_grid_bin_limits_left(): ", abs(bin_limits_left - [0.0_dp, 1.0_dp]) < 1e-6 + error stop "error: pineappl_grid_bin_limits_left" + end if + + bin_limits_right = pineappl_grid_bin_limits_right(grid, 0) + if (any(abs(bin_limits_right - [1.0_dp, 2.0_dp]) > 1e-10)) then + write(*, *) "pineappl_grid_bin_limits_right(): ", bin_limits_right + error stop "error: pineappl_grid_bin_limits_right" + end if + + bin_normalizations = pineappl_grid_bin_normalizations(grid) + if (any(abs(bin_normalizations - [1.0_dp, 1.0_dp]) > 1e-10)) then + write(*, *) "pineappl_grid_bin_normalizations(): ", bin_normalizations + error stop "error: pineappl_grid_bin_normalizations" + end if + + grid2 = pineappl_grid_clone(grid) + + call pineappl_grid_delete(grid2) + + channels2 = pineappl_grid_lumi(grid) + + if (pineappl_lumi_count(channels2) /= 1) then + write(*, *) "pineappl_lumi_count(): ", pineappl_lumi_count(channels2) + error stop "error: pineappl_lumi_count" + end if + + if (pineappl_lumi_combinations(channels2, 0) /= 3) then + write(*, *) "pineappl_lumi_combinations(): ", pineappl_lumi_combinations(channels2, 0) + error stop "error: pineappl_lumi_combinations" + end if + + grid2 = pineappl_grid_new2(pineappl_pdg, channels, 1, [2_1, 0_1, 
0_1, 0_1, 0_1], 1, [2.0_dp, 3.0_dp], & + 2, [pineappl_unpol_pdf, pineappl_unpol_pdf], [2212, 2212], kinematics, interpolations, [1, 1, 0]) + + call pineappl_grid_merge_and_delete(grid, grid2) + + if (pineappl_grid_order_count(grid) /= 1) then + write(*, *) "pineappl_grid_order_count(): ", pineappl_grid_order_count(grid) + error stop "error: pineappl_grid_order_count" + end if + + call pineappl_grid_merge_bins(grid, 2, 3) + + if (pineappl_grid_order_count(grid) /= 1) then + write(*, *) "pineappl_grid_order_count(): ", pineappl_grid_order_count(grid) + error stop "error: pineappl_grid_order_count" + end if + + ! call pineappl_grid_optimize_using(grid, int(b'11111')) + + if (pineappl_grid_order_count(grid) /= 1) then + write(*, *) "pineappl_grid_order_count(): ", pineappl_grid_order_count(grid) + error stop "error: pineappl_grid_order_count" + end if + + if (any(pineappl_grid_order_params(grid) /= [2, 0, 0, 0])) then + write(*, *) "pineappl_grid_order_params(): ", pineappl_grid_order_params(grid) + error stop "error: pineappl_grid_order_params" + end if + + call pineappl_grid_scale(grid, 0.5_dp) + + call pineappl_grid_scale_by_bin(grid, [2.0_dp, 2.0_dp]) + + call pineappl_grid_scale_by_order(grid, 0.5_dp, 1.0_dp, 1.0_dp, 1.0_dp, 1.0_dp) + + call pineappl_grid_set_key_value(grid, "set_key_value", "set_key_value: success") + + ! at this point we have the bins [0, 1, 3] + call pineappl_grid_set_remapper(grid, 2, [1.0_dp, 1.0_dp], [0.0_dp, 1.0_dp, 10.0_dp, 11.0_dp, 1.0_dp, 3.0_dp, 11.0_dp, 13.0_dp]) + + call pineappl_grid_split_lumi(grid) + + xfx1 = pineappl_xfx(xfx1_test) + xfx2 = pineappl_xfx(xfx2_test) + alphas = pineappl_alphas(alphas_test) + + result = pineappl_grid_convolve_with_one(grid, 2212, xfx1, alphas, & + [.true.], [.true.], 1.0_dp, 1.0_dp) + if (any(result > 0 .neqv. 
[.true., .true., .false.])) then + write(*, *) "pineappl_grid_convolve_with_one(): ", result + error stop "error: pineappl_grid_convolve_with_one" + end if + + result = pineappl_grid_convolve_with_two(grid, 2212, xfx1, 2212, xfx2, alphas, & + [.true.], [.true.], 1.0_dp, 1.0_dp) + if (any(result < 0 .neqv. [.true., .true., .false.])) then + write(*, *) "pineappl_grid_convolve_with_two(): ", result + error stop "error: pineappl_grid_convolve_with_two" + end if + + result = pineappl_grid_convolve(grid, [xfx1, xfx2], alphas, [.true.], [.true.], & + [0, 1, 2], 1, [1.0_dp, 1.0_dp, 1.0_dp]) + if (any(result < 0 .neqv. [.true., .true., .false.])) then + write(*, *) "pineappl_grid_convolve_with_two(): ", result + error stop "error: pineappl_grid_convolve_with_two" + end if + + call pineappl_lumi_delete(channels) + + call pineappl_grid_delete(grid) + +contains + + function xfx1_test(pdg_id, x, q2, state) bind(c) + use iso_c_binding + + implicit none + + integer(c_int32_t), value, intent(in) :: pdg_id + real(c_double), value, intent(in) :: x, q2 + type(c_ptr), value, intent(in) :: state + real(c_double) :: xfx1_test + + xfx1_test = x + end function + + function xfx2_test(pdg_id, x, q2, state) bind(c) + use iso_c_binding + + implicit none + + integer(c_int32_t), value, intent(in) :: pdg_id + real(c_double), value, intent(in) :: x, q2 + type(c_ptr), value, intent(in) :: state + real(c_double) :: xfx2_test + + xfx2_test = -x + end function + + function alphas_test(q2, state) bind(c) + use iso_c_binding + + implicit none + + real(c_double), value, intent(in) :: q2 + type(c_ptr), value, intent(in) :: state + real(c_double) :: alphas_test + + alphas_test = q2 + end function + +end program test_pineappl diff --git a/maintainer/generate-coverage.sh b/maintainer/generate-coverage.sh index c0330cd43..048e0252e 100755 --- a/maintainer/generate-coverage.sh +++ b/maintainer/generate-coverage.sh @@ -38,11 +38,12 @@ export CARGO_TARGET_DIR="$(mktemp -d)" # relative paths sometimes don't 
work, so use an absolute path dir="${CARGO_TARGET_DIR}"/debug/doctestbins -export RUSTFLAGS="-Cinstrument-coverage" +# `-C link-dead-code` is needed to prevent 'warning: XX functions have mismatched data' warnings +export RUSTFLAGS="-Cinstrument-coverage -Clink-dead-code" export RUSTDOCFLAGS="-Cinstrument-coverage -Z unstable-options --persist-doctests ${dir}" # -Z doctest-in-workspace is enabled by default starting from 1.72.0 -cargo test -Z doctest-in-workspace --all-features 2> >(tee stderr 1>&2) +cargo test -Z doctest-in-workspace --features=applgrid,evolve,fastnlo,fktable 2> >(tee stderr 1>&2) # from https://stackoverflow.com/a/51141872/812178 sed -i 's/\x1B\[[0-9;]\{1,\}[A-Za-z]//g' stderr diff --git a/maintainer/make-release.sh b/maintainer/make-release.sh index bdee097b6..51b3cb450 100755 --- a/maintainer/make-release.sh +++ b/maintainer/make-release.sh @@ -57,7 +57,7 @@ if ! cargo msrv --help >/dev/null; then exit 1 fi -if ! cargo msrv --min 1.70.0 --max 1.70.0 >/dev/null; then +if ! cargo msrv --min 1.80.1 --max 1.80.1 >/dev/null; then echo "Minimum supported Rust version doesn't match avertised one." 
exit 1 fi diff --git a/maintainer/pineappl-ci/Containerfile b/maintainer/pineappl-ci/Containerfile index 5b317c1b3..e1d8fea89 100644 --- a/maintainer/pineappl-ci/Containerfile +++ b/maintainer/pineappl-ci/Containerfile @@ -3,15 +3,14 @@ FROM debian:11-slim ARG APPLGRID_V=1.6.36 -# `0.9.27+cargo-0.74.0` is the last version that support Rust 1.70 -ARG CARGOC_V=0.9.27+cargo-0.74.0 +ARG CARGOC_V=0.10.3 ARG FASTNLO_V=2.5.0-2826 ARG LHAPDF_V=6.5.4 ARG ZLIB_V=1.3.1 # the last version is the default Rust version used in the container # as long as we're using `persist-doctests` in the `Rust` workflow we need nightly as default -ARG RUST_V="1.70.0 nightly-2024-01-25" +ARG RUST_V="1.70.0 1.80.1 nightly-2024-09-04" ENV APPL_IGRID_DIR="/usr/local/src/applgrid-${APPLGRID_V}/src" ENV CARGO_HOME="/usr/local/cargo" diff --git a/pineappl/Cargo.toml b/pineappl/Cargo.toml index 7826a016f..9cc1254c3 100644 --- a/pineappl/Cargo.toml +++ b/pineappl/Cargo.toml @@ -19,13 +19,14 @@ workspace = true anyhow = "1.0.48" arrayvec = "0.7.2" bincode = "1.3.3" -bitflags = "2.4.2" +bitflags = { features = ["serde"], version = "2.4.2" } enum_dispatch = "0.3.7" float-cmp = "0.9.0" git-version = "0.3.5" itertools = "0.10.1" lz4_flex = "0.9.2" ndarray = { features = ["serde"], version = "0.15.4" } +pineappl-v0 = { package = "pineappl", version = "0.8.2" } rustc-hash = "1.1.0" serde = { features = ["derive"], version = "1.0.130" } thiserror = "1.0.30" diff --git a/pineappl/src/bin.rs b/pineappl/src/bin.rs index 3d45a597e..478ef5c4e 100644 --- a/pineappl/src/bin.rs +++ b/pineappl/src/bin.rs @@ -582,7 +582,7 @@ impl BinLimits { /// TODO #[must_use] pub fn new(mut limits: Vec) -> Self { - limits.sort_by(|left, right| left.partial_cmp(right).unwrap()); + limits.sort_by(f64::total_cmp); if limits .iter() @@ -790,6 +790,7 @@ impl BinLimits { #[cfg(test)] mod test { use super::*; + use float_cmp::assert_approx_eq; use std::iter; #[test] @@ -806,16 +807,16 @@ mod test { ])) .unwrap(); - assert_eq!(limits.left(), 
0.0); - assert_eq!(limits.right(), 2.0); + assert_approx_eq!(f64, limits.left(), 0.0, ulps = 2); + assert_approx_eq!(f64, limits.right(), 2.0, ulps = 2); assert_eq!(limits.bins(), 6); let non_consecutive_bins = BinLimits::new(vec![3.0, 4.0]); assert!(limits.merge(&non_consecutive_bins).is_err()); - assert_eq!(limits.left(), 0.0); - assert_eq!(limits.right(), 2.0); + assert_approx_eq!(f64, limits.left(), 0.0, ulps = 2); + assert_approx_eq!(f64, limits.right(), 2.0, ulps = 2); assert_eq!(limits.bins(), 6); // left merge @@ -828,8 +829,8 @@ mod test { ])) .is_err()); - assert_eq!(limits.left(), 0.0); - assert_eq!(limits.right(), 2.0); + assert_approx_eq!(f64, limits.left(), 0.0, ulps = 2); + assert_approx_eq!(f64, limits.right(), 2.0, ulps = 2); assert_eq!(limits.bins(), 6); } diff --git a/pineappl/src/boc.rs b/pineappl/src/boc.rs index 1d70d3346..0f5b68ccd 100644 --- a/pineappl/src/boc.rs +++ b/pineappl/src/boc.rs @@ -1,13 +1,222 @@ //! Module containing structures for the 3 dimensions of a [`Grid`]: bins, [`Order`] and channels //! (`boc`). +//! +//! 
[`Grid`]: super::grid::Grid use float_cmp::approx_eq; use itertools::Itertools; use serde::{Deserialize, Serialize}; +use std::borrow::Cow; use std::cmp::Ordering; use std::str::FromStr; use thiserror::Error; +/// TODO +#[repr(C)] +#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub enum Kinematics { + /// TODO + Scale(usize), + /// TODO + X(usize), +} + +impl Kinematics { + /// TODO + pub const X1: Self = Self::X(0); + + /// TODO + pub const X2: Self = Self::X(1); +} + +/// TODO +#[repr(C)] +#[derive(Clone, Deserialize, Serialize)] +pub enum ScaleFuncForm { + /// TODO + NoScale, + /// TODO + Scale(usize), + /// TODO + QuadraticSum(usize, usize), + /// TODO + QuadraticMean(usize, usize), + /// TODO + QuadraticSumOver4(usize, usize), + /// TODO + LinearMean(usize, usize), + /// TODO + LinearSum(usize, usize), + /// TODO + ScaleMax(usize, usize), + /// TODO + ScaleMin(usize, usize), + /// TODO + Prod(usize, usize), + /// TODO + S2plusS1half(usize, usize), + /// TODO + Pow4Sum(usize, usize), + /// TODO + WgtAvg(usize, usize), + /// TODO + S2plusS1fourth(usize, usize), + /// TODO + ExpProd2(usize, usize), +} + +impl ScaleFuncForm { + /// TODO + #[must_use] + pub fn calc<'a>( + &self, + node_values: &'a [Vec], + kinematics: &[Kinematics], + ) -> Cow<'a, [f64]> { + match self.clone() { + Self::NoScale => Cow::Borrowed(&[]), + Self::Scale(index) => { + if node_values.is_empty() { + // TODO: empty subgrid should have as many node values as dimensions + Cow::Borrowed(&[]) + } else { + Cow::Borrowed( + &node_values[kinematics + .iter() + .position(|&kin| kin == Kinematics::Scale(index)) + // UNWRAP: this should be guaranteed by `Grid::new` + .unwrap_or_else(|| unreachable!())], + ) + } + } + Self::QuadraticSum(idx1, idx2) + | Self::QuadraticMean(idx1, idx2) + | Self::QuadraticSumOver4(idx1, idx2) + | Self::LinearMean(idx1, idx2) + | Self::LinearSum(idx1, idx2) + | Self::ScaleMax(idx1, idx2) + | Self::ScaleMin(idx1, idx2) + | Self::Prod(idx1, idx2) + | 
Self::S2plusS1half(idx1, idx2) + | Self::Pow4Sum(idx1, idx2) + | Self::WgtAvg(idx1, idx2) + | Self::S2plusS1fourth(idx1, idx2) + | Self::ExpProd2(idx1, idx2) => { + let calc_scale: fn((f64, f64)) -> f64 = match self.clone() { + Self::QuadraticSum(_, _) => |(s1, s2)| s1 + s2, + Self::QuadraticMean(_, _) => |(s1, s2)| 0.5 * (s1 + s2), + Self::QuadraticSumOver4(_, _) => |(s1, s2)| 0.25 * (s1 + s2), + Self::LinearMean(_, _) => |(s1, s2)| 0.25 * (s1.sqrt() + s2.sqrt()).powi(2), + Self::LinearSum(_, _) => |(s1, s2)| (s1.sqrt() + s2.sqrt()).powi(2), + Self::ScaleMax(_, _) => |(s1, s2)| s1.max(s2), + Self::ScaleMin(_, _) => |(s1, s2)| s1.min(s2), + Self::Prod(_, _) => |(s1, s2)| s1 * s2, + Self::S2plusS1half(_, _) => |(s1, s2)| 0.5 * s2.mul_add(2.0, s1), + Self::Pow4Sum(_, _) => |(s1, s2)| s1.hypot(s2), + Self::WgtAvg(_, _) => |(s1, s2)| s1.mul_add(s1, s2 * s2) / (s1 + s2), + Self::S2plusS1fourth(_, _) => |(s1, s2)| s1.mul_add(0.25, s2), + Self::ExpProd2(_, _) => { + |(s1, s2)| (s1.sqrt() * (0.3 * s2.sqrt()).exp()).powi(2) + } + _ => unreachable!(), + }; + + let scales1 = &node_values[kinematics + .iter() + .position(|&kin| kin == Kinematics::Scale(idx1)) + // UNWRAP: this should be guaranteed by `Grid::new` + .unwrap_or_else(|| unreachable!())]; + let scales2 = &node_values[kinematics + .iter() + .position(|&kin| kin == Kinematics::Scale(idx2)) + // UNWRAP: this should be guaranteed by `Grid::new` + .unwrap_or_else(|| unreachable!())]; + + Cow::Owned( + scales1 + .iter() + .copied() + .cartesian_product(scales2.iter().copied()) + .map(calc_scale) + .collect(), + ) + } + } + } + + /// TODO + #[must_use] + pub fn idx(&self, indices: &[usize], scale_dims: &[usize]) -> usize { + match self.clone() { + Self::NoScale => unreachable!(), + Self::Scale(index) => indices[index], + Self::QuadraticSum(idx1, idx2) + | Self::QuadraticMean(idx1, idx2) + | Self::QuadraticSumOver4(idx1, idx2) + | Self::LinearMean(idx1, idx2) + | Self::LinearSum(idx1, idx2) + | Self::ScaleMax(idx1, idx2) + 
| Self::ScaleMin(idx1, idx2) + | Self::Prod(idx1, idx2) + | Self::S2plusS1half(idx1, idx2) + | Self::Pow4Sum(idx1, idx2) + | Self::WgtAvg(idx1, idx2) + | Self::S2plusS1fourth(idx1, idx2) + | Self::ExpProd2(idx1, idx2) => indices[idx1] * scale_dims[1] + indices[idx2], + } + } +} + +/// TODO +#[derive(Clone, Deserialize, Serialize)] +pub struct Scales { + /// TODO + pub ren: ScaleFuncForm, + /// TODO + pub fac: ScaleFuncForm, + /// TODO + pub frg: ScaleFuncForm, +} + +impl<'a> From<&'a Scales> for [&'a ScaleFuncForm; 3] { + fn from(scales: &'a Scales) -> [&'a ScaleFuncForm; 3] { + [&scales.ren, &scales.fac, &scales.frg] + } +} + +impl Scales { + /// TODO + pub fn compatible_with(&self, kinematics: &[Kinematics]) -> bool { + for scale in [&self.ren, &self.fac, &self.frg].map(Clone::clone) { + match scale { + ScaleFuncForm::NoScale => {} + ScaleFuncForm::Scale(index) + if kinematics + .iter() + .any(|&kin| kin == Kinematics::Scale(index)) => {} + ScaleFuncForm::QuadraticSum(idx1, idx2) + | ScaleFuncForm::QuadraticMean(idx1, idx2) + | ScaleFuncForm::QuadraticSumOver4(idx1, idx2) + | ScaleFuncForm::LinearMean(idx1, idx2) + | ScaleFuncForm::LinearSum(idx1, idx2) + | ScaleFuncForm::ScaleMax(idx1, idx2) + | ScaleFuncForm::ScaleMin(idx1, idx2) + | ScaleFuncForm::Prod(idx1, idx2) + | ScaleFuncForm::S2plusS1half(idx1, idx2) + | ScaleFuncForm::Pow4Sum(idx1, idx2) + | ScaleFuncForm::WgtAvg(idx1, idx2) + | ScaleFuncForm::S2plusS1fourth(idx1, idx2) + | ScaleFuncForm::ExpProd2(idx1, idx2) + if kinematics.iter().any(|&kin| kin == Kinematics::Scale(idx1)) + && kinematics.iter().any(|&kin| kin == Kinematics::Scale(idx2)) => {} + _ => return false, + } + } + + true + } +} + /// Error type keeping information if [`Order::from_str`] went wrong. #[derive(Debug, Error, Eq, PartialEq)] #[error("{0}")] @@ -19,13 +228,18 @@ pub struct ParseOrderError(String); #[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub struct Order { /// Exponent of the strong coupling. 
- pub alphas: u32, + pub alphas: u8, /// Exponent of the electromagnetic coupling. - pub alpha: u32, + pub alpha: u8, /// Exponent of the logarithm of the scale factor of the renomalization scale. - pub logxir: u32, - /// Exponent of the logarithm of the scale factor of the factorization scale. - pub logxif: u32, + pub logxir: u8, + /// Exponent of the logarithm of the scale factor of the initial state factorization scale. + pub logxif: u8, + /// Exponent of the logarithm of the scale factor of the final state factorization scale + /// (fragmentation scale). + pub logxia: u8, + // /// Reserved for future usage. + // pub other: [u8; 3], } impl FromStr for Order { @@ -37,6 +251,7 @@ impl FromStr for Order { alpha: 0, logxir: 0, logxif: 0, + logxia: 0, }; for tuple in s @@ -61,6 +276,9 @@ impl FromStr for Order { ("lf", Ok(num)) => { result.logxif = num; } + ("la", Ok(num)) => { + result.logxia = num; + } (label, Err(err)) => { return Err(ParseOrderError(format!( "error while parsing exponent of '{label}': {err}" @@ -82,10 +300,11 @@ impl Ord for Order { // rest lexicographically (self.alphas + self.alpha) .cmp(&(other.alphas + other.alpha)) - .then((self.alpha, self.logxir, self.logxif).cmp(&( + .then((self.alpha, self.logxir, self.logxif, self.logxia).cmp(&( other.alpha, other.logxir, other.logxif, + other.logxia, ))) } } @@ -100,23 +319,24 @@ impl Order { /// Constructor. This function mainly exists to have a way of constructing `Order` that is less /// verbose. #[must_use] - pub const fn new(alphas: u32, alpha: u32, logxir: u32, logxif: u32) -> Self { + pub const fn new(alphas: u8, alpha: u8, logxir: u8, logxif: u8, logxia: u8) -> Self { Self { alphas, alpha, logxir, logxif, + logxia, } } /// Return a mask suitable to pass as the `order_mask` parameter of [`Grid::convolve`], - /// [`Grid::evolve`] or [`Grid::evolve_info`]. 
The selection of `orders` is controlled using - /// the `max_as` and `max_al` parameters, for instance setting `max_as = 1` and `max_al = 0` - /// selects the LO QCD only, `max_as = 2` and `max_al = 0` the NLO QCD; setting `max_as = 3` - /// and `max_al = 2` would select all NLOs, and the NNLO QCD. + /// [`Grid::evolve_with_slice_iter`] or [`Grid::evolve_info`]. The selection of `orders` is + /// controlled using the `max_as` and `max_al` parameters, for instance setting `max_as = 1` + /// and `max_al = 0` selects the LO QCD only, `max_as = 2` and `max_al = 0` the NLO QCD; + /// setting `max_as = 3` and `max_al = 2` would select all NLOs, and the NNLO QCD. /// /// [`Grid::convolve`]: super::grid::Grid::convolve - /// [`Grid::evolve`]: super::grid::Grid::evolve + /// [`Grid::evolve_with_slice_iter`]: super::grid::Grid::evolve_with_slice_iter /// [`Grid::evolve_info`]: super::grid::Grid::evolve_info /// /// # Example @@ -136,12 +356,12 @@ impl Order { /// use pineappl::boc::Order; /// /// let orders = [ - /// Order::new(0, 2, 0, 0), // LO : alpha^2 - /// Order::new(1, 2, 0, 0), // NLO QCD : alphas alpha^2 - /// Order::new(0, 3, 0, 0), // NLO EW : alpha^3 - /// Order::new(2, 2, 0, 0), // NNLO QCD : alphas^2 alpha^2 - /// Order::new(1, 3, 0, 0), // NNLO QCD—EW : alphas alpha^3 - /// Order::new(0, 4, 0, 0), // NNLO EW : alpha^4 + /// Order::new(0, 2, 0, 0, 0), // LO : alpha^2 + /// Order::new(1, 2, 0, 0, 0), // NLO QCD : alphas alpha^2 + /// Order::new(0, 3, 0, 0, 0), // NLO EW : alpha^3 + /// Order::new(2, 2, 0, 0, 0), // NNLO QCD : alphas^2 alpha^2 + /// Order::new(1, 3, 0, 0, 0), // NNLO QCD—EW : alphas alpha^3 + /// Order::new(0, 4, 0, 0, 0), // NNLO EW : alpha^4 /// ]; /// /// // LO EW @@ -167,11 +387,11 @@ impl Order { /// use pineappl::boc::Order; /// /// let orders = [ - /// Order::new(0, 2, 0, 0), // LO : alpha^2 - /// Order::new(1, 2, 0, 0), // NLO QCD : alphas alpha^2 - /// Order::new(1, 2, 1, 0), // NLO QCD : alphas alpha^2 logxif - /// Order::new(0, 3, 0, 
0), // NLO EW : alpha^3 - /// Order::new(0, 3, 1, 0), // NLO EW : alpha^3 logxif + /// Order::new(0, 2, 0, 0, 0), // LO : alpha^2 + /// Order::new(1, 2, 0, 0, 0), // NLO QCD : alphas alpha^2 + /// Order::new(1, 2, 1, 0, 0), // NLO QCD : alphas alpha^2 logxif + /// Order::new(0, 3, 0, 0, 0), // NLO EW : alpha^3 + /// Order::new(0, 3, 1, 0, 0), // NLO EW : alpha^3 logxif /// ]; /// /// assert_eq!(Order::create_mask(&orders, 0, 2, true), [true, false, false, true, true]); @@ -184,13 +404,13 @@ impl Order { /// use pineappl::boc::Order; /// /// let orders = [ - /// Order::new(2, 0, 0, 0), // LO QCD : alphas^2 - /// Order::new(1, 1, 0, 0), // LO QCD—EW : alphas alpha - /// Order::new(0, 2, 0, 0), // LO EW : alpha^2 - /// Order::new(3, 0, 0, 0), // NLO QCD : alphas^3 - /// Order::new(2, 1, 0, 0), // NLO QCD—EW : alphas^2 alpha - /// Order::new(1, 2, 0, 0), // NLO QCD—EW : alphas alpha^2 - /// Order::new(0, 3, 0, 0), // NLO EW : alpha^3 + /// Order::new(2, 0, 0, 0, 0), // LO QCD : alphas^2 + /// Order::new(1, 1, 0, 0, 0), // LO QCD—EW : alphas alpha + /// Order::new(0, 2, 0, 0, 0), // LO EW : alpha^2 + /// Order::new(3, 0, 0, 0, 0), // NLO QCD : alphas^3 + /// Order::new(2, 1, 0, 0, 0), // NLO QCD—EW : alphas^2 alpha + /// Order::new(1, 2, 0, 0, 0), // NLO QCD—EW : alphas alpha^2 + /// Order::new(0, 3, 0, 0, 0), // NLO EW : alpha^3 /// ]; /// /// // LO EW @@ -201,7 +421,7 @@ impl Order { /// assert_eq!(Order::create_mask(&orders, 1, 1, false), [true, true, true, false, false, false, false]); /// ``` #[must_use] - pub fn create_mask(orders: &[Self], max_as: u32, max_al: u32, logs: bool) -> Vec { + pub fn create_mask(orders: &[Self], max_as: u8, max_al: u8, logs: bool) -> Vec { // smallest sum of alphas and alpha let lo = orders .iter() @@ -238,8 +458,9 @@ impl Order { alpha, logxir, logxif, + logxia, }| { - if !logs && (logxir > 0 || logxif > 0) { + if !logs && (logxir > 0 || logxif > 0 || logxia > 0) { return false; } @@ -265,7 +486,7 @@ impl Order { /// combination. 
#[derive(Clone, Debug, Deserialize, PartialEq, PartialOrd, Serialize)] pub struct Channel { - entry: Vec<(i32, i32, f64)>, + entry: Vec<(Vec, f64)>, } impl Channel { @@ -279,8 +500,8 @@ impl Channel { /// ```rust /// use pineappl::boc::Channel; /// - /// let entry1 = Channel::new(vec![(2, 2, 1.0), (4, 4, 1.0)]); - /// let entry2 = Channel::new(vec![(4, 4, 1.0), (2, 2, 1.0)]); + /// let entry1 = Channel::new(vec![(vec![2, 2], 1.0), (vec![4, 4], 1.0)]); + /// let entry2 = Channel::new(vec![(vec![4, 4], 1.0), (vec![2, 2], 1.0)]); /// /// // checks that the ordering doesn't matter /// assert_eq!(entry1, entry2); @@ -291,8 +512,8 @@ impl Channel { /// ```rust /// use pineappl::boc::Channel; /// - /// let entry1 = Channel::new(vec![(1, 1, 1.0), (1, 1, 3.0), (3, 3, 1.0), (1, 1, 6.0)]); - /// let entry2 = Channel::new(vec![(1, 1, 10.0), (3, 3, 1.0)]); + /// let entry1 = Channel::new(vec![(vec![1, 1], 1.0), (vec![1, 1], 3.0), (vec![3, 3], 1.0), (vec![1, 1], 6.0)]); + /// let entry2 = Channel::new(vec![(vec![1, 1], 10.0), (vec![3, 3], 1.0)]); /// /// assert_eq!(entry1, entry2); /// ``` @@ -306,28 +527,39 @@ impl Channel { /// /// let _ = Channel::new(vec![]); /// ``` + /// + /// Creating a channel with entries that have a different number of PIDs panics: + /// ```rust,should_panic + /// use pineappl::boc::Channel; + /// + /// let _ = Channel::new(vec![(vec![1, 1, 1], 1.0), (vec![1, 1], 1.0)]); + /// ``` #[must_use] - pub fn new(mut entry: Vec<(i32, i32, f64)>) -> Self { - assert!(!entry.is_empty()); + pub fn new(mut entry: Vec<(Vec, f64)>) -> Self { + assert!(!entry.is_empty(), "can not create empty channel"); + assert!( + entry.iter().map(|(pids, _)| pids.len()).all_equal(), + "can not create channel with a different number of PIDs" + ); // sort `entry` because the ordering doesn't matter and because it makes it easier to // compare `Channel` objects with each other - entry.sort_by(|x, y| (x.0, x.1).cmp(&(y.0, y.1))); + entry.sort_by(|x, y| x.0.cmp(&y.0)); Self { entry: 
entry .into_iter() .coalesce(|lhs, rhs| { // sum the factors of repeated elements - if (lhs.0, lhs.1) == (rhs.0, rhs.1) { - Ok((lhs.0, lhs.1, lhs.2 + rhs.2)) + if lhs.0 == rhs.0 { + Ok((lhs.0, lhs.1 + rhs.1)) } else { Err((lhs, rhs)) } }) // filter zeros // TODO: find a better than to hardcode the epsilon limit - .filter(|&(_, _, f)| !approx_eq!(f64, f.abs(), 0.0, epsilon = 1e-14)) + .filter(|&(_, f)| !approx_eq!(f64, f.abs(), 0.0, epsilon = 1e-14)) .collect(), } } @@ -340,25 +572,31 @@ impl Channel { /// use pineappl::boc::Channel; /// use pineappl::channel; /// - /// let entry = Channel::translate(&channel![103, 11, 1.0], &|evol_id| match evol_id { + /// let entry = channel![10.0 * (103, 11)].translate(&|evol_id| match evol_id { /// 103 => vec![(2, 1.0), (-2, -1.0), (1, -1.0), (-1, 1.0)], /// _ => vec![(evol_id, 1.0)], /// }); /// - /// assert_eq!(entry, channel![2, 11, 1.0; -2, 11, -1.0; 1, 11, -1.0; -1, 11, 1.0]); + /// assert_eq!(entry, channel![10.0 * (2, 11) + -10.0 * (-2, 11) + -10.0 * (1, 11) + 10.0 * (-1, 11)]); /// ``` - pub fn translate(entry: &Self, translator: &dyn Fn(i32) -> Vec<(i32, f64)>) -> Self { - let mut tuples = Vec::new(); + #[must_use] + pub fn translate(&self, translator: &dyn Fn(i32) -> Vec<(i32, f64)>) -> Self { + let mut result = Vec::new(); - for &(a, b, factor) in &entry.entry { - for (aid, af) in translator(a) { - for (bid, bf) in translator(b) { - tuples.push((aid, bid, factor * af * bf)); - } + for (pids, factor) in &self.entry { + for tuples in pids + .iter() + .map(|&pid| translator(pid)) + .multi_cartesian_product() + { + result.push(( + tuples.iter().map(|&(pid, _)| pid).collect(), + factor * tuples.iter().map(|(_, f)| f).product::(), + )); } } - Self::new(tuples) + Self::new(result) } /// Returns a tuple representation of this entry. 
@@ -369,19 +607,28 @@ impl Channel { /// use pineappl::channel; /// use pineappl::boc::Channel; /// - /// let entry = channel![4, 4, 1.0; 2, 2, 1.0]; + /// let entry = channel![1.0 * (4, 4) + 1.0 * (2, 2)]; /// - /// assert_eq!(entry.entry(), [(2, 2, 1.0), (4, 4, 1.0)]); + /// assert_eq!(entry.entry(), [(vec![2, 2], 1.0), (vec![4, 4], 1.0)]); /// ``` #[must_use] - pub fn entry(&self) -> &[(i32, i32, f64)] { + pub fn entry(&self) -> &[(Vec, f64)] { &self.entry } - /// Creates a new object with the initial states transposed. + /// Create a new object with the PIDs at index `i` and `j` transposed. #[must_use] - pub fn transpose(&self) -> Self { - Self::new(self.entry.iter().map(|(a, b, c)| (*b, *a, *c)).collect()) + pub fn transpose(&self, i: usize, j: usize) -> Self { + Self::new( + self.entry + .iter() + .map(|(pids, c)| { + let mut transposed = pids.clone(); + transposed.swap(i, j); + (transposed, *c) + }) + .collect(), + ) } /// If `other` is the same channel when only comparing PIDs and neglecting the factors, return @@ -393,11 +640,11 @@ impl Channel { /// ```rust /// use pineappl::channel; /// - /// let ch1 = channel![2, 2, 2.0; 4, 4, 2.0]; - /// let ch2 = channel![4, 4, 1.0; 2, 2, 1.0]; - /// let ch3 = channel![3, 4, 1.0; 2, 2, 1.0]; - /// let ch4 = channel![4, 3, 1.0; 2, 3, 2.0]; - /// let ch5 = channel![2, 2, 1.0; 4, 4, 2.0]; + /// let ch1 = channel![2.0 * (2, 2) + 2.0 * (4, 4)]; + /// let ch2 = channel![1.0 * (4, 4) + 1.0 * (2, 2)]; + /// let ch3 = channel![1.0 * (3, 4) + 1.0 * (2, 2)]; + /// let ch4 = channel![1.0 * (4, 3) + 2.0 * (2, 3)]; + /// let ch5 = channel![1.0 * (2, 2) + 2.0 * (4, 4)]; /// /// // ch1 is ch2 multiplied by two /// assert_eq!(ch1.common_factor(&ch2), Some(2.0)); @@ -418,7 +665,7 @@ impl Channel { .entry .iter() .zip(&other.entry) - .map(|(a, b)| ((a.0 == b.0) && (a.1 == b.1)).then_some(a.2 / b.2)) + .map(|((pids_a, fa), (pids_b, fb))| (pids_a == pids_b).then_some(fa / fb)) .collect(); result.and_then(|factors| { @@ -443,51 +690,46 @@ 
impl FromStr for Channel { type Err = ParseChannelError; fn from_str(s: &str) -> Result { - Ok(Self::new( - s.split('+') - .map(|sub| { - sub.split_once('*').map_or_else( - || Err(ParseChannelError(format!("missing '*' in '{sub}'"))), - |(factor, pids)| { - let tuple = pids.split_once(',').map_or_else( - || Err(ParseChannelError(format!("missing ',' in '{pids}'"))), - |(a, b)| { - Ok(( - a.trim() - .strip_prefix('(') - .ok_or_else(|| { - ParseChannelError(format!( - "missing '(' in '{pids}'" - )) - })? - .trim() - .parse::() - .map_err(|err| ParseChannelError(err.to_string()))?, - b.trim() - .strip_suffix(')') - .ok_or_else(|| { - ParseChannelError(format!( - "missing ')' in '{pids}'" - )) - })? - .trim() - .parse::() - .map_err(|err| ParseChannelError(err.to_string()))?, + let result: Vec<_> = s + .split('+') + .map(|sub| { + sub.split_once('*').map_or_else( + // TODO: allow a missing numerical factor which then is assumed to be `1` + || Err(ParseChannelError(format!("missing '*' in '{sub}'"))), + |(factor, pids)| { + let vector: Vec<_> = pids + .trim() + .strip_prefix('(') + .ok_or_else(|| ParseChannelError(format!("missing '(' in '{pids}'")))? + .strip_suffix(')') + .ok_or_else(|| ParseChannelError(format!("missing ')' in '{pids}'")))? 
+ .split(',') + .map(|pid| { + pid.trim().parse::().map_err(|err| { + ParseChannelError(format!( + "could not parse PID: '{pid}', '{err}'" )) - }, - )?; - - Ok(( - tuple.0, - tuple.1, - str::parse::(factor.trim()) - .map_err(|err| ParseChannelError(err.to_string()))?, - )) - }, - ) - }) - .collect::>()?, - )) + }) + }) + .collect::>()?; + + Ok(( + vector, + str::parse::(factor.trim()) + .map_err(|err| ParseChannelError(err.to_string()))?, + )) + }, + ) + }) + .collect::>()?; + + if !result.iter().map(|(pids, _)| pids.len()).all_equal() { + return Err(ParseChannelError( + "PID tuples have different lengths".to_owned(), + )); + } + + Ok(Self::new(result)) } } @@ -500,29 +742,36 @@ impl FromStr for Channel { /// ```rust /// use pineappl::channel; /// -/// let entry1 = channel![2, 2, 1.0; 4, 4, 1.0]; -/// let entry2 = channel![4, 4, 1.0; 2, 2, 1.0]; +/// let entry1 = channel![1.0 * (2, 2) + 1.0 * (4, 4)]; +/// let entry2 = channel![1.0 * (4, 4) + 1.0 * (2, 2)]; /// /// assert_eq!(entry1, entry2); /// ``` #[macro_export] macro_rules! 
channel { - ($a:expr, $b:expr, $factor:expr $(; $c:expr, $d:expr, $fac:expr)*) => { - $crate::boc::Channel::new(vec![($a, $b, $factor), $(($c, $d, $fac)),*]) + ($factor:literal * ($($pids:expr),+) $(+ $more_factors:literal * ($($more_pids:expr),+))*) => { + $crate::boc::Channel::new( + vec![ + (vec![$($pids),+], $factor) $(, + (vec![$($more_pids),+], $more_factors) + )* + ] + ) }; } #[cfg(test)] mod tests { use super::*; - use crate::pids; + use float_cmp::assert_approx_eq; #[test] fn order_from_str() { - assert_eq!("as1".parse(), Ok(Order::new(1, 0, 0, 0))); - assert_eq!("a1".parse(), Ok(Order::new(0, 1, 0, 0))); - assert_eq!("as1lr1".parse(), Ok(Order::new(1, 0, 1, 0))); - assert_eq!("as1lf1".parse(), Ok(Order::new(1, 0, 0, 1))); + assert_eq!("as1".parse(), Ok(Order::new(1, 0, 0, 0, 0))); + assert_eq!("a1".parse(), Ok(Order::new(0, 1, 0, 0, 0))); + assert_eq!("as1lr1".parse(), Ok(Order::new(1, 0, 1, 0, 0))); + assert_eq!("as1lf1".parse(), Ok(Order::new(1, 0, 0, 1, 0))); + assert_eq!("as1la1".parse(), Ok(Order::new(1, 0, 0, 0, 1))); assert_eq!( "ab12".parse::().unwrap_err().to_string(), "unknown coupling: 'ab'" @@ -539,36 +788,36 @@ mod tests { #[test] fn order_cmp() { let mut orders = [ - Order::new(1, 2, 1, 0), - Order::new(1, 2, 0, 1), - Order::new(1, 2, 0, 0), - Order::new(0, 3, 1, 0), - Order::new(0, 3, 0, 1), - Order::new(0, 3, 0, 0), - Order::new(0, 2, 0, 0), + Order::new(1, 2, 1, 0, 0), + Order::new(1, 2, 0, 1, 0), + Order::new(1, 2, 0, 0, 0), + Order::new(0, 3, 1, 0, 0), + Order::new(0, 3, 0, 1, 0), + Order::new(0, 3, 0, 0, 0), + Order::new(0, 2, 0, 0, 0), ]; orders.sort(); - assert_eq!(orders[0], Order::new(0, 2, 0, 0)); - assert_eq!(orders[1], Order::new(1, 2, 0, 0)); - assert_eq!(orders[2], Order::new(1, 2, 0, 1)); - assert_eq!(orders[3], Order::new(1, 2, 1, 0)); - assert_eq!(orders[4], Order::new(0, 3, 0, 0)); - assert_eq!(orders[5], Order::new(0, 3, 0, 1)); - assert_eq!(orders[6], Order::new(0, 3, 1, 0)); + assert_eq!(orders[0], Order::new(0, 2, 0, 
0, 0)); + assert_eq!(orders[1], Order::new(1, 2, 0, 0, 0)); + assert_eq!(orders[2], Order::new(1, 2, 0, 1, 0)); + assert_eq!(orders[3], Order::new(1, 2, 1, 0, 0)); + assert_eq!(orders[4], Order::new(0, 3, 0, 0, 0)); + assert_eq!(orders[5], Order::new(0, 3, 0, 1, 0)); + assert_eq!(orders[6], Order::new(0, 3, 1, 0, 0)); } #[test] fn order_create_mask() { // Drell—Yan orders let orders = [ - Order::new(0, 2, 0, 0), // LO : alpha^2 - Order::new(1, 2, 0, 0), // NLO QCD : alphas alpha^2 - Order::new(0, 3, 0, 0), // NLO EW : alpha^3 - Order::new(2, 2, 0, 0), // NNLO QCD : alphas^2 alpha^2 - Order::new(1, 3, 0, 0), // NNLO QCD—EW : alphas alpha^3 - Order::new(0, 4, 0, 0), // NNLO EW : alpha^4 + Order::new(0, 2, 0, 0, 0), // LO : alpha^2 + Order::new(1, 2, 0, 0, 0), // NLO QCD : alphas alpha^2 + Order::new(0, 3, 0, 0, 0), // NLO EW : alpha^3 + Order::new(2, 2, 0, 0, 0), // NNLO QCD : alphas^2 alpha^2 + Order::new(1, 3, 0, 0, 0), // NNLO QCD—EW : alphas alpha^3 + Order::new(0, 4, 0, 0, 0), // NNLO EW : alpha^4 ]; assert_eq!( @@ -638,18 +887,18 @@ mod tests { // Top-pair production orders let orders = [ - Order::new(2, 0, 0, 0), // LO QCD : alphas^2 - Order::new(1, 1, 0, 0), // LO QCD—EW : alphas alpha - Order::new(0, 2, 0, 0), // LO EW : alpha^2 - Order::new(3, 0, 0, 0), // NLO QCD : alphas^3 - Order::new(2, 1, 0, 0), // NLO QCD—EW : alphas^2 alpha - Order::new(1, 2, 0, 0), // NLO QCD—EW : alphas alpha^2 - Order::new(0, 3, 0, 0), // NLO EW : alpha^3 - Order::new(4, 0, 0, 0), // NNLO QCD : alphas^4 - Order::new(3, 1, 0, 0), // NNLO QCD—EW : alphas^3 alpha - Order::new(2, 2, 0, 0), // NNLO QCD—EW : alphas^2 alpha^2 - Order::new(1, 3, 0, 0), // NNLO QCD—EW : alphas alpha^3 - Order::new(0, 4, 0, 0), // NNLO EW : alpha^4 + Order::new(2, 0, 0, 0, 0), // LO QCD : alphas^2 + Order::new(1, 1, 0, 0, 0), // LO QCD—EW : alphas alpha + Order::new(0, 2, 0, 0, 0), // LO EW : alpha^2 + Order::new(3, 0, 0, 0, 0), // NLO QCD : alphas^3 + Order::new(2, 1, 0, 0, 0), // NLO QCD—EW : alphas^2 
alpha + Order::new(1, 2, 0, 0, 0), // NLO QCD—EW : alphas alpha^2 + Order::new(0, 3, 0, 0, 0), // NLO EW : alpha^3 + Order::new(4, 0, 0, 0, 0), // NNLO QCD : alphas^4 + Order::new(3, 1, 0, 0, 0), // NNLO QCD—EW : alphas^3 alpha + Order::new(2, 2, 0, 0, 0), // NNLO QCD—EW : alphas^2 alpha^2 + Order::new(1, 3, 0, 0, 0), // NNLO QCD—EW : alphas alpha^3 + Order::new(0, 4, 0, 0, 0), // NNLO EW : alpha^4 ]; assert_eq!( @@ -718,24 +967,11 @@ mod tests { ); } - #[test] - fn channel_translate() { - let channel = Channel::translate(&channel![103, 203, 2.0], &pids::evol_to_pdg_mc_ids); - - assert_eq!( - channel, - channel![ 2, 2, 2.0; 2, -2, -2.0; 2, 1, -2.0; 2, -1, 2.0; - -2, 2, 2.0; -2, -2, -2.0; -2, 1, -2.0; -2, -1, 2.0; - 1, 2, -2.0; 1, -2, 2.0; 1, 1, 2.0; 1, -1, -2.0; - -1, 2, -2.0; -1, -2, 2.0; -1, 1, 2.0; -1, -1, -2.0] - ); - } - #[test] fn channel_from_str() { assert_eq!( str::parse::(" 1 * ( 2 , -2) + 2* (4,-4)").unwrap(), - channel![2, -2, 1.0; 4, -4, 2.0] + channel![1.0 * (2, -2) + 2.0 * (4, -4)] ); assert_eq!( @@ -756,7 +992,7 @@ mod tests { str::parse::(" 1 * ( 2 -2) + 2* (4,-4)") .unwrap_err() .to_string(), - "missing ',' in ' ( 2 -2) '" + "could not parse PID: ' 2 -2', 'invalid digit found in string'" ); assert_eq!( @@ -772,5 +1008,37 @@ mod tests { .to_string(), "missing ')' in ' ( 2, -2 '" ); + + assert_eq!( + str::parse::("1 * (2, 2, 2) + 2 * (4, 4)") + .unwrap_err() + .to_string(), + "PID tuples have different lengths" + ); + } + + #[test] + fn scale_func_form() { + let node_values = [vec![1.0, 2.0, 3.0], vec![4.0, 5.0]]; + let kinematics = [Kinematics::Scale(0), Kinematics::Scale(1)]; + let sff = ScaleFuncForm::QuadraticSum(0, 1); + + let ref_calc = [5.0, 6.0, 6.0, 7.0, 7.0, 8.0]; + let calc = sff.calc(&node_values, &kinematics).into_owned(); + + assert_eq!(calc.len(), ref_calc.len()); + + for (&calc, ref_calc) in calc.iter().zip(ref_calc) { + assert_approx_eq!(f64, calc, ref_calc, ulps = 2); + } + + let scale_dims = [3, 2]; + + assert_eq!(sff.idx(&[0, 0, 
1], &scale_dims), 0); + assert_eq!(sff.idx(&[0, 1, 1], &scale_dims), 1); + assert_eq!(sff.idx(&[1, 0, 1], &scale_dims), 2); + assert_eq!(sff.idx(&[1, 1, 1], &scale_dims), 3); + assert_eq!(sff.idx(&[2, 0, 1], &scale_dims), 4); + assert_eq!(sff.idx(&[2, 1, 1], &scale_dims), 5); } } diff --git a/pineappl/src/convolutions.rs b/pineappl/src/convolutions.rs index ea726c440..989ce1198 100644 --- a/pineappl/src/convolutions.rs +++ b/pineappl/src/convolutions.rs @@ -1,389 +1,356 @@ -//! Module for everything related to luminosity functions. +//! Module for everything related to convolution functions. +use super::boc::Kinematics; +use super::boc::Scales; use super::grid::Grid; use super::pids; -use super::subgrid::{Mu2, Subgrid}; +use super::subgrid::{self, Subgrid, SubgridEnum}; +use itertools::izip; use rustc_hash::FxHashMap; +use serde::{Deserialize, Serialize}; -enum Pdfs<'a> { - Two { - xfx1: &'a mut dyn FnMut(i32, f64, f64) -> f64, - xfx1_cache: FxHashMap<(i32, usize, usize), f64>, - xfx2: &'a mut dyn FnMut(i32, f64, f64) -> f64, - xfx2_cache: FxHashMap<(i32, usize, usize), f64>, - }, - One { - xfx: &'a mut dyn FnMut(i32, f64, f64) -> f64, - xfx_cache: FxHashMap<(i32, usize, usize), f64>, - }, -} +const REN_IDX: usize = 0; +const FAC_IDX: usize = 1; +const FRG_IDX: usize = 2; +const SCALES_CNT: usize = 3; -impl<'a> Pdfs<'a> { - pub fn clear(&mut self) { - match self { - Self::One { xfx_cache, .. } => xfx_cache.clear(), - Self::Two { - xfx1_cache, - xfx2_cache, - .. - } => { - xfx1_cache.clear(); - xfx2_cache.clear(); - } - } - } +struct ConvCache1d<'a> { + xfx: &'a mut dyn FnMut(i32, f64, f64) -> f64, + cache: FxHashMap<(i32, usize, usize), f64>, + conv: Conv, } /// A cache for evaluating PDFs. Methods like [`Grid::convolve`] accept instances of this `struct` /// instead of the PDFs themselves. 
-pub struct LumiCache<'a> { - pdfs: Pdfs<'a>, +pub struct ConvolutionCache<'a> { + caches: Vec>, alphas: &'a mut dyn FnMut(f64) -> f64, alphas_cache: Vec, - mur2_grid: Vec, - muf2_grid: Vec, + mu2: [Vec; SCALES_CNT], x_grid: Vec, - imur2: Vec, - imuf2: Vec, - ix1: Vec, - ix2: Vec, - pdg1: i32, - pdg2: i32, - cc1: i32, - cc2: i32, } -impl<'a> LumiCache<'a> { - /// Construct a luminosity cache with two PDFs, `xfx1` and `xfx2`. The types of hadrons the - /// PDFs correspond to must be given as `pdg1` and `pdg2`. The function to evaluate the - /// strong coupling must be given as `alphas`. The grid that the cache will be used with must - /// be given as `grid`; this parameter determines which of the initial states are hadronic, and - /// if an initial states is not hadronic the corresponding 'PDF' is set to `xfx = x`. If some - /// of the PDFs must be charge-conjugated, this is automatically done in this function. - pub fn with_two( - pdg1: i32, - xfx1: &'a mut dyn FnMut(i32, f64, f64) -> f64, - pdg2: i32, - xfx2: &'a mut dyn FnMut(i32, f64, f64) -> f64, +impl<'a> ConvolutionCache<'a> { + /// TODO + pub fn new( + convolutions: Vec, + xfx: Vec<&'a mut dyn FnMut(i32, f64, f64) -> f64>, alphas: &'a mut dyn FnMut(f64) -> f64, ) -> Self { Self { - pdfs: Pdfs::Two { - xfx1, - xfx1_cache: FxHashMap::default(), - xfx2, - xfx2_cache: FxHashMap::default(), - }, + caches: xfx + .into_iter() + .zip(convolutions) + .map(|(xfx, conv)| ConvCache1d { + xfx, + cache: FxHashMap::default(), + conv, + }) + .collect(), alphas, - alphas_cache: vec![], - mur2_grid: vec![], - muf2_grid: vec![], - x_grid: vec![], - imur2: Vec::new(), - imuf2: Vec::new(), - ix1: Vec::new(), - ix2: Vec::new(), - pdg1, - pdg2, - cc1: 0, - cc2: 0, + alphas_cache: Vec::new(), + mu2: [const { Vec::new() }; SCALES_CNT], + x_grid: Vec::new(), } } - /// Construct a luminosity cache with a single PDF `xfx`. The type of hadron the PDF - /// corresponds to must be given as `pdg`. 
The function to evaluate the strong coupling must be - /// given as `alphas`. The grid that the cache should be used with must be given as `grid`; - /// this parameter determines which of the initial states are hadronic, and if an initial - /// states is not hadronic the corresponding 'PDF' is set to `xfx = x`. If some of the PDFs - /// must be charge-conjugated, this is automatically done in this function. - pub fn with_one( - pdg: i32, - xfx: &'a mut dyn FnMut(i32, f64, f64) -> f64, - alphas: &'a mut dyn FnMut(f64) -> f64, - ) -> Self { - Self { - pdfs: Pdfs::One { - xfx, - xfx_cache: FxHashMap::default(), - }, - alphas, - alphas_cache: vec![], - mur2_grid: vec![], - muf2_grid: vec![], - x_grid: vec![], - imur2: Vec::new(), - imuf2: Vec::new(), - ix1: Vec::new(), - ix2: Vec::new(), - pdg1: pdg, - pdg2: pdg, - cc1: 0, - cc2: 0, - } - } - - pub(crate) fn setup(&mut self, grid: &Grid, xi: &[(f64, f64)]) -> Result<(), ()> { - let convolutions = grid.convolutions(); - - // TODO: the following code only works with exactly two convolutions - assert_eq!(convolutions.len(), 2); - - // do we have to charge-conjugate the initial states? 
- let cc1 = if let Some(pid) = convolutions[0].pid() { - if self.pdg1 == pid { - 1 - } else if self.pdg1 == pids::charge_conjugate_pdg_pid(pid) { - -1 - } else { - // TODO: return a proper error - return Err(()); - } - } else { - 0 - }; - let cc2 = if let Some(pid) = convolutions[1].pid() { - if self.pdg2 == pid { - 1 - } else if self.pdg2 == pids::charge_conjugate_pdg_pid(pid) { - -1 - } else { - // TODO: return a proper error - return Err(()); - } - } else { - 0 - }; - + pub(crate) fn new_grid_conv_cache<'b>( + &'b mut self, + grid: &Grid, + xi: &[(f64, f64, f64)], + ) -> GridConvCache<'a, 'b> { // TODO: try to avoid calling clear self.clear(); + let scales: [_; SCALES_CNT] = grid.scales().into(); + let xi: Vec<_> = (0..SCALES_CNT) + .map(|idx| { + let mut vars: Vec<_> = xi + .iter() + .map(|&x| <[_; SCALES_CNT]>::from(x)[idx]) + .collect(); + vars.sort_by(f64::total_cmp); + vars.dedup(); + vars + }) + .collect(); + + for (result, scale, xi) in izip!(&mut self.mu2, scales, xi) { + result.clear(); + result.extend( + grid.subgrids() + .iter() + .filter(|subgrid| !subgrid.is_empty()) + .flat_map(|subgrid| { + scale + .calc(&subgrid.node_values(), grid.kinematics()) + .into_owned() + }) + .flat_map(|scale| xi.iter().map(move |&xi| xi * xi * scale)), + ); + result.sort_by(f64::total_cmp); + result.dedup(); + } + let mut x_grid: Vec<_> = grid .subgrids() .iter() - .filter_map(|subgrid| { - if subgrid.is_empty() { - None - } else { - let mut vec = subgrid.x1_grid().into_owned(); - vec.extend_from_slice(&subgrid.x2_grid()); - Some(vec) - } + .filter(|subgrid| !subgrid.is_empty()) + .flat_map(|subgrid| { + grid.kinematics() + .iter() + .zip(subgrid.node_values()) + .filter(|(kin, _)| matches!(kin, Kinematics::X(_))) + .flat_map(|(_, node_values)| node_values) }) - .flatten() .collect(); - x_grid.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); + x_grid.sort_by(f64::total_cmp); x_grid.dedup(); - let mut mur2_grid: Vec<_> = grid - .subgrids() + 
self.alphas_cache = self.mu2[REN_IDX] .iter() - .filter_map(|subgrid| { - if subgrid.is_empty() { - None - } else { - Some(subgrid.mu2_grid().into_owned()) - } - }) - .flatten() - .flat_map(|Mu2 { ren, .. }| { - xi.iter() - .map(|(xir, _)| xir * xir * ren) - .collect::>() - }) + .map(|&mur2| (self.alphas)(mur2)) .collect(); - mur2_grid.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); - mur2_grid.dedup(); + self.x_grid = x_grid; - let mut muf2_grid: Vec<_> = grid - .subgrids() + let perm = grid + .convolutions() .iter() - .filter_map(|subgrid| { - if subgrid.is_empty() { - None - } else { - Some(subgrid.mu2_grid().into_owned()) - } - }) - .flatten() - .flat_map(|Mu2 { fac, .. }| { - xi.iter() - .map(|(_, xif)| xif * xif * fac) - .collect::>() + .enumerate() + .map(|(max_idx, grid_conv)| { + self.caches + .iter() + .take(max_idx + 1) + .enumerate() + .rev() + .find_map(|(idx, ConvCache1d { conv, .. })| { + if grid_conv == conv { + Some((idx, false)) + } else if *grid_conv == conv.cc() { + Some((idx, true)) + } else { + None + } + }) + // TODO: convert `unwrap` to `Err` + .unwrap() }) .collect(); - muf2_grid.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); - muf2_grid.dedup(); - - self.alphas_cache = mur2_grid.iter().map(|&mur2| (self.alphas)(mur2)).collect(); - - self.mur2_grid = mur2_grid; - self.muf2_grid = muf2_grid; - self.x_grid = x_grid; - self.cc1 = cc1; - self.cc2 = cc2; - Ok(()) - } - - /// Return the PDF (multiplied with `x`) for the first initial state. - pub fn xfx1(&mut self, pdg_id: i32, ix1: usize, imu2: usize) -> f64 { - let ix1 = self.ix1[ix1]; - let x = self.x_grid[ix1]; - if self.cc1 == 0 { - x - } else { - let imuf2 = self.imuf2[imu2]; - let muf2 = self.muf2_grid[imuf2]; - let pid = if self.cc1 == 1 { - pdg_id - } else { - pids::charge_conjugate_pdg_pid(pdg_id) - }; - let (xfx, xfx_cache) = match &mut self.pdfs { - Pdfs::One { xfx, xfx_cache, .. } => (xfx, xfx_cache), - Pdfs::Two { - xfx1, xfx1_cache, .. 
- } => (xfx1, xfx1_cache), - }; - *xfx_cache - .entry((pid, ix1, imuf2)) - .or_insert_with(|| xfx(pid, x, muf2)) - } - } - - /// Return the PDF (multiplied with `x`) for the second initial state. - pub fn xfx2(&mut self, pdg_id: i32, ix2: usize, imu2: usize) -> f64 { - let ix2 = self.ix2[ix2]; - let x = self.x_grid[ix2]; - if self.cc2 == 0 { - x - } else { - let imuf2 = self.imuf2[imu2]; - let muf2 = self.muf2_grid[imuf2]; - let pid = if self.cc2 == 1 { - pdg_id - } else { - pids::charge_conjugate_pdg_pid(pdg_id) - }; - let (xfx, xfx_cache) = match &mut self.pdfs { - Pdfs::One { xfx, xfx_cache, .. } => (xfx, xfx_cache), - Pdfs::Two { - xfx2, xfx2_cache, .. - } => (xfx2, xfx2_cache), - }; - *xfx_cache - .entry((pid, ix2, imuf2)) - .or_insert_with(|| xfx(pid, x, muf2)) + GridConvCache { + cache: self, + perm, + imu2: [const { Vec::new() }; SCALES_CNT], + scales: grid.scales().clone(), + ix: Vec::new(), + scale_dims: Vec::new(), } } - /// Return the strong coupling for the renormalization scale set with [`LumiCache::set_grids`], - /// in the grid `mu2_grid` at the index `imu2`. - #[must_use] - pub fn alphas(&self, imu2: usize) -> f64 { - self.alphas_cache[self.imur2[imu2]] - } - /// Clears the cache. pub fn clear(&mut self) { self.alphas_cache.clear(); - self.pdfs.clear(); - self.mur2_grid.clear(); - self.muf2_grid.clear(); + for xfx_cache in &mut self.caches { + xfx_cache.cache.clear(); + } + for scales in &mut self.mu2 { + scales.clear(); + } self.x_grid.clear(); } +} + +/// TODO +pub struct GridConvCache<'a, 'b> { + cache: &'b mut ConvolutionCache<'a>, + perm: Vec<(usize, bool)>, + imu2: [Vec; SCALES_CNT], + scales: Scales, + ix: Vec>, + scale_dims: Vec, +} + +impl GridConvCache<'_, '_> { + /// TODO + pub fn as_fx_prod(&mut self, pdg_ids: &[i32], as_order: u8, indices: &[usize]) -> f64 { + // TODO: here we assume that + // - indices[0] is the (squared) factorization scale, + // - indices[1] is x1 and + // - indices[2] is x2. + // Lift this restriction! 
+ let x_start = indices.len() - pdg_ids.len(); + let indices_scales = &indices[0..x_start]; + let indices_x = &indices[x_start..]; + + let ix = self.ix.iter().zip(indices_x).map(|(ix, &index)| ix[index]); + let idx_pid = self.perm.iter().zip(pdg_ids).map(|(&(idx, cc), &pdg_id)| { + ( + idx, + if cc { + pids::charge_conjugate_pdg_pid(pdg_id) + } else { + pdg_id + }, + ) + }); + + let fx_prod: f64 = ix + .zip(idx_pid) + .map(|(ix, (idx, pid))| { + let ConvCache1d { xfx, cache, conv } = &mut self.cache.caches[idx]; + + let (scale, scale_idx) = match conv.conv_type() { + ConvType::UnpolPDF | ConvType::PolPDF => ( + FAC_IDX, + self.scales.fac.idx(indices_scales, &self.scale_dims), + ), + ConvType::UnpolFF | ConvType::PolFF => ( + FRG_IDX, + self.scales.frg.idx(indices_scales, &self.scale_dims), + ), + }; + + let imu2 = self.imu2[scale][scale_idx]; + let mu2 = self.cache.mu2[scale][imu2]; + + *cache.entry((pid, ix, imu2)).or_insert_with(|| { + let x = self.cache.x_grid[ix]; + xfx(pid, x, mu2) / x + }) + }) + .product(); + let alphas_powers = if as_order != 0 { + let ren_scale_idx = self.scales.ren.idx(indices_scales, &self.scale_dims); + self.cache.alphas_cache[self.imu2[REN_IDX][ren_scale_idx]].powi(as_order.into()) + } else { + 1.0 + }; + + fx_prod * alphas_powers + } /// Set the grids. - pub fn set_grids( - &mut self, - mu2_grid: &[Mu2], - x1_grid: &[f64], - x2_grid: &[f64], - xir: f64, - xif: f64, - ) { - self.imur2 = mu2_grid - .iter() - .map(|Mu2 { ren, .. 
}| { - self.mur2_grid + pub fn set_grids(&mut self, grid: &Grid, subgrid: &SubgridEnum, xi: (f64, f64, f64)) { + let node_values = subgrid.node_values(); + let kinematics = grid.kinematics(); + let scales: [_; SCALES_CNT] = grid.scales().into(); + let xi: [_; SCALES_CNT] = xi.into(); + + for (result, values, scale, xi) in izip!(&mut self.imu2, &self.cache.mu2, scales, xi) { + result.clear(); + result.extend(scale.calc(&node_values, kinematics).iter().map(|s| { + values .iter() - .position(|&mur2| mur2 == xir * xir * ren) + .position(|&value| subgrid::node_value_eq(value, xi * xi * s)) + // UNWRAP: if this fails, `new_grid_conv_cache` hasn't been called properly .unwrap_or_else(|| unreachable!()) - }) - .collect(); - self.imuf2 = mu2_grid - .iter() - .map(|Mu2 { fac, .. }| { - self.muf2_grid + })); + } + + self.ix = (0..grid.convolutions().len()) + .map(|idx| { + kinematics .iter() - .position(|&muf2| muf2 == xif * xif * fac) + .zip(&node_values) + .find_map(|(kin, node_values)| { + matches!(kin, &Kinematics::X(index) if index == idx).then_some(node_values) + }) + // UNWRAP: guaranteed by the grid constructor .unwrap_or_else(|| unreachable!()) - }) - .collect(); - self.ix1 = x1_grid - .iter() - .map(|x1| { - self.x_grid .iter() - .position(|x| x1 == x) - .unwrap_or_else(|| unreachable!()) + .map(|&xd| { + self.cache + .x_grid + .iter() + .position(|&x| subgrid::node_value_eq(xd, x)) + .unwrap_or_else(|| unreachable!()) + }) + .collect() }) .collect(); - self.ix2 = x2_grid + self.scale_dims = grid + .kinematics() .iter() - .map(|x2| { - self.x_grid - .iter() - .position(|x| x2 == x) - .unwrap_or_else(|| unreachable!()) + .zip(node_values) + .filter_map(|(kin, node_values)| { + matches!(kin, Kinematics::Scale(_)).then_some(node_values.len()) }) .collect(); } } +/// TODO +#[repr(C)] +#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub enum ConvType { + /// Unpolarized parton distribution function. 
+ UnpolPDF, + /// Polarized parton distribution function. + PolPDF, + /// Unpolarized fragmentation function. + UnpolFF, + /// Polarized fragmentation function. + PolFF, +} + +impl ConvType { + /// TODO + #[must_use] + pub const fn new(polarized: bool, time_like: bool) -> Self { + match (polarized, time_like) { + (false, false) => Self::UnpolPDF, + (false, true) => Self::UnpolFF, + (true, false) => Self::PolPDF, + (true, true) => Self::PolFF, + } + } +} + /// Data type that indentifies different types of convolutions. -#[derive(Debug, Eq, PartialEq)] -pub enum Convolution { - // TODO: eventually get rid of this value - /// No convolution. - None, - /// Unpolarized parton distribution function. The integer denotes the type of hadron with a PDG - /// MC ID. - UnpolPDF(i32), - /// Polarized parton distribution function. The integer denotes the type of hadron with a PDG - /// MC ID. - PolPDF(i32), - /// Unpolarized fragmentation function. The integer denotes the type of hadron with a PDG MC - /// ID. - UnpolFF(i32), - /// Polarized fragmentation function. The integer denotes the type of hadron with a PDG MC ID. - PolFF(i32), +#[repr(C)] +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct Conv { + conv_type: ConvType, + pid: i32, } -impl Convolution { - /// Return the convolution if the PID is charged conjugated. +impl Conv { + /// Constructor. 
#[must_use] - pub const fn charge_conjugate(&self) -> Self { - match *self { - Self::None => Self::None, - Self::UnpolPDF(pid) => Self::UnpolPDF(pids::charge_conjugate_pdg_pid(pid)), - Self::PolPDF(pid) => Self::PolPDF(pids::charge_conjugate_pdg_pid(pid)), - Self::UnpolFF(pid) => Self::UnpolFF(pids::charge_conjugate_pdg_pid(pid)), - Self::PolFF(pid) => Self::PolFF(pids::charge_conjugate_pdg_pid(pid)), + pub const fn new(conv_type: ConvType, pid: i32) -> Self { + Self { conv_type, pid } + } + + /// TODO + #[must_use] + pub const fn with_pid(&self, pid: i32) -> Self { + Self { + conv_type: self.conv_type, + pid, } } - /// Return the PID of the convolution if it has any. + /// Return the convolution if the PID is charged conjugated. #[must_use] - pub const fn pid(&self) -> Option { - match *self { - Self::None => None, - Self::UnpolPDF(pid) | Self::PolPDF(pid) | Self::UnpolFF(pid) | Self::PolFF(pid) => { - Some(pid) - } + pub const fn cc(&self) -> Self { + Self { + conv_type: self.conv_type, + pid: pids::charge_conjugate_pdg_pid(self.pid), } } + + /// Return the PID of the convolution. + #[must_use] + pub const fn pid(&self) -> i32 { + self.pid + } + + /// Return the convolution type of this convolution. 
+ #[must_use] + pub const fn conv_type(&self) -> ConvType { + self.conv_type + } } #[cfg(test)] @@ -391,32 +358,30 @@ mod tests { use super::*; #[test] - fn convolution_charge_conjugate() { - assert_eq!(Convolution::None.charge_conjugate(), Convolution::None); + fn conv_cc() { assert_eq!( - Convolution::UnpolPDF(2212).charge_conjugate(), - Convolution::UnpolPDF(-2212) + Conv::new(ConvType::UnpolPDF, 2212).cc(), + Conv::new(ConvType::UnpolPDF, -2212) ); assert_eq!( - Convolution::PolPDF(2212).charge_conjugate(), - Convolution::PolPDF(-2212) + Conv::new(ConvType::PolPDF, 2212).cc(), + Conv::new(ConvType::PolPDF, -2212) ); assert_eq!( - Convolution::UnpolFF(2212).charge_conjugate(), - Convolution::UnpolFF(-2212) + Conv::new(ConvType::UnpolFF, 2212).cc(), + Conv::new(ConvType::UnpolFF, -2212) ); assert_eq!( - Convolution::PolFF(2212).charge_conjugate(), - Convolution::PolFF(-2212) + Conv::new(ConvType::PolFF, 2212).cc(), + Conv::new(ConvType::PolFF, -2212) ); } #[test] - fn convolution_pid() { - assert_eq!(Convolution::None.pid(), None); - assert_eq!(Convolution::UnpolPDF(2212).pid(), Some(2212)); - assert_eq!(Convolution::PolPDF(2212).pid(), Some(2212)); - assert_eq!(Convolution::UnpolFF(2212).pid(), Some(2212)); - assert_eq!(Convolution::PolFF(2212).pid(), Some(2212)); + fn conv_pid() { + assert_eq!(Conv::new(ConvType::UnpolPDF, 2212).pid(), 2212); + assert_eq!(Conv::new(ConvType::PolPDF, 2212).pid(), 2212); + assert_eq!(Conv::new(ConvType::UnpolFF, 2212).pid(), 2212); + assert_eq!(Conv::new(ConvType::PolFF, 2212).pid(), 2212); } } diff --git a/pineappl/src/empty_subgrid.rs b/pineappl/src/empty_subgrid.rs index 79640e655..bacc4ced2 100644 --- a/pineappl/src/empty_subgrid.rs +++ b/pineappl/src/empty_subgrid.rs @@ -1,9 +1,8 @@ //! 
TODO -use super::grid::Ntuple; -use super::subgrid::{Mu2, Stats, Subgrid, SubgridEnum, SubgridIndexedIter}; +use super::interpolation::Interp; +use super::subgrid::{Stats, Subgrid, SubgridEnum, SubgridIndexedIter}; use serde::{Deserialize, Serialize}; -use std::borrow::Cow; use std::iter; /// A subgrid type that is always empty. @@ -11,37 +10,23 @@ use std::iter; pub struct EmptySubgridV1; impl Subgrid for EmptySubgridV1 { - fn convolve( - &self, - _: &[f64], - _: &[f64], - _: &[Mu2], - _: &mut dyn FnMut(usize, usize, usize) -> f64, - ) -> f64 { - 0.0 - } - - fn fill(&mut self, _: &Ntuple) { + fn fill(&mut self, _: &[Interp], _: &[f64], _: f64) { panic!("EmptySubgridV1 doesn't support the fill operation"); } - fn mu2_grid(&self) -> Cow<[Mu2]> { - Cow::Borrowed(&[]) + fn node_values(&self) -> Vec> { + Vec::new() } - fn x1_grid(&self) -> Cow<[f64]> { - Cow::Borrowed(&[]) - } - - fn x2_grid(&self) -> Cow<[f64]> { - Cow::Borrowed(&[]) + fn shape(&mut self) -> &[usize] { + panic!("EmptySubgridV1 doesn't have a shape"); } fn is_empty(&self) -> bool { true } - fn merge(&mut self, subgrid: &mut SubgridEnum, _: bool) { + fn merge(&mut self, subgrid: &SubgridEnum, _: Option<(usize, usize)>) { assert!( subgrid.is_empty(), "EmptySubgridV1 doesn't support the merge operation for non-empty subgrids" @@ -50,11 +35,7 @@ impl Subgrid for EmptySubgridV1 { fn scale(&mut self, _: f64) {} - fn symmetrize(&mut self) {} - - fn clone_empty(&self) -> SubgridEnum { - Self.into() - } + fn symmetrize(&mut self, _: usize, _: usize) {} fn indexed_iter(&self) -> SubgridIndexedIter { Box::new(iter::empty()) @@ -70,24 +51,24 @@ impl Subgrid for EmptySubgridV1 { } } - fn static_scale(&self) -> Option { - None - } + fn optimize_nodes(&mut self) {} } #[cfg(test)] mod tests { use super::*; + use crate::import_subgrid::ImportSubgridV1; + use crate::packed_array::PackedArray; + use crate::v0; #[test] fn create_empty() { let mut subgrid = EmptySubgridV1; - assert_eq!(subgrid.convolve(&[], &[], &[], &mut 
|_, _, _| 0.0), 0.0,); assert!(subgrid.is_empty()); - subgrid.merge(&mut EmptySubgridV1.into(), false); + subgrid.merge(&EmptySubgridV1.into(), None); subgrid.scale(2.0); - subgrid.symmetrize(); - assert!(subgrid.clone_empty().is_empty()); + subgrid.symmetrize(1, 2); + subgrid.optimize_nodes(); assert_eq!( subgrid.stats(), Stats { @@ -98,33 +79,32 @@ mod tests { bytes_per_value: 0, } ); - assert_eq!(subgrid.static_scale(), None); } #[test] #[should_panic(expected = "EmptySubgridV1 doesn't support the fill operation")] fn fill() { let mut subgrid = EmptySubgridV1; - subgrid.fill(&Ntuple { - x1: 0.0, - x2: 0.0, - q2: 0.0, - weight: 0.0, - }); + subgrid.fill(&v0::default_interps(2), &[0.0; 3], 0.0); } #[test] - fn q2_grid() { - assert!(EmptySubgridV1.mu2_grid().is_empty()); - } + #[should_panic( + expected = "EmptySubgridV1 doesn't support the merge operation for non-empty subgrids" + )] + fn merge_non_empty() { + let mut subgrid_lhs = EmptySubgridV1; - #[test] - fn x1_grid() { - assert!(EmptySubgridV1.x1_grid().is_empty()); + let mut array = PackedArray::new(vec![1, 1]); + array[0] = 1.0; + let node_values = vec![vec![1.0]; 2]; + let subgrid_rhs = ImportSubgridV1::new(array, node_values).into(); + + subgrid_lhs.merge(&subgrid_rhs, None); } #[test] - fn x2_grid() { - assert!(EmptySubgridV1.x2_grid().is_empty()); + fn node_values() { + assert!(EmptySubgridV1.node_values().is_empty()); } } diff --git a/pineappl/src/evolution.rs b/pineappl/src/evolution.rs index 83e57c3d2..4c19e61c3 100644 --- a/pineappl/src/evolution.rs +++ b/pineappl/src/evolution.rs @@ -1,28 +1,19 @@ -//! Supporting classes and functions for [`Grid::evolve`]. +//! Supporting classes and functions for [`Grid::evolve_with_slice_iter`]. 
-use super::boc::{Channel, Order}; -use super::channel; -use super::convolutions::Convolution; +use super::boc::{Channel, Kinematics, Order}; +use super::convolutions::ConvType; use super::grid::{Grid, GridError}; -use super::import_only_subgrid::ImportOnlySubgridV2; +use super::import_subgrid::ImportSubgridV1; +use super::packed_array::PackedArray; use super::pids::PidBasis; -use super::sparse_array3::SparseArray3; -use super::subgrid::{Mu2, Subgrid, SubgridEnum}; +use super::subgrid::{self, Subgrid, SubgridEnum}; use float_cmp::approx_eq; use itertools::izip; use itertools::Itertools; use ndarray::linalg; -use ndarray::{s, Array1, Array2, Array3, ArrayView1, ArrayView4, Axis}; +use ndarray::{s, Array1, Array2, Array3, ArrayD, ArrayView1, ArrayView4, Axis, Ix1, Ix2}; use std::iter; -/// Number of ULPS used to de-duplicate grid values in [`Grid::evolve_info`]. -pub(crate) const EVOLVE_INFO_TOL_ULPS: i64 = 256; - -/// Number of ULPS used to search for grid values in this module. This value must be a large-enough -/// multiple of [`EVOLVE_INFO_TOL_ULPS`], because otherwise similar values are not found in -/// [`Grid::evolve`]. See for details. -const EVOLUTION_TOL_ULPS: i64 = 4 * EVOLVE_INFO_TOL_ULPS; - /// This structure captures the information needed to create an evolution kernel operator (EKO) for /// a specific [`Grid`]. pub struct EvolveInfo { @@ -36,62 +27,6 @@ pub struct EvolveInfo { pub ren1: Vec, } -/// Information about the evolution kernel operator (EKO) passed to [`Grid::evolve`] as `operator`, -/// which is used to convert a [`Grid`] into an [`FkTable`]. The dimensions of the EKO must -/// correspond to the values given in [`fac1`], [`pids0`], [`x0`], [`pids1`] and [`x1`], exactly in -/// this order. Members with a `1` are defined at the squared factorization scales given in -/// [`fac1`] (often called process scales) and are found in the [`Grid`] that [`Grid::evolve`] is -/// called with. 
Members with a `0` are defined at the squared factorization scale [`fac0`] (often -/// called fitting scale or starting scale) and are found in the [`FkTable`] resulting from -/// [`Grid::evolve`]. -/// -/// The EKO may convert a `Grid` from a basis given by the particle identifiers [`pids1`] to a -/// possibly different basis given by [`pids0`]. This basis must also be identified using -/// [`pid_basis`], which tells [`FkTable::convolve`] how to perform a convolution. The members -/// [`ren1`] and [`alphas`] must be the strong couplings given at the respective renormalization -/// scales. Finally, [`xir`] and [`xif`] can be used to vary the renormalization and factorization -/// scales, respectively, around their central values. -/// -/// [`FkTable::convolve`]: super::fk_table::FkTable::convolve -/// [`FkTable`]: super::fk_table::FkTable -/// [`alphas`]: Self::alphas -/// [`fac0`]: Self::fac0 -/// [`fac1`]: Self::fac1 -/// [`pid_basis`]: Self::pid_basis -/// [`pids0`]: Self::pids0 -/// [`pids1`]: Self::pids1 -/// [`ren1`]: Self::ren1 -/// [`x0`]: Self::x0 -/// [`x1`]: Self::x1 -/// [`xif`]: Self::xif -/// [`xir`]: Self::xir -pub struct OperatorInfo { - /// Squared factorization scale of the `FkTable`. - pub fac0: f64, - /// Particle identifiers of the `FkTable`. - pub pids0: Vec, - /// `x`-grid coordinates of the `FkTable` - pub x0: Vec, - /// Squared factorization scales of the `Grid`. - pub fac1: Vec, - /// Particle identifiers of the `Grid`. If the `Grid` contains more particle identifiers than - /// given here, the contributions of them are silently ignored. - pub pids1: Vec, - /// `x`-grid coordinates of the `Grid`. - pub x1: Vec, - - /// Renormalization scales of the `Grid`. - pub ren1: Vec, - /// Strong couplings corresponding to the order given in [`ren1`](Self::ren1). - pub alphas: Vec, - /// Multiplicative factor for the central renormalization scale. - pub xir: f64, - /// Multiplicative factor for the central factorization scale. 
- pub xif: f64, - /// Particle ID basis for `FkTable`. - pub pid_basis: PidBasis, -} - /// Information about the evolution kernel operator slice (EKO) passed to /// [`Grid::evolve_with_slice_iter`](super::grid::Grid::evolve_with_slice_iter) as `operator`, /// which is used to convert a [`Grid`] into an [`FkTable`](super::fk_table::FkTable). The @@ -101,7 +36,7 @@ pub struct OperatorInfo { /// `fac1` (often called process scale) and are found in the [`Grid`] that /// `Grid::evolve_with_slice_iter` is called with. Members with a `0` are defined at the squared /// factorization scale [`fac0`](Self::fac0) (often called fitting scale or starting scale) and are -/// found in the `FkTable` resulting from [`Grid::evolve`]. +/// found in the `FkTable` resulting from [`Grid::evolve_with_slice_iter`]. /// /// The EKO slice may convert a `Grid` from a basis given by the particle identifiers `pids1` to a /// possibly different basis given by `pids0`. This basis must also be identified using @@ -109,13 +44,13 @@ pub struct OperatorInfo { /// [`FkTable::convolve`](super::fk_table::FkTable::convolve) how to perform a convolution. #[derive(Clone)] pub struct OperatorSliceInfo { - /// Squared factorization scale of the `FkTable`. + /// Squared factorization/fragmentation scale of the `FkTable`. pub fac0: f64, /// Particle identifiers of the `FkTable`. pub pids0: Vec, /// `x`-grid coordinates of the `FkTable` pub x0: Vec, - /// Squared factorization scale of the slice of `Grid` that should be evolved. + /// Squared factorization/fragmentation scale of the slice of `Grid` that should be evolved. pub fac1: f64, /// Particle identifiers of the `Grid`. If the `Grid` contains more particle identifiers than /// given here, the contributions of them are silently ignored. @@ -125,6 +60,8 @@ pub struct OperatorSliceInfo { /// Particle ID basis for `FkTable`. 
pub pid_basis: PidBasis, + /// TODO + pub conv_type: ConvType, } /// A mapping of squared renormalization scales in `ren1` to strong couplings in `alphas`. The @@ -145,16 +82,16 @@ impl AlphasTable { .subgrids() .iter() .flat_map(|subgrid| { - subgrid - .mu2_grid() - .iter() - .map(|Mu2 { ren, .. }| xir * xir * ren) - .collect::>() + grid.scales() + .ren + .calc(&subgrid.node_values(), grid.kinematics()) + .into_owned() + .into_iter() + .map(|ren| xir * xir * ren) }) .collect(); - // UNWRAP: if we can't sort numbers the grid is fishy - ren1.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); - ren1.dedup(); + ren1.sort_by(f64::total_cmp); + ren1.dedup_by(subgrid::node_value_eq_ref_mut); let ren1 = ren1; let alphas: Vec<_> = ren1.iter().map(|&mur2| alphas(mur2)).collect(); @@ -166,9 +103,9 @@ fn gluon_has_pid_zero(grid: &Grid) -> bool { // if there are any PID zero particles ... grid.channels() .iter() - .any(|entry| entry.entry().iter().any(|&(a, b, _)| (a == 0) || (b == 0))) + .any(|entry| entry.entry().iter().any(|(pids, _)| pids.iter().any(|&pid| pid == 0))) // and if the particle IDs are encoded using PDG MC IDs - && grid.pid_basis() == PidBasis::Pdg + && *grid.pid_basis() == PidBasis::Pdg } type Pid01IndexTuples = Vec<(usize, usize)>; @@ -222,14 +159,6 @@ fn pid_slices( Ok((pid_indices, pids)) } -fn channels0_with_one(pids: &[(i32, i32)]) -> Vec { - let mut pids0: Vec<_> = pids.iter().map(|&(pid0, _)| pid0).collect(); - pids0.sort_unstable(); - pids0.dedup(); - - pids0 -} - fn operator_slices( operator: &ArrayView4, info: &OperatorSliceInfo, @@ -242,7 +171,7 @@ fn operator_slices( .map(|&x1p| { info.x1 .iter() - .position(|&x1| approx_eq!(f64, x1p, x1, ulps = EVOLUTION_TOL_ULPS)) + .position(|&x1| subgrid::node_value_eq(x1p, x1)) .ok_or_else(|| { GridError::EvolutionFailure(format!("no operator for x = {x1p} found")) }) @@ -266,41 +195,61 @@ fn operator_slices( Ok(operators) } -type X1aX1bOp2Tuple = (Vec>, Option>); +type X1aX1bOpDTuple = 
(Vec>, Option>); -fn ndarray_from_subgrid_orders_slice( +fn ndarray_from_subgrid_orders_slice_many( + grid: &Grid, fac1: f64, + kinematics: &[Kinematics], subgrids: &ArrayView1, orders: &[Order], order_mask: &[bool], - (xir, xif): (f64, f64), + (xir, xif, xia): (f64, f64, f64), alphas_table: &AlphasTable, -) -> Result { - // TODO: skip empty subgrids +) -> Result { + // TODO: remove these assumptions from the following code + assert_eq!(grid.kinematics()[0], Kinematics::Scale(0)); + assert_eq!( + grid.kinematics()[1..] + .iter() + .map(|kin| match kin { + &Kinematics::X(idx) => idx, + Kinematics::Scale(_) => unreachable!(), + }) + .collect::>(), + (0..(grid.kinematics().len() - 1)).collect::>() + ); - let mut x1_a: Vec<_> = subgrids - .iter() - .enumerate() - .filter(|(index, _)| order_mask.get(*index).copied().unwrap_or(true)) - .flat_map(|(_, subgrid)| subgrid.x1_grid().into_owned()) - .collect(); - let mut x1_b: Vec<_> = subgrids + // create a Vec of all x values for each dimension + let mut x1n: Vec<_> = kinematics .iter() .enumerate() - .filter(|(index, _)| order_mask.get(*index).copied().unwrap_or(true)) - .flat_map(|(_, subgrid)| subgrid.x2_grid().into_owned()) + .filter_map(|(idx, kin)| matches!(kin, Kinematics::X(_)).then_some(idx)) + .map(|kin_idx| { + subgrids + .iter() + .enumerate() + .filter(|&(ord_idx, subgrid)| { + order_mask.get(ord_idx).copied().unwrap_or(true) + // TODO: empty subgrids don't have node values + && !subgrid.is_empty() + }) + .flat_map(|(_, subgrid)| subgrid.node_values()[kin_idx].clone()) + .collect::>() + }) .collect(); - x1_a.sort_by(f64::total_cmp); - x1_a.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLUTION_TOL_ULPS)); - x1_b.sort_by(f64::total_cmp); - x1_b.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLUTION_TOL_ULPS)); + for x1 in &mut x1n { + x1.sort_by(f64::total_cmp); + x1.dedup_by(subgrid::node_value_eq_ref_mut); + } - let mut array = Array2::::zeros((x1_a.len(), x1_b.len())); + let dim: Vec<_> = 
x1n.iter().map(Vec::len).collect(); + let mut array = ArrayD::::zeros(dim); let mut zero = true; + let mut x1_idx = vec![0; grid.convolutions().len()]; - // add subgrids for different orders, but the same bin and lumi, using the right - // couplings + // for the same bin and channel, sum subgrids of different orders, using the right couplings for (subgrid, order) in subgrids .iter() .zip(orders.iter()) @@ -327,34 +276,51 @@ fn ndarray_from_subgrid_orders_slice( logs *= (xif * xif).ln(); } - // TODO: use `try_collect` once stabilized - let xa_indices: Vec<_> = subgrid - .x1_grid() + if order.logxia > 0 { + if approx_eq!(f64, xia, 1.0, ulps = 4) { + continue; + } + + logs *= (xia * xia).ln(); + } + + let node_values = subgrid.node_values(); + let scale_dims: Vec<_> = grid + .kinematics() .iter() - .map(|&xa| { - x1_a.iter() - .position(|&x1a| approx_eq!(f64, x1a, xa, ulps = EVOLUTION_TOL_ULPS)) - .ok_or_else(|| { - GridError::EvolutionFailure(format!("no operator for x1 = {xa} found")) - }) + .zip(&node_values) + .filter_map(|(kin, node_values)| { + matches!(kin, Kinematics::Scale(_)).then_some(node_values.len()) }) - .collect::>()?; - let xb_indices: Vec<_> = subgrid - .x2_grid() + .collect(); + + let x1_indices: Vec> = kinematics .iter() - .map(|&xb| { - x1_b.iter() - .position(|&x1b| approx_eq!(f64, x1b, xb, ulps = EVOLUTION_TOL_ULPS)) - .ok_or_else(|| { - GridError::EvolutionFailure(format!("no operator for x1 = {xb} found")) + .enumerate() + .filter_map(|(idx, kin)| matches!(kin, Kinematics::X(_)).then_some(idx)) + .zip(&x1n) + .map(|(kin_idx, x1)| { + node_values[kin_idx] + .iter() + .map(|&xs| { + x1.iter() + .position(|&x| subgrid::node_value_eq(x, xs)) + // UNWRAP: `x1n` contains all x-values, so we must find each `x` + .unwrap() }) + .collect() }) - .collect::>()?; + .collect(); + + let rens = grid.scales().ren.calc(&node_values, grid.kinematics()); + let facs = grid.scales().fac.calc(&node_values, grid.kinematics()); - for ((ifac1, ix1, ix2), value) in 
subgrid.indexed_iter() { - let Mu2 { ren, fac } = subgrid.mu2_grid()[ifac1]; + for (indices, value) in subgrid.indexed_iter() { + // TODO: implement evolution for non-zero fragmentation scales + let ren = rens[grid.scales().ren.idx(&indices, &scale_dims)]; + let fac = facs[grid.scales().fac.idx(&indices, &scale_dims)]; - if !approx_eq!(f64, xif * xif * fac, fac1, ulps = EVOLUTION_TOL_ULPS) { + if !subgrid::node_value_eq(xif * xif * fac, fac1) { continue; } @@ -366,11 +332,9 @@ fn ndarray_from_subgrid_orders_slice( .ren1 .iter() .zip(alphas_table.alphas.iter()) - .find_map(|(&ren1, &alphas)| { - approx_eq!(f64, ren1, mur2, ulps = EVOLUTION_TOL_ULPS).then(|| alphas) - }) + .find_map(|(&ren1, &alphas)| subgrid::node_value_eq(ren1, mur2).then_some(alphas)) { - alphas.powi(order.alphas.try_into().unwrap()) + alphas.powi(order.alphas.into()) } else { return Err(GridError::EvolutionFailure(format!( "no alphas for mur2 = {mur2} found" @@ -379,280 +343,25 @@ fn ndarray_from_subgrid_orders_slice( zero = false; - array[[xa_indices[ix1], xb_indices[ix2]]] += als * logs * value; - } - } - - Ok((vec![x1_a, x1_b], (!zero).then_some(array))) -} - -pub(crate) fn evolve_slice_with_one( - grid: &Grid, - operator: &ArrayView4, - info: &OperatorSliceInfo, - order_mask: &[bool], - xi: (f64, f64), - alphas_table: &AlphasTable, -) -> Result<(Array3, Vec), GridError> { - let gluon_has_pid_zero = gluon_has_pid_zero(grid); - let has_pdf1 = grid.convolutions()[0] != Convolution::None; - - let (pid_indices, pids) = pid_slices(operator, info, gluon_has_pid_zero, &|pid| { - grid.channels() - .iter() - .flat_map(Channel::entry) - .any(|&(a, b, _)| if has_pdf1 { a } else { b } == pid) - })?; - - let channels0 = channels0_with_one(&pids); - let mut sub_fk_tables = Vec::with_capacity(grid.bin_info().bins() * channels0.len()); - let new_axis = if has_pdf1 { 2 } else { 1 }; - - let mut last_x1 = Vec::new(); - let mut ops = Vec::new(); - - for subgrids_ol in grid.subgrids().axis_iter(Axis(1)) { - let 
mut tables = vec![Array1::zeros(info.x0.len()); channels0.len()]; - - for (subgrids_o, channel1) in subgrids_ol.axis_iter(Axis(1)).zip(grid.channels()) { - let (mut x1, array) = ndarray_from_subgrid_orders_slice( - info.fac1, - &subgrids_o, - grid.orders(), - order_mask, - xi, - alphas_table, - )?; - - // skip over zero arrays to speed up evolution and avoid problems with NaNs - let Some(array) = array else { - continue; - }; - - let x1 = if has_pdf1 { x1.remove(0) } else { x1.remove(1) }; - - if x1.is_empty() { - continue; - } - - if (last_x1.len() != x1.len()) - || last_x1 - .iter() - .zip(x1.iter()) - .any(|(&lhs, &rhs)| !approx_eq!(f64, lhs, rhs, ulps = EVOLUTION_TOL_ULPS)) - { - ops = operator_slices(operator, info, &pid_indices, &x1)?; - last_x1 = x1; - } - - for (&pid1, &factor) in - channel1 - .entry() - .iter() - .map(|(a, b, f)| if has_pdf1 { (a, f) } else { (b, f) }) - { - for (fk_table, op) in - channels0 - .iter() - .zip(tables.iter_mut()) - .filter_map(|(&pid0, fk_table)| { - pids.iter() - .zip(ops.iter()) - .find_map(|(&(p0, p1), op)| { - (p0 == pid0 && p1 == pid1).then_some(op) - }) - .map(|op| (fk_table, op)) - }) - { - fk_table.scaled_add(factor, &op.dot(&array.index_axis(Axis(new_axis - 1), 0))); - } - } - } - - sub_fk_tables.extend(tables.into_iter().map(|table| { - ImportOnlySubgridV2::new( - SparseArray3::from_ndarray( - table - .insert_axis(Axis(0)) - .insert_axis(Axis(new_axis)) - .view(), - 0, - 1, - ), - vec![Mu2 { - // TODO: FK tables don't depend on the renormalization scale - //ren: -1.0, - ren: info.fac0, - fac: info.fac0, - }], - if has_pdf1 { info.x0.clone() } else { vec![1.0] }, - if has_pdf1 { vec![1.0] } else { info.x0.clone() }, - ) - .into() - })); - } - - let pid = if grid.convolutions()[0] == Convolution::None { - grid.channels()[0].entry()[0].0 - } else { - grid.channels()[0].entry()[0].1 - }; - - Ok(( - Array1::from_iter(sub_fk_tables) - .into_shape((1, grid.bin_info().bins(), channels0.len())) - .unwrap(), - channels0 - 
.iter() - .map(|&a| { - channel![ - if has_pdf1 { a } else { pid }, - if has_pdf1 { pid } else { a }, - 1.0 - ] - }) - .collect(), - )) -} - -pub(crate) fn evolve_slice_with_two( - grid: &Grid, - operator: &ArrayView4, - info: &OperatorSliceInfo, - order_mask: &[bool], - xi: (f64, f64), - alphas_table: &AlphasTable, -) -> Result<(Array3, Vec), GridError> { - let gluon_has_pid_zero = gluon_has_pid_zero(grid); - - // TODO: generalize by iterating up to `n` - let (pid_indices, pids01): (Vec<_>, Vec<_>) = (0..2) - .map(|d| { - pid_slices(operator, info, gluon_has_pid_zero, &|pid1| { - grid.channels() - .iter() - .flat_map(Channel::entry) - .any(|tuple| match d { - // TODO: `Channel::entry` should return a tuple of a `Vec` and an `f64` - 0 => tuple.0 == pid1, - 1 => tuple.1 == pid1, - _ => unreachable!(), - }) - }) - }) - .collect::, _>>()? - .into_iter() - .unzip(); - - let mut channels0: Vec<_> = pids01 - .iter() - .map(|pids| pids.iter().map(|&(pid0, _)| pid0)) - .multi_cartesian_product() - .collect(); - channels0.sort_unstable(); - channels0.dedup(); - let channels0 = channels0; - - let mut sub_fk_tables = Vec::with_capacity(grid.bin_info().bins() * channels0.len()); - - // TODO: generalize to `n` - let mut last_x1 = vec![Vec::new(); 2]; - let mut operators = vec![Vec::new(); 2]; - - for subgrids_oc in grid.subgrids().axis_iter(Axis(1)) { - let mut tables = vec![Array2::zeros((info.x0.len(), info.x0.len())); channels0.len()]; - - for (subgrids_o, channel1) in subgrids_oc.axis_iter(Axis(1)).zip(grid.channels()) { - let (x1, array) = ndarray_from_subgrid_orders_slice( - info.fac1, - &subgrids_o, - grid.orders(), - order_mask, - xi, - alphas_table, - )?; - - // skip over zero arrays to speed up evolution and avoid problems with NaNs - let Some(array) = array else { - continue; - }; - - for (last_x1, x1, pid_indices, operators) in - izip!(&mut last_x1, x1, &pid_indices, &mut operators) - { - if (last_x1.len() != x1.len()) - || last_x1 - .iter() - .zip(x1.iter()) - 
.any(|(&lhs, &rhs)| !approx_eq!(f64, lhs, rhs, ulps = EVOLUTION_TOL_ULPS)) - { - *operators = operator_slices(operator, info, pid_indices, &x1)?; - *last_x1 = x1; - } + // TODO: here we assume that all X are consecutive starting from the second element and + // are in ascending order + for (i, &index) in indices.iter().skip(1).enumerate() { + x1_idx[i] = x1_indices[i][index]; } - let mut tmp = Array2::zeros((last_x1[0].len(), info.x0.len())); - - for (pids1, factor) in channel1 - .entry() - .iter() - .map(|&(pida1, pidb1, factor)| ([pida1, pidb1], factor)) - { - for (fk_table, ops) in - channels0 - .iter() - .zip(tables.iter_mut()) - .filter_map(|(pids0, fk_table)| { - izip!(pids0, &pids1, &pids01, &operators) - .map(|(&pid0, &pid1, pids, operators)| { - pids.iter().zip(operators).find_map(|(&(p0, p1), op)| { - ((p0 == pid0) && (p1 == pid1)).then_some(op) - }) - }) - // TODO: avoid using `collect` - .collect::>>() - .map(|ops| (fk_table, ops)) - }) - { - linalg::general_mat_mul(1.0, &array, &ops[1].t(), 0.0, &mut tmp); - linalg::general_mat_mul(factor, ops[0], &tmp, 1.0, fk_table); - } - } + array[x1_idx.as_slice()] += als * logs * value; } - - sub_fk_tables.extend(tables.into_iter().map(|table| { - ImportOnlySubgridV2::new( - SparseArray3::from_ndarray(table.insert_axis(Axis(0)).view(), 0, 1), - vec![Mu2 { - // TODO: FK tables don't depend on the renormalization scale - //ren: -1.0, - ren: info.fac0, - fac: info.fac0, - }], - info.x0.clone(), - info.x0.clone(), - ) - .into() - })); } - Ok(( - Array1::from_iter(sub_fk_tables) - .into_shape((1, grid.bin_info().bins(), channels0.len())) - .unwrap(), - channels0 - .iter() - .map(|c| channel![c[0], c[1], 1.0]) - .collect(), - )) + Ok((x1n, (!zero).then_some(array))) } -pub(crate) fn evolve_slice_with_two2( +pub(crate) fn evolve_slice_with_many( grid: &Grid, operators: &[ArrayView4], infos: &[OperatorSliceInfo], order_mask: &[bool], - xi: (f64, f64), + xi: (f64, f64, f64), alphas_table: &AlphasTable, ) -> 
Result<(Array3, Vec), GridError> { let gluon_has_pid_zero = gluon_has_pid_zero(grid); @@ -660,27 +369,21 @@ pub(crate) fn evolve_slice_with_two2( // TODO: implement matching of different scales for different EKOs let mut fac1_scales: Vec<_> = infos.iter().map(|info| info.fac1).collect(); fac1_scales.sort_by(f64::total_cmp); - assert!(fac1_scales.windows(2).all(|scales| approx_eq!( - f64, - scales[0], - scales[1], - ulps = EVOLUTION_TOL_ULPS - ))); + assert!(fac1_scales + .windows(2) + .all(|scales| subgrid::node_value_eq(scales[0], scales[1]))); let fac1 = fac1_scales[0]; - // TODO: generalize by iterating up to `n` - let (pid_indices, pids01): (Vec<_>, Vec<_>) = izip!(0..2, operators, infos) + assert_eq!(operators.len(), infos.len()); + assert_eq!(operators.len(), grid.convolutions().len()); + + let (pid_indices, pids01): (Vec<_>, Vec<_>) = izip!(0..infos.len(), operators, infos) .map(|(d, operator, info)| { pid_slices(operator, info, gluon_has_pid_zero, &|pid1| { grid.channels() .iter() .flat_map(Channel::entry) - .any(|tuple| match d { - // TODO: `Channel::entry` should return a tuple of a `Vec` and an `f64` - 0 => tuple.0 == pid1, - 1 => tuple.1 == pid1, - _ => unreachable!(), - }) + .any(|(pids, _)| pids[d] == pid1) }) }) .collect::, _>>()? 
@@ -699,18 +402,18 @@ pub(crate) fn evolve_slice_with_two2( let mut sub_fk_tables = Vec::with_capacity(grid.bin_info().bins() * channels0.len()); // TODO: generalize to `n` - let mut last_x1 = vec![Vec::new(); 2]; - let mut eko_slices = vec![Vec::new(); 2]; + let mut last_x1 = vec![Vec::new(); infos.len()]; + let mut eko_slices = vec![Vec::new(); infos.len()]; + let dim: Vec<_> = infos.iter().map(|info| info.x0.len()).collect(); for subgrids_oc in grid.subgrids().axis_iter(Axis(1)) { - assert_eq!(infos[0].x0.len(), infos[1].x0.len()); - - let mut tables = - vec![Array2::zeros((infos[0].x0.len(), infos[1].x0.len())); channels0.len()]; + let mut tables = vec![ArrayD::zeros(dim.clone()); channels0.len()]; for (subgrids_o, channel1) in subgrids_oc.axis_iter(Axis(1)).zip(grid.channels()) { - let (x1, array) = ndarray_from_subgrid_orders_slice( + let (x1, array) = ndarray_from_subgrid_orders_slice_many( + grid, fac1, + grid.kinematics(), &subgrids_o, grid.orders(), order_mask, @@ -735,26 +438,20 @@ pub(crate) fn evolve_slice_with_two2( || last_x1 .iter() .zip(x1.iter()) - .any(|(&lhs, &rhs)| !approx_eq!(f64, lhs, rhs, ulps = EVOLUTION_TOL_ULPS)) + .any(|(&lhs, &rhs)| !subgrid::node_value_eq(lhs, rhs)) { *slices = operator_slices(operator, info, pid_indices, &x1)?; *last_x1 = x1; } } - let mut tmp = Array2::zeros((last_x1[0].len(), infos[1].x0.len())); - - for (pids1, factor) in channel1 - .entry() - .iter() - .map(|&(pida1, pidb1, factor)| ([pida1, pidb1], factor)) - { + for (pids1, factor) in channel1.entry() { for (fk_table, ops) in channels0 .iter() .zip(tables.iter_mut()) .filter_map(|(pids0, fk_table)| { - izip!(pids0, &pids1, &pids01, &eko_slices) + izip!(pids0, pids1, &pids01, &eko_slices) .map(|(&pid0, &pid1, pids, slices)| { pids.iter().zip(slices).find_map(|(&(p0, p1), op)| { ((p0 == pid0) && (p1 == pid1)).then_some(op) @@ -765,25 +462,22 @@ pub(crate) fn evolve_slice_with_two2( .map(|ops| (fk_table, ops)) }) { - // tmp = array * ops[1]^T - 
linalg::general_mat_mul(1.0, &array, &ops[1].t(), 0.0, &mut tmp); - // fk_table += factor * ops[0] * tmp - linalg::general_mat_mul(factor, ops[0], &tmp, 1.0, fk_table); + general_tensor_mul(*factor, &array, &ops, fk_table); } } } + // TODO: generalize this for arbitrary scales and x values + let mut node_values = vec![vec![infos[0].fac0]]; + + for info in infos { + node_values.push(info.x0.clone()); + } + sub_fk_tables.extend(tables.into_iter().map(|table| { - ImportOnlySubgridV2::new( - SparseArray3::from_ndarray(table.insert_axis(Axis(0)).view(), 0, 1), - vec![Mu2 { - // TODO: FK tables don't depend on the renormalization scale - //ren: -1.0, - ren: infos[0].fac0, - fac: infos[0].fac0, - }], - infos[0].x0.clone(), - infos[1].x0.clone(), + ImportSubgridV1::new( + PackedArray::from(table.insert_axis(Axis(0)).view()), + node_values.clone(), ) .into() })); @@ -794,8 +488,35 @@ pub(crate) fn evolve_slice_with_two2( .into_shape((1, grid.bin_info().bins(), channels0.len())) .unwrap(), channels0 - .iter() - .map(|c| channel![c[0], c[1], 1.0]) + .into_iter() + .map(|c| Channel::new(vec![(c, 1.0)])) .collect(), )) } + +fn general_tensor_mul( + factor: f64, + array: &ArrayD, + ops: &[&Array2], + fk_table: &mut ArrayD, +) { + match array.shape().len() { + 1 => { + let array = array.view().into_dimensionality::().unwrap(); + let mut fk_table = fk_table.view_mut().into_dimensionality::().unwrap(); + fk_table.scaled_add(factor, &ops[0].dot(&array)); + } + 2 => { + let array = array.view().into_dimensionality::().unwrap(); + let mut fk_table = fk_table.view_mut().into_dimensionality::().unwrap(); + + let mut tmp = Array2::zeros((array.shape()[0], ops[1].shape()[0])); + // tmp = array * ops[1]^T + linalg::general_mat_mul(1.0, &array, &ops[1].t(), 0.0, &mut tmp); + // fk_table += factor * ops[0] * tmp + linalg::general_mat_mul(factor, ops[0], &tmp, 1.0, &mut fk_table); + } + // TODO: generalize this to n dimensions + _ => unimplemented!(), + } +} diff --git 
a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index bf040da46..4e81a2bae 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -1,12 +1,14 @@ //! Provides the [`FkTable`] type. -use super::boc::Order; -use super::convolutions::{Convolution, LumiCache}; +use super::boc::{Channel, Kinematics, Order}; +use super::convolutions::ConvolutionCache; +use super::empty_subgrid::EmptySubgridV1; use super::grid::Grid; -use super::subgrid::Subgrid; -use float_cmp::approx_eq; -use ndarray::Array4; +use super::pids::OptRules; +use super::subgrid::{self, Subgrid}; +use ndarray::{s, ArrayD}; use std::fmt::{self, Display, Formatter}; +use std::iter; use std::str::FromStr; use thiserror::Error; @@ -144,42 +146,52 @@ impl FkTable { /// /// TODO #[must_use] - pub fn table(&self) -> Array4 { - let has_pdf1 = self.grid.convolutions()[0] != Convolution::None; - let has_pdf2 = self.grid.convolutions()[1] != Convolution::None; + pub fn table(&self) -> ArrayD { let x_grid = self.x_grid(); - let mut result = Array4::zeros(( - self.grid.bin_info().bins(), - self.grid.channels().len(), - if has_pdf1 { x_grid.len() } else { 1 }, - if has_pdf2 { x_grid.len() } else { 1 }, - )); + let mut dim = vec![self.grid.bin_info().bins(), self.grid.channels().len()]; + dim.extend(iter::repeat(x_grid.len()).take(self.grid.convolutions().len())); + let mut idx = vec![0; dim.len()]; + let mut result = ArrayD::zeros(dim); for ((_, bin, channel), subgrid) in self.grid().subgrids().indexed_iter() { - let indices1 = if has_pdf1 { - subgrid - .x1_grid() - .iter() - .map(|&s| x_grid.iter().position(|&x| approx_eq!(f64, s, x, ulps = 2))) - .collect::>() - .unwrap() - } else { - vec![0] - }; - let indices2 = if has_pdf2 { - subgrid - .x2_grid() - .iter() - .map(|&s| x_grid.iter().position(|&x| approx_eq!(f64, s, x, ulps = 2))) - .collect::>() - .unwrap() - } else { - vec![0] - }; - - for ((_, ix1, ix2), value) in subgrid.indexed_iter() { - result[[bin, channel, indices1[ix1], indices2[ix2]]] = 
value; + let indices: Vec> = self + .grid + .convolutions() + .iter() + .enumerate() + .map(|(index, _)| { + subgrid + .node_values() + .iter() + .zip(self.grid.kinematics()) + .find_map(|(node_values, kin)| { + matches!(kin, Kinematics::X(i) if *i == index).then(|| { + node_values + .iter() + .map(|&s| { + x_grid + .iter() + .position(|&x| subgrid::node_value_eq(s, x)) + // UNWRAP: must be guaranteed by the grid constructor + .unwrap() + }) + .collect() + }) + }) + // UNWRAP: must be guaranteed by the grid constructor + .unwrap() + }) + .collect(); + + for (index, value) in subgrid.indexed_iter() { + assert_eq!(index[0], 0); + idx[0] = bin; + idx[1] = channel; + for i in 2..result.shape().len() { + idx[i] = indices[i - 2][index[i - 1]]; + } + result[idx.as_slice()] = value; } } @@ -188,23 +200,30 @@ impl FkTable { /// Return the channel definition for this `FkTable`. All factors are `1.0`. #[must_use] - pub fn channels(&self) -> Vec<(i32, i32)> { + pub fn channels(&self) -> Vec> { self.grid .channels() .iter() - .map(|entry| (entry.entry()[0].0, entry.entry()[0].1)) + .map(|entry| entry.entry()[0].0.clone()) .collect() } /// Returns the single `muf2` scale of this `FkTable`. #[must_use] pub fn muf2(&self) -> f64 { - if let &[muf2] = &self.grid.evolve_info(&[true]).fac1[..] { - muf2 - } else { - // every `FkTable` has only a single factorization scale - unreachable!() - } + let [muf2] = self.grid.evolve_info(&[true]).fac1[..] + .try_into() + // UNWRAP: every `FkTable` has only a single factorization scale + .unwrap_or_else(|_| unreachable!()); + + muf2 + } + + /// Set a metadata key-value pair for this FK table. + pub fn set_key_value(&mut self, key: &str, value: &str) { + self.grid + .metadata_mut() + .insert(key.to_owned(), value.to_owned()); } /// Returns the x grid that all subgrids for all hadronic initial states share. @@ -217,82 +236,53 @@ impl FkTable { /// FK-tables have all orders merged together and do not support scale variations. 
pub fn convolve( &self, - lumi_cache: &mut LumiCache, + convolution_cache: &mut ConvolutionCache, bin_indices: &[usize], channel_mask: &[bool], ) -> Vec { - self.grid - .convolve(lumi_cache, &[], bin_indices, channel_mask, &[(1.0, 1.0)]) - } - - /// Set a metadata key-value pair - pub fn set_key_value(&mut self, key: &str, value: &str) { - self.grid.set_key_value(key, value); + self.grid.convolve( + convolution_cache, + &[], + bin_indices, + channel_mask, + &[(1.0, 1.0, 1.0)], + ) } - /// Optimizes the storage of FK tables based of assumptions of the PDFs at the FK table's - /// scale. - /// - /// # Panics - /// - /// TODO + /// Optimize the size of this FK-table by throwing away heavy quark flavors assumed to be zero + /// at the FK-table's scales and calling [`Grid::optimize`]. pub fn optimize(&mut self, assumptions: FkAssumptions) { - let mut add = Vec::new(); + let OptRules(sum, delete) = self.grid.pid_basis().opt_rules(assumptions); - match assumptions { - FkAssumptions::Nf6Ind => { - // nothing to do here - } - FkAssumptions::Nf6Sym => { - add.push((235, 200)); - } - FkAssumptions::Nf5Ind => { - add.extend_from_slice(&[(235, 200), (135, 100)]); - } - FkAssumptions::Nf5Sym => { - add.extend_from_slice(&[(235, 200), (135, 100), (224, 200)]); - } - FkAssumptions::Nf4Ind => { - add.extend_from_slice(&[(235, 200), (135, 100), (224, 200), (124, 100)]); - } - FkAssumptions::Nf4Sym => { - add.extend_from_slice(&[ - (235, 200), - (135, 100), - (224, 200), - (124, 100), - (215, 200), - ]); - } - FkAssumptions::Nf3Ind => { - add.extend_from_slice(&[ - (235, 200), - (135, 100), - (224, 200), - (124, 100), - (215, 200), - (115, 100), - ]); - } - FkAssumptions::Nf3Sym => { - add.extend_from_slice(&[ - (235, 200), - (135, 100), - (224, 200), - (124, 100), - (215, 200), - (115, 100), - (208, 200), - ]); + for idx in 0..self.grid.channels().len() { + let &[(ref pids, factor)] = self.grid.channels()[idx].entry() else { + // every FK-table must have a trivial channel definition 
+ unreachable!() + }; + let mut pids = pids.clone(); + + for pid in &mut pids { + if delete.iter().any(|&delete| *pid == delete) { + for subgrid in self.grid.subgrids_mut().slice_mut(s![.., .., idx]) { + *subgrid = EmptySubgridV1.into(); + } + } else if let Some(replace) = sum + .iter() + .find_map(|&(search, replace)| (*pid == search).then_some(replace)) + { + *pid = replace; + } } + + self.grid.channels_mut()[idx] = Channel::new(vec![(pids, factor)]); } - self.grid.rewrite_channels(&add, &[]); + self.grid.optimize(); // store the assumption so that we can check it later on self.grid - .set_key_value("fk_assumptions", &assumptions.to_string()); - self.grid.optimize(); + .metadata_mut() + .insert("fk_assumptions".to_owned(), assumptions.to_string()); } } @@ -308,6 +298,7 @@ impl TryFrom for FkTable { alpha: 0, logxir: 0, logxif: 0, + logxia: 0, }] { return Err(TryFromGridError::NonTrivialOrder); @@ -318,15 +309,17 @@ impl TryFrom for FkTable { continue; } - let mu2_grid = subgrid.mu2_grid(); - - if mu2_grid.len() > 1 { + let [fac] = grid + .scales() + .fac + .calc(&subgrid.node_values(), grid.kinematics())[..] + else { return Err(TryFromGridError::MultipleScales); - } + }; if muf2 < 0.0 { - muf2 = mu2_grid[0].fac; - } else if muf2 != mu2_grid[0].fac { + muf2 = fac; + } else if !subgrid::node_value_eq(muf2, fac) { return Err(TryFromGridError::MultipleScales); } } @@ -334,7 +327,7 @@ impl TryFrom for FkTable { for channel in grid.channels() { let entry = channel.entry(); - if entry.len() != 1 || entry[0].2 != 1.0 { + if entry.len() != 1 || !subgrid::node_value_eq(entry[0].1, 1.0) { return Err(TryFromGridError::InvalidChannel); } } diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 19698d228..7920d6a64 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1,45 +1,31 @@ //! Module containing all traits and supporting structures for grids. 
use super::bin::{BinInfo, BinLimits, BinRemapper}; -use super::boc::{Channel, Order}; -use super::convolutions::{Convolution, LumiCache}; +use super::boc::{Channel, Kinematics, Order, ScaleFuncForm, Scales}; +use super::convolutions::{Conv, ConvType, ConvolutionCache}; use super::empty_subgrid::EmptySubgridV1; -use super::evolution::{self, AlphasTable, EvolveInfo, OperatorInfo, OperatorSliceInfo}; +use super::evolution::{self, AlphasTable, EvolveInfo, OperatorSliceInfo}; use super::fk_table::FkTable; -use super::import_only_subgrid::ImportOnlySubgridV2; -use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, LagrangeSubgridV2}; -use super::ntuple_subgrid::NtupleSubgridV1; -use super::pids::{self, PidBasis}; -use super::subgrid::{ExtraSubgridParams, Mu2, Subgrid, SubgridEnum, SubgridParams}; +use super::import_subgrid::ImportSubgridV1; +use super::interp_subgrid::InterpSubgridV1; +use super::interpolation::Interp; +use super::pids::PidBasis; +use super::subgrid::{self, Subgrid, SubgridEnum}; +use super::v0; use bitflags::bitflags; -use float_cmp::{approx_eq, assert_approx_eq}; +use float_cmp::approx_eq; use git_version::git_version; +use itertools::Itertools; use lz4_flex::frame::{FrameDecoder, FrameEncoder}; -use ndarray::{s, Array3, ArrayView3, ArrayView5, ArrayViewMut3, Axis, CowArray, Dimension, Ix4}; -use serde::{Deserialize, Serialize, Serializer}; -use std::borrow::Cow; -use std::collections::{BTreeMap, HashMap}; +use ndarray::{s, Array3, ArrayD, ArrayView3, ArrayViewMut3, Axis, CowArray, Dimension, Ix4}; +use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; use std::io::{self, BufRead, BufReader, BufWriter, Read, Write}; use std::iter; use std::mem; use std::ops::Range; use thiserror::Error; -/// This structure represents a position (`x1`, `x2`, `q2`) in a `Subgrid` together with a -/// corresponding `weight`. The type `W` can either be a `f64` or `()`, which is used when multiple -/// weights should be signaled. 
-#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] -pub struct Ntuple { - /// Momentum fraction of the first parton. - pub x1: f64, - /// Momentum fraction of the second parton. - pub x2: f64, - /// Squared scale. - pub q2: f64, - /// Weight of this entry. - pub weight: W, -} - /// Error returned when merging two grids fails. #[derive(Debug, Error)] pub enum GridError { @@ -71,14 +57,12 @@ pub enum GridError { IoFailure(io::Error), /// Returned when trying to read a `PineAPPL` file with file format version that is not /// supported. - #[error("the file version is {file_version}, but supported is only {supported_version}")] - FileVersionMismatch { + #[error("file version {file_version} is not supported")] + FileVersionUnsupported { /// File format version of the file read. file_version: u64, - /// Maximum supported file format version for this library. - supported_version: u64, }, - /// Returned from [`Grid::evolve`] if the evolution failed. + /// Returned from [`Grid::evolve_with_slice_iter`] if the evolution failed. #[error("failed to evolve grid: {0}")] EvolutionFailure(String), /// Errors that do no originate from this crate itself. 
@@ -87,104 +71,24 @@ pub enum GridError { } #[derive(Clone, Deserialize, Serialize)] -struct Mmv1; - -#[derive(Clone, Deserialize, Serialize)] -struct Mmv2 { - remapper: Option, - key_value_db: HashMap, -} - -fn ordered_map_serialize( - value: &HashMap, - serializer: S, -) -> Result -where - S: Serializer, -{ - let ordered: BTreeMap<_, _> = value.iter().collect(); - ordered.serialize(serializer) -} - -#[derive(Clone, Deserialize, Serialize)] -struct Mmv3 { - remapper: Option, - // order the HashMap before serializing it to make the output stable - #[serde(serialize_with = "ordered_map_serialize")] - key_value_db: HashMap, - subgrid_template: SubgridEnum, -} - -impl Default for Mmv2 { - fn default() -> Self { - Self { - remapper: None, - key_value_db: [ - ( - "pineappl_gitversion".to_owned(), - git_version!( - args = ["--always", "--dirty", "--long", "--tags"], - cargo_prefix = "cargo:", - fallback = "unknown" - ) - .to_owned(), - ), - // by default we assume there are protons in the initial state - ("initial_state_1".to_owned(), "2212".to_owned()), - ("initial_state_2".to_owned(), "2212".to_owned()), - ] - .iter() - .cloned() - .collect(), - } - } -} - -impl Mmv3 { - fn new(subgrid_template: SubgridEnum) -> Self { - Self { - remapper: None, - key_value_db: [ - ( - "pineappl_gitversion".to_owned(), - git_version!( - args = ["--always", "--dirty", "--long", "--tags"], - cargo_prefix = "cargo:", - fallback = "unknown" - ) - .to_owned(), - ), - // by default we assume there are unpolarized protons in the initial state - // do not change these to the new metadata to not break backwards compatibility - ("initial_state_1".to_owned(), "2212".to_owned()), - ("initial_state_2".to_owned(), "2212".to_owned()), - ] - .iter() - .cloned() - .collect(), - subgrid_template, - } - } +struct Mmv4; + +fn default_metadata() -> BTreeMap { + iter::once(( + "pineappl_gitversion".to_owned(), + git_version!( + args = ["--always", "--dirty", "--long", "--tags"], + cargo_prefix = "cargo:", + 
fallback = "unknown" + ) + .to_owned(), + )) + .collect() } -// ALLOW: fixing the warning will break the file format -#[allow(clippy::large_enum_variant)] #[derive(Clone, Deserialize, Serialize)] enum MoreMembers { - V1(Mmv1), - V2(Mmv2), - V3(Mmv3), -} - -impl MoreMembers { - fn upgrade(&mut self) { - match self { - Self::V1(_) => { - *self = Self::V2(Mmv2::default()); - } - Self::V2(_) | Self::V3(_) => {} - } - } + V4(Mmv4), } bitflags! { @@ -192,12 +96,11 @@ bitflags! { #[derive(Clone, Copy)] #[repr(transparent)] pub struct GridOptFlags: u32 { - /// Change the [`Subgrid`] type to optimize storage effeciency. - const OPTIMIZE_SUBGRID_TYPE = 0b1; /// Recognize whether a subgrid was filled with events with a static scale and if this is - /// the case, optimize it by undoing the interpolation in the scale. This flag requires - /// [`Self::OPTIMIZE_SUBGRID_TYPE`] to be active. - const STATIC_SCALE_DETECTION = 0b10; + /// the case, optimize it by undoing the interpolation in the scale. + const OPTIMIZE_NODES = 0b1; + /// Change the [`Subgrid`] type to optimize storage efficiency. + const OPTIMIZE_SUBGRID_TYPE = 0b10; /// If two channels differ by transposition of the two initial states and the functions /// this grid is convolved with are the same for both initial states, this will merge one /// channel into the other, with the correct transpositions. @@ -219,109 +122,105 @@ pub struct Grid { channels: Vec, bin_limits: BinLimits, orders: Vec, - subgrid_params: SubgridParams, + metadata: BTreeMap, + convolutions: Vec, + pid_basis: PidBasis, more_members: MoreMembers, + kinematics: Vec, + interps: Vec, + remapper: Option, + scales: Scales, } impl Grid { /// Constructor. + /// + /// # Panics + /// + /// Panics when the number of PIDs in `channels` is not equal to `convolutions.len()`, or + /// `interps` and `kinematics` have different lengths or if `kinematics` are not compatible + /// with `scales`. 
#[must_use] pub fn new( + pid_basis: PidBasis, channels: Vec, orders: Vec, bin_limits: Vec, - subgrid_params: SubgridParams, + convolutions: Vec, + interps: Vec, + kinematics: Vec, + scales: Scales, ) -> Self { + for (channel_idx, channel) in channels.iter().enumerate() { + let offending_entry = channel + .entry() + .iter() + .find_map(|(pids, _)| (pids.len() != convolutions.len()).then_some(pids.len())); + + if let Some(pids_len) = offending_entry { + panic!("channel #{channel_idx} has wrong number of PIDs: expected {}, found {pids_len}", convolutions.len()); + } + } + + assert_eq!( + interps.len(), + kinematics.len(), + "interps and kinematics have different lengths: {} vs. {}", + interps.len(), + kinematics.len(), + ); + + assert!( + scales.compatible_with(&kinematics), + "scales and kinematics are not compatible" + ); + Self { subgrids: Array3::from_shape_simple_fn( (orders.len(), bin_limits.len() - 1, channels.len()), || EmptySubgridV1.into(), ), orders, - channels, bin_limits: BinLimits::new(bin_limits), - more_members: MoreMembers::V3(Mmv3::new( - LagrangeSubgridV2::new(&subgrid_params, &ExtraSubgridParams::from(&subgrid_params)) - .into(), - )), - subgrid_params, + metadata: default_metadata(), + more_members: MoreMembers::V4(Mmv4), + convolutions, + pid_basis, + channels, + interps, + kinematics, + remapper: None, + scales, } } - /// Constructor. This function can be used like `new`, but the additional parameter - /// `subgrid_type` selects the underlying `Subgrid` type. Supported values are: - /// - `LagrangeSubgrid` - /// - `LagrangeSparseSubgrid` - /// - `NtupleSubgrid` - /// - /// # Errors - /// - /// If `subgrid_type` is none of the values listed above, an error is returned. 
- pub fn with_subgrid_type( - channels: Vec, - orders: Vec, - bin_limits: Vec, - subgrid_params: SubgridParams, - extra: ExtraSubgridParams, - subgrid_type: &str, - ) -> Result { - let subgrid_template: SubgridEnum = match subgrid_type { - "LagrangeSubgrid" | "LagrangeSubgridV2" => { - LagrangeSubgridV2::new(&subgrid_params, &extra).into() - } - "LagrangeSubgridV1" => LagrangeSubgridV1::new(&subgrid_params).into(), - "NtupleSubgrid" => NtupleSubgridV1::new().into(), - "LagrangeSparseSubgrid" => LagrangeSparseSubgridV1::new(&subgrid_params).into(), - _ => return Err(GridError::UnknownSubgridType(subgrid_type.to_owned())), - }; + /// Return the convention by which the channels' PIDs are encoded. + #[must_use] + pub const fn pid_basis(&self) -> &PidBasis { + &self.pid_basis + } - Ok(Self { - subgrids: Array3::from_shape_simple_fn( - (orders.len(), bin_limits.len() - 1, channels.len()), - || EmptySubgridV1.into(), - ), - orders, - channels, - bin_limits: BinLimits::new(bin_limits), - subgrid_params, - more_members: MoreMembers::V3(Mmv3::new(subgrid_template)), - }) + /// Set the convention by which PIDs of channels are interpreted. + pub fn pid_basis_mut(&mut self) -> &mut PidBasis { + &mut self.pid_basis } - /// Return by which convention the particle IDs are encoded. + /// Return a vector containing the interpolation specifications for this grid. #[must_use] - pub fn pid_basis(&self) -> PidBasis { - if let Some(key_values) = self.key_values() { - if let Some(lumi_id_types) = key_values.get("lumi_id_types") { - match lumi_id_types.as_str() { - "pdg_mc_ids" => return PidBasis::Pdg, - "evol" => return PidBasis::Evol, - _ => unimplemented!("unknown particle ID convention {lumi_id_types}"), - } - } - } - - // if there's no basis explicitly set we're assuming to use PDG IDs - PidBasis::Pdg + pub fn interpolations(&self) -> &[Interp] { + &self.interps } - /// Set the convention by which PIDs of channels are interpreted. 
- pub fn set_pid_basis(&mut self, pid_basis: PidBasis) { - match pid_basis { - PidBasis::Pdg => self.set_key_value("lumi_id_types", "pdg_mc_ids"), - PidBasis::Evol => self.set_key_value("lumi_id_types", "evol"), - } + /// Return a vector containing the kinematic specifications for this grid. + #[must_use] + pub fn kinematics(&self) -> &[Kinematics] { + &self.kinematics } - fn pdg_channels(&self) -> Cow<[Channel]> { - match self.pid_basis() { - PidBasis::Evol => self - .channels - .iter() - .map(|entry| Channel::translate(entry, &pids::evol_to_pdg_mc_ids)) - .collect(), - PidBasis::Pdg => Cow::Borrowed(self.channels()), - } + /// Return a vector containing the scale specifications for this grid. + #[must_use] + pub const fn scales(&self) -> &Scales { + &self.scales } /// Perform a convolution using the PDFs and strong coupling in `lumi_cache`, and @@ -335,13 +234,13 @@ impl Grid { /// TODO pub fn convolve( &self, - lumi_cache: &mut LumiCache, + cache: &mut ConvolutionCache, order_mask: &[bool], bin_indices: &[usize], channel_mask: &[bool], - xi: &[(f64, f64)], + xi: &[(f64, f64, f64)], ) -> Vec { - lumi_cache.setup(self, xi).unwrap(); + let mut cache = cache.new_grid_conv_cache(self, xi); let bin_indices = if bin_indices.is_empty() { (0..self.bin_info().bins()).collect() @@ -350,13 +249,16 @@ impl Grid { }; let mut bins = vec![0.0; bin_indices.len() * xi.len()]; let normalizations = self.bin_info().normalizations(); - let pdg_channels = self.pdg_channels(); + let pdg_channels = self.channels_pdg(); - for (xi_index, &(xir, xif)) in xi.iter().enumerate() { + for (xi_index, &xis @ (xir, xif, xia)) in xi.iter().enumerate() { for ((ord, bin, chan), subgrid) in self.subgrids.indexed_iter() { let order = &self.orders[ord]; - if ((order.logxir > 0) && (xir == 1.0)) || ((order.logxif > 0) && (xif == 1.0)) { + if ((order.logxir != 0) && approx_eq!(f64, xir, 1.0, ulps = 4)) + || ((order.logxif != 0) && approx_eq!(f64, xif, 1.0, ulps = 4)) + || ((order.logxia != 0) &&
approx_eq!(f64, xia, 1.0, ulps = 4)) + { continue; } @@ -375,36 +277,32 @@ impl Grid { } let channel = &pdg_channels[chan]; - let mu2_grid = subgrid.mu2_grid(); - let x1_grid = subgrid.x1_grid(); - let x2_grid = subgrid.x2_grid(); - - lumi_cache.set_grids(&mu2_grid, &x1_grid, &x2_grid, xir, xif); - - let mut value = - subgrid.convolve(&x1_grid, &x2_grid, &mu2_grid, &mut |ix1, ix2, imu2| { - let x1 = x1_grid[ix1]; - let x2 = x2_grid[ix2]; - let mut lumi = 0.0; - - for entry in channel.entry() { - let xfx1 = lumi_cache.xfx1(entry.0, ix1, imu2); - let xfx2 = lumi_cache.xfx2(entry.1, ix2, imu2); - lumi += xfx1 * xfx2 * entry.2 / (x1 * x2); - } + let mut value = 0.0; - let alphas = lumi_cache.alphas(imu2); + cache.set_grids(self, subgrid, xis); - lumi *= alphas.powi(order.alphas.try_into().unwrap()); - lumi - }); + for (idx, v) in subgrid.indexed_iter() { + let mut lumi = 0.0; + + for entry in channel.entry() { + // TODO: we assume `idx` to be ordered as scale, x1, x2 + let fx_prod = cache.as_fx_prod(&entry.0, order.alphas, &idx); + lumi += fx_prod * entry.1; + } + + value += lumi * v; + } - if order.logxir > 0 { - value *= (xir * xir).ln().powi(order.logxir.try_into().unwrap()); + if order.logxir != 0 { + value *= (xir * xir).ln().powi(order.logxir.into()); } - if order.logxif > 0 { - value *= (xif * xif).ln().powi(order.logxif.try_into().unwrap()); + if order.logxif != 0 { + value *= (xif * xif).ln().powi(order.logxif.into()); + } + + if order.logxia != 0 { + value *= (xia * xia).ln().powi(order.logxia.into()); } bins[xi_index + xi.len() * bin_index] += value / normalizations[bin]; @@ -424,77 +322,83 @@ impl Grid { /// TODO pub fn convolve_subgrid( &self, - lumi_cache: &mut LumiCache, + cache: &mut ConvolutionCache, ord: usize, bin: usize, channel: usize, - xir: f64, - xif: f64, - ) -> Array3 { - lumi_cache.setup(self, &[(xir, xif)]).unwrap(); + xi @ (xir, xif, xia): (f64, f64, f64), + ) -> ArrayD { + let mut cache = cache.new_grid_conv_cache(self, &[(xir, xif, 
xia)]); let normalizations = self.bin_info().normalizations(); - let pdg_channels = self.pdg_channels(); + let pdg_channels = self.channels_pdg(); let subgrid = &self.subgrids[[ord, bin, channel]]; let order = &self.orders[ord]; let channel = &pdg_channels[channel]; - let mu2_grid = subgrid.mu2_grid(); - let x1_grid = subgrid.x1_grid(); - let x2_grid = subgrid.x2_grid(); - lumi_cache.set_grids(&mu2_grid, &x1_grid, &x2_grid, xir, xif); + cache.set_grids(self, subgrid, xi); + + let node_values = subgrid.node_values(); + // TODO: generalize this to N dimensions + assert_eq!(node_values.len(), 3); + let dim: Vec<_> = node_values.iter().map(Vec::len).collect(); - let mut array = Array3::zeros((mu2_grid.len(), x1_grid.len(), x2_grid.len())); + let mut array = ArrayD::zeros(dim); - for ((imu2, ix1, ix2), value) in subgrid.indexed_iter() { - let x1 = x1_grid[ix1]; - let x2 = x2_grid[ix2]; + for (idx, value) in subgrid.indexed_iter() { + assert_eq!(idx.len(), 3); let mut lumi = 0.0; for entry in channel.entry() { - let xfx1 = lumi_cache.xfx1(entry.0, ix1, imu2); - let xfx2 = lumi_cache.xfx2(entry.1, ix2, imu2); - lumi += xfx1 * xfx2 * entry.2 / (x1 * x2); + debug_assert_eq!(entry.0.len(), 2); + // TODO: we assume `idx` to be ordered as scale, x1, x2 + let fx_prod = cache.as_fx_prod(&entry.0, order.alphas, &idx); + lumi += fx_prod * entry.1; } - let alphas = lumi_cache.alphas(imu2); - - lumi *= alphas.powi(order.alphas.try_into().unwrap()); + array[idx.as_slice()] = lumi * value; + } - array[[imu2, ix1, ix2]] = lumi * value; + if order.logxir != 0 { + array *= (xir * xir).ln().powi(order.logxir.into()); } - if order.logxir > 0 { - array *= (xir * xir).ln().powi(order.logxir.try_into().unwrap()); + if order.logxif != 0 { + array *= (xif * xif).ln().powi(order.logxif.into()); } - if order.logxif > 0 { - array *= (xif * xif).ln().powi(order.logxif.try_into().unwrap()); + if order.logxia != 0 { + array *= (xia * xia).ln().powi(order.logxia.into()); } array /= 
normalizations[bin]; array } - /// Fills the grid with an ntuple for the given `order`, `observable`, and `channel`. + /// Fills the grid with an ntuple for the given `order`, `observable`, and `channel`. The + /// parameter `ntuple` must contain the variables specified by the `kinematics` parameter in + /// the constructor [`Grid::new`] in the same order. /// /// # Panics /// /// TODO - pub fn fill(&mut self, order: usize, observable: f64, channel: usize, ntuple: &Ntuple) { + pub fn fill( + &mut self, + order: usize, + observable: f64, + channel: usize, + ntuple: &[f64], + weight: f64, + ) { if let Some(bin) = self.bin_limits.index(observable) { let subgrid = &mut self.subgrids[[order, bin, channel]]; if let SubgridEnum::EmptySubgridV1(_) = subgrid { - if let MoreMembers::V3(mmv3) = &self.more_members { - *subgrid = mmv3.subgrid_template.clone_empty(); - } else { - unreachable!(); - } + *subgrid = InterpSubgridV1::new(&self.interps).into(); } - subgrid.fill(ntuple); + subgrid.fill(&self.interps, ntuple, weight); } } @@ -531,14 +435,11 @@ impl Grid { 0 }; - if file_version != 0 { - return Err(GridError::FileVersionMismatch { - file_version, - supported_version: 0, - }); + match file_version { + 0 => v0::read_uncompressed_v0(reader), + 1 => bincode::deserialize_from(reader).map_err(GridError::ReadFailure), + _ => Err(GridError::FileVersionUnsupported { file_version }), } - - bincode::deserialize_from(reader).map_err(GridError::ReadFailure) } /// Serializes `self` into `writer`. Writing is buffered. @@ -548,7 +449,7 @@ impl Grid { /// If writing fails an error is returned. 
pub fn write(&self, writer: impl Write) -> Result<(), GridError> { let mut writer = BufWriter::new(writer); - let file_header = b"PineAPPL\0\0\0\0\0\0\0\0"; + let file_header = b"PineAPPL\x01\0\0\0\0\0\0\0"; // first write PineAPPL file header writer.write(file_header).map_err(GridError::IoFailure)?; @@ -575,37 +476,20 @@ impl Grid { Ok(()) } - /// Fills the grid with events for the parton momentum fractions `x1` and `x2`, the scale `q2`, - /// and the `order` and `observable`. The events are stored in `weights` and their ordering - /// corresponds to the ordering of [`Grid::channels`]. - pub fn fill_all( - &mut self, - order: usize, - observable: f64, - ntuple: &Ntuple<()>, - weights: &[f64], - ) { - for (channel, weight) in weights.iter().enumerate() { - self.fill( - order, - observable, - channel, - &Ntuple { - x1: ntuple.x1, - x2: ntuple.x2, - q2: ntuple.q2, - weight: *weight, - }, - ); - } - } - /// Return the channels for this `Grid`. #[must_use] pub fn channels(&self) -> &[Channel] { &self.channels } + fn channels_pdg(&self) -> Vec { + self.channels() + .iter() + .cloned() + .map(|channel| self.pid_basis().translate(PidBasis::Pdg, channel)) + .collect() + } + /// Merges the bins for the corresponding range together in a single one. 
/// /// # Errors @@ -642,7 +526,7 @@ impl Grid { if new_subgrid.is_empty() { mem::swap(new_subgrid, subgrid); } else { - new_subgrid.merge(subgrid, false); + new_subgrid.merge(subgrid, None); } } else { let new_bin = if bin > bins.start { @@ -760,7 +644,7 @@ impl Grid { if self.subgrids[[self_i, self_j, self_k]].is_empty() { mem::swap(&mut self.subgrids[[self_i, self_j, self_k]], subgrid); } else { - self.subgrids[[self_i, self_j, self_k]].merge(&mut *subgrid, false); + self.subgrids[[self_i, self_j, self_k]].merge(subgrid, None); } } @@ -774,101 +658,37 @@ impl Grid { /// Panics if the metadata key--value pairs `convolution_particle_1` and `convolution_type_1`, /// or `convolution_particle_2` and `convolution_type_2` are not correctly set. #[must_use] - pub fn convolutions(&self) -> Vec { - self.key_values().map_or_else( - // if there isn't any metadata, we assume two unpolarized proton-PDFs are used - || vec![Convolution::UnpolPDF(2212), Convolution::UnpolPDF(2212)], - |kv| { - // the current file format only supports exactly two convolutions - (1..=2) - .map(|index| { - // if there are key-value pairs `convolution_particle_1` and - // `convolution_type_1` and the same with a higher index, we convert this - // metadata into `Convolution` - match ( - kv.get(&format!("convolution_particle_{index}")) - .map(|s| s.parse::()), - kv.get(&format!("convolution_type_{index}")) - .map(String::as_str), - ) { - (_, Some("None")) => Convolution::None, - (Some(Ok(pid)), Some("UnpolPDF")) => Convolution::UnpolPDF(pid), - (Some(Ok(pid)), Some("PolPDF")) => Convolution::PolPDF(pid), - (Some(Ok(pid)), Some("UnpolFF")) => Convolution::UnpolFF(pid), - (Some(Ok(pid)), Some("PolFF")) => Convolution::PolFF(pid), - (None, None) => { - // if these key-value pairs are missing use the old metadata - match kv - .get(&format!("initial_state_{index}")) - .map(|s| s.parse::()) - { - Some(Ok(pid)) => { - let condition = !self.channels().iter().all(|entry| { - 
entry.entry().iter().all(|&channels| match index { - 1 => channels.0 == pid, - 2 => channels.1 == pid, - _ => unreachable!(), - }) - }); - - if condition { - Convolution::UnpolPDF(pid) - } else { - Convolution::None - } - } - None => Convolution::UnpolPDF(2212), - Some(Err(err)) => panic!("metadata 'initial_state_{index}' could not be parsed: {err}"), - } - } - (None, Some(_)) => { - panic!("metadata 'convolution_type_{index}' is missing") - } - (Some(_), None) => { - panic!("metadata 'convolution_particle_{index}' is missing") - } - (Some(Ok(_)), Some(type_)) => { - panic!("metadata 'convolution_type_{index} = {type_}' is unknown") - } - (Some(Err(err)), Some(_)) => panic!( - "metadata 'convolution_particle_{index}' could not be parsed: {err}" - ), - } - }) - .collect() - }, - ) + pub fn convolutions(&self) -> &[Conv] { + &self.convolutions } - /// Set the convolution type for this grid for the corresponding `index`. - pub fn set_convolution(&mut self, index: usize, convolution: Convolution) { - // remove outdated metadata - self.key_values_mut() - .remove(&format!("initial_state_{}", index + 1)); - - let (type_, particle) = match convolution { - Convolution::UnpolPDF(pid) => ("UnpolPDF".to_owned(), pid.to_string()), - Convolution::PolPDF(pid) => ("PolPDF".to_owned(), pid.to_string()), - Convolution::UnpolFF(pid) => ("UnpolFF".to_owned(), pid.to_string()), - Convolution::PolFF(pid) => ("PolFF".to_owned(), pid.to_string()), - Convolution::None => ("None".to_owned(), String::new()), - }; + /// Return the convolution types. + pub fn convolutions_mut(&mut self) -> &mut [Conv] { + &mut self.convolutions + } - self.set_key_value(&format!("convolution_type_{}", index + 1), &type_); - self.set_key_value(&format!("convolution_particle_{}", index + 1), &particle); + /// Charge conjugate both the convolution function with index `convolution` and the PIDs in the + /// channel definition corresponding to it. 
This leaves the the results returned by + /// [`Grid::convolve`] invariant. + pub fn charge_conjugate(&mut self, convolution: usize) { + let pid_basis = *self.pid_basis(); - // update the remaining metadata - for (index, convolution) in self.convolutions().into_iter().enumerate() { - if self - .key_values() - // UNWRAP: we set some key-values before so there must be a storage - .unwrap_or_else(|| unreachable!()) - .get(&format!("initial_state_{}", index + 1)) - .is_some() - { - self.set_convolution(index, convolution); - } + for channel in self.channels_mut() { + *channel = Channel::new( + channel + .entry() + .iter() + .cloned() + .map(|(mut pids, f)| { + let (cc_pid, f1) = pid_basis.charge_conjugate(pids[convolution]); + pids[convolution] = cc_pid; + (pids, f * f1) + }) + .collect(), + ); } + + self.convolutions_mut()[convolution] = self.convolutions()[convolution].cc(); } fn increase_shape(&mut self, new_dim: &(usize, usize, usize)) { @@ -882,8 +702,8 @@ impl Grid { || EmptySubgridV1.into(), ); - for ((i, j, k), subgrid) in self.subgrids.indexed_iter_mut() { - mem::swap(&mut new_subgrids[[i, j, k]], subgrid); + for (index, subgrid) in self.subgrids.indexed_iter_mut() { + mem::swap(&mut new_subgrids[<[usize; 3]>::from(index)], subgrid); } self.subgrids = new_subgrids; @@ -909,15 +729,17 @@ impl Grid { alpha: f64, logxir: f64, logxif: f64, + logxia: f64, global: f64, ) { for ((i, _, _), subgrid) in self.subgrids.indexed_iter_mut() { let order = &self.orders[i]; let factor = global - * alphas.powi(order.alphas.try_into().unwrap()) - * alpha.powi(order.alpha.try_into().unwrap()) - * logxir.powi(order.logxir.try_into().unwrap()) - * logxif.powi(order.logxif.try_into().unwrap()); + * alphas.powi(order.alphas.into()) + * alpha.powi(order.alpha.into()) + * logxir.powi(order.logxir.into()) + * logxif.powi(order.logxif.into()) + * logxia.powi(order.logxia.into()); subgrid.scale(factor); } @@ -985,13 +807,7 @@ impl Grid { }); } - self.more_members.upgrade(); - - match &mut 
self.more_members { - MoreMembers::V1(_) => unreachable!(), - MoreMembers::V2(mmv2) => mmv2.remapper = Some(remapper), - MoreMembers::V3(mmv3) => mmv3.remapper = Some(remapper), - } + self.remapper = Some(remapper); Ok(()) } @@ -999,19 +815,11 @@ impl Grid { /// Return the currently set remapper, if there is any. #[must_use] pub const fn remapper(&self) -> Option<&BinRemapper> { - match &self.more_members { - MoreMembers::V1(_) => None, - MoreMembers::V2(mmv2) => mmv2.remapper.as_ref(), - MoreMembers::V3(mmv3) => mmv3.remapper.as_ref(), - } + self.remapper.as_ref() } fn remapper_mut(&mut self) -> Option<&mut BinRemapper> { - match &mut self.more_members { - MoreMembers::V1(_) => None, - MoreMembers::V2(mmv2) => mmv2.remapper.as_mut(), - MoreMembers::V3(mmv3) => mmv3.remapper.as_mut(), - } + self.remapper.as_mut() } /// Returns all information about the bins in this grid. @@ -1029,9 +837,11 @@ impl Grid { /// Optimizes the internal datastructures for space efficiency. The parameter `flags` /// determines which optimizations are applied, see [`GridOptFlags`]. 
pub fn optimize_using(&mut self, flags: GridOptFlags) { + if flags.contains(GridOptFlags::OPTIMIZE_NODES) { + self.optimize_nodes(); + } if flags.contains(GridOptFlags::OPTIMIZE_SUBGRID_TYPE) { - let ssd = flags.contains(GridOptFlags::STATIC_SCALE_DETECTION); - self.optimize_subgrid_type(ssd); + self.optimize_subgrid_type(); } if flags.contains(GridOptFlags::SYMMETRIZE_CHANNELS) { self.symmetrize_channels(); @@ -1047,26 +857,22 @@ impl Grid { } } - fn optimize_subgrid_type(&mut self, static_scale_detection: bool) { + fn optimize_nodes(&mut self) { + for subgrid in &mut self.subgrids { + subgrid.optimize_nodes(); + } + } + + fn optimize_subgrid_type(&mut self) { for subgrid in &mut self.subgrids { match subgrid { // replace empty subgrids of any type with `EmptySubgridV1` _ if subgrid.is_empty() => { *subgrid = EmptySubgridV1.into(); } - // can't be optimized without losing information - SubgridEnum::NtupleSubgridV1(_) => continue, _ => { - // TODO: this requires a `pub(crate)` in `LagrangeSubgridV2`; we should - // replace this with a method - if !static_scale_detection { - if let SubgridEnum::LagrangeSubgridV2(subgrid) = subgrid { - // disable static-scale detection - subgrid.static_q2 = -1.0; - } - } - - *subgrid = ImportOnlySubgridV2::from(&*subgrid).into(); + // TODO: check if we should remove this + *subgrid = ImportSubgridV1::from(&*subgrid).into(); } } } @@ -1141,11 +947,11 @@ impl Grid { rhs.scale(1.0 / factor); if lhs.is_empty() { // we can't merge into an EmptySubgridV1 - *lhs = rhs.clone_empty(); + *lhs = mem::replace(rhs, EmptySubgridV1.into()); + } else { + lhs.merge(rhs, None); + *rhs = EmptySubgridV1.into(); } - lhs.merge(rhs, false); - - *rhs = EmptySubgridV1.into(); } } } @@ -1185,30 +991,55 @@ impl Grid { } fn symmetrize_channels(&mut self) { - let convolutions = self.convolutions(); - if convolutions[0] != convolutions[1] { - return; - } + let pairs: Vec<_> = self + .convolutions() + .iter() + .enumerate() + .tuple_combinations() + .filter(|((_, 
conv_a), (_, conv_b))| conv_a == conv_b) + .map(|((idx_a, _), (idx_b, _))| (idx_a, idx_b)) + .collect(); + + let (idx_a, idx_b) = match *pairs.as_slice() { + [] => return, + [pair] => pair, + _ => panic!("more than two equal convolutions found"), + }; + let a_subgrid = self + .kinematics() + .iter() + .position(|&kin| kin == Kinematics::X(idx_a)) + // UNWRAP: should be guaranteed by the constructor + .unwrap(); + let b_subgrid = self + .kinematics() + .iter() + .position(|&kin| kin == Kinematics::X(idx_b)) + // UNWRAP: should be guaranteed by the constructor + .unwrap(); let mut indices: Vec = (0..self.channels.len()).rev().collect(); while let Some(index) = indices.pop() { let channel_entry = &self.channels[index]; - if *channel_entry == channel_entry.transpose() { + if *channel_entry == channel_entry.transpose(idx_a, idx_b) { // check if in all cases the limits are compatible with merging self.subgrids .slice_mut(s![.., .., index]) .iter_mut() .for_each(|subgrid| { - if !subgrid.is_empty() && (subgrid.x1_grid() == subgrid.x2_grid()) { - subgrid.symmetrize(); + if !subgrid.is_empty() + && (subgrid.node_values()[a_subgrid] + == subgrid.node_values()[b_subgrid]) + { + subgrid.symmetrize(a_subgrid, b_subgrid); } }); } else if let Some((j, &other_index)) = indices .iter() .enumerate() - .find(|(_, i)| self.channels[**i] == channel_entry.transpose()) + .find(|(_, i)| self.channels[**i] == channel_entry.transpose(idx_a, idx_b)) { indices.remove(j); @@ -1221,11 +1052,13 @@ impl Grid { if !rhs.is_empty() { if lhs.is_empty() { // we can't merge into an EmptySubgridV1 - *lhs = rhs.clone_empty(); + *lhs = mem::replace(rhs, EmptySubgridV1.into()); + // transpose `lhs` + todo!(); + } else { + lhs.merge(rhs, Some((a_subgrid, b_subgrid))); + *rhs = EmptySubgridV1.into(); } - - lhs.merge(rhs, true); - *rhs = EmptySubgridV1.into(); } } } @@ -1233,55 +1066,28 @@ impl Grid { } /// Upgrades the internal data structures to their latest versions. 
- pub fn upgrade(&mut self) { - self.more_members.upgrade(); - } + pub fn upgrade(&mut self) {} - /// Returns a map with key-value pairs, if there are any stored in this grid. + /// Return the metadata of this grid. #[must_use] - pub const fn key_values(&self) -> Option<&HashMap> { - match &self.more_members { - MoreMembers::V3(mmv3) => Some(&mmv3.key_value_db), - MoreMembers::V2(mmv2) => Some(&mmv2.key_value_db), - MoreMembers::V1(_) => None, - } + pub const fn metadata(&self) -> &BTreeMap { + &self.metadata } - /// Returns a map with key-value pairs, if there are any stored in this grid. + /// Return the metadata of this grid. /// /// # Panics /// /// TODO #[must_use] - pub fn key_values_mut(&mut self) -> &mut HashMap { - self.more_members.upgrade(); - - match &mut self.more_members { - MoreMembers::V1(_) => unreachable!(), - MoreMembers::V2(mmv2) => &mut mmv2.key_value_db, - MoreMembers::V3(mmv3) => &mut mmv3.key_value_db, - } - } - - /// Sets a specific key-value pair in this grid. - /// - /// # Panics - /// - /// TODO - pub fn set_key_value(&mut self, key: &str, value: &str) { - self.key_values_mut() - .insert(key.to_owned(), value.to_owned()); + pub fn metadata_mut(&mut self) -> &mut BTreeMap { + &mut self.metadata } /// Returns information for the generation of evolution operators that are being used in - /// [`Grid::evolve`] with the parameter `order_mask`. + /// [`Grid::convolve`] with the parameter `order_mask`. 
#[must_use] pub fn evolve_info(&self, order_mask: &[bool]) -> EvolveInfo { - use super::evolution::EVOLVE_INFO_TOL_ULPS; - - let has_pdf1 = self.convolutions()[0] != Convolution::None; - let has_pdf2 = self.convolutions()[1] != Convolution::None; - let mut ren1 = Vec::new(); let mut fac1 = Vec::new(); let mut x1 = Vec::new(); @@ -1292,32 +1098,41 @@ impl Grid { .indexed_iter() .filter_map(|(tuple, subgrid)| { (!subgrid.is_empty() && (order_mask.is_empty() || order_mask[tuple.0])) - .then_some((tuple.2, subgrid)) + .then_some((&self.channels()[tuple.2], subgrid)) }) { - ren1.extend(subgrid.mu2_grid().iter().map(|Mu2 { ren, .. }| *ren)); + ren1.extend( + self.scales() + .ren + .calc(&subgrid.node_values(), self.kinematics()) + .iter(), + ); ren1.sort_by(f64::total_cmp); - ren1.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLVE_INFO_TOL_ULPS)); + ren1.dedup_by(subgrid::node_value_eq_ref_mut); - fac1.extend(subgrid.mu2_grid().iter().map(|Mu2 { fac, .. }| *fac)); + fac1.extend( + self.scales() + .fac + .calc(&subgrid.node_values(), self.kinematics()) + .iter(), + ); fac1.sort_by(f64::total_cmp); - fac1.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLVE_INFO_TOL_ULPS)); + fac1.dedup_by(subgrid::node_value_eq_ref_mut); - if has_pdf1 { - x1.extend(subgrid.x1_grid().iter().copied()); - } - if has_pdf2 { - x1.extend(subgrid.x2_grid().iter()); - } + x1.extend( + subgrid + .node_values() + .iter() + .zip(self.kinematics()) + .filter_map(|(nv, kin)| matches!(kin, Kinematics::X(_)).then(|| nv)) + .flatten(), + ); x1.sort_by(f64::total_cmp); - x1.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLVE_INFO_TOL_ULPS)); + x1.dedup_by(subgrid::node_value_eq_ref_mut); - if has_pdf1 { - pids1.extend(self.channels()[channel].entry().iter().map(|(a, _, _)| a)); - } - if has_pdf2 { - pids1.extend(self.channels()[channel].entry().iter().map(|(_, b, _)| b)); + for (index, _) in self.convolutions().iter().enumerate() { + pids1.extend(channel.entry().iter().map(|(pids, _)| pids[index])); 
} pids1.sort_unstable(); @@ -1332,73 +1147,59 @@ impl Grid { } } - /// Converts this `Grid` into an [`FkTable`] using an evolution kernel operator (EKO) given as - /// `operator`. The dimensions and properties of this operator must be described using `info`. - /// The parameter `order_mask` can be used to include or exclude orders from this operation, - /// and must correspond to the ordering given by [`Grid::orders`]. Orders that are not given - /// are enabled, and in particular if `order_mask` is empty all orders are activated. - /// - /// # Errors - /// - /// Returns a [`GridError::EvolutionFailure`] if either the `operator` or its `info` is - /// incompatible with this `Grid`. - #[deprecated(since = "0.7.4", note = "use evolve_with_slice_iter instead")] - pub fn evolve( - &self, - operator: ArrayView5, - info: &OperatorInfo, - order_mask: &[bool], - ) -> Result { - self.evolve_with_slice_iter( - info.fac1 - .iter() - .zip(operator.axis_iter(Axis(0))) - .map(|(&fac1, op)| { - Ok::<_, GridError>(( - OperatorSliceInfo { - fac0: info.fac0, - pids0: info.pids0.clone(), - x0: info.x0.clone(), - fac1, - pids1: info.pids1.clone(), - x1: info.x1.clone(), - pid_basis: info.pid_basis, - }, - CowArray::from(op), - )) - }), - order_mask, - (info.xir, info.xif), - &AlphasTable { - ren1: info.ren1.clone(), - alphas: info.alphas.clone(), - }, - ) - } - // TODO: // - try to find a better solution than to require that E must be convertible into // anyhow::Error - /// Converts this `Grid` into an [`FkTable`] using `slices` that must iterate over a [`Result`] - /// of tuples of an [`OperatorSliceInfo`] and the corresponding sliced operator. The parameter - /// `order_mask` can be used to include or exclude orders from this operation, and must - /// correspond to the ordering given by [`Grid::orders`]. Orders that are not given are - /// enabled, and in particular if `order_mask` is empty all orders are activated. 
+ /// Convert this `Grid` into an [`FkTable`] using `slices.len()` evolution operators, which for + /// each entry must iterate over a [`Result`] of tuples of an [`OperatorSliceInfo`] and the + /// corresponding sliced operator. The parameter `order_mask` can be used to include or exclude + /// orders from this operation, and must correspond to the ordering given by [`Grid::orders`]. + /// Orders that are not given are enabled, and in particular if `order_mask` is empty all + /// orders are activated. /// /// # Errors /// /// Returns a [`GridError::EvolutionFailure`] if either the `operator` or its `info` is /// incompatible with this `Grid`. Returns a [`GridError::Other`] if the iterator from `slices` /// return an error. - pub fn evolve_with_slice_iter<'a, E: Into>( + /// + /// # Panics + /// + /// Panics when the operators returned by either slice have different dimensions than promised + /// by the corresponding [`OperatorSliceInfo`]. + pub fn evolve< + 'a, + E: Into, + S: IntoIterator), E>>, + >( &self, - slices: impl IntoIterator), E>>, + slices: Vec, order_mask: &[bool], - xi: (f64, f64), + xi: (f64, f64, f64), alphas_table: &AlphasTable, ) -> Result { - use super::evolution::EVOLVE_INFO_TOL_ULPS; + struct Iter { + iters: Vec, + } + + impl Iterator for Iter { + type Item = Vec; + + fn next(&mut self) -> Option { + self.iters.iter_mut().map(Iterator::next).collect() + } + } + + fn zip_n(iters: O) -> impl Iterator> + where + O: IntoIterator, + T: IntoIterator, + { + Iter { + iters: iters.into_iter().map(IntoIterator::into_iter).collect(), + } + } let mut lhs: Option = None; // Q2 slices we use @@ -1410,153 +1211,94 @@ impl Grid { .evolve_info(order_mask) .fac1 .into_iter() + // TODO: also take care of the fragmentation scale .map(|fac| xi.1 * xi.1 * fac) .collect(); - for result in slices { - let (info, operator) = result.map_err(|err| GridError::Other(err.into()))?; + let mut perm = Vec::new(); - op_fac1.push(info.fac1); + for result in zip_n(slices) { + let 
(infos, operators): (Vec, Vec>) = result + .into_iter() + .map(|res| res.map_err(|err| GridError::Other(err.into()))) + .collect::>()?; - // it's possible that due to small numerical differences we get two slices which are - // almost the same. We have to skip those in order not to evolve the 'same' slice twice - if used_op_fac1 - .iter() - .any(|&fac| approx_eq!(f64, fac, info.fac1, ulps = EVOLVE_INFO_TOL_ULPS)) - { - continue; - } + let (info_0, infos_rest) = infos + .split_first() + // UNWRAP: TODO + .unwrap(); - // skip slices that the grid doesn't use - if !grid_fac1 - .iter() - .any(|&fac| approx_eq!(f64, fac, info.fac1, ulps = EVOLVE_INFO_TOL_ULPS)) - { - continue; - } - - let op_info_dim = ( - info.pids1.len(), - info.x1.len(), - info.pids0.len(), - info.x0.len(), + let dim_op_info_0 = ( + info_0.pids1.len(), + info_0.x1.len(), + info_0.pids0.len(), + info_0.x0.len(), ); assert_eq!( - operator.dim(), - op_info_dim, + operators[0].dim(), + dim_op_info_0, "operator information {:?} does not match the operator's dimensions: {:?}", - op_info_dim, - operator.dim(), + dim_op_info_0, + operators[0].dim(), ); - let view = operator.view(); - - let (subgrids, channels) = if self.convolutions()[0] != Convolution::None - && self.convolutions()[1] != Convolution::None - { - evolution::evolve_slice_with_two(self, &view, &info, order_mask, xi, alphas_table) - } else { - evolution::evolve_slice_with_one(self, &view, &info, order_mask, xi, alphas_table) - }?; - - let mut rhs = Self { - subgrids, - channels, - bin_limits: self.bin_limits.clone(), - orders: vec![Order::new(0, 0, 0, 0)], - subgrid_params: SubgridParams::default(), - more_members: self.more_members.clone(), - }; - - // TODO: use a new constructor to set this information - rhs.set_pid_basis(info.pid_basis); - - if let Some(lhs) = &mut lhs { - lhs.merge(rhs)?; - } else { - lhs = Some(rhs); + for (index, info) in infos_rest.iter().enumerate() { + // TODO: what if the scales of the EKOs don't agree? 
Is there an ordering problem? + assert!(subgrid::node_value_eq(info_0.fac1, info.fac1)); + + assert_eq!(info_0.pid_basis, info.pid_basis); + + let dim_op_info = ( + info.pids1.len(), + info.x1.len(), + info.pids0.len(), + info.x0.len(), + ); + + assert_eq!( + operators[index + 1].dim(), + dim_op_info, + "operator information {:?} does not match the operator's dimensions: {:?}", + dim_op_info, + operators[index + 1].dim(), + ); } - used_op_fac1.push(info.fac1); - } - - // UNWRAP: if we can't compare two numbers there's a bug - op_fac1.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); - - // make sure we've evolved all slices - if let Some(muf2) = grid_fac1.into_iter().find(|&grid_mu2| { - !used_op_fac1 - .iter() - .any(|&eko_mu2| approx_eq!(f64, grid_mu2, eko_mu2, ulps = EVOLVE_INFO_TOL_ULPS)) - }) { - return Err(GridError::EvolutionFailure(format!( - "no operator for muf2 = {muf2} found in {op_fac1:?}" - ))); - } - - // TODO: convert this unwrap into error - let grid = lhs.unwrap(); - - // UNWRAP: merging evolved slices should be a proper FkTable again - Ok(FkTable::try_from(grid).unwrap_or_else(|_| unreachable!())) - } + if perm.is_empty() { + let eko_conv_types: Vec = + infos.iter().map(|info| info.conv_type).collect(); - /// Converts this `Grid` into an [`FkTable`] using `slices` that must iterate over a [`Result`] - /// of tuples of an [`OperatorSliceInfo`] and the corresponding sliced operator. The parameter - /// `order_mask` can be used to include or exclude orders from this operation, and must - /// correspond to the ordering given by [`Grid::orders`]. Orders that are not given are - /// enabled, and in particular if `order_mask` is empty all orders are activated. - /// - /// # Errors - /// - /// Returns a [`GridError::EvolutionFailure`] if either the `operator` or its `info` is - /// incompatible with this `Grid`. Returns a [`GridError::Other`] if the iterator from `slices` - /// return an error. 
- pub fn evolve_with_slice_iter2<'a, E: Into>( - &self, - slices_a: impl IntoIterator), E>>, - slices_b: impl IntoIterator), E>>, - order_mask: &[bool], - xi: (f64, f64), - alphas_table: &AlphasTable, - ) -> Result { - use super::evolution::EVOLVE_INFO_TOL_ULPS; - use itertools::izip; - - let mut lhs: Option = None; - // Q2 slices we use - let mut used_op_fac1 = Vec::new(); - // Q2 slices we encounter, but possibly don't use - let mut op_fac1 = Vec::new(); - // Q2 slices needed by the grid - let grid_fac1: Vec<_> = self - .evolve_info(order_mask) - .fac1 - .into_iter() - .map(|fac| xi.1 * xi.1 * fac) - .collect(); - - // TODO: simplify the ugly repetition below by offloading some ops into fn - for (result_a, result_b) in izip!(slices_a, slices_b) { - // Operate on `slices_a` - let (info_a, operator_a) = result_a.map_err(|err| GridError::Other(err.into()))?; - // Operate on `slices_b` - let (info_b, operator_b) = result_b.map_err(|err| GridError::Other(err.into()))?; - - // TODO: what if the scales of the EKOs don't agree? Is there an ordering problem? - assert_approx_eq!(f64, info_a.fac1, info_b.fac1, ulps = EVOLVE_INFO_TOL_ULPS); - - // also the PID bases must be the same - assert_eq!(info_a.pid_basis, info_b.pid_basis); + perm = self + .convolutions() + .iter() + .enumerate() + .map(|(max_idx, conv)| { + eko_conv_types + .iter() + .take(max_idx + 1) + .enumerate() + .rev() + .find_map(|(idx, &eko_conv_type)| { + if conv.conv_type() == eko_conv_type { + Some(idx) + } else { + None + } + }) + // TODO: convert `unwrap` to `Err` + .unwrap() + }) + .collect(); + } - op_fac1.push(info_a.fac1); + op_fac1.push(info_0.fac1); // it's possible that due to small numerical differences we get two slices which are // almost the same. 
We have to skip those in order not to evolve the 'same' slice twice if used_op_fac1 .iter() - .any(|&fac| approx_eq!(f64, fac, info_a.fac1, ulps = EVOLVE_INFO_TOL_ULPS)) + .any(|&fac| subgrid::node_value_eq(fac, info_0.fac1)) { continue; } @@ -1564,88 +1306,70 @@ impl Grid { // skip slices that the grid doesn't use if !grid_fac1 .iter() - .any(|&fac| approx_eq!(f64, fac, info_a.fac1, ulps = EVOLVE_INFO_TOL_ULPS)) + .any(|&fac| subgrid::node_value_eq(fac, info_0.fac1)) { continue; } - let op_info_dim_a = ( - info_a.pids1.len(), - info_a.x1.len(), - info_a.pids0.len(), - info_a.x0.len(), - ); - - assert_eq!( - operator_a.dim(), - op_info_dim_a, - "operator information {:?} does not match the operator's dimensions: {:?}", - op_info_dim_a, - operator_a.dim(), - ); - - let op_info_dim_b = ( - info_b.pids1.len(), - info_b.x1.len(), - info_b.pids0.len(), - info_b.x0.len(), - ); - - assert_eq!( - operator_b.dim(), - op_info_dim_b, - "operator information {:?} does not match the operator's dimensions: {:?}", - op_info_dim_b, - operator_b.dim(), - ); - - let views = [operator_a.view(), operator_b.view()]; - let infos = [info_a, info_b]; - - assert!( - (self.convolutions()[0] != Convolution::None) - && (self.convolutions()[1] != Convolution::None), - "only one convolution found, use `Grid::evolve_with_slice_iter` instead" - ); + let operators: Vec<_> = perm.iter().map(|&idx| operators[idx].view()).collect(); + let infos: Vec<_> = perm.iter().map(|&idx| infos[idx].clone()).collect(); - let (subgrids, channels) = evolution::evolve_slice_with_two2( + let (subgrids, channels) = evolution::evolve_slice_with_many( self, - &views, + &operators, &infos, order_mask, xi, alphas_table, )?; - let mut rhs = Self { + let fac = if matches!(self.scales().fac, ScaleFuncForm::NoScale) { + ScaleFuncForm::NoScale + } else { + ScaleFuncForm::Scale(0) + }; + let frg = if matches!(self.scales().frg, ScaleFuncForm::NoScale) { + ScaleFuncForm::NoScale + } else { + ScaleFuncForm::Scale(0) + }; + + let 
rhs = Self { subgrids, channels, bin_limits: self.bin_limits.clone(), - orders: vec![Order::new(0, 0, 0, 0)], - subgrid_params: SubgridParams::default(), + orders: vec![Order::new(0, 0, 0, 0, 0)], + interps: self.interps.clone(), + metadata: self.metadata.clone(), + convolutions: self.convolutions.clone(), + pid_basis: infos[0].pid_basis, more_members: self.more_members.clone(), + kinematics: self.kinematics.clone(), + remapper: self.remapper.clone(), + scales: Scales { + // FK-tables have their renormalization scales burnt in + ren: ScaleFuncForm::NoScale, + fac, + frg, + }, }; - // TODO: use a new constructor to set this information - rhs.set_pid_basis(infos[0].pid_basis); - if let Some(lhs) = &mut lhs { lhs.merge(rhs)?; } else { lhs = Some(rhs); } - used_op_fac1.push(infos[0].fac1); + used_op_fac1.push(info_0.fac1); } - // UNWRAP: if we can't compare two numbers there's a bug - op_fac1.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); + op_fac1.sort_by(f64::total_cmp); // make sure we've evolved all slices if let Some(muf2) = grid_fac1.into_iter().find(|&grid_mu2| { !used_op_fac1 .iter() - .any(|&eko_mu2| approx_eq!(f64, grid_mu2, eko_mu2, ulps = EVOLVE_INFO_TOL_ULPS)) + .any(|&eko_mu2| subgrid::node_value_eq(grid_mu2, eko_mu2)) }) { return Err(GridError::EvolutionFailure(format!( "no operator for muf2 = {muf2} found in {op_fac1:?}" @@ -1736,29 +1460,11 @@ impl Grid { /// Change the particle ID convention. 
pub fn rotate_pid_basis(&mut self, pid_basis: PidBasis) { - match (self.pid_basis(), pid_basis) { - (PidBasis::Pdg, PidBasis::Evol) => { - self.channels = self - .channels() - .iter() - .map(|channel| Channel::translate(channel, &pids::pdg_mc_pids_to_evol)) - .collect(); - - self.set_pid_basis(PidBasis::Evol); - } - (PidBasis::Evol, PidBasis::Pdg) => { - self.channels = self - .channels() - .iter() - .map(|channel| Channel::translate(channel, &pids::evol_to_pdg_mc_ids)) - .collect(); - - self.set_pid_basis(PidBasis::Pdg); - } - (PidBasis::Evol, PidBasis::Evol) | (PidBasis::Pdg, PidBasis::Pdg) => { - // here's nothing to do - } + let self_pid_basis = *self.pid_basis(); + for channel in &mut self.channels { + *channel = self_pid_basis.translate(pid_basis, channel.clone()); } + *self.pid_basis_mut() = pid_basis; } /// Deletes channels with the corresponding `channel_indices`. Repeated indices and indices @@ -1805,42 +1511,6 @@ impl Grid { } } - pub(crate) fn rewrite_channels(&mut self, add: &[(i32, i32)], del: &[i32]) { - self.channels = self - .channels() - .iter() - .map(|entry| { - Channel::new( - entry - .entry() - .iter() - .map(|(a, b, f)| { - ( - // if `a` is to be added to another pid replace it with this pid - add.iter().fold( - *a, - |id, &(source, target)| if id == source { target } else { id }, - ), - // if `b` is to be added to another pid replace it with this pid - add.iter().fold( - *b, - |id, &(source, target)| if id == source { target } else { id }, - ), - // if any of the pids `a` or `b` are to b deleted set the factor to - // zero - if del.iter().any(|id| id == a || id == b) { - 0.0 - } else { - *f - }, - ) - }) - .collect(), - ) - }) - .collect(); - } - /// Splits the grid such that each channel contains only a single tuple of PIDs. 
pub fn split_channels(&mut self) { let indices: Vec<_> = self @@ -1858,7 +1528,7 @@ impl Grid { entry .entry() .iter() - .copied() + .cloned() .map(move |entry| Channel::new(vec![entry])) }) .collect(); @@ -1868,34 +1538,112 @@ impl Grid { #[cfg(test)] mod tests { use super::*; + use crate::boc::ScaleFuncForm; use crate::channel; + use crate::convolutions::ConvType; + use float_cmp::assert_approx_eq; use std::fs::File; #[test] - fn grid_with_subgrid_type() { - let subgrid_type = String::from("Idontexist"); - let result = Grid::with_subgrid_type( - vec![], - vec![], - vec![], - SubgridParams::default(), - ExtraSubgridParams::default(), - &subgrid_type, + #[should_panic(expected = "channel #0 has wrong number of PIDs: expected 2, found 3")] + fn grid_new_panic0() { + let channel = vec![(vec![1, -1, 1], 1.0), (vec![2, -2, 2], 1.0)]; + + let _ = Grid::new( + PidBasis::Pdg, + vec![Channel::new(channel)], + vec![Order::new(0, 2, 0, 0, 0)], + vec![0.0, 1.0], + vec![ + Conv::new(ConvType::UnpolPDF, 2212), + Conv::new(ConvType::UnpolPDF, 2212), + ], + v0::default_interps(2), + vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2], + Scales { + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, + }, + ); + } + + #[test] + #[should_panic(expected = "interps and kinematics have different lengths: 2 vs. 
3")] + fn grid_new_panic1() { + let channel = vec![(vec![1, -1], 1.0), (vec![2, -2], 1.0)]; + + let _ = Grid::new( + PidBasis::Pdg, + vec![Channel::new(channel)], + vec![Order::new(0, 2, 0, 0, 0)], + vec![0.0, 1.0], + vec![ + Conv::new(ConvType::UnpolPDF, 2212), + Conv::new(ConvType::UnpolPDF, 2212), + ], + v0::default_interps(1), + vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2], + Scales { + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, + }, + ); + } + + #[test] + #[should_panic(expected = "scales and kinematics are not compatible")] + fn grid_new_panic2() { + let channel = vec![(vec![1, -1], 1.0), (vec![2, -2], 1.0)]; + + let _ = Grid::new( + PidBasis::Pdg, + vec![Channel::new(channel)], + vec![Order::new(0, 2, 0, 0, 0)], + vec![0.0, 1.0], + vec![ + Conv::new(ConvType::UnpolPDF, 2212), + Conv::new(ConvType::UnpolPDF, 2212), + ], + v0::default_interps(2), + vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2], + Scales { + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(1), + frg: ScaleFuncForm::NoScale, + }, ); + } - matches!(result, Err(GridError::UnknownSubgridType(x)) if x == subgrid_type); + #[test] + fn grid_read_file_version_unsupported() { + assert!(matches!( + Grid::read( + &[b'P', b'i', b'n', b'e', b'A', b'P', b'P', b'L', 99, 0, 0, 0, 0, 0, 0, 0][..] 
+ ), + Err(GridError::FileVersionUnsupported { file_version: 99 }) + )); } #[test] fn grid_merge_empty_subgrids() { let mut grid = Grid::new( + PidBasis::Pdg, vec![ - channel![2, 2, 1.0; 4, 4, 1.0], - channel![1, 1, 1.0; 3, 3, 1.0], + channel![1.0 * (2, 2) + 1.0 * (4, 4)], + channel![1.0 * (1, 1) + 1.0 * (3, 3)], ], - vec![Order::new(0, 2, 0, 0)], + vec![Order::new(0, 2, 0, 0, 0)], vec![0.0, 0.25, 0.5, 0.75, 1.0], - SubgridParams::default(), + vec![Conv::new(ConvType::UnpolPDF, 2212); 2], + v0::default_interps(2), + vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2], + Scales { + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, + }, ); assert_eq!(grid.bin_info().bins(), 4); @@ -1903,14 +1651,22 @@ mod tests { assert_eq!(grid.orders().len(), 1); let other = Grid::new( + PidBasis::Pdg, vec![ // differently ordered than `grid` - channel![1, 1, 1.0; 3, 3, 1.0], - channel![2, 2, 1.0; 4, 4, 1.0], + channel![1.0 * (1, 1) + 1.0 * (3, 3)], + channel![1.0 * (2, 2) + 1.0 * (4, 4)], ], - vec![Order::new(1, 2, 0, 0), Order::new(1, 2, 0, 1)], + vec![Order::new(1, 2, 0, 0, 0), Order::new(1, 2, 0, 1, 0)], vec![0.0, 0.25, 0.5, 0.75, 1.0], - SubgridParams::default(), + vec![Conv::new(ConvType::UnpolPDF, 2212); 2], + v0::default_interps(2), + vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2], + Scales { + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, + }, ); // merging with empty subgrids should not change the grid @@ -1924,13 +1680,21 @@ mod tests { #[test] fn grid_merge_orders() { let mut grid = Grid::new( + PidBasis::Pdg, vec![ - channel![2, 2, 1.0; 4, 4, 1.0], - channel![1, 1, 1.0; 3, 3, 1.0], + channel![1.0 * (2, 2) + 1.0 * (4, 4)], + channel![1.0 * (1, 1) + 1.0 * (3, 3)], ], - vec![Order::new(0, 2, 0, 0)], + vec![Order::new(0, 2, 0, 0, 0)], vec![0.0, 0.25, 0.5, 0.75, 1.0], - SubgridParams::default(), + vec![Conv::new(ConvType::UnpolPDF, 2212); 2], + 
v0::default_interps(2), + vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2], + Scales { + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, + }, ); assert_eq!(grid.bin_info().bins(), 4); @@ -1938,42 +1702,32 @@ mod tests { assert_eq!(grid.orders().len(), 1); let mut other = Grid::new( + PidBasis::Pdg, vec![ - channel![2, 2, 1.0; 4, 4, 1.0], - channel![1, 1, 1.0; 3, 3, 1.0], + channel![1.0 * (2, 2) + 1.0 * (4, 4)], + channel![1.0 * (1, 1) + 1.0 * (3, 3)], ], vec![ - Order::new(1, 2, 0, 0), - Order::new(1, 2, 0, 1), - Order::new(0, 2, 0, 0), + Order::new(1, 2, 0, 0, 0), + Order::new(1, 2, 0, 1, 0), + Order::new(0, 2, 0, 0, 0), ], vec![0.0, 0.25, 0.5, 0.75, 1.0], - SubgridParams::default(), - ); - - other.fill_all( - 0, - 0.1, - &Ntuple { - x1: 0.1, - x2: 0.2, - q2: 90.0_f64.powi(2), - weight: (), - }, - &[1.0, 2.0], - ); - other.fill_all( - 1, - 0.1, - &Ntuple { - x1: 0.1, - x2: 0.2, - q2: 90.0_f64.powi(2), - weight: (), + vec![Conv::new(ConvType::UnpolPDF, 2212); 2], + v0::default_interps(2), + vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2], + Scales { + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, }, - &[1.0, 2.0], ); + other.fill(0, 0.1, 0, &[90.0_f64.powi(2), 0.1, 0.2], 1.0); + other.fill(0, 0.1, 1, &[90.0_f64.powi(2), 0.1, 0.2], 2.0); + other.fill(1, 0.1, 0, &[90.0_f64.powi(2), 0.1, 0.2], 1.0); + other.fill(1, 0.1, 1, &[90.0_f64.powi(2), 0.1, 0.2], 2.0); + // merge with four non-empty subgrids grid.merge(other).unwrap(); @@ -1985,13 +1739,21 @@ mod tests { #[test] fn grid_merge_channels_entries() { let mut grid = Grid::new( + PidBasis::Pdg, vec![ - channel![2, 2, 1.0; 4, 4, 1.0], - channel![1, 1, 1.0; 3, 3, 1.0], + channel![1.0 * (2, 2) + 1.0 * (4, 4)], + channel![1.0 * (1, 1) + 1.0 * (3, 3)], ], - vec![Order::new(0, 2, 0, 0)], + vec![Order::new(0, 2, 0, 0, 0)], vec![0.0, 0.25, 0.5, 0.75, 1.0], - SubgridParams::default(), + 
vec![Conv::new(ConvType::UnpolPDF, 2212); 2], + v0::default_interps(2), + vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2], + Scales { + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, + }, ); assert_eq!(grid.bin_info().bins(), 4); @@ -1999,24 +1761,25 @@ mod tests { assert_eq!(grid.orders().len(), 1); let mut other = Grid::new( - vec![channel![22, 22, 1.0], channel![2, 2, 1.0; 4, 4, 1.0]], - vec![Order::new(0, 2, 0, 0)], + PidBasis::Pdg, + vec![ + channel![1.0 * (22, 22)], + channel![1.0 * (2, 2) + 1.0 * (4, 4)], + ], + vec![Order::new(0, 2, 0, 0, 0)], vec![0.0, 0.25, 0.5, 0.75, 1.0], - SubgridParams::default(), + vec![Conv::new(ConvType::UnpolPDF, 2212); 2], + v0::default_interps(2), + vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2], + Scales { + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, + }, ); // fill the photon-photon entry - other.fill( - 0, - 0.1, - 0, - &Ntuple { - x1: 0.1, - x2: 0.2, - q2: 90.0_f64.powi(2), - weight: 3.0, - }, - ); + other.fill(0, 0.1, 0, &[90.0_f64.powi(2), 0.1, 0.2], 3.0); grid.merge(other).unwrap(); @@ -2028,13 +1791,21 @@ mod tests { #[test] fn grid_merge_bins() { let mut grid = Grid::new( + PidBasis::Pdg, vec![ - channel![2, 2, 1.0; 4, 4, 1.0], - channel![1, 1, 1.0; 3, 3, 1.0], + channel![1.0 * (2, 2) + 1.0 * (4, 4)], + channel![1.0 * (1, 1) + 1.0 * (3, 3)], ], - vec![Order::new(0, 2, 0, 0)], + vec![Order::new(0, 2, 0, 0, 0)], vec![0.0, 0.25, 0.5], - SubgridParams::default(), + vec![Conv::new(ConvType::UnpolPDF, 2212); 2], + v0::default_interps(2), + vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2], + Scales { + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, + }, ); assert_eq!(grid.bin_info().bins(), 2); @@ -2042,28 +1813,27 @@ mod tests { assert_eq!(grid.orders().len(), 1); let mut other = Grid::new( + PidBasis::Pdg, vec![ // channels are differently sorted - 
channel![1, 1, 1.0; 3, 3, 1.0], - channel![2, 2, 1.0; 4, 4, 1.0], + channel![1.0 * (1, 1) + 1.0 * (3, 3)], + channel![1.0 * (2, 2) + 1.0 * (4, 4)], ], - vec![Order::new(0, 2, 0, 0)], + vec![Order::new(0, 2, 0, 0, 0)], vec![0.5, 0.75, 1.0], - SubgridParams::default(), - ); - - other.fill_all( - 0, - 0.1, - &Ntuple { - x1: 0.1, - x2: 0.2, - q2: 90.0_f64.powi(2), - weight: (), + vec![Conv::new(ConvType::UnpolPDF, 2212); 2], + v0::default_interps(2), + vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2], + Scales { + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, }, - &[2.0, 3.0], ); + other.fill(0, 0.1, 0, &[90.0_f64.powi(2), 0.1, 0.2], 2.0); + other.fill(0, 0.1, 1, &[90.0_f64.powi(2), 0.1, 0.2], 3.0); + grid.merge(other).unwrap(); assert_eq!(grid.bin_info().bins(), 4); @@ -2071,37 +1841,79 @@ mod tests { assert_eq!(grid.orders().len(), 1); } - // TODO: convolve_subgrid, merge_bins, subgrid, set_subgrid - #[test] fn grid_convolutions() { let mut grid = Grid::new( - vec![channel![21, 21, 1.0]], + PidBasis::Pdg, + vec![channel![1.0 * (21, 21)]], vec![Order { alphas: 0, alpha: 0, logxir: 0, logxif: 0, + logxia: 0, }], vec![0.0, 1.0], - SubgridParams::default(), + vec![Conv::new(ConvType::UnpolPDF, 2212); 2], + v0::default_interps(2), + vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2], + Scales { + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, + }, ); // by default we assume unpolarized proton PDFs are used assert_eq!( grid.convolutions(), - [Convolution::UnpolPDF(2212), Convolution::UnpolPDF(2212)] + [ + Conv::new(ConvType::UnpolPDF, 2212), + Conv::new(ConvType::UnpolPDF, 2212) + ] ); - grid.set_convolution(0, Convolution::UnpolPDF(-2212)); - grid.set_convolution(1, Convolution::UnpolPDF(-2212)); + grid.convolutions_mut()[0] = Conv::new(ConvType::UnpolPDF, -2212); + grid.convolutions_mut()[1] = Conv::new(ConvType::UnpolPDF, -2212); assert_eq!( 
grid.convolutions(), - [Convolution::UnpolPDF(-2212), Convolution::UnpolPDF(-2212)] + [ + Conv::new(ConvType::UnpolPDF, -2212), + Conv::new(ConvType::UnpolPDF, -2212) + ] ); } + #[test] + fn grid_set_remapper_bin_number_mismatch() { + let mut grid = Grid::new( + PidBasis::Pdg, + vec![ + channel![1.0 * (2, 2) + 1.0 * (4, 4)], + channel![1.0 * (1, 1) + 1.0 * (3, 3)], + ], + vec![Order::new(0, 2, 0, 0, 0)], + vec![0.0, 0.25, 0.5, 0.75, 1.0], + vec![Conv::new(ConvType::UnpolPDF, 2212); 2], + v0::default_interps(2), + vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2], + Scales { + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, + }, + ); + + assert!(matches!( + grid.set_remapper(BinRemapper::new(vec![1.0], vec![(0.0, 1.0)]).unwrap()), + Err(GridError::BinNumberMismatch { + grid_bins: 4, + remapper_bins: 1 + }) + )); + } + #[test] fn evolve_info() { let grid = diff --git a/pineappl/src/import_only_subgrid.rs b/pineappl/src/import_only_subgrid.rs deleted file mode 100644 index 04624c09a..000000000 --- a/pineappl/src/import_only_subgrid.rs +++ /dev/null @@ -1,785 +0,0 @@ -//! TODO - -use super::grid::Ntuple; -use super::sparse_array3::SparseArray3; -use super::subgrid::{Mu2, Stats, Subgrid, SubgridEnum, SubgridIndexedIter}; -use serde::{Deserialize, Serialize}; -use std::borrow::Cow; -use std::mem; - -/// TODO -#[derive(Clone, Deserialize, Serialize)] -pub struct ImportOnlySubgridV1 { - array: SparseArray3, - q2_grid: Vec, - x1_grid: Vec, - x2_grid: Vec, -} - -impl ImportOnlySubgridV1 { - /// Constructor. - #[must_use] - pub fn new( - array: SparseArray3, - q2_grid: Vec, - x1_grid: Vec, - x2_grid: Vec, - ) -> Self { - Self { - array, - q2_grid, - x1_grid, - x2_grid, - } - } - - /// Return the array containing the numerical values of the grid. 
- pub fn array_mut(&mut self) -> &mut SparseArray3 { - &mut self.array - } -} - -impl Subgrid for ImportOnlySubgridV1 { - fn convolve( - &self, - _: &[f64], - _: &[f64], - _: &[Mu2], - lumi: &mut dyn FnMut(usize, usize, usize) -> f64, - ) -> f64 { - self.array - .indexed_iter() - .map(|((imu2, ix1, ix2), sigma)| sigma * lumi(ix1, ix2, imu2)) - .sum() - } - - fn fill(&mut self, _: &Ntuple) { - panic!("ImportOnlySubgridV1 doesn't support the fill operation"); - } - - fn mu2_grid(&self) -> Cow<[Mu2]> { - self.q2_grid - .iter() - .copied() - .map(|q2| Mu2 { ren: q2, fac: q2 }) - .collect() - } - - fn x1_grid(&self) -> Cow<[f64]> { - Cow::Borrowed(&self.x1_grid) - } - - fn x2_grid(&self) -> Cow<[f64]> { - Cow::Borrowed(&self.x2_grid) - } - - fn is_empty(&self) -> bool { - self.array.is_empty() - } - - fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) { - if let SubgridEnum::ImportOnlySubgridV1(other_grid) = other { - if self.array.is_empty() && !transpose { - mem::swap(&mut self.array, &mut other_grid.array); - } else { - // TODO: the general case isn't implemented - assert!(self.x1_grid() == other_grid.x1_grid()); - assert!(self.x2_grid() == other_grid.x2_grid()); - - for (other_index, mu2) in other_grid.mu2_grid().iter().enumerate() { - // the following should always be the case - assert_eq!(mu2.ren, mu2.fac); - let q2 = &mu2.ren; - - let index = match self - .q2_grid - .binary_search_by(|val| val.partial_cmp(q2).unwrap()) - { - Ok(index) => index, - Err(index) => { - self.q2_grid.insert(index, *q2); - self.array.increase_x_at(index); - index - } - }; - - for ((_, j, k), value) in other_grid - .array - .indexed_iter() - .filter(|&((i, _, _), _)| i == other_index) - { - let (j, k) = if transpose { (k, j) } else { (j, k) }; - self.array[[index, j, k]] += value; - } - } - } - } else { - todo!(); - } - } - - fn scale(&mut self, factor: f64) { - if factor == 0.0 { - self.array.clear(); - } else { - self.array.iter_mut().for_each(|x| *x *= factor); - } - } - - 
fn symmetrize(&mut self) { - let mut new_array = - SparseArray3::new(self.q2_grid.len(), self.x1_grid.len(), self.x2_grid.len()); - - for ((i, j, k), sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k >= j) { - new_array[[i, j, k]] = sigma; - } - // do not change the diagonal entries (k==j) - for ((i, j, k), sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k < j) { - new_array[[i, k, j]] += sigma; - } - - mem::swap(&mut self.array, &mut new_array); - } - - fn clone_empty(&self) -> SubgridEnum { - Self { - array: SparseArray3::new(self.q2_grid.len(), self.x1_grid.len(), self.x2_grid.len()), - q2_grid: self.q2_grid.clone(), - x1_grid: self.x1_grid.clone(), - x2_grid: self.x2_grid.clone(), - } - .into() - } - - fn indexed_iter(&self) -> SubgridIndexedIter { - Box::new(self.array.indexed_iter()) - } - - fn stats(&self) -> Stats { - Stats { - total: self.q2_grid.len() * self.x1_grid.len() * self.x2_grid.len(), - allocated: self.array.len() + self.array.zeros(), - zeros: self.array.zeros(), - overhead: self.array.overhead(), - bytes_per_value: mem::size_of::(), - } - } - - fn static_scale(&self) -> Option { - if let &[static_scale] = self.q2_grid.as_slice() { - Some(Mu2 { - ren: static_scale, - fac: static_scale, - }) - } else { - None - } - } -} - -/// TODO -#[derive(Clone, Deserialize, Serialize)] -pub struct ImportOnlySubgridV2 { - array: SparseArray3, - mu2_grid: Vec, - x1_grid: Vec, - x2_grid: Vec, -} - -impl ImportOnlySubgridV2 { - /// Constructor. - #[must_use] - pub fn new( - array: SparseArray3, - mu2_grid: Vec, - x1_grid: Vec, - x2_grid: Vec, - ) -> Self { - Self { - array, - mu2_grid, - x1_grid, - x2_grid, - } - } - - /// Return the array containing the numerical values of the grid. 
- pub fn array_mut(&mut self) -> &mut SparseArray3 { - &mut self.array - } -} - -impl Subgrid for ImportOnlySubgridV2 { - fn convolve( - &self, - _: &[f64], - _: &[f64], - _: &[Mu2], - lumi: &mut dyn FnMut(usize, usize, usize) -> f64, - ) -> f64 { - self.array - .indexed_iter() - .map(|((imu2, ix1, ix2), sigma)| sigma * lumi(ix1, ix2, imu2)) - .sum() - } - - fn fill(&mut self, _: &Ntuple) { - panic!("ImportOnlySubgridV2 doesn't support the fill operation"); - } - - fn mu2_grid(&self) -> Cow<[Mu2]> { - Cow::Borrowed(&self.mu2_grid) - } - - fn x1_grid(&self) -> Cow<[f64]> { - Cow::Borrowed(&self.x1_grid) - } - - fn x2_grid(&self) -> Cow<[f64]> { - Cow::Borrowed(&self.x2_grid) - } - - fn is_empty(&self) -> bool { - self.array.is_empty() - } - - fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) { - if let SubgridEnum::ImportOnlySubgridV2(other_grid) = other { - if self.array.is_empty() && !transpose { - mem::swap(&mut self.array, &mut other_grid.array); - } else { - let rhs_x1 = if transpose { - other_grid.x2_grid() - } else { - other_grid.x1_grid() - }; - let rhs_x2 = if transpose { - other_grid.x1_grid() - } else { - other_grid.x2_grid() - }; - - if (self.x1_grid() != rhs_x1) || (self.x2_grid() != rhs_x2) { - let mut x1_grid = self.x1_grid.clone(); - let mut x2_grid = self.x2_grid.clone(); - - x1_grid.extend_from_slice(&rhs_x1); - x1_grid.sort_by(|a, b| a.partial_cmp(b).unwrap()); - x1_grid.dedup(); - x2_grid.extend_from_slice(&rhs_x2); - x2_grid.sort_by(|a, b| a.partial_cmp(b).unwrap()); - x2_grid.dedup(); - - let mut array = - SparseArray3::new(self.array.dimensions().0, x1_grid.len(), x2_grid.len()); - - for ((i, j, k), value) in self.array.indexed_iter() { - let target_j = x1_grid - .iter() - .position(|&x| x == self.x1_grid[j]) - .unwrap_or_else(|| unreachable!()); - let target_k = x2_grid - .iter() - .position(|&x| x == self.x2_grid[k]) - .unwrap_or_else(|| unreachable!()); - - array[[i, target_j, target_k]] = value; - } - - self.array = array; - 
self.x1_grid = x1_grid; - self.x2_grid = x2_grid; - } - - for (other_index, mu2) in other_grid.mu2_grid().iter().enumerate() { - let index = match self - .mu2_grid - .binary_search_by(|val| val.partial_cmp(mu2).unwrap()) - { - Ok(index) => index, - Err(index) => { - self.mu2_grid.insert(index, mu2.clone()); - self.array.increase_x_at(index); - index - } - }; - - for ((_, j, k), value) in other_grid - .array - .indexed_iter() - .filter(|&((i, _, _), _)| i == other_index) - { - let (j, k) = if transpose { (k, j) } else { (j, k) }; - let target_j = self - .x1_grid - .iter() - .position(|&x| x == rhs_x1[j]) - .unwrap_or_else(|| unreachable!()); - let target_k = self - .x2_grid - .iter() - .position(|&x| x == rhs_x2[k]) - .unwrap_or_else(|| unreachable!()); - - self.array[[index, target_j, target_k]] += value; - } - } - } - } else { - todo!(); - } - } - - fn scale(&mut self, factor: f64) { - if factor == 0.0 { - self.array.clear(); - } else { - self.array.iter_mut().for_each(|x| *x *= factor); - } - } - - fn symmetrize(&mut self) { - let mut new_array = - SparseArray3::new(self.mu2_grid.len(), self.x1_grid.len(), self.x2_grid.len()); - - for ((i, j, k), sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k >= j) { - new_array[[i, j, k]] = sigma; - } - // do not change the diagonal entries (k==j) - for ((i, j, k), sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k < j) { - new_array[[i, k, j]] += sigma; - } - - mem::swap(&mut self.array, &mut new_array); - } - - fn clone_empty(&self) -> SubgridEnum { - Self { - array: SparseArray3::new(self.mu2_grid.len(), self.x1_grid.len(), self.x2_grid.len()), - mu2_grid: self.mu2_grid.clone(), - x1_grid: self.x1_grid.clone(), - x2_grid: self.x2_grid.clone(), - } - .into() - } - - fn indexed_iter(&self) -> SubgridIndexedIter { - Box::new(self.array.indexed_iter()) - } - - fn stats(&self) -> Stats { - Stats { - total: self.mu2_grid.len() * self.x1_grid.len() * self.x2_grid.len(), - allocated: self.array.len() + 
self.array.zeros(), - zeros: self.array.zeros(), - overhead: self.array.overhead(), - bytes_per_value: mem::size_of::(), - } - } - - fn static_scale(&self) -> Option { - if let [static_scale] = self.mu2_grid.as_slice() { - Some(static_scale.clone()) - } else { - None - } - } -} - -impl From<&SubgridEnum> for ImportOnlySubgridV2 { - fn from(subgrid: &SubgridEnum) -> Self { - // find smallest ranges - let (mu2_range, x1_range, x2_range) = subgrid.indexed_iter().fold( - ( - subgrid.mu2_grid().len()..0, - subgrid.x1_grid().len()..0, - subgrid.x2_grid().len()..0, - ), - |prev, ((imu2, ix1, ix2), _)| { - ( - prev.0.start.min(imu2)..prev.0.end.max(imu2 + 1), - prev.1.start.min(ix1)..prev.1.end.max(ix1 + 1), - prev.2.start.min(ix2)..prev.2.end.max(ix2 + 1), - ) - }, - ); - - let (mu2_grid, static_scale) = subgrid.static_scale().map_or_else( - || (subgrid.mu2_grid()[mu2_range.clone()].to_vec(), false), - |scale| (vec![scale], true), - ); - let x1_grid = subgrid.x1_grid()[x1_range.clone()].to_vec(); - let x2_grid = subgrid.x2_grid()[x2_range.clone()].to_vec(); - - let mut array = SparseArray3::new(mu2_grid.len(), x1_grid.len(), x2_grid.len()); - - for ((imu2, ix1, ix2), value) in subgrid.indexed_iter() { - // if there's a static scale we want every value to be added to same grid point - let index = if static_scale { - 0 - } else { - imu2 - mu2_range.start - }; - - array[[index, ix1 - x1_range.start, ix2 - x2_range.start]] += value; - } - - Self { - array, - mu2_grid, - x1_grid, - x2_grid, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::lagrange_subgrid::LagrangeSubgridV2; - use crate::subgrid::{ExtraSubgridParams, SubgridParams}; - use float_cmp::assert_approx_eq; - use rand::distributions::{Distribution, Uniform}; - use rand::Rng; - use rand_pcg::Pcg64; - - #[test] - fn test_v1() { - let x = vec![ - 0.015625, 0.03125, 0.0625, 0.125, 0.1875, 0.25, 0.375, 0.5, 0.75, 1.0, - ]; - let mut grid1: SubgridEnum = ImportOnlySubgridV1::new( - SparseArray3::new(1, 
10, 10), - vec![0.0], - x.clone(), - x.clone(), - ) - .into(); - - assert_eq!( - grid1.stats(), - Stats { - total: 100, - allocated: 0, - zeros: 0, - overhead: 2, - bytes_per_value: 8, - } - ); - - let mu2 = vec![Mu2 { ren: 0.0, fac: 0.0 }]; - - assert_eq!(grid1.mu2_grid().as_ref(), mu2); - assert_eq!(grid1.x1_grid().as_ref(), x); - assert_eq!(grid1.x2_grid(), grid1.x1_grid()); - - assert!(grid1.is_empty()); - - // only use exactly representable numbers here so that we can avoid using approx_eq - if let SubgridEnum::ImportOnlySubgridV1(ref mut x) = grid1 { - x.array_mut()[[0, 1, 2]] = 1.0; - x.array_mut()[[0, 1, 3]] = 2.0; - x.array_mut()[[0, 4, 3]] = 4.0; - x.array_mut()[[0, 7, 1]] = 8.0; - } else { - unreachable!(); - } - - assert!(!grid1.is_empty()); - - assert_eq!(grid1.indexed_iter().next(), Some(((0, 1, 2), 1.0))); - assert_eq!(grid1.indexed_iter().nth(1), Some(((0, 1, 3), 2.0))); - assert_eq!(grid1.indexed_iter().nth(2), Some(((0, 4, 3), 4.0))); - assert_eq!(grid1.indexed_iter().nth(3), Some(((0, 7, 1), 8.0))); - - // symmetric luminosity function - let lumi = - &mut (|ix1, ix2, _| x[ix1] * x[ix2]) as &mut dyn FnMut(usize, usize, usize) -> f64; - - assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 0.228515625); - - // create grid with transposed entries, but different q2 - let mut grid2: SubgridEnum = ImportOnlySubgridV1::new( - SparseArray3::new(1, 10, 10), - vec![1.0], - x.clone(), - x.clone(), - ) - .into(); - if let SubgridEnum::ImportOnlySubgridV1(ref mut x) = grid2 { - x.array_mut()[[0, 2, 1]] = 1.0; - x.array_mut()[[0, 3, 1]] = 2.0; - x.array_mut()[[0, 3, 4]] = 4.0; - x.array_mut()[[0, 1, 7]] = 8.0; - } else { - unreachable!(); - } - assert_eq!(grid2.convolve(&x, &x, &mu2, lumi), 0.228515625); - - assert_eq!(grid2.indexed_iter().next(), Some(((0, 1, 7), 8.0))); - assert_eq!(grid2.indexed_iter().nth(1), Some(((0, 2, 1), 1.0))); - assert_eq!(grid2.indexed_iter().nth(2), Some(((0, 3, 1), 2.0))); - assert_eq!(grid2.indexed_iter().nth(3), Some(((0, 3, 4), 
4.0))); - - grid1.merge(&mut grid2, false); - - assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 2.0 * 0.228515625); - - let mut grid1 = { - let mut g = grid1.clone_empty(); - g.merge(&mut grid1, false); - g - }; - - // the luminosity function is symmetric, so after symmetrization the result must be - // unchanged - grid1.symmetrize(); - assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 2.0 * 0.228515625); - - grid1.scale(2.0); - assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 4.0 * 0.228515625); - - assert_eq!( - grid1.stats(), - Stats { - total: 200, - allocated: 14, - zeros: 6, - overhead: 42, - bytes_per_value: 8, - } - ); - } - - #[test] - fn test_v2() { - let x = vec![ - 0.015625, 0.03125, 0.0625, 0.125, 0.1875, 0.25, 0.375, 0.5, 0.75, 1.0, - ]; - let mut grid1: SubgridEnum = ImportOnlySubgridV2::new( - SparseArray3::new(1, 10, 10), - vec![Mu2 { ren: 0.0, fac: 0.0 }], - x.clone(), - x.clone(), - ) - .into(); - - let mu2 = vec![Mu2 { ren: 0.0, fac: 0.0 }]; - - assert_eq!(grid1.mu2_grid().as_ref(), mu2); - assert_eq!(grid1.x1_grid().as_ref(), x); - assert_eq!(grid1.x2_grid(), grid1.x1_grid()); - - assert!(grid1.is_empty()); - - // only use exactly representable numbers here so that we can avoid using approx_eq - if let SubgridEnum::ImportOnlySubgridV2(ref mut x) = grid1 { - x.array_mut()[[0, 1, 2]] = 1.0; - x.array_mut()[[0, 1, 3]] = 2.0; - x.array_mut()[[0, 4, 3]] = 4.0; - x.array_mut()[[0, 7, 1]] = 8.0; - } else { - unreachable!(); - } - - assert!(!grid1.is_empty()); - - assert_eq!(grid1.indexed_iter().next(), Some(((0, 1, 2), 1.0))); - assert_eq!(grid1.indexed_iter().nth(1), Some(((0, 1, 3), 2.0))); - assert_eq!(grid1.indexed_iter().nth(2), Some(((0, 4, 3), 4.0))); - assert_eq!(grid1.indexed_iter().nth(3), Some(((0, 7, 1), 8.0))); - - // symmetric luminosity function - let lumi = - &mut (|ix1, ix2, _| x[ix1] * x[ix2]) as &mut dyn FnMut(usize, usize, usize) -> f64; - - assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 0.228515625); - - // create grid with transposed 
entries, but different q2 - let mut grid2: SubgridEnum = ImportOnlySubgridV2::new( - SparseArray3::new(1, 10, 10), - vec![Mu2 { ren: 1.0, fac: 1.0 }], - x.clone(), - x.clone(), - ) - .into(); - if let SubgridEnum::ImportOnlySubgridV2(ref mut x) = grid2 { - x.array_mut()[[0, 2, 1]] = 1.0; - x.array_mut()[[0, 3, 1]] = 2.0; - x.array_mut()[[0, 3, 4]] = 4.0; - x.array_mut()[[0, 1, 7]] = 8.0; - } else { - unreachable!(); - } - assert_eq!(grid2.convolve(&x, &x, &mu2, lumi), 0.228515625); - - assert_eq!(grid2.indexed_iter().next(), Some(((0, 1, 7), 8.0))); - assert_eq!(grid2.indexed_iter().nth(1), Some(((0, 2, 1), 1.0))); - assert_eq!(grid2.indexed_iter().nth(2), Some(((0, 3, 1), 2.0))); - assert_eq!(grid2.indexed_iter().nth(3), Some(((0, 3, 4), 4.0))); - - grid1.merge(&mut grid2, false); - - assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 2.0 * 0.228515625); - - let mut grid1 = { - let mut g = grid1.clone_empty(); - g.merge(&mut grid1, false); - g - }; - - // the luminosity function is symmetric, so after symmetrization the result must be - // unchanged - grid1.symmetrize(); - assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 2.0 * 0.228515625); - - grid1.scale(2.0); - assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 4.0 * 0.228515625); - - assert_eq!( - grid1.stats(), - Stats { - total: 200, - allocated: 14, - zeros: 6, - overhead: 42, - bytes_per_value: 8, - } - ); - } - - #[test] - #[should_panic(expected = "ImportOnlySubgridV1 doesn't support the fill operation")] - fn fill_panic_v1() { - let mut grid = - ImportOnlySubgridV1::new(SparseArray3::new(1, 1, 1), vec![1.0], vec![1.0], vec![1.0]); - - grid.fill(&Ntuple { - x1: 0.0, - x2: 0.0, - q2: 0.0, - weight: 1.0, - }); - } - - #[test] - #[should_panic(expected = "ImportOnlySubgridV2 doesn't support the fill operation")] - fn fill_panic_v2() { - let mut grid = ImportOnlySubgridV2::new( - SparseArray3::new(1, 1, 1), - vec![Mu2 { ren: 1.0, fac: 1.0 }], - vec![1.0], - vec![1.0], - ); - - grid.fill(&Ntuple { - x1: 0.0, - x2: 0.0, 
- q2: 0.0, - weight: 1.0, - }); - } - - #[test] - fn from_lagrange_subgrid_v2() { - let mut lagrange = - LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()); - - // by default this should have 40 grid points - assert_eq!(lagrange.mu2_grid().len(), 40); - - // only `q2` are important: they're not static and fall between two grid points - lagrange.fill(&Ntuple { - x1: 0.25, - x2: 0.5, - q2: 10000.0, - weight: 1.0, - }); - lagrange.fill(&Ntuple { - x1: 0.0625, - x2: 0.125, - q2: 10001.0, - weight: 1.0, - }); - lagrange.fill(&Ntuple { - x1: 0.5, - x2: 0.0625, - q2: 10002.0, - weight: 1.0, - }); - lagrange.fill(&Ntuple { - x1: 0.1, - x2: 0.2, - q2: 10003.0, - weight: 1.0, - }); - - let x1 = lagrange.x1_grid().to_vec(); - let x2 = lagrange.x2_grid().to_vec(); - let mu2 = lagrange.mu2_grid().to_vec(); - - let lumi = &mut (|_, _, _| 1.0) as &mut dyn FnMut(usize, usize, usize) -> f64; - let reference = lagrange.convolve(&x1, &x2, &mu2, lumi); - - let imported = ImportOnlySubgridV2::from(&lagrange.into()); - let test = imported.convolve(&x1, &x2, &mu2, lumi); - - // make sure the conversion did not change the results - assert_approx_eq!(f64, reference, test, ulps = 8); - - // all unneccessary grid points should be gone; since we are inserting between two - // interpolation grid points, the imported grid should have as many interpolation grid - // points as its interpolation order - assert_eq!(imported.mu2_grid().len(), 4); - } - - #[test] - fn merge_with_different_x_grids() { - let mut params = SubgridParams::default(); - let mut grid1 = LagrangeSubgridV2::new(¶ms, &ExtraSubgridParams::default()); - - // change parameters of the second grid to force non-trivial merging - params.set_x_min(0.2); - params.set_x_max(0.5); - - let mut grid2 = LagrangeSubgridV2::new(¶ms, &ExtraSubgridParams::default()); - let mut rng = Pcg64::new(0xcafef00dd15ea5e5, 0xa02bdbf7bb3c0a7ac28fa16a64abf96); - let q2_range = Uniform::new(1e4, 1e8); - - for _ in 0..1000 { - 
grid1.fill(&Ntuple { - x1: rng.gen(), - x2: rng.gen(), - q2: q2_range.sample(&mut rng), - weight: 1.0, - }); - grid2.fill(&Ntuple { - x1: rng.gen(), - x2: rng.gen(), - q2: q2_range.sample(&mut rng), - weight: 1.0, - }); - } - - let lumi = &mut (|_, _, _| 1.0) as &mut dyn FnMut(usize, usize, usize) -> f64; - let result1 = grid1.convolve(&grid1.x1_grid(), &grid1.x2_grid(), &grid1.mu2_grid(), lumi); - let result2 = grid2.convolve(&grid2.x1_grid(), &grid2.x2_grid(), &grid2.mu2_grid(), lumi); - - let mut grid1: SubgridEnum = ImportOnlySubgridV2::from(&grid1.into()).into(); - let mut grid2: SubgridEnum = ImportOnlySubgridV2::from(&grid2.into()).into(); - - let result3 = grid1.convolve(&grid1.x1_grid(), &grid1.x2_grid(), &grid1.mu2_grid(), lumi); - let result4 = grid2.convolve(&grid2.x1_grid(), &grid2.x2_grid(), &grid2.mu2_grid(), lumi); - - // conversion from LangrangeSubgridV2 to ImportOnlySubgridV2 shouldn't change the results - assert!((result3 / result1 - 1.0).abs() < 1e-13); - assert!((result4 / result2 - 1.0).abs() < 1e-13); - - grid1.merge(&mut grid2, false); - - let result5 = grid1.convolve(&grid1.x1_grid(), &grid1.x2_grid(), &grid1.mu2_grid(), lumi); - - // merging the two grids should give the sum of the two results - assert!((result5 / (result3 + result4) - 1.0).abs() < 1e-12); - } -} diff --git a/pineappl/src/import_subgrid.rs b/pineappl/src/import_subgrid.rs new file mode 100644 index 000000000..93c5b2aaf --- /dev/null +++ b/pineappl/src/import_subgrid.rs @@ -0,0 +1,251 @@ +//! TODO + +use super::interpolation::Interp; +use super::packed_array::PackedArray; +use super::subgrid::{self, Stats, Subgrid, SubgridEnum, SubgridIndexedIter}; +use itertools::izip; +use serde::{Deserialize, Serialize}; +use std::mem; + +/// TODO +#[derive(Clone, Deserialize, Serialize)] +pub struct ImportSubgridV1 { + array: PackedArray, + node_values: Vec>, +} + +impl ImportSubgridV1 { + /// Constructor. 
+ #[must_use] + pub const fn new(array: PackedArray, node_values: Vec>) -> Self { + Self { array, node_values } + } +} + +impl Subgrid for ImportSubgridV1 { + fn fill(&mut self, _: &[Interp], _: &[f64], _: f64) { + panic!("ImportSubgridV1 doesn't support the fill operation"); + } + + fn node_values(&self) -> Vec> { + self.node_values.clone() + } + + fn is_empty(&self) -> bool { + self.array.is_empty() + } + + fn merge(&mut self, other: &SubgridEnum, transpose: Option<(usize, usize)>) { + let lhs_node_values = self.node_values(); + let mut rhs_node_values = other.node_values(); + let mut new_node_values = lhs_node_values.clone(); + if let Some((a, b)) = transpose { + rhs_node_values.swap(a, b); + } + + if new_node_values != rhs_node_values { + for (new, rhs) in new_node_values.iter_mut().zip(&rhs_node_values) { + new.extend(rhs); + new.sort_by(f64::total_cmp); + new.dedup_by(subgrid::node_value_eq_ref_mut); + } + + let mut array = PackedArray::new(new_node_values.iter().map(Vec::len).collect()); + + for (indices, value) in self.array.indexed_iter() { + let target: Vec<_> = izip!(indices, &new_node_values, &lhs_node_values) + .map(|(index, new, lhs)| { + new.iter() + .position(|&value| subgrid::node_value_eq(value, lhs[index])) + // UNWRAP: must succeed, `new_node_values` is the union of + // `lhs_node_values` and `rhs_node_values` + .unwrap() + }) + .collect(); + + array[target.as_slice()] = value; + } + + self.array = array; + self.node_values.clone_from(&new_node_values); + } + + for (mut indices, value) in other.indexed_iter() { + if let Some((a, b)) = transpose { + indices.swap(a, b); + } + + let target: Vec<_> = izip!(indices, &new_node_values, &rhs_node_values) + .map(|(index, new, rhs)| { + new.iter() + .position(|&value| subgrid::node_value_eq(value, rhs[index])) + // UNWRAP: must succeed, `new_node_values` is the union of + // `lhs_node_values` and `rhs_node_values` + .unwrap() + }) + .collect(); + + self.array[target.as_slice()] += value; + } + } + + fn 
scale(&mut self, factor: f64) { + self.array *= factor; + } + + fn symmetrize(&mut self, a: usize, b: usize) { + let mut new_array = PackedArray::new(self.array.shape().to_vec()); + + for (mut index, sigma) in self.array.indexed_iter() { + // TODO: why not the other way around? + if index[b] < index[a] { + index.swap(a, b); + } + + new_array[index.as_slice()] += sigma; + } + + self.array = new_array; + } + + fn indexed_iter(&self) -> SubgridIndexedIter { + Box::new(self.array.indexed_iter()) + } + + fn shape(&mut self) -> &[usize] { + self.array.shape() + } + + fn stats(&self) -> Stats { + Stats { + total: self.array.shape().iter().product(), + allocated: self.array.non_zeros() + self.array.explicit_zeros(), + zeros: self.array.explicit_zeros(), + overhead: self.array.overhead(), + bytes_per_value: mem::size_of::(), + } + } + + fn optimize_nodes(&mut self) {} +} + +impl From<&SubgridEnum> for ImportSubgridV1 { + fn from(subgrid: &SubgridEnum) -> Self { + // find smallest ranges + let ranges: Vec<_> = subgrid.indexed_iter().fold( + subgrid + .node_values() + .iter() + .map(|values| values.len()..0) + .collect(), + |mut prev, (indices, _)| { + for (i, index) in indices.iter().enumerate() { + prev[i].start = prev[i].start.min(*index); + prev[i].end = prev[i].end.max(*index + 1); + } + prev + }, + ); + + let new_node_values: Vec<_> = subgrid + .node_values() + .iter() + .zip(&ranges) + .map(|(values, range)| values[range.clone()].to_vec()) + .collect(); + + let mut array = PackedArray::new(new_node_values.iter().map(Vec::len).collect()); + + for (mut indices, value) in subgrid.indexed_iter() { + for (index, range) in indices.iter_mut().zip(&ranges) { + *index -= range.start; + } + + array[indices.as_slice()] += value; + } + + Self::new(array, new_node_values) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::v0; + + #[test] + #[should_panic(expected = "ImportSubgridV1 doesn't support the fill operation")] + fn fill_packed_q1x2_subgrid_v1() { + let mut 
subgrid = + ImportSubgridV1::new(PackedArray::new(vec![0, 0, 0]), vec![Vec::new(); 3]); + subgrid.fill(&v0::default_interps(2), &[0.0; 3], 0.0); + } + + #[test] + fn test_v1() { + let x = vec![ + 0.015625, 0.03125, 0.0625, 0.125, 0.1875, 0.25, 0.375, 0.5, 0.75, 1.0, + ]; + let mut grid1: SubgridEnum = ImportSubgridV1::new( + PackedArray::new(vec![1, 10, 10]), + vec![vec![0.0], x.clone(), x.clone()], + ) + .into(); + + assert_eq!(grid1.node_values(), vec![vec![0.0], x.clone(), x.clone()]); + + assert!(grid1.is_empty()); + + // only use exactly representable numbers here so that we can avoid using approx_eq + if let SubgridEnum::ImportSubgridV1(ref mut x) = grid1 { + x.array[[0, 1, 2]] = 1.0; + x.array[[0, 1, 3]] = 2.0; + x.array[[0, 4, 3]] = 4.0; + x.array[[0, 7, 1]] = 8.0; + } + + assert!(!grid1.is_empty()); + + assert_eq!(grid1.indexed_iter().next(), Some((vec![0, 1, 2], 1.0))); + assert_eq!(grid1.indexed_iter().nth(1), Some((vec![0, 1, 3], 2.0))); + assert_eq!(grid1.indexed_iter().nth(2), Some((vec![0, 4, 3], 4.0))); + assert_eq!(grid1.indexed_iter().nth(3), Some((vec![0, 7, 1], 8.0))); + + // create grid with transposed entries, but different q2 + let mut grid2: SubgridEnum = ImportSubgridV1::new( + PackedArray::new(vec![1, 10, 10]), + vec![vec![1.0], x.clone(), x], + ) + .into(); + if let SubgridEnum::ImportSubgridV1(ref mut x) = grid2 { + x.array[[0, 2, 1]] = 1.0; + x.array[[0, 3, 1]] = 2.0; + x.array[[0, 3, 4]] = 4.0; + x.array[[0, 1, 7]] = 8.0; + } + + assert_eq!(grid2.indexed_iter().next(), Some((vec![0, 1, 7], 8.0))); + assert_eq!(grid2.indexed_iter().nth(1), Some((vec![0, 2, 1], 1.0))); + assert_eq!(grid2.indexed_iter().nth(2), Some((vec![0, 3, 1], 2.0))); + assert_eq!(grid2.indexed_iter().nth(3), Some((vec![0, 3, 4], 4.0))); + + grid1.merge(&grid2, None); + + // the luminosity function is symmetric, so after symmetrization the result must be + // unchanged + grid1.symmetrize(1, 2); + + grid1.scale(2.0); + + assert_eq!( + grid1.stats(), + Stats { + total: 
200, + allocated: 8, + zeros: 0, + overhead: 12, + bytes_per_value: 8, + } + ); + } +} diff --git a/pineappl/src/interp_subgrid.rs b/pineappl/src/interp_subgrid.rs new file mode 100644 index 000000000..2504cb970 --- /dev/null +++ b/pineappl/src/interp_subgrid.rs @@ -0,0 +1,269 @@ +//! Module containing the Lagrange-interpolation subgrid. + +use super::interpolation::{self, Interp}; +use super::packed_array::PackedArray; +use super::subgrid::{self, Stats, Subgrid, SubgridEnum, SubgridIndexedIter}; +use itertools::izip; +use serde::{Deserialize, Serialize}; +use std::mem; + +/// Subgrid which uses Lagrange-interpolation. +#[derive(Clone, Deserialize, Serialize)] +pub struct InterpSubgridV1 { + array: PackedArray, + interps: Vec, + static_nodes: Vec>, +} + +impl InterpSubgridV1 { + /// Constructor. + #[must_use] + pub fn new(interps: &[Interp]) -> Self { + Self { + array: PackedArray::new(interps.iter().map(Interp::nodes).collect()), + interps: interps.to_vec(), + static_nodes: vec![Some(-1.0); interps.len()], + } + } +} + +impl Subgrid for InterpSubgridV1 { + fn fill(&mut self, interps: &[Interp], ntuple: &[f64], weight: f64) { + debug_assert_eq!(interps.len(), ntuple.len()); + + if interpolation::interpolate(interps, ntuple, weight, &mut self.array) { + for (value, previous_node) in ntuple.iter().zip(&mut self.static_nodes) { + if let Some(previous_value) = previous_node { + if *previous_value < 0.0 { + *previous_value = *value; + } else if !subgrid::node_value_eq(*previous_value, *value) { + *previous_node = None; + } + } + } + } + } + + fn node_values(&self) -> Vec> { + self.interps.iter().map(Interp::node_values).collect() + } + + fn is_empty(&self) -> bool { + self.array.is_empty() + } + + fn shape(&mut self) -> &[usize] { + self.array.shape() + } + + fn merge(&mut self, other: &SubgridEnum, transpose: Option<(usize, usize)>) { + // we cannot use `Self::indexed_iter` because it multiplies with `reweight` + if let SubgridEnum::InterpSubgridV1(other) = other { + 
// TODO: make sure `other` has the same interpolation as `self` + for (mut index, value) in other.array.indexed_iter() { + if let Some((a, b)) = transpose { + index.swap(a, b); + } + self.array[index.as_slice()] += value; + } + } else { + unimplemented!(); + } + } + + fn scale(&mut self, factor: f64) { + self.array *= factor; + } + + fn symmetrize(&mut self, a: usize, b: usize) { + let mut new_array = PackedArray::new(self.array.shape().to_vec()); + + for (mut index, sigma) in self.array.indexed_iter() { + // TODO: why not the other way around? + if index[b] < index[a] { + index.swap(a, b); + } + + new_array[index.as_slice()] += sigma; + } + + self.array = new_array; + } + + fn indexed_iter(&self) -> SubgridIndexedIter { + let nodes: Vec<_> = self.interps.iter().map(Interp::node_values).collect(); + + Box::new(self.array.indexed_iter().map(move |(indices, weight)| { + let reweight = self + .interps + .iter() + .enumerate() + .map(|(i, interp)| interp.reweight(nodes[i][indices[i]])) + .product::(); + (indices, weight * reweight) + })) + } + + fn stats(&self) -> Stats { + Stats { + total: self.array.shape().iter().product(), + allocated: self.array.non_zeros() + self.array.explicit_zeros(), + zeros: self.array.explicit_zeros(), + overhead: self.array.overhead(), + bytes_per_value: mem::size_of::(), + } + } + + fn optimize_nodes(&mut self) { + // find the optimal ranges in which the nodes are used + let ranges: Vec<_> = self.array.indexed_iter().fold( + self.node_values() + .iter() + .map(|values| values.len()..0) + .collect(), + |mut prev, (indices, _)| { + for (i, index) in indices.iter().enumerate() { + prev[i].start = prev[i].start.min(*index); + prev[i].end = prev[i].end.max(*index + 1); + } + prev + }, + ); + + let mut new_array = PackedArray::new( + ranges + .iter() + .zip(&self.static_nodes) + .map(|(range, static_node)| { + if static_node.is_some() { + 1 + } else { + range.clone().count() + } + }) + .collect(), + ); + + for (mut index, value) in 
self.array.indexed_iter() { + for (idx, range, static_node) in izip!(&mut index, &ranges, &self.static_nodes) { + if static_node.is_some() { + *idx = 0; + } else { + *idx -= range.start; + } + } + new_array[index.as_slice()] += value; + } + + self.array = new_array; + + for (interp, static_node, range) in izip!(&mut self.interps, &mut self.static_nodes, ranges) + { + *interp = if let &mut Some(value) = static_node { + Interp::new( + value, + value, + 1, + 0, + interp.reweight_meth(), + interp.map(), + interp.interp_meth(), + ) + } else { + interp.sub_interp(range) + }; + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::v0; + + #[test] + fn fill_zero() { + let interps = v0::default_interps(2); + let mut subgrid = InterpSubgridV1::new(&interps); + + subgrid.fill(&interps, &[1000.0, 0.5, 0.5], 0.0); + + assert!(subgrid.is_empty()); + assert_eq!(subgrid.indexed_iter().count(), 0); + assert_eq!( + subgrid.stats(), + Stats { + total: 100000, + allocated: 0, + zeros: 0, + overhead: 0, + bytes_per_value: mem::size_of::() + } + ); + } + + #[test] + fn fill_outside_range() { + let interps = v0::default_interps(2); + let mut subgrid = InterpSubgridV1::new(&interps); + + subgrid.fill(&interps, &[1000.0, 1e-10, 0.5], 0.0); + + assert!(subgrid.is_empty()); + assert_eq!(subgrid.indexed_iter().count(), 0); + assert_eq!( + subgrid.stats(), + Stats { + total: 100000, + allocated: 0, + zeros: 0, + overhead: 0, + bytes_per_value: mem::size_of::() + } + ); + } + + #[test] + fn fill() { + let interps = v0::default_interps(2); + let mut subgrid = InterpSubgridV1::new(&interps); + + subgrid.fill(&interps, &[1000.0, 0.5, 0.5], 1.0); + + assert!(!subgrid.is_empty()); + assert_eq!(subgrid.indexed_iter().count(), 4 * 4 * 4); + assert_eq!( + subgrid.stats(), + Stats { + total: 100000, + allocated: 64, + zeros: 0, + overhead: 32, + bytes_per_value: mem::size_of::() + } + ); + + subgrid.fill(&interps, &[1000000.0, 0.5, 0.5], 1.0); + + assert!(!subgrid.is_empty()); + 
assert_eq!(subgrid.indexed_iter().count(), 2 * 4 * 4 * 4); + assert_eq!( + subgrid.stats(), + Stats { + total: 100000, + allocated: 128, + zeros: 0, + overhead: 64, + bytes_per_value: mem::size_of::() + } + ); + + subgrid.optimize_nodes(); + + let node_values = subgrid.node_values(); + + assert_eq!(node_values[0].len(), 23); + assert_eq!(node_values[1].len(), 1); + assert_eq!(node_values[2].len(), 1); + } +} diff --git a/pineappl/src/interpolation.rs b/pineappl/src/interpolation.rs new file mode 100644 index 000000000..8f9230f75 --- /dev/null +++ b/pineappl/src/interpolation.rs @@ -0,0 +1,757 @@ +//! Interpolation module. + +use super::convert; +use super::packed_array::PackedArray; +use arrayvec::ArrayVec; +use serde::{Deserialize, Serialize}; +use std::mem; +use std::ops::Range; + +const MAX_INTERP_ORDER_PLUS_ONE: usize = 8; +const MAX_DIMENSIONS: usize = 8; + +mod applgrid { + pub fn reweight_x(x: f64) -> f64 { + (x.sqrt() / x.mul_add(-0.99, 1.0)).powi(3) + } + + pub fn fx2(y: f64) -> f64 { + let mut yp = y; + let mut deltap = f64::INFINITY; + + for _ in 0..10 { + let x = (-yp).exp(); + let delta = (1.0 - x).mul_add(-5.0, y - yp); + if (delta.abs() < 1e-15) && (delta >= deltap) { + return x; + } + let deriv = x.mul_add(-5.0, -1.0); + yp -= delta / deriv; + deltap = delta; + } + + unreachable!(); + } + + pub fn fy2(x: f64) -> f64 { + (1.0 - x).mul_add(5.0, -x.ln()) + } + + pub fn ftau0(q2: f64) -> f64 { + (q2 / 0.0625).ln().ln() + } + + pub fn fq20(tau: f64) -> f64 { + 0.0625 * tau.exp().exp() + } +} + +fn lagrange_weights(i: usize, n: usize, u: f64) -> f64 { + let mut factorials = 1; + let mut product = 1.0; + for z in 0..i { + product *= u - convert::f64_from_usize(z); + factorials *= i - z; + } + for z in i + 1..=n { + product *= convert::f64_from_usize(z) - u; + factorials *= z - i; + } + product / convert::f64_from_usize(factorials) +} + +/// TODO +#[repr(C)] +#[derive(Clone, Copy, Deserialize, Serialize)] +pub enum ReweightMeth { + /// TODO + ApplGridX, + 
/// TODO + NoReweight, +} + +/// TODO +#[repr(C)] +#[derive(Clone, Copy, Debug, Deserialize, Serialize)] +pub enum Map { + /// TODO + ApplGridF2, + /// TODO + ApplGridH0, +} + +/// TODO +#[repr(C)] +#[derive(Clone, Copy, Deserialize, Serialize)] +pub enum InterpMeth { + /// TODO + Lagrange, +} + +/// TODO +#[derive(Clone, Deserialize, Serialize)] +pub struct Interp { + min: f64, + max: f64, + nodes: usize, + order: usize, + reweight: ReweightMeth, + map: Map, + interp_meth: InterpMeth, +} + +impl Interp { + /// TODO + /// + /// # Panics + /// + /// Panics if `nodes` is `0`, or if `nodes` is smaller or equal to `order` or if `order` is + /// larger than some internally specified maximum. + #[must_use] + pub fn new( + min: f64, + max: f64, + nodes: usize, + order: usize, + reweight: ReweightMeth, + map: Map, + interp_meth: InterpMeth, + ) -> Self { + // minimum must be larger or equal to the maximum + assert!(min <= max); + // for interpolation to work `nodes` has to be at least `1` + assert!(nodes > 0); + // for each interpolated point `order + 1` nodes are updated + assert!(nodes > order); + // using arrays with fixed size limit the possible max value of `order` + assert!(order < MAX_INTERP_ORDER_PLUS_ONE); + + let mut result = Self { + min: 0.0, + max: 0.0, + nodes, + order, + reweight, + map, + interp_meth, + }; + + result.min = result.map_x_to_y(min); + result.max = result.map_x_to_y(max); + + // for some maps the minimum in x is mapped to the maximum in y + if result.min > result.max { + // TODO: alternatively we have to modify our range check in `Self::interpolate`, which + // has the advantage that we don't swap min and max in `x` space + mem::swap(&mut result.min, &mut result.max); + } + + result + } + + fn deltay(&self) -> f64 { + (self.max - self.min) / convert::f64_from_usize(self.nodes - 1) + } + + fn gety(&self, index: usize) -> f64 { + convert::f64_from_usize(index).mul_add(self.deltay(), self.min) + } + + /// TODO + #[must_use] + pub fn 
reweight(&self, x: f64) -> f64 { + match self.reweight { + ReweightMeth::ApplGridX => applgrid::reweight_x(x), + ReweightMeth::NoReweight => 1.0, + } + } + + /// TODO + #[must_use] + pub fn interpolate(&self, x: f64) -> Option<(usize, f64)> { + let y = self.map_x_to_y(x); + + // points falling outside the interpolation range shouldn't happen very often, because when + // it does it degrades the interpolation quality + if (self.min > y) || (y > self.max) { + return None; + } + + if self.nodes == 1 { + // TODO: is the `y_fraction` correct? + Some((0, 0.0)) + } else { + let index = convert::usize_from_f64( + (y - self.min) / self.deltay() - convert::f64_from_usize(self.order / 2), + ) + .min(self.nodes - self.order - 1); + let y_fraction = (y - self.gety(index)) / self.deltay(); + + Some((index, y_fraction)) + } + } + + /// TODO + #[must_use] + pub fn node_weights(&self, fraction: f64) -> ArrayVec { + (0..=self.order) + .map(|i| match self.interp_meth { + InterpMeth::Lagrange => lagrange_weights(i, self.order, fraction), + }) + .collect() + } + + /// TODO + #[must_use] + pub const fn order(&self) -> usize { + self.order + } + + /// TODO + #[must_use] + pub fn node_values(&self) -> Vec { + if self.nodes == 1 { + vec![self.map_y_to_x(self.min)] + } else { + (0..self.nodes) + .map(|node| self.map_y_to_x(self.gety(node))) + .collect() + } + } + + fn map_y_to_x(&self, y: f64) -> f64 { + match self.map { + Map::ApplGridF2 => applgrid::fx2(y), + Map::ApplGridH0 => applgrid::fq20(y), + } + } + + fn map_x_to_y(&self, x: f64) -> f64 { + match self.map { + Map::ApplGridF2 => applgrid::fy2(x), + Map::ApplGridH0 => applgrid::ftau0(x), + } + } + + /// TODO + #[must_use] + pub const fn nodes(&self) -> usize { + self.nodes + } + + /// TODO + #[must_use] + pub fn min(&self) -> f64 { + self.map_y_to_x(self.min).min(self.map_y_to_x(self.max)) + } + + /// TODO + #[must_use] + pub fn max(&self) -> f64 { + self.map_y_to_x(self.min).max(self.map_y_to_x(self.max)) + } + + /// TODO + 
#[must_use] + pub const fn map(&self) -> Map { + self.map + } + + /// TODO + #[must_use] + pub const fn interp_meth(&self) -> InterpMeth { + self.interp_meth + } + + /// TODO + #[must_use] + pub const fn reweight_meth(&self) -> ReweightMeth { + self.reweight + } + + /// TODO + #[must_use] + pub fn sub_interp(&self, range: Range) -> Self { + Self { + min: self.gety(range.start), + max: self.gety(range.end - 1), + nodes: range.count(), + order: self.order, + reweight: self.reweight, + map: self.map, + interp_meth: self.interp_meth, + } + } +} + +/// TODO +pub fn interpolate( + interps: &[Interp], + ntuple: &[f64], + weight: f64, + array: &mut PackedArray, +) -> bool { + use itertools::Itertools; + + if weight == 0.0 { + return false; + } + + // we must have as many variables as we want to interpolate + debug_assert_eq!(interps.len(), ntuple.len()); + debug_assert!(interps.len() <= MAX_DIMENSIONS); + + let Some((indices, fractions)): Option<( + ArrayVec<_, MAX_DIMENSIONS>, + ArrayVec<_, MAX_DIMENSIONS>, + )> = interps + .iter() + .zip(ntuple) + .map(|(interp, &x)| interp.interpolate(x)) + .collect() + else { + return false; + }; + + let weight = weight + / interps + .iter() + .zip(ntuple) + .map(|(interp, &x)| interp.reweight(x)) + .product::(); + + let node_weights: ArrayVec<_, MAX_DIMENSIONS> = interps + .iter() + .zip(fractions) + .map(|(interp, fraction)| interp.node_weights(fraction)) + .collect(); + + let shape: ArrayVec<_, MAX_DIMENSIONS> = + interps.iter().map(|interp| interp.order() + 1).collect(); + + for (i, node_weight) in node_weights + .into_iter() + // TODO: replace this with something else to avoid allocating memory + .multi_cartesian_product() + .map(|weights| weights.iter().product::()) + .enumerate() + { + let idx = array.sub_block_idx(&indices, i, &shape); + array[idx] += weight * node_weight; + } + + true +} + +#[cfg(test)] +mod tests { + use super::*; + use float_cmp::assert_approx_eq; + use float_cmp::Ulps; + + #[test] + fn 
interpolate_two_points() { + let interps = vec![ + Interp::new( + 1e2, + 1e8, + 40, + 3, + ReweightMeth::NoReweight, + Map::ApplGridH0, + InterpMeth::Lagrange, + ), + Interp::new( + 2e-7, + 1.0, + 50, + 3, + ReweightMeth::ApplGridX, + Map::ApplGridF2, + InterpMeth::Lagrange, + ), + Interp::new( + 2e-7, + 1.0, + 50, + 3, + ReweightMeth::ApplGridX, + Map::ApplGridF2, + InterpMeth::Lagrange, + ), + ]; + + let node_values: Vec<_> = interps.iter().map(Interp::node_values).collect(); + + let q2_reference = [ + 9.999999999999999e1, + 1.2242682307575689e2, + 1.507173582975839e2, + 1.8660624792652183e2, + 2.3239844323901826e2, + 2.911750445478316e2, + 3.670799619445291e2, + 4.657216764869711e2, + 5.947399998930223e2, + 7.646109579666331e2, + 9.897977073478313e2, + 1.2904078604330668e3, + 1.694597307328949e3, + 2.2420826491130997e3, + 2.989312590729525e3, + 4.017141299790263e3, + 5.442305429193529e3, + 7.434731381687921e3, + 1.024385467001917e4, + 1.4238990475802799e4, + 1.9971806922234402e4, + 2.8273883344269376e4, + 4.041048232844362e4, + 5.832525318921733e4, + 8.503347534094655e4, + 1.2526040013230646e5, + 1.864882133214792e5, + 2.806914902174795e5, + 4.272453808062111e5, + 6.578537431299294e5, + 1.0249965523865514e6, + 1.6165812577807596e6, + 2.581663421106388e6, + 4.1761634755570055e6, + 6.845167341538921e6, + 1.1373037585359517e7, + 1.916090997202005e7, + 3.2746801715531096e7, + 5.679435282347418e7, + 9.99999999999995e7, + ]; + + assert_eq!(node_values[0].len(), interps[0].nodes()); + + for (&node, ref_node) in node_values[0].iter().zip(q2_reference) { + assert_approx_eq!(f64, node, ref_node, ulps = 4); + } + + let x_reference = [ + 1.0, + 9.309440808716655e-1, + 8.627839323906108e-1, + 7.956242522922757e-1, + 7.295868442414312e-1, + 6.648139482473711e-1, + 6.01472197967335e-1, + 5.397572337880446e-1, + 4.7989890296102555e-1, + 4.2216677535896485e-1, + 3.668753186482242e-1, + 3.143874007692335e-1, + 2.65113704158282e-1, + 2.1950412650038867e-1, + 1.780256604256944e-1, 
+ 1.4112080644438132e-1, + 1.0914375746330697e-1, + 8.228122126204893e-2, + 6.048002875444742e-2, + 4.341491741701319e-2, + 3.052158400782889e-2, + 2.108918668378718e-2, + 1.4375068581090129e-2, + 9.699159574043398e-3, + 6.496206194633799e-3, + 4.328500638819831e-3, + 2.8738675812817133e-3, + 1.9034634022867352e-3, + 1.2586797144272762e-3, + 8.314068836488144e-4, + 5.487795323670796e-4, + 3.6205449638139736e-4, + 2.3878782918561914e-4, + 1.5745605600841445e-4, + 1.0381172986576898e-4, + 6.843744918967896e-5, + 4.511438394964044e-5, + 2.97384953722449e-5, + 1.960250500238325e-5, + 1.2921015690745727e-5, + 8.516806677573052e-6, + 5.613757716930101e-6, + 3.700227206985489e-6, + 2.4389432928916775e-6, + 1.607585498470808e-6, + 1.0596094959101024e-6, + 6.984208530700364e-7, + 4.6035014748963986e-7, + 3.034304765867952e-7, + 1.9999999999999989e-7, + ]; + + assert_eq!(node_values[1].len(), interps[1].nodes()); + assert_eq!(node_values[2].len(), interps[2].nodes()); + + for (&node, ref_node) in node_values[1].iter().zip(x_reference) { + assert_approx_eq!(f64, node, ref_node, ulps = 4); + } + + for (&node, ref_node) in node_values[2].iter().zip(x_reference) { + assert_approx_eq!(f64, node, ref_node, ulps = 4); + } + + let mut array = crate::packed_array::PackedArray::::new(vec![40, 50, 50]); + let ntuples = [[100000.0, 0.25, 0.5], [1000.0, 0.5, 0.5]]; + let weight = 1.0; + + for ntuple in &ntuples { + interpolate(&interps, ntuple, weight, &mut array); + } + + let reference = [ + ([9, 6, 6], -4.091358497150521e-6), + ([9, 6, 7], 3.085859446366878e-5), + ([9, 6, 8], 6.0021251939206686e-5), + ([9, 6, 9], -5.0714506160633226e-6), + ([9, 7, 6], 3.085859446366878e-5), + ([9, 7, 7], -2.3274735101712016e-4), + ([9, 7, 8], -4.527032950262464e-4), + ([9, 7, 9], 3.825082500411933e-5), + ([9, 8, 6], 6.002125193920668e-5), + ([9, 8, 7], -4.5270329502624637e-4), + ([9, 8, 8], -8.805267704745902e-4), + ([9, 8, 9], 7.439944833384343e-5), + ([9, 9, 6], -5.071450616063322e-6), + ([9, 9, 7], 
3.825082500411933e-5), + ([9, 9, 8], 7.439944833384344e-5), + ([9, 9, 9], -6.286325524659303e-6), + ([10, 6, 6], 3.25604540320380e-4), + ([10, 6, 7], -2.4558342839606324e-3), + ([10, 6, 8], -4.776700003368127e-3), + ([10, 6, 9], 4.036036802325844e-4), + ([10, 7, 6], -2.4558342839606324e-3), + ([10, 7, 7], 1.8522843767295388e-2), + ([10, 7, 8], 3.602770287209066e-2), + ([10, 7, 9], -3.0441337030269453e-3), + ([10, 8, 6], -4.776700003368127e-3), + ([10, 8, 7], 3.602770287209066e-2), + ([10, 8, 8], 7.007538316181437e-2), + ([10, 8, 9], -5.920966884642993e-3), + ([10, 9, 6], 4.036036802325844e-4), + ([10, 9, 7], -3.0441337030269453e-3), + ([10, 9, 8], -5.920966884642993e-3), + ([10, 9, 9], 5.002876512010676e-4), + ([11, 6, 6], 1.3274904136884986e-5), + ([11, 6, 7], -1.0012441676511976e-4), + ([11, 6, 8], -1.947461622401742e-4), + ([11, 6, 9], 1.6454930754680843e-5), + ([11, 7, 6], -1.0012441676511976e-4), + ([11, 7, 7], 7.551767401996306e-4), + ([11, 7, 8], 1.4688502237364042e-3), + ([11, 7, 9], -1.2410939677862364e-4), + ([11, 8, 6], -1.9474616224017418e-4), + ([11, 8, 7], 1.4688502237364042e-3), + ([11, 8, 8], 2.8569748840518382e-3), + ([11, 8, 9], -2.4139794768822075e-4), + ([11, 9, 6], 1.6454930754680843e-5), + ([11, 9, 7], -1.2410939677862364e-4), + ([11, 9, 8], -2.4139794768822075e-4), + ([11, 9, 9], 2.0396738337944602e-5), + ([12, 6, 6], -2.1682835394615433e-6), + ([12, 6, 7], 1.6354025801721504e-5), + ([12, 6, 8], 3.180926156637114e-5), + ([12, 6, 9], -2.6876996722875166e-6), + ([12, 7, 6], 1.6354025801721504e-5), + ([12, 7, 7], -1.2334833293517984e-4), + ([12, 7, 8], -2.3991764680339134e-4), + ([12, 7, 9], 2.0271661426154572e-5), + ([12, 8, 6], 3.180926156637114e-5), + ([12, 8, 7], -2.3991764680339134e-4), + ([12, 8, 8], -4.6664981907720756e-4), + ([12, 8, 9], 3.942922608215463e-5), + ([12, 9, 6], -2.6876996722875166e-6), + ([12, 9, 7], 2.0271661426154572e-5), + ([12, 9, 8], 3.942922608215462e-5), + ([12, 9, 9], -3.3315428526512343e-6), + ([23, 11, 6], 
-2.4353100307613186e-4), + ([23, 11, 7], 1.8368041980410083e-3), + ([23, 11, 8], 3.572660694686239e-3), + ([23, 11, 9], -3.0186928289005667e-4), + ([23, 12, 6], 2.9987494527093064e-3), + ([23, 12, 7], -2.2617718130482554e-2), + ([23, 12, 8], -4.399240411931119e-2), + ([23, 12, 9], 3.717105154670258e-3), + ([23, 13, 6], 1.424894308599361e-3), + ([23, 13, 7], -1.0747099197804599e-2), + ([23, 13, 8], -2.090355571205706e-2), + ([23, 13, 9], 1.766230244600708e-3), + ([23, 14, 6], -1.9189233197773798e-4), + ([23, 14, 7], 1.4473255417027965e-3), + ([23, 14, 8], 2.815108480681732e-3), + ([23, 14, 9], -2.3786047737056168e-4), + ([24, 11, 6], 2.4624842908465045e-3), + ([24, 11, 7], -1.8573000668924675e-2), + ([24, 11, 8], -3.612526013552098e-2), + ([24, 11, 9], 3.0523767307502974e-3), + ([24, 12, 6], -3.032210817598752e-2), + ([24, 12, 7], 2.2870096573985707e-1), + ([24, 12, 8], 4.4483290707142076e-1), + ([24, 12, 9], -3.758582248330245e-2), + ([24, 13, 6], -1.4407939057950724e-2), + ([24, 13, 7], 1.0867020055959625e-1), + ([24, 13, 8], 2.1136806777609088e-1), + ([24, 13, 9], -1.7859386182495846e-2), + ([24, 14, 6], 1.940335509895472e-3), + ([24, 14, 7], -1.463475436460085e-2), + ([24, 14, 8], -2.8465206988616668e-2), + ([24, 14, 9], 2.4051462916002946e-3), + ([25, 11, 6], 1.7967411488022474e-3), + ([25, 11, 7], -1.3551710637356816e-2), + ([25, 11, 8], -2.6358641814670535e-2), + ([25, 11, 9], 2.2271536489275397e-3), + ([25, 12, 6], -2.2124396764984615e-2), + ([25, 12, 7], 1.6687068317270662e-1), + ([25, 12, 8], 3.245704313515834e-1), + ([25, 12, 9], -2.7424334895599013e-2), + ([25, 13, 6], -1.0512691216379747e-2), + ([25, 13, 7], 7.929074785159325e-2), + ([25, 13, 8], 1.54223808179330e-1), + ([25, 13, 9], -1.3031024874237778e-2), + ([25, 14, 6], 1.415757520188257e-3), + ([25, 14, 7], -1.0678186036448784e-2), + ([25, 14, 8], -2.0769516741988788e-2), + ([25, 14, 9], 1.754904722467019e-3), + ([26, 11, 6], -2.1941078639412583e-4), + ([26, 11, 7], 1.6548802758317395e-3), + ([26, 
11, 8], 3.218811086223127e-3), + ([26, 11, 9], -2.7197102590848567e-4), + ([26, 12, 6], 2.7017421490774826e-3), + ([26, 12, 7], -2.0377575170166206e-2), + ([26, 12, 8], -3.9635232727098596e-2), + ([26, 12, 9], 3.3489492294371172e-3), + ([26, 13, 6], 1.2837674744868705e-3), + ([26, 13, 7], -9.682666505130055e-3), + ([26, 13, 8], -1.8833189775767707e-2), + ([26, 13, 9], 1.5912962293338163e-3), + ([26, 14, 6], -1.7288660142000884e-4), + ([26, 14, 7], 1.303977034800947e-3), + ([26, 14, 8], 2.536289662216269e-3), + ([26, 14, 9], -2.1430189065349477e-4), + ]; + + for ((index, value), (ref_index, ref_value)) in array.indexed_iter().zip(reference) { + assert_eq!(index, ref_index); + assert_approx_eq!(f64, value, ref_value, ulps = 4); + } + } + + #[test] + fn interpolate_zero_and_outside() { + let interps = vec![ + Interp::new( + 1e2, + 1e8, + 40, + 3, + ReweightMeth::NoReweight, + Map::ApplGridH0, + InterpMeth::Lagrange, + ), + Interp::new( + 2e-7, + 1.0, + 50, + 3, + ReweightMeth::ApplGridX, + Map::ApplGridF2, + InterpMeth::Lagrange, + ), + Interp::new( + 2e-7, + 1.0, + 50, + 3, + ReweightMeth::ApplGridX, + Map::ApplGridF2, + InterpMeth::Lagrange, + ), + ]; + let mut array = crate::packed_array::PackedArray::::new(vec![40, 50, 50]); + + let ntuple = [1000.0, 0.5, 0.5]; + let weight = 0.0; + interpolate(&interps, &ntuple, weight, &mut array); + + assert_eq!(array.non_zeros(), 0); + assert_eq!(array.explicit_zeros(), 0); + + let ntuple = [10.0, 0.5, 0.5]; + let weight = 1.0; + interpolate(&interps, &ntuple, weight, &mut array); + + assert_eq!(array.non_zeros(), 0); + assert_eq!(array.explicit_zeros(), 0); + } + + #[test] + fn interpolate_with_one_node() { + // TODO: does it make sense for an interpolation to have `min = max`? 
There will be + // numerical problems if the `x` value doesn't exactly hit the limits + let interps = vec![Interp::new( + 90.0_f64.powi(2), + 90.0_f64.powi(2), + 1, + 0, + ReweightMeth::NoReweight, + Map::ApplGridH0, + InterpMeth::Lagrange, + )]; + let mut array = crate::packed_array::PackedArray::::new(vec![1]); + + let ntuple = [90.0_f64.powi(2)]; + let weight = 1.0; + interpolate(&interps, &ntuple, weight, &mut array); + + assert_approx_eq!(f64, array[[0]], 1.0, ulps = 2); + + let node_values = interps[0].node_values(); + + assert_eq!(node_values.len(), 1); + + assert_approx_eq!(f64, node_values[0], 90.0 * 90.0, ulps = 16); + } + + #[test] + fn applgrid_fx2_fy2_closure() { + let x_ref = Interp::new( + 2e-7, + 1.0, + 50, + 3, + ReweightMeth::ApplGridX, + Map::ApplGridF2, + InterpMeth::Lagrange, + ) + .node_values(); + + for &x in &x_ref { + // these two functions should be inverse to each other, within numerical noise + assert!(applgrid::fx2(applgrid::fy2(x)).ulps(&x) < 4); + } + } + + #[test] + fn applgrid_ftau0_fq20_closure() { + let q2_ref = Interp::new( + 1e2, + 1e8, + 40, + 3, + ReweightMeth::NoReweight, + Map::ApplGridH0, + InterpMeth::Lagrange, + ) + .node_values(); + + for &q2 in &q2_ref { + // these two functions should be inverse to each other, within numerical noise + assert!(applgrid::fq20(applgrid::ftau0(q2)).ulps(&q2) < 4); + } + } +} diff --git a/pineappl/src/lagrange_subgrid.rs b/pineappl/src/lagrange_subgrid.rs deleted file mode 100644 index 8888d394c..000000000 --- a/pineappl/src/lagrange_subgrid.rs +++ /dev/null @@ -1,1486 +0,0 @@ -//! Module containing the Lagrange-interpolation subgrid. 
- -use super::convert::{f64_from_usize, usize_from_f64}; -use super::grid::Ntuple; -use super::sparse_array3::SparseArray3; -use super::subgrid::{ - ExtraSubgridParams, Mu2, Stats, Subgrid, SubgridEnum, SubgridIndexedIter, SubgridParams, -}; -use arrayvec::ArrayVec; -use ndarray::Array3; -use serde::{Deserialize, Serialize}; -use std::borrow::Cow; -use std::iter; -use std::mem; - -fn weightfun(x: f64) -> f64 { - (x.sqrt() / (1.0 - 0.99 * x)).powi(3) -} - -fn fx(y: f64) -> f64 { - let mut yp = y; - - for _ in 0..100 { - let x = (-yp).exp(); - let delta = y - yp - 5.0 * (1.0 - x); - if (delta).abs() < 1e-12 { - return x; - } - let deriv = -1.0 - 5.0 * x; - yp -= delta / deriv; - } - - unreachable!(); -} - -fn fy(x: f64) -> f64 { - (1.0 - x).mul_add(5.0, -x.ln()) -} - -fn ftau(q2: f64) -> f64 { - (q2 / 0.0625).ln().ln() -} - -fn fq2(tau: f64) -> f64 { - 0.0625 * tau.exp().exp() -} - -fn fi(i: usize, n: usize, u: f64) -> f64 { - let mut factorials = 1; - let mut product = 1.0; - for z in 0..i { - product *= u - f64_from_usize(z); - factorials *= i - z; - } - for z in i + 1..=n { - product *= f64_from_usize(z) - u; - factorials *= z - i; - } - product / f64_from_usize(factorials) -} - -/// Subgrid which uses Lagrange-interpolation. -#[derive(Clone, Deserialize, Serialize)] -pub struct LagrangeSubgridV1 { - grid: Option>, - ntau: usize, - ny: usize, - yorder: usize, - tauorder: usize, - itaumin: usize, - itaumax: usize, - reweight: bool, - ymin: f64, - ymax: f64, - taumin: f64, - taumax: f64, -} - -impl LagrangeSubgridV1 { - /// Constructor. 
- #[must_use] - pub fn new(subgrid_params: &SubgridParams) -> Self { - Self { - grid: None, - ntau: subgrid_params.q2_bins(), - ny: subgrid_params.x_bins(), - yorder: subgrid_params.x_order(), - tauorder: subgrid_params.q2_order(), - itaumin: 0, - itaumax: 0, - reweight: subgrid_params.reweight(), - ymin: fy(subgrid_params.x_max()), - ymax: fy(subgrid_params.x_min()), - taumin: ftau(subgrid_params.q2_min()), - taumax: ftau(subgrid_params.q2_max()), - } - } - - fn deltay(&self) -> f64 { - (self.ymax - self.ymin) / f64_from_usize(self.ny - 1) - } - - fn deltatau(&self) -> f64 { - (self.taumax - self.taumin) / f64_from_usize(self.ntau - 1) - } - - fn gety(&self, iy: usize) -> f64 { - f64_from_usize(iy).mul_add(self.deltay(), self.ymin) - } - - fn gettau(&self, iy: usize) -> f64 { - f64_from_usize(iy).mul_add(self.deltatau(), self.taumin) - } - - fn increase_tau(&mut self, new_itaumin: usize, new_itaumax: usize) { - let min_diff = self.itaumin - new_itaumin; - - let mut new_grid = Array3::zeros((new_itaumax - new_itaumin, self.ny, self.ny)); - - for ((i, j, k), value) in self.grid.as_ref().unwrap().indexed_iter() { - new_grid[[i + min_diff, j, k]] = *value; - } - - self.itaumin = new_itaumin; - self.itaumax = new_itaumax; - - mem::swap(&mut self.grid, &mut Some(new_grid)); - } -} - -impl Subgrid for LagrangeSubgridV1 { - fn convolve( - &self, - x1: &[f64], - x2: &[f64], - _: &[Mu2], - lumi: &mut dyn FnMut(usize, usize, usize) -> f64, - ) -> f64 { - self.grid.as_ref().map_or(0.0, |grid| { - grid.indexed_iter() - .map(|((imu2, ix1, ix2), &sigma)| { - if sigma == 0.0 { - 0.0 - } else { - let mut value = sigma * lumi(ix1, ix2, imu2 + self.itaumin); - if self.reweight { - value *= weightfun(x1[ix1]) * weightfun(x2[ix2]); - } - value - } - }) - .sum() - }) - } - - fn fill(&mut self, ntuple: &Ntuple) { - if ntuple.weight == 0.0 { - return; - } - - let y1 = fy(ntuple.x1); - let y2 = fy(ntuple.x2); - let tau = ftau(ntuple.q2); - - if (y2 < self.ymin) - || (y2 > self.ymax) - || 
(y1 < self.ymin) - || (y1 > self.ymax) - || (tau < self.taumin) - || (tau > self.taumax) - { - return; - } - - let k1 = usize_from_f64((y1 - self.ymin) / self.deltay() - f64_from_usize(self.yorder / 2)) - .min(self.ny - 1 - self.yorder); - let k2 = usize_from_f64((y2 - self.ymin) / self.deltay() - f64_from_usize(self.yorder / 2)) - .min(self.ny - 1 - self.yorder); - - let u_y1 = (y1 - self.gety(k1)) / self.deltay(); - let u_y2 = (y2 - self.gety(k2)) / self.deltay(); - - let fi1: ArrayVec<_, 8> = (0..=self.yorder) - .map(|i| fi(i, self.yorder, u_y1)) - .collect(); - let fi2: ArrayVec<_, 8> = (0..=self.yorder) - .map(|i| fi(i, self.yorder, u_y2)) - .collect(); - - let k3 = usize_from_f64( - (tau - self.taumin) / self.deltatau() - f64_from_usize(self.tauorder / 2), - ) - .min(self.ntau - 1 - self.tauorder); - - let u_tau = (tau - self.gettau(k3)) / self.deltatau(); - - let factor = if self.reweight { - 1.0 / (weightfun(ntuple.x1) * weightfun(ntuple.x2)) - } else { - 1.0 - }; - - let size = self.tauorder + 1; - let ny = self.ny; - - if self.grid.is_none() { - self.itaumin = k3; - self.itaumax = k3 + size; - } else if k3 < self.itaumin || k3 + size > self.itaumax { - self.increase_tau(self.itaumin.min(k3), self.itaumax.max(k3 + size)); - } - - for i3 in 0..=self.tauorder { - let fi3i3 = fi(i3, self.tauorder, u_tau); - - for (i1, fi1i1) in fi1.iter().enumerate() { - for (i2, fi2i2) in fi2.iter().enumerate() { - let fillweight = factor * fi1i1 * fi2i2 * fi3i3 * ntuple.weight; - - let grid = self - .grid - .get_or_insert_with(|| Array3::zeros((size, ny, ny))); - - grid[[k3 + i3 - self.itaumin, k1 + i1, k2 + i2]] += fillweight; - } - } - } - } - - fn mu2_grid(&self) -> Cow<[Mu2]> { - (0..self.ntau) - .map(|itau| { - let q2 = fq2(self.gettau(itau)); - Mu2 { ren: q2, fac: q2 } - }) - .collect() - } - - fn x1_grid(&self) -> Cow<[f64]> { - (0..self.ny).map(|iy| fx(self.gety(iy))).collect() - } - - fn x2_grid(&self) -> Cow<[f64]> { - self.x1_grid() - } - - fn is_empty(&self) -> 
bool { - self.grid.is_none() - } - - fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) { - let x1_equal = self.x1_grid() == other.x1_grid(); - let x2_equal = self.x2_grid() == other.x2_grid(); - - if let SubgridEnum::LagrangeSubgridV1(other_grid) = other { - if let Some(other_grid_grid) = &mut other_grid.grid { - if self.grid.is_some() { - // TODO: the general case isn't implemented - assert!(x1_equal); - assert!(x2_equal); - - let new_itaumin = self.itaumin.min(other_grid.itaumin); - let new_itaumax = self.itaumax.max(other_grid.itaumax); - let offset = other_grid.itaumin.saturating_sub(self.itaumin); - - // TODO: we need much more checks here if there subgrids are compatible at all - - if (self.itaumin != new_itaumin) || (self.itaumax != new_itaumax) { - self.increase_tau(new_itaumin, new_itaumax); - } - - let self_grid = self.grid.as_mut().unwrap(); - - if transpose { - for ((i, k, j), value) in other_grid_grid.indexed_iter() { - self_grid[[i + offset, j, k]] += value; - } - } else { - for ((i, j, k), value) in other_grid_grid.indexed_iter() { - self_grid[[i + offset, j, k]] += value; - } - } - } else { - self.grid = other_grid.grid.take(); - self.itaumin = other_grid.itaumin; - self.itaumax = other_grid.itaumax; - - if transpose { - if let Some(grid) = &mut self.grid { - grid.swap_axes(1, 2); - } - } - } - } - } else { - todo!(); - } - } - - fn scale(&mut self, factor: f64) { - if factor == 0.0 { - self.grid = None; - } else if let Some(self_grid) = &mut self.grid { - self_grid.iter_mut().for_each(|x| *x *= factor); - } - } - - fn symmetrize(&mut self) { - if let Some(grid) = self.grid.as_mut() { - let (i_size, j_size, k_size) = grid.dim(); - - for i in 0..i_size { - for j in 0..j_size { - for k in j + 1..k_size { - grid[[i, j, k]] += grid[[i, k, j]]; - grid[[i, k, j]] = 0.0; - } - } - } - } - } - - fn clone_empty(&self) -> SubgridEnum { - Self { - grid: None, - ntau: self.ntau, - ny: self.ny, - yorder: self.yorder, - tauorder: self.tauorder, - 
itaumin: 0, - itaumax: 0, - reweight: self.reweight, - ymin: self.ymin, - ymax: self.ymax, - taumin: self.taumin, - taumax: self.taumax, - } - .into() - } - - fn indexed_iter(&self) -> SubgridIndexedIter { - self.grid.as_ref().map_or_else( - || Box::new(iter::empty()) as Box>, - |grid| { - Box::new(grid.indexed_iter().filter(|(_, &value)| value != 0.0).map( - |(tuple, &value)| { - ( - (self.itaumin + tuple.0, tuple.1, tuple.2), - value - * if self.reweight { - weightfun(fx(self.gety(tuple.1))) - * weightfun(fx(self.gety(tuple.2))) - } else { - 1.0 - }, - ) - }, - )) - }, - ) - } - - fn stats(&self) -> Stats { - let (non_zeros, zeros) = self.grid.as_ref().map_or((0, 0), |array| { - array.iter().fold((0, 0), |(non_zeros, zeros), value| { - if *value == 0.0 { - (non_zeros, zeros + 1) - } else { - (non_zeros + 1, zeros) - } - }) - }); - - Stats { - total: non_zeros + zeros, - allocated: non_zeros + zeros, - zeros, - overhead: 0, - bytes_per_value: mem::size_of::(), - } - } - - fn static_scale(&self) -> Option { - if let [static_scale] = self.mu2_grid().as_ref() { - Some(static_scale.clone()) - } else { - None - } - } -} - -/// Subgrid which uses Lagrange-interpolation. -#[derive(Clone, Deserialize, Serialize)] -pub struct LagrangeSubgridV2 { - grid: Option>, - ntau: usize, - ny1: usize, - ny2: usize, - y1order: usize, - y2order: usize, - tauorder: usize, - itaumin: usize, - itaumax: usize, - reweight1: bool, - reweight2: bool, - y1min: f64, - y1max: f64, - y2min: f64, - y2max: f64, - taumin: f64, - taumax: f64, - pub(crate) static_q2: f64, -} - -impl LagrangeSubgridV2 { - /// Constructor. 
- #[must_use] - pub fn new(subgrid_params: &SubgridParams, extra_params: &ExtraSubgridParams) -> Self { - Self { - grid: None, - ntau: subgrid_params.q2_bins(), - ny1: subgrid_params.x_bins(), - ny2: extra_params.x2_bins(), - y1order: subgrid_params.x_order(), - y2order: extra_params.x2_order(), - tauorder: subgrid_params.q2_order(), - itaumin: 0, - itaumax: 0, - reweight1: subgrid_params.reweight(), - reweight2: extra_params.reweight2(), - y1min: fy(subgrid_params.x_max()), - y1max: fy(subgrid_params.x_min()), - y2min: fy(extra_params.x2_max()), - y2max: fy(extra_params.x2_min()), - taumin: ftau(subgrid_params.q2_min()), - taumax: ftau(subgrid_params.q2_max()), - static_q2: 0.0, - } - } - - fn deltay1(&self) -> f64 { - (self.y1max - self.y1min) / f64_from_usize(self.ny1 - 1) - } - - fn deltay2(&self) -> f64 { - (self.y1max - self.y2min) / f64_from_usize(self.ny2 - 1) - } - - fn deltatau(&self) -> f64 { - (self.taumax - self.taumin) / f64_from_usize(self.ntau - 1) - } - - fn gety1(&self, iy: usize) -> f64 { - if self.y1min == self.y1max { - debug_assert_eq!(iy, 0); - self.y1min - } else { - f64_from_usize(iy).mul_add(self.deltay1(), self.y1min) - } - } - - fn gety2(&self, iy: usize) -> f64 { - if self.y2min == self.y2max { - debug_assert_eq!(iy, 0); - self.y2min - } else { - f64_from_usize(iy).mul_add(self.deltay2(), self.y2min) - } - } - - fn gettau(&self, iy: usize) -> f64 { - if self.taumin == self.taumax { - debug_assert_eq!(iy, 0); - self.taumin - } else { - f64_from_usize(iy).mul_add(self.deltatau(), self.taumin) - } - } - - fn increase_tau(&mut self, new_itaumin: usize, new_itaumax: usize) { - let min_diff = self.itaumin - new_itaumin; - - let mut new_grid = Array3::zeros((new_itaumax - new_itaumin, self.ny1, self.ny2)); - - for ((i, j, k), value) in self.grid.as_ref().unwrap().indexed_iter() { - new_grid[[i + min_diff, j, k]] = *value; - } - - self.itaumin = new_itaumin; - self.itaumax = new_itaumax; - - self.grid = Some(new_grid); - } -} - -impl Subgrid 
for LagrangeSubgridV2 { - fn convolve( - &self, - x1: &[f64], - x2: &[f64], - _: &[Mu2], - lumi: &mut dyn FnMut(usize, usize, usize) -> f64, - ) -> f64 { - self.grid.as_ref().map_or(0.0, |grid| { - grid.indexed_iter() - .map(|((imu2, ix1, ix2), &sigma)| { - if sigma == 0.0 { - 0.0 - } else { - let mut value = sigma * lumi(ix1, ix2, imu2 + self.itaumin); - if self.reweight1 { - value *= weightfun(x1[ix1]); - } - if self.reweight2 { - value *= weightfun(x2[ix2]); - } - value - } - }) - .sum() - }) - } - - fn fill(&mut self, ntuple: &Ntuple) { - if ntuple.weight == 0.0 { - return; - } - - let y1 = fy(ntuple.x1); - let y2 = fy(ntuple.x2); - let tau = ftau(ntuple.q2); - - if self.static_q2 == 0.0 { - self.static_q2 = ntuple.q2; - } else if (self.static_q2 != -1.0) && (self.static_q2 != ntuple.q2) { - self.static_q2 = -1.0; - } - - if (y2 < self.y2min) - || (y2 > self.y2max) - || (y1 < self.y1min) - || (y1 > self.y1max) - || (tau < self.taumin) - || (tau > self.taumax) - { - return; - } - - let k1 = - usize_from_f64((y1 - self.y1min) / self.deltay1() - f64_from_usize(self.y1order / 2)) - .min(self.ny1 - 1 - self.y1order); - let k2 = - usize_from_f64((y2 - self.y2min) / self.deltay2() - f64_from_usize(self.y2order / 2)) - .min(self.ny2 - 1 - self.y2order); - - let u_y1 = (y1 - self.gety1(k1)) / self.deltay1(); - let u_y2 = (y2 - self.gety2(k2)) / self.deltay2(); - - let fi1: ArrayVec<_, 8> = (0..=self.y1order) - .map(|i| fi(i, self.y1order, u_y1)) - .collect(); - let fi2: ArrayVec<_, 8> = (0..=self.y2order) - .map(|i| fi(i, self.y2order, u_y2)) - .collect(); - - let k3 = usize_from_f64( - (tau - self.taumin) / self.deltatau() - f64_from_usize(self.tauorder / 2), - ) - .min(self.ntau - 1 - self.tauorder); - - let u_tau = (tau - self.gettau(k3)) / self.deltatau(); - - let factor = 1.0 - / (if self.reweight1 { - weightfun(ntuple.x1) - } else { - 1.0 - } * if self.reweight2 { - weightfun(ntuple.x2) - } else { - 1.0 - }); - - let size = self.tauorder + 1; - let ny1 = self.ny1; 
- let ny2 = self.ny2; - - if self.grid.is_none() { - self.itaumin = k3; - self.itaumax = k3 + size; - } else if k3 < self.itaumin || k3 + size > self.itaumax { - self.increase_tau(self.itaumin.min(k3), self.itaumax.max(k3 + size)); - } - - for i3 in 0..=self.tauorder { - let fi3i3 = fi(i3, self.tauorder, u_tau); - - for (i1, fi1i1) in fi1.iter().enumerate() { - for (i2, fi2i2) in fi2.iter().enumerate() { - let fillweight = factor * fi1i1 * fi2i2 * fi3i3 * ntuple.weight; - - let grid = self - .grid - .get_or_insert_with(|| Array3::zeros((size, ny1, ny2))); - - grid[[k3 + i3 - self.itaumin, k1 + i1, k2 + i2]] += fillweight; - } - } - } - } - - fn mu2_grid(&self) -> Cow<[Mu2]> { - (0..self.ntau) - .map(|itau| { - let q2 = fq2(self.gettau(itau)); - Mu2 { ren: q2, fac: q2 } - }) - .collect() - } - - fn x1_grid(&self) -> Cow<[f64]> { - (0..self.ny1).map(|iy| fx(self.gety1(iy))).collect() - } - - fn x2_grid(&self) -> Cow<[f64]> { - (0..self.ny2).map(|iy| fx(self.gety2(iy))).collect() - } - - fn is_empty(&self) -> bool { - self.grid.is_none() - } - - fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) { - let x1_equal = self.x1_grid() == other.x1_grid(); - let x2_equal = self.x2_grid() == other.x2_grid(); - - if let SubgridEnum::LagrangeSubgridV2(other_grid) = other { - if let Some(other_grid_grid) = &mut other_grid.grid { - if self.grid.is_some() { - // TODO: the general case isn't implemented - assert!(x1_equal); - assert!(x2_equal); - - let new_itaumin = self.itaumin.min(other_grid.itaumin); - let new_itaumax = self.itaumax.max(other_grid.itaumax); - let offset = other_grid.itaumin.saturating_sub(self.itaumin); - - // TODO: we need much more checks here if there subgrids are compatible at all - - if (self.itaumin != new_itaumin) || (self.itaumax != new_itaumax) { - self.increase_tau(new_itaumin, new_itaumax); - } - - if (other_grid.static_q2 == -1.0) || (self.static_q2 != other_grid.static_q2) { - self.static_q2 = -1.0; - } - - let self_grid = 
self.grid.as_mut().unwrap(); - - if transpose { - for ((i, k, j), value) in other_grid_grid.indexed_iter() { - self_grid[[i + offset, j, k]] += value; - } - } else { - for ((i, j, k), value) in other_grid_grid.indexed_iter() { - self_grid[[i + offset, j, k]] += value; - } - } - } else { - self.grid = other_grid.grid.take(); - self.itaumin = other_grid.itaumin; - self.itaumax = other_grid.itaumax; - self.static_q2 = other_grid.static_q2; - - if transpose { - if let Some(grid) = &mut self.grid { - grid.swap_axes(1, 2); - } - } - } - } - } else { - todo!(); - } - } - - fn scale(&mut self, factor: f64) { - if let Some(self_grid) = &mut self.grid { - self_grid.iter_mut().for_each(|x| *x *= factor); - } - } - - fn symmetrize(&mut self) { - if let Some(grid) = self.grid.as_mut() { - let (i_size, j_size, k_size) = grid.dim(); - - for i in 0..i_size { - for j in 0..j_size { - for k in j + 1..k_size { - grid[[i, j, k]] += grid[[i, k, j]]; - grid[[i, k, j]] = 0.0; - } - } - } - } - } - - fn clone_empty(&self) -> SubgridEnum { - Self { - grid: None, - ntau: self.ntau, - ny1: self.ny1, - ny2: self.ny2, - y1order: self.y1order, - y2order: self.y2order, - tauorder: self.tauorder, - itaumin: 0, - itaumax: 0, - reweight1: self.reweight1, - reweight2: self.reweight2, - y1min: self.y1min, - y1max: self.y1max, - y2min: self.y2min, - y2max: self.y2max, - taumin: self.taumin, - taumax: self.taumax, - static_q2: 0.0, - } - .into() - } - - fn indexed_iter(&self) -> SubgridIndexedIter { - self.grid.as_ref().map_or_else( - || Box::new(iter::empty()) as Box>, - |grid| { - Box::new(grid.indexed_iter().filter(|(_, &value)| value != 0.0).map( - |(tuple, &value)| { - ( - (self.itaumin + tuple.0, tuple.1, tuple.2), - value - * if self.reweight1 { - weightfun(fx(self.gety1(tuple.1))) - } else { - 1.0 - } - * if self.reweight2 { - weightfun(fx(self.gety2(tuple.2))) - } else { - 1.0 - }, - ) - }, - )) - }, - ) - } - - fn stats(&self) -> Stats { - let (non_zeros, zeros) = 
self.grid.as_ref().map_or((0, 0), |array| { - array.iter().fold((0, 0), |(non_zeros, zeros), value| { - if *value == 0.0 { - (non_zeros, zeros + 1) - } else { - (non_zeros + 1, zeros) - } - }) - }); - - Stats { - total: non_zeros + zeros, - allocated: non_zeros + zeros, - zeros, - overhead: 0, - bytes_per_value: mem::size_of::(), - } - } - - fn static_scale(&self) -> Option { - (self.static_q2 > 0.0).then_some(Mu2 { - ren: self.static_q2, - fac: self.static_q2, - }) - } -} - -/// Subgrid which uses Lagrange-interpolation, but also stores its contents in a space-efficient -/// structure. -#[derive(Clone, Deserialize, Serialize)] -pub struct LagrangeSparseSubgridV1 { - array: SparseArray3, - ntau: usize, - ny: usize, - yorder: usize, - tauorder: usize, - reweight: bool, - ymin: f64, - ymax: f64, - taumin: f64, - taumax: f64, -} - -impl LagrangeSparseSubgridV1 { - /// Constructor. - #[must_use] - pub fn new(subgrid_params: &SubgridParams) -> Self { - Self { - array: SparseArray3::new( - subgrid_params.q2_bins(), - subgrid_params.x_bins(), - subgrid_params.x_bins(), - ), - ntau: subgrid_params.q2_bins(), - ny: subgrid_params.x_bins(), - yorder: subgrid_params.x_order(), - tauorder: subgrid_params.q2_order(), - reweight: subgrid_params.reweight(), - ymin: fy(subgrid_params.x_max()), - ymax: fy(subgrid_params.x_min()), - taumin: ftau(subgrid_params.q2_min()), - taumax: ftau(subgrid_params.q2_max()), - } - } - - fn deltay(&self) -> f64 { - (self.ymax - self.ymin) / f64_from_usize(self.ny - 1) - } - - fn deltatau(&self) -> f64 { - (self.taumax - self.taumin) / f64_from_usize(self.ntau - 1) - } - - fn gety(&self, iy: usize) -> f64 { - f64_from_usize(iy).mul_add(self.deltay(), self.ymin) - } - - fn gettau(&self, iy: usize) -> f64 { - f64_from_usize(iy).mul_add(self.deltatau(), self.taumin) - } -} - -impl Subgrid for LagrangeSparseSubgridV1 { - fn convolve( - &self, - x1: &[f64], - x2: &[f64], - _: &[Mu2], - lumi: &mut dyn FnMut(usize, usize, usize) -> f64, - ) -> f64 { - 
self.array - .indexed_iter() - .map(|((imu2, ix1, ix2), sigma)| { - let mut value = sigma * lumi(ix1, ix2, imu2); - if self.reweight { - value *= weightfun(x1[ix1]) * weightfun(x2[ix2]); - } - value - }) - .sum() - } - - fn fill(&mut self, ntuple: &Ntuple) { - if ntuple.weight == 0.0 { - return; - } - - let y1 = fy(ntuple.x1); - let y2 = fy(ntuple.x2); - let tau = ftau(ntuple.q2); - - if (y2 < self.ymin) - || (y2 > self.ymax) - || (y1 < self.ymin) - || (y1 > self.ymax) - || (tau < self.taumin) - || (tau > self.taumax) - { - return; - } - - let k1 = usize_from_f64((y1 - self.ymin) / self.deltay() - f64_from_usize(self.yorder / 2)) - .min(self.ny - 1 - self.yorder); - let k2 = usize_from_f64((y2 - self.ymin) / self.deltay() - f64_from_usize(self.yorder / 2)) - .min(self.ny - 1 - self.yorder); - - let u_y1 = (y1 - self.gety(k1)) / self.deltay(); - let u_y2 = (y2 - self.gety(k2)) / self.deltay(); - - let fi1: ArrayVec<_, 8> = (0..=self.yorder) - .map(|i| fi(i, self.yorder, u_y1)) - .collect(); - let fi2: ArrayVec<_, 8> = (0..=self.yorder) - .map(|i| fi(i, self.yorder, u_y2)) - .collect(); - - let k3 = usize_from_f64( - (tau - self.taumin) / self.deltatau() - f64_from_usize(self.tauorder / 2), - ) - .min(self.ntau - 1 - self.tauorder); - - let u_tau = (tau - self.gettau(k3)) / self.deltatau(); - - let factor = if self.reweight { - 1.0 / (weightfun(ntuple.x1) * weightfun(ntuple.x2)) - } else { - 1.0 - }; - - for i3 in 0..=self.tauorder { - let fi3i3 = fi(i3, self.tauorder, u_tau); - - for (i1, fi1i1) in fi1.iter().enumerate() { - for (i2, fi2i2) in fi2.iter().enumerate() { - let fillweight = factor * fi1i1 * fi2i2 * fi3i3 * ntuple.weight; - - self.array[[k3 + i3, k1 + i1, k2 + i2]] += fillweight; - } - } - } - } - - fn mu2_grid(&self) -> Cow<[Mu2]> { - (0..self.ntau) - .map(|itau| { - let q2 = fq2(self.gettau(itau)); - Mu2 { ren: q2, fac: q2 } - }) - .collect() - } - - fn x1_grid(&self) -> Cow<[f64]> { - (0..self.ny).map(|iy| fx(self.gety(iy))).collect() - } - - fn 
x2_grid(&self) -> Cow<[f64]> { - self.x1_grid() - } - - fn is_empty(&self) -> bool { - self.array.is_empty() - } - - fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) { - if let SubgridEnum::LagrangeSparseSubgridV1(other_grid) = other { - if self.array.is_empty() && !transpose { - mem::swap(&mut self.array, &mut other_grid.array); - } else { - // TODO: the general case isn't implemented - assert!(self.x1_grid() == other_grid.x1_grid()); - assert!(self.x2_grid() == other_grid.x2_grid()); - - // TODO: we need much more checks here if there subgrids are compatible at all - - if transpose { - for ((i, k, j), value) in other_grid.array.indexed_iter() { - self.array[[i, j, k]] += value; - } - } else { - for ((i, j, k), value) in other_grid.array.indexed_iter() { - self.array[[i, j, k]] += value; - } - } - } - } else { - todo!(); - } - } - - fn scale(&mut self, factor: f64) { - if factor == 0.0 { - self.array.clear(); - } else { - self.array.iter_mut().for_each(|x| *x *= factor); - } - } - - fn symmetrize(&mut self) { - let mut new_array = SparseArray3::new(self.ntau, self.ny, self.ny); - - for ((i, j, k), sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k >= j) { - new_array[[i, j, k]] = sigma; - } - for ((i, j, k), sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k < j) { - new_array[[i, k, j]] += sigma; - } - - mem::swap(&mut self.array, &mut new_array); - } - - fn clone_empty(&self) -> SubgridEnum { - Self { - array: SparseArray3::new(self.ntau, self.ny, self.ny), - ntau: self.ntau, - ny: self.ny, - yorder: self.yorder, - tauorder: self.tauorder, - reweight: self.reweight, - ymin: self.ymin, - ymax: self.ymax, - taumin: self.taumin, - taumax: self.taumax, - } - .into() - } - - fn indexed_iter(&self) -> SubgridIndexedIter { - Box::new(self.array.indexed_iter().map(|(tuple, value)| { - ( - tuple, - value - * if self.reweight { - weightfun(fx(self.gety(tuple.1))) * weightfun(fx(self.gety(tuple.2))) - } else { - 1.0 - }, - ) - })) - } - - 
fn stats(&self) -> Stats { - Stats { - total: self.ntau * self.ny * self.ny, - allocated: self.array.len() + self.array.zeros(), - zeros: self.array.zeros(), - overhead: self.array.overhead(), - bytes_per_value: mem::size_of::(), - } - } - - fn static_scale(&self) -> Option { - if let [static_scale] = self.mu2_grid().as_ref() { - Some(static_scale.clone()) - } else { - None - } - } -} - -impl From<&LagrangeSubgridV1> for LagrangeSparseSubgridV1 { - fn from(subgrid: &LagrangeSubgridV1) -> Self { - Self { - array: subgrid.grid.as_ref().map_or_else( - || SparseArray3::new(subgrid.ntau, subgrid.ny, subgrid.ny), - |grid| SparseArray3::from_ndarray(grid.view(), subgrid.itaumin, subgrid.ntau), - ), - ntau: subgrid.ntau, - ny: subgrid.ny, - yorder: subgrid.yorder, - tauorder: subgrid.tauorder, - reweight: subgrid.reweight, - ymin: subgrid.ymin, - ymax: subgrid.ymax, - taumin: subgrid.taumin, - taumax: subgrid.taumax, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use float_cmp::assert_approx_eq; - - fn test_q2_slice_methods(mut grid: G) -> G { - grid.fill(&Ntuple { - x1: 0.1, - x2: 0.2, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - grid.fill(&Ntuple { - x1: 0.9, - x2: 0.1, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - grid.fill(&Ntuple { - x1: 0.009, - x2: 0.01, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - grid.fill(&Ntuple { - x1: 0.009, - x2: 0.5, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - - // the grid must not be empty - assert!(!grid.is_empty()); - - let x1 = grid.x1_grid(); - let x2 = grid.x2_grid(); - let mu2 = grid.mu2_grid(); - - let reference = grid.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); - - let mut test = 0.0; - - // check `reference` against manually calculated result from q2 slices - for ((_, ix1, ix2), value) in grid.indexed_iter() { - test += value / (x1[ix1] * x2[ix2]); - } - - assert_approx_eq!(f64, test, reference, ulps = 8); - - grid - } - - fn test_merge_method(mut grid1: G, mut grid2: G, mut grid3: G) 
- where - SubgridEnum: From, - { - grid1.fill(&Ntuple { - x1: 0.1, - x2: 0.2, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - grid1.fill(&Ntuple { - x1: 0.9, - x2: 0.1, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - grid1.fill(&Ntuple { - x1: 0.009, - x2: 0.01, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - grid1.fill(&Ntuple { - x1: 0.009, - x2: 0.5, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - - assert!(!grid1.is_empty()); - assert!(grid2.is_empty()); - - let x1 = grid1.x1_grid().into_owned(); - let x2 = grid1.x2_grid().into_owned(); - let mu2 = grid1.mu2_grid().into_owned(); - - let reference = - grid1.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); - - // merge filled grid into empty one - grid2.merge(&mut grid1.into(), false); - assert!(!grid2.is_empty()); - - let merged = grid2.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); - - assert_approx_eq!(f64, reference, merged, ulps = 8); - - grid3.fill(&Ntuple { - x1: 0.1, - x2: 0.2, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - grid3.fill(&Ntuple { - x1: 0.9, - x2: 0.1, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - grid3.fill(&Ntuple { - x1: 0.009, - x2: 0.01, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - grid3.fill(&Ntuple { - x1: 0.009, - x2: 0.5, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - - grid2.merge(&mut grid3.into(), false); - - let merged = grid2.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); - - assert_approx_eq!(f64, 2.0 * reference, merged, ulps = 8); - } - - fn test_empty_subgrid(mut grid: G) { - // this following events should be skipped - - // q2 is too large - grid.fill(&Ntuple { - x1: 0.5, - x2: 0.5, - q2: 2e+8, - weight: 1.0, - }); - // q2 is too small - grid.fill(&Ntuple { - x1: 0.5, - x2: 0.5, - q2: 5e+1, - weight: 1.0, - }); - // x1 is too large - grid.fill(&Ntuple { - x1: 1.1, - x2: 0.5, - q2: 1e+3, - weight: 1.0, - }); - // x1 is too small - grid.fill(&Ntuple { - x1: 0.5, - x2: 1e-7, - q2: 1e+3, - weight: 1.0, - 
}); - // x1 is too large - grid.fill(&Ntuple { - x1: 0.5, - x2: 1.1, - q2: 1e+3, - weight: 1.0, - }); - // x1 is too small - grid.fill(&Ntuple { - x1: 1e-7, - x2: 0.5, - q2: 1e+3, - weight: 1.0, - }); - - let x1 = grid.x1_grid(); - let x2 = grid.x2_grid(); - let mu2 = grid.mu2_grid(); - - let result = grid.convolve(&x1, &x2, &mu2, &mut |_, _, _| 1.0); - - assert_eq!(result, 0.0); - } - - #[test] - fn q2_slice_v1() { - let subgrid = test_q2_slice_methods(LagrangeSubgridV1::new(&SubgridParams::default())); - - assert_eq!( - subgrid.stats(), - Stats { - total: 10000, - allocated: 10000, - zeros: 9744, - overhead: 0, - bytes_per_value: 8 - } - ); - } - - #[test] - fn q2_slice_v2() { - let subgrid = test_q2_slice_methods(LagrangeSubgridV2::new( - &SubgridParams::default(), - &ExtraSubgridParams::default(), - )); - - assert_eq!( - subgrid.stats(), - Stats { - total: 10000, - allocated: 10000, - zeros: 9744, - overhead: 0, - bytes_per_value: 8 - } - ); - } - - #[test] - fn sparse_q2_slice() { - let subgrid = - test_q2_slice_methods(LagrangeSparseSubgridV1::new(&SubgridParams::default())); - - assert_eq!( - subgrid.stats(), - Stats { - total: 100000, - allocated: 432, - zeros: 176, - overhead: 402, - bytes_per_value: 8 - } - ); - } - - #[test] - fn fill_zero_v1() { - let mut subgrid = LagrangeSubgridV1::new(&SubgridParams::default()); - - subgrid.fill(&Ntuple { - x1: 0.5, - x2: 0.5, - q2: 1000.0, - weight: 0.0, - }); - - assert!(subgrid.is_empty()); - assert_eq!(subgrid.indexed_iter().count(), 0); - } - - #[test] - fn fill_zero_v1_sparse() { - let mut subgrid = LagrangeSparseSubgridV1::new(&SubgridParams::default()); - - subgrid.fill(&Ntuple { - x1: 0.5, - x2: 0.5, - q2: 1000.0, - weight: 0.0, - }); - - assert!(subgrid.is_empty()); - assert_eq!(subgrid.indexed_iter().count(), 0); - } - - #[test] - fn fill_zero_v2() { - let mut subgrid = - LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()); - - subgrid.fill(&Ntuple { - x1: 0.5, - x2: 0.5, - 
q2: 1000.0, - weight: 0.0, - }); - - assert!(subgrid.is_empty()); - assert_eq!(subgrid.indexed_iter().count(), 0); - } - - #[test] - fn from() { - // check conversion of empty grids - let mut dense = LagrangeSubgridV1::new(&SubgridParams::default()); - assert!(dense.is_empty()); - let sparse = LagrangeSparseSubgridV1::from(&dense); - assert!(sparse.is_empty()); - - let mu2 = dense.mu2_grid().into_owned(); - let x1 = dense.x1_grid().into_owned(); - let x2 = dense.x2_grid().into_owned(); - - assert_eq!(mu2, *sparse.mu2_grid()); - assert_eq!(x1, *sparse.x1_grid()); - assert_eq!(x2, *sparse.x2_grid()); - - // check conversion of a filled grid - dense.fill(&Ntuple { - x1: 0.1, - x2: 0.2, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - dense.fill(&Ntuple { - x1: 0.9, - x2: 0.1, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - - assert!(!dense.is_empty()); - - let sparse = LagrangeSparseSubgridV1::from(&dense); - assert!(!sparse.is_empty()); - - let reference = - dense.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); - let converted = - sparse.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); - - assert_approx_eq!(f64, reference, converted, ulps = 8); - } - - #[test] - #[should_panic(expected = "not yet implemented")] - fn merge_dense_v1_with_sparse() { - let mut dense = LagrangeSubgridV1::new(&SubgridParams::default()); - let sparse = LagrangeSparseSubgridV1::new(&SubgridParams::default()); - - dense.merge(&mut sparse.into(), false); - } - - #[test] - #[should_panic(expected = "not yet implemented")] - fn merge_dense_v1_with_dense_v2() { - let mut one = LagrangeSubgridV1::new(&SubgridParams::default()); - let two = LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()); - - one.merge(&mut two.into(), false); - } - - #[test] - #[should_panic(expected = "not yet implemented")] - fn merge_dense_v2_with_dense_v1() { - let mut two = - LagrangeSubgridV2::new(&SubgridParams::default(), 
&ExtraSubgridParams::default()); - let one = LagrangeSubgridV1::new(&SubgridParams::default()); - - two.merge(&mut one.into(), false); - } - - #[test] - #[should_panic(expected = "not yet implemented")] - fn merge_dense_v2_with_sparse() { - let mut dense = - LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()); - let sparse = LagrangeSparseSubgridV1::new(&SubgridParams::default()); - - dense.merge(&mut sparse.into(), false); - } - - #[test] - #[should_panic(expected = "not yet implemented")] - fn merge_sparse_with_dense_v1() { - let mut sparse = LagrangeSparseSubgridV1::new(&SubgridParams::default()); - let dense = LagrangeSubgridV1::new(&SubgridParams::default()); - - sparse.merge(&mut dense.into(), false); - } - - #[test] - #[should_panic(expected = "not yet implemented")] - fn merge_sparse_with_dense_v2() { - let mut sparse = LagrangeSparseSubgridV1::new(&SubgridParams::default()); - let dense = - LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()); - - sparse.merge(&mut dense.into(), false); - } - - #[test] - fn merge_dense_v1() { - test_merge_method( - LagrangeSubgridV1::new(&SubgridParams::default()), - LagrangeSubgridV1::new(&SubgridParams::default()), - LagrangeSubgridV1::new(&SubgridParams::default()), - ); - } - - #[test] - fn merge_dense_v2() { - test_merge_method( - LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()), - LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()), - LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()), - ); - } - - #[test] - fn merge_sparse() { - test_merge_method( - LagrangeSparseSubgridV1::new(&SubgridParams::default()), - LagrangeSparseSubgridV1::new(&SubgridParams::default()), - LagrangeSparseSubgridV1::new(&SubgridParams::default()), - ); - } - - #[test] - fn empty_v1() { - test_empty_subgrid(LagrangeSubgridV1::new(&SubgridParams::default())); - } - - #[test] - fn empty_v2() { - 
test_empty_subgrid(LagrangeSubgridV2::new( - &SubgridParams::default(), - &ExtraSubgridParams::default(), - )); - } - - #[test] - fn empty_sparse() { - test_empty_subgrid(LagrangeSparseSubgridV1::new(&SubgridParams::default())); - } -} diff --git a/pineappl/src/lib.rs b/pineappl/src/lib.rs index 5a0387999..f225dff0a 100644 --- a/pineappl/src/lib.rs +++ b/pineappl/src/lib.rs @@ -20,7 +20,7 @@ //! [`Grid::orders()`]: grid::Grid::orders //! [`Subgrid`]: subgrid::Subgrid //! [`SubgridEnum`]: subgrid::SubgridEnum -//! [`Order`]: order::Order +//! [`Order`]: boc::Order //! //! ## Metadata //! @@ -34,6 +34,7 @@ //! [CLI tutorial]: https://nnpdf.github.io/pineappl/docs/cli-tutorial.html mod convert; +mod v0; pub mod bin; pub mod boc; @@ -42,10 +43,9 @@ pub mod empty_subgrid; pub mod evolution; pub mod fk_table; pub mod grid; -pub mod import_only_subgrid; -pub mod lagrange_subgrid; -pub mod ntuple_subgrid; +pub mod import_subgrid; +pub mod interp_subgrid; +pub mod interpolation; pub mod packed_array; pub mod pids; -pub mod sparse_array3; pub mod subgrid; diff --git a/pineappl/src/ntuple_subgrid.rs b/pineappl/src/ntuple_subgrid.rs deleted file mode 100644 index 282d9fffc..000000000 --- a/pineappl/src/ntuple_subgrid.rs +++ /dev/null @@ -1,198 +0,0 @@ -//! Provides an implementation of the `Grid` trait with n-tuples. - -use super::grid::Ntuple; -use super::subgrid::{Mu2, Stats, Subgrid, SubgridEnum, SubgridIndexedIter}; -use serde::{Deserialize, Serialize}; -use std::borrow::Cow; -use std::mem; - -/// Structure holding a grid with an n-tuple as the storage method for weights. -#[derive(Clone, Default, Deserialize, Serialize)] -pub struct NtupleSubgridV1 { - ntuples: Vec>, -} - -impl NtupleSubgridV1 { - /// Constructor. 
- #[must_use] - pub const fn new() -> Self { - Self { ntuples: vec![] } - } -} - -impl Subgrid for NtupleSubgridV1 { - fn convolve( - &self, - _: &[f64], - _: &[f64], - _: &[Mu2], - _: &mut dyn FnMut(usize, usize, usize) -> f64, - ) -> f64 { - panic!("NtupleSubgridV1 doesn't support the convolve operation"); - } - - fn fill(&mut self, ntuple: &Ntuple) { - if ntuple.weight == 0.0 { - return; - } - - self.ntuples.push(ntuple.clone()); - } - - fn mu2_grid(&self) -> Cow<[Mu2]> { - Cow::Borrowed(&[]) - } - - fn x1_grid(&self) -> Cow<[f64]> { - Cow::Borrowed(&[]) - } - - fn x2_grid(&self) -> Cow<[f64]> { - Cow::Borrowed(&[]) - } - - fn is_empty(&self) -> bool { - self.ntuples.is_empty() - } - - fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) { - assert!(!transpose); - - if let SubgridEnum::NtupleSubgridV1(other_grid) = other { - self.ntuples.append(&mut other_grid.ntuples); - } else { - panic!("NtupleSubgridV1 doesn't support the merge operation with subgrid types other than itself"); - } - } - - fn scale(&mut self, factor: f64) { - self.ntuples.iter_mut().for_each(|t| t.weight *= factor); - } - - fn symmetrize(&mut self) {} - - fn clone_empty(&self) -> SubgridEnum { - Self::new().into() - } - - fn indexed_iter(&self) -> SubgridIndexedIter { - panic!("NtupleSubgridV1 doesn't support the indexed_iter operation"); - } - - fn stats(&self) -> Stats { - Stats { - total: self.ntuples.len(), - allocated: self.ntuples.len(), - zeros: 0, - overhead: 0, - bytes_per_value: mem::size_of::>(), - } - } - - fn static_scale(&self) -> Option { - todo!() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::lagrange_subgrid::LagrangeSubgridV2; - use crate::subgrid::{ExtraSubgridParams, SubgridParams}; - - #[test] - #[should_panic(expected = "NtupleSubgridV1 doesn't support the convolve operation")] - fn convolve() { - NtupleSubgridV1::new().convolve(&[], &[], &[], &mut |_, _, _| 0.0); - } - - #[test] - fn fill_zero() { - let mut subgrid = NtupleSubgridV1::new(); - 
- subgrid.fill(&Ntuple { - x1: 0.5, - x2: 0.5, - q2: 1000.0, - weight: 0.0, - }); - - assert!(subgrid.is_empty()); - } - - #[test] - #[should_panic(expected = "NtupleSubgridV1 doesn't support the indexed_iter operation")] - fn indexed_iter() { - // `next` isn't called because `indexed_iter` panics, but it suppresses a warning about an - // unused result - NtupleSubgridV1::new().indexed_iter().next(); - } - - #[test] - fn stats() { - let subgrid = NtupleSubgridV1::new(); - assert_eq!( - subgrid.stats(), - Stats { - total: 0, - allocated: 0, - zeros: 0, - overhead: 0, - bytes_per_value: 32, - } - ); - } - - #[test] - #[should_panic(expected = "not yet implemented")] - fn static_scale() { - let subgrid = NtupleSubgridV1::new(); - subgrid.static_scale(); - } - - #[test] - #[should_panic( - expected = "NtupleSubgridV1 doesn't support the merge operation with subgrid types other than itself" - )] - fn merge_with_lagrange_subgrid() { - let mut subgrid = NtupleSubgridV1::new(); - let mut other = - LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()) - .into(); - subgrid.merge(&mut other, false); - } - - #[test] - fn test() { - let mut subgrid1: SubgridEnum = NtupleSubgridV1::new().into(); - - assert!(subgrid1.is_empty()); - - subgrid1.fill(&Ntuple { - x1: 0.0, - x2: 0.0, - q2: 0.0, - weight: 1.0, - }); - - assert!(!subgrid1.is_empty()); - - assert_eq!(subgrid1.mu2_grid().as_ref(), []); - assert_eq!(subgrid1.x1_grid().as_ref(), []); - assert_eq!(subgrid1.x2_grid().as_ref(), []); - - subgrid1.symmetrize(); - subgrid1.scale(2.0); - - let mut subgrid2: SubgridEnum = subgrid1.clone_empty(); - - subgrid2.fill(&Ntuple { - x1: 0.0, - x2: 0.0, - q2: 0.0, - weight: 1.0, - }); - - subgrid2.merge(&mut subgrid1, false); - } -} diff --git a/pineappl/src/packed_array.rs b/pineappl/src/packed_array.rs index 436ed7513..fbd20c687 100644 --- a/pineappl/src/packed_array.rs +++ b/pineappl/src/packed_array.rs @@ -1,6 +1,6 @@ //! Provides the [`PackedArray`] struct. 
-use ndarray::ArrayView3; +use ndarray::{ArrayView3, ArrayViewD}; use serde::{Deserialize, Serialize}; use std::iter; use std::mem; @@ -9,8 +9,8 @@ use std::ops::{Index, IndexMut, MulAssign}; /// `D`-dimensional array similar to [`ndarray::ArrayBase`], except that `T::default()` is not /// stored to save space. Instead, adjacent non-default elements are grouped together and the index /// of their first element (`start_index`) and the length of the group (`lengths`) is stored. -#[derive(Clone, Deserialize, Serialize)] -pub struct PackedArray { +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct PackedArray { /// The actual values stored in the array. The length of `entries` is always the sum of the /// elements in `lengths`. entries: Vec, @@ -23,15 +23,15 @@ pub struct PackedArray { shape: Vec, } -impl PackedArray { +impl PackedArray { /// Constructs a new and empty `PackedArray` of shape `shape`. #[must_use] - pub fn new(shape: [usize; D]) -> Self { + pub const fn new(shape: Vec) -> Self { Self { entries: vec![], start_indices: vec![], lengths: vec![], - shape: shape.to_vec(), + shape, } } @@ -77,10 +77,8 @@ impl PackedArray { self.entries.iter().filter(|x| **x != T::default()).count() } - /// Returns an `Iterator` over the non-default (non-zero) elements of this array. The type of - /// an iterator element is `([usize; D], T)` where the first element of the tuple is the index - /// and the second element is the value. 
- pub fn indexed_iter(&self) -> impl Iterator + '_ { + /// TODO + pub fn indexed_iter(&self) -> impl Iterator, T)> + '_ { self.start_indices .iter() .zip(&self.lengths) @@ -91,21 +89,53 @@ impl PackedArray { .filter(|&(_, entry)| *entry != Default::default()) .map(|(indices, entry)| (indices, *entry)) } + + /// TODO + /// + /// # Panics + /// + /// TODO + // TODO: rewrite this method into `sub_block_iter_mut() -> impl Iterator` + #[must_use] + pub fn sub_block_idx( + &self, + start_index: &[usize], + mut i: usize, + fill_shape: &[usize], + ) -> usize { + use super::packed_array; + + assert_eq!(start_index.len(), fill_shape.len()); + + let mut index = { + assert!(i < fill_shape.iter().product()); + let mut indices = vec![0; start_index.len()]; + for (j, d) in indices.iter_mut().zip(fill_shape).rev() { + *j = i % d; + i /= d; + } + indices + }; + for (entry, start_index) in index.iter_mut().zip(start_index) { + *entry += start_index; + } + packed_array::ravel_multi_index(&index, &self.shape) + } } -impl, const D: usize> MulAssign for PackedArray { +impl> MulAssign for PackedArray { fn mul_assign(&mut self, rhs: T) { self.entries.iter_mut().for_each(|x| *x *= rhs); } } -impl PackedArray { - /// Converts `array` into a `PackedArray`. +impl PackedArray { + /// Converts `array` into a `PackedArray`. #[must_use] pub fn from_ndarray(array: ArrayView3, xstart: usize, xsize: usize) -> Self { let shape = array.shape(); - let mut result = Self::new([xsize, shape[1], shape[2]]); + let mut result = Self::new(vec![xsize, shape[1], shape[2]]); for ((i, j, k), &entry) in array .indexed_iter() @@ -118,8 +148,24 @@ impl PackedArray { } } +impl From> for PackedArray { + fn from(array: ArrayViewD) -> Self { + let mut result = Self::new(array.shape().to_vec()); + + for (i, &entry) in array + .iter() + .enumerate() + .filter(|(_, &entry)| entry != Default::default()) + { + result[i] = entry; + } + + result + } +} + /// Converts a `multi_index` into a flat index. 
-fn ravel_multi_index(multi_index: &[usize; D], shape: &[usize]) -> usize { +fn ravel_multi_index(multi_index: &[usize], shape: &[usize]) -> usize { assert_eq!(multi_index.len(), shape.len()); multi_index @@ -128,10 +174,15 @@ fn ravel_multi_index(multi_index: &[usize; D], shape: &[usize]) .fold(0, |acc, (i, d)| acc * d + i) } -/// Converts a flat `index` into a `multi_index`. -fn unravel_index(mut index: usize, shape: &[usize]) -> [usize; D] { +/// TODO +/// +/// # Panics +/// +/// TODO +#[must_use] +pub fn unravel_index(mut index: usize, shape: &[usize]) -> Vec { assert!(index < shape.iter().product()); - let mut indices = [0; D]; + let mut indices = vec![0; shape.len()]; for (i, d) in indices.iter_mut().zip(shape).rev() { *i = index % d; index /= d; @@ -139,10 +190,18 @@ fn unravel_index(mut index: usize, shape: &[usize]) -> [usize; D indices } -impl Index<[usize; D]> for PackedArray { +impl Index<[usize; D]> for PackedArray { type Output = T; fn index(&self, index: [usize; D]) -> &Self::Output { + &self[index.as_slice()] + } +} + +impl Index<&[usize]> for PackedArray { + type Output = T; + + fn index(&self, index: &[usize]) -> &Self::Output { assert_eq!(index.len(), self.shape.len()); assert!( index.iter().zip(self.shape.iter()).all(|(&i, &d)| i < d), @@ -151,7 +210,7 @@ impl Index<[usize; D]> for Packed self.shape ); - let raveled_index = ravel_multi_index(&index, &self.shape); + let raveled_index = ravel_multi_index(index, &self.shape); let point = self.start_indices.partition_point(|&i| i <= raveled_index); assert!( @@ -174,10 +233,159 @@ impl Index<[usize; D]> for Packed } } -impl IndexMut<[usize; D]> - for PackedArray -{ - fn index_mut(&mut self, index: [usize; D]) -> &mut Self::Output { +impl Index for PackedArray { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + assert!( + index < self.shape.iter().product(), + "index {index} is out of bounds for array of shape {:?}", + self.shape + ); + + let raveled_index = index; + // let 
raveled_index = ravel_multi_index(&index, &self.shape); + let point = self.start_indices.partition_point(|&i| i <= raveled_index); + + assert!( + point > 0, + "entry at index {index:?} is implicitly set to the default value" + ); + + let start_index = self.start_indices[point - 1]; + let length = self.lengths[point - 1]; + + let point_entries = + self.lengths.iter().take(point - 1).sum::() + raveled_index - start_index; + + assert!( + raveled_index < (start_index + length), + "entry at index {index:?} is implicitly set to the default value" + ); + + &self.entries[point_entries] + } +} + +impl IndexMut for PackedArray { + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + // assert_eq!(index.len(), self.shape.len()); + + // // Panic if the index value for any dimension is greater or equal than the length of this + // // dimension. + // assert!( + // index.iter().zip(self.shape.iter()).all(|(&i, &d)| i < d), + // "index {:?} is out of bounds for array of shape {:?}", + // index, + // self.shape + // ); + + // // The insertion cases are: + // // 1. this array already stores an element at `index`: + // // -> we just have to update this element + // // 2. this array does not store an element at `index`: + // // a. the distance of the (raveled) `index` is `threshold_distance` away from the next + // // or previous element that is already stored: + // // -> we can merge the new element into already stored groups, potentially padding + // // with `T::default()` elements + // // b. the distance of the (raveled) `index` from the existing elements is greater than + // // `threshold_distance`: + // // -> we insert the element as a new group + + // let raveled_index = ravel_multi_index(&index, &self.shape); + let raveled_index = index; + + // To determine which groups the new element is close to, `point` is the index of the + // start_index of the first group after the new element. 
`point` is 0 if no elements before + // the new element are stored, and point is `self.start_indices.len()` if no elements after + // the new element are stored. + let point = self.start_indices.partition_point(|&i| i <= raveled_index); + + // `point_entries` is the index of the first element of the next group, given in + // `self.entries`, i.e. the element at index `self.start_indices[point]`. + let point_entries = self.lengths.iter().take(point).sum::(); + + // Maximum distance for merging groups. If the new element is within `threshold_distance` + // of an existing group (i.e. there are `threshold_distance - 1` implicit elements + // between them), we merge the new element into the existing group. We choose 2 as the + // `threshold_distance` based on memory: in the case of `T` = `f64`, it is more economical + // to store one zero explicitly than to store the start_index and length of a new group. + let threshold_distance = 2; + + // If `point > 0`, there is at least one group preceding the new element. Thus, in the + // following we determine if we can insert the new element into this group. + if point > 0 { + // start_index and length of the group before the new element, i.e. the group + // (potentially) getting the new element + let start_index = self.start_indices[point - 1]; + let length = self.lengths[point - 1]; + + // Case 1: an element is already stored at this `index` + if raveled_index < start_index + length { + return &mut self.entries[point_entries - length + raveled_index - start_index]; + // Case 2a: the new element can be merged into the preceding group + } else if raveled_index < start_index + length + threshold_distance { + let distance = raveled_index - (start_index + length) + 1; + // Merging happens by increasing the length of the group + self.lengths[point - 1] += distance; + // and inserting the necessary number of default elements. 
+ self.entries.splice( + point_entries..point_entries, + iter::repeat(Default::default()).take(distance), + ); + + // If the new element is within `threshold_distance` of the *next* group, we merge + // the next group into this group. + if let Some(start_index_next) = self.start_indices.get(point) { + if raveled_index + threshold_distance >= *start_index_next { + let distance_next = start_index_next - raveled_index; + + // Increase the length of this group + self.lengths[point - 1] += distance_next - 1 + self.lengths[point]; + // and remove the next group. we don't have to manipulate `self.entries`, + // since the grouping of the elements is handled only by + // `self.start_indices` and `self.lengths` + self.lengths.remove(point); + self.start_indices.remove(point); + // Insert the default elements between the groups. + self.entries.splice( + point_entries..point_entries, + iter::repeat(Default::default()).take(distance_next - 1), + ); + } + } + + return &mut self.entries[point_entries - 1 + distance]; + } + } + + // Case 2a: the new element can be merged into the next group. No `self.lengths.remove` and + // `self.start_indices.remove` here, since we are not merging two groups. 
+ if let Some(start_index_next) = self.start_indices.get(point) { + if raveled_index + threshold_distance >= *start_index_next { + let distance = start_index_next - raveled_index; + + self.start_indices[point] = raveled_index; + self.lengths[point] += distance; + self.entries.splice( + point_entries..point_entries, + iter::repeat(Default::default()).take(distance), + ); + return &mut self.entries[point_entries]; + } + } + + // Case 2b: we insert a new group of length 1 + self.start_indices.insert(point, raveled_index); + self.lengths.insert(point, 1); + self.entries.insert(point_entries, Default::default()); + + &mut self.entries[point_entries] + } +} + +impl IndexMut<&[usize]> for PackedArray { + fn index_mut(&mut self, index: &[usize]) -> &mut Self::Output { assert_eq!(index.len(), self.shape.len()); // Panic if the index value for any dimension is greater or equal than the length of this @@ -201,7 +409,7 @@ impl IndexMut<[usize; D]> // `threshold_distance`: // -> we insert the element as a new group - let raveled_index = ravel_multi_index(&index, &self.shape); + let raveled_index = ravel_multi_index(index, &self.shape); // To determine which groups the new element is close to, `point` is the index of the // start_index of the first group after the new element. 
`point` is 0 if no elements before @@ -292,22 +500,20 @@ impl IndexMut<[usize; D]> } } +impl IndexMut<[usize; D]> + for PackedArray +{ + fn index_mut(&mut self, index: [usize; D]) -> &mut Self::Output { + &mut self[index.as_slice()] + } +} + #[cfg(test)] mod tests { use super::*; use ndarray::Array3; use std::mem; - #[test] - fn unravel_index() { - assert_eq!(super::unravel_index(0, &[3, 2]), [0, 0]); - assert_eq!(super::unravel_index(1, &[3, 2]), [0, 1]); - assert_eq!(super::unravel_index(2, &[3, 2]), [1, 0]); - assert_eq!(super::unravel_index(3, &[3, 2]), [1, 1]); - assert_eq!(super::unravel_index(4, &[3, 2]), [2, 0]); - assert_eq!(super::unravel_index(5, &[3, 2]), [2, 1]); - } - #[test] fn ravel_multi_index() { assert_eq!(super::ravel_multi_index(&[0, 0], &[3, 2]), 0); @@ -320,7 +526,7 @@ mod tests { #[test] fn index() { - let mut a = PackedArray::new([4, 2]); + let mut a = PackedArray::new(vec![4, 2]); a[[0, 0]] = 1; assert_eq!(a[[0, 0]], 1); @@ -372,9 +578,64 @@ mod tests { assert_eq!(a.lengths, vec![8]); } + #[test] + fn flat_index() { + let shape = vec![4, 2]; + let mut a = PackedArray::new(shape.clone()); + + a[[0, 0]] = 1; + assert_eq!(a[super::ravel_multi_index(&[0, 0], &shape)], 1); + assert_eq!(a.entries, vec![1]); + assert_eq!(a.start_indices, vec![0]); + assert_eq!(a.lengths, vec![1]); + + a[[3, 0]] = 2; + assert_eq!(a[super::ravel_multi_index(&[0, 0], &shape)], 1); + assert_eq!(a[super::ravel_multi_index(&[3, 0], &shape)], 2); + assert_eq!(a.entries, vec![1, 2]); + assert_eq!(a.start_indices, vec![0, 6]); + assert_eq!(a.lengths, vec![1, 1]); + + a[[3, 1]] = 3; + assert_eq!(a[super::ravel_multi_index(&[0, 0], &shape)], 1); + assert_eq!(a[super::ravel_multi_index(&[3, 0], &shape)], 2); + assert_eq!(a[super::ravel_multi_index(&[3, 1], &shape)], 3); + assert_eq!(a.entries, vec![1, 2, 3]); + assert_eq!(a.start_indices, vec![0, 6]); + assert_eq!(a.lengths, vec![1, 2]); + + a[[2, 0]] = 9; + assert_eq!(a[super::ravel_multi_index(&[0, 0], &shape)], 1); + 
assert_eq!(a[super::ravel_multi_index(&[3, 0], &shape)], 2); + assert_eq!(a[super::ravel_multi_index(&[3, 1], &shape)], 3); + assert_eq!(a[super::ravel_multi_index(&[2, 0], &shape)], 9); + assert_eq!(a.entries, vec![1, 9, 0, 2, 3]); + assert_eq!(a.start_indices, vec![0, 4]); + assert_eq!(a.lengths, vec![1, 4]); + + a[[2, 0]] = 4; + assert_eq!(a[super::ravel_multi_index(&[0, 0], &shape)], 1); + assert_eq!(a[super::ravel_multi_index(&[3, 0], &shape)], 2); + assert_eq!(a[super::ravel_multi_index(&[3, 1], &shape)], 3); + assert_eq!(a[super::ravel_multi_index(&[2, 0], &shape)], 4); + assert_eq!(a.entries, vec![1, 4, 0, 2, 3]); + assert_eq!(a.start_indices, vec![0, 4]); + assert_eq!(a.lengths, vec![1, 4]); + + a[[1, 0]] = 5; + assert_eq!(a[super::ravel_multi_index(&[0, 0], &shape)], 1); + assert_eq!(a[super::ravel_multi_index(&[3, 0], &shape)], 2); + assert_eq!(a[super::ravel_multi_index(&[3, 1], &shape)], 3); + assert_eq!(a[super::ravel_multi_index(&[2, 0], &shape)], 4); + assert_eq!(a[super::ravel_multi_index(&[1, 0], &shape)], 5); + assert_eq!(a.entries, vec![1, 0, 5, 0, 4, 0, 2, 3]); + assert_eq!(a.start_indices, vec![0]); + assert_eq!(a.lengths, vec![8]); + } + #[test] fn iter() { - let mut a = PackedArray::new([6, 5]); + let mut a = PackedArray::new(vec![6, 5]); a[[2, 2]] = 1; a[[2, 4]] = 2; a[[4, 1]] = 3; @@ -383,18 +644,18 @@ mod tests { assert_eq!( a.indexed_iter().collect::>(), &[ - ([2, 2], 1), - ([2, 4], 2), - ([4, 1], 3), - ([4, 4], 4), - ([5, 0], 5), + (vec![2, 2], 1), + (vec![2, 4], 2), + (vec![4, 1], 3), + (vec![4, 4], 4), + (vec![5, 0], 5), ] ); } #[test] fn index_access() { - let mut array = PackedArray::new([40, 50, 50]); + let mut array = PackedArray::new(vec![40, 50, 50]); // after creation the array must be empty assert_eq!(array.overhead(), 0); @@ -539,7 +800,7 @@ mod tests { #[test] #[should_panic(expected = "index [40, 0, 50] is out of bounds for array of shape [40, 50, 50]")] fn index_mut_panic_dim0() { - let mut array = PackedArray::new([40, 
50, 50]); + let mut array = PackedArray::new(vec![40, 50, 50]); array[[40, 0, 50]] = 1.0; } @@ -547,7 +808,7 @@ mod tests { #[test] #[should_panic(expected = "index [0, 50, 0] is out of bounds for array of shape [40, 50, 50]")] fn index_mut_panic_dim1() { - let mut array = PackedArray::new([40, 50, 50]); + let mut array = PackedArray::new(vec![40, 50, 50]); array[[0, 50, 0]] = 1.0; } @@ -555,7 +816,7 @@ mod tests { #[test] #[should_panic(expected = "index [0, 0, 50] is out of bounds for array of shape [40, 50, 50]")] fn index_mut_panic_dim2() { - let mut array = PackedArray::new([40, 50, 50]); + let mut array = PackedArray::new(vec![40, 50, 50]); array[[0, 0, 50]] = 1.0; } @@ -563,56 +824,89 @@ mod tests { #[test] #[should_panic(expected = "entry at index [0, 0, 0] is implicitly set to the default value")] fn index_panic_dim0_0() { - let mut array = PackedArray::new([40, 50, 50]); + let mut array = PackedArray::new(vec![40, 50, 50]); array[[1, 0, 0]] = 1; - assert_eq!(array[[0, 0, 0]], 0); + let _ = array[[0, 0, 0]]; } #[test] #[should_panic(expected = "entry at index [2, 0, 0] is implicitly set to the default value")] fn index_panic_dim0_1() { - let mut array = PackedArray::new([40, 50, 50]); + let mut array = PackedArray::new(vec![40, 50, 50]); array[[1, 0, 0]] = 1; - assert_eq!(array[[2, 0, 0]], 0); + let _ = array[[2, 0, 0]]; } #[test] #[should_panic(expected = "index [1, 50, 0] is out of bounds for array of shape [40, 50, 50]")] fn index_panic_dim1() { - let mut array = PackedArray::new([40, 50, 50]); + let mut array = PackedArray::new(vec![40, 50, 50]); array[[1, 0, 0]] = 1; - assert_eq!(array[[1, 50, 0]], 0); + let _ = array[[1, 50, 0]]; } #[test] #[should_panic(expected = "entry at index [0, 0, 0] is implicitly set to the default value")] fn index_panic_dim2_0() { - let mut array = PackedArray::new([40, 50, 50]); + let mut array = PackedArray::new(vec![40, 50, 50]); array[[0, 0, 1]] = 1; - assert_eq!(array[[0, 0, 0]], 0); + let _ = array[[0, 0, 0]]; } 
#[test] #[should_panic(expected = "entry at index [0, 0, 2] is implicitly set to the default value")] fn index_panic_dim2_1() { - let mut array = PackedArray::new([40, 50, 50]); + let mut array = PackedArray::new(vec![40, 50, 50]); array[[0, 0, 1]] = 1; - assert_eq!(array[[0, 0, 2]], 0); + let _ = array[[0, 0, 2]]; + } + + #[test] + #[should_panic(expected = "entry at index 0 is implicitly set to the default value")] + fn flat_index_panic_0() { + let shape = vec![40, 50, 50]; + let mut array = PackedArray::new(shape.clone()); + + array[[1, 0, 0]] = 1; + + let _ = array[super::ravel_multi_index(&[0, 0, 0], &shape)]; + } + + #[test] + #[should_panic(expected = "entry at index 2 is implicitly set to the default value")] + fn flat_index_panic_2() { + let shape = vec![40, 50, 50]; + let mut array = PackedArray::new(shape.clone()); + + array[[0, 0, 1]] = 1; + + let _ = array[super::ravel_multi_index(&[0, 0, 2], &shape)]; + } + + #[test] + #[should_panic(expected = "index 102550 is out of bounds for array of shape [40, 50, 50]")] + fn flat_index_panic_102550() { + let shape = vec![40, 50, 50]; + let mut array = PackedArray::new(shape.clone()); + + array[[1, 0, 0]] = 1; + + let _ = array[super::ravel_multi_index(&[40, 50, 50], &shape)]; } #[test] fn indexed_iter() { - let mut array = PackedArray::new([40, 50, 50]); + let mut array = PackedArray::new(vec![40, 50, 50]); // check shape assert_eq!(array.shape(), [40, 50, 50]); @@ -626,7 +920,7 @@ mod tests { let mut iter = array.indexed_iter(); // check iterator with one element - assert_eq!(iter.next(), Some(([2, 3, 4], 1))); + assert_eq!(iter.next(), Some((vec![2, 3, 4], 1))); assert_eq!(iter.next(), None); mem::drop(iter); @@ -636,8 +930,8 @@ mod tests { let mut iter = array.indexed_iter(); - assert_eq!(iter.next(), Some(([2, 3, 4], 1))); - assert_eq!(iter.next(), Some(([2, 3, 6], 2))); + assert_eq!(iter.next(), Some((vec![2, 3, 4], 1))); + assert_eq!(iter.next(), Some((vec![2, 3, 6], 2))); assert_eq!(iter.next(), None); 
mem::drop(iter); @@ -647,9 +941,9 @@ mod tests { let mut iter = array.indexed_iter(); - assert_eq!(iter.next(), Some(([2, 3, 4], 1))); - assert_eq!(iter.next(), Some(([2, 3, 6], 2))); - assert_eq!(iter.next(), Some(([4, 5, 7], 3))); + assert_eq!(iter.next(), Some((vec![2, 3, 4], 1))); + assert_eq!(iter.next(), Some((vec![2, 3, 6], 2))); + assert_eq!(iter.next(), Some((vec![4, 5, 7], 3))); assert_eq!(iter.next(), None); mem::drop(iter); @@ -659,16 +953,16 @@ mod tests { let mut iter = array.indexed_iter(); - assert_eq!(iter.next(), Some(([2, 0, 0], 4))); - assert_eq!(iter.next(), Some(([2, 3, 4], 1))); - assert_eq!(iter.next(), Some(([2, 3, 6], 2))); - assert_eq!(iter.next(), Some(([4, 5, 7], 3))); + assert_eq!(iter.next(), Some((vec![2, 0, 0], 4))); + assert_eq!(iter.next(), Some((vec![2, 3, 4], 1))); + assert_eq!(iter.next(), Some((vec![2, 3, 6], 2))); + assert_eq!(iter.next(), Some((vec![4, 5, 7], 3))); assert_eq!(iter.next(), None); } #[test] fn clear() { - let mut array = PackedArray::new([40, 50, 50]); + let mut array = PackedArray::new(vec![40, 50, 50]); array[[3, 5, 1]] = 1; array[[7, 8, 9]] = 2; diff --git a/pineappl/src/pids.rs b/pineappl/src/pids.rs index 5d130d06b..329cc6fd8 100644 --- a/pineappl/src/pids.rs +++ b/pineappl/src/pids.rs @@ -1,5 +1,9 @@ //! TODO +use super::boc::Channel; +use super::fk_table::FkAssumptions; +use float_cmp::approx_eq; +use serde::{Deserialize, Serialize}; use std::str::FromStr; use thiserror::Error; @@ -8,7 +12,8 @@ const EVOL_BASIS_IDS: [i32; 12] = [100, 103, 108, 115, 124, 135, 200, 203, 208, /// Particle ID bases. In `PineAPPL` every particle is identified using a particle identifier /// (PID), which is represented as an `i32`. The values of this `enum` specify how this value is /// interpreted. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] +#[repr(C)] +#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)] pub enum PidBasis { /// This basis uses the [particle data group](https://pdg.lbl.gov/) (PDG) PIDs. 
For a complete /// definition see the section 'Monte Carlo Particle Numbering Scheme' of the PDG Review, for @@ -100,8 +105,76 @@ impl PidBasis { ), } } + + /// TODO + #[must_use] + pub fn translate(&self, to: Self, channel: Channel) -> Channel { + match (self, to) { + (&Self::Pdg, Self::Pdg) | (&Self::Evol, Self::Evol) => channel, + (&Self::Pdg, Self::Evol) => channel.translate(&pdg_mc_pids_to_evol), + (&Self::Evol, Self::Pdg) => channel.translate(&evol_to_pdg_mc_ids), + } + } + + /// TODO + #[must_use] + pub fn opt_rules(&self, assumptions: FkAssumptions) -> OptRules { + match (*self, assumptions) { + (Self::Evol | Self::Pdg, FkAssumptions::Nf6Ind) => OptRules(Vec::new(), Vec::new()), + (Self::Evol, FkAssumptions::Nf6Sym) => OptRules(vec![(235, 200)], Vec::new()), + (Self::Evol, FkAssumptions::Nf5Ind) => { + OptRules(vec![(235, 200), (135, 100)], Vec::new()) + } + (Self::Evol, FkAssumptions::Nf5Sym) => { + OptRules(vec![(235, 200), (135, 100), (224, 200)], Vec::new()) + } + (Self::Evol, FkAssumptions::Nf4Ind) => OptRules( + vec![(235, 200), (135, 100), (224, 200), (124, 100)], + Vec::new(), + ), + (Self::Evol, FkAssumptions::Nf4Sym) => OptRules( + vec![(235, 200), (135, 100), (224, 200), (124, 100), (215, 200)], + Vec::new(), + ), + (Self::Evol, FkAssumptions::Nf3Ind) => OptRules( + vec![ + (235, 200), + (135, 100), + (224, 200), + (124, 100), + (215, 200), + (115, 100), + ], + Vec::new(), + ), + (Self::Evol, FkAssumptions::Nf3Sym) => OptRules( + vec![ + (235, 200), + (135, 100), + (224, 200), + (124, 100), + (215, 200), + (115, 100), + (208, 200), + ], + Vec::new(), + ), + (Self::Pdg, FkAssumptions::Nf6Sym) => OptRules(vec![(-6, 6)], Vec::new()), + (Self::Pdg, FkAssumptions::Nf5Ind) => OptRules(Vec::new(), vec![-6, 6]), + (Self::Pdg, FkAssumptions::Nf5Sym) => OptRules(vec![(-5, 5)], vec![-6, 6]), + (Self::Pdg, FkAssumptions::Nf4Ind) => OptRules(Vec::new(), vec![-6, 6, -5, 5]), + (Self::Pdg, FkAssumptions::Nf4Sym) => OptRules(vec![(-4, 4)], vec![-6, 6, -5, 5]), + 
(Self::Pdg, FkAssumptions::Nf3Ind) => OptRules(Vec::new(), vec![-6, 6, -5, 5, -4, 4]), + (Self::Pdg, FkAssumptions::Nf3Sym) => { + OptRules(vec![(-3, 3)], vec![-6, 6, -5, 5, -4, 4]) + } + } + } } +/// Return type of [`PidBasis::optimization_rules`]. +pub struct OptRules(pub Vec<(i32, i32)>, pub Vec); + /// Error returned by [`PidBasis::from_str`] when passed with an unknown argument. #[derive(Debug, Error)] #[error("unknown PID basis: {basis}")] @@ -111,7 +184,7 @@ pub struct UnknownPidBasis { /// Translates IDs from the evolution basis into IDs using PDG Monte Carlo IDs. #[must_use] -pub fn evol_to_pdg_mc_ids(id: i32) -> Vec<(i32, f64)> { +fn evol_to_pdg_mc_ids(id: i32) -> Vec<(i32, f64)> { match id { 100 => vec![ (2, 1.0), @@ -237,7 +310,7 @@ pub fn evol_to_pdg_mc_ids(id: i32) -> Vec<(i32, f64)> { /// Translates PDG Monte Carlo IDs to particle IDs from the evolution basis. #[must_use] -pub fn pdg_mc_pids_to_evol(pid: i32) -> Vec<(i32, f64)> { +fn pdg_mc_pids_to_evol(pid: i32) -> Vec<(i32, f64)> { match pid { -6 => vec![ (100, 1.0 / 12.0), @@ -406,7 +479,7 @@ pub fn pdg_mc_ids_to_evol(tuples: &[(i32, f64)]) -> Option { .collect(); if let &[(pid, factor)] = non_zero.as_slice() { - if factor == 1.0 { + if approx_eq!(f64, factor, 1.0, ulps = 4) { return Some(pid); } } @@ -417,7 +490,6 @@ pub fn pdg_mc_ids_to_evol(tuples: &[(i32, f64)]) -> Option { #[cfg(test)] mod tests { use super::*; - use crate::boc::Channel; use crate::channel; use float_cmp::assert_approx_eq; @@ -925,15 +997,14 @@ mod tests { #[test] fn inverse_inverse_evol() { for pid in [-6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6] { - let result = Channel::translate( - &Channel::translate(&channel![pid, pid, 1.0], &pdg_mc_pids_to_evol), - &evol_to_pdg_mc_ids, - ); + let result = &channel![1.0 * (pid, pid)] + .translate(&pdg_mc_pids_to_evol) + .translate(&evol_to_pdg_mc_ids); assert_eq!(result.entry().len(), 1); - assert_eq!(result.entry()[0].0, pid); - assert_eq!(result.entry()[0].1, pid); - 
assert_approx_eq!(f64, result.entry()[0].2, 1.0, ulps = 8); + assert_eq!(result.entry()[0].0[0], pid); + assert_eq!(result.entry()[0].0[1], pid); + assert_approx_eq!(f64, result.entry()[0].1, 1.0, ulps = 8); } } @@ -1016,4 +1087,31 @@ mod tests { fn to_latex_str_error() { let _ = PidBasis::Pdg.to_latex_str(999); } + + #[test] + fn translate() { + let channel = PidBasis::Evol.translate(PidBasis::Pdg, channel![2.0 * (103, 203)]); + + assert_eq!( + channel, + channel![ + 2.0 * (2, 2) + + -2.0 * (2, -2) + + -2.0 * (2, 1) + + 2.0 * (2, -1) + + 2.0 * (-2, 2) + + -2.0 * (-2, -2) + + -2.0 * (-2, 1) + + 2.0 * (-2, -1) + + -2.0 * (1, 2) + + 2.0 * (1, -2) + + 2.0 * (1, 1) + + -2.0 * (1, -1) + + -2.0 * (-1, 2) + + 2.0 * (-1, -2) + + 2.0 * (-1, 1) + + -2.0 * (-1, -1) + ] + ); + } } diff --git a/pineappl/src/sparse_array3.rs b/pineappl/src/sparse_array3.rs deleted file mode 100644 index 1debae7e8..000000000 --- a/pineappl/src/sparse_array3.rs +++ /dev/null @@ -1,1135 +0,0 @@ -//! Module containing the `SparseArray3` struct. - -use ndarray::{ArrayView3, Axis}; -use serde::{Deserialize, Serialize}; -use std::iter; -use std::mem; -use std::ops::{Index, IndexMut, Range}; -use std::slice::{Iter, IterMut}; - -/// Struct for a sparse three-dimensional array, which is optimized for the sparsity of -/// interpolation grids. 
-#[derive(Clone, Deserialize, Serialize)] -pub struct SparseArray3 { - entries: Vec, - indices: Vec<(usize, usize)>, - start: usize, - dimensions: (usize, usize, usize), -} - -// TODO: write panic messages - -impl Index<[usize; 3]> for SparseArray3 { - type Output = T; - - fn index(&self, mut index: [usize; 3]) -> &Self::Output { - // index too small - assert!(index[0] >= self.start); - - let dim1 = if self.dimensions.1 > self.dimensions.2 { - index.swap(1, 2); - self.dimensions.2 - } else { - self.dimensions.1 - }; - - // index too large - assert!(index[0] < (self.start + (self.indices.len() - 1) / dim1)); - - // index too large - assert!(index[1] < dim1); - - let forward = dim1 * (index[0] - self.start) + index[1]; - let indices_a = &self.indices[forward]; - let indices_b = &self.indices[forward + 1]; - - let zeros_left = indices_a.0; - let offset = indices_a.1; - let non_zeros = indices_b.1 - offset; - - // index too small - assert!(index[2] >= zeros_left); - - // index too large - assert!(index[2] < (non_zeros + zeros_left)); - - &self.entries[offset + (index[2] - zeros_left)] - } -} - -impl IndexMut<[usize; 3]> for SparseArray3 { - fn index_mut(&mut self, mut index: [usize; 3]) -> &mut Self::Output { - let dim1 = if self.dimensions.1 > self.dimensions.2 { - index.swap(1, 2); - self.dimensions.2 - } else { - self.dimensions.1 - }; - - let max_index0 = self.start + (self.indices.len() - 1) / dim1; - - if index[0] < self.start { - let elements = self.start - index[0]; - self.start = index[0]; - self.indices - .splice(0..0, iter::repeat((0, 0)).take(elements * dim1)); - } else if index[0] >= self.dimensions.0 { - panic!(); - } else if self.entries.is_empty() || (index[0] >= max_index0) { - let elements = if self.entries.is_empty() { - self.start = index[0]; - 1 - } else { - index[0] - max_index0 + 1 - }; - - let insert = self.indices.len() - 1; - self.indices.splice( - insert..insert, - iter::repeat((0, self.indices.last().unwrap().1)).take(elements * dim1), - ); 
- } - - // index too large - assert!(index[1] < dim1); - - let forward = dim1 * (index[0] - self.start) + index[1]; - let indices_a = &self.indices[forward]; - let indices_b = &self.indices[forward + 1]; - - let zeros_left = indices_a.0; - let offset = indices_a.1; - let non_zeros = indices_b.1 - offset; - - let elements; - let insert; - - if index[2] < zeros_left { - elements = zeros_left - index[2]; - insert = offset; - self.indices[forward].0 -= elements; - } else if index[2] >= self.dimensions.2.max(self.dimensions.1) { - panic!(); - } else if non_zeros == 0 { - elements = 1; - insert = offset; - self.indices[forward].0 = index[2]; - } else if index[2] >= (zeros_left + non_zeros) { - elements = index[2] - (zeros_left + non_zeros) + 1; - insert = offset + non_zeros; - } else { - return &mut self.entries[offset + (index[2] - zeros_left)]; - } - - self.entries - .splice(insert..insert, iter::repeat(T::default()).take(elements)); - self.indices - .iter_mut() - .skip(forward + 1) - .for_each(|ix| ix.1 += elements); - - &mut self.entries[offset + (index[2] - self.indices[forward].0)] - } -} - -/// Immutable iterator over the elements of a `SparseArray3`. 
-pub struct IndexedIter<'a, T> { - entry_iter: Iter<'a, T>, - index_iter: Iter<'a, (usize, usize)>, - offset_a: Option<&'a (usize, usize)>, - offset_b: Option<&'a (usize, usize)>, - tuple: (usize, usize, usize), - dimensions: (usize, usize, usize), -} - -impl<'a, T: Copy + Default + PartialEq> Iterator for IndexedIter<'a, T> { - type Item = ((usize, usize, usize), T); - - fn next(&mut self) -> Option { - if let Some(element) = self.entry_iter.next() { - let offset_a = self.offset_a.unwrap(); - let offset_b = self.offset_b.unwrap(); - - if self.dimensions.1 > self.dimensions.2 { - self.tuple.1 = self.tuple.1.max(offset_a.0); - - if self.tuple.1 >= (offset_b.1 - offset_a.1 + offset_a.0) { - loop { - self.offset_a = self.offset_b; - self.offset_b = self.index_iter.next(); - - let offset_a = self.offset_a.unwrap(); - let offset_b = self.offset_b?; - - self.tuple.2 += 1; - - if self.tuple.2 >= self.dimensions.2 { - self.tuple.0 += 1; - self.tuple.2 = 0; - } - - if (offset_b.1 - offset_a.1) != 0 { - self.tuple.1 = offset_a.0; - break; - } - } - } - - if *element == T::default() { - self.tuple.1 += 1; - self.next() - } else { - let result = Some((self.tuple, *element)); - self.tuple.1 += 1; - result - } - } else { - self.tuple.2 = self.tuple.2.max(offset_a.0); - - if self.tuple.2 >= (offset_b.1 - offset_a.1 + offset_a.0) { - loop { - self.offset_a = self.offset_b; - self.offset_b = self.index_iter.next(); - - let offset_a = self.offset_a.unwrap(); - let offset_b = self.offset_b?; - - self.tuple.1 += 1; - - if self.tuple.1 >= self.dimensions.1 { - self.tuple.0 += 1; - self.tuple.1 = 0; - } - - if (offset_b.1 - offset_a.1) != 0 { - self.tuple.2 = offset_a.0; - break; - } - } - } - - if *element == T::default() { - self.tuple.2 += 1; - self.next() - } else { - let result = Some((self.tuple, *element)); - self.tuple.2 += 1; - result - } - } - } else { - None - } - } -} - -impl SparseArray3 { - /// Constructs a new and empty `SparseArray3` with the specified dimensions `nx`, 
`ny` and - /// `nz`. - #[must_use] - pub fn new(nx: usize, ny: usize, nz: usize) -> Self { - Self { - entries: vec![], - indices: vec![(0, 0)], - start: 0, - dimensions: (nx, ny, nz), - } - } - - /// Converts `array` into a `SparseArray3`. - #[must_use] - pub fn from_ndarray(array: ArrayView3, xstart: usize, xsize: usize) -> Self { - let (_, ny, nz) = array.dim(); - let array = if ny > nz { - let mut array = array; - array.swap_axes(1, 2); - array - } else { - array - }; - - let dimensions = (xsize, ny, nz); - let mut entries = vec![]; - let mut indices = vec![]; - - let mut offset = 0; - - for array2 in array.axis_iter(Axis(0)) { - for array1 in array2.axis_iter(Axis(0)) { - let start = array1.iter().position(|x| *x != T::default()); - - if let Some(start) = start { - let end = array1.iter().enumerate().skip(start).fold( - start, - |last_non_zero, (index, x)| { - if *x == T::default() { - last_non_zero - } else { - index - } - }, - ) + 1; - indices.push((start, offset)); - offset += end - start; - entries.splice( - entries.len()..entries.len(), - array1.iter().skip(start).take(end - start).cloned(), - ); - } else { - indices.push((0, offset)); - } - } - } - - indices.push((0, offset)); - - Self { - entries, - indices, - start: xstart, - dimensions, - } - } - - /// Clear the contents of the array. - pub fn clear(&mut self) { - self.entries.clear(); - self.indices.clear(); - self.indices.push((0, 0)); - self.start = 0; - } - - /// Returns the dimensions of this array. - #[must_use] - pub const fn dimensions(&self) -> (usize, usize, usize) { - self.dimensions - } - - /// Returns the overhead for storing the explicitly zero and non-zero elements. - #[must_use] - pub fn overhead(&self) -> usize { - (2 * self.indices.len() * mem::size_of::()) / mem::size_of::() - } - - /// Returns the number of default (zero) elements in this array. 
- #[must_use] - pub fn zeros(&self) -> usize { - self.entries.iter().filter(|x| **x == T::default()).count() - } - - /// Returns the number of non-default (non-zero) elements in this array. - #[must_use] - pub fn len(&self) -> usize { - self.entries.iter().filter(|x| **x != T::default()).count() - } - - /// Returns `true` if the array contains no element. - #[must_use] - pub fn is_empty(&self) -> bool { - self.entries.is_empty() - } - - /// Return an indexed `Iterator` over the non-zero elements of this array. The iterator element - /// type is `((usize, usize, usize), T)`. - #[must_use] - pub fn indexed_iter(&self) -> IndexedIter<'_, T> { - let mut result = IndexedIter { - entry_iter: self.entries.iter(), - index_iter: self.indices.iter(), - offset_a: None, - offset_b: None, - tuple: (self.start, 0, 0), - dimensions: self.dimensions, - }; - - result.offset_a = result.index_iter.next(); - result.offset_b = result.index_iter.next(); - - result - } - - /// Return an iterator over the elements, including zero elements. - pub fn iter_mut(&mut self) -> IterMut<'_, T> { - self.entries.iter_mut() - } - - /// Return a half-open interval of indices that are filled for the first dimension. - #[must_use] - pub fn x_range(&self) -> Range { - self.start - ..(self.start + (self.indices.len() - 1) / self.dimensions.1.min(self.dimensions.2)) - } - - /// Increase the number of entries of the x-axis by one by inserting zeros at `x`. - pub fn increase_x_at(&mut self, x: usize) { - let dim1 = self.dimensions.1.min(self.dimensions.2); - let nx = (self.indices.len() - 1) / dim1; - - if x <= self.start { - self.start += 1; - } else if x < self.start + nx { - let at = (x - self.start) * dim1; - let offset = self.indices[at].1; - self.indices - .splice(at..at, iter::repeat((0, offset)).take(dim1)); - } else if x <= self.dimensions.0 { - // nothing to do here - } else { - self.dimensions.0 = x; - } - - self.dimensions.0 += 1; - } - - /// Removes all elements with the specified x coordinate. 
- /// - /// # Panics - /// - /// TODO - pub fn remove_x(&mut self, x: usize) { - let dim1 = self.dimensions.1.min(self.dimensions.2); - let nx = (self.indices.len() - 1) / dim1; - - assert!((x >= self.start) && (x < self.start + nx)); - - let index_a = (x - self.start) * dim1; - let index_b = (x - self.start + 1) * dim1; - let offset_a = self.indices[index_a].1; - let offset_b = self.indices[index_b].1; - - self.entries.drain(offset_a..offset_b); - self.indices - .iter_mut() - .skip(index_b) - .for_each(|o| o.1 -= offset_b - offset_a); - - if (x != self.start) && (x != (self.start + nx - 1)) { - self.indices - .splice(index_a..index_b, iter::repeat((0, offset_a)).take(dim1)); - } else { - if x == self.start { - self.start += 1; - } - - self.indices.drain(index_a..index_b); - } - - if self.indices.last().unwrap().1 == 0 { - self.clear(); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use ndarray::Array3; - - #[test] - fn index_access() { - let mut array = SparseArray3::new(40, 50, 50); - - // after creation the array must be empty - assert_eq!(array.x_range(), 0..0); - assert_eq!(array.overhead(), 2); - assert!(array.is_empty()); - - // insert the first element - array[[5, 10, 10]] = 1.0; - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 1); - assert_eq!(array.zeros(), 0); - assert_eq!(array.x_range(), 5..6); - assert_eq!(array.overhead(), 102); - assert!(!array.is_empty()); - - // insert an element after the first one - array[[8, 10, 10]] = 2.0; - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 2); - assert_eq!(array.zeros(), 0); - assert_eq!(array.x_range(), 5..9); - assert_eq!(array.overhead(), 402); - assert!(!array.is_empty()); - - // insert an element before the first one - array[[1, 10, 10]] = 3.0; - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 3); - assert_eq!(array.zeros(), 0); - 
assert_eq!(array.x_range(), 1..9); - assert_eq!(array.overhead(), 802); - assert!(!array.is_empty()); - - array[[1, 10, 11]] = 4.0; - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 4); - assert_eq!(array.zeros(), 0); - assert_eq!(array.x_range(), 1..9); - assert_eq!(array.overhead(), 802); - assert!(!array.is_empty()); - - array[[1, 10, 9]] = 5.0; - assert_eq!(array[[1, 10, 9]], 5.0); - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 5); - assert_eq!(array.zeros(), 0); - assert_eq!(array.x_range(), 1..9); - assert_eq!(array.overhead(), 802); - assert!(!array.is_empty()); - - array[[1, 10, 0]] = 6.0; - assert_eq!(array[[1, 10, 0]], 6.0); - assert_eq!(array[[1, 10, 9]], 5.0); - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 6); - assert_eq!(array.x_range(), 1..9); - assert_eq!(array.overhead(), 802); - assert!(!array.is_empty()); - - // check zeros - assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array[[1, 10, 2]], 0.0); - assert_eq!(array[[1, 10, 3]], 0.0); - assert_eq!(array[[1, 10, 4]], 0.0); - assert_eq!(array[[1, 10, 5]], 0.0); - assert_eq!(array[[1, 10, 6]], 0.0); - assert_eq!(array[[1, 10, 7]], 0.0); - assert_eq!(array[[1, 10, 8]], 0.0); - assert_eq!(array.zeros(), 8); - - // insert where previously a zero was - array[[1, 10, 2]] = 7.0; - assert_eq!(array[[1, 10, 2]], 7.0); - assert_eq!(array[[1, 10, 0]], 6.0); - assert_eq!(array[[1, 10, 9]], 5.0); - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 7); - assert_eq!(array.x_range(), 1..9); 
- assert_eq!(array.overhead(), 802); - assert!(!array.is_empty()); - - // check zeros - assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array[[1, 10, 3]], 0.0); - assert_eq!(array[[1, 10, 4]], 0.0); - assert_eq!(array[[1, 10, 5]], 0.0); - assert_eq!(array[[1, 10, 6]], 0.0); - assert_eq!(array[[1, 10, 7]], 0.0); - assert_eq!(array[[1, 10, 8]], 0.0); - assert_eq!(array.zeros(), 7); - - array[[1, 15, 2]] = 8.0; - assert_eq!(array[[1, 15, 2]], 8.0); - assert_eq!(array[[1, 10, 2]], 7.0); - assert_eq!(array[[1, 10, 0]], 6.0); - assert_eq!(array[[1, 10, 9]], 5.0); - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 8); - assert_eq!(array.x_range(), 1..9); - assert_eq!(array.overhead(), 802); - assert!(!array.is_empty()); - - // check zeros - assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array[[1, 10, 3]], 0.0); - assert_eq!(array[[1, 10, 4]], 0.0); - assert_eq!(array[[1, 10, 5]], 0.0); - assert_eq!(array[[1, 10, 6]], 0.0); - assert_eq!(array[[1, 10, 7]], 0.0); - assert_eq!(array[[1, 10, 8]], 0.0); - assert_eq!(array.zeros(), 7); - - array[[1, 15, 4]] = 9.0; - assert_eq!(array[[1, 15, 4]], 9.0); - assert_eq!(array[[1, 15, 2]], 8.0); - assert_eq!(array[[1, 10, 2]], 7.0); - assert_eq!(array[[1, 10, 0]], 6.0); - assert_eq!(array[[1, 10, 9]], 5.0); - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 9); - assert_eq!(array.x_range(), 1..9); - assert_eq!(array.overhead(), 802); - assert!(!array.is_empty()); - - // check zeros - assert_eq!(array[[1, 15, 3]], 0.0); - assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array[[1, 10, 3]], 0.0); - assert_eq!(array[[1, 10, 4]], 0.0); - assert_eq!(array[[1, 10, 5]], 0.0); - assert_eq!(array[[1, 10, 6]], 0.0); - assert_eq!(array[[1, 10, 7]], 0.0); - assert_eq!(array[[1, 10, 8]], 
0.0); - assert_eq!(array.zeros(), 8); - - array[[1, 15, 0]] = 10.0; - assert_eq!(array[[1, 15, 0]], 10.0); - assert_eq!(array[[1, 15, 4]], 9.0); - assert_eq!(array[[1, 15, 2]], 8.0); - assert_eq!(array[[1, 10, 2]], 7.0); - assert_eq!(array[[1, 10, 0]], 6.0); - assert_eq!(array[[1, 10, 9]], 5.0); - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 10); - assert_eq!(array.x_range(), 1..9); - assert_eq!(array.overhead(), 802); - assert!(!array.is_empty()); - - // check zeros - assert_eq!(array[[1, 15, 1]], 0.0); - assert_eq!(array[[1, 15, 3]], 0.0); - assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array[[1, 10, 3]], 0.0); - assert_eq!(array[[1, 10, 4]], 0.0); - assert_eq!(array[[1, 10, 5]], 0.0); - assert_eq!(array[[1, 10, 6]], 0.0); - assert_eq!(array[[1, 10, 7]], 0.0); - assert_eq!(array[[1, 10, 8]], 0.0); - assert_eq!(array.zeros(), 9); - } - - #[test] - #[should_panic(expected = "explicit panic")] - fn index_mut_panic_dim0() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[40, 0, 50]] = 1.0; - } - - #[test] - #[should_panic(expected = "assertion failed: index[1] < dim1")] - fn index_mut_panic_dim1() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[0, 50, 0]] = 1.0; - } - - #[test] - #[should_panic(expected = "explicit panic")] - fn index_mut_panic_dim2() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[0, 0, 50]] = 1.0; - } - - #[test] - #[should_panic(expected = "assertion failed: index[0] >= self.start")] - fn index_panic_dim0_0() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[1, 0, 0]] = 1.0; - - assert_eq!(array[[0, 0, 0]], 0.0); - } - - #[test] - #[should_panic( - expected = "assertion failed: index[0] < (self.start + (self.indices.len() - 1) / dim1)" - )] - fn index_panic_dim0_1() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[1, 0, 0]] = 1.0; - - 
assert_eq!(array[[2, 0, 0]], 0.0); - } - - #[test] - #[should_panic(expected = "assertion failed: index[1] < dim1")] - fn index_panic_dim1() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[1, 0, 0]] = 1.0; - - assert_eq!(array[[1, 50, 0]], 0.0); - } - - #[test] - #[should_panic(expected = "assertion failed: index[2] >= zeros_left")] - fn index_panic_dim2_0() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[0, 0, 1]] = 1.0; - - assert_eq!(array[[0, 0, 0]], 0.0); - } - - #[test] - #[should_panic(expected = "assertion failed: index[2] < (non_zeros + zeros_left)")] - fn index_panic_dim2_1() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[0, 0, 1]] = 1.0; - - assert_eq!(array[[0, 0, 2]], 0.0); - } - - #[test] - fn indexed_iter() { - let mut array = SparseArray3::new(40, 50, 50); - - // check empty iterator - assert_eq!(array.indexed_iter().next(), None); - - // insert an element - array[[2, 3, 4]] = 1.0; - - let mut iter = array.indexed_iter(); - - // check iterator with one element - assert_eq!(iter.next(), Some(((2, 3, 4), 1.0))); - assert_eq!(iter.next(), None); - - // insert another element - array[[2, 3, 6]] = 2.0; - - let mut iter = array.indexed_iter(); - - assert_eq!(iter.next(), Some(((2, 3, 4), 1.0))); - assert_eq!(iter.next(), Some(((2, 3, 6), 2.0))); - assert_eq!(iter.next(), None); - - // insert yet another element - array[[4, 5, 7]] = 3.0; - - let mut iter = array.indexed_iter(); - - assert_eq!(iter.next(), Some(((2, 3, 4), 1.0))); - assert_eq!(iter.next(), Some(((2, 3, 6), 2.0))); - assert_eq!(iter.next(), Some(((4, 5, 7), 3.0))); - assert_eq!(iter.next(), None); - - // insert at the very first position - array[[2, 0, 0]] = 4.0; - - let mut iter = array.indexed_iter(); - - assert_eq!(iter.next(), Some(((2, 0, 0), 4.0))); - assert_eq!(iter.next(), Some(((2, 3, 4), 1.0))); - assert_eq!(iter.next(), Some(((2, 3, 6), 2.0))); - assert_eq!(iter.next(), Some(((4, 5, 7), 3.0))); - assert_eq!(iter.next(), None); - } - - 
#[test] - fn iter_mut() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[3, 5, 1]] = 1.0; - array[[7, 8, 9]] = 2.0; - array[[7, 8, 13]] = 3.0; - array[[9, 1, 4]] = 4.0; - - let mut iter = array.iter_mut(); - - assert_eq!(iter.next(), Some(&mut 1.0)); - assert_eq!(iter.next(), Some(&mut 2.0)); - assert_eq!(iter.next(), Some(&mut 0.0)); - assert_eq!(iter.next(), Some(&mut 0.0)); - assert_eq!(iter.next(), Some(&mut 0.0)); - assert_eq!(iter.next(), Some(&mut 3.0)); - assert_eq!(iter.next(), Some(&mut 4.0)); - assert_eq!(iter.next(), None); - } - - #[test] - fn clear() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[3, 5, 1]] = 1.0; - array[[7, 8, 9]] = 2.0; - array[[9, 1, 4]] = 3.0; - - assert!(!array.is_empty()); - assert_eq!(array.len(), 3); - assert_eq!(array.zeros(), 0); - assert_eq!(array.x_range(), 3..10); - - array.clear(); - - assert!(array.is_empty()); - assert_eq!(array.len(), 0); - assert_eq!(array.zeros(), 0); - assert_eq!(array.x_range(), 0..0); - } - - #[test] - fn remove_x() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[1, 5, 6]] = 1.0; - array[[1, 6, 5]] = 2.0; - array[[1, 2, 3]] = 3.0; - array[[1, 9, 3]] = 4.0; - array[[1, 8, 4]] = 5.0; - array[[2, 0, 0]] = 6.0; - array[[3, 4, 5]] = 7.0; - array[[3, 4, 6]] = 8.0; - array[[3, 4, 7]] = 9.0; - array[[4, 0, 2]] = 10.0; - array[[4, 0, 3]] = 11.0; - array[[5, 0, 1]] = 12.0; - array[[5, 0, 2]] = 13.0; - - assert_eq!(array.x_range(), 1..6); - assert_eq!(array.len(), 13); - assert_eq!(array.zeros(), 0); - - // remove the first five entries - array.remove_x(1); - - assert_eq!(array.x_range(), 2..6); - assert_eq!(array.len(), 8); - assert_eq!(array.zeros(), 0); - - // remove the last two entries - array.remove_x(5); - - assert_eq!(array.x_range(), 2..5); - assert_eq!(array.len(), 6); - assert_eq!(array.zeros(), 0); - - // remove the from the middle - array.remove_x(3); - - assert_eq!(array.x_range(), 2..5); - assert_eq!(array.len(), 3); - assert_eq!(array.zeros(), 0); - 
- // remove also the rest - array.remove_x(4); - array.remove_x(2); - - assert_eq!(array.x_range(), 0..0); - assert_eq!(array.len(), 0); - assert_eq!(array.zeros(), 0); - } - - #[test] - #[should_panic(expected = "assertion failed: (x >= self.start) && (x < self.start + nx)")] - fn remove_x_panic() { - let mut array = SparseArray3::::new(40, 50, 50); - - array.remove_x(0); - } - - #[test] - fn increase_at_x() { - let mut array = SparseArray3::new(1, 50, 50); - - array[[0, 0, 0]] = 1.0; - array[[0, 2, 3]] = 2.0; - array[[0, 2, 4]] = 3.0; - array[[0, 2, 5]] = 4.0; - array[[0, 3, 0]] = 5.0; - array[[0, 49, 49]] = 6.0; - - assert_eq!(array.dimensions(), (1, 50, 50)); - assert_eq!(array[[0, 0, 0]], 1.0); - assert_eq!(array[[0, 2, 3]], 2.0); - assert_eq!(array[[0, 2, 4]], 3.0); - assert_eq!(array[[0, 2, 5]], 4.0); - assert_eq!(array[[0, 3, 0]], 5.0); - assert_eq!(array[[0, 49, 49]], 6.0); - - // increase at the end - array.increase_x_at(1); - - assert_eq!(array.dimensions(), (2, 50, 50)); - assert_eq!(array[[0, 0, 0]], 1.0); - assert_eq!(array[[0, 2, 3]], 2.0); - assert_eq!(array[[0, 2, 4]], 3.0); - assert_eq!(array[[0, 2, 5]], 4.0); - assert_eq!(array[[0, 3, 0]], 5.0); - assert_eq!(array[[0, 49, 49]], 6.0); - - array[[1, 5, 0]] = 7.0; - array[[1, 5, 5]] = 8.0; - array[[1, 6, 3]] = 9.0; - array[[1, 6, 0]] = 10.0; - - assert_eq!(array[[0, 0, 0]], 1.0); - assert_eq!(array[[0, 2, 3]], 2.0); - assert_eq!(array[[0, 2, 4]], 3.0); - assert_eq!(array[[0, 2, 5]], 4.0); - assert_eq!(array[[0, 3, 0]], 5.0); - assert_eq!(array[[0, 49, 49]], 6.0); - assert_eq!(array[[1, 5, 0]], 7.0); - assert_eq!(array[[1, 5, 5]], 8.0); - assert_eq!(array[[1, 6, 3]], 9.0); - assert_eq!(array[[1, 6, 0]], 10.0); - - // increase at the start - array.increase_x_at(0); - - assert_eq!(array.dimensions(), (3, 50, 50)); - assert_eq!(array[[1, 0, 0]], 1.0); - assert_eq!(array[[1, 2, 3]], 2.0); - assert_eq!(array[[1, 2, 4]], 3.0); - assert_eq!(array[[1, 2, 5]], 4.0); - assert_eq!(array[[1, 3, 0]], 5.0); - 
assert_eq!(array[[1, 49, 49]], 6.0); - assert_eq!(array[[2, 5, 0]], 7.0); - assert_eq!(array[[2, 5, 5]], 8.0); - assert_eq!(array[[2, 6, 3]], 9.0); - assert_eq!(array[[2, 6, 0]], 10.0); - - // increase at the end - array.increase_x_at(3); - - assert_eq!(array.dimensions(), (4, 50, 50)); - assert_eq!(array[[1, 0, 0]], 1.0); - assert_eq!(array[[1, 2, 3]], 2.0); - assert_eq!(array[[1, 2, 4]], 3.0); - assert_eq!(array[[1, 2, 5]], 4.0); - assert_eq!(array[[1, 3, 0]], 5.0); - assert_eq!(array[[1, 49, 49]], 6.0); - assert_eq!(array[[2, 5, 0]], 7.0); - assert_eq!(array[[2, 5, 5]], 8.0); - assert_eq!(array[[2, 6, 3]], 9.0); - assert_eq!(array[[2, 6, 0]], 10.0); - - // increase after the end - array.increase_x_at(5); - - assert_eq!(array.dimensions(), (6, 50, 50)); - assert_eq!(array[[1, 0, 0]], 1.0); - assert_eq!(array[[1, 2, 3]], 2.0); - assert_eq!(array[[1, 2, 4]], 3.0); - assert_eq!(array[[1, 2, 5]], 4.0); - assert_eq!(array[[1, 3, 0]], 5.0); - assert_eq!(array[[1, 49, 49]], 6.0); - assert_eq!(array[[2, 5, 0]], 7.0); - assert_eq!(array[[2, 5, 5]], 8.0); - assert_eq!(array[[2, 6, 3]], 9.0); - assert_eq!(array[[2, 6, 0]], 10.0); - - // increase in the middle - array.increase_x_at(2); - - assert_eq!(array.dimensions(), (7, 50, 50)); - assert_eq!(array[[1, 0, 0]], 1.0); - assert_eq!(array[[1, 2, 3]], 2.0); - assert_eq!(array[[1, 2, 4]], 3.0); - assert_eq!(array[[1, 2, 5]], 4.0); - assert_eq!(array[[1, 3, 0]], 5.0); - assert_eq!(array[[1, 49, 49]], 6.0); - assert_eq!(array[[3, 5, 0]], 7.0); - assert_eq!(array[[3, 5, 5]], 8.0); - assert_eq!(array[[3, 6, 3]], 9.0); - assert_eq!(array[[3, 6, 0]], 10.0); - } - - #[test] - fn from_ndarray() { - let mut ndarray = Array3::zeros((2, 50, 50)); - - ndarray[[0, 4, 3]] = 1.0; - ndarray[[0, 4, 4]] = 2.0; - ndarray[[0, 4, 6]] = 3.0; - ndarray[[0, 5, 1]] = 4.0; - ndarray[[0, 5, 7]] = 5.0; - ndarray[[1, 3, 9]] = 6.0; - - let array = SparseArray3::from_ndarray(ndarray.view(), 3, 40); - - assert_eq!(array[[3, 4, 3]], 1.0); - 
assert_eq!(array[[3, 4, 4]], 2.0); - assert_eq!(array[[3, 4, 5]], 0.0); - assert_eq!(array[[3, 4, 6]], 3.0); - assert_eq!(array[[3, 5, 1]], 4.0); - assert_eq!(array[[3, 5, 2]], 0.0); - assert_eq!(array[[3, 5, 3]], 0.0); - assert_eq!(array[[3, 5, 4]], 0.0); - assert_eq!(array[[3, 5, 5]], 0.0); - assert_eq!(array[[3, 5, 6]], 0.0); - assert_eq!(array[[3, 5, 7]], 5.0); - assert_eq!(array[[4, 3, 9]], 6.0); - - assert_eq!(array.len(), 6); - assert_eq!(array.zeros(), 6); - } - - #[test] - fn test_index_swap() { - let mut array = SparseArray3::new(5, 50, 2); - - array[[0, 0, 0]] = 1.0; - array[[0, 0, 1]] = 2.0; - array[[1, 2, 1]] = 3.0; - array[[1, 5, 1]] = 4.0; - array[[1, 6, 0]] = 5.0; - array[[1, 8, 0]] = 6.0; - array[[1, 9, 0]] = 7.0; - array[[2, 0, 0]] = 8.0; - array[[3, 2, 1]] = 9.0; - array[[3, 4, 0]] = 10.0; - array[[3, 4, 1]] = 11.0; - array[[4, 0, 0]] = 12.0; - array[[4, 0, 1]] = 13.0; - - assert_eq!(array[[0, 0, 0]], 1.0); - assert_eq!(array[[0, 0, 1]], 2.0); - assert_eq!(array[[1, 2, 1]], 3.0); - assert_eq!(array[[1, 5, 1]], 4.0); - assert_eq!(array[[1, 6, 0]], 5.0); - assert_eq!(array[[1, 8, 0]], 6.0); - assert_eq!(array[[1, 9, 0]], 7.0); - assert_eq!(array[[2, 0, 0]], 8.0); - assert_eq!(array[[3, 2, 1]], 9.0); - assert_eq!(array[[3, 4, 0]], 10.0); - assert_eq!(array[[3, 4, 1]], 11.0); - assert_eq!(array[[4, 0, 0]], 12.0); - assert_eq!(array[[4, 0, 1]], 13.0); - - assert_eq!(array.x_range(), 0..5); - - let mut iter = array.indexed_iter(); - - assert_eq!(iter.next(), Some(((0, 0, 0), 1.0))); - assert_eq!(iter.next(), Some(((0, 0, 1), 2.0))); - assert_eq!(iter.next(), Some(((1, 6, 0), 5.0))); - assert_eq!(iter.next(), Some(((1, 8, 0), 6.0))); - assert_eq!(iter.next(), Some(((1, 9, 0), 7.0))); - assert_eq!(iter.next(), Some(((1, 2, 1), 3.0))); - assert_eq!(iter.next(), Some(((1, 5, 1), 4.0))); - assert_eq!(iter.next(), Some(((2, 0, 0), 8.0))); - assert_eq!(iter.next(), Some(((3, 4, 0), 10.0))); - assert_eq!(iter.next(), Some(((3, 2, 1), 9.0))); - 
assert_eq!(iter.next(), Some(((3, 4, 1), 11.0))); - assert_eq!(iter.next(), Some(((4, 0, 0), 12.0))); - assert_eq!(iter.next(), Some(((4, 0, 1), 13.0))); - assert_eq!(iter.next(), None); - - let mut ndarray = Array3::zeros((5, 50, 2)); - - ndarray[[0, 0, 0]] = 1.0; - ndarray[[0, 0, 1]] = 2.0; - ndarray[[1, 2, 1]] = 3.0; - ndarray[[1, 5, 1]] = 4.0; - ndarray[[1, 6, 0]] = 5.0; - ndarray[[1, 8, 0]] = 6.0; - ndarray[[1, 9, 0]] = 7.0; - ndarray[[2, 0, 0]] = 8.0; - ndarray[[3, 2, 1]] = 9.0; - ndarray[[3, 4, 0]] = 10.0; - ndarray[[3, 4, 1]] = 11.0; - ndarray[[4, 0, 0]] = 12.0; - ndarray[[4, 0, 1]] = 13.0; - - let mut other = SparseArray3::from_ndarray(ndarray.view(), 0, 5); - - assert_eq!(other[[0, 0, 0]], 1.0); - assert_eq!(other[[0, 0, 1]], 2.0); - assert_eq!(other[[1, 2, 1]], 3.0); - assert_eq!(other[[1, 5, 1]], 4.0); - assert_eq!(other[[1, 6, 0]], 5.0); - assert_eq!(other[[1, 8, 0]], 6.0); - assert_eq!(other[[1, 9, 0]], 7.0); - assert_eq!(other[[2, 0, 0]], 8.0); - assert_eq!(other[[3, 2, 1]], 9.0); - assert_eq!(other[[3, 4, 0]], 10.0); - assert_eq!(other[[3, 4, 1]], 11.0); - assert_eq!(other[[4, 0, 0]], 12.0); - assert_eq!(other[[4, 0, 1]], 13.0); - - assert_eq!(other.x_range(), 0..5); - - other.remove_x(0); - - assert_eq!(other[[1, 2, 1]], 3.0); - assert_eq!(other[[1, 5, 1]], 4.0); - assert_eq!(other[[1, 6, 0]], 5.0); - assert_eq!(other[[1, 8, 0]], 6.0); - assert_eq!(other[[1, 9, 0]], 7.0); - assert_eq!(other[[2, 0, 0]], 8.0); - assert_eq!(other[[3, 2, 1]], 9.0); - assert_eq!(other[[3, 4, 0]], 10.0); - assert_eq!(other[[3, 4, 1]], 11.0); - assert_eq!(other[[4, 0, 0]], 12.0); - assert_eq!(other[[4, 0, 1]], 13.0); - - other.remove_x(3); - - assert_eq!(other[[1, 2, 1]], 3.0); - assert_eq!(other[[1, 5, 1]], 4.0); - assert_eq!(other[[1, 6, 0]], 5.0); - assert_eq!(other[[1, 8, 0]], 6.0); - assert_eq!(other[[1, 9, 0]], 7.0); - assert_eq!(other[[2, 0, 0]], 8.0); - assert_eq!(other[[4, 0, 0]], 12.0); - assert_eq!(other[[4, 0, 1]], 13.0); - - other.remove_x(4); - - 
assert_eq!(other[[1, 2, 1]], 3.0); - assert_eq!(other[[1, 5, 1]], 4.0); - assert_eq!(other[[1, 6, 0]], 5.0); - assert_eq!(other[[1, 8, 0]], 6.0); - assert_eq!(other[[1, 9, 0]], 7.0); - assert_eq!(other[[2, 0, 0]], 8.0); - } - - // https://github.com/NNPDF/pineappl/issues/220 - #[test] - fn regression_test_220() { - let mut array = SparseArray3::new(1, 2, 4); - - array[[0, 0, 0]] = 1.0; - - assert_eq!(array[[0, 0, 0]], 1.0); - - assert_eq!(array.x_range(), 0..1); - - let mut iter = array.indexed_iter(); - - assert_eq!(iter.next(), Some(((0, 0, 0), 1.0))); - assert_eq!(iter.next(), None); - - array.increase_x_at(0); - - array[[0, 0, 0]] = 2.0; - - let mut iter = array.indexed_iter(); - - assert_eq!(iter.next(), Some(((0, 0, 0), 2.0))); - assert_eq!(iter.next(), Some(((1, 0, 0), 1.0))); - assert_eq!(iter.next(), None); - - array.increase_x_at(1); - - array[[1, 0, 0]] = 3.0; - - let mut iter = array.indexed_iter(); - - assert_eq!(iter.next(), Some(((0, 0, 0), 2.0))); - assert_eq!(iter.next(), Some(((1, 0, 0), 3.0))); - assert_eq!(iter.next(), Some(((2, 0, 0), 1.0))); - assert_eq!(iter.next(), None); - } -} diff --git a/pineappl/src/subgrid.rs b/pineappl/src/subgrid.rs index f71268b5a..24f11cb40 100644 --- a/pineappl/src/subgrid.rs +++ b/pineappl/src/subgrid.rs @@ -1,44 +1,36 @@ //! Module containing the trait `Subgrid` and supporting structs. 
use super::empty_subgrid::EmptySubgridV1; -use super::grid::Ntuple; -use super::import_only_subgrid::{ImportOnlySubgridV1, ImportOnlySubgridV2}; -use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, LagrangeSubgridV2}; -use super::ntuple_subgrid::NtupleSubgridV1; +use super::import_subgrid::ImportSubgridV1; +use super::interp_subgrid::InterpSubgridV1; +use super::interpolation::Interp; use enum_dispatch::enum_dispatch; -use ndarray::Array3; +use float_cmp::approx_eq; use serde::{Deserialize, Serialize}; -use std::borrow::Cow; + +/// TODO +#[must_use] +pub fn node_value_eq(lhs: f64, rhs: f64) -> bool { + approx_eq!(f64, lhs, rhs, ulps = 4096) +} + +/// TODO +#[must_use] +pub fn node_value_eq_ref_mut(lhs: &mut f64, rhs: &mut f64) -> bool { + node_value_eq(*lhs, *rhs) +} /// Enum which lists all possible `Subgrid` variants possible. #[enum_dispatch(Subgrid)] #[derive(Clone, Deserialize, Serialize)] pub enum SubgridEnum { // WARNING: never change the order or content of this enum, only add to the end of it - /// Lagrange-interpolation subgrid. - LagrangeSubgridV1, - /// N-tuple subgrid. - NtupleSubgridV1, - /// Lagrange-interpolation subgrid. - LagrangeSparseSubgridV1, - /// Lagrange-interpolation subgrid with possibly different x1 and x2 bins. - LagrangeSubgridV2, - /// Import-only sparse subgrid with possibly different x1 and x2 bins. - ImportOnlySubgridV1, + /// Subgrid type that supports filling. + InterpSubgridV1, /// Empty subgrid. EmptySubgridV1, - /// Same as [`ImportOnlySubgridV1`], but with support for different renormalization and - /// factorization scales choices. - ImportOnlySubgridV2, -} - -/// Structure denoting renormalization and factorization scale values. -#[derive(Debug, Deserialize, Clone, PartialEq, PartialOrd, Serialize)] -pub struct Mu2 { - /// The (squared) renormalization scale value. - pub ren: f64, - /// The (squared) factorization scale value. 
- pub fac: f64, + /// TODO + ImportSubgridV1, } /// Size-related statistics for a subgrid. @@ -64,49 +56,30 @@ pub struct Stats { /// Trait each subgrid must implement. #[enum_dispatch] pub trait Subgrid { - /// Return a slice of [`Mu2`] values corresponding to the (squared) renormalization and - /// factorization values of the grid. If the subgrid does not use a grid, this method should - /// return an empty slice. - fn mu2_grid(&self) -> Cow<[Mu2]>; + /// TODO + fn node_values(&self) -> Vec>; - /// Return a slice of values of `x1`. If the subgrid does not use a grid, this method should - /// return an empty slice. - fn x1_grid(&self) -> Cow<[f64]>; - - /// Return a slice of values of `x2`. If the subgrid does not use a grid, this method should - /// return an empty slice. - fn x2_grid(&self) -> Cow<[f64]>; - - /// Convolute the subgrid with a luminosity function, which takes indices as arguments that - /// correspond to the entries given in the slices `x1`, `x2` and `mu2`. - fn convolve( - &self, - x1: &[f64], - x2: &[f64], - mu2: &[Mu2], - lumi: &mut dyn FnMut(usize, usize, usize) -> f64, - ) -> f64; - - /// Fills the subgrid with `weight` for the parton momentum fractions `x1` and `x2`, and the - /// scale `q2`. Filling is currently only support where both renormalization and factorization - /// scale have the same value. - fn fill(&mut self, ntuple: &Ntuple); + /// Fill the subgrid with `weight` that is being interpolated with `interps` using the + /// kinematic information in `ntuple`. The parameter `ntuple` assumes the same ordering given + /// by `kinematics` in [`Grid::new`] that was used to create the grid. + fn fill(&mut self, interps: &[Interp], ntuple: &[f64], weight: f64); /// Returns true if `fill` was never called for this grid. fn is_empty(&self) -> bool; - /// Merges `other` into this subgrid. 
- fn merge(&mut self, other: &mut SubgridEnum, transpose: bool); + /// Merge `other` into this subgrid, possibly transposing the two dimensions given by + /// `transpose`. + fn merge(&mut self, other: &SubgridEnum, transpose: Option<(usize, usize)>); /// Scale the subgrid by `factor`. fn scale(&mut self, factor: f64); - /// Assumes that the initial states for this grid are the same and uses this to optimize the - /// grid by getting rid of almost half of the entries. - fn symmetrize(&mut self); + /// Return the shape of the subgrid + fn shape(&mut self) -> &[usize]; - /// Returns an empty copy of the current subgrid. - fn clone_empty(&self) -> SubgridEnum; + /// Assume that the convolution functions for indices `a` and `b` for this grid are the same + /// and use this to optimize the size of the grid. + fn symmetrize(&mut self, a: usize, b: usize); /// Return an iterator over all non-zero elements of the subgrid. fn indexed_iter(&self) -> SubgridIndexedIter; @@ -114,248 +87,87 @@ pub trait Subgrid { /// Return statistics for this subgrid. fn stats(&self) -> Stats; - /// Return the static (single) scale, if this subgrid has one. - fn static_scale(&self) -> Option; -} - -// this is needed in the Python interface -impl From<&SubgridEnum> for Array3 { - fn from(subgrid: &SubgridEnum) -> Self { - let mut result = Self::zeros(( - subgrid.mu2_grid().len(), - subgrid.x1_grid().len(), - subgrid.x2_grid().len(), - )); - - for ((imu2, ix1, ix2), value) in subgrid.indexed_iter() { - result[[imu2, ix1, ix2]] = value; - } - - result - } + /// TODO + fn optimize_nodes(&mut self); } /// Type to iterate over the non-zero contents of a subgrid. The tuple contains the indices of the /// `mu2_grid`, the `x1_grid` and finally the `x2_grid`. -pub type SubgridIndexedIter<'a> = Box + 'a>; - -/// Subgrid creation parameters for subgrids that perform interpolation. 
-#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct SubgridParams { - q2_bins: usize, - q2_max: f64, - q2_min: f64, - q2_order: usize, - reweight: bool, - x_bins: usize, - x_max: f64, - x_min: f64, - x_order: usize, -} - -impl Default for SubgridParams { - fn default() -> Self { - Self { - q2_bins: 40, - q2_max: 1e8, - q2_min: 1e2, - q2_order: 3, - reweight: true, - x_bins: 50, - x_max: 1.0, - x_min: 2e-7, - x_order: 3, - } - } -} - -impl SubgridParams { - /// Returns the number of bins for the $Q^2$ axis. - #[must_use] - pub const fn q2_bins(&self) -> usize { - self.q2_bins - } - - /// Returns the upper limit of the $Q^2$ axis. - #[must_use] - pub const fn q2_max(&self) -> f64 { - self.q2_max - } - - /// Returns the lower limit of the $Q^2$ axis. - #[must_use] - pub const fn q2_min(&self) -> f64 { - self.q2_min - } - - /// Returns the interpolation order for the $Q^2$ axis. - #[must_use] - pub const fn q2_order(&self) -> usize { - self.q2_order - } - - /// Returns whether reweighting is enabled or not. - #[must_use] - pub const fn reweight(&self) -> bool { - self.reweight - } - - /// Sets the number of bins for the $Q^2$ axis. - pub fn set_q2_bins(&mut self, q2_bins: usize) { - self.q2_bins = q2_bins; - } - - /// Sets the upper limit of the $Q^2$ axis. - pub fn set_q2_max(&mut self, q2_max: f64) { - self.q2_max = q2_max; - } - - /// Sets the lower limit of the $Q^2$ axis. - pub fn set_q2_min(&mut self, q2_min: f64) { - self.q2_min = q2_min; - } - - /// Sets the interpolation order for the $Q^2$ axis. - pub fn set_q2_order(&mut self, q2_order: usize) { - self.q2_order = q2_order; - } - - /// Sets the reweighting parameter. - pub fn set_reweight(&mut self, reweight: bool) { - self.reweight = reweight; - } - - /// Sets the number of bins for the $x$ axes. - pub fn set_x_bins(&mut self, x_bins: usize) { - self.x_bins = x_bins; - } - - /// Sets the upper limit of the $x$ axes. 
- pub fn set_x_max(&mut self, x_max: f64) { - self.x_max = x_max; - } - - /// Sets the lower limit of the $x$ axes. - pub fn set_x_min(&mut self, x_min: f64) { - self.x_min = x_min; - } - - /// Sets the interpolation order for the $x$ axes. - pub fn set_x_order(&mut self, x_order: usize) { - self.x_order = x_order; - } - - /// Returns the number of bins for the $x$ axes. - #[must_use] - pub const fn x_bins(&self) -> usize { - self.x_bins - } - - /// Returns the upper limit of the $x$ axes. - #[must_use] - pub const fn x_max(&self) -> f64 { - self.x_max - } - - /// Returns the lower limit of the $x$ axes. - #[must_use] - pub const fn x_min(&self) -> f64 { - self.x_min - } - - /// Returns the interpolation order for the $x$ axes. - #[must_use] - pub const fn x_order(&self) -> usize { - self.x_order - } -} - -/// Extra grid creation parameters when the limits for `x1` and `x2` are different. -pub struct ExtraSubgridParams { - reweight2: bool, - x2_bins: usize, - x2_max: f64, - x2_min: f64, - x2_order: usize, -} - -impl Default for ExtraSubgridParams { - fn default() -> Self { - Self { - reweight2: true, - x2_bins: 50, - x2_max: 1.0, - x2_min: 2e-7, - x2_order: 3, - } - } -} - -impl From<&SubgridParams> for ExtraSubgridParams { - fn from(subgrid_params: &SubgridParams) -> Self { - Self { - reweight2: subgrid_params.reweight(), - x2_bins: subgrid_params.x_bins(), - x2_max: subgrid_params.x_max(), - x2_min: subgrid_params.x_min(), - x2_order: subgrid_params.x_order(), +pub type SubgridIndexedIter<'a> = Box, f64)> + 'a>; + +#[cfg(test)] +mod tests { + use super::*; + use crate::interpolation::{Interp, InterpMeth, Map, ReweightMeth}; + + #[test] + fn check_old_x_values() { + let new_50_x = Interp::new( + 2e-7, + 1.0, + 50, + 3, + ReweightMeth::ApplGridX, + Map::ApplGridF2, + InterpMeth::Lagrange, + ) + .node_values(); + let old_50_x = [ + 1.0, + 9.309440808717544e-1, + 8.627839323906108e-1, + 7.956242522922756e-1, + 7.295868442414312e-1, + 6.648139482473823e-1, + 
6.01472197967335e-1, + 5.397572337880445e-1, + 4.798989029610255e-1, + 4.221667753589648e-1, + 3.668753186482242e-1, + 3.1438740076927585e-1, + 2.651137041582823e-1, + 2.195041265003886e-1, + 1.7802566042569432e-1, + 1.4112080644440345e-1, + 1.0914375746330703e-1, + 8.228122126204893e-2, + 6.0480028754447364e-2, + 4.341491741702269e-2, + 3.0521584007828916e-2, + 2.108918668378717e-2, + 1.4375068581090129e-2, + 9.699159574043399e-3, + 6.496206194633799e-3, + 4.328500638820811e-3, + 2.8738675812817515e-3, + 1.9034634022867384e-3, + 1.2586797144272762e-3, + 8.314068836488144e-4, + 5.487795323670796e-4, + 3.6205449638139736e-4, + 2.3878782918561914e-4, + 1.5745605600841445e-4, + 1.0381172986576898e-4, + 6.843744918967897e-5, + 4.511438394964044e-5, + 2.97384953722449e-5, + 1.9602505002391748e-5, + 1.292101569074731e-5, + 8.516806677573355e-6, + 5.613757716930151e-6, + 3.7002272069854957e-6, + 2.438943292891682e-6, + 1.607585498470808e-6, + 1.0596094959101024e-6, + 6.984208530700364e-7, + 4.6035014748963906e-7, + 3.034304765867952e-7, + 1.9999999999999954e-7, + ]; + + // check that the old x-grid values are 'equal' to the new ones + for (old, new) in old_50_x.into_iter().zip(new_50_x) { + assert!(node_value_eq(old, new), "{old} {new}"); } } } - -impl ExtraSubgridParams { - /// Returns whether reweighting is enabled for the `x2` axis or not. - #[must_use] - pub const fn reweight2(&self) -> bool { - self.reweight2 - } - - /// Sets the reweighting parameter for the `x2` axis. - pub fn set_reweight2(&mut self, reweight2: bool) { - self.reweight2 = reweight2; - } - - /// Sets the number of bins for the `x2` axes. - pub fn set_x2_bins(&mut self, x_bins: usize) { - self.x2_bins = x_bins; - } - - /// Sets the upper limit of the `x2` axes. - pub fn set_x2_max(&mut self, x_max: f64) { - self.x2_max = x_max; - } - - /// Sets the lower limit of the `x2` axes. 
- pub fn set_x2_min(&mut self, x_min: f64) { - self.x2_min = x_min; - } - - /// Sets the interpolation order for the `x2` axes. - pub fn set_x2_order(&mut self, x_order: usize) { - self.x2_order = x_order; - } - - /// Returns the number of bins for the `x2` axes. - #[must_use] - pub const fn x2_bins(&self) -> usize { - self.x2_bins - } - - /// Returns the upper limit of the `x2` axes. - #[must_use] - pub const fn x2_max(&self) -> f64 { - self.x2_max - } - - /// Returns the lower limit of the `x2` axes. - #[must_use] - pub const fn x2_min(&self) -> f64 { - self.x2_min - } - - /// Returns the interpolation order for the `x2` axes. - #[must_use] - pub const fn x2_order(&self) -> usize { - self.x2_order - } -} diff --git a/pineappl/src/v0.rs b/pineappl/src/v0.rs new file mode 100644 index 000000000..5a8cdc79a --- /dev/null +++ b/pineappl/src/v0.rs @@ -0,0 +1,253 @@ +use super::bin::BinRemapper; +use super::boc::{Channel, Kinematics, Order, ScaleFuncForm, Scales}; +use super::convolutions::{Conv, ConvType}; +use super::grid::{Grid, GridError}; +use super::import_subgrid::ImportSubgridV1; +use super::interpolation::{Interp, InterpMeth, Map, ReweightMeth}; +use super::packed_array::PackedArray; +use super::pids::PidBasis; +use super::subgrid; +use pineappl_v0::grid::Grid as GridV0; +use std::io::BufRead; +use std::iter; + +pub fn default_interps(convolutions: usize) -> Vec { + let mut interps = vec![Interp::new( + 1e2, + 1e8, + 40, + 3, + ReweightMeth::NoReweight, + Map::ApplGridH0, + InterpMeth::Lagrange, + )]; + + for _ in 0..convolutions { + interps.push(Interp::new( + 2e-7, + 1.0, + 50, + 3, + ReweightMeth::ApplGridX, + Map::ApplGridF2, + InterpMeth::Lagrange, + )); + } + + interps +} + +pub fn read_uncompressed_v0(mut reader: impl BufRead) -> Result { + use pineappl_v0::subgrid::Subgrid as _; + + let grid = GridV0::read(&mut reader).map_err(|err| GridError::Other(err.into()))?; + let convolutions = read_convolutions_from_metadata(&grid); + + // TODO: read in 
flexible-scale grids properly + let mut kinematics = vec![Kinematics::Scale(0), Kinematics::X(0)]; + if convolutions[0].is_some() && convolutions[1].is_some() { + kinematics.push(Kinematics::X(1)); + } + + assert_eq!(convolutions.len(), 2); + + let mut result = Grid::new( + grid.key_values() + .and_then(|kv| kv.get("lumi_id_types")) + // TODO: use PidBasis::from_str + .map_or(PidBasis::Pdg, |lumi_id_types| { + match lumi_id_types.as_str() { + "pdg_mc_ids" => PidBasis::Pdg, + "evol" => PidBasis::Evol, + _ => panic!("unknown PID basis '{lumi_id_types}'"), + } + }), + grid.channels() + .iter() + .map(|c| { + Channel::new( + c.entry() + .iter() + .map(|&(a, b, f)| { + let mut pids = Vec::new(); + if convolutions[0].is_some() { + pids.push(a); + } + if convolutions[1].is_some() { + pids.push(b); + }; + (pids, f) + }) + .collect(), + ) + }) + .collect(), + grid.orders() + .iter() + .map(|o| Order { + // UNWRAP: there shouldn't be orders with exponents larger than 255 + alphas: o.alphas.try_into().unwrap(), + alpha: o.alpha.try_into().unwrap(), + logxir: o.logxir.try_into().unwrap(), + logxif: o.logxif.try_into().unwrap(), + logxia: 0, + }) + .collect(), + if grid.remapper().is_none() { + let limits = &grid.bin_info().limits(); + iter::once(limits[0][0].0) + .chain(limits.iter().map(|v| v[0].1)) + .collect() + } else { + // if there is a BinRemapper this member will likely have no impact + (0..=grid.bin_info().bins()) + .map(|i| f64::from(u16::try_from(i).unwrap())) + .collect() + }, + convolutions.clone().into_iter().flatten().collect(), + default_interps(convolutions.clone().into_iter().flatten().count()), + kinematics, + Scales { + // TODO: read in flexible-scale grids properly + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, + }, + ); + + for (new_subgrid, old_subgrid) in result.subgrids_mut().iter_mut().zip(grid.subgrids().iter()) { + if !old_subgrid.is_empty() { + let scale_node_values: Vec<_> = old_subgrid + .mu2_grid() 
+ .iter() + .map(|mu2v0| { + // TODO: implement importing flexible-scale grids + assert!(subgrid::node_value_eq(mu2v0.ren, mu2v0.fac)); + + mu2v0.fac + }) + .collect(); + + let mut dim = vec![scale_node_values.len()]; + if convolutions[0].is_some() { + dim.push(old_subgrid.x1_grid().len()); + } + if convolutions[1].is_some() { + dim.push(old_subgrid.x2_grid().len()); + } + let mut array = PackedArray::new(dim); + + if convolutions[0].is_none() { + for (index, v) in old_subgrid.indexed_iter() { + array[[index.0, index.2]] = v; + } + } else if convolutions[1].is_none() { + for (index, v) in old_subgrid.indexed_iter() { + array[[index.0, index.1]] = v; + } + } else { + for (index, v) in old_subgrid.indexed_iter() { + array[<[usize; 3]>::from(index)] = v; + } + } + + let mut node_values = vec![scale_node_values]; + if convolutions[0].is_some() { + node_values.push(old_subgrid.x1_grid().into_owned()); + } + if convolutions[1].is_some() { + node_values.push(old_subgrid.x2_grid().into_owned()); + } + *new_subgrid = ImportSubgridV1::new(array, node_values).into(); + } + } + + *result.metadata_mut() = grid + .key_values() + .cloned() + .unwrap_or_default() + .into_iter() + .collect(); + + if let Some(r) = grid.remapper() { + result + .set_remapper( + BinRemapper::new(r.normalizations().to_vec(), r.limits().to_vec()) + // UNWRAP: if the old grid could be constructed with the given normalizations + // and limits we should be able to do the same without error + .unwrap(), + ) + // UNWRAP: there's a bug if this fails + .unwrap(); + } + + assert_eq!(result.bin_info().bins(), grid.bin_info().bins()); + + Ok(result) +} + +fn read_convolutions_from_metadata(grid: &GridV0) -> Vec> { + grid.key_values().map_or_else( + // if there isn't any metadata, we assume two unpolarized proton-PDFs are used + || vec![Some(Conv::new(ConvType::UnpolPDF, 2212)); 2], + |kv| { + // file format v0 only supports exactly two convolutions + (1..=2) + .map(|index| { + // if there are key-value pairs 
`convolution_particle_1` and + // `convolution_type_1` and the same with a higher index, we convert this + // metadata into `Convolution` + match ( + kv.get(&format!("convolution_particle_{index}")) + .map(|s| s.parse::()), + kv.get(&format!("convolution_type_{index}")) + .map(String::as_str), + ) { + (_, Some("None")) => None, + (Some(Ok(pid)), Some("UnpolPDF")) => Some(Conv::new(ConvType::UnpolPDF, pid)), + (Some(Ok(pid)), Some("PolPDF")) => Some(Conv::new(ConvType::PolPDF, pid)), + (Some(Ok(pid)), Some("UnpolFF")) => Some(Conv::new(ConvType::UnpolFF, pid)), + (Some(Ok(pid)), Some("PolFF")) => Some(Conv::new(ConvType::PolFF, pid)), + (None, None) => { + // if these key-value pairs are missing use the old metadata + match kv + .get(&format!("initial_state_{index}")) + .map(|s| s.parse::()) + { + Some(Ok(pid)) => { + let condition = !grid.channels().iter().all(|entry| { + entry.entry().iter().all(|&(a, b, _)| + match index { + 1 => a, + 2 => b, + _ => unreachable!() + } == pid + ) + }); + + condition.then_some(Conv::new(ConvType::UnpolPDF, pid)) + } + None => Some(Conv::new(ConvType::UnpolPDF, 2212)), + Some(Err(err)) => panic!( + "metadata 'initial_state_{index}' could not be parsed: {err}" + ), + } + } + (None, Some(_)) => { + panic!("metadata 'convolution_type_{index}' is missing") + } + (Some(_), None) => { + panic!("metadata 'convolution_particle_{index}' is missing") + } + (Some(Ok(_)), Some(type_)) => { + panic!("metadata 'convolution_type_{index} = {type_}' is unknown") + } + (Some(Err(err)), Some(_)) => { + panic!("metadata 'convolution_particle_{index}' could not be parsed: {err}") + } + } + }) + .collect() + }, + ) +} diff --git a/pineappl/tests/drell_yan_lo.rs b/pineappl/tests/drell_yan_lo.rs index 21dad5710..b8add159d 100644 --- a/pineappl/tests/drell_yan_lo.rs +++ b/pineappl/tests/drell_yan_lo.rs @@ -1,13 +1,18 @@ +#![allow(missing_docs)] + use anyhow::Result; use float_cmp::assert_approx_eq; +use itertools::izip; use lhapdf::Pdf; use 
num_complex::Complex; use pineappl::bin::BinRemapper; -use pineappl::boc::Order; +use pineappl::boc::{Kinematics, Order, ScaleFuncForm, Scales}; use pineappl::channel; -use pineappl::convolutions::LumiCache; -use pineappl::grid::{Grid, GridOptFlags, Ntuple}; -use pineappl::subgrid::{ExtraSubgridParams, Subgrid, SubgridEnum, SubgridParams}; +use pineappl::convolutions::{Conv, ConvType, ConvolutionCache}; +use pineappl::grid::{Grid, GridOptFlags}; +use pineappl::interpolation::{Interp, InterpMeth, Map, ReweightMeth}; +use pineappl::pids::PidBasis; +use pineappl::subgrid::{Subgrid, SubgridEnum}; use rand::Rng; use rand_pcg::Pcg64; use std::f64::consts::PI; @@ -24,6 +29,8 @@ fn int_photo(s: f64, t: f64, u: f64) -> f64 { alpha0.powi(2) / 2.0 / s * (t / u + u / t) } +// ALLOW: in this example we care more about readability than floating-point accuracy +#[allow(clippy::suboptimal_flops)] // Eq. (2.12) - quark-antiquark contribution to DY lepton pair production fn int_quark(s: f64, t: f64, u: f64, qq: f64, i3_wq: f64) -> f64 { let alphagf: f64 = 1.0 / 132.30818655547878; @@ -94,7 +101,7 @@ fn hadronic_pspgen(rng: &mut impl Rng, mmin: f64, mmax: f64) -> Psp2to2 { jacobian *= tau * tau0.ln().powi(2) * r1; // theta integration (in the CMS) - let cos_theta = 2.0 * rng.gen::() - 1.0; + let cos_theta = rng.gen::().mul_add(2.0, -1.0); jacobian *= 2.0; let t = -0.5 * s * (1.0 - cos_theta); @@ -113,24 +120,18 @@ fn hadronic_pspgen(rng: &mut impl Rng, mmin: f64, mmax: f64) -> Psp2to2 { } } -fn fill_drell_yan_lo_grid( - rng: &mut impl Rng, - calls: u32, - subgrid_type: &str, - dynamic: bool, - reweight: bool, -) -> Result { +fn fill_drell_yan_lo_grid(rng: &mut impl Rng, calls: u32, dynamic: bool, reweight: bool) -> Grid { let channels = vec![ // photons - channel![22, 22, 1.0], + channel![1.0 * (22, 22)], // up-antiup - channel![2, -2, 1.0; 4, -4, 1.0], + channel![1.0 * (2, -2) + 1.0 * (4, -4)], // antiup-up - channel![-2, 2, 1.0; -4, 4, 1.0], + channel![1.0 * (-2, 2) + 1.0 * (-4, 
4)], // down-antidown - channel![1, -1, 1.0; 3, -3, 1.0; 5, -5, 1.0], + channel![1.0 * (1, -1) + 1.0 * (3, -3) + 1.0 * (5, -5)], // antidown-down - channel![-1, 1, 1.0; -3, 3, 1.0; -5, 5, 1.0], + channel![1.0 * (-1, 1) + 1.0 * (-3, 3) + 1.0 * (-5, 5)], ]; let orders = vec![ @@ -140,6 +141,7 @@ fn fill_drell_yan_lo_grid( alpha: 2, logxir: 0, logxif: 0, + logxia: 0, }, // NLO QCD - won't be filled Order { @@ -147,45 +149,93 @@ fn fill_drell_yan_lo_grid( alpha: 2, logxir: 0, logxif: 0, + logxia: 0, }, Order { alphas: 1, alpha: 2, logxir: 0, logxif: 1, + logxia: 0, }, ]; // we bin in rapidity from 0 to 2.4 in steps of 0.1 let bin_limits: Vec<_> = (0..=24).map(|x: u32| f64::from(x) / 10.0).collect(); - let mut subgrid_params = SubgridParams::default(); - let mut extra = ExtraSubgridParams::default(); - - subgrid_params.set_q2_bins(30); - subgrid_params.set_q2_max(1e6); - subgrid_params.set_q2_min(1e2); - subgrid_params.set_q2_order(3); - subgrid_params.set_reweight(reweight); - subgrid_params.set_x_bins(50); - subgrid_params.set_x_max(1.0); - subgrid_params.set_x_min(2e-7); - subgrid_params.set_x_order(3); - extra.set_x2_bins(50); - extra.set_x2_max(1.0); - extra.set_x2_min(2e-7); - extra.set_x2_order(3); - extra.set_reweight2(reweight); + // the grid represents data with two unpolarized proton PDFs + let convolutions = vec![ + Conv::new(ConvType::UnpolPDF, 2212), + Conv::new(ConvType::UnpolPDF, 2212), + ]; + + let reweight = if reweight { + ReweightMeth::ApplGridX + } else { + ReweightMeth::NoReweight + }; + + // define how `Grid::fill` interpolates + let interps = vec![ + // 1st dimension interpolation parameters + Interp::new( + 1e2, + 1e6, + 30, + 3, + ReweightMeth::NoReweight, + Map::ApplGridH0, + InterpMeth::Lagrange, + ), + // 2nd dimension interpolation parameters + Interp::new( + 2e-7, + 1.0, + 50, + 3, + reweight, + Map::ApplGridF2, + InterpMeth::Lagrange, + ), + // 3rd dimension interpolation parameters + Interp::new( + 2e-7, + 1.0, + 50, + 3, + reweight, + 
Map::ApplGridF2, + InterpMeth::Lagrange, + ), + ]; + + let kinematics = vec![ + // 1st dimension is factorization and at the same time also the renormalization scale + Kinematics::Scale(0), + // 2nd dimension is the parton momentum fraction of the first convolution + Kinematics::X1, + // 3rd dimension is the parton momentum fraction of the second convolution + Kinematics::X2, + ]; + + let scales = Scales { + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, + }; // create the PineAPPL grid - let mut grid = Grid::with_subgrid_type( + let mut grid = Grid::new( + // the integers in the channel definition are PDG Monte Carlo IDs + PidBasis::Pdg, channels, orders, bin_limits, - subgrid_params, - extra, - subgrid_type, - )?; + convolutions, + interps, + kinematics, + scales, + ); // in GeV^2 pbarn let hbarc2 = 3.893793721e8; @@ -226,62 +276,41 @@ fn fill_drell_yan_lo_grid( let pto = 0; let channel = 0; - grid.fill(pto, yll.abs(), channel, &Ntuple { x1, x2, q2, weight }); + grid.fill(pto, yll.abs(), channel, &[q2, x1, x2], weight); // LO up-antiup-type channel let weight = jacobian * int_quark(s, t, u, 2.0 / 3.0, 0.5); let pto = 0; let channel = 1; - grid.fill(pto, yll.abs(), channel, &Ntuple { x1, x2, q2, weight }); + grid.fill(pto, yll.abs(), channel, &[q2, x1, x2], weight); // LO antiup-up-type channel - swap (x1 <-> x2) and (t <-> u) let weight = jacobian * int_quark(s, u, t, 2.0 / 3.0, 0.5); let pto = 0; let channel = 2; - grid.fill( - pto, - yll.abs(), - channel, - &Ntuple { - x1: x2, - x2: x1, - q2, - weight, - }, - ); + grid.fill(pto, yll.abs(), channel, &[q2, x2, x1], weight); // LO down-antidown-type channel let weight = jacobian * int_quark(s, t, u, -1.0 / 3.0, -0.5); let pto = 0; let channel = 3; - grid.fill(pto, yll.abs(), channel, &Ntuple { x1, x2, q2, weight }); + grid.fill(pto, yll.abs(), channel, &[q2, x1, x2], weight); // LO antidown-down-type channel - swap (x1 <-> x2) and (t <-> u) let weight = jacobian * 
int_quark(s, u, t, -1.0 / 3.0, -0.5); let pto = 0; let channel = 4; - grid.fill( - pto, - yll.abs(), - channel, - &Ntuple { - x1: x2, - x2: x1, - q2, - weight, - }, - ); + grid.fill(pto, yll.abs(), channel, &[q2, x2, x1], weight); } - Ok(grid) + grid } fn perform_grid_tests( - subgrid_type: &str, dynamic: bool, reference: &[f64], reference_after_ssd: &[f64], @@ -289,16 +318,12 @@ fn perform_grid_tests( reweight: bool, ) -> Result<()> { let mut rng = Pcg64::new(0xcafef00dd15ea5e5, 0xa02bdbf7bb3c0a7ac28fa16a64abf96); - let mut grid = fill_drell_yan_lo_grid(&mut rng, INT_STATS, subgrid_type, dynamic, reweight)?; + let mut grid = fill_drell_yan_lo_grid(&mut rng, INT_STATS, dynamic, reweight); // TEST 1: `merge` and `scale` grid.merge(fill_drell_yan_lo_grid( - &mut rng, - INT_STATS, - subgrid_type, - dynamic, - reweight, - )?)?; + &mut rng, INT_STATS, dynamic, reweight, + ))?; grid.scale(0.5); // suppress LHAPDF banners @@ -325,29 +350,40 @@ fn perform_grid_tests( let mut grid = Grid::read(&mut file)?; // TEST 4: `scale_by_order` - grid.scale_by_order(10.0, 0.5, 10.0, 10.0, 1.0); - grid.scale_by_order(10.0, 1.0, 10.0, 10.0, 4.0); + grid.scale_by_order(10.0, 0.5, 10.0, 10.0, 1.0, 1.0); + grid.scale_by_order(10.0, 1.0, 10.0, 10.0, 1.0, 4.0); // TEST 5: `convolve` - let mut lumi_cache = LumiCache::with_one(2212, &mut xfx, &mut alphas); - let bins = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let mut convolution_cache = ConvolutionCache::new( + vec![Conv::new(ConvType::UnpolPDF, 2212)], + vec![&mut xfx], + &mut alphas, + ); + let bins = grid.convolve(&mut convolution_cache, &[], &[], &[], &[(1.0, 1.0, 1.0)]); for (result, reference) in bins.iter().zip(reference.iter()) { - assert_approx_eq!(f64, *result, *reference, ulps = 16); + assert_approx_eq!(f64, *result, *reference, ulps = 4); } - // TEST 5b: `convolve` with `LumiCache::with_two` + // TEST 5b: `convolve` with `ConvolutionCache::with_two` let mut xfx1 = |id, x, q2| pdf.xfx_q2(id, x, q2); let mut 
xfx2 = |id, x, q2| pdf.xfx_q2(id, x, q2); let mut alphas2 = |_| 0.0; - let mut lumi_cache2 = LumiCache::with_two(2212, &mut xfx1, 2212, &mut xfx2, &mut alphas2); - let bins2 = grid.convolve(&mut lumi_cache2, &[], &[], &[], &[(1.0, 1.0)]); + let mut convolution_cache2 = ConvolutionCache::new( + vec![ + Conv::new(ConvType::UnpolPDF, 2212), + Conv::new(ConvType::UnpolPDF, 2212), + ], + vec![&mut xfx1, &mut xfx2], + &mut alphas2, + ); + let bins2 = grid.convolve(&mut convolution_cache2, &[], &[], &[], &[(1.0, 1.0, 1.0)]); for (result, reference) in bins2.iter().zip(reference.iter()) { assert_approx_eq!(f64, *result, *reference, ulps = 16); } - mem::drop(lumi_cache2); + mem::drop(convolution_cache2); mem::drop(bins2); // TEST 6: `convolve_subgrid` @@ -355,7 +391,7 @@ fn perform_grid_tests( .map(|bin| { (0..grid.channels().len()) .map(|channel| { - grid.convolve_subgrid(&mut lumi_cache, 0, bin, channel, 1.0, 1.0) + grid.convolve_subgrid(&mut convolution_cache, 0, bin, channel, (1.0, 1.0, 1.0)) .sum() }) .sum() @@ -363,7 +399,7 @@ fn perform_grid_tests( .collect(); for (result, reference) in bins.iter().zip(reference.iter()) { - assert_approx_eq!(f64, *result, *reference, ulps = 24); + assert_approx_eq!(f64, *result, *reference, ulps = 16); } // TEST 7a: `optimize_using` - tests `symmetrize` for each subgrid type @@ -372,15 +408,20 @@ fn perform_grid_tests( // TEST 7b: `optimize` grid.optimize(); - assert_eq!(grid.subgrids()[[0, 0, 0]].x1_grid().as_ref(), x_grid); - assert_eq!(grid.subgrids()[[0, 0, 0]].x2_grid().as_ref(), x_grid); + let node_values = grid.subgrids()[[0, 0, 0]].node_values(); + + for (&node_value1, &node_value2, &ref_value) in izip!(&node_values[1], &node_values[2], x_grid) + { + assert_approx_eq!(f64, node_value1, ref_value, ulps = 4); + assert_approx_eq!(f64, node_value2, ref_value, ulps = 4); + } // TEST 8: `convolve_subgrid` for the optimized subgrids let bins: Vec<_> = (0..grid.bin_info().bins()) .map(|bin| { (0..grid.channels().len()) .map(|channel| 
{ - grid.convolve_subgrid(&mut lumi_cache, 0, bin, channel, 1.0, 1.0) + grid.convolve_subgrid(&mut convolution_cache, 0, bin, channel, (1.0, 1.0, 1.0)) .sum() }) .sum() @@ -388,13 +429,13 @@ fn perform_grid_tests( .collect(); for (result, reference_after_ssd) in bins.iter().zip(reference_after_ssd.iter()) { - assert_approx_eq!(f64, *result, *reference_after_ssd, ulps = 24); + assert_approx_eq!(f64, *result, *reference_after_ssd, ulps = 32); } - let bins = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let bins = grid.convolve(&mut convolution_cache, &[], &[], &[], &[(1.0, 1.0, 1.0)]); for (result, reference_after_ssd) in bins.iter().zip(reference_after_ssd.iter()) { - assert_approx_eq!(f64, *result, *reference_after_ssd, ulps = 24); + assert_approx_eq!(f64, *result, *reference_after_ssd, ulps = 64); } // TEST 9: `set_remapper` @@ -416,7 +457,7 @@ fn perform_grid_tests( grid.merge_bins(0..1)?; for (result, reference_after_ssd) in bins.iter().zip(reference_after_ssd.iter()) { - assert_approx_eq!(f64, *result, *reference_after_ssd, ulps = 24); + assert_approx_eq!(f64, *result, *reference_after_ssd, ulps = 64); } // merge two bins with each other @@ -424,7 +465,7 @@ fn perform_grid_tests( grid.merge_bins(bin..bin + 2)?; } - let merged2 = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let merged2 = grid.convolve(&mut convolution_cache, &[], &[], &[], &[(1.0, 1.0, 1.0)]); for (result, reference_after_ssd) in merged2.iter().zip( reference_after_ssd @@ -439,7 +480,7 @@ fn perform_grid_tests( // delete a few bins from the start grid.delete_bins(&[0, 1]); - let deleted = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let deleted = grid.convolve(&mut convolution_cache, &[], &[], &[], &[(1.0, 1.0, 1.0)]); assert_eq!(deleted.len(), 10); @@ -455,7 +496,7 @@ fn perform_grid_tests( // delete a few bins from the ending grid.delete_bins(&[8, 9]); - let deleted2 = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let 
deleted2 = grid.convolve(&mut convolution_cache, &[], &[], &[], &[(1.0, 1.0, 1.0)]); assert_eq!(deleted2.len(), 8); @@ -466,314 +507,181 @@ fn perform_grid_tests( .skip(2) .take(6), ) { - assert_approx_eq!(f64, *result, reference_after_ssd, ulps = 16); + assert_approx_eq!(f64, *result, reference_after_ssd, ulps = 32); } Ok(()) } -fn generate_grid(subgrid_type: &str, dynamic: bool, reweight: bool) -> Result { +fn generate_grid(dynamic: bool, reweight: bool) -> Grid { let mut rng = Pcg64::new(0xcafef00dd15ea5e5, 0xa02bdbf7bb3c0a7ac28fa16a64abf96); - fill_drell_yan_lo_grid(&mut rng, 500_000, subgrid_type, dynamic, reweight) + fill_drell_yan_lo_grid(&mut rng, 500_000, dynamic, reweight) } // number is small enough for the tests to be quick and meaningful const INT_STATS: u32 = 10_000; const STATIC_REFERENCE: [f64; 24] = [ - 168.3895932203146, + 168.38959322031457, 513.0768026978171, - 371.7128739636697, - 235.8098362992926, - 186.2012477587763, - 216.23045149885752, - 232.04909213040406, - 170.06035328940015, - 103.33958880717893, - 191.8706139862758, - 182.48313944455572, - 171.6188420512558, - 419.8243491958518, - 151.1415299189055, - 226.05268712169604, - 199.6459179085148, - 369.89355607075987, - 77.06480847483716, - 24.490861801031464, + 371.71287396366995, + 235.8098362992929, + 186.20124775877665, + 216.2304514988581, + 232.04909213040415, + 170.06035328939964, + 103.33958880717813, + 191.87061398627344, + 182.48313944454654, + 171.6188420512471, + 419.82434919583875, + 151.14152991889875, + 226.05268712169035, + 199.64591790851492, + 369.8935560707606, + 77.06480847483719, + 24.490861801031517, 12.19000587591836, - 23.1109141871296, - 87.06841942116225, - 33.02815936198823, - 2.362076425563693, + 23.110914187129627, + 87.06841942116057, + 33.0281593619898, + 2.362076425563692, ]; -// numbers are slightly different from `STATIC_REFERENCE` because the static scale detection (SSD) -// removes the Q^2 interpolation error +// numbers are slightly different from 
`STATIC_REFERENCE` because the node optimization detects the +// static scale and removes the Q^2 interpolation error const STATIC_REFERENCE_AFTER_SSD: [f64; 24] = [ - 168.38968474125483, - 513.0770882860817, - 371.7130781560301, - 235.8099644394127, - 186.20134644917377, - 216.23056626541657, - 232.0492126966559, - 170.06043577796652, - 103.3396370886257, - 191.87070884467457, - 182.48320743598543, - 171.61890694318237, - 419.824506574201, - 151.14157394137038, - 226.05274545087767, - 199.64596561172144, - 369.89362418149375, - 77.0648225310743, - 24.490864173669205, - 12.190006763665675, - 23.110914180153898, - 87.06840698213031, - 33.02815852834857, + 168.38968474125505, + 513.0770882860825, + 371.7130781560312, + 235.80996443941294, + 186.20134644917403, + 216.2305662654171, + 232.04921269665587, + 170.060435777966, + 103.33963708862501, + 191.87070884467244, + 182.48320743597665, + 171.6189069431743, + 419.82450657418957, + 151.14157394136424, + 226.0527454508727, + 199.64596561172203, + 369.8936241814945, + 77.06482253107433, + 24.49086417366925, + 12.190006763665671, + 23.110914180153948, + 87.0684069821286, + 33.028158528350204, 2.3620759284762887, ]; const DYNAMIC_REFERENCE: [f64; 24] = [ - 167.73075899059725, + 167.73075899059722, 513.3569347058141, - 371.33363988284026, - 235.5300462322826, - 185.89154709174824, - 216.75969380255472, - 231.6876677545832, - 169.6799766165541, - 103.56259318132975, - 191.6874838895875, - 182.00660765470136, - 171.62215911516836, - 420.11223950904895, - 150.81647073656592, - 226.01683150243653, - 199.99645663613603, - 370.57403873938387, - 77.1345922714995, - 24.570096511403527, - 12.150599862369834, - 23.045883969060334, - 87.22986784601223, - 33.0557605051653, - 2.306133253577581, + 371.3336398828405, + 235.53004623228287, + 185.89154709174852, + 216.75969380255526, + 231.6876677545833, + 169.67997661655352, + 103.56259318132902, + 191.68748388958508, + 182.00660765469212, + 171.6221591151596, + 420.1122395090359, + 
150.81647073655915, + 226.0168315024308, + 199.99645663613614, + 370.5740387393846, + 77.13459227149951, + 24.570096511403573, + 12.15059986236983, + 23.04588396906037, + 87.2298678460105, + 33.05576050516689, + 2.3061332535775794, ]; const DYNAMIC_REFERENCE_NO_REWEIGHT: [f64; 24] = [ - 167.02560507500345, - 511.63076013139545, - 370.16594705502035, - 234.5118769275643, - 185.2371693622032, - 215.91820762195567, - 230.92167039044898, - 169.015739215293, - 103.19948220954701, - 190.91537687434288, - 181.4974927711281, - 171.15900435348723, - 418.69191536302435, - 150.25485756167274, - 225.25882657551898, - 199.5592968215911, - 369.03016602405376, - 76.82053478041806, - 24.477528662138404, - 12.114147164297156, - 22.96720169916392, - 86.9945451584532, - 32.89496808038666, - 2.2999405116169536, + 167.02560507500328, + 511.6307601313951, + 370.1659470550191, + 234.51187692755803, + 185.23716936219668, + 215.91820762194604, + 230.92167039044742, + 169.0157392153116, + 103.19948220956648, + 190.9153768744078, + 181.49749277120952, + 171.15900435357685, + 418.69191536323956, + 150.25485756172543, + 225.25882657556969, + 199.5592968215913, + 369.0301660240426, + 76.82053478041635, + 24.477528662138134, + 12.114147164297055, + 22.967201699163894, + 86.99454515845706, + 32.894968080392196, + 2.2999405116169878, ]; -#[test] -fn drell_yan_lagrange_static() -> Result<()> { - perform_grid_tests( - "LagrangeSubgrid", - false, - &STATIC_REFERENCE, - &STATIC_REFERENCE_AFTER_SSD, - &[ - 0.030521584007828916, - 0.02108918668378717, - 0.014375068581090129, - 0.009699159574043398, - 0.006496206194633799, - 0.004328500638820811, - ], - true, - ) -} - -#[test] -fn drell_yan_lagrange_v1_static() -> Result<()> { - perform_grid_tests( - "LagrangeSubgridV1", - false, - &STATIC_REFERENCE, - &STATIC_REFERENCE, // LagrangeSubgridV1 doesn't have static-scale detection - &[ - 0.030521584007828916, - 0.02108918668378717, - 0.014375068581090129, - 0.009699159574043398, - 0.006496206194633799, - 
0.004328500638820811, - ], - true, - ) -} +const X_GRID: [f64; 6] = [ + 0.030521584007828877, + 0.02108918668378717, + 0.014375068581090129, + 0.009699159574043398, + 0.006496206194633799, + 0.004328500638819831, +]; #[test] -fn drell_yan_lagrange_v2_static() -> Result<()> { +fn drell_yan_static() -> Result<()> { perform_grid_tests( - "LagrangeSubgridV2", false, &STATIC_REFERENCE, &STATIC_REFERENCE_AFTER_SSD, - &[ - 0.030521584007828916, - 0.02108918668378717, - 0.014375068581090129, - 0.009699159574043398, - 0.006496206194633799, - 0.004328500638820811, - ], + &X_GRID, true, ) } #[test] -fn drell_yan_lagrange_dynamic() -> Result<()> { - perform_grid_tests( - "LagrangeSubgrid", - true, - &DYNAMIC_REFERENCE, - &DYNAMIC_REFERENCE, - &[ - 0.030521584007828916, - 0.02108918668378717, - 0.014375068581090129, - 0.009699159574043398, - 0.006496206194633799, - 0.004328500638820811, - ], - true, - ) -} - -#[test] -fn drell_yan_lagrange_v1_dynamic() -> Result<()> { - perform_grid_tests( - "LagrangeSubgridV1", - true, - &DYNAMIC_REFERENCE, - &DYNAMIC_REFERENCE, - &[ - 0.030521584007828916, - 0.02108918668378717, - 0.014375068581090129, - 0.009699159574043398, - 0.006496206194633799, - 0.004328500638820811, - ], - true, - ) +fn drell_yan_dynamic() -> Result<()> { + perform_grid_tests(true, &DYNAMIC_REFERENCE, &DYNAMIC_REFERENCE, &X_GRID, true) } #[test] -fn drell_yan_lagrange_v1_dynamic_no_reweight() -> Result<()> { +fn drell_yan_dynamic_no_reweight() -> Result<()> { perform_grid_tests( - "LagrangeSubgridV1", true, &DYNAMIC_REFERENCE_NO_REWEIGHT, &DYNAMIC_REFERENCE_NO_REWEIGHT, - &[ - 0.030521584007828916, - 0.02108918668378717, - 0.014375068581090129, - 0.009699159574043398, - 0.006496206194633799, - 0.004328500638820811, - ], + &X_GRID, false, ) } #[test] -fn drell_yan_lagrange_v2_dynamic() -> Result<()> { - perform_grid_tests( - "LagrangeSubgridV2", - true, - &DYNAMIC_REFERENCE, - &DYNAMIC_REFERENCE, - &[ - 0.030521584007828916, - 0.02108918668378717, - 
0.014375068581090129, - 0.009699159574043398, - 0.006496206194633799, - 0.004328500638820811, - ], - true, - ) -} - -#[test] -fn drell_yan_lagrange_v2_dynamic_no_reweight() -> Result<()> { - perform_grid_tests( - "LagrangeSubgridV2", - true, - &DYNAMIC_REFERENCE_NO_REWEIGHT, - &DYNAMIC_REFERENCE_NO_REWEIGHT, - &[ - 0.030521584007828916, - 0.02108918668378717, - 0.014375068581090129, - 0.009699159574043398, - 0.006496206194633799, - 0.004328500638820811, - ], - false, - ) -} - -#[test] -fn drell_yan_lagrange_sparse_dynamic() -> Result<()> { - perform_grid_tests( - "LagrangeSparseSubgrid", - true, - &DYNAMIC_REFERENCE, - &DYNAMIC_REFERENCE, - &[ - 0.030521584007828916, - 0.02108918668378717, - 0.014375068581090129, - 0.009699159574043398, - 0.006496206194633799, - 0.004328500638820811, - ], - true, - ) -} - -#[test] -fn grid_optimize() -> Result<()> { - let mut grid = generate_grid("LagrangeSubgridV2", false, false)?; +fn grid_optimize() { + let mut grid = generate_grid(false, false); assert_eq!(grid.orders().len(), 3); assert_eq!(grid.channels().len(), 5); assert!(matches!( grid.subgrids()[[0, 0, 0]], - SubgridEnum::LagrangeSubgridV2 { .. } + SubgridEnum::InterpSubgridV1 { .. } )); - assert_eq!(grid.subgrids()[[0, 0, 0]].x1_grid().len(), 50); - assert_eq!(grid.subgrids()[[0, 0, 0]].x2_grid().len(), 50); - assert_eq!(grid.subgrids()[[0, 0, 0]].mu2_grid().len(), 30); + + let node_values = grid.subgrids()[[0, 0, 0]].node_values(); + assert_eq!(node_values[0].len(), 30); + assert_eq!(node_values[1].len(), 50); + assert_eq!(node_values[2].len(), 50); let mut grid2 = grid.clone(); grid2.optimize_using(GridOptFlags::OPTIMIZE_SUBGRID_TYPE); @@ -781,23 +689,24 @@ fn grid_optimize() -> Result<()> { // `OPTIMIZE_SUBGRID_TYPE` changes the subgrid type ... assert!(matches!( grid2.subgrids()[[0, 0, 0]], - SubgridEnum::ImportOnlySubgridV2 { .. } + SubgridEnum::ImportSubgridV1 { .. 
} )); // and the dimensions of the subgrid - assert_eq!(grid2.subgrids()[[0, 0, 0]].x1_grid().len(), 6); - assert_eq!(grid2.subgrids()[[0, 0, 0]].x2_grid().len(), 6); - assert_eq!(grid2.subgrids()[[0, 0, 0]].mu2_grid().len(), 4); + let node_values = grid2.subgrids()[[0, 0, 0]].node_values(); + assert_eq!(node_values[0].len(), 4); + assert_eq!(node_values[1].len(), 6); + assert_eq!(node_values[2].len(), 6); - grid.optimize_using(GridOptFlags::OPTIMIZE_SUBGRID_TYPE | GridOptFlags::STATIC_SCALE_DETECTION); + grid.optimize_using(GridOptFlags::OPTIMIZE_NODES); assert!(matches!( grid.subgrids()[[0, 0, 0]], - SubgridEnum::ImportOnlySubgridV2 { .. } + SubgridEnum::InterpSubgridV1 { .. } )); - // if `STATIC_SCALE_DETECTION` is present the `mu2_grid` dimension are better optimized - assert_eq!(grid.subgrids()[[0, 0, 0]].x1_grid().len(), 6); - assert_eq!(grid.subgrids()[[0, 0, 0]].x2_grid().len(), 6); - assert_eq!(grid.subgrids()[[0, 0, 0]].mu2_grid().len(), 1); + let node_values = grid.subgrids()[[0, 0, 0]].node_values(); + assert_eq!(node_values[0].len(), 1); + assert_eq!(node_values[1].len(), 6); + assert_eq!(node_values[2].len(), 6); // has no effect for this test grid.optimize_using(GridOptFlags::SYMMETRIZE_CHANNELS); @@ -820,6 +729,4 @@ fn grid_optimize() -> Result<()> { assert_eq!(grid.orders().len(), 1); assert_eq!(grid.channels().len(), 3); - - Ok(()) } diff --git a/pineappl_applgrid/src/lib.rs b/pineappl_applgrid/src/lib.rs index e476ec28c..a4dd99663 100644 --- a/pineappl_applgrid/src/lib.rs +++ b/pineappl_applgrid/src/lib.rs @@ -13,6 +13,7 @@ use lhapdf::Pdf; use std::mem; use std::pin::Pin; +use std::ptr; use std::slice; use std::sync::{Mutex, OnceLock}; @@ -185,7 +186,7 @@ pub fn grid_convolve_with_one( grid, xfx, alphas, - (pdf as *mut Pdf).cast::(), + ptr::from_mut(pdf).cast::(), nloops, rscale, fscale, diff --git a/pineappl_capi/Cargo.toml b/pineappl_capi/Cargo.toml index eeb0d0446..bfd90bdbb 100644 --- a/pineappl_capi/Cargo.toml +++ b/pineappl_capi/Cargo.toml 
@@ -16,7 +16,7 @@ version.workspace = true workspace = true [dependencies] -pineappl = { path = "../pineappl", version = "=0.8.2" } +pineappl = { path = "../pineappl", version = "=1.0.0-alpha1" } itertools = "0.10.1" [features] diff --git a/pineappl_capi/src/lib.rs b/pineappl_capi/src/lib.rs index 38c45d2d7..7ee339abb 100644 --- a/pineappl_capi/src/lib.rs +++ b/pineappl_capi/src/lib.rs @@ -57,24 +57,35 @@ use itertools::izip; use pineappl::bin::BinRemapper; -use pineappl::boc::{Channel, Order}; -use pineappl::convolutions::LumiCache; -use pineappl::grid::{Grid, GridOptFlags, Ntuple}; -use pineappl::subgrid::{ExtraSubgridParams, SubgridParams}; +use pineappl::boc::{Channel, Kinematics, Order, ScaleFuncForm, Scales}; +use pineappl::convolutions::{Conv, ConvType, ConvolutionCache}; +use pineappl::grid::{Grid, GridOptFlags}; +use pineappl::interpolation::{Interp, InterpMeth, Map, ReweightMeth}; +use pineappl::pids::PidBasis; use std::collections::HashMap; use std::ffi::{CStr, CString}; use std::fs::File; use std::mem; -use std::os::raw::{c_char, c_void}; +use std::os::raw::{c_char, c_int, c_void}; use std::path::Path; use std::slice; // TODO: make sure no `panic` calls leave functions marked as `extern "C"` -fn grid_params(key_vals: Option<&KeyVal>) -> (String, SubgridParams, ExtraSubgridParams) { - let mut subgrid_type = "LagrangeSubgrid".to_owned(); - let mut subgrid_params = SubgridParams::default(); - let mut extra = ExtraSubgridParams::default(); +fn grid_interpolation_params(key_vals: Option<&KeyVal>) -> Vec { + let mut q2_min = 1e2; + let mut q2_max = 1e8; + let mut q2_nodes = 40; + let mut q2_order = 3; + let mut x1_min = 2e-7; + let mut x1_max = 1.0; + let mut x1_nodes = 50; + let mut x1_order = 3; + let mut x2_min = 2e-7; + let mut x2_max = 1.0; + let mut x2_nodes = 50; + let mut x2_order = 3; + let mut reweight = ReweightMeth::ApplGridX; if let Some(keyval) = key_vals { if let Some(value) = keyval @@ -82,7 +93,7 @@ fn grid_params(key_vals: Option<&KeyVal>) 
-> (String, SubgridParams, ExtraSubgri .get("q2_bins") .or_else(|| keyval.ints.get("nq2")) { - subgrid_params.set_q2_bins(usize::try_from(*value).unwrap()); + q2_nodes = usize::try_from(*value).unwrap(); } if let Some(value) = keyval @@ -90,7 +101,7 @@ fn grid_params(key_vals: Option<&KeyVal>) -> (String, SubgridParams, ExtraSubgri .get("q2_max") .or_else(|| keyval.doubles.get("q2max")) { - subgrid_params.set_q2_max(*value); + q2_max = *value; } if let Some(value) = keyval @@ -98,7 +109,7 @@ fn grid_params(key_vals: Option<&KeyVal>) -> (String, SubgridParams, ExtraSubgri .get("q2_min") .or_else(|| keyval.doubles.get("q2min")) { - subgrid_params.set_q2_min(*value); + q2_min = *value; } if let Some(value) = keyval @@ -106,17 +117,19 @@ fn grid_params(key_vals: Option<&KeyVal>) -> (String, SubgridParams, ExtraSubgri .get("q2_order") .or_else(|| keyval.ints.get("q2order")) { - subgrid_params.set_q2_order(usize::try_from(*value).unwrap()); + q2_order = usize::try_from(*value).unwrap(); } if let Some(value) = keyval.bools.get("reweight") { - subgrid_params.set_reweight(*value); + if !value { + reweight = ReweightMeth::NoReweight; + } } if let Some(value) = keyval.ints.get("x_bins").or_else(|| keyval.ints.get("nx")) { let value = usize::try_from(*value).unwrap(); - subgrid_params.set_x_bins(value); - extra.set_x2_bins(value); + x1_nodes = value; + x2_nodes = value; } if let Some(value) = keyval @@ -124,8 +137,8 @@ fn grid_params(key_vals: Option<&KeyVal>) -> (String, SubgridParams, ExtraSubgri .get("x_max") .or_else(|| keyval.doubles.get("xmax")) { - subgrid_params.set_x_max(*value); - extra.set_x2_max(*value); + x1_max = *value; + x2_max = *value; } if let Some(value) = keyval @@ -133,8 +146,8 @@ fn grid_params(key_vals: Option<&KeyVal>) -> (String, SubgridParams, ExtraSubgri .get("x_min") .or_else(|| keyval.doubles.get("xmin")) { - subgrid_params.set_x_min(*value); - extra.set_x2_min(*value); + x1_min = *value; + x2_min = *value; } if let Some(value) = keyval @@ -143,48 
+156,72 @@ fn grid_params(key_vals: Option<&KeyVal>) -> (String, SubgridParams, ExtraSubgri .or_else(|| keyval.ints.get("xorder")) { let value = usize::try_from(*value).unwrap(); - subgrid_params.set_x_order(value); - extra.set_x2_order(value); + x1_order = value; + x2_order = value; } if let Some(value) = keyval.ints.get("x1_bins") { - subgrid_params.set_x_bins(usize::try_from(*value).unwrap()); + x1_nodes = usize::try_from(*value).unwrap(); } if let Some(value) = keyval.doubles.get("x1_max") { - subgrid_params.set_x_max(*value); + x1_max = *value; } if let Some(value) = keyval.doubles.get("x1_min") { - subgrid_params.set_x_min(*value); + x1_min = *value; } if let Some(value) = keyval.ints.get("x1_order") { - subgrid_params.set_x_order(usize::try_from(*value).unwrap()); + x1_order = usize::try_from(*value).unwrap(); } if let Some(value) = keyval.ints.get("x2_bins") { - extra.set_x2_bins(usize::try_from(*value).unwrap()); + x2_nodes = usize::try_from(*value).unwrap(); } if let Some(value) = keyval.doubles.get("x2_max") { - extra.set_x2_max(*value); + x2_max = *value; } if let Some(value) = keyval.doubles.get("x2_min") { - extra.set_x2_min(*value); + x2_min = *value; } if let Some(value) = keyval.ints.get("x2_order") { - extra.set_x2_order(usize::try_from(*value).unwrap()); - } - - if let Some(value) = keyval.strings.get("subgrid_type") { - value.to_str().unwrap().clone_into(&mut subgrid_type); + x2_order = usize::try_from(*value).unwrap(); } } - (subgrid_type, subgrid_params, extra) + vec![ + Interp::new( + q2_min, + q2_max, + q2_nodes, + q2_order, + ReweightMeth::NoReweight, + Map::ApplGridH0, + InterpMeth::Lagrange, + ), + Interp::new( + x1_min, + x1_max, + x1_nodes, + x1_order, + reweight, + Map::ApplGridF2, + InterpMeth::Lagrange, + ), + Interp::new( + x2_min, + x2_max, + x2_nodes, + x2_order, + reweight, + Map::ApplGridF2, + InterpMeth::Lagrange, + ), + ] } /// Type for defining a luminosity function. 
@@ -292,6 +329,10 @@ pub unsafe extern "C" fn pineappl_grid_clone(grid: *const Grid) -> Box { } /// Wrapper for [`pineappl_grid_convolve_with_one`]. +/// +/// # Safety +/// +/// See [`pineappl_grid_convolve_with_one`]. #[deprecated( since = "0.8.0", note = "please use `pineappl_grid_convolve_with_one` instead" @@ -326,6 +367,10 @@ pub unsafe extern "C" fn pineappl_grid_convolute_with_one( } /// Wrapper for [`pineappl_grid_convolve_with_two`]. +/// +/// # Safety +/// +/// See [`pineappl_grid_convolve_with_two`]. #[deprecated( since = "0.8.0", note = "please use `pineappl_grid_convolve_with_two` instead" @@ -397,7 +442,7 @@ pub unsafe extern "C" fn pineappl_grid_convolve_with_one( results: *mut f64, ) { let grid = unsafe { &*grid }; - let mut pdf = |id, x, q2| xfx(id, x, q2, state); + let mut xfx = |id, x, q2| xfx(id, x, q2, state); let mut als = |q2| alphas(q2, state); let order_mask = if order_mask.is_null() { vec![] @@ -410,14 +455,18 @@ pub unsafe extern "C" fn pineappl_grid_convolve_with_one( unsafe { slice::from_raw_parts(channel_mask, grid.channels().len()) }.to_vec() }; let results = unsafe { slice::from_raw_parts_mut(results, grid.bin_info().bins()) }; - let mut lumi_cache = LumiCache::with_one(pdg_id, &mut pdf, &mut als); + let mut convolution_cache = ConvolutionCache::new( + vec![Conv::new(ConvType::UnpolPDF, pdg_id)], + vec![&mut xfx], + &mut als, + ); results.copy_from_slice(&grid.convolve( - &mut lumi_cache, + &mut convolution_cache, &order_mask, &[], &channel_mask, - &[(xi_ren, xi_fac)], + &[(xi_ren, xi_fac, 1.0)], )); } @@ -458,8 +507,8 @@ pub unsafe extern "C" fn pineappl_grid_convolve_with_two( results: *mut f64, ) { let grid = unsafe { &*grid }; - let mut pdf1 = |id, x, q2| xfx1(id, x, q2, state); - let mut pdf2 = |id, x, q2| xfx2(id, x, q2, state); + let mut xfx1 = |id, x, q2| xfx1(id, x, q2, state); + let mut xfx2 = |id, x, q2| xfx2(id, x, q2, state); let mut als = |q2| alphas(q2, state); let order_mask = if order_mask.is_null() { vec![] @@ 
-472,14 +521,21 @@ pub unsafe extern "C" fn pineappl_grid_convolve_with_two( unsafe { slice::from_raw_parts(channel_mask, grid.channels().len()) }.to_vec() }; let results = unsafe { slice::from_raw_parts_mut(results, grid.bin_info().bins()) }; - let mut lumi_cache = LumiCache::with_two(pdg_id1, &mut pdf1, pdg_id2, &mut pdf2, &mut als); + let mut convolution_cache = ConvolutionCache::new( + vec![ + Conv::new(ConvType::UnpolPDF, pdg_id1), + Conv::new(ConvType::UnpolPDF, pdg_id2), + ], + vec![&mut xfx1, &mut xfx2], + &mut als, + ); results.copy_from_slice(&grid.convolve( - &mut lumi_cache, + &mut convolution_cache, &order_mask, &[], &channel_mask, - &[(xi_ren, xi_fac)], + &[(xi_ren, xi_fac, 1.0)], )); } @@ -523,7 +579,7 @@ pub unsafe extern "C" fn pineappl_grid_fill( ) { let grid = unsafe { &mut *grid }; - grid.fill(order, observable, lumi, &Ntuple { x1, x2, q2, weight }); + grid.fill(order, observable, lumi, &[q2, x1, x2], weight); } /// Fill `grid` for the given momentum fractions `x1` and `x2`, at the scale `q2` for the given @@ -547,17 +603,9 @@ pub unsafe extern "C" fn pineappl_grid_fill_all( let grid = unsafe { &mut *grid }; let weights = unsafe { slice::from_raw_parts(weights, grid.channels().len()) }; - grid.fill_all( - order, - observable, - &Ntuple { - x1, - x2, - q2, - weight: (), - }, - weights, - ); + for (channel, &weight) in weights.iter().enumerate() { + grid.fill(order, observable, channel, &[q2, x1, x2], weight); + } } /// Fill `grid` with as many points as indicated by `size`. 
@@ -591,7 +639,7 @@ pub unsafe extern "C" fn pineappl_grid_fill_array( for (&x1, &x2, &q2, &order, &observable, &lumi, &weight) in izip!(x1, x2, q2, orders, observables, lumis, weights) { - grid.fill(order, observable, lumi, &Ntuple { x1, x2, q2, weight }); + grid.fill(order, observable, lumi, &[q2, x1, x2], weight); } } @@ -622,10 +670,10 @@ pub unsafe extern "C" fn pineappl_grid_order_params(grid: *const Grid, order_par let order_params = unsafe { slice::from_raw_parts_mut(order_params, 4 * orders.len()) }; for (i, order) in orders.iter().enumerate() { - order_params[4 * i] = order.alphas; - order_params[4 * i + 1] = order.alpha; - order_params[4 * i + 2] = order.logxir; - order_params[4 * i + 3] = order.logxif; + order_params[4 * i] = order.alphas.into(); + order_params[4 * i + 1] = order.alpha.into(); + order_params[4 * i + 2] = order.logxir.into(); + order_params[4 * i + 3] = order.logxif.into(); } } @@ -681,40 +729,49 @@ pub unsafe extern "C" fn pineappl_grid_new( let orders: Vec<_> = order_params .chunks(4) .map(|s| Order { - alphas: s[0], - alpha: s[1], - logxir: s[2], - logxif: s[3], + // UNWRAP: there shouldn't be orders with exponents larger than 255 + alphas: s[0].try_into().unwrap(), + alpha: s[1].try_into().unwrap(), + logxir: s[2].try_into().unwrap(), + logxif: s[3].try_into().unwrap(), + // this function doesn't support fragmentation scale logs + logxia: 0, }) .collect(); let key_vals = unsafe { key_vals.as_ref() }; - let (subgrid_type, subgrid_params, extra) = grid_params(key_vals); + let interps = grid_interpolation_params(key_vals); let lumi = unsafe { &*lumi }; - let mut grid = Box::new( - Grid::with_subgrid_type( - lumi.0.clone(), - orders, - unsafe { slice::from_raw_parts(bin_limits, bins + 1) }.to_vec(), - subgrid_params, - extra, - &subgrid_type, - ) - .unwrap(), - ); + + let mut convolutions = vec![Conv::new(ConvType::UnpolPDF, 2212); 2]; if let Some(keyval) = key_vals { if let Some(value) = keyval.strings.get("initial_state_1") { - 
grid.set_key_value("initial_state_1", value.to_str().unwrap()); + convolutions[0] = + Conv::new(ConvType::UnpolPDF, value.to_string_lossy().parse().unwrap()); } if let Some(value) = keyval.strings.get("initial_state_2") { - grid.set_key_value("initial_state_2", value.to_str().unwrap()); + convolutions[1] = + Conv::new(ConvType::UnpolPDF, value.to_string_lossy().parse().unwrap()); } } - grid + Box::new(Grid::new( + PidBasis::Pdg, + lumi.0.clone(), + orders, + unsafe { slice::from_raw_parts(bin_limits, bins + 1) }.to_vec(), + convolutions, + interps, + vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2], + Scales { + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, + }, + )) } /// Read a `PineAPPL` grid from a file with name `filename`. @@ -867,7 +924,7 @@ pub unsafe extern "C" fn pineappl_grid_scale_by_order( ) { let grid = unsafe { &mut *grid }; - grid.scale_by_order(alphas, alpha, logxir, logxif, global); + grid.scale_by_order(alphas, alpha, logxir, logxif, 1.0, global); } /// Return the value for `key` stored in `grid`. If `key` isn't found, `NULL` will be returned. @@ -891,12 +948,22 @@ pub unsafe extern "C" fn pineappl_grid_key_value( let key = unsafe { CStr::from_ptr(key) }; let key = key.to_string_lossy(); - CString::new( - grid.key_values() - .map_or("", |kv| kv.get(key.as_ref()).map_or("", String::as_str)), - ) - .unwrap() - .into_raw() + // backwards compatibility + let index = match key.as_ref() { + "initial_state_1" => Some(0), + "initial_state_2" => Some(1), + _ => None, + }; + + if let Some(index) = index { + return CString::new(grid.convolutions()[index].pid().to_string()) + .unwrap() + .into_raw(); + } + + CString::new(grid.metadata().get(key.as_ref()).map_or("", String::as_str)) + .unwrap() + .into_raw() } /// Sets an internal key-value pair for the grid. 
@@ -917,13 +984,25 @@ pub unsafe extern "C" fn pineappl_grid_set_key_value( value: *const c_char, ) { let grid = unsafe { &mut *grid }; - let key = unsafe { CStr::from_ptr(key) }; - let value = unsafe { CStr::from_ptr(value) }; + let key = unsafe { CStr::from_ptr(key) } + .to_string_lossy() + .into_owned(); + let value = unsafe { CStr::from_ptr(value) } + .to_string_lossy() + .into_owned(); + + // backwards compatibility + let index = match key.as_str() { + "initial_state_1" => Some(0), + "initial_state_2" => Some(1), + _ => None, + }; - grid.set_key_value( - key.to_string_lossy().as_ref(), - value.to_string_lossy().as_ref(), - ); + if let Some(index) = index { + grid.convolutions_mut()[index] = Conv::new(ConvType::UnpolPDF, value.parse().unwrap()); + } + + grid.metadata_mut().insert(key, value); } /// Sets a remapper for the grid. This can be used to 'upgrade' one-dimensional bin limits to @@ -1017,7 +1096,7 @@ pub unsafe extern "C" fn pineappl_lumi_add( pdg_id_pairs .chunks(2) .zip(factors) - .map(|x| (x.0[0], x.0[1], x.1)) + .map(|x| (vec![x.0[0], x.0[1]], x.1)) .collect(), )); } @@ -1076,12 +1155,12 @@ pub unsafe extern "C" fn pineappl_lumi_entry( entry .iter() - .flat_map(|(id1, id2, _)| vec![id1, id2]) + .flat_map(|(pids, _)| pids) .zip(pdg_ids.iter_mut()) .for_each(|(from, to)| *to = *from); entry .iter() - .map(|(_, _, factor)| factor) + .map(|(_, factor)| factor) .zip(factors.iter_mut()) .for_each(|(from, to)| *to = *from); } @@ -1276,3 +1355,400 @@ pub unsafe extern "C" fn pineappl_string_delete(string: *mut c_char) { mem::drop(unsafe { CString::from_raw(string) }); } } + +// Here starts the generalized C-API interface. 
+ +/// Type for defining the interpolation object +#[repr(C)] +pub struct InterpTuples { + node_min: f64, + node_max: f64, + nb_nodes: usize, + interp_degree: usize, + reweighting_method: ReweightMeth, + mapping: Map, + interpolation_method: InterpMeth, +} + +#[must_use] +fn construct_interpolation(interp: &InterpTuples) -> Interp { + Interp::new( + interp.node_min, + interp.node_max, + interp.nb_nodes, + interp.interp_degree, + interp.reweighting_method, + interp.mapping, + interp.interpolation_method, + ) +} + +/// An exact duplicate of `pineappl_lumi_entry` to make naming (lumi -> channel) consistent. +/// should be deleted using `pineappl_lumi_delete`. +#[no_mangle] +#[must_use] +pub extern "C" fn pineappl_channels_new() -> Box { + Box::default() +} + +/// Adds a generalized linear combination of initial states to the Luminosity. +/// +/// # Safety +/// +/// The parameter `lumi` must point to a valid `Lumi` object created by `pineappl_lumi_new`. +/// `pdg_id_combinations` must be an array with length `nb_combinations * combinations`, and +/// `factors` with length of `combinations`. The `nb_convolutions` describe the number of +/// parton distributions involved, while `combinations` represent the number of different +/// channel combinations. 
+#[no_mangle] +pub unsafe extern "C" fn pineappl_channels_add( + channels: *mut Lumi, + combinations: usize, + nb_convolutions: usize, + pdg_id_combinations: *const i32, + factors: *const f64, +) { + let channels = unsafe { &mut *channels }; + let pdg_id_pairs = + unsafe { slice::from_raw_parts(pdg_id_combinations, nb_convolutions * combinations) }; + let factors = if factors.is_null() { + vec![1.0; combinations] + } else { + unsafe { slice::from_raw_parts(factors, combinations) }.to_vec() + }; + + channels.0.push(Channel::new( + pdg_id_pairs + .chunks(nb_convolutions) + .zip(factors) + .map(|x| ((0..nb_convolutions).map(|i| x.0[i]).collect(), x.1)) + .collect(), + )); +} + +/// Creates a new and empty grid that can accept any number of convolutions. The creation requires +/// the following different sets of parameters: +/// - The PID basis `pid_basis`: The basis onto which the partons are mapped, can be `Evol` or `Pdg`. +/// - The channel function `channels`: A pointer to the luminosity function that specifies how the +/// cross section should be reconstructed. +/// - Order specification `orders` and `order_params`. Each `PineAPPL` grid contains a number of +/// different perturbative orders, specified by `orders`. The array `order_params` stores the +/// exponent of each perturbative order and must contain 4 integers denoting the exponent of the +/// string coupling, of the electromagnetic coupling, of the logarithm of the renormalization +/// scale, and finally of the logarithm of the factorization scale. +/// - The observable definition `bins` and `bin_limits`. Each `PineAPPL` grid can store observables +/// from a one-dimensional distribution. To this end `bins` specifies how many observables are +/// stored and `bin_limits` must contain `bins + 1` entries denoting the left and right limit for +/// each bin. 
+/// - The types of convolutions `convolution_types` and their numbers `nb_convolutions`: specify how +/// how many different convolutions are involved and their types - which are a cross product of the +/// the following combination: (unpolarized, polarized) ⊗ (PDF, Fragmentation Function). +/// - The PDG IDs of the involved initial- or final-state hadrons `pdg_ids`. +/// - The types of kinematics `kinematics`: specify the various kinematics required to construct the +/// Grid. These can be the energy scales and the various momentum fractions. +/// - The specifications of the interpolation methods `interpolations`: provide the specifications on +/// how each of the kinematics should be interpolated. +/// - The unphysical renormalization, factorization, and fragmentation scales: `mu_scales`. Its entries +/// have to be ordered following {ren, fac, frg}. The mapping is as follows: +/// `0` -> `ScaleFuncForm::NoScale`, ..., `n` -> `ScaleFuncForm::Scale(n - 1)`. +/// +/// # Safety +/// TODO +/// +/// # Panics +/// TODO +#[no_mangle] +#[must_use] +pub unsafe extern "C" fn pineappl_grid_new2( + pid_basis: PidBasis, + channels: *const Lumi, + orders: usize, + order_params: *const u8, + bins: usize, + bin_limits: *const f64, + nb_convolutions: usize, + convolution_types: *const ConvType, + pdg_ids: *const c_int, + kinematics: *const Kinematics, + interpolations: *const InterpTuples, + mu_scales: *const usize, +) -> Box { + // Luminosity channels + let channels = unsafe { &*channels }; + + // Perturbative orders + let order_params = unsafe { slice::from_raw_parts(order_params, 5 * orders) }; + let orders: Vec<_> = order_params + .chunks(5) + .map(|s| Order { + alphas: s[0], + alpha: s[1], + logxir: s[2], + logxif: s[3], + logxia: s[4], + }) + .collect(); + + // Bin limits + let bin_limits = unsafe { slice::from_raw_parts(bin_limits, bins + 1).to_vec() }; + + // Construct the convolution objects + let convolution_types = + unsafe { slice::from_raw_parts(convolution_types, 
nb_convolutions).to_vec() }; + let pdg_ids = unsafe { slice::from_raw_parts(pdg_ids, nb_convolutions).to_vec() }; + let convolutions = izip!(convolution_types.iter(), pdg_ids.iter()) + .map(|(&conv, &pdg_value)| Conv::new(conv, pdg_value)) + .collect(); + + // Grid interpolations + let interp_slices = unsafe { std::slice::from_raw_parts(interpolations, nb_convolutions + 1) }; + let interp_vecs: Vec = interp_slices.iter().map(construct_interpolation).collect(); + + // Construct the kinematic variables + let kinematics = unsafe { slice::from_raw_parts(kinematics, interp_vecs.len()).to_vec() }; + + // Scales. An array containing the values of {ren, fac, frg} + let mu_scales = unsafe { std::slice::from_raw_parts(mu_scales, 3) }; + let mu_scales_vec: Vec = mu_scales + .iter() + .map(|&scale| { + if scale == 0 { + ScaleFuncForm::NoScale + } else { + ScaleFuncForm::Scale(scale - 1) + } + }) + .collect(); + + Box::new(Grid::new( + pid_basis, + channels.0.clone(), + orders, + bin_limits, + convolutions, + interp_vecs, + kinematics, + Scales { + ren: mu_scales_vec[0].clone(), + fac: mu_scales_vec[1].clone(), + frg: mu_scales_vec[2].clone(), + }, + )) +} + +/// Similar to `pineappl_grid_fill` but accepts any given momentum fractions {`x1`, ...,`xn`} at +/// various energy scalesfor the given value of the `order`, `observable`, and `lumi` with `weight`. +/// +/// # Safety +/// +/// If `grid` does not point to a valid `Grid` object, for example when `grid` is the null pointer, +/// this function is not safe to call. 
+#[no_mangle] +pub unsafe extern "C" fn pineappl_grid_fill2( + grid: *mut Grid, + order: usize, + observable: f64, + channel: usize, + ntuple: *const f64, + weight: f64, +) { + let grid = unsafe { &mut *grid }; + let ntuple = unsafe { slice::from_raw_parts(ntuple, grid.kinematics().len()) }; + + grid.fill(order, observable, channel, ntuple, weight); +} + +/// Similar to `pineappl_grid_fill_all` but accepts any given momentum fractions {`x1`, ...,`xn`} at +/// various energy scalesfor the given value of the `order`, `observable`, and `lumi` with `weight`. +/// +/// # Safety +/// +/// If `grid` does not point to a valid `Grid` object, for example when `grid` is the null pointer, +/// this function is not safe to call. +#[no_mangle] +pub unsafe extern "C" fn pineappl_grid_fill_all2( + grid: *mut Grid, + order: usize, + observable: f64, + ntuple: *const f64, + weights: *const f64, +) { + let grid = unsafe { &mut *grid }; + let ntuple = unsafe { slice::from_raw_parts(ntuple, grid.kinematics().len()) }; + let weights = unsafe { slice::from_raw_parts(weights, grid.channels().len()) }; + + for (channel, &weight) in weights.iter().enumerate() { + grid.fill(order, observable, channel, ntuple, weight); + } +} + +/// Similar to `pineappl_grid_fill_array` but accepts any given momentum fractions +/// {`x1`, ...,`xn`} at various energy scalesfor the given value of the `order`, `observable`, +/// and `lumi` with `weight`. +/// +/// # Safety +/// +/// If `grid` does not point to a valid `Grid` object, for example when `grid` is the null pointer, +/// this function is not safe to call. Additionally, all remaining pointer parameters must be +/// arrays as long as specified by `size`. 
+#[no_mangle] +pub unsafe extern "C" fn pineappl_grid_fill_array2( + grid: *mut Grid, + orders: *const usize, + observables: *const f64, + ntuples: *const f64, + channels: *const usize, + weights: *const f64, + size: usize, +) { + let grid = unsafe { &mut *grid }; + let orders = unsafe { slice::from_raw_parts(orders, size) }; + let observables = unsafe { slice::from_raw_parts(observables, size) }; + let channels = unsafe { slice::from_raw_parts(channels, size) }; + let weights = unsafe { slice::from_raw_parts(weights, size) }; + + // Convert the 1D slice into a 2D array + let ntuples = unsafe { slice::from_raw_parts(ntuples, size * grid.kinematics().len()) }; + let ntuples_2d: Vec<&[f64]> = ntuples.chunks(grid.kinematics().len()).collect(); + + for (ntuple, &order, &observable, &channel, &weight) in + izip!(ntuples_2d, orders, observables, channels, weights) + { + grid.fill(order, observable, channel, ntuple, weight); + } +} + +/// Similar to `pineappl_lumi_entry` but for luminosity channels that involve 3 partons, i.e. +/// in the case of three convolutions. +/// +/// # Safety +/// +/// The parameter `lumi` must point to a valid `Lumi` object created by `pineappl_lumi_new` or +/// `pineappl_grid_lumi`. The parameter `factors` must point to an array as long as the size +/// returned by `pineappl_lumi_combinations` and `pdg_ids` must point to an array that is three +/// times as long. 
+#[no_mangle] +pub unsafe extern "C" fn pineappl_channels_entry( + channels: *const Lumi, + entry: usize, + pdg_ids: *mut i32, + factors: *mut f64, +) { + let channels = unsafe { &*channels }; + let entry = channels.0[entry].entry(); + let pdg_ids = unsafe { slice::from_raw_parts_mut(pdg_ids, 3 * entry.len()) }; + let factors = unsafe { slice::from_raw_parts_mut(factors, entry.len()) }; + + entry + .iter() + .flat_map(|(pids, _)| pids) + .zip(pdg_ids.iter_mut()) + .for_each(|(from, to)| *to = *from); + entry + .iter() + .map(|(_, factor)| factor) + .zip(factors.iter_mut()) + .for_each(|(from, to)| *to = *from); +} + +/// An extension of `pineappl_grid_order_params` that accounts for the order of the fragmentation +/// logs. +/// +/// # Safety +/// +/// If `grid` does not point to a valid `Grid` object, for example when `grid` is the null pointer, +/// this function is not safe to call. The pointer `order_params` must point to an array as large +/// as five times the number of orders in `grid`. +#[no_mangle] +pub unsafe extern "C" fn pineappl_grid_order_params2(grid: *const Grid, order_params: *mut u32) { + let grid = unsafe { &*grid }; + let orders = grid.orders(); + let order_params = unsafe { slice::from_raw_parts_mut(order_params, 5 * orders.len()) }; + + for (i, order) in orders.iter().enumerate() { + order_params[5 * i] = order.alphas.into(); + order_params[5 * i + 1] = order.alpha.into(); + order_params[5 * i + 2] = order.logxir.into(); + order_params[5 * i + 3] = order.logxif.into(); + order_params[5 * i + 4] = order.logxia.into(); + } +} + +/// A generalization of the convolution function. +/// +/// # Safety +/// +/// If `grid` does not point to a valid `Grid` object, for example when `grid` is the null pointer, +/// this function is not safe to call. The function pointers `xfxs` and `alphas` must not +/// be null pointers and point to valid functions. 
The parameters `order_mask` and `channel_mask` +/// must either be null pointers or point to arrays that are as long as `grid` has orders and +/// channels, respectively. Finally, `results` must be as long as `grid` has bins. +#[no_mangle] +pub unsafe extern "C" fn pineappl_grid_convolve( + grid: *const Grid, + xfxs: *const extern "C" fn(pdg_id: i32, x: f64, q2: f64, state: *mut c_void) -> f64, + alphas: extern "C" fn(q2: f64, state: *mut c_void) -> f64, + state: *mut c_void, + order_mask: *const bool, + channel_mask: *const bool, + bin_indices: *const usize, + nb_scales: usize, + mu_scales: *const f64, + results: *mut f64, +) { + let grid = unsafe { &*grid }; + + let order_mask = if order_mask.is_null() { + vec![] + } else { + unsafe { slice::from_raw_parts(order_mask, grid.orders().len()) }.to_owned() + }; + + let channel_mask = if channel_mask.is_null() { + vec![] + } else { + unsafe { slice::from_raw_parts(channel_mask, grid.channels().len()) }.to_vec() + }; + + let bin_indices = if bin_indices.is_null() { + &[] + } else { + unsafe { slice::from_raw_parts(bin_indices, grid.bin_info().bins()) } + }; + + // Construct the alphas and PDFs functions + let mut als = |q2| alphas(q2, state); + + let mut xfxs = unsafe { slice::from_raw_parts(xfxs, grid.convolutions().len()).to_vec() }; + let mut xfx_funcs: Vec<_> = xfxs + .iter_mut() + .map(|xfx| move |id, x, q2| xfx(id, x, q2, state)) + .collect(); + + // Construct the Convolution cache + let mut convolution_cache = ConvolutionCache::new( + grid.convolutions().to_vec(), + xfx_funcs + .iter_mut() + .map(|fx| fx as &mut dyn FnMut(i32, f64, f64) -> f64) + .collect(), + &mut als, + ); + + // The factorization, renormalization, and fragmentation scale factors + let mu_scales = if mu_scales.is_null() { + &[(1.0, 1.0, 1.0)] + } else { + unsafe { slice::from_raw_parts(mu_scales.cast::<(f64, f64, f64)>(), nb_scales) } + }; + + let results = unsafe { slice::from_raw_parts_mut(results, grid.bin_info().bins()) }; + + 
results.copy_from_slice(&grid.convolve( + &mut convolution_cache, + &order_mask, + bin_indices, + &channel_mask, + mu_scales, + )); +} diff --git a/pineappl_cli/Cargo.toml b/pineappl_cli/Cargo.toml index 3c569e5fc..6c74ccf6e 100644 --- a/pineappl_cli/Cargo.toml +++ b/pineappl_cli/Cargo.toml @@ -30,9 +30,9 @@ lhapdf = { package = "managed-lhapdf", version = "0.3.4" } lz4_flex = { optional = true, version = "0.9.2" } ndarray = "0.15.4" ndarray-npy = { optional = true, version = "0.8.1" } -pineappl = { path = "../pineappl", version = "=0.8.2" } -pineappl_applgrid = { optional = true, path = "../pineappl_applgrid", version = "=0.8.2" } -pineappl_fastnlo = { optional = true, path = "../pineappl_fastnlo", version = "=0.8.2" } +pineappl = { path = "../pineappl", version = "=1.0.0-alpha1" } +pineappl_applgrid = { optional = true, path = "../pineappl_applgrid", version = "=1.0.0-alpha1" } +pineappl_fastnlo = { optional = true, path = "../pineappl_fastnlo", version = "=1.0.0-alpha1" } prettytable-rs = { default-features = false, features = ["win_crlf"], version = "0.10.0" } rayon = "1.5.1" serde = { features = ["derive"], optional = true, version = "1.0.130" } diff --git a/pineappl_cli/src/analyze.rs b/pineappl_cli/src/analyze.rs index 3c644c487..ac77eeaaf 100644 --- a/pineappl_cli/src/analyze.rs +++ b/pineappl_cli/src/analyze.rs @@ -43,10 +43,10 @@ pub struct CkfOpts { conv_funs: ConvFuns, /// Order defining the K factors. #[arg(value_parser = helpers::parse_order)] - order: (u32, u32), + order: (u8, u8), /// Normalizing orders of the K factors. #[arg(value_delimiter = ',', value_parser = helpers::parse_order)] - orders_den: Vec<(u32, u32)>, + orders_den: Vec<(u8, u8)>, /// The maximum number of channels displayed. 
#[arg( default_value_t = 10, diff --git a/pineappl_cli/src/channels.rs b/pineappl_cli/src/channels.rs index 53173a1e6..f0bee66c6 100644 --- a/pineappl_cli/src/channels.rs +++ b/pineappl_cli/src/channels.rs @@ -49,7 +49,7 @@ pub struct Opts { value_delimiter = ',', value_parser = helpers::parse_order )] - orders: Vec<(u32, u32)>, + orders: Vec<(u8, u8)>, /// Do not sort the channels according to their size. #[arg(long)] dont_sort: bool, diff --git a/pineappl_cli/src/convolve.rs b/pineappl_cli/src/convolve.rs index b7a9d76a5..0c88dc85c 100644 --- a/pineappl_cli/src/convolve.rs +++ b/pineappl_cli/src/convolve.rs @@ -37,13 +37,16 @@ pub struct Opts { value_delimiter = ',', value_parser = helpers::parse_order )] - orders: Vec<(u32, u32)>, + orders: Vec<(u8, u8)>, /// Set the variation of the renormalization scale. #[arg(default_value = "1.0", long, num_args = 1)] xir: f64, /// Set the variation of the factorization scale. #[arg(default_value = "1.0", long, num_args = 1)] xif: f64, + /// Set the variation of the fragmentation scale. + #[arg(default_value = "1.0", long, num_args = 1)] + xia: f64, /// Set the number of fractional digits shown for absolute numbers. #[arg(default_value_t = 7, long, value_name = "ABS")] digits_abs: usize, @@ -64,7 +67,7 @@ impl Subcommand for Opts { &self.orders, &bins, &[], - &[(self.xir, self.xif)], + &[(self.xir, self.xif, self.xia)], if self.integrated { ConvoluteMode::Integrated } else { diff --git a/pineappl_cli/src/diff.rs b/pineappl_cli/src/diff.rs index 4e6e48585..9668d716e 100644 --- a/pineappl_cli/src/diff.rs +++ b/pineappl_cli/src/diff.rs @@ -34,7 +34,7 @@ pub struct Opts { value_delimiter = ',', value_parser = helpers::parse_order )] - orders1: Vec<(u32, u32)>, + orders1: Vec<(u8, u8)>, /// Select orders of the second grid. #[arg( long, @@ -42,7 +42,7 @@ pub struct Opts { value_delimiter = ',', value_parser = helpers::parse_order )] - orders2: Vec<(u32, u32)>, + orders2: Vec<(u8, u8)>, /// Scale all results of the first grid. 
#[arg(long, default_value = "1.0")] scale1: f64, @@ -183,10 +183,18 @@ impl Subcommand for Opts { let result1 = result1 * self.scale1; let result2 = result2 * self.scale2; + // ALLOW: here we really need an exact comparison + // TODO: change allow to `expect` if MSRV >= 1.81.0 + #[allow(clippy::float_cmp)] + let diff = if result1 == result2 { + 0.0 + } else { + result2 / result1 - 1.0 + }; + row.add_cell(cell!(r->format!("{:.*e}", self.digits_abs, result1))); row.add_cell(cell!(r->format!("{:.*e}", self.digits_abs, result2))); - row.add_cell(cell!(r->format!("{:.*e}", self.digits_rel, - if result1 == result2 { 0.0 } else { result2 / result1 - 1.0 }))); + row.add_cell(cell!(r->format!("{:.*e}", self.digits_rel, diff))); } } else { let orders = orders1; @@ -242,10 +250,19 @@ impl Subcommand for Opts { for (result1, result2) in order_results1.iter().zip(order_results2.iter()) { let result1 = result1[bin] * self.scale1; let result2 = result2[bin] * self.scale2; + + // ALLOW: here we really need an exact comparison + // TODO: change allow to `expect` if MSRV >= 1.81.0 + #[allow(clippy::float_cmp)] + let diff = if result1 == result2 { + 0.0 + } else { + result2 / result1 - 1.0 + }; + row.add_cell(cell!(r->format!("{:.*e}", self.digits_abs, result1))); row.add_cell(cell!(r->format!("{:.*e}", self.digits_abs, result2))); - row.add_cell(cell!(r->format!("{:.*e}", self.digits_rel, - if result1 == result2 { 0.0 } else { result2 / result1 - 1.0 }))); + row.add_cell(cell!(r->format!("{:.*e}", self.digits_rel, diff))); } } } diff --git a/pineappl_cli/src/evolve.rs b/pineappl_cli/src/evolve.rs index 23c476378..968bcb1e6 100644 --- a/pineappl_cli/src/evolve.rs +++ b/pineappl_cli/src/evolve.rs @@ -20,6 +20,7 @@ mod eko { use ndarray::iter::AxisIter; use ndarray::{Array4, Array5, Axis, CowArray, Ix4}; use ndarray_npy::{NpzReader, ReadNpyExt}; + use pineappl::convolutions::ConvType; use pineappl::evolution::OperatorSliceInfo; use pineappl::pids::{self, PidBasis}; use 
serde::Deserialize; @@ -73,9 +74,16 @@ mod eko { const BASES_V1_DEFAULT_PIDS: [i32; 14] = [22, -6, -5, -4, -3, -2, -1, 21, 1, 2, 3, 4, 5, 6]; + #[derive(Deserialize)] + struct OperatorConfigsV1 { + polarized: bool, + time_like: bool, + } + #[derive(Deserialize)] struct OperatorV1 { mu0: f64, + configs: OperatorConfigsV1, } #[derive(Deserialize)] @@ -162,6 +170,7 @@ mod eko { fac1: 0.0, pids1: metadata.targetpids, x1: metadata.targetgrid, + conv_type: ConvType::UnpolPDF, }, operator, }) @@ -240,6 +249,7 @@ mod eko { .rotations .targetgrid .unwrap_or(metadata.rotations.xgrid), + conv_type: ConvType::UnpolPDF, }, archive: Archive::new(File::open(eko_path)?), }) @@ -306,6 +316,10 @@ mod eko { .bases .targetgrid .unwrap_or_else(|| metadata.bases.xgrid.clone()), + conv_type: ConvType::new( + operator.configs.polarized, + operator.configs.time_like, + ), }, archive: Archive::new(File::open(eko_path)?), }) @@ -426,14 +440,13 @@ fn evolve_grid( grid: &Grid, ekos: &[&Path], use_alphas_from: &Pdf, - orders: &[(u32, u32)], + orders: &[(u8, u8)], xir: f64, xif: f64, - use_old_evolve: bool, + xia: f64, ) -> Result { - use anyhow::bail; use eko::EkoSlices; - use pineappl::evolution::{AlphasTable, OperatorInfo}; + use pineappl::evolution::AlphasTable; let order_mask: Vec<_> = grid .orders() @@ -450,54 +463,10 @@ fn evolve_grid( .iter() .map(|eko| EkoSlices::new(eko)) .collect::>()?; + let eko_slices: Vec<_> = eko_slices.iter_mut().collect(); let alphas_table = AlphasTable::from_grid(grid, xir, &|q2| use_alphas_from.alphas_q2(q2)); - if use_old_evolve { - assert_eq!(eko_slices.len(), 1); - - if let EkoSlices::V0 { - fac1, - info, - operator, - } = eko_slices.remove(0) - { - let op_info = OperatorInfo { - fac0: info.fac0, - pids0: info.pids0.clone(), - x0: info.x0.clone(), - fac1, - pids1: info.pids1.clone(), - x1: info.x1.clone(), - ren1: alphas_table.ren1, - alphas: alphas_table.alphas, - xir, - xif, - pid_basis: info.pid_basis, - }; - - #[allow(deprecated)] - 
Ok(grid.evolve(operator.view(), &op_info, &order_mask)?) - } else { - bail!("`--use-old-evolve` can only be used with the old EKO format (`V0`)") - } - } else { - match eko_slices.as_mut_slice() { - [eko] => { - Ok(grid.evolve_with_slice_iter(eko, &order_mask, (xir, xif), &alphas_table)?) - } - [eko_a, eko_b] => Ok(grid.evolve_with_slice_iter2( - eko_a, - eko_b, - &order_mask, - (xir, xif), - &alphas_table, - )?), - _ => unimplemented!( - "evolution with {} EKOs is not implemented", - eko_slices.len() - ), - } - } + Ok(grid.evolve(eko_slices, &order_mask, (xir, xif, xia), &alphas_table)?) } #[cfg(not(feature = "evolve"))] @@ -505,10 +474,10 @@ fn evolve_grid( _: &Grid, _: &[&Path], _: &Pdf, - _: &[(u32, u32)], + _: &[(u8, u8)], + _: f64, _: f64, _: f64, - _: bool, ) -> Result { Err(anyhow!( "you need to install `pineappl` with feature `evolve`" @@ -549,15 +518,16 @@ pub struct Opts { value_delimiter = ',', value_parser = helpers::parse_order )] - orders: Vec<(u32, u32)>, + orders: Vec<(u8, u8)>, /// Rescale the renormalization scale with this factor. #[arg(default_value_t = 1.0, long)] xir: f64, /// Rescale the factorization scale with this factor. #[arg(default_value_t = 1.0, long)] xif: f64, - #[arg(hide = true, long)] - use_old_evolve: bool, + /// Rescale the fragmentation scale with this factor. 
+ #[arg(default_value_t = 1.0, long)] + xia: f64, } impl Subcommand for Opts { @@ -572,7 +542,7 @@ impl Subcommand for Opts { &self.orders, &[], &[], - &[(self.xir, self.xif)], + &[(self.xir, self.xif, self.xia)], ConvoluteMode::Normal, cfg, ); @@ -587,7 +557,7 @@ impl Subcommand for Opts { &self.orders, self.xir, self.xif, - self.use_old_evolve, + self.xia, )?; let evolved_results = helpers::convolve_scales( @@ -596,7 +566,7 @@ impl Subcommand for Opts { &[], &[], &[], - &[(1.0, 1.0)], + &[(1.0, 1.0, 1.0)], ConvoluteMode::Normal, cfg, ); @@ -614,7 +584,9 @@ impl Subcommand for Opts { .zip(evolved_results.into_iter()) .enumerate() { - // catches the case where both results are zero + // ALLOW: here we really need an exact comparison + // TODO: change allow to `expect` if MSRV >= 1.81.0 + #[allow(clippy::float_cmp)] let rel_diff = if one == two { 0.0 } else { two / one - 1.0 }; if rel_diff.abs() > self.accuracy { diff --git a/pineappl_cli/src/export.rs b/pineappl_cli/src/export.rs index e4ee819d5..492600d23 100644 --- a/pineappl_cli/src/export.rs +++ b/pineappl_cli/src/export.rs @@ -118,13 +118,14 @@ impl Subcommand for Opts { alpha, logxir, logxif, + logxia, } in grid .orders() .iter() .zip(order_mask.iter()) .filter_map(|(order, keep)| (!keep).then_some(order.clone())) { - println!("WARNING: the order O(as^{alphas} a^{alpha} lr^{logxir} lf^{logxif}) isn't supported by {grid_type} and will be skipped."); + println!("WARNING: the order O(as^{alphas} a^{alpha} lr^{logxir} lf^{logxif} la^{logxia}) isn't supported by {grid_type} and will be skipped."); } let orders: Vec<_> = grid @@ -138,10 +139,12 @@ impl Subcommand for Opts { alpha, logxir, logxif, + logxia, }, keep, )| { - (keep && (logxir == 0) && (logxif == 0)).then_some((alphas, alpha)) + (keep && (logxir == 0) && (logxif == 0) && (logxia == 0)) + .then_some((alphas, alpha)) }, ) .collect(); @@ -182,20 +185,19 @@ impl Subcommand for Opts { // catches the case where both results are zero let rel_diffs: Vec<_> = 
one .iter() - .zip(two.iter()) - .map(|(a, b)| if a == b { 0.0 } else { b / a - 1.0 }) + .zip(two) + .map(|(&a, &b)| { + // ALLOW: here we really need an exact comparison + // TODO: change allow to `expect` if MSRV >= 1.81.0 + #[allow(clippy::float_cmp)] + if a == b { + 0.0 + } else { + b / a - 1.0 + } + }) .collect(); - let max_rel_diff = rel_diffs - .iter() - .max_by(|a, b| a.abs().partial_cmp(&b.abs()).unwrap()) - .unwrap() - .abs(); - - if max_rel_diff > self.accuracy { - different = true; - } - let mut row = row![ bin.to_string(), r->format!("{:.*e}", self.digits_abs, one[0]), @@ -203,7 +205,22 @@ impl Subcommand for Opts { r->format!("{:.*e}", self.digits_rel, rel_diffs[0]) ]; + if rel_diffs[0].abs() > self.accuracy { + different = true; + } + if scale_variations > 1 { + // skip central scale choice + let &max_rel_diff = rel_diffs[1..] + .iter() + .max_by(|a, b| a.abs().total_cmp(&b.abs())) + // UNWRAP: in this branch we know there are scale variations + .unwrap(); + + if max_rel_diff.abs() > self.accuracy { + different = true; + } + row.add_cell(cell!(r->format!("{:.*e}", self.digits_rel, max_rel_diff))); } diff --git a/pineappl_cli/src/export/applgrid.rs b/pineappl_cli/src/export/applgrid.rs index 2f3ea6eb1..5b5429987 100644 --- a/pineappl_cli/src/export/applgrid.rs +++ b/pineappl_cli/src/export/applgrid.rs @@ -1,52 +1,93 @@ -use anyhow::{anyhow, bail, Result}; +use anyhow::{bail, Result}; use cxx::{let_cxx_string, UniquePtr}; -use float_cmp::approx_eq; +use float_cmp::assert_approx_eq; use lhapdf::Pdf; use ndarray::{s, Axis}; -use pineappl::boc::Order; -use pineappl::convolutions::Convolution; +use pineappl::boc::{Kinematics, Order}; use pineappl::grid::Grid; -use pineappl::subgrid::{Mu2, Subgrid, SubgridParams}; +use pineappl::interpolation::{Interp, InterpMeth, Map, ReweightMeth}; +use pineappl::subgrid::{self, Subgrid}; use pineappl_applgrid::ffi::{self, grid}; -use std::borrow::Cow; use std::f64::consts::TAU; use std::iter; use std::path::Path; use 
std::pin::Pin; -fn reconstruct_subgrid_params(grid: &Grid, order: usize, bin: usize) -> Result { - let mut result = SubgridParams::default(); +fn reconstruct_subgrid_params(grid: &Grid, order: usize, bin: usize) -> Result> { + if grid + .kinematics() + .iter() + .filter(|kin| matches!(kin, Kinematics::Scale(_))) + .count() + > 1 + { + bail!("APPLgrid does not support grids with more than one scale"); + } - let mu2_grid: Vec<_> = grid + let mut mu2_grid: Vec<_> = grid .subgrids() .slice(s![order, bin, ..]) .iter() - .map(|subgrid| { - subgrid - .mu2_grid() - .iter() - .map(|&Mu2 { ren, fac }| { - if !approx_eq!(f64, ren, fac, ulps = 128) { - bail!("subgrid has mur2 != muf2, which APPLgrid does not support"); - } - - Ok(fac) - }) - .collect::>>() + .filter(|subgrid| !subgrid.is_empty()) + .flat_map(|subgrid| { + grid.scales() + .fac + .calc(&subgrid.node_values(), grid.kinematics()) + .into_owned() }) - .collect::>()?; - let mut mu2_grid: Vec<_> = mu2_grid.into_iter().flatten().collect(); - mu2_grid.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = 128)); - let mu2_grid = mu2_grid.as_slice(); - - if let &[fac] = mu2_grid { - result.set_q2_bins(1); - result.set_q2_max(fac); - result.set_q2_min(fac); - result.set_q2_order(0); - } + .collect(); + mu2_grid.dedup_by(subgrid::node_value_eq_ref_mut); // TODO: implement the general case + let mut result = vec![ + Interp::new( + 2e-7, + 1.0, + 50, + 3, + ReweightMeth::ApplGridX, + Map::ApplGridF2, + InterpMeth::Lagrange, + ), + Interp::new( + 2e-7, + 1.0, + 50, + 3, + ReweightMeth::ApplGridX, + Map::ApplGridF2, + InterpMeth::Lagrange, + ), + ]; + + if let &[fac] = mu2_grid.as_slice() { + result.insert( + 0, + Interp::new( + fac, + fac, + 1, + 0, + ReweightMeth::NoReweight, + Map::ApplGridH0, + InterpMeth::Lagrange, + ), + ); + } else { + result.insert( + 0, + Interp::new( + 1e2, + 1e8, + 40, + 3, + ReweightMeth::NoReweight, + Map::ApplGridH0, + InterpMeth::Lagrange, + ), + ); + } + Ok(result) } @@ -69,38 +110,42 @@ pub fn 
convert_into_applgrid( bail!("grid has non-consecutive bin limits, which APPLgrid does not support"); } + if grid.convolutions().len() > 2 { + bail!("APPLgrid does not support grids with more than two convolutions"); + } + let lumis = grid.channels().len(); - let has_pdf1 = grid.convolutions()[0] != Convolution::None; - let has_pdf2 = grid.convolutions()[1] != Convolution::None; + let has_pdf1 = !grid.convolutions().is_empty(); + let has_pdf2 = grid.convolutions().get(1).is_some(); // TODO: check that PDG MC IDs are used - let combinations: Vec<_> = iter::once(lumis.try_into().unwrap()) - .chain( - grid.channels() - .iter() - .enumerate() - .flat_map(|(index, entry)| { - [ - index.try_into().unwrap(), - entry.entry().len().try_into().unwrap(), - ] - .into_iter() - .chain(entry.entry().iter().flat_map(|&(a, b, factor)| { - // TODO: if the factors aren't trivial, we have to find some other way to - // propagate them - assert_eq!(factor, 1.0); - - match (has_pdf1, has_pdf2) { - (true, true) => [a, b], - (true, false) => [a, 0], - (false, true) => [b, 0], - (false, false) => unreachable!(), - } - })) - }), - ) - .collect(); + let combinations: Vec<_> = + iter::once(lumis.try_into().unwrap()) + .chain( + grid.channels() + .iter() + .enumerate() + .flat_map(|(index, entry)| { + [ + index.try_into().unwrap(), + entry.entry().len().try_into().unwrap(), + ] + .into_iter() + .chain(entry.entry().iter().flat_map(|&(ref pids, factor)| { + // TODO: if the factors aren't trivial, we have to find some other way + // to propagate them + assert_approx_eq!(f64, factor, 1.0, ulps = 4); + + pids.iter() + .copied() + .chain(iter::repeat(0)) + .take(2) + .collect::>() + })) + }), + ) + .collect(); // `id` must end with '.config' for APPLgrid to know its type is `lumi_pdf` let id = "PineAPPL-Lumi.config"; @@ -133,14 +178,8 @@ pub fn convert_into_applgrid( .unwrap() - lo_alphas; - let mut applgrid = ffi::make_empty_grid( - &limits, - id, - lo_alphas.try_into().unwrap(), - 
loops.try_into().unwrap(), - "f2", - "h0", - ); + let mut applgrid = + ffi::make_empty_grid(&limits, id, lo_alphas.into(), loops.into(), "f2", "h0"); for (appl_order, order) in order_mask .iter() @@ -148,7 +187,7 @@ pub fn convert_into_applgrid( .filter_map(|(index, keep)| keep.then_some(index)) .enumerate() { - let factor = TAU.powi(grid.orders()[order].alphas.try_into().unwrap()); + let factor = TAU.powi(grid.orders()[order].alphas.into()); for (bin, subgrids) in grid .subgrids() @@ -156,46 +195,55 @@ pub fn convert_into_applgrid( .axis_iter(Axis(0)) .enumerate() { - let p = reconstruct_subgrid_params(grid, order, bin)?; + let interps = reconstruct_subgrid_params(grid, order, bin)?; + // TODO: support DIS case + assert_eq!(interps.len(), 3); + + // TODO: make sure interps[1] is the same as interps[2] let mut igrid = ffi::make_igrid( - p.q2_bins().try_into().unwrap(), - p.q2_min(), - p.q2_max(), - p.q2_order().try_into().unwrap(), - p.x_bins().try_into().unwrap(), - p.x_min(), - p.x_max(), - p.x_order().try_into().unwrap(), - "f2", - "h0", + interps[0].nodes().try_into().unwrap(), + interps[0].min(), + interps[0].max(), + interps[0].order().try_into().unwrap(), + interps[1].nodes().try_into().unwrap(), + interps[1].min(), + interps[1].max(), + interps[1].order().try_into().unwrap(), + match interps[1].map() { + Map::ApplGridF2 => "f2", + map @ Map::ApplGridH0 => panic!("export does not support {map:?}"), + }, + match interps[0].map() { + Map::ApplGridH0 => "h0", + map @ Map::ApplGridF2 => panic!("export does not support {map:?}"), + }, grid.channels().len().try_into().unwrap(), - has_pdf1 != has_pdf2, + grid.convolutions().len() == 1, ); let appl_q2: Vec<_> = (0..igrid.Ntau()).map(|i| igrid.getQ2(i)).collect(); let appl_x1: Vec<_> = (0..igrid.Ny1()).map(|i| igrid.getx1(i)).collect(); let appl_x2: Vec<_> = (0..igrid.Ny2()).map(|i| igrid.getx2(i)).collect(); - for (lumi, subgrid) in subgrids.iter().enumerate() { - let appl_q2_idx: Vec<_> = subgrid - .mu2_grid() + 
for (channel, subgrid) in subgrids + .iter() + .enumerate() + .filter(|(_, subgrid)| !subgrid.is_empty()) + { + let appl_q2_idx: Vec<_> = grid.scales().fac.calc(&subgrid.node_values(), grid.kinematics()) .iter() - .map(|&Mu2 { ren, fac }| { - if !approx_eq!(f64, ren, fac, ulps = 128) { - bail!("subgrid has mur2 != muf2, which APPLgrid does not support"); - } + .map(|&fac| { appl_q2 .iter() - .position(|&x| approx_eq!(f64, x, fac, ulps = 128)) + .position(|&x| subgrid::node_value_eq(x, fac)) .map_or_else( || { if discard_non_matching_scales { Ok(-1) } else { - Err(anyhow!( - "factorization scale muf2 = {} not found in APPLgrid", - fac - )) + bail!( + "factorization scale muf2 = {fac} not found in APPLgrid", + ) } }, |idx| Ok(idx.try_into().unwrap()), @@ -204,12 +252,54 @@ pub fn convert_into_applgrid( .collect::>()?; // in the DIS case APPLgrid always uses the first x dimension + let (x1_grid, x2_grid) = if has_pdf1 && has_pdf2 { - (subgrid.x1_grid(), subgrid.x2_grid()) + ( + grid.kinematics() + .iter() + .zip(subgrid.node_values()) + .find_map(|(kin, node_values)| { + matches!(kin, &Kinematics::X(idx) if idx == 0) + .then_some(node_values) + }) + // TODO: convert this into an error + .unwrap(), + grid.kinematics() + .iter() + .zip(subgrid.node_values()) + .find_map(|(kin, node_values)| { + matches!(kin, &Kinematics::X(idx) if idx == 1) + .then_some(node_values) + }) + // TODO: convert this into an error + .unwrap(), + ) } else if has_pdf1 { - (subgrid.x1_grid(), Cow::Owned(vec![])) + ( + grid.kinematics() + .iter() + .zip(subgrid.node_values()) + .find_map(|(kin, node_values)| { + matches!(kin, &Kinematics::X(idx) if idx == 0) + .then_some(node_values) + }) + // TODO: convert this into an error + .unwrap(), + Vec::new(), + ) } else { - (subgrid.x2_grid(), Cow::Owned(vec![])) + ( + grid.kinematics() + .iter() + .zip(subgrid.node_values()) + .find_map(|(kin, node_values)| { + matches!(kin, &Kinematics::X(idx) if idx == 1) + .then_some(node_values) + }) + // TODO: 
convert this into an error + .unwrap(), + Vec::new(), + ) }; let appl_x1_idx: Vec<_> = x1_grid @@ -217,14 +307,9 @@ pub fn convert_into_applgrid( .map(|&x1| { appl_x1 .iter() - .position(|&x| approx_eq!(f64, x, x1, ulps = 128)) + .position(|&x| subgrid::node_value_eq(x, x1)) .map_or_else( - || { - Err(anyhow!( - "momentum fraction x1 = {} not found in APPLgrid", - x1 - )) - }, + || bail!("momentum fraction x1 = {x1} not found in APPLgrid"), |idx| Ok(idx.try_into().unwrap()), ) }) @@ -234,29 +319,29 @@ pub fn convert_into_applgrid( .map(|&x2| { appl_x2 .iter() - .position(|&x| approx_eq!(f64, x, x2, ulps = 128)) + .position(|&x| subgrid::node_value_eq(x, x2)) .map_or_else( - || { - Err(anyhow!( - "momentum fraction x2 = {} not found in APPLgrid", - x2 - )) - }, + || bail!("momentum fraction x2 = {x2} not found in APPLgrid"), |idx| Ok(idx.try_into().unwrap()), ) }) .collect::>()?; - let mut weightgrid = ffi::igrid_weightgrid(igrid.pin_mut(), lumi); + let mut weightgrid = ffi::igrid_weightgrid(igrid.pin_mut(), channel); - for ((iq2, ix1, ix2), value) in subgrid.indexed_iter() { + for (indices, value) in subgrid.indexed_iter() { + // TODO: here we assume that all X are consecutive starting from the second + // element and are in ascending order + let iq2 = indices[0]; let appl_q2_idx = appl_q2_idx[iq2]; if appl_q2_idx == -1 { if value != 0.0 { println!( "WARNING: discarding non-matching scale muf2 = {}", - subgrid.mu2_grid()[iq2].fac + grid.scales() + .fac + .calc(&subgrid.node_values(), grid.kinematics())[iq2] ); } @@ -266,9 +351,9 @@ pub fn convert_into_applgrid( ffi::sparse_matrix_set( weightgrid.as_mut(), appl_q2_idx, - appl_x1_idx[ix1], + appl_x1_idx[indices[1]], if has_pdf1 && has_pdf2 { - appl_x2_idx[ix2] + appl_x2_idx[indices[2]] } else { 0 }, diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index c74e7fc64..c25b8033c 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -1,8 +1,9 @@ use super::GlobalConfiguration; 
use anyhow::{anyhow, ensure, Context, Error, Result}; use lhapdf::{Pdf, PdfSet}; -use ndarray::Array3; -use pineappl::convolutions::LumiCache; +use ndarray::{Array3, Ix3}; +use pineappl::boc::{ScaleFuncForm, Scales}; +use pineappl::convolutions::{Conv, ConvType, ConvolutionCache}; use pineappl::grid::Grid; use prettytable::format::{FormatBuilder, LinePosition, LineSeparator}; use prettytable::Table; @@ -129,30 +130,73 @@ pub fn create_table() -> Table { table } -pub const SCALES_VECTOR: [(f64, f64); 9] = [ - (1.0, 1.0), - (2.0, 2.0), - (0.5, 0.5), - (2.0, 1.0), - (1.0, 2.0), - (0.5, 1.0), - (1.0, 0.5), - (2.0, 0.5), - (0.5, 2.0), +pub const SCALES_VECTOR_REN_FAC: [(f64, f64, f64); 9] = [ + (1.0, 1.0, 1.0), + (2.0, 2.0, 1.0), + (0.5, 0.5, 1.0), + (2.0, 1.0, 1.0), + (1.0, 2.0, 1.0), + (0.5, 1.0, 1.0), + (1.0, 0.5, 1.0), + (2.0, 0.5, 1.0), + (0.5, 2.0, 1.0), +]; + +const SCALES_VECTOR_REN_FRG: [(f64, f64, f64); 9] = [ + (1.0, 1.0, 1.0), + (2.0, 1.0, 2.0), + (0.5, 1.0, 0.5), + (2.0, 1.0, 1.0), + (1.0, 1.0, 2.0), + (0.5, 1.0, 1.0), + (1.0, 1.0, 0.5), + (2.0, 1.0, 0.5), + (0.5, 1.0, 2.0), +]; + +const SCALES_VECTOR_27: [(f64, f64, f64); 27] = [ + (1.0, 1.0, 1.0), + (2.0, 2.0, 2.0), + (0.5, 0.5, 0.5), + (0.5, 0.5, 1.0), + (0.5, 1.0, 0.5), + (0.5, 1.0, 1.0), + (0.5, 1.0, 2.0), + (1.0, 0.5, 0.5), + (1.0, 0.5, 1.0), + (1.0, 1.0, 0.5), + (1.0, 1.0, 2.0), + (1.0, 2.0, 1.0), + (1.0, 2.0, 2.0), + (2.0, 1.0, 0.5), + (2.0, 1.0, 1.0), + (2.0, 1.0, 2.0), + (2.0, 2.0, 1.0), + (2.0, 0.5, 0.5), + (0.5, 2.0, 0.5), + (1.0, 2.0, 0.5), + (2.0, 2.0, 0.5), + (2.0, 0.5, 1.0), + (0.5, 2.0, 1.0), + (0.5, 0.5, 2.0), + (1.0, 0.5, 2.0), + (2.0, 0.5, 2.0), + (0.5, 2.0, 2.0), ]; pub fn labels_and_units(grid: &Grid, integrated: bool) -> (Vec<(String, &str)>, &str, &str) { - let key_values = grid.key_values(); + let metadata = grid.metadata(); ( (0..grid.bin_info().dimensions()) .map(|d| { ( - key_values - .and_then(|kv| kv.get(&format!("x{}_label", d + 1)).cloned()) + metadata + 
.get(&format!("x{}_label", d + 1)) + .cloned() .unwrap_or_else(|| format!("x{}", d + 1)), - key_values - .and_then(|kv| kv.get(&format!("x{}_unit", d + 1))) + metadata + .get(&format!("x{}_unit", d + 1)) .map_or("", String::as_str), ) }) @@ -160,16 +204,12 @@ pub fn labels_and_units(grid: &Grid, integrated: bool) -> (Vec<(String, &str)>, if integrated { "integ" } else { - key_values - .and_then(|kv| kv.get("y_label").map(String::as_str)) - .unwrap_or("diff") + metadata.get("y_label").map_or("diff", String::as_str) }, if integrated { "" // TODO: compute the units for the integrated cross section } else { - key_values - .and_then(|kv| kv.get("y_unit").map(String::as_str)) - .unwrap_or("") + metadata.get("y_unit").map_or("", String::as_str) }, ) } @@ -184,10 +224,10 @@ pub enum ConvoluteMode { pub fn convolve_scales( grid: &Grid, conv_funs: &mut [Pdf], - orders: &[(u32, u32)], + orders: &[(u8, u8)], bins: &[usize], channels: &[bool], - scales: &[(f64, f64)], + scales: &[(f64, f64, f64)], mode: ConvoluteMode, cfg: &GlobalConfiguration, ) -> Vec { @@ -232,39 +272,41 @@ pub fn convolve_scales( } }) .collect(); + let xfx: Vec<_> = funs + .iter_mut() + .map(|fun| fun as &mut dyn FnMut(i32, f64, f64) -> f64) + .collect(); let mut alphas_funs: Vec<_> = conv_funs .iter() .map(|fun| move |q2| fun.alphas_q2(q2)) .collect(); - let pdg_ids: Vec<_> = conv_funs + let convolutions: Vec<_> = conv_funs .iter() - .map(|fun| { - // if the field 'Particle' is missing we assume it's a proton PDF - fun.set() + .zip(grid.convolutions()) + .map(|(fun, convolution)| { + let pid = fun + .set() .entry("Particle") + // if the field 'Particle' is missing we assume it's a proton PDF .map_or(Ok(2212), |string| string.parse::()) // UNWRAP: if this fails, there's a non-integer string in the LHAPDF info file - .unwrap() + .unwrap(); + + match fun.set().entry("SetType").unwrap_or_default().as_str() { + "fragfn" => Conv::new(ConvType::UnpolFF, pid), + "" => { + // if we can not figure out the type of 
the convolution from the PDF set, we + // assume it from the grid convolution at the same index + convolution.with_pid(pid) + } + // TODO: convince the LHAPDF maintainers to make SetType necessary for polarized + // PDFs and all FFs + _ => unimplemented!(), + } }) .collect(); - // TODO: write a new constructor of `LumiCache` that accepts a vector of all the arguments - let mut cache = match funs.as_mut_slice() { - [funs0] => LumiCache::with_one(pdg_ids[0], funs0, &mut alphas_funs[cfg.use_alphas_from]), - [funs0, funs1] => LumiCache::with_two( - pdg_ids[0], - funs0, - pdg_ids[1], - funs1, - &mut alphas_funs[cfg.use_alphas_from], - ), - // TODO: convert this into an error - _ => panic!( - "convolutions with {} convolution functions is not supported", - conv_funs.len() - ), - }; - + let mut cache = ConvolutionCache::new(convolutions, xfx, &mut alphas_funs[cfg.use_alphas_from]); let mut results = grid.convolve(&mut cache, &orders, bins, channels, scales); match mode { @@ -307,10 +349,26 @@ pub fn convolve_scales( } } +pub fn scales_vector(grid: &Grid, scales: usize) -> &[(f64, f64, f64)] { + let Scales { fac, frg, .. 
} = grid.scales(); + + match (fac, frg, scales) { + (_, _, 1) => &SCALES_VECTOR_27[0..1], + (_, _, 3) => &SCALES_VECTOR_27[0..3], + (_, ScaleFuncForm::NoScale, 7) => &SCALES_VECTOR_REN_FAC[0..7], + (_, ScaleFuncForm::NoScale, 9) => &SCALES_VECTOR_REN_FAC[..], + (ScaleFuncForm::NoScale, _, 7) => &SCALES_VECTOR_REN_FRG[0..7], + (ScaleFuncForm::NoScale, _, 9) => &SCALES_VECTOR_REN_FRG[..], + (_, _, 17) => &SCALES_VECTOR_27[0..17], + (_, _, 27) => &SCALES_VECTOR_27[..], + _ => unreachable!(), + } +} + pub fn convolve( grid: &Grid, conv_funs: &mut [Pdf], - orders: &[(u32, u32)], + orders: &[(u8, u8)], bins: &[usize], lumis: &[bool], scales: usize, @@ -323,7 +381,7 @@ pub fn convolve( orders, bins, lumis, - &SCALES_VECTOR[0..scales], + scales_vector(grid, scales), mode, cfg, ) @@ -382,40 +440,47 @@ pub fn convolve_subgrid( } }) .collect(); + let xfx: Vec<_> = funs + .iter_mut() + .map(|fun| fun as &mut dyn FnMut(i32, f64, f64) -> f64) + .collect(); let mut alphas_funs: Vec<_> = conv_funs .iter() .map(|fun| move |q2| fun.alphas_q2(q2)) .collect(); - let pdg_ids: Vec<_> = conv_funs + let convolutions: Vec<_> = conv_funs .iter() - .map(|fun| { - // if the field 'Particle' is missing we assume it's a proton PDF - fun.set() + .zip(grid.convolutions()) + .map(|(fun, convolution)| { + let pid = fun + .set() .entry("Particle") + // if the field 'Particle' is missing we assume it's a proton PDF .map_or(Ok(2212), |string| string.parse::()) // UNWRAP: if this fails, there's a non-integer string in the LHAPDF info file - .unwrap() + .unwrap(); + + match fun.set().entry("SetType").unwrap_or_default().as_str() { + "fragfn" => Conv::new(ConvType::UnpolFF, pid), + "" => { + // if we can not figure out the type of the convolution from the PDF set, we + // assume it from the grid convolution at the same index + convolution.with_pid(pid) + } + // TODO: convince the LHAPDF maintainers to make SetType necessary for polarized + // PDFs and all FFs + _ => unimplemented!(), + } }) .collect(); - 
// TODO: write a new constructor of `LumiCache` that accepts a vector of all the arguments - let mut cache = match funs.as_mut_slice() { - [funs0] => LumiCache::with_one(pdg_ids[0], funs0, &mut alphas_funs[cfg.use_alphas_from]), - [funs0, funs1] => LumiCache::with_two( - pdg_ids[0], - funs0, - pdg_ids[1], - funs1, - &mut alphas_funs[cfg.use_alphas_from], - ), - // TODO: convert this into an error - _ => panic!( - "convolutions with {} convolution functions is not supported", - conv_funs.len() - ), - }; - - grid.convolve_subgrid(&mut cache, order, bin, lumi, 1.0, 1.0) + let mut cache = ConvolutionCache::new(convolutions, xfx, &mut alphas_funs[cfg.use_alphas_from]); + let subgrid = grid.convolve_subgrid(&mut cache, order, bin, lumi, (1.0, 1.0, 1.0)); + + subgrid + .into_dimensionality::() + .map_err(|_| anyhow!("Only 3-dimensional subgrids are supported",)) + .unwrap() } pub fn parse_integer_range(range: &str) -> Result> { @@ -437,7 +502,7 @@ pub fn parse_integer_range(range: &str) -> Result> { } } -pub fn parse_order(order: &str) -> Result<(u32, u32)> { +pub fn parse_order(order: &str) -> Result<(u8, u8)> { let mut alphas = 0; let mut alpha = 0; @@ -455,14 +520,14 @@ pub fn parse_order(order: &str) -> Result<(u32, u32)> { .chars() .take_while(|c| c.is_numeric()) .count(); - alphas = str::parse::(&order[index + 2..index + 2 + len]) + alphas = str::parse::(&order[index + 2..index + 2 + len]) .context(format!("unable to parse order '{order}'"))?; } else { let len = order[index + 1..] 
.chars() .take_while(|c| c.is_numeric()) .count(); - alpha = str::parse::(&order[index + 1..index + 1 + len]) + alpha = str::parse::(&order[index + 1..index + 1 + len]) .context(format!("unable to parse order '{order}'"))?; } } diff --git a/pineappl_cli/src/import.rs b/pineappl_cli/src/import.rs index e349640bf..eb82ac1e6 100644 --- a/pineappl_cli/src/import.rs +++ b/pineappl_cli/src/import.rs @@ -18,9 +18,8 @@ mod fktable; #[cfg(feature = "applgrid")] fn convert_applgrid( input: &Path, - alpha: u32, + alpha: u8, conv_funs: &mut [Pdf], - dis_pid: i32, _: usize, ) -> Result<(&'static str, Grid, Vec, usize)> { use pineappl_applgrid::ffi; @@ -28,7 +27,7 @@ fn convert_applgrid( // TODO: check AMCATNLO scale variations let mut grid = ffi::make_grid(input.to_str().unwrap())?; - let pgrid = applgrid::convert_applgrid(grid.pin_mut(), alpha, dis_pid)?; + let pgrid = applgrid::convert_applgrid(grid.pin_mut(), alpha)?; let results = applgrid::convolve_applgrid(grid.pin_mut(), conv_funs); Ok(("APPLgrid", pgrid, results, 1)) @@ -37,9 +36,8 @@ fn convert_applgrid( #[cfg(not(feature = "applgrid"))] fn convert_applgrid( _: &Path, - _: u32, + _: u8, _: &mut [Pdf], - _: i32, _: usize, ) -> Result<(&'static str, Grid, Vec, usize)> { Err(anyhow!( @@ -50,10 +48,9 @@ fn convert_applgrid( #[cfg(feature = "fastnlo")] fn convert_fastnlo( input: &Path, - alpha: u32, + alpha: u8, conv_funs: &ConvFuns, member: usize, - dis_pid: i32, scales: usize, fnlo_mur: Option<&str>, fnlo_muf: Option<&str>, @@ -81,7 +78,7 @@ fn convert_fastnlo( } } - let grid = fastnlo::convert_fastnlo_table(&file, alpha, dis_pid)?; + let grid = fastnlo::convert_fastnlo_table(&file, alpha)?; let mut reader = ffi::downcast_lhapdf_to_reader_mut(file.as_mut().unwrap()); // TODO: scale-variation log conversion is only enabled for flex grids @@ -91,10 +88,11 @@ fn convert_fastnlo( 1 }; - let unpermuted_results: Vec<_> = helpers::SCALES_VECTOR[0..scales] + // fastNLO does not support a fragmentation scale + let 
unpermuted_results: Vec<_> = helpers::SCALES_VECTOR_REN_FAC[0..scales] .iter() - .map(|&(mur, muf)| { - if !reader.as_mut().SetScaleFactorsMuRMuF(mur, muf) { + .map(|&(xir, xif, _)| { + if !reader.as_mut().SetScaleFactorsMuRMuF(xir, xif) { return None; } reader.as_mut().CalcCrossSection(); @@ -119,10 +117,9 @@ fn convert_fastnlo( #[cfg(not(feature = "fastnlo"))] fn convert_fastnlo( _: &Path, - _: u32, + _: u8, _: &ConvFuns, _: usize, - _: i32, _: usize, _: Option<&str>, _: Option<&str>, @@ -133,14 +130,14 @@ fn convert_fastnlo( } #[cfg(feature = "fktable")] -fn convert_fktable(input: &Path, dis_pid: i32) -> Result<(&'static str, Grid, Vec, usize)> { - let fktable = fktable::convert_fktable(input, dis_pid)?; +fn convert_fktable(input: &Path) -> Result<(&'static str, Grid, Vec, usize)> { + let fktable = fktable::convert_fktable(input)?; Ok(("fktable", fktable, vec![], 1)) } #[cfg(not(feature = "fktable"))] -fn convert_fktable(_: &Path, _: i32) -> Result<(&'static str, Grid, Vec, usize)> { +fn convert_fktable(_: &Path) -> Result<(&'static str, Grid, Vec, usize)> { Err(anyhow!( "you need to install `pineappl` with feature `fktable`" )) @@ -148,11 +145,10 @@ fn convert_fktable(_: &Path, _: i32) -> Result<(&'static str, Grid, Vec, us fn convert_grid( input: &Path, - alpha: u32, + alpha: u8, conv_funs: &mut [Pdf], fun_names: &ConvFuns, member: usize, - dis_pid: i32, scales: usize, fnlo_mur: Option<&str>, fnlo_muf: Option<&str>, @@ -165,13 +161,11 @@ fn convert_grid( .extension() .map_or(false, |ext| ext == "tab")) { - return convert_fastnlo( - input, alpha, fun_names, member, dis_pid, scales, fnlo_mur, fnlo_muf, - ); + return convert_fastnlo(input, alpha, fun_names, member, scales, fnlo_mur, fnlo_muf); } else if extension == "dat" { - return convert_fktable(input, dis_pid); + return convert_fktable(input); } else if extension == "appl" || extension == "root" { - return convert_applgrid(input, alpha, conv_funs, dis_pid, scales); + return convert_applgrid(input, alpha, 
conv_funs, scales); } } @@ -217,7 +211,7 @@ pub struct Opts { conv_funs: ConvFuns, /// LO coupling power in alpha. #[arg(default_value_t = 0, long)] - alpha: u32, + alpha: u8, /// Relative threshold between the table and the converted grid when comparison fails. #[arg(default_value = "1e-10", long)] accuracy: f64, @@ -246,9 +240,6 @@ pub struct Opts { /// Do not optimize converted grid. #[arg(long)] no_optimize: bool, - /// Particle ID for the non-hadronic initial states if it cannot be determined from the grid. - #[arg(long, default_value_t = 11)] - dis_pid: i32, } impl Subcommand for Opts { @@ -264,7 +255,6 @@ impl Subcommand for Opts { &mut conv_funs, &self.conv_funs, 0, - self.dis_pid, self.scales, self.fnlo_mur.as_deref(), self.fnlo_muf.as_deref(), @@ -310,20 +300,19 @@ impl Subcommand for Opts { // catches the case where both results are zero let rel_diffs: Vec<_> = one .iter() - .zip(two.iter()) - .map(|(a, b)| if a == b { 0.0 } else { b / a - 1.0 }) + .zip(two) + .map(|(&a, &b)| { + // ALLOW: here we really need an exact comparison + // TODO: change allow to `expect` if MSRV >= 1.81.0 + #[allow(clippy::float_cmp)] + if a == b { + 0.0 + } else { + b / a - 1.0 + } + }) .collect(); - let max_rel_diff = rel_diffs - .iter() - .max_by(|a, b| a.abs().total_cmp(&b.abs())) - .unwrap() - .abs(); - - if max_rel_diff > self.accuracy { - different = true; - } - let mut row = row![ bin.to_string(), r->format!("{:.*e}", self.digits_abs, one[0]), @@ -331,7 +320,22 @@ impl Subcommand for Opts { r->format!("{:.*e}", self.digits_rel, rel_diffs[0]) ]; + if rel_diffs[0].abs() > self.accuracy { + different = true; + } + if scale_variations > 1 { + // skip central scale choice + let &max_rel_diff = rel_diffs[1..] 
+ .iter() + .max_by(|a, b| a.abs().total_cmp(&b.abs())) + // UNWRAP: in this branch we know there are scale variations + .unwrap(); + + if max_rel_diff.abs() > self.accuracy { + different = true; + } + row.add_cell(cell!(r->format!("{:.*e}", self.digits_rel, max_rel_diff))); } diff --git a/pineappl_cli/src/import/applgrid.rs b/pineappl_cli/src/import/applgrid.rs index 94b17b701..70ad45617 100644 --- a/pineappl_cli/src/import/applgrid.rs +++ b/pineappl_cli/src/import/applgrid.rs @@ -1,15 +1,17 @@ use anyhow::Result; +use float_cmp::assert_approx_eq; use lhapdf::Pdf; -use pineappl::boc::{Channel, Order}; -use pineappl::convolutions::Convolution; +use pineappl::boc::{Channel, Kinematics, Order, ScaleFuncForm, Scales}; +use pineappl::convolutions::{Conv, ConvType}; use pineappl::grid::Grid; -use pineappl::import_only_subgrid::ImportOnlySubgridV2; -use pineappl::sparse_array3::SparseArray3; -use pineappl::subgrid::{Mu2, SubgridParams}; +use pineappl::import_subgrid::ImportSubgridV1; +use pineappl::interpolation::{Interp, InterpMeth, Map, ReweightMeth}; +use pineappl::packed_array::PackedArray; +use pineappl::pids::PidBasis; use pineappl_applgrid::ffi::{self, grid}; use std::f64::consts::TAU; use std::pin::Pin; -use std::ptr; +use std::{iter, ptr}; fn convert_to_pdg_id(pid: usize) -> i32 { let pid = i32::try_from(pid).unwrap() - 6; @@ -22,11 +24,11 @@ fn convert_to_pdg_id(pid: usize) -> i32 { } } -fn reconstruct_luminosity_function(grid: &grid, order: i32, dis_pid: i32) -> Vec { +fn reconstruct_channels(grid: &grid, order: i32) -> Vec { let pdf = unsafe { &*grid.genpdf(order, false) }; let nproc: usize = pdf.Nproc().try_into().unwrap(); - let mut lumis = vec![Vec::new(); nproc]; + let mut channels = vec![Vec::new(); nproc]; let mut xfx1 = [0.0; 14]; let mut xfx2 = [0.0; 14]; let mut results = vec![0.0; nproc]; @@ -41,7 +43,7 @@ fn reconstruct_luminosity_function(grid: &grid, order: i32, dis_pid: i32) -> Vec for i in 0..nproc { if results[i] != 0.0 { - 
lumis[i].push((convert_to_pdg_id(a), dis_pid, results[i])); + channels[i].push((vec![convert_to_pdg_id(a)], results[i])); } } } else { @@ -54,7 +56,8 @@ fn reconstruct_luminosity_function(grid: &grid, order: i32, dis_pid: i32) -> Vec for i in 0..nproc { if results[i] != 0.0 { - lumis[i].push((convert_to_pdg_id(a), convert_to_pdg_id(b), results[i])); + channels[i] + .push((vec![convert_to_pdg_id(a), convert_to_pdg_id(b)], results[i])); } } @@ -65,28 +68,32 @@ fn reconstruct_luminosity_function(grid: &grid, order: i32, dis_pid: i32) -> Vec xfx1[a] = 0.0; } - lumis.into_iter().map(Channel::new).collect() + channels.into_iter().map(Channel::new).collect() } -pub fn convert_applgrid(grid: Pin<&mut grid>, alpha: u32, dis_pid: i32) -> Result { +pub fn convert_applgrid(grid: Pin<&mut grid>, alpha: u8) -> Result { let bin_limits: Vec<_> = (0..=grid.Nobs_internal()) .map(|i| grid.obslow_internal(i)) .collect(); - let leading_order: u32 = grid.leadingOrder().try_into().unwrap(); + let leading_order: u8 = grid + .leadingOrder() + .try_into() + // UNWRAP: exponents of orders shouldn't be larger than 255 + .unwrap(); let orders; let alphas_factor; if grid.calculation() == ffi::grid_CALCULATION::AMCATNLO { alphas_factor = 2.0 * TAU; orders = if grid.nloops() == 0 { - vec![Order::new(leading_order, alpha, 0, 0)] + vec![Order::new(leading_order, alpha, 0, 0, 0)] } else if grid.nloops() == 1 { vec![ - Order::new(leading_order + 1, alpha, 0, 0), // NLO - Order::new(leading_order + 1, alpha, 1, 0), // NLO mur - Order::new(leading_order + 1, alpha, 0, 1), // NLO muf - Order::new(leading_order, alpha, 0, 0), // LO + Order::new(leading_order + 1, alpha, 0, 0, 0), // NLO + Order::new(leading_order + 1, alpha, 1, 0, 0), // NLO mur + Order::new(leading_order + 1, alpha, 0, 1, 0), // NLO muf + Order::new(leading_order, alpha, 0, 0, 0), // LO ] } else { unimplemented!("nloops = {} is not supported", grid.nloops()); @@ -94,7 +101,18 @@ pub fn convert_applgrid(grid: Pin<&mut grid>, alpha: u32, 
dis_pid: i32) -> Resul } else if grid.calculation() == ffi::grid_CALCULATION::STANDARD { alphas_factor = 1.0 / TAU; orders = (0..=grid.nloops()) - .map(|power| Order::new(leading_order + u32::try_from(power).unwrap(), alpha, 0, 0)) + .map(|power| { + Order::new( + leading_order + + u8::try_from(power) + // UNWRAP: exponents of orders shouldn't be larger than 255 + .unwrap(), + alpha, + 0, + 0, + 0, + ) + }) .collect(); } else { unimplemented!("calculation is not supported"); @@ -109,38 +127,61 @@ pub fn convert_applgrid(grid: Pin<&mut grid>, alpha: u32, dis_pid: i32) -> Resul } // this setting isn't supported - assert_eq!(grid.getDynamicScale(), 0.0); + assert_approx_eq!(f64, grid.getDynamicScale(), 0.0, ulps = 4); let mut grids = Vec::with_capacity(orders.len()); + let dis = grid.isDIS(); + + // from APPLgrid alone we don't know what type of convolution we have + let convolutions = vec![Conv::new(ConvType::UnpolPDF, 2212); if dis { 1 } else { 2 }]; + // TODO: read out interpolation parameters from APPLgrid + let mut interps = vec![Interp::new( + 1e2, + 1e8, + 40, + 3, + ReweightMeth::NoReweight, + Map::ApplGridH0, + InterpMeth::Lagrange, + )]; + for _ in 0..convolutions.len() { + interps.push(Interp::new( + 2e-7, + 1.0, + 50, + 3, + ReweightMeth::ApplGridX, + Map::ApplGridF2, + InterpMeth::Lagrange, + )); + } for (i, order) in orders.into_iter().enumerate() { - let lumis = reconstruct_luminosity_function(&grid, i.try_into().unwrap(), dis_pid); - let lumis_len = lumis.len(); + let channels = reconstruct_channels(&grid, i.try_into().unwrap()); + let lumis_len = channels.len(); let mut pgrid = Grid::new( - lumis, + PidBasis::Pdg, + channels, vec![order], bin_limits.clone(), - SubgridParams::default(), + convolutions.clone(), + interps.clone(), + iter::once(Kinematics::Scale(0)) + .chain((0..convolutions.len()).map(Kinematics::X)) + .collect(), + Scales { + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, + }, ); - // from 
APPLgrid alone we don't know what type of convolution we have - pgrid.set_convolution(0, Convolution::UnpolPDF(2212)); - - if grid.isDIS() { - pgrid.set_convolution(1, Convolution::None); - } - for bin in 0..grid.Nobs_internal() { let igrid = grid.weightgrid(i.try_into().unwrap(), bin); let igrid = unsafe { &*igrid }; let reweight = ffi::igrid_m_reweight(igrid); - let mu2_values: Vec<_> = (0..igrid.Ntau()) - .map(|i| { - let q2 = igrid.getQ2(i); - Mu2 { ren: q2, fac: q2 } - }) - .collect(); + let scale_values: Vec<_> = (0..igrid.Ntau()).map(|i| igrid.getQ2(i)).collect(); let x1_values: Vec<_> = (0..igrid.Ny1()) .map(|i| igrid.getx1(i).clamp(0.0, 1.0)) .collect(); @@ -171,10 +212,13 @@ pub fn convert_applgrid(grid: Pin<&mut grid>, alpha: u32, dis_pid: i32) -> Resul let matrix = unsafe { &*matrix }; - let mut array = - SparseArray3::new(mu2_values.len(), x1_values.len(), x2_values.len()); + let mut array = PackedArray::new(if dis { + vec![scale_values.len(), x1_values.len()] + } else { + vec![scale_values.len(), x1_values.len(), x2_values.len()] + }); - for itau in 0..mu2_values.len() { + for itau in 0..scale_values.len() { for ix1 in 0..x1_values.len() { for ix2 in 0..x2_values.len() { let value = ffi::sparse_matrix_get( @@ -185,7 +229,12 @@ pub fn convert_applgrid(grid: Pin<&mut grid>, alpha: u32, dis_pid: i32) -> Resul ); if value != 0.0 { - array[[itau, ix1, ix2]] = value * x1_weights[ix1] * x2_weights[ix2]; + if dis { + array[[itau, ix1]] = value * x1_weights[ix1]; + } else { + array[[itau, ix1, ix2]] = + value * x1_weights[ix1] * x2_weights[ix2]; + } } } } @@ -193,11 +242,13 @@ pub fn convert_applgrid(grid: Pin<&mut grid>, alpha: u32, dis_pid: i32) -> Resul if !array.is_empty() { pgrid.subgrids_mut()[[0, bin.try_into().unwrap(), lumi]] = - ImportOnlySubgridV2::new( + ImportSubgridV1::new( array, - mu2_values.clone(), - x1_values.clone(), - x2_values.clone(), + if dis { + vec![scale_values.clone(), x1_values.clone()] + } else { + vec![scale_values.clone(), 
x1_values.clone(), x2_values.clone()] + }, ) .into(); } @@ -241,7 +292,7 @@ pub fn convert_applgrid(grid: Pin<&mut grid>, alpha: u32, dis_pid: i32) -> Resul } } - grid0.scale_by_order(alphas_factor, 1.0, 1.0, 1.0, global); + grid0.scale_by_order(alphas_factor, 1.0, 1.0, 1.0, 1.0, global); Ok(grid0) } diff --git a/pineappl_cli/src/import/fastnlo.rs b/pineappl_cli/src/import/fastnlo.rs index 8e4225938..ee9f63f88 100644 --- a/pineappl_cli/src/import/fastnlo.rs +++ b/pineappl_cli/src/import/fastnlo.rs @@ -1,18 +1,21 @@ use anyhow::Result; +use float_cmp::approx_eq; use itertools::Itertools; use ndarray::s; use pineappl::bin::BinRemapper; -use pineappl::boc::{Channel, Order}; -use pineappl::convolutions::Convolution; +use pineappl::boc::{Channel, Kinematics, Order, ScaleFuncForm, Scales}; +use pineappl::convolutions::{Conv, ConvType}; use pineappl::grid::Grid; -use pineappl::import_only_subgrid::ImportOnlySubgridV2; -use pineappl::sparse_array3::SparseArray3; -use pineappl::subgrid::{Mu2, SubgridParams}; +use pineappl::import_subgrid::ImportSubgridV1; +use pineappl::interpolation::{Interp, InterpMeth, Map, ReweightMeth}; +use pineappl::packed_array::PackedArray; +use pineappl::pids::PidBasis; use pineappl_fastnlo::ffi::{ self, fastNLOCoeffAddBase, fastNLOCoeffAddFix, fastNLOCoeffAddFlex, fastNLOLHAPDF, fastNLOPDFLinearCombinations, EScaleFunctionalForm, }; use std::f64::consts::TAU; +use std::iter; use std::mem; fn pid_to_pdg_id(pid: i32) -> i32 { @@ -23,36 +26,35 @@ fn pid_to_pdg_id(pid: i32) -> i32 { } } -fn create_lumi( +fn reconstruct_channels( table: &fastNLOCoeffAddBase, comb: &fastNLOPDFLinearCombinations, - dis_pid: i32, ) -> Vec { - let dis_pid = if table.GetNPDF() == 2 { 0 } else { dis_pid }; - let mut lumis = Vec::new(); + let mut channels = Vec::new(); + let npdf = table.GetNPDF(); - // if there's a (non-empty) PDF coefficient vector reconstruct the luminosity function; the - // advantage is that we preserve the order of the lumi entries in the PineAPPL grid 
+ // if there's a (non-empty) PDF coefficient vector reconstruct the channels; the advantage is + // that we preserve the order of the channels in the PineAPPL grid for pdf_entry in 0..ffi::GetPDFCoeffSize(table) { let mut entries = Vec::new(); for entry in ffi::GetPDFCoeff(table, pdf_entry) { - let a = pid_to_pdg_id(entry.first); - let b = if dis_pid == 0 { - pid_to_pdg_id(entry.second) - } else { - dis_pid - }; - let f = 1.0; + let mut pids = vec![pid_to_pdg_id(entry.first)]; + + if npdf == 2 { + pids.push(pid_to_pdg_id(entry.second)); + } - entries.push((a, b, f)); + entries.push((pids, 1.0)); } - lumis.push(Channel::new(entries)); + channels.push(Channel::new(entries)); } - // if the PDF coefficient vector was empty, we must reconstruct the lumi function - if lumis.is_empty() { + // if the PDF coefficient vector was empty, we must reconstruct the channels in a different way + if channels.is_empty() { + assert_eq!(npdf, 2); + let nsubproc = table.GetNSubproc().try_into().unwrap(); let mut xfx1 = [0.0; 13]; @@ -67,15 +69,15 @@ fn create_lumi( for b in 0..13 { xfx2[b] = 1.0; - let lumi = ffi::CalcPDFLinearCombination(comb, table, &xfx1, &xfx2, false); + let channel = ffi::CalcPDFLinearCombination(comb, table, &xfx1, &xfx2, false); - assert!(lumi.len() == nsubproc); + assert!(channel.len() == nsubproc); - for (i, l) in lumi.iter().copied().enumerate().filter(|(_, l)| *l != 0.0) { + for (i, &l) in channel.iter().enumerate().filter(|(_, &l)| l != 0.0) { let ap = pid_to_pdg_id(i32::try_from(a).unwrap() - 6); let bp = pid_to_pdg_id(i32::try_from(b).unwrap() - 6); - entries[i].push((ap, bp, l)); + entries[i].push((vec![ap, bp], l)); } xfx2[b] = 0.0; @@ -84,51 +86,76 @@ fn create_lumi( xfx1[a] = 0.0; } - lumis = entries.into_iter().map(Channel::new).collect(); + channels = entries.into_iter().map(Channel::new).collect(); } - lumis + channels } fn convert_coeff_add_fix( table: &fastNLOCoeffAddFix, comb: &fastNLOPDFLinearCombinations, bins: usize, - alpha: u32, - dis_pid: 
i32, + alpha: u8, ) -> Grid { let table_as_add_base = ffi::downcast_coeff_add_fix_to_base(table); + // UNWRAP: shouldn't be larger than `2` + let npdf = usize::try_from(table_as_add_base.GetNPDF()).unwrap(); + assert!(npdf <= 2); + + // TODO: extract the proper convolution PIDs + let convolutions = vec![Conv::new(ConvType::UnpolPDF, 2212); npdf]; + let mut grid = Grid::new( - create_lumi(table_as_add_base, comb, dis_pid), + PidBasis::Pdg, + reconstruct_channels(table_as_add_base, comb), vec![Order { alphas: table_as_add_base.GetNpow().try_into().unwrap(), alpha, logxir: 0, logxif: 0, + logxia: 0, }], (0..=bins) .map(|limit| u16::try_from(limit).unwrap().into()) .collect(), - SubgridParams::default(), + convolutions, + // TODO: read out interpolation parameters from fastNLO + iter::once(Interp::new( + 1e2, + 1e8, + 40, + 3, + ReweightMeth::NoReweight, + Map::ApplGridH0, + InterpMeth::Lagrange, + )) + .chain( + iter::repeat(Interp::new( + 2e-7, + 1.0, + 50, + 3, + ReweightMeth::ApplGridX, + Map::ApplGridF2, + InterpMeth::Lagrange, + )) + .take(npdf), + ) + .collect(), + if npdf == 2 { + vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2] + } else { + vec![Kinematics::Scale(0), Kinematics::X1] + }, + Scales { + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, + }, ); - // UNWRAP: shouldn't be larger than `2` - let npdf = usize::try_from(table_as_add_base.GetNPDF()).unwrap(); - assert!(npdf <= 2); - - for index in 0..2 { - grid.set_convolution( - index, - if index < npdf { - // TODO: how do we determined the PID/type of the convolution for fixed tables? 
- Convolution::UnpolPDF(2212) - } else { - Convolution::None - }, - ); - } - let total_scalenodes: usize = table.GetTotalScalenodes().try_into().unwrap(); let npdfdim = table.GetNPDFDim(); @@ -148,21 +175,15 @@ fn convert_coeff_add_fix( for j in 0..table.GetTotalScalevars() { // TODO: for the time being we only extract the central scale result - if table.GetScaleFactor(j) != 1.0 { + if !approx_eq!(f64, table.GetScaleFactor(j), 1.0, ulps = 4) { continue; } let q_values = ffi::GetScaleNodes(table, obs, j); - let mu2_values: Vec<_> = q_values - .iter() - .map(|q| Mu2 { - ren: q * q, - fac: q * q, - }) - .collect(); + let scale_values: Vec<_> = q_values.into_iter().map(|q| q * q).collect(); let mut array = - SparseArray3::new(mu2_values.len(), x1_values.len(), x2_values.len()); + PackedArray::new(vec![scale_values.len(), x1_values.len(), x2_values.len()]); // TODO: figure out what the general case is supposed to be assert_eq!(j, 0); @@ -216,11 +237,9 @@ fn convert_coeff_add_fix( if !array.is_empty() { grid.subgrids_mut() [[0, obs.try_into().unwrap(), subproc.try_into().unwrap()]] = - ImportOnlySubgridV2::new( + ImportSubgridV1::new( array, - mu2_values, - x1_values.clone(), - x2_values.clone(), + vec![scale_values.clone(), x1_values.clone(), x2_values.clone()], ) .into(); } @@ -231,26 +250,46 @@ fn convert_coeff_add_fix( grid } +fn convert_scale_functional_form(ff: EScaleFunctionalForm) -> ScaleFuncForm { + match ff { + EScaleFunctionalForm::kScale1 => ScaleFuncForm::Scale(0), + EScaleFunctionalForm::kScale2 => ScaleFuncForm::Scale(1), + EScaleFunctionalForm::kQuadraticSum => ScaleFuncForm::QuadraticSum(0, 1), + EScaleFunctionalForm::kQuadraticMean => ScaleFuncForm::QuadraticMean(0, 1), + EScaleFunctionalForm::kQuadraticSumOver4 => ScaleFuncForm::QuadraticSumOver4(0, 1), + EScaleFunctionalForm::kLinearMean => ScaleFuncForm::LinearMean(0, 1), + EScaleFunctionalForm::kLinearSum => ScaleFuncForm::LinearSum(0, 1), + EScaleFunctionalForm::kScaleMax => 
ScaleFuncForm::ScaleMax(0, 1), + EScaleFunctionalForm::kScaleMin => ScaleFuncForm::ScaleMin(0, 1), + EScaleFunctionalForm::kProd => ScaleFuncForm::Prod(0, 1), + EScaleFunctionalForm::kS2plusS1half => ScaleFuncForm::S2plusS1half(0, 1), + EScaleFunctionalForm::kPow4Sum => ScaleFuncForm::Pow4Sum(0, 1), + EScaleFunctionalForm::kWgtAvg => ScaleFuncForm::WgtAvg(0, 1), + EScaleFunctionalForm::kS2plusS1fourth => ScaleFuncForm::S2plusS1fourth(0, 1), + EScaleFunctionalForm::kExpProd2 => ScaleFuncForm::ExpProd2(0, 1), + _ => unimplemented!(), + } +} + fn convert_coeff_add_flex( table: &fastNLOCoeffAddFlex, comb: &fastNLOPDFLinearCombinations, mur_ff: EScaleFunctionalForm, muf_ff: EScaleFunctionalForm, bins: usize, - alpha: u32, + alpha: u8, ipub_units: i32, - dis_pid: i32, ) -> Grid { let table_as_add_base = ffi::downcast_coeff_add_flex_to_base(table); let alphas = table_as_add_base.GetNpow().try_into().unwrap(); let orders: Vec<_> = [ - Order::new(alphas, alpha, 0, 0), - Order::new(alphas, alpha, 1, 0), - Order::new(alphas, alpha, 0, 1), - Order::new(alphas, alpha, 2, 0), - Order::new(alphas, alpha, 0, 2), - Order::new(alphas, alpha, 1, 1), + Order::new(alphas, alpha, 0, 0, 0), + Order::new(alphas, alpha, 1, 0, 0), + Order::new(alphas, alpha, 0, 1, 0), + Order::new(alphas, alpha, 2, 0, 0), + Order::new(alphas, alpha, 0, 2, 0), + Order::new(alphas, alpha, 1, 1, 0), ] .into_iter() .take(match table.GetNScaleDep() { @@ -263,30 +302,59 @@ fn convert_coeff_add_flex( .collect(); let orders_len = orders.len(); + let npdf = table_as_add_base.GetNPDF(); + assert!(npdf <= 2); + + let convolutions = (0..npdf) + .map(|index| Conv::new(ConvType::UnpolPDF, table.GetPDFPDG(index))) + .collect(); + + let npdf: usize = npdf.try_into().unwrap(); + let mut grid = Grid::new( - create_lumi(table_as_add_base, comb, dis_pid), + PidBasis::Pdg, + reconstruct_channels(table_as_add_base, comb), orders, (0..=bins) .map(|limit| u16::try_from(limit).unwrap().into()) .collect(), - 
SubgridParams::default(), + convolutions, + // TODO: read out interpolation parameters from fastNLO + iter::repeat(Interp::new( + 1e2, + 1e8, + 40, + 3, + ReweightMeth::NoReweight, + Map::ApplGridH0, + InterpMeth::Lagrange, + )) + .take(2) + .chain( + iter::repeat(Interp::new( + 2e-7, + 1.0, + 50, + 3, + ReweightMeth::ApplGridX, + Map::ApplGridF2, + InterpMeth::Lagrange, + )) + .take(npdf), + ) + .collect(), + [Kinematics::Scale(0), Kinematics::Scale(1)] + .into_iter() + .chain((0..npdf).map(Kinematics::X)) + .collect(), + Scales { + ren: convert_scale_functional_form(mur_ff), + fac: convert_scale_functional_form(muf_ff), + // TODO: does fastNLO not support fragmentation scales? + frg: ScaleFuncForm::NoScale, + }, ); - let npdf = table_as_add_base.GetNPDF(); - assert!(npdf <= 2); - - for index in 0..2 { - grid.set_convolution( - // UNWRAP: index is smaller than 2 - index.try_into().unwrap(), - if index < npdf { - Convolution::UnpolPDF(table.GetPDFPDG(index)) - } else { - Convolution::None - }, - ); - } - let rescale = 0.1_f64.powi(table.GetIXsectUnits() - ipub_units); for obs in 0..bins { @@ -299,30 +367,25 @@ fn convert_coeff_add_flex( vec![1.0] }; - let mu2_values: Vec<_> = scale_nodes1 - .iter() - .cartesian_product(scale_nodes2.iter()) - .map(|(&s1, &s2)| Mu2 { - ren: mur_ff.compute_scale(s1, s2), - fac: muf_ff.compute_scale(s1, s2), - }) - .collect(); + let mut dim = vec![scale_nodes1.len(), scale_nodes2.len(), x1_values.len()]; + if npdf > 1 { + dim.push(x2_values.len()); + } + let nx = ffi::GetNx(table, obs); for subproc in 0..table_as_add_base.GetNSubproc() { let factor = rescale / table_as_add_base.GetNevt(obs.try_into().unwrap(), subproc); - let mut arrays = - vec![ - SparseArray3::new(mu2_values.len(), x1_values.len(), x2_values.len()); - orders_len - ]; - for (mu2_slice, (is1, is2)) in (0..scale_nodes1.len()) - .cartesian_product(0..scale_nodes2.len()) - .enumerate() - { - let logmur2 = mu2_values[mu2_slice].ren.ln(); - let logmuf2 = 
mu2_values[mu2_slice].fac.ln(); + let mut arrays = vec![PackedArray::new(dim.clone()); orders_len]; + + for (is1, is2) in (0..scale_nodes1.len()).cartesian_product(0..scale_nodes2.len()) { + let logmur2 = mur_ff + .compute_scale(scale_nodes1[is1], scale_nodes2[is2]) + .ln(); + let logmuf2 = muf_ff + .compute_scale(scale_nodes1[is1], scale_nodes2[is2]) + .ln(); let logs00 = [ logmur2, logmuf2, @@ -365,8 +428,14 @@ fn convert_coeff_add_flex( .zip(arrays.iter_mut()) .filter(|(value, _)| *value != 0.0) { - array[[mu2_slice, ix1, ix2]] = - value * factor * x1_values[ix1] * x2_values[ix2]; + if npdf == 1 { + array[[is1, is2, ix1]] = value * factor * x1_values[ix1]; + } else if npdf == 2 { + assert_eq!(is2, 0); + + array[[is1, is2, ix1, ix2]] = + value * factor * x1_values[ix1] * x2_values[ix2]; + } } } } @@ -381,13 +450,23 @@ fn convert_coeff_add_flex( continue; } - *subgrid = ImportOnlySubgridV2::new( - array, - mu2_values.clone(), - x1_values.clone(), - x2_values.clone(), - ) - .into(); + let node_values = if npdf == 1 { + vec![ + scale_nodes1.iter().map(|s| s * s).collect(), + scale_nodes2.iter().map(|s| s * s).collect(), + x1_values.clone(), + ] + } else { + vec![ + scale_nodes1.iter().map(|s| s * s).collect(), + // scale_nodes2.iter().map(|s| s * s).collect(), + vec![100000.0], + x1_values.clone(), + x2_values.clone(), + ] + }; + + *subgrid = ImportSubgridV1::new(array, node_values).into(); } } } @@ -395,7 +474,7 @@ fn convert_coeff_add_flex( grid } -pub fn convert_fastnlo_table(file: &fastNLOLHAPDF, alpha: u32, dis_pid: i32) -> Result { +pub fn convert_fastnlo_table(file: &fastNLOLHAPDF, alpha: u8) -> Result { let file_as_reader = ffi::downcast_lhapdf_to_reader(file); let file_as_table = ffi::downcast_lhapdf_to_table(file); @@ -436,7 +515,6 @@ pub fn convert_fastnlo_table(file: &fastNLOLHAPDF, alpha: u32, dis_pid: i32) -> bins, alpha, file_as_table.GetIpublunits(), - dis_pid, )); } } else { @@ -445,7 +523,6 @@ pub fn convert_fastnlo_table(file: &fastNLOLHAPDF, 
alpha: u32, dis_pid: i32) -> linear_combinations, bins, alpha, - dis_pid, )); } } @@ -455,7 +532,7 @@ pub fn convert_fastnlo_table(file: &fastNLOLHAPDF, alpha: u32, dis_pid: i32) -> result.merge(grid)?; } - result.scale_by_order(1.0 / TAU, 1.0, 1.0, 1.0, 1.0); + result.scale_by_order(1.0 / TAU, 1.0, 1.0, 1.0, 1.0, 1.0); let dimensions: usize = file_as_table.GetNumDiffBin().try_into().unwrap(); let mut limits = Vec::new(); @@ -480,14 +557,18 @@ pub fn convert_fastnlo_table(file: &fastNLOLHAPDF, alpha: u32, dis_pid: i32) -> assert_eq!(labels.len(), dimensions); - for (dimension, label) in labels.iter().enumerate() { - result.set_key_value(&format!("x{}_label", dimension + 1), label); + for (dimension, label) in labels.into_iter().enumerate() { + result + .metadata_mut() + .insert(format!("x{}_label", dimension + 1), label); } - result.set_key_value("y_label", &ffi::GetXSDescr(file_as_table)); - result.set_key_value( - "fastnlo_scenario", - &ffi::GetScDescr(file_as_table).join("\n"), + result + .metadata_mut() + .insert("y_label".to_owned(), ffi::GetXSDescr(file_as_table)); + result.metadata_mut().insert( + "fastnlo_scenario".to_owned(), + ffi::GetScDescr(file_as_table).join("\n"), ); Ok(result) diff --git a/pineappl_cli/src/import/fktable.rs b/pineappl_cli/src/import/fktable.rs index 3fa6f2c28..a3109cf66 100644 --- a/pineappl_cli/src/import/fktable.rs +++ b/pineappl_cli/src/import/fktable.rs @@ -1,14 +1,14 @@ use anyhow::{anyhow, Context, Result}; use flate2::read::GzDecoder; use ndarray::s; -use pineappl::boc::Order; +use pineappl::boc::{Kinematics, Order, ScaleFuncForm, Scales}; use pineappl::channel; -use pineappl::convolutions::Convolution; +use pineappl::convolutions::{Conv, ConvType}; use pineappl::grid::Grid; -use pineappl::import_only_subgrid::ImportOnlySubgridV1; +use pineappl::import_subgrid::ImportSubgridV1; +use pineappl::interpolation::{Interp, InterpMeth, Map, ReweightMeth}; +use pineappl::packed_array::PackedArray; use pineappl::pids::PidBasis; -use 
pineappl::sparse_array3::SparseArray3; -use pineappl::subgrid::SubgridParams; use std::fs::File; use std::io::{BufRead, BufReader}; use std::iter; @@ -27,7 +27,7 @@ enum FkTableSection { FastKernel, } -fn read_fktable(reader: impl BufRead, dis_pid: i32) -> Result { +fn read_fktable(reader: impl BufRead) -> Result { let mut section = FkTableSection::Sof; let mut flavor_mask = Vec::::new(); let mut x_grid = Vec::new(); @@ -77,7 +77,7 @@ fn read_fktable(reader: impl BufRead, dis_pid: i32) -> Result { nx2 = if hadronic { nx1 } else { 1 }; - // FK tables are always in the flavor basis + // TODO: are FK tables always in the evolution basis? let basis = [ 22, 100, 21, 200, 203, 208, 215, 224, 235, 103, 108, 115, 124, 135, ]; @@ -86,47 +86,106 @@ fn read_fktable(reader: impl BufRead, dis_pid: i32) -> Result { .iter() .enumerate() .filter(|&(_, &value)| value) - .map(|(index, _)| channel![basis[index / 14], basis[index % 14], 1.0]) + .map(|(index, _)| channel![1.0 * (basis[index / 14], basis[index % 14])]) .collect() } else { flavor_mask .iter() .enumerate() .filter(|&(_, &value)| value) - .map(|(index, _)| channel![basis[index], dis_pid, 1.0]) + .map(|(index, _)| channel![1.0 * (basis[index])]) .collect() }; + let convolutions = if hadronic { + vec![Conv::new(ConvType::UnpolPDF, 2212); 2] + } else { + vec![Conv::new(ConvType::UnpolPDF, 2212)] + }; + // construct `Grid` - let mut fktable = Grid::new( + let fktable = Grid::new( + PidBasis::Evol, lumis, - vec![Order { - alphas: 0, - alpha: 0, - logxir: 0, - logxif: 0, - }], + vec![Order::new(0, 0, 0, 0, 0)], (0..=ndata).map(Into::into).collect(), - SubgridParams::default(), + // legacy FK-tables only support unpolarized proton PDFs + convolutions.clone(), + // TODO: what are sensible parameters for FK-tables? 
+ if hadronic { + vec![ + Interp::new( + 1e2, + 1e8, + 40, + 3, + ReweightMeth::NoReweight, + Map::ApplGridH0, + InterpMeth::Lagrange, + ), + Interp::new( + 2e-7, + 1.0, + 50, + 3, + ReweightMeth::ApplGridX, + Map::ApplGridF2, + InterpMeth::Lagrange, + ), + Interp::new( + 2e-7, + 1.0, + 50, + 3, + ReweightMeth::ApplGridX, + Map::ApplGridF2, + InterpMeth::Lagrange, + ), + ] + } else { + vec![ + Interp::new( + 1e2, + 1e8, + 40, + 3, + ReweightMeth::NoReweight, + Map::ApplGridH0, + InterpMeth::Lagrange, + ), + Interp::new( + 2e-7, + 1.0, + 50, + 3, + ReweightMeth::ApplGridX, + Map::ApplGridF2, + InterpMeth::Lagrange, + ), + ] + }, + if hadronic { + vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2] + } else { + vec![Kinematics::Scale(0), Kinematics::X1] + }, + // TODO: is this correct? + Scales { + ren: ScaleFuncForm::NoScale, + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, + }, ); - // explicitly set the evolution basis - fktable.set_pid_basis(PidBasis::Evol); - - // legacy FK-tables only support unpolarized proton PDFs - fktable.set_convolution(0, Convolution::UnpolPDF(2212)); - - if hadronic { - fktable.set_convolution(1, Convolution::UnpolPDF(2212)); - } else { - fktable.set_convolution(1, Convolution::None); - } - grid = Some(fktable); - arrays = iter::repeat(SparseArray3::new(1, nx1, nx2)) - .take(flavor_mask.iter().filter(|&&value| value).count()) - .collect(); + arrays = iter::repeat(PackedArray::new(if hadronic { + vec![1, nx1, nx2] + } else { + vec![1, nx1] + })) + .take(flavor_mask.iter().filter(|&&value| value).count()) + .collect(); } _ => match section { FkTableSection::GridInfo => { @@ -136,9 +195,7 @@ fn read_fktable(reader: impl BufRead, dis_pid: i32) -> Result { hadronic = match value { "0" => false, "1" => true, - _ => { - unimplemented!("hadronic value: '{value}' is not supported") - } + _ => unreachable!(), } } "*NDATA:" => ndata = value.parse()?, @@ -186,18 +243,24 @@ fn read_fktable(reader: impl BufRead, dis_pid: i32) -> 
Result { .iter_mut() .zip(arrays.into_iter()) { - *subgrid = ImportOnlySubgridV1::new( + *subgrid = ImportSubgridV1::new( array, - vec![q0 * q0], - x_grid.clone(), - if hadronic { x_grid.clone() } else { vec![1.0] }, + if hadronic { + vec![vec![q0 * q0], x_grid.clone(), x_grid.clone()] + } else { + vec![vec![q0 * q0], x_grid.clone()] + }, ) .into(); } - arrays = iter::repeat(SparseArray3::new(1, nx1, nx2)) - .take(flavor_mask.iter().filter(|&&value| value).count()) - .collect(); + arrays = iter::repeat(PackedArray::new(if hadronic { + vec![1, nx1, nx2] + } else { + vec![1, nx1] + })) + .take(flavor_mask.iter().filter(|&&value| value).count()) + .collect(); last_bin = bin; } @@ -223,8 +286,11 @@ fn read_fktable(reader: impl BufRead, dis_pid: i32) -> Result { .zip(grid_values.iter()) .filter(|(_, value)| **value != 0.0) { - array[[0, x1, x2]] = - x_grid[x1] * if hadronic { x_grid[x2] } else { 1.0 } * value; + if hadronic { + array[[0, x1, x2]] = x_grid[x1] * x_grid[x2] * value; + } else { + array[[0, x1]] = x_grid[x1] * value; + } } } _ => {} @@ -242,11 +308,13 @@ fn read_fktable(reader: impl BufRead, dis_pid: i32) -> Result { .iter_mut() .zip(arrays.into_iter()) { - *subgrid = ImportOnlySubgridV1::new( + *subgrid = ImportSubgridV1::new( array, - vec![q0 * q0], - x_grid.clone(), - if hadronic { x_grid.clone() } else { vec![1.0] }, + if hadronic { + vec![vec![q0 * q0], x_grid.clone(), x_grid.clone()] + } else { + vec![vec![q0 * q0], x_grid.clone()] + }, ) .into(); } @@ -254,7 +322,7 @@ fn read_fktable(reader: impl BufRead, dis_pid: i32) -> Result { Ok(grid) } -pub fn convert_fktable(input: &Path, dis_pid: i32) -> Result { +pub fn convert_fktable(input: &Path) -> Result { let reader = GzDecoder::new(File::open(input)?); let mut archive = Archive::new(reader); @@ -265,7 +333,7 @@ pub fn convert_fktable(input: &Path, dis_pid: i32) -> Result { if let Some(extension) = path.extension() { if extension == "dat" { - return read_fktable(BufReader::new(file), dis_pid); + return 
read_fktable(BufReader::new(file)); } } } diff --git a/pineappl_cli/src/orders.rs b/pineappl_cli/src/orders.rs index 8a229251d..a9b371566 100644 --- a/pineappl_cli/src/orders.rs +++ b/pineappl_cli/src/orders.rs @@ -29,7 +29,7 @@ pub struct Opts { value_delimiter = ',', value_parser = helpers::parse_order )] - normalize: Vec<(u32, u32)>, + normalize: Vec<(u8, u8)>, /// Set the number of fractional digits shown for absolute numbers. #[arg(default_value_t = 7, long, value_name = "ABS")] digits_abs: usize, diff --git a/pineappl_cli/src/plot.rs b/pineappl_cli/src/plot.rs index 6320a9c4d..c381f0c6f 100644 --- a/pineappl_cli/src/plot.rs +++ b/pineappl_cli/src/plot.rs @@ -3,11 +3,11 @@ use super::{GlobalConfiguration, Subcommand}; use anyhow::Result; use clap::builder::{PossibleValuesParser, TypedValueParser}; use clap::{Parser, ValueHint}; +use float_cmp::assert_approx_eq; use itertools::Itertools; use ndarray::Axis; -use pineappl::boc::Channel; -use pineappl::convolutions::Convolution; -use pineappl::pids::PidBasis; +use pineappl::boc::{Channel, Kinematics}; +use pineappl::grid::Grid; use pineappl::subgrid::Subgrid; use rayon::{prelude::*, ThreadPoolBuilder}; use std::fmt::Write; @@ -79,29 +79,15 @@ fn map_format_e_join_repeat_last(slice: &[f64]) -> String { } /// Convert a channel to a good Python string representation. 
-fn map_format_channel( - channel: &Channel, - has_pdf1: bool, - has_pdf2: bool, - pid_basis: PidBasis, -) -> String { +fn map_format_channel(channel: &Channel, grid: &Grid) -> String { channel .entry() .iter() - .map(|&(a, b, _)| { - format!( - "{}{}", - if has_pdf1 { - pid_basis.to_latex_str(a) - } else { - "" - }, - if has_pdf2 { - pid_basis.to_latex_str(b) - } else { - "" - } - ) + .map(|(pids, _)| { + pids.iter() + .map(|&pid| grid.pid_basis().to_latex_str(pid)) + .collect::>() + .join("") }) .join(" + ") } @@ -207,14 +193,16 @@ impl Subcommand for Opts { "$\\SI{{{left}}}{{{unit}}} < {obs} < \\SI{{{right}}}{{{unit}}}$", left = grid.bin_info().left(d)[begin], obs = grid - .key_values() - .and_then(|map| map.get(&format!("x{}_label_tex", d + 1)).cloned()) + .metadata() + .get(&format!("x{}_label_tex", d + 1)) + .cloned() .unwrap_or_else(|| format!("x{}", d + 1)) .replace('$', ""), right = grid.bin_info().right(d)[end - 1], unit = grid - .key_values() - .and_then(|map| map.get(&format!("x{}_unit", d + 1)).cloned()) + .metadata() + .get(&format!("x{}_unit", d + 1)) + .cloned() .unwrap_or_default() ) }) @@ -398,12 +386,7 @@ impl Subcommand for Opts { let mut channel_mask = vec![false; grid.channels().len()]; channel_mask[channel] = true; ( - map_format_channel( - &grid.channels()[channel], - grid.convolutions()[0] != Convolution::None, - grid.convolutions()[1] != Convolution::None, - grid.pid_basis(), - ), + map_format_channel(&grid.channels()[channel], &grid), helpers::convolve( &grid, &mut conv_funs, @@ -464,10 +447,8 @@ impl Subcommand for Opts { data_string.push_str("]"); // prepare metadata - let key_values = grid.key_values().cloned().unwrap_or_default(); - let mut vector: Vec<_> = key_values.iter().collect(); - vector.sort(); - let vector = vector; + let metadata = grid.metadata(); + let vector: Vec<_> = metadata.iter().collect(); let mut output = self.input.clone(); @@ -484,12 +465,12 @@ impl Subcommand for Opts { } let xaxis = format!("x{}", 
grid.bin_info().dimensions()); - let xunit = key_values + let xunit = metadata .get(&format!("{xaxis}_unit")) .map_or("", String::as_str); let xlabel = format!( "{}{}", - key_values + metadata .get(&format!("{xaxis}_label_tex")) .map_or("", String::as_str), if xunit.is_empty() { @@ -498,10 +479,10 @@ impl Subcommand for Opts { format!(" [\\si{{{xunit}}}]") } ); - let yunit = key_values.get("y_unit").map_or("", String::as_str); + let yunit = metadata.get("y_unit").map_or("", String::as_str); let ylabel = format!( "{}{}", - key_values.get("y_label_tex").map_or("", String::as_str), + metadata.get("y_label_tex").map_or("", String::as_str), if yunit.is_empty() { String::new() } else { @@ -510,7 +491,7 @@ impl Subcommand for Opts { ); let xlog = !xunit.is_empty(); let ylog = xlog; - let title = key_values.get("description").map_or("", String::as_str); + let title = metadata.get("description").map_or("", String::as_str); let bins = grid.bin_info().bins(); let nconvs = self.conv_funs.len(); @@ -596,7 +577,7 @@ metadata = {{ helpers::create_conv_funs_for_set(&self.conv_funs[0], self.conv_fun_uncert_from)?; let (set2, mut conv_funs2) = helpers::create_conv_funs_for_set(&self.conv_funs[1], self.conv_fun_uncert_from)?; - let (order, bin, channel) = self + let index @ (order, bin, channel) = self .subgrid_pull .iter() .map(|num| num.parse::().unwrap()) @@ -606,6 +587,9 @@ metadata = {{ let cl = lhapdf::CL_1_SIGMA; let grid = helpers::read_grid(&self.input)?; + // TODO: convert this into an error + assert_eq!(grid.convolutions().len(), 2); + let member1 = self.conv_funs[0].members[self.conv_fun_uncert_from]; let member2 = self.conv_funs[1].members[self.conv_fun_uncert_from]; @@ -686,10 +670,27 @@ metadata = {{ ) .sum_axis(Axis(0)); - let subgrid = &grid.subgrids()[[order, bin, channel]]; + let subgrid = &grid.subgrids()[<[usize; 3]>::from(index)]; //let q2 = subgrid.q2_grid(); - let x1 = subgrid.x1_grid(); - let x2 = subgrid.x2_grid(); + let x1 = grid + .kinematics() + .iter() + 
.zip(subgrid.node_values()) + .find_map(|(kin, node_values)| { + matches!(kin, &Kinematics::X(idx) if idx == 0).then_some(node_values) + }) + // TODO: convert this into an error + .unwrap(); + + let x2 = grid + .kinematics() + .iter() + .zip(subgrid.node_values()) + .find_map(|(kin, node_values)| { + matches!(kin, &Kinematics::X(idx) if idx == 1).then_some(node_values) + }) + // TODO: convert this into an error + .unwrap(); let mut x1_vals = vec![]; let mut x2_vals = vec![]; @@ -697,7 +698,7 @@ metadata = {{ for (((ix1, ix2), &one), &two) in res1.indexed_iter().zip(res2.iter()) { if one == 0.0 { - assert_eq!(two, 0.0); + assert_approx_eq!(f64, two, 0.0, ulps = 4); continue; } diff --git a/pineappl_cli/src/pull.rs b/pineappl_cli/src/pull.rs index 0ec6fe65d..8707e9e33 100644 --- a/pineappl_cli/src/pull.rs +++ b/pineappl_cli/src/pull.rs @@ -39,7 +39,7 @@ pub struct Opts { value_delimiter = ',', value_parser = helpers::parse_order )] - orders: Vec<(u32, u32)>, + orders: Vec<(u8, u8)>, /// Number of threads to utilize. 
#[arg(default_value_t = thread::available_parallelism().map_or(1, NonZeroUsize::get), long)] threads: usize, diff --git a/pineappl_cli/src/read.rs b/pineappl_cli/src/read.rs index b08e4e159..48bf56ab2 100644 --- a/pineappl_cli/src/read.rs +++ b/pineappl_cli/src/read.rs @@ -60,7 +60,7 @@ pub struct Opts { impl Subcommand for Opts { fn run(&self, _: &GlobalConfiguration) -> Result { - let mut grid = helpers::read_grid(&self.input)?; + let grid = helpers::read_grid(&self.input)?; let mut table = helpers::create_table(); @@ -124,8 +124,11 @@ impl Subcommand for Opts { row.add_cell(cell!(format!("{index}"))); - for (id1, id2, factor) in channel.entry() { - row.add_cell(cell!(format!("{factor} \u{d7} ({id1:2}, {id2:2})"))); + for (pids, factor) in channel.entry() { + row.add_cell(cell!(format!( + "{factor} \u{d7} ({})", + pids.iter().map(|pid| format!("{pid:2}")).join(", ") + ))); } } } else if self.group.ew || self.group.qcd { @@ -161,44 +164,17 @@ impl Subcommand for Opts { println!("{orders}"); } else if let Some(key) = &self.group.get { - grid.upgrade(); - - grid.key_values().map_or_else( - || unreachable!(), - |key_values| { - if let Some(value) = key_values.get(key) { - println!("{value}"); - } - }, - ); + if let Some(value) = grid.metadata().get(key) { + println!("{value}"); + } } else if self.group.keys { - grid.upgrade(); - - grid.key_values().map_or_else( - || unreachable!(), - |key_values| { - let mut vector = key_values.iter().collect::>(); - vector.sort(); - - for (key, _) in &vector { - println!("{key}"); - } - }, - ); + for key in grid.metadata().keys() { + println!("{key}"); + } } else if self.group.show { - grid.upgrade(); - - grid.key_values().map_or_else( - || unreachable!(), - |key_values| { - let mut vector = key_values.iter().collect::>(); - vector.sort(); - - for (key, value) in &vector { - println!("{key}: {value}"); - } - }, - ); + for (key, value) in grid.metadata() { + println!("{key}: {value}"); + } } else { table.set_titles(row![c => "o", 
"order"]); @@ -210,11 +186,12 @@ impl Subcommand for Opts { alpha, logxir, logxif, + logxia, } = order; - let order_string = [alphas, alpha, logxir, logxif] + let order_string = [alphas, alpha, logxir, logxif, logxia] .iter() - .zip(["as^", "a^", "lr^", "lf^"].iter()) + .zip(["as^", "a^", "lr^", "lf^", "la^"].iter()) .filter_map(|(num, string)| { if **num == 0 && self.group.orders { None diff --git a/pineappl_cli/src/subgrids.rs b/pineappl_cli/src/subgrids.rs index 3cdd5323c..69d7beaa8 100644 --- a/pineappl_cli/src/subgrids.rs +++ b/pineappl_cli/src/subgrids.rs @@ -2,7 +2,7 @@ use super::helpers; use super::{GlobalConfiguration, Subcommand}; use anyhow::Result; use clap::{Args, Parser, ValueHint}; -use pineappl::subgrid::Mu2; +use pineappl::boc::Kinematics; use pineappl::subgrid::{Subgrid, SubgridEnum}; use prettytable::{cell, row}; use std::path::PathBuf; @@ -14,24 +14,12 @@ struct Group { /// Show the subgrid type. #[arg(long = "type")] type_: bool, - /// Show the renormalization grid values. - #[arg(long)] - mur: bool, - /// Show the squared renormalization grid values. - #[arg(long)] - mur2: bool, - /// Show the factorization grid values. - #[arg(long)] - muf: bool, - /// Show the squared factorization grid values. - #[arg(long)] - muf2: bool, - /// Show the x1 grid values. - #[arg(long)] - x1: bool, - /// Show the x2 grid values. - #[arg(long)] - x2: bool, + /// Show the scale node values for the given indices. + #[arg(long, require_equals = true, value_delimiter = ',', value_name = "IDX")] + scale: Vec, + /// Show the x-node values for the given indices. + #[arg(long, require_equals = true, value_delimiter = ',', value_name = "IDX")] + x: Vec, /// Show grid statistics (figures are the number of entries). 
#[arg(long)] stats: bool, @@ -62,23 +50,11 @@ impl Subcommand for Opts { if self.group.type_ { titles.add_cell(cell!(c->"type")); } - if self.group.mur { - titles.add_cell(cell!(c->"mur")); - } - if self.group.mur2 { - titles.add_cell(cell!(c->"mur2")); - } - if self.group.muf { - titles.add_cell(cell!(c->"muf")); + for index in &self.group.scale { + titles.add_cell(cell!(c->format!("scale{index}"))); } - if self.group.muf2 { - titles.add_cell(cell!(c->"muf2")); - } - if self.group.x1 { - titles.add_cell(cell!(c->"x1")); - } - if self.group.x2 { - titles.add_cell(cell!(c->"x2")); + for index in &self.group.x { + titles.add_cell(cell!(c->format!("x{index}"))); } if self.group.stats { titles.add_cell(cell!(c->"total")); @@ -102,65 +78,40 @@ impl Subcommand for Opts { if self.group.type_ { row.add_cell(cell!(l-> match subgrid { - SubgridEnum::LagrangeSubgridV1(_) => "LagrangeSubgridV1", - SubgridEnum::NtupleSubgridV1(_) => "NtupleSubgridV1", - SubgridEnum::LagrangeSparseSubgridV1(_) => "LagrangeSparseSubgridV1", - SubgridEnum::LagrangeSubgridV2(_) => "LagrangeSubgridV2", - SubgridEnum::ImportOnlySubgridV1(_) => "ImportOnlySubgridV1", - SubgridEnum::ImportOnlySubgridV2(_) => "ImportOnlySubgridV2", + SubgridEnum::InterpSubgridV1(_) => "InterpSubgridV1", SubgridEnum::EmptySubgridV1(_) => "EmptySubgridV1", + SubgridEnum::ImportSubgridV1(_) => "ImportSubgridV1", } )); } - if self.group.mur { - let values: Vec<_> = subgrid - .mu2_grid() - .iter() - .map(|Mu2 { ren, .. }| format!("{:.*}", self.digits, ren.sqrt())) - .collect(); - - row.add_cell(cell!(l->values.join(", "))); - } - if self.group.mur2 { - let values: Vec<_> = subgrid - .mu2_grid() + for &index in &self.group.scale { + let values: Vec<_> = grid + .kinematics() .iter() - .map(|Mu2 { ren, .. }| format!("{:.*}", self.digits, ren)) - .collect(); - - row.add_cell(cell!(l->values.join(", "))); - } - if self.group.muf { - let values: Vec<_> = subgrid - .mu2_grid() - .iter() - .map(|Mu2 { fac, .. 
}| format!("{:.*}", self.digits, fac.sqrt())) - .collect(); - - row.add_cell(cell!(l->values.join(", "))); - } - if self.group.muf2 { - let values: Vec<_> = subgrid - .mu2_grid() - .iter() - .map(|Mu2 { fac, .. }| format!("{:.*}", self.digits, fac)) - .collect(); - - row.add_cell(cell!(l->values.join(", "))); - } - if self.group.x1 { - let values: Vec<_> = subgrid - .x1_grid() - .iter() - .map(|x| format!("{:.*e}", self.digits, x)) + .zip(subgrid.node_values()) + .find_map(|(kin, node_values)| { + matches!(kin, &Kinematics::Scale(idx) if idx == index) + .then_some(node_values) + }) + // TODO: convert this into an error + .unwrap() + .into_iter() + .map(|x| format!("{:.*}", self.digits, x)) .collect(); row.add_cell(cell!(l->values.join(", "))); } - if self.group.x2 { - let values: Vec<_> = subgrid - .x2_grid() + for &index in &self.group.x { + let values: Vec<_> = grid + .kinematics() .iter() + .zip(subgrid.node_values()) + .find_map(|(kin, node_values)| { + matches!(kin, &Kinematics::X(idx) if idx == index).then_some(node_values) + }) + // TODO: convert this into an error + .unwrap() + .into_iter() .map(|x| format!("{:.*e}", self.digits, x)) .collect(); diff --git a/pineappl_cli/src/uncert.rs b/pineappl_cli/src/uncert.rs index ffe14dc63..8f94c389e 100644 --- a/pineappl_cli/src/uncert.rs +++ b/pineappl_cli/src/uncert.rs @@ -30,7 +30,7 @@ struct Group { long, require_equals = true, value_name = "SCALES", - value_parser = PossibleValuesParser::new(["3", "7", "9"]).try_map(|s| s.parse::()) + value_parser = PossibleValuesParser::new(["3", "7", "9", "17", "27"]).try_map(|s| s.parse::()) )] scale_abs: Option, /// Calculate scale uncertainties using the covariance method. 
@@ -40,17 +40,17 @@ struct Group { long, require_equals = true, value_name = "SCALES", - value_parser = PossibleValuesParser::new(["3", "7", "9"]).try_map(|s| s.parse::()) + value_parser = PossibleValuesParser::new(["3", "7", "9", "17", "27"]).try_map(|s| s.parse::()) )] scale_cov: Option, - /// Calculate the envelope of results where renormalization and factorization scales varied. + /// Calculate the envelope of results where renormalization, factorization and fragmentation scales are varied. #[arg( default_missing_value = "7", num_args = 0..=1, long, require_equals = true, value_name = "SCALES", - value_parser = PossibleValuesParser::new(["3", "7", "9"]).try_map(|s| s.parse::()) + value_parser = PossibleValuesParser::new(["3", "7", "9", "17", "27"]).try_map(|s| s.parse::()) )] scale_env: Option, } @@ -79,7 +79,7 @@ pub struct Opts { value_delimiter = ',', value_parser = helpers::parse_order )] - orders: Vec<(u32, u32)>, + orders: Vec<(u8, u8)>, /// Number of threads to utilize. #[arg(default_value_t = thread::available_parallelism().map_or(1, NonZeroUsize::get), long)] threads: usize, @@ -157,13 +157,14 @@ impl Subcommand for Opts { .map(|&x| usize::from(x)) .max() .unwrap_or(1); - let scale_results = helpers::convolve( + let scale_tuples = helpers::scales_vector(&grid, scales_max); + let scale_results = helpers::convolve_scales( &grid, &mut conv_funs, &self.orders, &[], &[], - scales_max, + scale_tuples, if self.integrated { ConvoluteMode::Integrated } else { @@ -191,8 +192,8 @@ impl Subcommand for Opts { } if let Some(scales) = self.group.scale_abs { - for scale in &helpers::SCALES_VECTOR[0..scales.into()] { - title.add_cell(cell!(c->format!("(r={},f={})\n[{}]", scale.0, scale.1, y_unit))); + for (xir, xif, xia) in &scale_tuples[0..scales.into()] { + title.add_cell(cell!(c->format!("{xir},{xif},{xia}\n(r,f,a)\n[{y_unit}]"))); } } diff --git a/pineappl_cli/src/write.rs b/pineappl_cli/src/write.rs index f01358d6c..8435a5510 100644 --- a/pineappl_cli/src/write.rs 
+++ b/pineappl_cli/src/write.rs @@ -30,8 +30,7 @@ pub struct Opts { #[derive(Clone)] enum OpsArg { - Cc1(bool), - Cc2(bool), + Cc(usize), DedupChannels(i64), DeleteBins(Vec>), DeleteChannels(Vec>), @@ -70,7 +69,23 @@ impl FromArgMatches for MoreArgs { args.resize(indices.iter().max().unwrap() + 1, None); match id.as_str() { - "cc1" | "cc2" | "optimize" | "split_channels" | "upgrade" => { + "cc" => { + let arguments: Vec> = matches + .remove_occurrences(&id) + .unwrap() + .map(Iterator::collect) + .collect(); + assert_eq!(arguments.len(), indices.len()); + + for (index, arg) in indices.into_iter().zip(arguments.into_iter()) { + assert_eq!(arg.len(), 1); + args[index] = Some(match id.as_str() { + "cc" => OpsArg::Cc(arg[0]), + _ => unreachable!(), + }); + } + } + "optimize" | "split_channels" | "upgrade" => { let arguments: Vec> = matches .remove_occurrences(&id) .unwrap() @@ -81,8 +96,6 @@ impl FromArgMatches for MoreArgs { for (index, arg) in indices.into_iter().zip(arguments.into_iter()) { assert_eq!(arg.len(), 1); args[index] = Some(match id.as_str() { - "cc1" => OpsArg::Cc1(arg[0]), - "cc2" => OpsArg::Cc2(arg[0]), "optimize" => OpsArg::Optimize(arg[0]), "split_channels" => OpsArg::SplitChannels(arg[0]), "upgrade" => OpsArg::Upgrade(arg[0]), @@ -258,26 +271,13 @@ impl FromArgMatches for MoreArgs { impl Args for MoreArgs { fn augment_args(cmd: Command) -> Command { cmd.arg( - Arg::new("cc1") + Arg::new("cc") .action(ArgAction::Append) - .default_missing_value("true") - .help("Charge conjugate the first initial state") - .long("cc1") - .num_args(0..=1) - .require_equals(true) - .value_name("ENABLE") - .value_parser(clap::value_parser!(bool)), - ) - .arg( - Arg::new("cc2") - .action(ArgAction::Append) - .default_missing_value("true") - .help("Charge conjugate the second initial state") - .long("cc2") - .num_args(0..=1) - .require_equals(true) - .value_name("ENABLE") - .value_parser(clap::value_parser!(bool)), + .help("Charge conjugate the convolution with the 
specified index") + .long("cc") + .num_args(1) + .value_name("IDX") + .value_parser(clap::value_parser!(usize)), ) .arg( Arg::new("dedup_channels") @@ -439,11 +439,11 @@ impl Args for MoreArgs { .arg( Arg::new("scale_by_order") .action(ArgAction::Append) - .help("Scales all grids with order-dependent factors") + .help("Scale subgrids with order-dependent factors") .long("scale-by-order") .num_args(1) .value_delimiter(',') - .value_name("AS,AL,LR,LF") + .value_name("FAC1,FAC2,...") .value_parser(value_parser!(f64)), ) .arg( @@ -500,40 +500,9 @@ impl Subcommand for Opts { for arg in &self.more_args.args { match arg { - OpsArg::Cc1(true) | OpsArg::Cc2(true) => { - let cc1 = matches!(arg, OpsArg::Cc1(true)); - let cc2 = matches!(arg, OpsArg::Cc2(true)); - - let pid_basis = grid.pid_basis(); - - for channel in grid.channels_mut() { - *channel = Channel::new( - channel - .entry() - .iter() - .map(|&(a, b, f)| { - let (ap, f1) = if cc1 { - pid_basis.charge_conjugate(a) - } else { - (a, 1.0) - }; - let (bp, f2) = if cc2 { - pid_basis.charge_conjugate(b) - } else { - (b, 1.0) - }; - (ap, bp, f * f1 * f2) - }) - .collect(), - ); - } - - if cc1 { - grid.set_convolution(0, grid.convolutions()[0].charge_conjugate()); - } - if cc2 { - grid.set_convolution(1, grid.convolutions()[1].charge_conjugate()); - } + // TODO: generalize to arbitrary convolutions + OpsArg::Cc(index) => { + grid.charge_conjugate(*index); } OpsArg::DedupChannels(ulps) => { grid.dedup_channels(*ulps); @@ -548,7 +517,7 @@ impl Subcommand for Opts { grid.delete_orders(&ranges.iter().flat_map(Clone::clone).collect::>()); } OpsArg::DeleteKey(key) => { - grid.key_values_mut().remove(key); + grid.metadata_mut().remove(key); } OpsArg::MergeBins(ranges) => { // TODO: sort after increasing start indices @@ -612,21 +581,22 @@ impl Subcommand for Opts { } OpsArg::ScaleByBin(factors) => grid.scale_by_bin(factors), OpsArg::ScaleByOrder(factors) => { - grid.scale_by_order(factors[0], factors[1], factors[2], factors[3], 
1.0); + grid.scale_by_order( + factors[0], factors[1], factors[2], factors[3], factors[4], 1.0, + ); } OpsArg::SetKeyValue(key_value) => { - grid.set_key_value(&key_value[0], &key_value[1]); + grid.metadata_mut() + .insert(key_value[0].clone(), key_value[1].clone()); } OpsArg::SetKeyFile(key_file) => { - grid.set_key_value(&key_file[0], &fs::read_to_string(&key_file[1])?); + grid.metadata_mut() + .insert(key_file[0].clone(), fs::read_to_string(&key_file[1])?); } OpsArg::SplitChannels(true) => grid.split_channels(), OpsArg::Upgrade(true) => grid.upgrade(), - OpsArg::Cc1(false) - | OpsArg::Cc2(false) - | OpsArg::Optimize(false) - | OpsArg::SplitChannels(false) - | OpsArg::Upgrade(false) => {} + OpsArg::Optimize(false) | OpsArg::SplitChannels(false) | OpsArg::Upgrade(false) => { + } } } diff --git a/pineappl_cli/tests/analyze.rs b/pineappl_cli/tests/analyze.rs index 2b2f71fa9..c8a225c7f 100644 --- a/pineappl_cli/tests/analyze.rs +++ b/pineappl_cli/tests/analyze.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + use assert_cmd::Command; const HELP_STR: &str = "Perform various analyses with grids diff --git a/pineappl_cli/tests/channels.rs b/pineappl_cli/tests/channels.rs index a7556af1a..2bf018d94 100644 --- a/pineappl_cli/tests/channels.rs +++ b/pineappl_cli/tests/channels.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + use assert_cmd::Command; const HELP_STR: &str = "Shows the contribution for each partonic channel diff --git a/pineappl_cli/tests/convolve.rs b/pineappl_cli/tests/convolve.rs index 8ea0e8b45..3822634ed 100644 --- a/pineappl_cli/tests/convolve.rs +++ b/pineappl_cli/tests/convolve.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + use assert_cmd::Command; use predicates::str; @@ -15,6 +17,7 @@ Options: -o, --orders Select orders manually --xir Set the variation of the renormalization scale [default: 1.0] --xif Set the variation of the factorization scale [default: 1.0] + --xia Set the variation of the fragmentation scale [default: 1.0] --digits-abs Set the number of 
fractional digits shown for absolute numbers [default: 7] --digits-rel Set the number of fractional digits shown for relative numbers [default: 2] -h, --help Print help @@ -36,9 +39,6 @@ const DEFAULT_STR: &str = "b etal dsig/detal const USE_ALPHAS_FROM_ERROR_STR: &str = "expected `use_alphas_from` to be `0` or `1`, is `2` "; -const THREE_PDF_ERROR_STR: &str = "convolutions with 3 convolution functions is not supported -"; - const FORCE_POSITIVE_STR: &str = "b etal dsig/detal [] [pb] -+----+----+----------- @@ -226,20 +226,6 @@ fn use_alphas_from_error() { .stderr(str::contains(USE_ALPHAS_FROM_ERROR_STR)); } -#[test] -fn three_pdf_error() { - Command::cargo_bin("pineappl") - .unwrap() - .args([ - "convolve", - "../test-data/LHCB_WP_7TEV_opt.pineappl.lz4", - "NNPDF31_nlo_as_0118_luxqed,NNPDF31_nlo_as_0118_luxqed,NNPDF31_nlo_as_0118_luxqed", - ]) - .assert() - .failure() - .stderr(str::contains(THREE_PDF_ERROR_STR)); -} - #[test] fn force_positive() { Command::cargo_bin("pineappl") diff --git a/pineappl_cli/tests/diff.rs b/pineappl_cli/tests/diff.rs index 6b3c59abd..ef7d810de 100644 --- a/pineappl_cli/tests/diff.rs +++ b/pineappl_cli/tests/diff.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + use assert_cmd::Command; use assert_fs::NamedTempFile; diff --git a/pineappl_cli/tests/evolve.rs b/pineappl_cli/tests/evolve.rs index c6e02d904..b56e4868e 100644 --- a/pineappl_cli/tests/evolve.rs +++ b/pineappl_cli/tests/evolve.rs @@ -1,3 +1,4 @@ +#![allow(missing_docs)] #![cfg(feature = "evolve")] use assert_cmd::Command; @@ -21,6 +22,7 @@ Options: -o, --orders Select which orders to evolve --xir Rescale the renormalization scale with this factor [default: 1] --xif Rescale the factorization scale with this factor [default: 1] + --xia Rescale the fragmentation scale with this factor [default: 1] -h, --help Print help "; @@ -69,37 +71,37 @@ const LHCB_WP_7TEV_V2_STR: &str = "b Grid FkTable -+--------------------+--------------------+---------------------- 0 7.8752126798068639e2 
7.8731064380928558e2 -2.6745204220435248e-4 1 7.1872113080347663e2 7.1853123147848032e2 -2.6421836906898033e-4 -2 6.2322357391848550e2 6.2306009928459093e2 -2.6230495882362259e-4 -3 5.0216762988872915e2 5.0203737363369049e2 -2.5938799573266280e-4 -4 3.7314505699003126e2 3.7305089832847733e2 -2.5233795755852384e-4 -5 2.5302044227292129e2 2.5295968261889854e2 -2.4013733229188983e-4 -6 1.1971045984774410e2 1.1968525412249538e2 -2.1055574659711862e-4 -7 2.9272102213930090e1 2.9268443366651141e1 -1.2499434622803562e-4 +2 6.2322357391848550e2 6.2306009928459105e2 -2.6230495882340055e-4 +3 5.0216762988872927e2 5.0203737363369049e2 -2.5938799573288485e-4 +4 3.7314505699003132e2 3.7305089832847727e2 -2.5233795755885691e-4 +5 2.5302044227292134e2 2.5295968261889854e2 -2.4013733229211187e-4 +6 1.1971045984774410e2 1.1968525412249534e2 -2.1055574659745169e-4 +7 2.9272102213930097e1 2.9268443366651130e1 -1.2499434622859074e-4 "; const LHCB_WP_7TEV_V2_XIR2_STR: &str = "b Grid FkTable rel. diff -+--------------------+--------------------+---------------------- 0 7.7634833292737017e2 7.7614037816519419e2 -2.6786270203205120e-4 -1 7.0866199875124983e2 7.0847444839781781e2 -2.6465417048249229e-4 -2 6.1427556024981789e2 6.1411417374531106e2 -2.6272655946324441e-4 -3 4.9482819982783724e2 4.9469964081143053e2 -2.5980535557890150e-4 +1 7.0866199875124971e2 7.0847444839781758e2 -2.6465417048271433e-4 +2 6.1427556024981789e2 6.1411417374531095e2 -2.6272655946346646e-4 +3 4.9482819982783735e2 4.9469964081143053e2 -2.5980535557912354e-4 4 3.6756257449354945e2 3.6746967569489709e2 -2.5274281196974169e-4 5 2.4912642701834142e2 2.4906651029915440e2 -2.4050727939273209e-4 -6 1.1776254040032327e2 1.1773772039493417e2 -2.1076316207790935e-4 -7 2.8749891297668260e1 2.8746299479656258e1 -1.2493327278395583e-4 +6 1.1776254040032327e2 1.1773772039493414e2 -2.1076316207813139e-4 +7 2.8749891297668260e1 2.8746299479656273e1 -1.2493327278351174e-4 "; const LHCB_WP_7TEV_V2_XIF_2_STR: &str = "b Grid 
FkTable rel. diff -+--------------------+--------------------+---------------------- -0 8.0902449713533758e2 8.0880109089579207e2 -2.7614273774967391e-4 -1 7.3869242569893402e2 7.3849113100483919e2 -2.7250136469769703e-4 +0 8.0902449713533770e2 8.0880109089579207e2 -2.7614273774978493e-4 +1 7.3869242569893402e2 7.3849113100483896e2 -2.7250136469803010e-4 2 6.4102495904778243e2 6.4085178025871448e2 -2.7015919836448354e-4 3 5.1668563837653949e2 5.1654786167667771e2 -2.6665478896348294e-4 4 3.8405066991124284e2 3.8395127677619655e2 -2.5880213949180941e-4 -5 2.6047697125229388e2 2.6041295913273854e2 -2.4574963094659008e-4 +5 2.6047697125229382e2 2.6041295913273854e2 -2.4574963094636804e-4 6 1.2324364745022301e2 1.2321715784184289e2 -2.1493690691698486e-4 -7 3.0134629982656573e1 3.0130872371345841e1 -1.2469412476256991e-4 +7 3.0134629982656573e1 3.0130872371345848e1 -1.2469412476234787e-4 "; const LHCB_WP_7TEV_V2_XIF_2_ERROR_STR: &str = "Error: failed to evolve grid: no operator for muf2 = 25825.775616000003 found in [6456.443904000001] @@ -169,7 +171,7 @@ const NUTEV_CC_NU_FE_SIGMARED_STR: &str = "b Grid FkTable rel. dif const CMS_TTB_8TEV_2D_TTM_TRAP_TOT_STR: &str = "b Grid FkTable rel. diff -+-----------+-----------+------------- -0 2.1596192e2 2.1590144e2 -2.8005486e-4 +0 2.0680644e2 2.0666857e2 -6.6663644e-4 "; const STAR_WMWP_510GEV_WM_AL_POL: &str = "b Grid FkTable rel. 
diff @@ -237,26 +239,6 @@ fn lhcb_wp_7tev() { .stdout(LHCB_WP_7TEV_OPTIMIZED_STR); } -#[test] -fn lhcb_wp_7tev_use_old_evolve() { - let output = NamedTempFile::new("fktable1c.lz4").unwrap(); - - Command::cargo_bin("pineappl") - .unwrap() - .args([ - "evolve", - "../test-data/LHCB_WP_7TEV_opt.pineappl.lz4", - "../test-data/LHCB_WP_7TEV.tar", - output.path().to_str().unwrap(), - "NNPDF40_nlo_as_01180", - "--orders=a2,as1a2", - "--use-old-evolve", - ]) - .assert() - .success() - .stdout(LHCB_WP_7TEV_STR); -} - #[test] fn lhcb_wp_7tev_v2() { let output = NamedTempFile::new("fktable2a.lz4").unwrap(); @@ -424,6 +406,7 @@ fn cms_ttb_8tev_2d_ttm_trap_tot() { .args([ "evolve", "--orders=as2,as3,as4", + "--xir=2", "../test-data/CMS_TTB_8TEV_2D_TTM_TRAP_TOT-opt.pineappl.lz4", "../test-data/CMS_TTB_8TEV_2D_TTM_TRAP_TOT.tar", output.path().to_str().unwrap(), diff --git a/pineappl_cli/tests/export.rs b/pineappl_cli/tests/export.rs index 47fe293a7..7a4ec5963 100644 --- a/pineappl_cli/tests/export.rs +++ b/pineappl_cli/tests/export.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + use assert_cmd::Command; #[cfg(feature = "applgrid")] @@ -23,36 +25,36 @@ Options: #[cfg(feature = "applgrid")] const EXPORT_APPLGRID_STR: &str = - "WARNING: the order O(as^1 a^2 lr^1 lf^0) isn't supported by APPLgrid and will be skipped. -WARNING: the order O(as^1 a^2 lr^0 lf^1) isn't supported by APPLgrid and will be skipped. -WARNING: the order O(as^0 a^3 lr^0 lf^0) isn't supported by APPLgrid and will be skipped. -WARNING: the order O(as^0 a^3 lr^1 lf^0) isn't supported by APPLgrid and will be skipped. -WARNING: the order O(as^0 a^3 lr^0 lf^1) isn't supported by APPLgrid and will be skipped. + "WARNING: the order O(as^1 a^2 lr^1 lf^0 la^0) isn't supported by APPLgrid and will be skipped. +WARNING: the order O(as^1 a^2 lr^0 lf^1 la^0) isn't supported by APPLgrid and will be skipped. +WARNING: the order O(as^0 a^3 lr^0 lf^0 la^0) isn't supported by APPLgrid and will be skipped. 
+WARNING: the order O(as^0 a^3 lr^1 lf^0 la^0) isn't supported by APPLgrid and will be skipped. +WARNING: the order O(as^0 a^3 lr^0 lf^1 la^0) isn't supported by APPLgrid and will be skipped. b APPLgrid PineAPPL rel. diff --+------------+------------+-------------- 0 7.9566291e0 7.9566291e0 -2.5535130e-15 -1 2.3289219e1 2.3289219e1 -1.9984014e-15 +1 2.3289219e1 2.3289219e1 -1.7763568e-15 2 3.7442697e1 3.7442697e1 -2.1094237e-15 -3 5.0087316e1 5.0087316e1 -3.1086245e-15 -4 6.0873237e1 6.0873237e1 -2.8865799e-15 -5 6.8944378e1 6.8944378e1 -3.8857806e-15 +3 5.0087316e1 5.0087316e1 -2.9976022e-15 +4 6.0873237e1 6.0873237e1 -2.5535130e-15 +5 6.8944378e1 6.8944378e1 -3.5527137e-15 6 7.4277783e1 7.4277783e1 -2.8865799e-15 -7 7.6356931e1 7.6356931e1 -3.7747583e-15 -8 7.5009607e1 7.5009607e1 -1.5543122e-15 -9 7.0045787e1 7.0045787e1 -1.2212453e-15 +7 7.6356931e1 7.6356931e1 -3.3306691e-15 +8 7.5009607e1 7.5009607e1 -1.8873791e-15 +9 7.0045787e1 7.0045787e1 -9.9920072e-16 10 6.0009803e1 6.0009803e1 -7.7715612e-16 11 4.6770515e1 4.6770515e1 4.4408921e-16 12 3.3569217e1 3.3569217e1 1.5543122e-15 -13 2.1820341e1 2.1820341e1 1.3322676e-15 +13 2.1820341e1 2.1820341e1 1.1102230e-15 14 1.2542026e1 1.2542026e1 2.2204460e-16 15 6.0879666e0 6.0879666e0 -1.3322676e-15 16 1.5789361e0 1.5789361e0 -1.5543122e-15 -17 7.4959880e-2 7.4959880e-2 -1.3322676e-15 +17 7.4959880e-2 7.4959880e-2 -1.1102230e-15 "; #[cfg(feature = "applgrid")] const EXPORT_DIS_APPLGRID_STR: &str = - "WARNING: the order O(as^1 a^0 lr^0 lf^1) isn't supported by APPLgrid and will be skipped. + "WARNING: the order O(as^1 a^0 lr^0 lf^1 la^0) isn't supported by APPLgrid and will be skipped. b APPLgrid PineAPPL rel. 
diff --+-----------+-----------+-------------- 0 2.8829972e0 2.8829972e0 3.3306691e-15 diff --git a/pineappl_cli/tests/help.rs b/pineappl_cli/tests/help.rs index f620d84eb..a6b3ada0b 100644 --- a/pineappl_cli/tests/help.rs +++ b/pineappl_cli/tests/help.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + use assert_cmd::Command; const HELP_STR: &str = "Display a manpage for selected subcommands diff --git a/pineappl_cli/tests/import.rs b/pineappl_cli/tests/import.rs index eee7b108e..88aa18e31 100644 --- a/pineappl_cli/tests/import.rs +++ b/pineappl_cli/tests/import.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + use assert_cmd::Command; #[cfg(any(feature = "applgrid", feature = "fastnlo", feature = "fktable"))] @@ -22,7 +24,6 @@ Options: --digits-abs Set the number of fractional digits shown for absolute numbers [default: 7] --digits-rel Set the number of fractional digits shown for relative numbers [default: 7] --no-optimize Do not optimize converted grid - --dis-pid Particle ID for the non-hadronic initial states if it cannot be determined from the grid [default: 11] -h, --help Print help "; @@ -45,161 +46,161 @@ Options: --digits-abs Set the number of fractional digits shown for absolute numbers [default: 7] --digits-rel Set the number of fractional digits shown for relative numbers [default: 7] --no-optimize Do not optimize converted grid - --dis-pid Particle ID for the non-hadronic initial states if it cannot be determined from the grid [default: 11] -h, --help Print help "; #[cfg(feature = "fastnlo")] const IMPORT_FIX_GRID_STR: &str = "b PineAPPL fastNLO rel. diff -+------------+------------+-------------- -0 2.9158424e-4 2.9158424e-4 -2.9976022e-15 -1 2.4657895e-4 2.4657895e-4 -2.8865799e-15 +0 2.9158424e-4 2.9158424e-4 -2.7755576e-15 +1 2.4657895e-4 2.4657895e-4 -2.6645353e-15 "; #[cfg(feature = "fastnlo")] -const IMPORT_FLEX_GRID_STR: &str = "b PineAPPL fastNLO rel. 
diff svmaxreldiff --+------------+------------+--------------+------------- -0 8.2754182e1 8.2754182e1 -1.3544721e-14 1.3544721e-14 -1 3.6097335e1 3.6097335e1 -6.8833828e-15 8.8817842e-15 -2 8.0048746e0 8.0048746e0 5.3290705e-15 6.8833828e-15 -3 9.4319392e-1 9.4319392e-1 5.5511151e-15 5.5511151e-15 +const IMPORT_FLEX_GRID_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff +-+------------+------------+--------------+-------------- +0 8.2754182e1 8.2754182e1 -1.3544721e-14 -7.8825835e-15 +1 3.6097335e1 3.6097335e1 -6.8833828e-15 8.8817842e-15 +2 8.0048746e0 8.0048746e0 5.3290705e-15 6.8833828e-15 +3 9.4319392e-1 9.4319392e-1 5.5511151e-15 4.6629367e-15 "; #[cfg(feature = "fastnlo")] -const IMPORT_FLEX_GRID_SCALE_1_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff --+------------+------------+--------------+------------- -0 8.1965747e1 8.1965747e1 1.5543122e-15 7.6605389e-15 -1 3.6115068e1 3.6115068e1 -3.1086245e-15 1.4321877e-14 -2 8.1057136e0 8.1057136e0 8.8817842e-16 5.7731597e-15 -3 9.5444782e-1 9.5444782e-1 5.5511151e-15 5.5511151e-15 +const IMPORT_FLEX_GRID_SCALE_1_STR: &str = + "b PineAPPL fastNLO rel. diff svmaxreldiff +-+------------+------------+--------------+-------------- +0 8.1965747e1 8.1965747e1 1.5543122e-15 -7.6605389e-15 +1 3.6115068e1 3.6115068e1 -3.1086245e-15 -1.4321877e-14 +2 8.1057136e0 8.1057136e0 8.8817842e-16 -5.7731597e-15 +3 9.5444782e-1 9.5444782e-1 5.5511151e-15 3.7747583e-15 "; #[cfg(feature = "fastnlo")] -const IMPORT_FLEX_GRID_SCALE_2_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff --+------------+------------+-------------+------------- -0 8.3815533e1 8.3815533e1 4.8849813e-15 4.8849813e-15 -1 3.6084994e1 3.6084994e1 2.6645353e-15 7.7715612e-15 -2 7.8842272e0 7.8842272e0 1.9984014e-15 4.3298698e-15 -3 9.1960866e-1 9.1960866e-1 3.1086245e-15 5.3290705e-15 +const IMPORT_FLEX_GRID_SCALE_2_STR: &str = "b PineAPPL fastNLO rel. 
diff svmaxreldiff +-+------------+------------+-------------+-------------- +0 8.3815533e1 8.3815533e1 4.8849813e-15 4.4408921e-15 +1 3.6084994e1 3.6084994e1 2.6645353e-15 7.7715612e-15 +2 7.8842272e0 7.8842272e0 1.9984014e-15 -4.3298698e-15 +3 9.1960866e-1 9.1960866e-1 3.1086245e-15 5.3290705e-15 "; #[cfg(feature = "fastnlo")] const IMPORT_FLEX_GRID_QUADRATIC_SUM_STR: &str = - "b PineAPPL fastNLO rel. diff svmaxreldiff --+------------+------------+--------------+------------- -0 8.1098571e1 8.1098571e1 -4.7739590e-15 7.3274720e-15 -1 3.5222658e1 3.5222658e1 1.1102230e-15 6.6613381e-15 -2 7.7939468e0 7.7939468e0 1.9984014e-15 4.5519144e-15 -3 9.1540624e-1 9.1540624e-1 -5.9952043e-15 7.7715612e-15 + "b PineAPPL fastNLO rel. diff svmaxreldiff +-+------------+------------+--------------+-------------- +0 8.1098571e1 8.1098571e1 -4.7739590e-15 -7.3274720e-15 +1 3.5222658e1 3.5222658e1 1.1102230e-15 6.8833828e-15 +2 7.7939468e0 7.7939468e0 1.7763568e-15 -4.5519144e-15 +3 9.1540624e-1 9.1540624e-1 -5.7731597e-15 7.7715612e-15 "; #[cfg(feature = "fastnlo")] const IMPORT_FLEX_GRID_QUADRATIC_MEAN_STR: &str = - "b PineAPPL fastNLO rel. diff svmaxreldiff --+------------+------------+--------------+------------- -0 8.2712488e1 8.2712488e1 2.2204460e-16 1.0214052e-14 -1 3.6091182e1 3.6091182e1 -7.7715612e-16 5.9952043e-15 -2 7.9809031e0 7.9809031e0 -6.9944051e-15 9.5479180e-15 -3 9.3467326e-1 9.3467326e-1 6.6613381e-16 2.4424907e-15 + "b PineAPPL fastNLO rel. diff svmaxreldiff +-+------------+------------+--------------+-------------- +0 8.2712488e1 8.2712488e1 2.2204460e-16 1.0214052e-14 +1 3.6091182e1 3.6091182e1 -7.7715612e-16 5.9952043e-15 +2 7.9809031e0 7.9809031e0 -6.9944051e-15 -9.3258734e-15 +3 9.3467326e-1 9.3467326e-1 8.8817842e-16 2.4424907e-15 "; #[cfg(feature = "fastnlo")] -const IMPORT_FLEX_GRID_5_STR: &str = "b PineAPPL fastNLO rel. 
diff svmaxreldiff --+------------+------------+--------------+------------- -0 8.4122343e1 8.4122343e1 8.6597396e-15 8.6597396e-15 -1 3.6813708e1 3.6813708e1 5.9952043e-15 7.3274720e-15 -2 8.1178188e0 8.1178188e0 -1.1102230e-15 1.3322676e-14 -3 9.5090947e-1 9.5090947e-1 6.6613381e-15 6.6613381e-15 +const IMPORT_FLEX_GRID_5_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff +-+------------+------------+--------------+-------------- +0 8.4122343e1 8.4122343e1 8.6597396e-15 -7.8825835e-15 +1 3.6813708e1 3.6813708e1 5.9952043e-15 7.3274720e-15 +2 8.1178188e0 8.1178188e0 -1.1102230e-15 -1.3322676e-14 +3 9.5090947e-1 9.5090947e-1 6.6613381e-15 -5.2180482e-15 "; #[cfg(feature = "fastnlo")] -const IMPORT_FLEX_GRID_6_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff --+------------+------------+--------------+------------- -0 8.2853942e1 8.2853942e1 0.0000000e0 5.1070259e-15 -1 3.6103118e1 3.6103118e1 7.1054274e-15 9.2148511e-15 -2 8.0161351e0 8.0161351e0 -2.6645353e-15 1.1324275e-14 -3 9.4536395e-1 9.4536395e-1 5.1070259e-15 5.8841820e-15 +const IMPORT_FLEX_GRID_6_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff +-+------------+------------+--------------+-------------- +0 8.2853942e1 8.2853942e1 0.0000000e0 5.1070259e-15 +1 3.6103118e1 3.6103118e1 7.1054274e-15 -9.2148511e-15 +2 8.0161351e0 8.0161351e0 -2.6645353e-15 -1.1324275e-14 +3 9.4536395e-1 9.4536395e-1 5.1070259e-15 -5.8841820e-15 "; #[cfg(feature = "fastnlo")] -const IMPORT_FLEX_GRID_7_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff --+------------+------------+--------------+------------- -0 7.9163980e1 7.9163980e1 3.7747583e-15 1.2323476e-14 -1 3.4313126e1 3.4313126e1 -4.7739590e-15 9.2148511e-15 -2 7.7006079e0 7.7006079e0 -3.2196468e-15 1.1546319e-14 -3 9.2392932e-1 9.2392932e-1 -4.3298698e-15 5.5511151e-15 +const IMPORT_FLEX_GRID_7_STR: &str = "b PineAPPL fastNLO rel. 
diff svmaxreldiff +-+------------+------------+--------------+-------------- +0 7.9163980e1 7.9163980e1 3.7747583e-15 -1.2323476e-14 +1 3.4313126e1 3.4313126e1 -4.7739590e-15 -9.2148511e-15 +2 7.7006079e0 7.7006079e0 -3.2196468e-15 -1.1546319e-14 +3 9.2392932e-1 9.2392932e-1 -4.3298698e-15 -5.5511151e-15 "; #[cfg(feature = "fastnlo")] -const IMPORT_FLEX_GRID_8_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff --+------------+------------+--------------+------------- -0 8.1965747e1 8.1965747e1 1.5543122e-15 7.6605389e-15 -1 3.5968167e1 3.5968167e1 0.0000000e0 7.7715612e-15 -2 7.9289155e0 7.9289155e0 1.3322676e-15 1.0436096e-14 -3 9.3523838e-1 9.3523838e-1 -2.6645353e-15 9.5479180e-15 +const IMPORT_FLEX_GRID_8_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff +-+------------+------------+--------------+-------------- +0 8.1965747e1 8.1965747e1 1.5543122e-15 -7.6605389e-15 +1 3.5968167e1 3.5968167e1 0.0000000e0 7.7715612e-15 +2 7.9289155e0 7.9289155e0 1.3322676e-15 -1.0436096e-14 +3 9.3523838e-1 9.3523838e-1 -2.6645353e-15 9.5479180e-15 "; #[cfg(feature = "fastnlo")] -const IMPORT_FLEX_GRID_9_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff --+------------+------------+--------------+------------- -0 8.3760665e1 8.3760665e1 3.5527137e-15 9.5479180e-15 -1 3.6243722e1 3.6243722e1 -7.4384943e-15 1.7430501e-14 -2 8.1057136e0 8.1057136e0 8.8817842e-16 5.7731597e-15 -3 9.5444782e-1 9.5444782e-1 5.5511151e-15 5.5511151e-15 +const IMPORT_FLEX_GRID_9_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff +-+------------+------------+--------------+-------------- +0 8.3760665e1 8.3760665e1 3.5527137e-15 9.5479180e-15 +1 3.6243722e1 3.6243722e1 -7.4384943e-15 -1.7430501e-14 +2 8.1057136e0 8.1057136e0 8.8817842e-16 -5.7731597e-15 +3 9.5444782e-1 9.5444782e-1 5.5511151e-15 3.7747583e-15 "; #[cfg(feature = "fastnlo")] -const IMPORT_FLEX_GRID_10_STR: &str = "b PineAPPL fastNLO rel. 
diff svmaxreldiff --+------------+------------+--------------+------------- -0 6.9429217e1 6.9429217e1 -2.6645353e-15 5.1070259e-15 -1 2.9273448e1 2.9273448e1 -8.8817842e-16 5.7731597e-15 -2 6.6031456e0 6.6031456e0 2.6645353e-15 5.5511151e-15 -3 8.2741590e-1 8.2741590e-1 -8.7707619e-15 8.7707619e-15 +const IMPORT_FLEX_GRID_10_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff +-+------------+------------+--------------+-------------- +0 6.9429217e1 6.9429217e1 -2.6645353e-15 5.1070259e-15 +1 2.9273448e1 2.9273448e1 -8.8817842e-16 5.7731597e-15 +2 6.6031456e0 6.6031456e0 2.6645353e-15 5.5511151e-15 +3 8.2741590e-1 8.2741590e-1 -8.7707619e-15 -6.2172489e-15 "; #[cfg(feature = "fastnlo")] -const IMPORT_FLEX_GRID_11_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff --+------------+------------+--------------+------------- -0 8.2113143e1 8.2113143e1 -2.9976022e-15 6.4392935e-15 -1 3.5603233e1 3.5603233e1 -2.2204460e-16 1.2212453e-14 -2 7.8899185e0 7.8899185e0 -4.8849813e-15 8.4376950e-15 -3 9.3402696e-1 9.3402696e-1 -2.3314684e-15 6.8833828e-15 +const IMPORT_FLEX_GRID_11_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff +-+------------+------------+--------------+-------------- +0 8.2113143e1 8.2113143e1 -2.9976022e-15 6.4392935e-15 +1 3.5603233e1 3.5603233e1 -2.2204460e-16 1.2212453e-14 +2 7.8899185e0 7.8899185e0 -4.8849813e-15 8.4376950e-15 +3 9.3402696e-1 9.3402696e-1 -2.3314684e-15 -6.8833828e-15 "; #[cfg(feature = "fastnlo")] -const IMPORT_FLEX_GRID_12_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff --+------------+------------+--------------+------------- -0 8.1720563e1 8.1720563e1 -8.5487173e-15 1.0658141e-14 -1 3.5668000e1 3.5668000e1 3.9968029e-15 9.4368957e-15 -2 7.9130511e0 7.9130511e0 -1.6653345e-15 6.4392935e-15 -3 9.3503500e-1 9.3503500e-1 -2.2204460e-16 5.1070259e-15 +const IMPORT_FLEX_GRID_12_STR: &str = "b PineAPPL fastNLO rel. 
diff svmaxreldiff +-+------------+------------+--------------+-------------- +0 8.1720563e1 8.1720563e1 -8.5487173e-15 1.0658141e-14 +1 3.5668000e1 3.5668000e1 3.9968029e-15 -9.4368957e-15 +2 7.9130511e0 7.9130511e0 -1.6653345e-15 6.4392935e-15 +3 9.3503500e-1 9.3503500e-1 -2.2204460e-16 5.1070259e-15 "; #[cfg(feature = "fastnlo")] -const IMPORT_FLEX_GRID_13_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff --+------------+------------+--------------+------------- -0 8.2403226e1 8.2403226e1 -4.6629367e-15 5.5511151e-15 -1 3.6074907e1 3.6074907e1 -4.4408921e-16 5.8841820e-15 -2 7.9668487e0 7.9668487e0 -6.6613381e-16 1.0880186e-14 -3 9.3711914e-1 9.3711914e-1 -3.3306691e-15 8.6597396e-15 +const IMPORT_FLEX_GRID_13_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff +-+------------+------------+--------------+-------------- +0 8.2403226e1 8.2403226e1 -4.6629367e-15 -5.5511151e-15 +1 3.6074907e1 3.6074907e1 -4.4408921e-16 -5.8841820e-15 +2 7.9668487e0 7.9668487e0 -6.6613381e-16 -1.0880186e-14 +3 9.3711914e-1 9.3711914e-1 -3.3306691e-15 -8.6597396e-15 "; #[cfg(feature = "fastnlo")] -const IMPORT_FLEX_GRID_14_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff --+------------+------------+--------------+------------- -0 8.2850540e1 8.2850540e1 6.8833828e-15 7.1054274e-15 -1 3.5828674e1 3.5828674e1 2.6645353e-15 1.0103030e-14 -2 7.9087501e0 7.9087501e0 -8.1046281e-15 8.1046281e-15 -3 9.3462321e-1 9.3462321e-1 4.4408921e-16 8.2156504e-15 +const IMPORT_FLEX_GRID_14_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff +-+------------+------------+--------------+-------------- +0 8.2850540e1 8.2850540e1 6.8833828e-15 6.8833828e-15 +1 3.5828674e1 3.5828674e1 2.6645353e-15 -1.0103030e-14 +2 7.9087501e0 7.9087501e0 -8.2156504e-15 6.2172489e-15 +3 9.3462321e-1 9.3462321e-1 4.4408921e-16 8.2156504e-15 "; #[cfg(feature = "fastnlo")] -const IMPORT_FLEX_GRID_15_STR: &str = "b PineAPPL fastNLO rel. 
diff svmaxreldiff --+------------+------------+--------------+------------- -0 6.6997861e1 6.6997861e1 5.7731597e-15 1.1879386e-14 -1 2.6049196e1 2.6049196e1 -7.7715612e-16 1.3100632e-14 -2 5.2022797e0 5.2022797e0 8.4376950e-15 8.4376950e-15 -3 7.2427500e-1 7.2427500e-1 1.9984014e-15 9.5479180e-15 +const IMPORT_FLEX_GRID_15_STR: &str = "b PineAPPL fastNLO rel. diff svmaxreldiff +-+------------+------------+--------------+-------------- +0 6.6997861e1 6.6997861e1 5.5511151e-15 -1.1879386e-14 +1 2.6049196e1 2.6049196e1 -7.7715612e-16 1.3100632e-14 +2 5.2022797e0 5.2022797e0 8.4376950e-15 8.2156504e-15 +3 7.2427500e-1 7.2427500e-1 1.9984014e-15 9.5479180e-15 "; #[cfg(feature = "fktable")] @@ -236,31 +237,31 @@ const IMPORT_HADRONIC_FKTABLE_STR: &str = "b x1 diff "; #[cfg(feature = "applgrid")] -const IMPORT_PHOTON_GRID_STR: &str = "b PineAPPL APPLgrid rel. diff --+------------+------------+----------- -0 5.5621307e-4 5.5621307e-4 0.0000000e0 +const IMPORT_PHOTON_GRID_STR: &str = "b PineAPPL APPLgrid rel. diff +-+------------+------------+-------------- +0 5.5621307e-4 5.5621307e-4 -1.5543122e-15 "; #[cfg(feature = "applgrid")] const IMPORT_APPLGRID_STR: &str = "b PineAPPL APPLgrid rel. diff -+-----------+-----------+-------------- -0 2.9884537e6 2.9884537e6 -6.6613381e-16 +0 2.9884537e6 2.9884537e6 -7.7715612e-16 "; #[cfg(feature = "applgrid")] const IMPORT_NEW_APPLGRID_STR: &str = "b PineAPPL APPLgrid rel. 
diff --+-----------+-----------+-------------- -0 6.2634897e2 6.2634897e2 1.5543122e-15 -1 6.2847078e2 6.2847078e2 0.0000000e0 -2 6.3163323e2 6.3163323e2 2.2204460e-16 -3 6.3586556e2 6.3586556e2 2.2204460e-16 -4 6.4139163e2 6.4139163e2 1.7763568e-15 -5 6.4848088e2 6.4848088e2 -2.6645353e-15 -6 6.5354150e2 6.5354150e2 -3.6637360e-15 -7 6.5377566e2 6.5377566e2 -1.7763568e-15 -8 6.5094729e2 6.5094729e2 1.7763568e-15 -9 6.3588760e2 6.3588760e2 2.2204460e-15 -10 5.9810718e2 5.9810718e2 2.6645353e-15 +0 6.2634897e2 6.2634897e2 -8.8817842e-16 +1 6.2847078e2 6.2847078e2 -2.6645353e-15 +2 6.3163323e2 6.3163323e2 -6.6613381e-16 +3 6.3586556e2 6.3586556e2 -8.8817842e-16 +4 6.4139163e2 6.4139163e2 4.4408921e-16 +5 6.4848088e2 6.4848088e2 -4.2188475e-15 +6 6.5354150e2 6.5354150e2 -4.9960036e-15 +7 6.5377566e2 6.5377566e2 -3.3306691e-15 +8 6.5094729e2 6.5094729e2 1.1102230e-15 +9 6.3588760e2 6.3588760e2 1.9984014e-15 +10 5.9810718e2 5.9810718e2 1.9984014e-15 "; const IMPORT_FILE_FORMAT_FAILURE_STR: &str = "Error: could not detect file format @@ -273,7 +274,7 @@ const IMPORT_GRID_COMPARISON_FAILURE_STR: &str = "Error: grids are different #[cfg(feature = "applgrid")] const IMPORT_DIS_APPLGRID_STR: &str = "b PineAPPL APPLgrid rel. diff -+------------+------------+-------------- -0 9.3514881e-2 9.3514881e-2 -3.3306691e-16 +0 9.3514881e-2 9.3514881e-2 -4.4408921e-16 1 3.9993061e-2 3.9993061e-2 2.2204460e-16 2 1.3593440e-2 1.3593440e-2 -2.2204460e-16 3 2.0825199e-3 2.0825199e-3 -4.4408921e-16 @@ -281,37 +282,39 @@ const IMPORT_DIS_APPLGRID_STR: &str = "b PineAPPL APPLgrid rel. diff #[cfg(feature = "fastnlo")] const IMPORT_DOUBLE_HADRONIC_FASTNLO_STR: &str = - "b PineAPPL fastNLO rel. 
diff svmaxreldiff ---+------------+------------+--------------+------------- -0 9.6382069e5 9.6382069e5 4.4408921e-16 8.3266727e-15 -1 3.7342594e5 3.7342594e5 1.7985613e-14 1.9095836e-14 -2 1.4195038e5 1.4195038e5 -1.0880186e-14 2.2648550e-14 -3 5.7043791e4 5.7043791e4 4.2188475e-15 7.9936058e-15 -4 2.3327746e4 2.3327746e4 8.4376950e-15 1.2101431e-14 -5 1.0495603e4 1.0495603e4 1.3100632e-14 1.7985613e-14 -6 4.8153483e3 4.8153483e3 -1.6098234e-14 2.9753977e-14 -7 2.2957587e3 2.2957587e3 4.8849813e-15 3.0642155e-14 -8 1.1142545e3 1.1142545e3 -2.4424907e-15 1.5765167e-14 -9 5.3699925e2 5.3699925e2 -6.5503158e-15 1.8429702e-14 -10 2.5460314e2 2.5460314e2 -7.6605389e-15 1.3544721e-14 -11 1.1847638e2 1.1847638e2 1.0658141e-14 1.2656542e-14 -12 5.7567355e1 5.7567355e1 -2.9976022e-15 9.2148511e-15 -13 2.7189719e1 2.7189719e1 1.1102230e-15 1.5543122e-14 -14 1.2791922e1 1.2791922e1 -6.9944051e-15 1.2656542e-14 -15 5.8346996e0 5.8346996e0 2.8865799e-15 1.4988011e-14 -16 2.6521590e0 2.6521590e0 7.3274720e-15 1.4765966e-14 -17 1.1726035e0 1.1726035e0 1.3100632e-14 1.4432899e-14 -18 4.8823596e-1 4.8823596e-1 8.6597396e-15 1.3655743e-14 -19 1.9564964e-1 1.9564964e-1 -4.4408921e-15 1.1102230e-14 -20 2.0326950e-2 2.0326950e-2 6.6613381e-15 1.3211654e-14 + "b PineAPPL fastNLO rel. 
diff svmaxreldiff +--+------------+------------+--------------+-------------- +0 9.6382069e5 9.6382069e5 4.4408921e-16 -8.3266727e-15 +1 3.7342594e5 3.7342594e5 1.7985613e-14 1.8651747e-14 +2 1.4195038e5 1.4195038e5 -1.0880186e-14 -2.2870594e-14 +3 5.7043791e4 5.7043791e4 4.2188475e-15 7.7715612e-15 +4 2.3327746e4 2.3327746e4 8.4376950e-15 -1.2101431e-14 +5 1.0495603e4 1.0495603e4 1.3100632e-14 -1.7874591e-14 +6 4.8153483e3 4.8153483e3 -1.6098234e-14 2.9531932e-14 +7 2.2957587e3 2.2957587e3 4.6629367e-15 -3.0198066e-14 +8 1.1142545e3 1.1142545e3 -2.4424907e-15 1.5765167e-14 +9 5.3699925e2 5.3699925e2 -6.7723605e-15 1.8429702e-14 +10 2.5460314e2 2.5460314e2 -7.6605389e-15 -1.3544721e-14 +11 1.1847638e2 1.1847638e2 1.0880186e-14 -1.2989609e-14 +12 5.7567355e1 5.7567355e1 -2.8865799e-15 -9.2148511e-15 +13 2.7189719e1 2.7189719e1 1.3322676e-15 1.5543122e-14 +14 1.2791922e1 1.2791922e1 -6.9944051e-15 -1.2878587e-14 +15 5.8346996e0 5.8346996e0 2.8865799e-15 -1.4876989e-14 +16 2.6521590e0 2.6521590e0 7.1054274e-15 -1.4765966e-14 +17 1.1726035e0 1.1726035e0 1.3100632e-14 1.3988810e-14 +18 4.8823596e-1 4.8823596e-1 8.6597396e-15 -1.3433699e-14 +19 1.9564964e-1 1.9564964e-1 -4.6629367e-15 1.1102230e-14 +20 2.0326950e-2 2.0326950e-2 6.6613381e-15 -1.2767565e-14 "; #[cfg(feature = "fastnlo")] -const IMPORT_NPDFDIM_2_TABLE_STR: &str = "0 1.0824021e0 1.0824021e0 1.4654944e-14 +const IMPORT_NPDFDIM_2_TABLE_STR: &str = "b PineAPPL fastNLO rel. 
diff +-+------------+------------+-------------- +0 1.0824021e0 1.0824021e0 1.4654944e-14 1 1.0680553e0 1.0680553e0 -1.4432899e-15 2 6.4959982e-1 6.4959982e-1 4.4408921e-15 3 3.3033872e-1 3.3033872e-1 2.0872193e-14 -4 1.3360159e-1 1.3360159e-1 -2.3092639e-14 +4 1.3360159e-1 1.3360159e-1 -2.2870594e-14 5 3.2728146e-2 3.2728146e-2 -5.7731597e-15 6 3.8508907e-3 3.8508907e-3 2.2870594e-14 "; @@ -669,6 +672,8 @@ fn import_flex_grid_15() { #[test] #[cfg(feature = "fktable")] fn import_dis_fktable() { + use float_cmp::assert_approx_eq; + use ndarray::Array3; use pineappl::fk_table::FkTable; use pineappl::grid::Grid; use std::fs::File; @@ -710,7 +715,8 @@ fn import_dis_fktable() { // TODO: this should ideally be a unit test, but we need an FK table that we don't convert - assert_eq!(fk_table.muf2(), 1.65 * 1.65); + assert_eq!(fk_table.grid().kinematics().len(), 2); + assert_approx_eq!(f64, fk_table.muf2(), 1.65 * 1.65, ulps = 2); assert_eq!( fk_table.x_grid(), [ @@ -813,13 +819,13 @@ fn import_dis_fktable() { 0.8837966741980419, 0.9126417795942889, 0.9416284084927907, - 0.9707498946430192 + 0.9707498946430192, ] ); - let table = fk_table.table(); + let table: Array3 = fk_table.table().into_dimensionality().unwrap(); - assert_eq!(table.dim(), (20, 9, 100, 1)); + assert_eq!(table.dim(), (20, 9, 100)); assert_eq!( table .indexed_iter() @@ -827,16 +833,16 @@ fn import_dis_fktable() { .take(10) .collect::>(), [ - ((0, 0, 0, 0), &4.506605409085538e-8), - ((0, 0, 1, 0), &1.8561090273141668e-8), - ((0, 0, 2, 0), &-3.3821015317570252e-9), - ((0, 0, 3, 0), &1.980084314325426e-9), - ((0, 0, 4, 0), &2.187815687938248e-9), - ((0, 0, 5, 0), &1.3280152778522626e-9), - ((0, 0, 6, 0), &1.3848470515483116e-9), - ((0, 0, 7, 0), &1.5145898293299224e-9), - ((0, 0, 8, 0), &1.6942313031679552e-9), - ((0, 0, 9, 0), &1.9734220063025288e-9), + ((0, 0, 0), &4.506605409085538e-8), + ((0, 0, 1), &1.8561090273141668e-8), + ((0, 0, 2), &-3.3821015317570252e-9), + ((0, 0, 3), &1.980084314325426e-9), + 
((0, 0, 4), &2.187815687938248e-9), + ((0, 0, 5), &1.3280152778522626e-9), + ((0, 0, 6), &1.3848470515483116e-9), + ((0, 0, 7), &1.5145898293299224e-9), + ((0, 0, 8), &1.6942313031679552e-9), + ((0, 0, 9), &1.9734220063025288e-9), ] ); } @@ -846,8 +852,9 @@ fn import_dis_fktable() { fn import_hadronic_fktable() { use float_cmp::assert_approx_eq; use lhapdf::Pdf; - use pineappl::convolutions::Convolution; - use pineappl::convolutions::LumiCache; + use ndarray::Array4; + use pineappl::convolutions::ConvolutionCache; + use pineappl::convolutions::{Conv, ConvType}; use pineappl::fk_table::{FkAssumptions, FkTable}; use pineappl::grid::Grid; use std::fs::File; @@ -885,11 +892,15 @@ fn import_hadronic_fktable() { let pdf = Pdf::with_setname_and_member("NNPDF31_nlo_as_0118_luxqed", 0).unwrap(); let mut xfx = |id, x, q2| pdf.xfx_q2(id, x, q2); let mut alphas = |_| 0.0; - let mut lumi_cache = LumiCache::with_one(2212, &mut xfx, &mut alphas); - let results = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let mut convolution_cache = ConvolutionCache::new( + vec![Conv::new(ConvType::UnpolPDF, 2212)], + vec![&mut xfx], + &mut alphas, + ); + let results = grid.convolve(&mut convolution_cache, &[], &[], &[], &[(1.0, 1.0, 1.0)]); let mut fk_table = FkTable::try_from(grid).unwrap(); - let table = fk_table.table(); + let table: Array4 = fk_table.table().into_dimensionality().unwrap(); assert_eq!(table.dim(), (1, 45, 30, 30)); assert_eq!( @@ -919,60 +930,63 @@ fn import_hadronic_fktable() { assert_eq!(fk_table.grid().bin_info().right(0), [1.0]); assert_eq!( fk_table.grid().convolutions(), - [Convolution::UnpolPDF(2212), Convolution::UnpolPDF(2212)] + [ + Conv::new(ConvType::UnpolPDF, 2212), + Conv::new(ConvType::UnpolPDF, 2212) + ] ); let channels = fk_table.channels(); assert_eq!( channels, [ - (100, 100), - (100, 21), - (100, 200), - (100, 203), - (100, 208), - (100, 215), - (100, 103), - (100, 108), - (100, 115), - (21, 21), - (21, 200), - (21, 203), - (21, 208), - 
(21, 215), - (21, 103), - (21, 108), - (21, 115), - (200, 200), - (200, 203), - (200, 208), - (200, 215), - (200, 103), - (200, 108), - (200, 115), - (203, 203), - (203, 208), - (203, 215), - (203, 103), - (203, 108), - (203, 115), - (208, 208), - (208, 215), - (208, 103), - (208, 108), - (208, 115), - (215, 215), - (215, 103), - (215, 108), - (215, 115), - (103, 103), - (103, 108), - (103, 115), - (108, 108), - (108, 115), - (115, 115) + [100, 100], + [100, 21], + [100, 200], + [100, 203], + [100, 208], + [100, 215], + [100, 103], + [100, 108], + [100, 115], + [21, 21], + [21, 200], + [21, 203], + [21, 208], + [21, 215], + [21, 103], + [21, 108], + [21, 115], + [200, 200], + [200, 203], + [200, 208], + [200, 215], + [200, 103], + [200, 108], + [200, 115], + [203, 203], + [203, 208], + [203, 215], + [203, 103], + [203, 108], + [203, 115], + [208, 208], + [208, 215], + [208, 103], + [208, 108], + [208, 115], + [215, 215], + [215, 103], + [215, 108], + [215, 115], + [103, 103], + [103, 108], + [103, 115], + [108, 108], + [108, 115], + [115, 115] ] ); - assert_eq!(fk_table.muf2(), 1.65 * 1.65); + assert_approx_eq!(f64, fk_table.muf2(), 1.65 * 1.65, ulps = 2); assert_eq!( fk_table.x_grid(), [ @@ -1009,14 +1023,14 @@ fn import_hadronic_fktable() { ] ); - assert_eq!(results, fk_table.convolve(&mut lumi_cache, &[], &[])); + assert_eq!(results, fk_table.convolve(&mut convolution_cache, &[], &[])); fk_table.optimize(FkAssumptions::Nf6Ind); assert_eq!(fk_table.channels(), channels); assert_approx_eq!( f64, results[0], - fk_table.convolve(&mut lumi_cache, &[], &[])[0], + fk_table.convolve(&mut convolution_cache, &[], &[])[0], ulps = 4 ); fk_table.optimize(FkAssumptions::Nf6Sym); @@ -1024,7 +1038,7 @@ fn import_hadronic_fktable() { assert_approx_eq!( f64, results[0], - fk_table.convolve(&mut lumi_cache, &[], &[])[0], + fk_table.convolve(&mut convolution_cache, &[], &[])[0], ulps = 4 ); fk_table.optimize(FkAssumptions::Nf5Ind); @@ -1032,124 +1046,124 @@ fn 
import_hadronic_fktable() { assert_approx_eq!( f64, results[0], - fk_table.convolve(&mut lumi_cache, &[], &[])[0] + fk_table.convolve(&mut convolution_cache, &[], &[])[0] ); fk_table.optimize(FkAssumptions::Nf5Sym); assert_eq!(fk_table.channels(), channels); assert_approx_eq!( f64, results[0], - fk_table.convolve(&mut lumi_cache, &[], &[])[0] + fk_table.convolve(&mut convolution_cache, &[], &[])[0] ); fk_table.optimize(FkAssumptions::Nf4Ind); assert_eq!(fk_table.channels(), channels); assert_approx_eq!( f64, results[0], - fk_table.convolve(&mut lumi_cache, &[], &[])[0] + fk_table.convolve(&mut convolution_cache, &[], &[])[0] ); fk_table.optimize(FkAssumptions::Nf4Sym); assert_eq!( fk_table.channels(), [ - (100, 100), - (100, 21), - (100, 203), - (100, 208), - (100, 200), - (100, 103), - (100, 108), - (100, 115), - (21, 21), - (21, 203), - (21, 208), - (21, 200), - (21, 103), - (21, 108), - (21, 115), - (200, 203), - (200, 208), - (203, 203), - (203, 208), - (203, 103), - (203, 108), - (203, 115), - (208, 208), - (208, 103), - (208, 108), - (208, 115), - (200, 200), - (200, 103), - (200, 108), - (200, 115), - (103, 103), - (103, 108), - (103, 115), - (108, 108), - (108, 115), - (115, 115) + [100, 100], + [100, 21], + [100, 203], + [100, 208], + [100, 200], + [100, 103], + [100, 108], + [100, 115], + [21, 21], + [21, 203], + [21, 208], + [21, 200], + [21, 103], + [21, 108], + [21, 115], + [200, 203], + [200, 208], + [203, 203], + [203, 208], + [203, 103], + [203, 108], + [203, 115], + [208, 208], + [208, 103], + [208, 108], + [208, 115], + [200, 200], + [200, 103], + [200, 108], + [200, 115], + [103, 103], + [103, 108], + [103, 115], + [108, 108], + [108, 115], + [115, 115] ] ); fk_table.optimize(FkAssumptions::Nf3Ind); assert_eq!( fk_table.channels(), [ - (100, 21), - (100, 203), - (100, 208), - (100, 200), - (100, 103), - (100, 108), - (21, 21), - (21, 203), - (21, 208), - (21, 200), - (21, 103), - (21, 108), - (200, 203), - (200, 208), - (203, 203), - (203, 208), 
- (203, 103), - (203, 108), - (208, 208), - (208, 103), - (208, 108), - (200, 200), - (200, 103), - (200, 108), - (103, 103), - (103, 108), - (108, 108), - (100, 100) + [100, 21], + [100, 203], + [100, 208], + [100, 200], + [100, 103], + [100, 108], + [21, 21], + [21, 203], + [21, 208], + [21, 200], + [21, 103], + [21, 108], + [200, 203], + [200, 208], + [203, 203], + [203, 208], + [203, 103], + [203, 108], + [208, 208], + [208, 103], + [208, 108], + [200, 200], + [200, 103], + [200, 108], + [103, 103], + [103, 108], + [108, 108], + [100, 100] ] ); fk_table.optimize(FkAssumptions::Nf3Sym); assert_eq!( fk_table.channels(), [ - (100, 21), - (100, 203), - (100, 200), - (100, 103), - (100, 108), - (21, 21), - (21, 203), - (21, 200), - (21, 103), - (21, 108), - (200, 203), - (203, 203), - (203, 103), - (203, 108), - (200, 200), - (200, 103), - (200, 108), - (103, 103), - (103, 108), - (108, 108), - (100, 100) + [100, 21], + [100, 203], + [100, 200], + [100, 103], + [100, 108], + [21, 21], + [21, 203], + [21, 200], + [21, 103], + [21, 108], + [200, 203], + [203, 203], + [203, 103], + [203, 108], + [200, 200], + [200, 103], + [200, 108], + [103, 103], + [103, 108], + [108, 108], + [100, 100], ] ); } diff --git a/pineappl_cli/tests/main.rs b/pineappl_cli/tests/main.rs index 0ba04dce5..8f6b2e34d 100644 --- a/pineappl_cli/tests/main.rs +++ b/pineappl_cli/tests/main.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + use assert_cmd::Command; const HELP_STR: &str = "Read, write, and query PineAPPL grids diff --git a/pineappl_cli/tests/merge.rs b/pineappl_cli/tests/merge.rs index ae57dc5aa..30fb4a30b 100644 --- a/pineappl_cli/tests/merge.rs +++ b/pineappl_cli/tests/merge.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + use assert_cmd::Command; use assert_fs::NamedTempFile; diff --git a/pineappl_cli/tests/orders.rs b/pineappl_cli/tests/orders.rs index f63210640..bb0336e45 100644 --- a/pineappl_cli/tests/orders.rs +++ b/pineappl_cli/tests/orders.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] 
+ use assert_cmd::Command; const HELP_STR: &str = "Shows the predictions for all bin for each order separately diff --git a/pineappl_cli/tests/plot.rs b/pineappl_cli/tests/plot.rs index 0f656e639..bf242cfe7 100644 --- a/pineappl_cli/tests/plot.rs +++ b/pineappl_cli/tests/plot.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + use assert_cmd::Command; use predicates::str; use std::num::NonZeroUsize; @@ -1877,9 +1879,6 @@ if __name__ == "__main__": main(plot_panels) "#; -const THREE_PDF_ERROR_STR: &str = "convolutions with 3 convolution functions is not supported -"; - #[test] fn help() { Command::cargo_bin("pineappl") @@ -1930,23 +1929,6 @@ fn subgrid_pull() { .stdout(SUBGRID_PULL_STR); } -#[test] -fn three_pdf_error() { - Command::cargo_bin("pineappl") - .unwrap() - .args([ - "plot", - "--subgrid-pull=0,0,0", - "--threads=1", - "../test-data/LHCB_WP_7TEV_opt.pineappl.lz4", - "NNPDF31_nlo_as_0118_luxqed", - "NNPDF40_nnlo_as_01180,NNPDF40_nnlo_as_01180,NNPDF40_nnlo_as_01180", - ]) - .assert() - .failure() - .stderr(str::contains(THREE_PDF_ERROR_STR)); -} - #[test] fn drell_yan_afb() { Command::cargo_bin("pineappl") diff --git a/pineappl_cli/tests/pull.rs b/pineappl_cli/tests/pull.rs index 6b6f6e86e..9891cfda4 100644 --- a/pineappl_cli/tests/pull.rs +++ b/pineappl_cli/tests/pull.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + use assert_cmd::Command; use std::num::NonZeroUsize; use std::thread; diff --git a/pineappl_cli/tests/read.rs b/pineappl_cli/tests/read.rs index 1c4958a03..2e9120a3c 100644 --- a/pineappl_cli/tests/read.rs +++ b/pineappl_cli/tests/read.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + use assert_cmd::Command; const HELP_STR: &str = "Read out information of a grid @@ -52,22 +54,22 @@ const ORDERS_STR: &str = "o order 4 O(a^3 lf^1) "; -const ORDERS_LONG_STR: &str = "o order --+--------------------- -0 O(as^0 a^2 lr^0 lf^0) -1 O(as^1 a^2 lr^0 lf^0) -2 O(as^1 a^2 lr^0 lf^1) -3 O(as^0 a^3 lr^0 lf^0) -4 O(as^0 a^3 lr^0 lf^1) +const ORDERS_LONG_STR: &str = "o 
order +-+-------------------------- +0 O(as^0 a^2 lr^0 lf^0 la^0) +1 O(as^1 a^2 lr^0 lf^0 la^0) +2 O(as^1 a^2 lr^0 lf^1 la^0) +3 O(as^0 a^3 lr^0 lf^0 la^0) +4 O(as^0 a^3 lr^0 lf^1 la^0) "; -const ORDERS_SPACES_STR: &str = "o order --+--------------------- -0 O( a^2 ) -1 O(as^1 a^2 ) -2 O(as^1 a^2 lf^1) -3 O( a^3 ) -4 O( a^3 lf^1) +const ORDERS_SPACES_STR: &str = "o order +-+-------------------------- +0 O( a^2 ) +1 O(as^1 a^2 ) +2 O(as^1 a^2 lf^1 ) +3 O( a^3 ) +4 O( a^3 lf^1 ) "; const FKTABLE_STR: &str = "no diff --git a/pineappl_cli/tests/subgrids.rs b/pineappl_cli/tests/subgrids.rs index d70110c4d..77f760202 100644 --- a/pineappl_cli/tests/subgrids.rs +++ b/pineappl_cli/tests/subgrids.rs @@ -1,8 +1,10 @@ +#![allow(missing_docs)] + use assert_cmd::Command; const HELP_STR: &str = "Print information about the internal subgrid types -Usage: pineappl subgrids [OPTIONS] <--type|--mur|--mur2|--muf|--muf2|--x1|--x2|--stats> +Usage: pineappl subgrids [OPTIONS] <--type|--scale=|--x=|--stats> Arguments: Path to the input grid @@ -10,342 +12,14 @@ Arguments: Options: --show-empty Show empty subgrids --type Show the subgrid type - --mur Show the renormalization grid values - --mur2 Show the squared renormalization grid values - --muf Show the factorization grid values - --muf2 Show the squared factorization grid values - --x1 Show the x1 grid values - --x2 Show the x2 grid values + --scale= Show the scale node values for the given indices + --x= Show the x-node values for the given indices --stats Show grid statistics (figures are the number of entries) --digits Set the number of digits shown for numerical values [default: 3] -h, --help Print help "; -const MUF_STR: &str = "o b c muf --+-+-+------ -0 0 0 80.352 -0 1 0 80.352 -0 2 0 80.352 -0 3 0 80.352 -0 4 0 80.352 -0 5 0 80.352 -0 6 0 80.352 -0 7 0 80.352 -1 0 0 80.352 -1 0 1 80.352 -1 0 3 80.352 -1 1 0 80.352 -1 1 1 80.352 -1 1 3 80.352 -1 2 0 80.352 -1 2 1 80.352 -1 2 3 80.352 -1 3 0 80.352 -1 3 1 80.352 -1 3 3 80.352 -1 
4 0 80.352 -1 4 1 80.352 -1 4 3 80.352 -1 5 0 80.352 -1 5 1 80.352 -1 5 3 80.352 -1 6 0 80.352 -1 6 1 80.352 -1 6 3 80.352 -1 7 0 80.352 -1 7 1 80.352 -1 7 3 80.352 -2 0 0 80.352 -2 0 1 80.352 -2 0 3 80.352 -2 1 0 80.352 -2 1 1 80.352 -2 1 3 80.352 -2 2 0 80.352 -2 2 1 80.352 -2 2 3 80.352 -2 3 0 80.352 -2 3 1 80.352 -2 3 3 80.352 -2 4 0 80.352 -2 4 1 80.352 -2 4 3 80.352 -2 5 0 80.352 -2 5 1 80.352 -2 5 3 80.352 -2 6 0 80.352 -2 6 1 80.352 -2 6 3 80.352 -2 7 0 80.352 -2 7 1 80.352 -2 7 3 80.352 -3 0 0 80.352 -3 0 2 80.352 -3 0 4 80.352 -3 1 0 80.352 -3 1 2 80.352 -3 1 4 80.352 -3 2 0 80.352 -3 2 2 80.352 -3 2 4 80.352 -3 3 0 80.352 -3 3 2 80.352 -3 3 4 80.352 -3 4 0 80.352 -3 4 2 80.352 -3 4 4 80.352 -3 5 0 80.352 -3 5 2 80.352 -3 5 4 80.352 -3 6 0 80.352 -3 6 2 80.352 -3 6 4 80.352 -3 7 0 80.352 -3 7 2 80.352 -3 7 4 80.352 -4 0 0 80.352 -4 0 2 80.352 -4 0 4 80.352 -4 1 0 80.352 -4 1 2 80.352 -4 1 4 80.352 -4 2 0 80.352 -4 2 2 80.352 -4 2 4 80.352 -4 3 0 80.352 -4 3 2 80.352 -4 3 4 80.352 -4 4 0 80.352 -4 4 2 80.352 -4 4 4 80.352 -4 5 0 80.352 -4 5 2 80.352 -4 5 4 80.352 -4 6 0 80.352 -4 6 2 80.352 -4 6 4 80.352 -4 7 0 80.352 -4 7 2 80.352 -4 7 4 80.352 -"; - -const MUF2_STR: &str = "o b c muf2 --+-+-+-------- -0 0 0 6456.444 -0 1 0 6456.444 -0 2 0 6456.444 -0 3 0 6456.444 -0 4 0 6456.444 -0 5 0 6456.444 -0 6 0 6456.444 -0 7 0 6456.444 -1 0 0 6456.444 -1 0 1 6456.444 -1 0 3 6456.444 -1 1 0 6456.444 -1 1 1 6456.444 -1 1 3 6456.444 -1 2 0 6456.444 -1 2 1 6456.444 -1 2 3 6456.444 -1 3 0 6456.444 -1 3 1 6456.444 -1 3 3 6456.444 -1 4 0 6456.444 -1 4 1 6456.444 -1 4 3 6456.444 -1 5 0 6456.444 -1 5 1 6456.444 -1 5 3 6456.444 -1 6 0 6456.444 -1 6 1 6456.444 -1 6 3 6456.444 -1 7 0 6456.444 -1 7 1 6456.444 -1 7 3 6456.444 -2 0 0 6456.444 -2 0 1 6456.444 -2 0 3 6456.444 -2 1 0 6456.444 -2 1 1 6456.444 -2 1 3 6456.444 -2 2 0 6456.444 -2 2 1 6456.444 -2 2 3 6456.444 -2 3 0 6456.444 -2 3 1 6456.444 -2 3 3 6456.444 -2 4 0 6456.444 -2 4 1 6456.444 -2 4 3 6456.444 -2 5 0 6456.444 
-2 5 1 6456.444 -2 5 3 6456.444 -2 6 0 6456.444 -2 6 1 6456.444 -2 6 3 6456.444 -2 7 0 6456.444 -2 7 1 6456.444 -2 7 3 6456.444 -3 0 0 6456.444 -3 0 2 6456.444 -3 0 4 6456.444 -3 1 0 6456.444 -3 1 2 6456.444 -3 1 4 6456.444 -3 2 0 6456.444 -3 2 2 6456.444 -3 2 4 6456.444 -3 3 0 6456.444 -3 3 2 6456.444 -3 3 4 6456.444 -3 4 0 6456.444 -3 4 2 6456.444 -3 4 4 6456.444 -3 5 0 6456.444 -3 5 2 6456.444 -3 5 4 6456.444 -3 6 0 6456.444 -3 6 2 6456.444 -3 6 4 6456.444 -3 7 0 6456.444 -3 7 2 6456.444 -3 7 4 6456.444 -4 0 0 6456.444 -4 0 2 6456.444 -4 0 4 6456.444 -4 1 0 6456.444 -4 1 2 6456.444 -4 1 4 6456.444 -4 2 0 6456.444 -4 2 2 6456.444 -4 2 4 6456.444 -4 3 0 6456.444 -4 3 2 6456.444 -4 3 4 6456.444 -4 4 0 6456.444 -4 4 2 6456.444 -4 4 4 6456.444 -4 5 0 6456.444 -4 5 2 6456.444 -4 5 4 6456.444 -4 6 0 6456.444 -4 6 2 6456.444 -4 6 4 6456.444 -4 7 0 6456.444 -4 7 2 6456.444 -4 7 4 6456.444 -"; - -const MUR_STR: &str = "o b c mur --+-+-+------ -0 0 0 80.352 -0 1 0 80.352 -0 2 0 80.352 -0 3 0 80.352 -0 4 0 80.352 -0 5 0 80.352 -0 6 0 80.352 -0 7 0 80.352 -1 0 0 80.352 -1 0 1 80.352 -1 0 3 80.352 -1 1 0 80.352 -1 1 1 80.352 -1 1 3 80.352 -1 2 0 80.352 -1 2 1 80.352 -1 2 3 80.352 -1 3 0 80.352 -1 3 1 80.352 -1 3 3 80.352 -1 4 0 80.352 -1 4 1 80.352 -1 4 3 80.352 -1 5 0 80.352 -1 5 1 80.352 -1 5 3 80.352 -1 6 0 80.352 -1 6 1 80.352 -1 6 3 80.352 -1 7 0 80.352 -1 7 1 80.352 -1 7 3 80.352 -2 0 0 80.352 -2 0 1 80.352 -2 0 3 80.352 -2 1 0 80.352 -2 1 1 80.352 -2 1 3 80.352 -2 2 0 80.352 -2 2 1 80.352 -2 2 3 80.352 -2 3 0 80.352 -2 3 1 80.352 -2 3 3 80.352 -2 4 0 80.352 -2 4 1 80.352 -2 4 3 80.352 -2 5 0 80.352 -2 5 1 80.352 -2 5 3 80.352 -2 6 0 80.352 -2 6 1 80.352 -2 6 3 80.352 -2 7 0 80.352 -2 7 1 80.352 -2 7 3 80.352 -3 0 0 80.352 -3 0 2 80.352 -3 0 4 80.352 -3 1 0 80.352 -3 1 2 80.352 -3 1 4 80.352 -3 2 0 80.352 -3 2 2 80.352 -3 2 4 80.352 -3 3 0 80.352 -3 3 2 80.352 -3 3 4 80.352 -3 4 0 80.352 -3 4 2 80.352 -3 4 4 80.352 -3 5 0 80.352 -3 5 2 80.352 -3 5 4 80.352 -3 6 0 80.352 
-3 6 2 80.352 -3 6 4 80.352 -3 7 0 80.352 -3 7 2 80.352 -3 7 4 80.352 -4 0 0 80.352 -4 0 2 80.352 -4 0 4 80.352 -4 1 0 80.352 -4 1 2 80.352 -4 1 4 80.352 -4 2 0 80.352 -4 2 2 80.352 -4 2 4 80.352 -4 3 0 80.352 -4 3 2 80.352 -4 3 4 80.352 -4 4 0 80.352 -4 4 2 80.352 -4 4 4 80.352 -4 5 0 80.352 -4 5 2 80.352 -4 5 4 80.352 -4 6 0 80.352 -4 6 2 80.352 -4 6 4 80.352 -4 7 0 80.352 -4 7 2 80.352 -4 7 4 80.352 -"; - -const MUR2_STR: &str = "o b c mur2 +const SCALE0_STR: &str = "o b c scale0 -+-+-+-------- 0 0 0 6456.444 0 1 0 6456.444 @@ -455,425 +129,425 @@ const MUR2_STR: &str = "o b c mur2 const STATS_STR: &str = "o b c total allocated zeros overhead -+-+-+-----+---------+-----+-------- -0 0 0 1156 1012 3 70 -0 1 0 1156 1009 8 70 -0 2 0 1225 1018 16 72 -0 3 0 1296 1006 16 74 -0 4 0 1296 983 21 74 -0 5 0 1369 945 45 76 -0 6 0 1444 1025 77 78 -0 7 0 1521 833 37 80 -1 0 0 1156 1013 0 70 -1 0 1 1156 1009 0 70 -1 0 3 1156 1011 0 70 -1 1 0 1156 1016 0 70 -1 1 1 1156 1005 0 70 -1 1 3 1156 1011 0 70 -1 2 0 1225 1031 0 72 -1 2 1 1225 1022 0 72 -1 2 3 1225 1030 0 72 -1 3 0 1296 1031 0 74 -1 3 1 1296 1026 0 74 -1 3 3 1296 1030 0 74 -1 4 0 1296 1038 2 74 -1 4 1 1296 1025 0 74 -1 4 3 1296 1027 0 74 -1 5 0 1369 1039 0 76 -1 5 1 1369 1022 2 76 -1 5 3 1369 1032 0 76 -1 6 0 1444 1033 0 78 -1 6 1 1444 1029 0 78 -1 6 3 1444 1032 0 78 -1 7 0 1521 982 0 80 -1 7 1 1521 980 2 80 -1 7 3 1521 981 0 80 -2 0 0 1156 1013 0 70 -2 0 1 1156 952 0 70 -2 0 3 1156 1011 2 70 -2 1 0 1156 1016 0 70 -2 1 1 1156 953 7 70 -2 1 3 1156 1005 0 70 -2 2 0 1225 1031 0 72 -2 2 1 1225 1018 6 72 -2 2 3 1225 1029 0 72 -2 3 0 1296 1031 0 74 -2 3 1 1296 998 0 74 -2 3 3 1296 1027 0 74 -2 4 0 1296 1038 2 74 -2 4 1 1296 1022 10 74 -2 4 3 1296 1021 0 74 -2 5 0 1369 1039 0 76 -2 5 1 1332 1007 18 74 -2 5 3 1369 1028 0 76 -2 6 0 1444 1033 0 78 -2 6 1 1444 1027 11 78 -2 6 3 1444 1028 0 78 -2 7 0 1521 982 0 80 -2 7 1 1482 976 58 78 -2 7 3 1521 973 0 80 -3 0 0 1156 1013 0 70 -3 0 2 1156 936 20 70 -3 0 4 1156 930 10 70 -3 1 0 1156 
1013 0 70 -3 1 2 1156 938 20 70 -3 1 4 1156 949 16 70 -3 2 0 1225 1026 0 72 -3 2 2 1225 933 32 72 -3 2 4 1225 943 0 72 -3 3 0 1296 1031 0 74 -3 3 2 1260 884 6 72 -3 3 4 1260 983 11 72 -3 4 0 1296 1038 3 74 -3 4 2 1296 894 37 74 -3 4 4 1296 940 0 74 -3 5 0 1369 1035 0 76 -3 5 2 1332 869 16 74 -3 5 4 1332 962 14 74 -3 6 0 1444 1033 0 78 -3 6 2 1406 861 21 76 -3 6 4 1406 969 22 76 -3 7 0 1521 980 0 80 -3 7 2 1444 873 114 78 -3 7 4 1444 817 38 78 -4 0 0 1156 1013 3 70 -4 0 2 1156 767 37 70 -4 0 4 1156 868 11 70 -4 1 0 1156 1013 1 70 -4 1 2 1122 718 15 68 -4 1 4 1156 854 17 70 -4 2 0 1225 1018 3 72 -4 2 2 1225 832 22 72 -4 2 4 1225 898 17 72 -4 3 0 1296 1023 0 74 -4 3 2 1260 840 54 72 -4 3 4 1225 913 39 72 -4 4 0 1296 1038 3 74 -4 4 2 1296 825 80 74 -4 4 4 1296 877 9 74 -4 5 0 1369 1035 0 76 -4 5 2 1332 841 72 74 -4 5 4 1332 911 20 74 -4 6 0 1444 1033 0 78 -4 6 2 1406 855 116 76 -4 6 4 1369 867 13 76 -4 7 0 1521 980 0 80 -4 7 2 1444 869 173 78 -4 7 4 1444 745 36 78 +0 0 0 1156 1014 5 30 +0 1 0 1156 1012 11 30 +0 2 0 1225 1014 12 46 +0 3 0 1296 1000 10 62 +0 4 0 1296 966 4 66 +0 5 0 1369 903 3 82 +0 6 0 1444 958 10 82 +0 7 0 1521 798 2 92 +1 0 0 1156 1016 3 28 +1 0 1 1156 1012 3 28 +1 0 3 1156 1015 4 28 +1 1 0 1156 1018 2 26 +1 1 1 1156 1008 3 28 +1 1 3 1156 1012 1 28 +1 2 0 1225 1034 3 32 +1 2 1 1225 1025 3 34 +1 2 3 1225 1033 3 32 +1 3 0 1296 1040 9 38 +1 3 1 1296 1037 11 38 +1 3 3 1296 1040 10 38 +1 4 0 1296 1039 3 38 +1 4 1 1296 1029 4 38 +1 4 3 1296 1031 4 36 +1 5 0 1369 1046 7 42 +1 5 1 1369 1030 10 46 +1 5 3 1369 1040 8 42 +1 6 0 1444 1039 6 50 +1 6 1 1444 1035 6 50 +1 6 3 1444 1039 7 50 +1 7 0 1521 989 7 56 +1 7 1 1521 986 8 58 +1 7 3 1521 987 6 58 +2 0 0 1156 1016 3 28 +2 0 1 1156 955 3 46 +2 0 3 1156 1013 4 30 +2 1 0 1156 1018 2 26 +2 1 1 1156 947 1 44 +2 1 3 1156 1009 4 28 +2 2 0 1225 1034 3 32 +2 2 1 1225 1021 9 36 +2 2 3 1225 1032 3 32 +2 3 0 1296 1040 9 38 +2 3 1 1296 1004 6 54 +2 3 3 1296 1037 10 38 +2 4 0 1296 1039 3 38 +2 4 1 1296 1020 8 44 +2 4 3 1296 
1025 4 36 +2 5 0 1369 1046 7 42 +2 5 1 1332 998 9 56 +2 5 3 1369 1036 8 42 +2 6 0 1444 1039 6 50 +2 6 1 1444 1024 8 54 +2 6 3 1444 1035 7 50 +2 7 0 1521 989 7 56 +2 7 1 1482 924 6 90 +2 7 3 1521 979 6 58 +3 0 0 1156 1016 3 28 +3 0 2 1156 925 9 56 +3 0 4 1156 925 5 54 +3 1 0 1156 1018 5 26 +3 1 2 1156 922 4 56 +3 1 4 1156 936 3 50 +3 2 0 1225 1030 4 36 +3 2 2 1225 909 8 64 +3 2 4 1225 946 3 50 +3 3 0 1296 1040 9 38 +3 3 2 1260 878 0 74 +3 3 4 1260 984 12 52 +3 4 0 1296 1038 3 38 +3 4 2 1296 863 6 70 +3 4 4 1296 942 2 52 +3 5 0 1369 1045 10 42 +3 5 2 1332 857 4 82 +3 5 4 1332 955 7 60 +3 6 0 1444 1039 6 50 +3 6 2 1406 844 4 86 +3 6 4 1406 956 9 62 +3 7 0 1521 988 8 56 +3 7 2 1444 759 0 116 +3 7 4 1444 781 2 76 +4 0 0 1156 1014 4 30 +4 0 2 1156 733 3 80 +4 0 4 1156 862 5 52 +4 1 0 1156 1018 6 26 +4 1 2 1122 710 7 68 +4 1 4 1156 840 3 54 +4 2 0 1225 1023 8 38 +4 2 2 1225 816 6 86 +4 2 4 1225 885 4 56 +4 3 0 1296 1032 9 40 +4 3 2 1260 791 5 104 +4 3 4 1225 878 4 68 +4 4 0 1296 1038 3 38 +4 4 2 1296 745 0 104 +4 4 4 1296 872 4 56 +4 5 0 1369 1045 10 42 +4 5 2 1332 771 2 108 +4 5 4 1332 900 9 62 +4 6 0 1444 1039 6 50 +4 6 2 1406 739 0 112 +4 6 4 1369 859 5 54 +4 7 0 1521 988 8 56 +4 7 2 1444 696 0 116 +4 7 4 1444 709 0 76 "; -const TYPE_STR: &str = "o b c type --+-+-+------------------- -0 0 0 ImportOnlySubgridV2 -0 1 0 ImportOnlySubgridV2 -0 2 0 ImportOnlySubgridV2 -0 3 0 ImportOnlySubgridV2 -0 4 0 ImportOnlySubgridV2 -0 5 0 ImportOnlySubgridV2 -0 6 0 ImportOnlySubgridV2 -0 7 0 ImportOnlySubgridV2 -1 0 0 ImportOnlySubgridV2 -1 0 1 ImportOnlySubgridV2 -1 0 3 ImportOnlySubgridV2 -1 1 0 ImportOnlySubgridV2 -1 1 1 ImportOnlySubgridV2 -1 1 3 ImportOnlySubgridV2 -1 2 0 ImportOnlySubgridV2 -1 2 1 ImportOnlySubgridV2 -1 2 3 ImportOnlySubgridV2 -1 3 0 ImportOnlySubgridV2 -1 3 1 ImportOnlySubgridV2 -1 3 3 ImportOnlySubgridV2 -1 4 0 ImportOnlySubgridV2 -1 4 1 ImportOnlySubgridV2 -1 4 3 ImportOnlySubgridV2 -1 5 0 ImportOnlySubgridV2 -1 5 1 ImportOnlySubgridV2 -1 5 3 
ImportOnlySubgridV2 -1 6 0 ImportOnlySubgridV2 -1 6 1 ImportOnlySubgridV2 -1 6 3 ImportOnlySubgridV2 -1 7 0 ImportOnlySubgridV2 -1 7 1 ImportOnlySubgridV2 -1 7 3 ImportOnlySubgridV2 -2 0 0 ImportOnlySubgridV2 -2 0 1 ImportOnlySubgridV2 -2 0 3 ImportOnlySubgridV2 -2 1 0 ImportOnlySubgridV2 -2 1 1 ImportOnlySubgridV2 -2 1 3 ImportOnlySubgridV2 -2 2 0 ImportOnlySubgridV2 -2 2 1 ImportOnlySubgridV2 -2 2 3 ImportOnlySubgridV2 -2 3 0 ImportOnlySubgridV2 -2 3 1 ImportOnlySubgridV2 -2 3 3 ImportOnlySubgridV2 -2 4 0 ImportOnlySubgridV2 -2 4 1 ImportOnlySubgridV2 -2 4 3 ImportOnlySubgridV2 -2 5 0 ImportOnlySubgridV2 -2 5 1 ImportOnlySubgridV2 -2 5 3 ImportOnlySubgridV2 -2 6 0 ImportOnlySubgridV2 -2 6 1 ImportOnlySubgridV2 -2 6 3 ImportOnlySubgridV2 -2 7 0 ImportOnlySubgridV2 -2 7 1 ImportOnlySubgridV2 -2 7 3 ImportOnlySubgridV2 -3 0 0 ImportOnlySubgridV2 -3 0 2 ImportOnlySubgridV2 -3 0 4 ImportOnlySubgridV2 -3 1 0 ImportOnlySubgridV2 -3 1 2 ImportOnlySubgridV2 -3 1 4 ImportOnlySubgridV2 -3 2 0 ImportOnlySubgridV2 -3 2 2 ImportOnlySubgridV2 -3 2 4 ImportOnlySubgridV2 -3 3 0 ImportOnlySubgridV2 -3 3 2 ImportOnlySubgridV2 -3 3 4 ImportOnlySubgridV2 -3 4 0 ImportOnlySubgridV2 -3 4 2 ImportOnlySubgridV2 -3 4 4 ImportOnlySubgridV2 -3 5 0 ImportOnlySubgridV2 -3 5 2 ImportOnlySubgridV2 -3 5 4 ImportOnlySubgridV2 -3 6 0 ImportOnlySubgridV2 -3 6 2 ImportOnlySubgridV2 -3 6 4 ImportOnlySubgridV2 -3 7 0 ImportOnlySubgridV2 -3 7 2 ImportOnlySubgridV2 -3 7 4 ImportOnlySubgridV2 -4 0 0 ImportOnlySubgridV2 -4 0 2 ImportOnlySubgridV2 -4 0 4 ImportOnlySubgridV2 -4 1 0 ImportOnlySubgridV2 -4 1 2 ImportOnlySubgridV2 -4 1 4 ImportOnlySubgridV2 -4 2 0 ImportOnlySubgridV2 -4 2 2 ImportOnlySubgridV2 -4 2 4 ImportOnlySubgridV2 -4 3 0 ImportOnlySubgridV2 -4 3 2 ImportOnlySubgridV2 -4 3 4 ImportOnlySubgridV2 -4 4 0 ImportOnlySubgridV2 -4 4 2 ImportOnlySubgridV2 -4 4 4 ImportOnlySubgridV2 -4 5 0 ImportOnlySubgridV2 -4 5 2 ImportOnlySubgridV2 -4 5 4 ImportOnlySubgridV2 -4 6 0 ImportOnlySubgridV2 -4 6 2 
ImportOnlySubgridV2 -4 6 4 ImportOnlySubgridV2 -4 7 0 ImportOnlySubgridV2 -4 7 2 ImportOnlySubgridV2 -4 7 4 ImportOnlySubgridV2 +const TYPE_STR: &str = "o b c type +-+-+-+--------------- +0 0 0 ImportSubgridV1 +0 1 0 ImportSubgridV1 +0 2 0 ImportSubgridV1 +0 3 0 ImportSubgridV1 +0 4 0 ImportSubgridV1 +0 5 0 ImportSubgridV1 +0 6 0 ImportSubgridV1 +0 7 0 ImportSubgridV1 +1 0 0 ImportSubgridV1 +1 0 1 ImportSubgridV1 +1 0 3 ImportSubgridV1 +1 1 0 ImportSubgridV1 +1 1 1 ImportSubgridV1 +1 1 3 ImportSubgridV1 +1 2 0 ImportSubgridV1 +1 2 1 ImportSubgridV1 +1 2 3 ImportSubgridV1 +1 3 0 ImportSubgridV1 +1 3 1 ImportSubgridV1 +1 3 3 ImportSubgridV1 +1 4 0 ImportSubgridV1 +1 4 1 ImportSubgridV1 +1 4 3 ImportSubgridV1 +1 5 0 ImportSubgridV1 +1 5 1 ImportSubgridV1 +1 5 3 ImportSubgridV1 +1 6 0 ImportSubgridV1 +1 6 1 ImportSubgridV1 +1 6 3 ImportSubgridV1 +1 7 0 ImportSubgridV1 +1 7 1 ImportSubgridV1 +1 7 3 ImportSubgridV1 +2 0 0 ImportSubgridV1 +2 0 1 ImportSubgridV1 +2 0 3 ImportSubgridV1 +2 1 0 ImportSubgridV1 +2 1 1 ImportSubgridV1 +2 1 3 ImportSubgridV1 +2 2 0 ImportSubgridV1 +2 2 1 ImportSubgridV1 +2 2 3 ImportSubgridV1 +2 3 0 ImportSubgridV1 +2 3 1 ImportSubgridV1 +2 3 3 ImportSubgridV1 +2 4 0 ImportSubgridV1 +2 4 1 ImportSubgridV1 +2 4 3 ImportSubgridV1 +2 5 0 ImportSubgridV1 +2 5 1 ImportSubgridV1 +2 5 3 ImportSubgridV1 +2 6 0 ImportSubgridV1 +2 6 1 ImportSubgridV1 +2 6 3 ImportSubgridV1 +2 7 0 ImportSubgridV1 +2 7 1 ImportSubgridV1 +2 7 3 ImportSubgridV1 +3 0 0 ImportSubgridV1 +3 0 2 ImportSubgridV1 +3 0 4 ImportSubgridV1 +3 1 0 ImportSubgridV1 +3 1 2 ImportSubgridV1 +3 1 4 ImportSubgridV1 +3 2 0 ImportSubgridV1 +3 2 2 ImportSubgridV1 +3 2 4 ImportSubgridV1 +3 3 0 ImportSubgridV1 +3 3 2 ImportSubgridV1 +3 3 4 ImportSubgridV1 +3 4 0 ImportSubgridV1 +3 4 2 ImportSubgridV1 +3 4 4 ImportSubgridV1 +3 5 0 ImportSubgridV1 +3 5 2 ImportSubgridV1 +3 5 4 ImportSubgridV1 +3 6 0 ImportSubgridV1 +3 6 2 ImportSubgridV1 +3 6 4 ImportSubgridV1 +3 7 0 ImportSubgridV1 +3 7 2 
ImportSubgridV1 +3 7 4 ImportSubgridV1 +4 0 0 ImportSubgridV1 +4 0 2 ImportSubgridV1 +4 0 4 ImportSubgridV1 +4 1 0 ImportSubgridV1 +4 1 2 ImportSubgridV1 +4 1 4 ImportSubgridV1 +4 2 0 ImportSubgridV1 +4 2 2 ImportSubgridV1 +4 2 4 ImportSubgridV1 +4 3 0 ImportSubgridV1 +4 3 2 ImportSubgridV1 +4 3 4 ImportSubgridV1 +4 4 0 ImportSubgridV1 +4 4 2 ImportSubgridV1 +4 4 4 ImportSubgridV1 +4 5 0 ImportSubgridV1 +4 5 2 ImportSubgridV1 +4 5 4 ImportSubgridV1 +4 6 0 ImportSubgridV1 +4 6 2 ImportSubgridV1 +4 6 4 ImportSubgridV1 +4 7 0 ImportSubgridV1 +4 7 2 ImportSubgridV1 +4 7 4 ImportSubgridV1 "; -const TYPE_SHOW_EMPTY_STR: &str = "o b c type --+-+-+------------------- -0 0 0 ImportOnlySubgridV2 +const TYPE_SHOW_EMPTY_STR: &str = "o b c type +-+-+-+--------------- +0 0 0 ImportSubgridV1 0 0 1 EmptySubgridV1 0 0 2 EmptySubgridV1 0 0 3 EmptySubgridV1 0 0 4 EmptySubgridV1 -0 1 0 ImportOnlySubgridV2 +0 1 0 ImportSubgridV1 0 1 1 EmptySubgridV1 0 1 2 EmptySubgridV1 0 1 3 EmptySubgridV1 0 1 4 EmptySubgridV1 -0 2 0 ImportOnlySubgridV2 +0 2 0 ImportSubgridV1 0 2 1 EmptySubgridV1 0 2 2 EmptySubgridV1 0 2 3 EmptySubgridV1 0 2 4 EmptySubgridV1 -0 3 0 ImportOnlySubgridV2 +0 3 0 ImportSubgridV1 0 3 1 EmptySubgridV1 0 3 2 EmptySubgridV1 0 3 3 EmptySubgridV1 0 3 4 EmptySubgridV1 -0 4 0 ImportOnlySubgridV2 +0 4 0 ImportSubgridV1 0 4 1 EmptySubgridV1 0 4 2 EmptySubgridV1 0 4 3 EmptySubgridV1 0 4 4 EmptySubgridV1 -0 5 0 ImportOnlySubgridV2 +0 5 0 ImportSubgridV1 0 5 1 EmptySubgridV1 0 5 2 EmptySubgridV1 0 5 3 EmptySubgridV1 0 5 4 EmptySubgridV1 -0 6 0 ImportOnlySubgridV2 +0 6 0 ImportSubgridV1 0 6 1 EmptySubgridV1 0 6 2 EmptySubgridV1 0 6 3 EmptySubgridV1 0 6 4 EmptySubgridV1 -0 7 0 ImportOnlySubgridV2 +0 7 0 ImportSubgridV1 0 7 1 EmptySubgridV1 0 7 2 EmptySubgridV1 0 7 3 EmptySubgridV1 0 7 4 EmptySubgridV1 -1 0 0 ImportOnlySubgridV2 -1 0 1 ImportOnlySubgridV2 +1 0 0 ImportSubgridV1 +1 0 1 ImportSubgridV1 1 0 2 EmptySubgridV1 -1 0 3 ImportOnlySubgridV2 +1 0 3 ImportSubgridV1 1 0 4 
EmptySubgridV1 -1 1 0 ImportOnlySubgridV2 -1 1 1 ImportOnlySubgridV2 +1 1 0 ImportSubgridV1 +1 1 1 ImportSubgridV1 1 1 2 EmptySubgridV1 -1 1 3 ImportOnlySubgridV2 +1 1 3 ImportSubgridV1 1 1 4 EmptySubgridV1 -1 2 0 ImportOnlySubgridV2 -1 2 1 ImportOnlySubgridV2 +1 2 0 ImportSubgridV1 +1 2 1 ImportSubgridV1 1 2 2 EmptySubgridV1 -1 2 3 ImportOnlySubgridV2 +1 2 3 ImportSubgridV1 1 2 4 EmptySubgridV1 -1 3 0 ImportOnlySubgridV2 -1 3 1 ImportOnlySubgridV2 +1 3 0 ImportSubgridV1 +1 3 1 ImportSubgridV1 1 3 2 EmptySubgridV1 -1 3 3 ImportOnlySubgridV2 +1 3 3 ImportSubgridV1 1 3 4 EmptySubgridV1 -1 4 0 ImportOnlySubgridV2 -1 4 1 ImportOnlySubgridV2 +1 4 0 ImportSubgridV1 +1 4 1 ImportSubgridV1 1 4 2 EmptySubgridV1 -1 4 3 ImportOnlySubgridV2 +1 4 3 ImportSubgridV1 1 4 4 EmptySubgridV1 -1 5 0 ImportOnlySubgridV2 -1 5 1 ImportOnlySubgridV2 +1 5 0 ImportSubgridV1 +1 5 1 ImportSubgridV1 1 5 2 EmptySubgridV1 -1 5 3 ImportOnlySubgridV2 +1 5 3 ImportSubgridV1 1 5 4 EmptySubgridV1 -1 6 0 ImportOnlySubgridV2 -1 6 1 ImportOnlySubgridV2 +1 6 0 ImportSubgridV1 +1 6 1 ImportSubgridV1 1 6 2 EmptySubgridV1 -1 6 3 ImportOnlySubgridV2 +1 6 3 ImportSubgridV1 1 6 4 EmptySubgridV1 -1 7 0 ImportOnlySubgridV2 -1 7 1 ImportOnlySubgridV2 +1 7 0 ImportSubgridV1 +1 7 1 ImportSubgridV1 1 7 2 EmptySubgridV1 -1 7 3 ImportOnlySubgridV2 +1 7 3 ImportSubgridV1 1 7 4 EmptySubgridV1 -2 0 0 ImportOnlySubgridV2 -2 0 1 ImportOnlySubgridV2 +2 0 0 ImportSubgridV1 +2 0 1 ImportSubgridV1 2 0 2 EmptySubgridV1 -2 0 3 ImportOnlySubgridV2 +2 0 3 ImportSubgridV1 2 0 4 EmptySubgridV1 -2 1 0 ImportOnlySubgridV2 -2 1 1 ImportOnlySubgridV2 +2 1 0 ImportSubgridV1 +2 1 1 ImportSubgridV1 2 1 2 EmptySubgridV1 -2 1 3 ImportOnlySubgridV2 +2 1 3 ImportSubgridV1 2 1 4 EmptySubgridV1 -2 2 0 ImportOnlySubgridV2 -2 2 1 ImportOnlySubgridV2 +2 2 0 ImportSubgridV1 +2 2 1 ImportSubgridV1 2 2 2 EmptySubgridV1 -2 2 3 ImportOnlySubgridV2 +2 2 3 ImportSubgridV1 2 2 4 EmptySubgridV1 -2 3 0 ImportOnlySubgridV2 -2 3 1 ImportOnlySubgridV2 +2 3 0 
ImportSubgridV1 +2 3 1 ImportSubgridV1 2 3 2 EmptySubgridV1 -2 3 3 ImportOnlySubgridV2 +2 3 3 ImportSubgridV1 2 3 4 EmptySubgridV1 -2 4 0 ImportOnlySubgridV2 -2 4 1 ImportOnlySubgridV2 +2 4 0 ImportSubgridV1 +2 4 1 ImportSubgridV1 2 4 2 EmptySubgridV1 -2 4 3 ImportOnlySubgridV2 +2 4 3 ImportSubgridV1 2 4 4 EmptySubgridV1 -2 5 0 ImportOnlySubgridV2 -2 5 1 ImportOnlySubgridV2 +2 5 0 ImportSubgridV1 +2 5 1 ImportSubgridV1 2 5 2 EmptySubgridV1 -2 5 3 ImportOnlySubgridV2 +2 5 3 ImportSubgridV1 2 5 4 EmptySubgridV1 -2 6 0 ImportOnlySubgridV2 -2 6 1 ImportOnlySubgridV2 +2 6 0 ImportSubgridV1 +2 6 1 ImportSubgridV1 2 6 2 EmptySubgridV1 -2 6 3 ImportOnlySubgridV2 +2 6 3 ImportSubgridV1 2 6 4 EmptySubgridV1 -2 7 0 ImportOnlySubgridV2 -2 7 1 ImportOnlySubgridV2 +2 7 0 ImportSubgridV1 +2 7 1 ImportSubgridV1 2 7 2 EmptySubgridV1 -2 7 3 ImportOnlySubgridV2 +2 7 3 ImportSubgridV1 2 7 4 EmptySubgridV1 -3 0 0 ImportOnlySubgridV2 +3 0 0 ImportSubgridV1 3 0 1 EmptySubgridV1 -3 0 2 ImportOnlySubgridV2 +3 0 2 ImportSubgridV1 3 0 3 EmptySubgridV1 -3 0 4 ImportOnlySubgridV2 -3 1 0 ImportOnlySubgridV2 +3 0 4 ImportSubgridV1 +3 1 0 ImportSubgridV1 3 1 1 EmptySubgridV1 -3 1 2 ImportOnlySubgridV2 +3 1 2 ImportSubgridV1 3 1 3 EmptySubgridV1 -3 1 4 ImportOnlySubgridV2 -3 2 0 ImportOnlySubgridV2 +3 1 4 ImportSubgridV1 +3 2 0 ImportSubgridV1 3 2 1 EmptySubgridV1 -3 2 2 ImportOnlySubgridV2 +3 2 2 ImportSubgridV1 3 2 3 EmptySubgridV1 -3 2 4 ImportOnlySubgridV2 -3 3 0 ImportOnlySubgridV2 +3 2 4 ImportSubgridV1 +3 3 0 ImportSubgridV1 3 3 1 EmptySubgridV1 -3 3 2 ImportOnlySubgridV2 +3 3 2 ImportSubgridV1 3 3 3 EmptySubgridV1 -3 3 4 ImportOnlySubgridV2 -3 4 0 ImportOnlySubgridV2 +3 3 4 ImportSubgridV1 +3 4 0 ImportSubgridV1 3 4 1 EmptySubgridV1 -3 4 2 ImportOnlySubgridV2 +3 4 2 ImportSubgridV1 3 4 3 EmptySubgridV1 -3 4 4 ImportOnlySubgridV2 -3 5 0 ImportOnlySubgridV2 +3 4 4 ImportSubgridV1 +3 5 0 ImportSubgridV1 3 5 1 EmptySubgridV1 -3 5 2 ImportOnlySubgridV2 +3 5 2 ImportSubgridV1 3 5 3 
EmptySubgridV1 -3 5 4 ImportOnlySubgridV2 -3 6 0 ImportOnlySubgridV2 +3 5 4 ImportSubgridV1 +3 6 0 ImportSubgridV1 3 6 1 EmptySubgridV1 -3 6 2 ImportOnlySubgridV2 +3 6 2 ImportSubgridV1 3 6 3 EmptySubgridV1 -3 6 4 ImportOnlySubgridV2 -3 7 0 ImportOnlySubgridV2 +3 6 4 ImportSubgridV1 +3 7 0 ImportSubgridV1 3 7 1 EmptySubgridV1 -3 7 2 ImportOnlySubgridV2 +3 7 2 ImportSubgridV1 3 7 3 EmptySubgridV1 -3 7 4 ImportOnlySubgridV2 -4 0 0 ImportOnlySubgridV2 +3 7 4 ImportSubgridV1 +4 0 0 ImportSubgridV1 4 0 1 EmptySubgridV1 -4 0 2 ImportOnlySubgridV2 +4 0 2 ImportSubgridV1 4 0 3 EmptySubgridV1 -4 0 4 ImportOnlySubgridV2 -4 1 0 ImportOnlySubgridV2 +4 0 4 ImportSubgridV1 +4 1 0 ImportSubgridV1 4 1 1 EmptySubgridV1 -4 1 2 ImportOnlySubgridV2 +4 1 2 ImportSubgridV1 4 1 3 EmptySubgridV1 -4 1 4 ImportOnlySubgridV2 -4 2 0 ImportOnlySubgridV2 +4 1 4 ImportSubgridV1 +4 2 0 ImportSubgridV1 4 2 1 EmptySubgridV1 -4 2 2 ImportOnlySubgridV2 +4 2 2 ImportSubgridV1 4 2 3 EmptySubgridV1 -4 2 4 ImportOnlySubgridV2 -4 3 0 ImportOnlySubgridV2 +4 2 4 ImportSubgridV1 +4 3 0 ImportSubgridV1 4 3 1 EmptySubgridV1 -4 3 2 ImportOnlySubgridV2 +4 3 2 ImportSubgridV1 4 3 3 EmptySubgridV1 -4 3 4 ImportOnlySubgridV2 -4 4 0 ImportOnlySubgridV2 +4 3 4 ImportSubgridV1 +4 4 0 ImportSubgridV1 4 4 1 EmptySubgridV1 -4 4 2 ImportOnlySubgridV2 +4 4 2 ImportSubgridV1 4 4 3 EmptySubgridV1 -4 4 4 ImportOnlySubgridV2 -4 5 0 ImportOnlySubgridV2 +4 4 4 ImportSubgridV1 +4 5 0 ImportSubgridV1 4 5 1 EmptySubgridV1 -4 5 2 ImportOnlySubgridV2 +4 5 2 ImportSubgridV1 4 5 3 EmptySubgridV1 -4 5 4 ImportOnlySubgridV2 -4 6 0 ImportOnlySubgridV2 +4 5 4 ImportSubgridV1 +4 6 0 ImportSubgridV1 4 6 1 EmptySubgridV1 -4 6 2 ImportOnlySubgridV2 +4 6 2 ImportSubgridV1 4 6 3 EmptySubgridV1 -4 6 4 ImportOnlySubgridV2 -4 7 0 ImportOnlySubgridV2 +4 6 4 ImportSubgridV1 +4 7 0 ImportSubgridV1 4 7 1 EmptySubgridV1 -4 7 2 ImportOnlySubgridV2 +4 7 2 ImportSubgridV1 4 7 3 EmptySubgridV1 -4 7 4 ImportOnlySubgridV2 +4 7 4 ImportSubgridV1 "; -const 
X1_STR: &str = "o b c x1 +const X0_STR: &str = "o b c x0 -+-+-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 0 0 0 1.000e0, 9.309e-1, 8.628e-1, 7.956e-1, 7.296e-1, 6.648e-1, 6.015e-1, 5.398e-1, 4.799e-1, 4.222e-1, 3.669e-1, 3.144e-1, 2.651e-1, 2.195e-1, 1.780e-1, 1.411e-1, 1.091e-1, 8.228e-2, 6.048e-2, 4.341e-2, 3.052e-2, 2.109e-2, 1.438e-2, 9.699e-3, 6.496e-3, 4.329e-3, 2.874e-3, 1.903e-3, 1.259e-3, 8.314e-4, 5.488e-4, 3.621e-4, 2.388e-4, 1.575e-4 0 1 0 1.000e0, 9.309e-1, 8.628e-1, 7.956e-1, 7.296e-1, 6.648e-1, 6.015e-1, 5.398e-1, 4.799e-1, 4.222e-1, 3.669e-1, 3.144e-1, 2.651e-1, 2.195e-1, 1.780e-1, 1.411e-1, 1.091e-1, 8.228e-2, 6.048e-2, 4.341e-2, 3.052e-2, 2.109e-2, 1.438e-2, 9.699e-3, 6.496e-3, 4.329e-3, 2.874e-3, 1.903e-3, 1.259e-3, 8.314e-4, 5.488e-4, 3.621e-4, 2.388e-4, 1.575e-4 @@ -981,7 +655,7 @@ const X1_STR: &str = "o b c 4 7 4 1.000e0, 9.309e-1, 8.628e-1, 7.956e-1, 7.296e-1, 6.648e-1, 6.015e-1, 5.398e-1, 4.799e-1, 4.222e-1, 3.669e-1, 3.144e-1, 2.651e-1, 2.195e-1, 1.780e-1, 1.411e-1, 1.091e-1, 8.228e-2, 6.048e-2, 4.341e-2, 3.052e-2, 2.109e-2, 1.438e-2, 9.699e-3, 6.496e-3, 4.329e-3, 2.874e-3, 1.903e-3, 1.259e-3, 8.314e-4, 5.488e-4, 3.621e-4, 2.388e-4, 1.575e-4, 1.038e-4, 6.844e-5, 4.511e-5, 2.974e-5 "; -const X2_STR: &str = "o b c x2 +const X1_STR: &str = "o b c x1 
-+-+-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 0 0 0 1.000e0, 9.309e-1, 8.628e-1, 7.956e-1, 7.296e-1, 6.648e-1, 6.015e-1, 5.398e-1, 4.799e-1, 4.222e-1, 3.669e-1, 3.144e-1, 2.651e-1, 2.195e-1, 1.780e-1, 1.411e-1, 1.091e-1, 8.228e-2, 6.048e-2, 4.341e-2, 3.052e-2, 2.109e-2, 1.438e-2, 9.699e-3, 6.496e-3, 4.329e-3, 2.874e-3, 1.903e-3, 1.259e-3, 8.314e-4, 5.488e-4, 3.621e-4, 2.388e-4, 1.575e-4 0 1 0 1.000e0, 9.309e-1, 8.628e-1, 7.956e-1, 7.296e-1, 6.648e-1, 6.015e-1, 5.398e-1, 4.799e-1, 4.222e-1, 3.669e-1, 3.144e-1, 2.651e-1, 2.195e-1, 1.780e-1, 1.411e-1, 1.091e-1, 8.228e-2, 6.048e-2, 4.341e-2, 3.052e-2, 2.109e-2, 1.438e-2, 9.699e-3, 6.496e-3, 4.329e-3, 2.874e-3, 1.903e-3, 1.259e-3, 8.314e-4, 5.488e-4, 3.621e-4, 2.388e-4, 1.575e-4 @@ -1100,59 +774,17 @@ fn help() { } #[test] -fn muf() { +fn scale0() { Command::cargo_bin("pineappl") .unwrap() .args([ "subgrids", - "--muf", + "--scale=0", "../test-data/LHCB_WP_7TEV_opt.pineappl.lz4", ]) .assert() .success() - .stdout(MUF_STR); -} - -#[test] -fn muf2() { - Command::cargo_bin("pineappl") - .unwrap() - .args([ - "subgrids", - "--muf2", - "../test-data/LHCB_WP_7TEV_opt.pineappl.lz4", - ]) - .assert() - .success() - .stdout(MUF2_STR); -} - -#[test] -fn mur() { - Command::cargo_bin("pineappl") - .unwrap() - .args([ - "subgrids", - "--mur", - "../test-data/LHCB_WP_7TEV_opt.pineappl.lz4", - ]) - .assert() - .success() - .stdout(MUR_STR); -} - -#[test] -fn mur2() { - Command::cargo_bin("pineappl") - .unwrap() - .args([ - "subgrids", - "--mur2", - "../test-data/LHCB_WP_7TEV_opt.pineappl.lz4", - ]) - .assert() - .success() - .stdout(MUR2_STR); + .stdout(SCALE0_STR); } #[test] @@ -1199,29 
+831,29 @@ fn type_show_empty() { } #[test] -fn x1() { +fn x0() { Command::cargo_bin("pineappl") .unwrap() .args([ "subgrids", - "--x1", + "--x=0", "../test-data/LHCB_WP_7TEV_opt.pineappl.lz4", ]) .assert() .success() - .stdout(X1_STR); + .stdout(X0_STR); } #[test] -fn x2() { +fn x1() { Command::cargo_bin("pineappl") .unwrap() .args([ "subgrids", - "--x2", + "--x=1", "../test-data/LHCB_WP_7TEV_opt.pineappl.lz4", ]) .assert() .success() - .stdout(X2_STR); + .stdout(X1_STR); } diff --git a/pineappl_cli/tests/uncert.rs b/pineappl_cli/tests/uncert.rs index 2e44fff35..b0bfcd630 100644 --- a/pineappl_cli/tests/uncert.rs +++ b/pineappl_cli/tests/uncert.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + use assert_cmd::Command; use std::num::NonZeroUsize; use std::thread; @@ -12,9 +14,9 @@ Arguments: Options: --conv-fun[=] Calculate convolution function uncertainties - --scale-abs[=] Show absolute numbers of the scale-varied results [possible values: 3, 7, 9] - --scale-cov[=] Calculate scale uncertainties using the covariance method [possible values: 3, 7, 9] - --scale-env[=] Calculate the envelope of results where renormalization and factorization scales varied [possible values: 3, 7, 9] + --scale-abs[=] Show absolute numbers of the scale-varied results [possible values: 3, 7, 9, 17, 27] + --scale-cov[=] Calculate scale uncertainties using the covariance method [possible values: 3, 7, 9, 17, 27] + --scale-env[=] Calculate the envelope of results where renormalization, factorization and fragmentation scales are varied [possible values: 3, 7, 9, 17, 27] --cl Confidence level in per cent, for convolution function uncertainties [default: 68.26894921370858] -i, --integrated Show integrated numbers (without bin widths) instead of differential ones -o, --orders Select orders manually @@ -77,18 +79,77 @@ const ORDERS_A2_AS1A2_STR: &str = "b etal dsig/detal NNPDF31_nlo_as_0118 7 4 4.5 2.7737493e1 2.7724826e1 -2.77 2.77 "; -const SCALE_ABS_STR: &str = -"b etal dsig/detal (r=1,f=1) 
(r=2,f=2) (r=0.5,f=0.5) (r=2,f=1) (r=1,f=2) (r=0.5,f=1) (r=1,f=0.5) - [] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] --+----+----+-----------+-----------+-----------+-------------+-----------+-----------+-----------+----------- -0 2 2.25 7.5459110e2 7.5459110e2 7.6745431e2 7.4296019e2 7.4384068e2 7.7529764e2 7.6796494e2 7.2595107e2 -1 2.25 2.5 6.9028342e2 6.9028342e2 7.0221920e2 6.7923774e2 6.8058382e2 7.0957480e2 7.0235002e2 6.6417441e2 -2 2.5 2.75 6.0025198e2 6.0025198e2 6.1056383e2 5.9046454e2 5.9160658e2 6.1750966e2 6.1100712e2 5.7747295e2 -3 2.75 3 4.8552235e2 4.8552235e2 4.9366919e2 4.7761552e2 4.7841022e2 4.9966237e2 4.9437007e2 4.6723687e2 -4 3 3.25 3.6195456e2 3.6195456e2 3.6783089e2 3.5611822e2 3.5652780e2 3.7261436e2 3.6870561e2 3.4843600e2 -5 3.25 3.5 2.4586691e2 2.4586691e2 2.4967698e2 2.4198770e2 2.4207028e2 2.5316566e2 2.5059003e2 2.3677625e2 -6 3.5 4 1.1586851e2 1.1586851e2 1.1746280e2 1.1418227e2 1.1396174e2 1.1930157e2 1.1824058e2 1.1166942e2 -7 4 4.5 2.7517266e1 2.7517266e1 2.7787333e1 2.7211003e1 2.7002241e1 2.8306905e1 2.8157972e1 2.6562471e1 +const SCALE_ABS_3_STR: &str = "b etal dsig/detal 1,1,1 2,2,2 0.5,0.5,0.5 + [] [pb] (r,f,a) (r,f,a) (r,f,a) + [pb] [pb] [pb] +-+----+----+-----------+-----------+-----------+----------- +0 2 2.25 7.5459110e2 7.5459110e2 7.6745431e2 7.4296019e2 +1 2.25 2.5 6.9028342e2 6.9028342e2 7.0221920e2 6.7923774e2 +2 2.5 2.75 6.0025198e2 6.0025198e2 6.1056383e2 5.9046454e2 +3 2.75 3 4.8552235e2 4.8552235e2 4.9366919e2 4.7761552e2 +4 3 3.25 3.6195456e2 3.6195456e2 3.6783089e2 3.5611822e2 +5 3.25 3.5 2.4586691e2 2.4586691e2 2.4967698e2 2.4198770e2 +6 3.5 4 1.1586851e2 1.1586851e2 1.1746280e2 1.1418227e2 +7 4 4.5 2.7517266e1 2.7517266e1 2.7787333e1 2.7211003e1 +"; + +const SCALE_ABS_7_STR: &str = +"b etal dsig/detal 1,1,1 2,2,1 0.5,0.5,1 2,1,1 1,2,1 0.5,1,1 1,0.5,1 + [] [pb] (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) + [pb] [pb] [pb] [pb] [pb] [pb] [pb] 
+-+----+----+-----------+-----------+-----------+-----------+-----------+-----------+-----------+----------- +0 2 2.25 7.5459110e2 7.5459110e2 7.6745431e2 7.4296019e2 7.4384068e2 7.7529764e2 7.6796494e2 7.2595107e2 +1 2.25 2.5 6.9028342e2 6.9028342e2 7.0221920e2 6.7923774e2 6.8058382e2 7.0957480e2 7.0235002e2 6.6417441e2 +2 2.5 2.75 6.0025198e2 6.0025198e2 6.1056383e2 5.9046454e2 5.9160658e2 6.1750966e2 6.1100712e2 5.7747295e2 +3 2.75 3 4.8552235e2 4.8552235e2 4.9366919e2 4.7761552e2 4.7841022e2 4.9966237e2 4.9437007e2 4.6723687e2 +4 3 3.25 3.6195456e2 3.6195456e2 3.6783089e2 3.5611822e2 3.5652780e2 3.7261436e2 3.6870561e2 3.4843600e2 +5 3.25 3.5 2.4586691e2 2.4586691e2 2.4967698e2 2.4198770e2 2.4207028e2 2.5316566e2 2.5059003e2 2.3677625e2 +6 3.5 4 1.1586851e2 1.1586851e2 1.1746280e2 1.1418227e2 1.1396174e2 1.1930157e2 1.1824058e2 1.1166942e2 +7 4 4.5 2.7517266e1 2.7517266e1 2.7787333e1 2.7211003e1 2.7002241e1 2.8306905e1 2.8157972e1 2.6562471e1 +"; + +const SCALE_ABS_9_STR: &str = +"b etal dsig/detal 1,1,1 2,2,1 0.5,0.5,1 2,1,1 1,2,1 0.5,1,1 1,0.5,1 2,0.5,1 0.5,2,1 + [] [pb] (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) + [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] +-+----+----+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+----------- +0 2 2.25 7.5459110e2 7.5459110e2 7.6745431e2 7.4296019e2 7.4384068e2 7.7529764e2 7.6796494e2 7.2595107e2 7.1227848e2 7.8505497e2 +1 2.25 2.5 6.9028342e2 6.9028342e2 7.0221920e2 6.7923774e2 6.8058382e2 7.0957480e2 7.0235002e2 6.6417441e2 6.5206593e2 7.1872540e2 +2 2.5 2.75 6.0025198e2 6.0025198e2 6.1056383e2 5.9046454e2 5.9160658e2 6.1750966e2 6.1100712e2 5.7747295e2 5.6702981e2 6.2615050e2 +3 2.75 3 4.8552235e2 4.8552235e2 4.9366919e2 4.7761552e2 4.7841022e2 4.9966237e2 4.9437007e2 4.6723687e2 4.5889411e2 5.0711808e2 +4 3 3.25 3.6195456e2 3.6195456e2 3.6783089e2 3.5611822e2 3.5652780e2 3.7261436e2 3.6870561e2 3.4843600e2 3.4226073e2 3.7856515e2 
+5 3.25 3.5 2.4586691e2 2.4586691e2 2.4967698e2 2.4198770e2 2.4207028e2 2.5316566e2 2.5059003e2 2.3677625e2 2.3258708e2 2.5750568e2 +6 3.5 4 1.1586851e2 1.1586851e2 1.1746280e2 1.1418227e2 1.1396174e2 1.1930157e2 1.1824058e2 1.1166942e2 1.0964950e2 1.2158905e2 +7 4 4.5 2.7517266e1 2.7517266e1 2.7787333e1 2.7211003e1 2.7002241e1 2.8306905e1 2.8157972e1 2.6562471e1 2.6041156e1 2.8953268e1 +"; + +const SCALE_ABS_17_STR: &str = "b etal dsig/detal 1,1,1 2,2,2 0.5,0.5,0.5 0.5,0.5,1 0.5,1,0.5 0.5,1,1 0.5,1,2 1,0.5,0.5 1,0.5,1 1,1,0.5 1,1,2 1,2,1 1,2,2 2,1,0.5 2,1,1 2,1,2 2,2,1 + [] [pb] (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) + [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] +-+----+----+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+----------- +0 2 2.25 7.5459110e2 7.5459110e2 7.6745431e2 7.4296019e2 7.4296019e2 7.6796494e2 7.6796494e2 7.6796494e2 7.2595107e2 7.2595107e2 7.5459110e2 7.5459110e2 7.7529764e2 7.7529764e2 7.4384068e2 7.4384068e2 7.4384068e2 7.6745431e2 +1 2.25 2.5 6.9028342e2 6.9028342e2 7.0221920e2 6.7923774e2 6.7923774e2 7.0235002e2 7.0235002e2 7.0235002e2 6.6417441e2 6.6417441e2 6.9028342e2 6.9028342e2 7.0957480e2 7.0957480e2 6.8058382e2 6.8058382e2 6.8058382e2 7.0221920e2 +2 2.5 2.75 6.0025198e2 6.0025198e2 6.1056383e2 5.9046454e2 5.9046454e2 6.1100712e2 6.1100712e2 6.1100712e2 5.7747295e2 5.7747295e2 6.0025198e2 6.0025198e2 6.1750966e2 6.1750966e2 5.9160658e2 5.9160658e2 5.9160658e2 6.1056383e2 +3 2.75 3 4.8552235e2 4.8552235e2 4.9366919e2 4.7761552e2 4.7761552e2 4.9437007e2 4.9437007e2 4.9437007e2 4.6723687e2 4.6723687e2 4.8552235e2 4.8552235e2 4.9966237e2 4.9966237e2 4.7841022e2 4.7841022e2 4.7841022e2 4.9366919e2 +4 3 3.25 3.6195456e2 3.6195456e2 3.6783089e2 
3.5611822e2 3.5611822e2 3.6870561e2 3.6870561e2 3.6870561e2 3.4843600e2 3.4843600e2 3.6195456e2 3.6195456e2 3.7261436e2 3.7261436e2 3.5652780e2 3.5652780e2 3.5652780e2 3.6783089e2 +5 3.25 3.5 2.4586691e2 2.4586691e2 2.4967698e2 2.4198770e2 2.4198770e2 2.5059003e2 2.5059003e2 2.5059003e2 2.3677625e2 2.3677625e2 2.4586691e2 2.4586691e2 2.5316566e2 2.5316566e2 2.4207028e2 2.4207028e2 2.4207028e2 2.4967698e2 +6 3.5 4 1.1586851e2 1.1586851e2 1.1746280e2 1.1418227e2 1.1418227e2 1.1824058e2 1.1824058e2 1.1824058e2 1.1166942e2 1.1166942e2 1.1586851e2 1.1586851e2 1.1930157e2 1.1930157e2 1.1396174e2 1.1396174e2 1.1396174e2 1.1746280e2 +7 4 4.5 2.7517266e1 2.7517266e1 2.7787333e1 2.7211003e1 2.7211003e1 2.8157972e1 2.8157972e1 2.8157972e1 2.6562471e1 2.6562471e1 2.7517266e1 2.7517266e1 2.8306905e1 2.8306905e1 2.7002241e1 2.7002241e1 2.7002241e1 2.7787333e1 +"; + +const SCALE_ABS_27_STR: &str = +"b etal dsig/detal 1,1,1 2,2,2 0.5,0.5,0.5 0.5,0.5,1 0.5,1,0.5 0.5,1,1 0.5,1,2 1,0.5,0.5 1,0.5,1 1,1,0.5 1,1,2 1,2,1 1,2,2 2,1,0.5 2,1,1 2,1,2 2,2,1 2,0.5,0.5 0.5,2,0.5 1,2,0.5 2,2,0.5 2,0.5,1 0.5,2,1 0.5,0.5,2 1,0.5,2 2,0.5,2 0.5,2,2 + [] [pb] (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) (r,f,a) + [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] [pb] +-+----+----+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+----------- +0 2 2.25 7.5459110e2 7.5459110e2 7.6745431e2 7.4296019e2 7.4296019e2 7.6796494e2 7.6796494e2 7.6796494e2 7.2595107e2 7.2595107e2 7.5459110e2 7.5459110e2 
7.7529764e2 7.7529764e2 7.4384068e2 7.4384068e2 7.4384068e2 7.6745431e2 7.1227848e2 7.8505497e2 7.7529764e2 7.6745431e2 7.1227848e2 7.8505497e2 7.4296019e2 7.2595107e2 7.1227848e2 7.8505497e2 +1 2.25 2.5 6.9028342e2 6.9028342e2 7.0221920e2 6.7923774e2 6.7923774e2 7.0235002e2 7.0235002e2 7.0235002e2 6.6417441e2 6.6417441e2 6.9028342e2 6.9028342e2 7.0957480e2 7.0957480e2 6.8058382e2 6.8058382e2 6.8058382e2 7.0221920e2 6.5206593e2 7.1872540e2 7.0957480e2 7.0221920e2 6.5206593e2 7.1872540e2 6.7923774e2 6.6417441e2 6.5206593e2 7.1872540e2 +2 2.5 2.75 6.0025198e2 6.0025198e2 6.1056383e2 5.9046454e2 5.9046454e2 6.1100712e2 6.1100712e2 6.1100712e2 5.7747295e2 5.7747295e2 6.0025198e2 6.0025198e2 6.1750966e2 6.1750966e2 5.9160658e2 5.9160658e2 5.9160658e2 6.1056383e2 5.6702981e2 6.2615050e2 6.1750966e2 6.1056383e2 5.6702981e2 6.2615050e2 5.9046454e2 5.7747295e2 5.6702981e2 6.2615050e2 +3 2.75 3 4.8552235e2 4.8552235e2 4.9366919e2 4.7761552e2 4.7761552e2 4.9437007e2 4.9437007e2 4.9437007e2 4.6723687e2 4.6723687e2 4.8552235e2 4.8552235e2 4.9966237e2 4.9966237e2 4.7841022e2 4.7841022e2 4.7841022e2 4.9366919e2 4.5889411e2 5.0711808e2 4.9966237e2 4.9366919e2 4.5889411e2 5.0711808e2 4.7761552e2 4.6723687e2 4.5889411e2 5.0711808e2 +4 3 3.25 3.6195456e2 3.6195456e2 3.6783089e2 3.5611822e2 3.5611822e2 3.6870561e2 3.6870561e2 3.6870561e2 3.4843600e2 3.4843600e2 3.6195456e2 3.6195456e2 3.7261436e2 3.7261436e2 3.5652780e2 3.5652780e2 3.5652780e2 3.6783089e2 3.4226073e2 3.7856515e2 3.7261436e2 3.6783089e2 3.4226073e2 3.7856515e2 3.5611822e2 3.4843600e2 3.4226073e2 3.7856515e2 +5 3.25 3.5 2.4586691e2 2.4586691e2 2.4967698e2 2.4198770e2 2.4198770e2 2.5059003e2 2.5059003e2 2.5059003e2 2.3677625e2 2.3677625e2 2.4586691e2 2.4586691e2 2.5316566e2 2.5316566e2 2.4207028e2 2.4207028e2 2.4207028e2 2.4967698e2 2.3258708e2 2.5750568e2 2.5316566e2 2.4967698e2 2.3258708e2 2.5750568e2 2.4198770e2 2.3677625e2 2.3258708e2 2.5750568e2 +6 3.5 4 1.1586851e2 1.1586851e2 1.1746280e2 1.1418227e2 1.1418227e2 
1.1824058e2 1.1824058e2 1.1824058e2 1.1166942e2 1.1166942e2 1.1586851e2 1.1586851e2 1.1930157e2 1.1930157e2 1.1396174e2 1.1396174e2 1.1396174e2 1.1746280e2 1.0964950e2 1.2158905e2 1.1930157e2 1.1746280e2 1.0964950e2 1.2158905e2 1.1418227e2 1.1166942e2 1.0964950e2 1.2158905e2 +7 4 4.5 2.7517266e1 2.7517266e1 2.7787333e1 2.7211003e1 2.7211003e1 2.8157972e1 2.8157972e1 2.8157972e1 2.6562471e1 2.6562471e1 2.7517266e1 2.7517266e1 2.8306905e1 2.8306905e1 2.7002241e1 2.7002241e1 2.7002241e1 2.7787333e1 2.6041156e1 2.8953268e1 2.8306905e1 2.7787333e1 2.6041156e1 2.8953268e1 2.7211003e1 2.6562471e1 2.6041156e1 2.8953268e1 "; const SCALE_COV_STR: &str = "b etal dsig/detal 7pt scale (cov) @@ -239,7 +300,82 @@ fn scale_abs() { ]) .assert() .success() - .stdout(SCALE_ABS_STR); + .stdout(SCALE_ABS_7_STR); +} + +#[test] +fn scale_abs_3() { + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "uncert", + "--scale-abs=3", + "../test-data/LHCB_WP_7TEV_opt.pineappl.lz4", + "NNPDF31_nlo_as_0118_luxqed", + ]) + .assert() + .success() + .stdout(SCALE_ABS_3_STR); +} + +#[test] +fn scale_abs_7() { + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "uncert", + "--scale-abs=7", + "../test-data/LHCB_WP_7TEV_opt.pineappl.lz4", + "NNPDF31_nlo_as_0118_luxqed", + ]) + .assert() + .success() + .stdout(SCALE_ABS_7_STR); +} + +#[test] +fn scale_abs_9() { + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "uncert", + "--scale-abs=9", + "../test-data/LHCB_WP_7TEV_opt.pineappl.lz4", + "NNPDF31_nlo_as_0118_luxqed", + ]) + .assert() + .success() + .stdout(SCALE_ABS_9_STR); +} + +#[test] +fn scale_abs_17() { + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "uncert", + "--scale-abs=17", + "../test-data/LHCB_WP_7TEV_opt.pineappl.lz4", + "NNPDF31_nlo_as_0118_luxqed", + ]) + .assert() + .success() + .stdout(SCALE_ABS_17_STR); +} + +#[test] +fn scale_abs_27() { + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "uncert", + "--scale-abs=27", + 
"../test-data/LHCB_WP_7TEV_opt.pineappl.lz4", + "NNPDF31_nlo_as_0118_luxqed", + ]) + .assert() + .success() + .stdout(SCALE_ABS_27_STR); } #[test] diff --git a/pineappl_cli/tests/write.rs b/pineappl_cli/tests/write.rs index ddf8c63d2..947838dff 100644 --- a/pineappl_cli/tests/write.rs +++ b/pineappl_cli/tests/write.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + use assert_cmd::Command; use assert_fs::{fixture::FileWriteStr, NamedTempFile}; @@ -10,30 +12,29 @@ Arguments: Path of the modified PineAPPL file Options: - --cc1[=] Charge conjugate the first initial state [possible values: true, false] - --cc2[=] Charge conjugate the second initial state [possible values: true, false] - --dedup-channels[=] Deduplicate channels assuming numbers differing by ULPS are the same - --delete-bins Delete bins with the specified indices - --delete-channels Delete channels with the specified indices - --delete-orders Delete orders with the specified indices - --delete-key Delete an internal key-value pair - --merge-bins Merge specific bins together - --optimize[=] Optimize internal data structure to minimize memory and disk usage [possible values: true, false] - --optimize-fk-table Optimize internal data structure of an FkTable to minimize memory and disk usage [possible values: Nf6Ind, Nf6Sym, Nf5Ind, Nf5Sym, Nf4Ind, Nf4Sym, Nf3Ind, Nf3Sym] - --remap Modify the bin dimensions and widths - --remap-norm Modify the bin normalizations with a common factor - --remap-norm-ignore Modify the bin normalizations by multiplying with the bin lengths for the given dimensions - --rewrite-channel Rewrite the definition of the channel with index IDX - --rewrite-order Rewrite the definition of the order with index IDX - --rotate-pid-basis Rotate the PID basis for this grid [possible values: PDG, EVOL] - -s, --scale Scales all grids with the given factor - --scale-by-bin Scale each bin with a different factor - --scale-by-order Scales all grids with order-dependent factors - --set-key-value Set an 
internal key-value pair - --set-key-file Set an internal key-value pair, with value being read from a file - --split-channels[=] Split the grid such that each channel contains only a single PID combination [possible values: true, false] - --upgrade[=] Convert the file format to the most recent version [possible values: true, false] - -h, --help Print help + --cc Charge conjugate the convolution with the specified index + --dedup-channels[=] Deduplicate channels assuming numbers differing by ULPS are the same + --delete-bins Delete bins with the specified indices + --delete-channels Delete channels with the specified indices + --delete-orders Delete orders with the specified indices + --delete-key Delete an internal key-value pair + --merge-bins Merge specific bins together + --optimize[=] Optimize internal data structure to minimize memory and disk usage [possible values: true, false] + --optimize-fk-table Optimize internal data structure of an FkTable to minimize memory and disk usage [possible values: Nf6Ind, Nf6Sym, Nf5Ind, Nf5Sym, Nf4Ind, Nf4Sym, Nf3Ind, Nf3Sym] + --remap Modify the bin dimensions and widths + --remap-norm Modify the bin normalizations with a common factor + --remap-norm-ignore Modify the bin normalizations by multiplying with the bin lengths for the given dimensions + --rewrite-channel Rewrite the definition of the channel with index IDX + --rewrite-order Rewrite the definition of the order with index IDX + --rotate-pid-basis Rotate the PID basis for this grid [possible values: PDG, EVOL] + -s, --scale Scales all grids with the given factor + --scale-by-bin Scale each bin with a different factor + --scale-by-order Scale subgrids with order-dependent factors + --set-key-value Set an internal key-value pair + --set-key-file Set an internal key-value pair, with value being read from a file + --split-channels[=] Split the grid such that each channel contains only a single PID combination [possible values: true, false] + --upgrade[=] Convert the 
file format to the most recent version [possible values: true, false] + -h, --help Print help "; const CHANNEL_STR: &str = "c entry entry @@ -244,14 +245,14 @@ const ROTATE_PID_BASIS_NO_DIFF_STR: &str = "b x1 O(as^0 a^2) const ROTATE_PID_BASIS_DIFF_STR: &str = "b x1 O(as^0 a^2) O(as^0 a^3) O(as^1 a^2) -+----+----+-----------+-----------+----------+-------------+-------------+----------+-----------+-----------+---------- -0 2 2.25 6.5070305e2 6.5070305e2 -2.220e-16 -7.8692484e0 -7.8692484e0 -4.441e-16 1.1175729e2 1.1175729e2 -1.221e-15 -1 2.25 2.5 5.9601236e2 5.9601236e2 -7.772e-16 -6.5623495e0 -6.5623495e0 -2.220e-16 1.0083341e2 1.0083341e2 -5.551e-16 -2 2.5 2.75 5.1561247e2 5.1561247e2 -8.882e-16 -5.2348261e0 -5.2348261e0 -6.661e-16 8.9874343e1 8.9874343e1 -1.221e-15 -3 2.75 3 4.1534629e2 4.1534629e2 -4.441e-16 -3.7590420e0 -3.7590420e0 -5.551e-16 7.3935106e1 7.3935106e1 -1.554e-15 -4 3 3.25 3.0812719e2 3.0812719e2 -3.331e-16 -2.5871885e0 -2.5871885e0 -5.551e-16 5.6414554e1 5.6414554e1 -2.220e-16 -5 3.25 3.5 2.0807482e2 2.0807482e2 -6.661e-16 -1.6762487e0 -1.6762487e0 -1.110e-16 3.9468336e1 3.9468336e1 -3.331e-16 -6 3.5 4 9.6856769e1 9.6856769e1 -3.331e-16 -8.1027456e-1 -8.1027456e-1 -1.110e-16 1.9822014e1 1.9822014e1 -1.110e-15 -7 4 4.5 2.2383492e1 2.2383492e1 -4.441e-16 -2.2022770e-1 -2.2022770e-1 -5.551e-16 5.3540011e0 5.3540011e0 -3.331e-16 +0 2 2.25 6.5070305e2 6.5070305e2 -5.551e-16 -7.8692484e0 -7.8692484e0 -4.441e-16 1.1175729e2 1.1175729e2 -1.221e-15 +1 2.25 2.5 5.9601236e2 5.9601236e2 -7.772e-16 -6.5623495e0 -6.5623495e0 -4.441e-16 1.0083341e2 1.0083341e2 -8.882e-16 +2 2.5 2.75 5.1561247e2 5.1561247e2 -8.882e-16 -5.2348261e0 -5.2348261e0 -8.882e-16 8.9874343e1 8.9874343e1 -1.221e-15 +3 2.75 3 4.1534629e2 4.1534629e2 -4.441e-16 -3.7590420e0 -3.7590420e0 -6.661e-16 7.3935106e1 7.3935106e1 -1.110e-15 +4 3 3.25 3.0812719e2 3.0812719e2 -5.551e-16 -2.5871885e0 -2.5871885e0 -5.551e-16 5.6414554e1 5.6414554e1 2.220e-16 +5 3.25 3.5 2.0807482e2 2.0807482e2 
-5.551e-16 -1.6762487e0 -1.6762487e0 -2.220e-16 3.9468336e1 3.9468336e1 -6.661e-16 +6 3.5 4 9.6856769e1 9.6856769e1 -4.441e-16 -8.1027456e-1 -8.1027456e-1 -4.441e-16 1.9822014e1 1.9822014e1 -1.443e-15 +7 4 4.5 2.2383492e1 2.2383492e1 -6.661e-16 -2.2022770e-1 -2.2022770e-1 -5.551e-16 5.3540011e0 5.3540011e0 -6.661e-16 "; const ROTATE_PID_BASIS_READ_CHANNELS_STR: &str = " c entry @@ -455,14 +456,14 @@ fn help() { } #[test] -fn cc1() { +fn cc_0() { let output = NamedTempFile::new("cc1.pineappl.lz4").unwrap(); Command::cargo_bin("pineappl") .unwrap() .args([ "write", - "--cc1", + "--cc=0", "../test-data/LHCB_WP_7TEV_opt.pineappl.lz4", output.path().to_str().unwrap(), ]) @@ -483,14 +484,15 @@ fn cc1() { } #[test] -fn cc2() { +fn cc_1() { let output = NamedTempFile::new("cc2.pineappl.lz4").unwrap(); Command::cargo_bin("pineappl") .unwrap() .args([ "write", - "--cc2", + "--cc", + "1", "../test-data/LHCB_WP_7TEV_opt.pineappl.lz4", output.path().to_str().unwrap(), ]) @@ -769,7 +771,7 @@ fn scale_by_order() { .unwrap() .args([ "write", - "--scale-by-order=2,1,0.5,0.5", + "--scale-by-order=2,1,0.5,0.5,1.0", "--scale=0.5", "../test-data/LHCB_WP_7TEV_opt.pineappl.lz4", output.path().to_str().unwrap(), diff --git a/pineappl_fastnlo/src/lib.rs b/pineappl_fastnlo/src/lib.rs index e72b0d152..27bca84ab 100644 --- a/pineappl_fastnlo/src/lib.rs +++ b/pineappl_fastnlo/src/lib.rs @@ -228,18 +228,18 @@ impl ffi::EScaleFunctionalForm { match self { Self::kScale1 => s1 * s1, Self::kScale2 => s2 * s2, - Self::kQuadraticSum => s1 * s1 + s2 * s2, - Self::kQuadraticMean => 0.5 * (s1 * s1 + s2 * s2), - Self::kQuadraticSumOver4 => 0.25 * (s1 * s1 + s2 * s2), + Self::kQuadraticSum => s1.mul_add(s1, s2 * s2), + Self::kQuadraticMean => 0.5 * s1.mul_add(s1, s2 * s2), + Self::kQuadraticSumOver4 => 0.25 * s1.mul_add(s1, s2 * s2), Self::kLinearMean => 0.25 * (s1 + s2).powi(2), Self::kLinearSum => (s1 + s2).powi(2), Self::kScaleMax => s1.max(s2).powi(2), Self::kScaleMin => s1.min(s2).powi(2), 
Self::kProd => (s1 * s2).powi(2), - Self::kS2plusS1half => 0.5 * (s1 * s1 + 2.0 * s2 * s2), + Self::kS2plusS1half => 0.5 * s1.mul_add(s1, 2.0 * s2 * s2), Self::kPow4Sum => (s1.powi(4) + s2.powi(4)).sqrt(), - Self::kWgtAvg => (s1.powi(4) + s2.powi(4)) / (s1 * s1 + s2 * s2), - Self::kS2plusS1fourth => 0.25 * s1 * s1 + s2 * s2, + Self::kWgtAvg => (s1.powi(4) + s2.powi(4)) / s1.mul_add(s1, s2 * s2), + Self::kS2plusS1fourth => (0.25 * s1).mul_add(s1, s2 * s2), Self::kExpProd2 => (s1 * (0.3 * s2).exp()).powi(2), Self::kExtern => todo!(), Self::kConst => todo!(), diff --git a/pineappl_py/Cargo.toml b/pineappl_py/Cargo.toml index 526e3dff4..7aa5af364 100644 --- a/pineappl_py/Cargo.toml +++ b/pineappl_py/Cargo.toml @@ -29,6 +29,6 @@ crate-type = ["cdylib"] [dependencies] itertools = "0.10.1" ndarray = "0.15.4" -numpy = "0.21.0" -pineappl = { path = "../pineappl", version = "=0.8.2" } -pyo3 = { features = ["extension-module"], version = "0.21.2" } +numpy = "0.22.0" +pineappl = { path = "../pineappl", version = "=1.0.0-alpha1" } +pyo3 = { features = ["extension-module"], version = "0.22.5" } diff --git a/pineappl_py/docs/source/advanced.ipynb b/pineappl_py/docs/source/advanced.ipynb index 9439a4e04..2df183128 100644 --- a/pineappl_py/docs/source/advanced.ipynb +++ b/pineappl_py/docs/source/advanced.ipynb @@ -297,7 +297,18 @@ " q2 = 90.0 * 90.0\n", "\n", " # fill the interpolation grid\n", - " grid.fill(x1, x2, q2, 0, np.abs(yll), 0, weight)" + " n_tuple = [\n", + " q2,\n", + " x1,\n", + " x2,\n", + " ] # Pass kinematics as list; order has to follow `[q2, x1, x2, ..., xn]`\n", + " grid.fill(\n", + " order=0,\n", + " observable=np.abs(yll),\n", + " channel=0,\n", + " ntuple=n_tuple,\n", + " weight=weight,\n", + " )" ] }, { @@ -319,15 +330,99 @@ "metadata": {}, "outputs": [], "source": [ - "def generate_grid(calls: int) -> pineappl.grid.Grid:\n", + "from pineappl.boc import Channel, Kinematics, ScaleFuncForm, Scales\n", + "from pineappl.convolutions import Conv, ConvType\n", + 
"from pineappl.grid import Grid, Order\n", + "from pineappl.interpolation import (\n", + " Interp,\n", + " InterpolationMethod,\n", + " MappingMethod,\n", + " ReweightingMethod,\n", + ")\n", + "from pineappl.pids import PidBasis\n", + "\n", + "\n", + "def grid_specs(\n", + " orders: list[Order],\n", + " channels: list[Channel],\n", + " bins: np.ndarray,\n", + ") -> Grid:\n", + " \"\"\"Construct the PineAPPL grid based on various specifications. These include\n", + " the types of kinematics involved, the types of convolutions required by the\n", + " involved hadrons, and the interpolations required by each kinematic variables.\n", + " \"\"\"\n", + " ### Define the specs that define the Grid ###\n", + " kinematics = [\n", + " Kinematics.Scale(0), # Scale\n", + " Kinematics.X(0), # momentum fraction x1\n", + " Kinematics.X(1), # momentum fraction x2\n", + " ]\n", + " # Define the interpolation specs for each item of the Kinematics\n", + " interpolations = [\n", + " Interp(\n", + " min=1e2,\n", + " max=1e8,\n", + " nodes=40,\n", + " order=3,\n", + " reweight_meth=ReweightingMethod.NoReweight,\n", + " map=MappingMethod.ApplGridH0,\n", + " interpolation_meth=InterpolationMethod.Lagrange,\n", + " ), # Interpolation on the Scale\n", + " Interp(\n", + " min=2e-7,\n", + " max=1,\n", + " nodes=50,\n", + " order=3,\n", + " reweight_meth=ReweightingMethod.ApplGridX,\n", + " map=MappingMethod.ApplGridF2,\n", + " interpolation_meth=InterpolationMethod.Lagrange,\n", + " ), # Interpolation on momentum fraction x1\n", + " Interp(\n", + " min=2e-7,\n", + " max=1,\n", + " nodes=50,\n", + " order=3,\n", + " reweight_meth=ReweightingMethod.ApplGridX,\n", + " map=MappingMethod.ApplGridF2,\n", + " interpolation_meth=InterpolationMethod.Lagrange,\n", + " ), # Interpolation on momentum fraction x2\n", + " ]\n", + "\n", + " # Construct the `Scales` object\n", + " scale_funcs = Scales(\n", + " ren=ScaleFuncForm.Scale(0),\n", + " fac=ScaleFuncForm.Scale(0),\n", + " 
frg=ScaleFuncForm.NoScale(0),\n", + " )\n", + "\n", + " # Construct the type of convolutions and the convolution object\n", + " # In our case we have symmetrical unpolarized protons in the initial state\n", + " conv_type = ConvType(polarized=False, time_like=False)\n", + " conv_object = Conv(conv_type=conv_type, pid=2212)\n", + " convolutions = [conv_object, conv_object]\n", + "\n", + " return Grid(\n", + " pid_basis=PidBasis.Evol,\n", + " channels=channels,\n", + " orders=orders,\n", + " bin_limits=bins,\n", + " convolutions=convolutions,\n", + " interpolations=interpolations,\n", + " kinematics=kinematics,\n", + " scale_funcs=scale_funcs,\n", + " )\n", + "\n", + "\n", + "def generate_grid(calls: int) -> Grid:\n", " \"\"\"Generate the grid.\"\"\"\n", " # create a new luminosity function for the $\\gamma\\gamma$ initial state\n", - " lumi_entries = [pineappl.boc.Channel([(22, 22, 1.0)])]\n", - " # only LO $\\alpha_\\mathrm{s}^0 \\alpha^2 \\log^0(\\xi_\\mathrm{R}) \\log^0(\\xi_\\mathrm{F})$\n", - " orders = [pineappl.grid.Order(0, 2, 0, 0)]\n", + " channels = [Channel([([22, 22], 1.0)])]\n", + " # only LO $\\alpha_\\mathrm{s}^0 \\alpha^2 \\log^0(\\xi_\\mathrm{R}) \\log^0(\\xi_\\mathrm{F}) \\log^0(\\xi_\\mathrm{A})$$\n", + " orders = [Order(0, 2, 0, 0, 0)]\n", " bins = np.arange(0, 2.4, 0.1)\n", - " params = pineappl.subgrid.SubgridParams()\n", - " grid = pineappl.grid.Grid(lumi_entries, orders, bins, params)\n", + "\n", + " # Instantiate the PineAPPL Grid\n", + " grid = grid_specs(orders, channels, bins)\n", "\n", " # fill the grid with phase-space points\n", " print(f\"Generating {calls} events, please wait...\")\n", @@ -357,7 +452,7 @@ "text": [ "Generating 1000000 events, please wait...\n", "Done.\n", - "LHAPDF 6.5.0 loading /Users/tanjona/miniconda3/envs/nnpdf/share/LHAPDF/NNPDF31_nnlo_as_0118_luxqed/NNPDF31_nnlo_as_0118_luxqed_0000.dat\n", + "LHAPDF 6.5.4 loading 
/home/tanjona/miniconda3/envs/nnpdf/share/LHAPDF/NNPDF31_nnlo_as_0118_luxqed/NNPDF31_nnlo_as_0118_luxqed_0000.dat\n", "NNPDF31_nnlo_as_0118_luxqed PDF set, member #0, version 2; LHAPDF ID = 325100\n" ] } @@ -372,7 +467,11 @@ "# of the partonic cross sections with the PDFs as given by our master\n", "# formula\n", "pdf = lhapdf.mkPDF(\"NNPDF31_nnlo_as_0118_luxqed\", 0)\n", - "bins = grid.convolve_with_one(2212, pdf.xfxQ2, pdf.alphasQ2)" + "bins = grid.convolve(\n", + " pdg_convs=[grid.convolutions()[0]],\n", + " xfxs=[pdf.xfxQ2],\n", + " alphas=pdf.alphasQ2,\n", + ")" ] }, { @@ -395,7 +494,7 @@ "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAf0AAAFvCAYAAABAYhLAAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuNSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/xnp5ZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAsVUlEQVR4nO3df3RU9Z3/8dfMQDIECAYjQSA1QiwISiKhCdBScBdlXStq9btgXaHZSHfVVDGrrmm3sKg1UiDiEQSrRlpdhfagrrt2URuI9QcWl8BBUCjBQqCSEMqPISFMwtz7/QMyNU2AzMydmZu5z8c5OWfuzed+8p7PSfI69zP3fq7LNE1TAAAg4bnjXQAAAIgNQh8AAIcg9AEAcAhCHwAAhyD0AQBwCEIfAACHIPQBAHCIHvEuIN4Mw9CXX36pvn37yuVyxbscAABCYpqmjh8/rkGDBsntPve5vOND/8svv1RmZma8ywAAICL79u3TkCFDztnG8aHft29fSdKePXuUlpYW52qcJRAIaPfu3Ro2bJg8Hk+8y3EMxj0+GPf4SfSx9/l8yszMDObZuTg+9Num9FNTU5WamhrnapwlEAioT58+Sk1NTcg/RLti3OODcY8fp4x9Vz6i5kI+AAAcgtAHAMAhCH0AAByC0AcAwCEIfQAAHILQBwDAIQh9AAAcgtAHAMAhCH0AAByC0AcAwCEcvwxvUEuT1JIUeT89UySe1gcAsCFC/wzPkpFSsgVhPSRfKnqH4AcA2A7T+1bbv1FqPhLvKgAA6IAz/TMCt7wopfUPv4NTJ6VXZ5x+bRjWFAUAgIUI/TbJqVKvC8I/vrXZslIAAIgGQt8JTFNqPWFdf1ysCADdEqGf6ExTqpgq7fu9dX1ysSIAdEtcyJfoWk9YG/gSFysCQDfFmb6T/L9fSt7U8I/nYkUA6NYIfSfxcrEiADgZ0/sAADgEoQ8AgEMQ+gAAOAShDwCAQxD6AAA4BKEPAIBDcMueXVm1dG6LhcvvAgC6NULfjqKxdC4AwPGY3rejaCydm/51qWcva/sEAHQrnOnbXaRL57bp2UvqkRx5PwCAbovQt7tIl84FAOAMpvcBAHAIQh8AAIcg9AEAcAg+00d4Wk9ILU2R9REInL49EQAQE4R+NEQaiN1hQZ2nroy4C4+kr6WPli5bH3k9AIDzIvSjwYJAtKUeXiljlFS/3bIuUw5tVaD5qNQ33bI+AQCdI/StEoVAtN2COi6XNG2p5PdFPi1/6qT06ozTrw0j8toAAOdF6FvFykBsY8cFdVwuydsv8n5amyPvAwAQEkLfSlYFIgAAUcAtewAAOARn+og/K27/65lyeqYFAHBWtgz9ZcuWaeHCha
qrq1NOTo6efvpp5efnd9p25cqVKiwsbLcvOTlZJ0+ejEWpsIBnaU7knQzJl4reIfgB4BxsN72/evVqlZSUaN68eaqurlZOTo6mTp2qgwcPnvWY1NRUHThwIPi1d+/eGFaMsPTwyhwwyrr+9m+Umo9Y1x8AJCDbnemXl5dr9uzZwbP3FStW6K233lJFRYUefvjhTo9xuVwaOHBgLMtEpFwuGTc8rZraA8oekCKPO8wzdG79A4Aus1Xot7S0aNOmTSotLQ3uc7vdmjJlijZs2HDW4xobG3XJJZfIMAyNGTNGjz/+uEaN6vws0u/3y+/3B7d9Pp8kKWCYChgsCRtLAVMK9ExRILmfFG7ou5PkaesvYJxe2hfnFAgEZBiGAoxVTDHu8ZPoYx/K+7JV6B86dEiBQEAZGRnt9mdkZGjHjh2dHjN8+HBVVFRo9OjROnbsmBYtWqQJEyZo+/btGjJkSIf2ZWVlmj9/fof9Xxw6odSWY9a8EXSJYUqHj/tVI1/Yme86dVLDz7yu2bNHZvJhy+pLVIZh6PDhw6qpqZHbbbtP+BIW4x4/iT72jY2NXW5rq9APx/jx4zV+/Pjg9oQJE3T55Zfr2Wef1aOPPtqhfWlpqUpKSoLbPp9PmZmZGpqeorQLucc+lgKGqRqZyh6YGv70fmtS8GV2VpbU50JriktggUBANTU1ys7OlsfjOf8BsATjHj+JPvZtM9ZdYavQT09Pl8fjUX19fbv99fX1Xf7MvmfPnrrqqqtUU1PT6feTk5OVnNxxlTuP2xV+8CBsbpcrsrH/ynEej1tKwD/oaHC73fJ4PAn5D9DOGPf4SeSxD+U92WqeIykpSXl5eaqsrAzuMwxDlZWV7c7mzyUQCOjTTz/VxRdfHK0yAQDolmx1pi9JJSUlmjVrlsaOHav8/HwtWbJETU1Nwav5Z86cqcGDB6usrEyS9Mgjj2jcuHHKzs7W0aNHtXDhQu3du1d33nlnPN8GAAC2Y7vQnz59uhoaGjR37lzV1dUpNzdXa9euDV7cV1tb2+5CjCNHjmj27Nmqq6tTWlqa8vLy9NFHH2nkyJHxegsAANiS7UJfkoqLi1VcXNzp96qqqtptP/nkk3ryySdjUBUAAN2brT7TBwAA0UPoAwDgELac3gfCYsXT+iSe2AcgYRH6SBxPXWlNPzyxD0CCYnof3VsPr5Rh4dP6JJ7YByBhcaaP7s3lkqYtlfw+yYzwgUk8sQ9AgiP00f25XJLXgucmtDZH3gcA2BjT+wAAOAShDwCAQxD6AAA4BKEPAIBDEPoAADgEoQ8AgEMQ+gAAOAShDwCAQxD6AAA4BCvyAZ3hiX0AEhChD3SGJ/YBSEBM7wNteGIfgATHmT7Qhif2AUhwhD7wVdF4Yh/XBwCwCUIfiDauDwBgE3ymD0QD1wcAsCHO9IFo4PoAADZE6APREo3rAwAgAkzvAwDgEIQ+AAAOQegDAOAQhD4AAA5B6AMA4BCEPgAADsEte0B3YsWSvoFA5GsHAOiWCH2gO7FgSV+PpK+lj5YuWx95PQC6Fab3AbuLwpK+KYe2Ss1HLe0TgP1xpg/YHUv6ArAIoQ90ByzpC8ACTO8DAOAQhD4AAA5B6AMA4BCEPgAADmHL0F+2bJmysrLk9XpVUFCgjRs3dum4VatWyeVy6aabbopugQAAdEO2C/3Vq1erpKRE8+bNU3V1tXJycjR16lQdPHjwnMft2bNHDzzwgCZOnBijSgEA6F5sd8teeXm5Zs+ercLCQknSihUr9NZbb6miokIPP/xwp8cEAgHdfvvtmj9/vt5//30dPXr0rP37/X75/f7gts/nO92HYSpgsDRpLAUMU4bJuMeUYcpz5mXAME4vyYuYCAQCMgxDAcY85hJ97EN5X7YK/ZaWFm3atEmlpaXBfW63W1OmTNGGDRvOetwjjzyiAQMGqKioSO+///45f0ZZWZnmz5/fYf8Xh04oteVY+MUjZIYpHT7uV418crviXY0zuE6d1PAzr3
fv2StXr6PxLMdRDMPQ4cOHVVNTI7fbdpOsCS3Rx76xsbHLbW0V+ocOHVIgEFBGRka7/RkZGdqxY0enx3zwwQd64YUXtGXLli79jNLSUpWUlAS3fT6fMjMzNTQ9RWkXWrD4CbosYJiqkansganykPqx0ZoUfDks6xJ5Ui+KYzHOEggEVFNTo+zsbHk8nvMfAMsk+ti3zVh3ha1CP1THjx/XHXfcoeeee07p6eldOiY5OVnJyckd9nvcLoInDtwuF2MfS18ZZ4/bnZD/AO3MfWbMGffYS+SxD+U92Sr009PT5fF4VF9f325/fX29Bg4c2KH97t27tWfPHt1www3BfcaZ9cR79OihnTt3atiwYdEtGgCAbsJWH24kJSUpLy9PlZWVwX2GYaiyslLjx4/v0H7EiBH69NNPtWXLluDXtGnTdPXVV2vLli3KzMyMZfkAANiarc70JamkpESzZs3S2LFjlZ+fryVLlqipqSl4Nf/MmTM1ePBglZWVyev16oorrmh3/AUXXCBJHfYDAOB0tgv96dOnq6GhQXPnzlVdXZ1yc3O1du3a4MV9tbW1CXn1JQAA0Wa70Jek4uJiFRcXd/q9qqqqcx67cuVK6wsCACABcMoMAIBDhHSm/+abb4b8A6655hr16tUr5OMAAIC1Qgr9UB9k43K5tGvXLg0dOjSk4wAAgPVCnt6vq6uTYRhd+kpJSYlGzQAAIAwhhf6sWbNCmqr/x3/8R6WmpoZcFAAAsF5I0/svvvhiSJ0vX748pPYAACB6LLl63zRNmSaPRwUAwM4iCv0XXnhBV1xxhbxeb3B1vOeff96q2gAAgIXCXpxn7ty5Ki8v1w9/+MPguvgbNmzQ/fffr9raWj3yyCOWFQkAACIXdugvX75czz33nG677bbgvmnTpmn06NH64Q9/SOgDAGAzYU/vt7a2auzYsR325+Xl6dSpUxEVBQAArBd26N9xxx2dXp3/85//XLfffntERQGIgdYTUktT5F9cxAt0GyFN75eUlARfu1wuPf/883rnnXc0btw4SdLvf/971dbWaubMmdZWCcBynqU51nQ0JF8qekdyuazpD0DUhBT6mzdvbredl5cnSdq9e7ckKT09Xenp6dq+fbtF5QGwVA+vzAGj5Dpo4d/o/o1S8xEppb91fQKIipBCf/369dGqA0AsuFwybnhaNbUHlD0gRR53BGfnp05Kr844/dowrKkPQFSFffX+V7UtzONieg+wP5dLZlJvqVc/KZLQb222riYAMcHiPAAAOASL8wAA4BAszgMAgEOwOA8AAA7B4jwAADhERFfvv/DCC2ddnOerC/mUl5dHViUAAIhY2KG/bds2jRkzRlLHxXm2bdsWbMdtfAAA2EPYoc9CPQAAdC8hfaa/detWGSGsvLV9+3Yu6gMAwCZCCv2rrrpKf/7zn7vcfvz48aqtrQ25KAAAYL2QpvdN09RPfvITpaSkdKl9S0tLWEUBAADrhRT63/72t7Vz584utx8/frx69eoVclEAAMB6IYV+VVVVlMoAAADRZslT9gA4XOsJqaUp8n56pkjc5gtEDaEPIHJPXWlNP0PypaJ3CH4gSiJ6tC4AB+vhlTJGWdvn/o1S8xFr+wQQxJk+gPC4XNK0pZLfJ5lmZH2dOim9OuP06xDWAgEQGkIfQPhcLsnbL/J+Wpsj7wPAeTG9DwCAQ4R0pn/ppZeG9QCdOXPm6N577w35OAAAYJ2QQn/lypVh/ZCsrKywjgMAANYJKfQnTZoUrToAAECU8Zk+AAAOEXHob9u2TRUVFaqurm63v7GxMewH7ixbtkxZWVnyer0qKCjQxo0bz9r2tdde09ixY3XBBReod+/eys3N1UsvvRTWzwUAIJFFHPrLly/Xhg0b9L//+7+6/fbbVV5erubmZvn9fv3TP/1TyP2tXr1aJSUlmjdvnqqrq5WTk6OpU6fq4MGDnbbv37+/fvzjH2vDhg3aunWrCgsLVVhYqLfffjvStwYAQEKJOP
QXLFigr3/96/rggw909OhRvfnmmxoxYoQef/zxsM70y8vLNXv2bBUWFmrkyJFasWKFUlJSVFFR0Wn7yZMn6+abb9bll1+uYcOG6b777tPo0aP1wQcfRPrWAABIKBEvztOnTx89+OCDevDBB+X3+7Vr1y41NDTowIEDId/e19LSok2bNqm0tDS4z+12a8qUKdqwYcN5jzdNU+vWrdPOnTu1YMGCTtv4/X75/f7gts/nkyQFDFMBI8JVxRCSgGHKMBn3WLPluBumPGdeBgKGFAjEtZxoCAQCMgxDgQR8b3aX6GMfyvuKOPS3bdumjRs3Kjc3V2PGjNEVV1wh6fRn+jfffHNIfR06dEiBQEAZGRnt9mdkZGjHjh1nPe7YsWMaPHiw/H6/PB6PnnnmGV1zzTWdti0rK9P8+fM77P/i0AmlthwLqV5ExjClw8f9qpFPbp6vEjN2HHfXqZMafuZ1zZ49MpMPx7WeaDAMQ4cPH1ZNTY3cbq6hjqVEH/vGxsYut4049JcvX66WlhYdOHBAixcvVl5enu666y75/X7dd999evnllyP9EefVt29fbdmyRY2NjaqsrFRJSYmGDh2qyZMnd2hbWlqqkpKS4LbP51NmZqaGpqco7UILlhNFlwUMUzUylT0wVR67pI8D2HLcW5OCL7OzsqQ+F8avligJBAKqqalRdna2PB7P+Q+AZRJ97NtmrLsi4tBfsGCBli9frnXr1sntduvNN9/UU089pVtvvTXkz/TT09Pl8XhUX1/fbn99fb0GDhx41uPcbreys7MlSbm5ufr8889VVlbWaegnJycrOTm5w36P22Wff4AO4na5GPs4sN24f6UOj3FSCpyMvM+eKbZ7RK/b7ZbH40nI4LG7RB77UN5T2KG/b98+ZWZmWvqZflJSkvLy8lRZWambbrpJ0ulpmcrKShUXF3e5H8Mw2n1uD6AbeepKa/oZki8VvWO74AfiKezQHzFihP71X/9VDz/8sFJSUiSdPotu+0xfkmbMmBFyvyUlJZo1a5bGjh2r/Px8LVmyRE1NTSosLJQkzZw5U4MHD1ZZWZmk05/Rjx07VsOGDZPf79dvfvMbvfTSS1q+fHm4bw1ArPXwShmjpPrt1vW5f6PUfERK6W9dn0A3F3bov/vuu7r//vv1wgsv6Kc//am+//3vd2gTzgUT06dPV0NDg+bOnau6ujrl5uZq7dq1wYv7amtr2/Xb1NSku+++W/v371evXr00YsQIvfzyy5o+fXq4bw1ArLlc0rSlkt8nmRHeVXDqpPTqmRMOw4i8NiCBuEwzsr+wX/7yl/rxj3+sAQMGaMmSJZo4caJVtcWEz+dTv379dHjrO0q7MD3e5ThKwDC168AxXXZxP/t8tuwACT/urc3Si9edfv3AbqmPPf6uA4GAdu3apcsuuywhP1e2s0Qf+7YcO3bsmFJTU8/ZNuJ7F2bOnKmdO3fq+uuv13XXXadbb71Vf/zjHyPtFgAAWMyyGxavvfZa3XnnnXr99dc1cuRIPfTQQyHdOwgAAKIr7M/0V6xYoU8++USffPKJPv/8c7ndbl1xxRX6l3/5F+Xk5GjVqlUaOXJk8IE4AAAgvsIO/Z/+9KcqKCjQzJkzNW7cOOXl5alXr17B7//gBz/Q448/ru9///vatm2bJcUCAIDwRXSf/vkUFRXpJz/5Sbg/AgAAWCiqixBnZGRo3bp10fwRAACgi0I607/00ktDXmVPkubMmaN777035OMAAIB1Qgr9lStXhvVDsrKywjoOAABYJ6TQnzRpUrTqAAAAUZZ4DxYGAACdCulM/6vPoT+f8vLykIsBAADRE1Lob968ud12dXW1Tp06peHDh0uS/vCHP8jj8SgvL8+6CgEAgCVCCv3169cHX5eXl6tv3776xS9+obS0NEnSkSNHVFhY2O0eugMAgBOE/Zn+4sWLVVZWFgx8SUpLS9Njjz2mxYsXW1IcAACwTtih7/P51NDQ0GF/Q0ODjh8/HlFRAADAem
GH/s0336zCwkK99tpr2r9/v/bv3681a9aoqKhI3/3ud62sEQAAWCCip+w98MAD+t73vqfW1tbTnfXooaKiIi1cuNCyAgEgbK0npJamyPromSKFsRIpYEdhh35KSoqeeeYZLVy4ULt375YkDRs2TL1797asOACIyFNXRt7HkHyp6B2CHwkh5On9uXPnatOmTcHt3r17a/To0Ro9ejSBDyD+eniljFHW9bd/o9R8xLr+gDgK+Ux///79uu6665SUlKQbbrhB06ZN09/+7d8qKSkpGvUBQGhcLmnaUsnvk0wz/H5OnZRenXH6tWFYUxsQZyGHfkVFhQzD0Icffqj//u//1pw5c3TgwAFdc801uvHGG/Wd73xH/fv3j0atANA1Lpfk7RdZH63N1tQC2EhYV++73W5NnDhRP/vZz7Rz5079/ve/V0FBgZ599lkNGjRI3/72t7Vo0SL96U9/srpeAAAQprBv2WtsbAy+vvzyy/XQQw/pww8/1L59+zRr1iy9//77evXVVy0pEgAARC7sq/f79eunX/3qV7rlllva7b/oootUVFSkoqKiiIsDAADWCftM3zRNPfvss/rmN7+pb33rW5ozZ44++eQTK2sDAAAWCjv0pdNP3RszZoy+9a1vafv27Zo4caIeeOABq2oDAAAWCnt6X5JeeeUVXXPNNcHtrVu36sYbb9TgwYN1//33R1wcAACwTthn+v3791dmZma7faNHj9bSpUu1fPnyiAsDAADWCjv0c3Nz9eKLL3bYn52drdra2oiKAgAA1gt7ev+xxx7T1VdfrS+//FJ33323Ro8eraamJj3++OO69NJLrawRAABYIOzQHzdunD7++GPde++9mjhxoswzy116vV79+te/tqxAAABgjYgu5MvJydF7772ngwcPatOmTTIMQwUFBUpPT7eqPgAAYJGQQr+kpOS8bSorKyVJ5eXl4VUEAACiIqTQ37x5c7vt6upqnTp1SsOHD5ck/eEPf5DH41FeXp51FQIAAEuEFPrr168Pvi4vL1ffvn31i1/8QmlpaZKkI0eOqLCwUBMnTrS2SgAAELGwb9lbvHixysrKgoEvSWlpaXrssce0ePFiS4oDAADWCTv0fT6fGhoaOuxvaGjQ8ePHIyoKAABYL+zQv/nmm1VYWKjXXntN+/fv1/79+7VmzRoVFRXpu9/9rpU1AgAAC4R9y96KFSv0wAMP6Hvf+55aW1tPd9ajh4qKirRw4ULLCgQAANYI+0w/JSVFzzzzjP785z9r8+bN2rx5sw4fPqxnnnlGvXv3jqioZcuWKSsrS16vVwUFBdq4ceNZ2z733HOaOHGi0tLSlJaWpilTppyzPQCErPWE1NIU+deZRcyAeIlocR5J6t27t0aPHm1FLZKk1atXq6SkRCtWrFBBQYGWLFmiqVOnaufOnRowYECH9lVVVbrttts0YcIEeb1eLViwQNdee622b9+uwYMHW1YXAAd76sqIu/BI+lr6aOmy9edtC0RL2Gf60VJeXq7Zs2ersLBQI0eO1IoVK5SSkqKKiopO2//nf/6n7r77buXm5mrEiBF6/vnnZRhGcJEgAAhLD6+UMcrSLlMObZWaj1raJxCKiM/0rdTS0qJNmzaptLQ0uM/tdmvKlCnasGFDl/o4ceKEWltb1b9//06/7/f75ff7g9s+n0+SFDBMBQym3mIpYJgyTMY91hj3EHznacl/XDKNyPo5dVKe1bdJkgKnTkmBgAXFoasCgYAMw1AgQcc9lPdlq9A/dOiQAoGAMjIy2u3PyMjQjh07utTHv/3bv2nQoEGaMmVKp98vKyvT/PnzO+z/4tAJpbYcC71ohM0wpcPH/aqRT25XvKtxDsY9HJENlOuUS8PPvN69Z69cvY5GXBG6zjAMHT58WDU1NXK7bTfBHbHGxsYut7VV6EfqiSee0KpVq1RVVSWv19tpm9LS0nbPEPD5fMrMzNTQ9BSlXdgvVqVCp884a2Qqe2CqPKRPzDDucdCaFH
w5LOsSeVIvimMxzhMIBFRTU6Ps7Gx5PJ54l2O5thnrrrBV6Kenp8vj8ai+vr7d/vr6eg0cOPCcxy5atEhPPPGEfvvb357zwsLk5GQlJyd32O9xu/gHGAdul4uxjwPGPca+Ms4etzshg8fu3GfGPRHHPpT3ZKt5jqSkJOXl5bW7CK/torzx48ef9bif/exnevTRR7V27VqNHTs2FqUCANDt2OpMXzr9+N5Zs2Zp7Nixys/P15IlS9TU1KTCwkJJ0syZMzV48GCVlZVJkhYsWKC5c+fqlVdeUVZWlurq6iRJffr0UZ8+feL2PgAAsBvbhf706dPV0NCguXPnqq6uTrm5uVq7dm3w4r7a2tp2F2IsX75cLS0tuvXWW9v1M2/ePP3Hf/xHLEsHAMDWbBf6klRcXKzi4uJOv1dVVdVue8+ePdEvCACs0ra6X6R6pkgurslAaGwZ+gCQqDxLc6zpaEi+VPQOwY+Q2OpCPgBISD28MgdYu7qf9m+Umo9Y2ycSHmf6ABBtLpeMG55WTe0BZQ9IiexWyVMnpVdnnH5tRLhSIByH0AeAWHC5ZCb1lnr1U0RLIbY2W1cTHIfpfQAAHILQBwDAIQh9AAAcgtAHAMAhCH0AAByC0AcAwCEIfQAAHILQBwDAIQh9AAAcgtAHAMAhCH0AAByC0AcAwCEIfQAAHILQBwDAIQh9AAAcgtAHAMAhesS7AABAmFpPSC1NkffTM0VyuSLvB7ZH6ANAd/XUldb0MyRfKnqH4HcApvcBoDvp4ZUyRlnb5/6NUvMRa/uELXGmDwDdicslTVsq+X2SaUbW16mT0qszTr82jMhrg+0R+gDQ3bhckrdf5P20NkfeB7oVpvcBAHAIQh8AAIcg9AEAcAhCHwAAhyD0AQBwCEIfAACH4JY9AABL+joEoQ8AYElfh2B6HwCciiV9HYczfQBwKpb0dRxCHwCcjCV9HYXpfQAAHILQBwDAIQh9AAAcwnahv2zZMmVlZcnr9aqgoEAbN248a9vt27frlltuUVZWllwul5YsWRK7QgEA6GZsFfqrV69WSUmJ5s2bp+rqauXk5Gjq1Kk6ePBgp+1PnDihoUOH6oknntDAgQNjXC0AAN2LrUK/vLxcs2fPVmFhoUaOHKkVK1YoJSVFFRUVnbb/xje+oYULF2rGjBlKTk6OcbUAAHQvtrllr6WlRZs2bVJpaWlwn9vt1pQpU7RhwwbLfo7f75ff7w9u+3w+SVLAMBUwIrxPFSEJGKYMk3GPNcY9PhJ+3A1TnjMvAwFDCgTiWs5XBQIBGYahgI1qslIo78s2oX/o0CEFAgFlZGS025+RkaEdO3ZY9nPKyso0f/78Dvu/OHRCqS3HLPs5OD/DlA4f96tGPrlZsTNmGPf4SPRxd506qeFnXtfs2SMz+XBc6/kqwzB0+PBh1dTUyO221QS3JRobG7vc1jahHyulpaUqKSkJbvt8PmVmZmpoeorSLrRggQp0WcAwVSNT2QNT5UnE/4I2xbjHR8KPe2tS8GV2VpbU58L41fJXAoGAampqlJ2dLY/Hc/4Dupm2GeuusE3op6eny+PxqL6+vt3++vp6Sy/SS05O7vTzf4/blZh/iDbndrkY+zhg3OMjocf9K+/J43FLNgtXt9stj8eTkKEfynuyzTxHUlKS8vLyVFlZGdxnGIYqKys1fvz4OFYGAAhJ22N6I/2K9HkA6MA2Z/qSVFJSolmzZmns2LHKz8/XkiVL1NTUpMLCQknSzJkzNXjwYJWVlUk6ffHfZ599Fnz9pz/9SVu2bFGfPn2UnZ0dt/cBAI7GY3pty1ahP336dDU0NGju3Lmqq6tTbm6u1q5dG7y4r7a2tt1FGF9++aWuuuqq4PaiRYu0aNEiTZo0SVVVVbEuHwCcq+0xvfXbreuz7TG9Kf2t69PhbBX6klRcXKzi4uJOv/fXQZ6VlSWT6R8AiD8e09st2C70AQDdFI/ptT3bXMgHAACii9AHAMAhCH0AAByC0AcAwC
G4kA8AYF9tC/1EIhBgoZ8zCH0AgH1ZsNCPR9LX0kdLl62PvJ5ujul9AIC9tC30Y6GUQ1ul5qOW9tkdcaYPALAXFvqJGkIfAGA/LPQTFUzvAwDgEIQ+AAAOQegDAOAQhD4AAA5B6AMA4BBcvQ8AcAYrVveTpJ4pp+8u6IYIfQCAI3iW5ljT0ZB8qeidbhn8TO8DABJXD6/MAdau7qf9G6XmI9b2GSOc6QMAEpfLJeOGp1VTe0DZA1LkcUdwdp4Aq/sR+gCAxOZyyUzqLfXqJ0US+gmwuh/T+wAAOAShDwCAQxD6AAA4BKEPAIBDEPoAADgEV+8DABAqK1b3i8PKfoQ+AACheurKyPuIw8p+TO8DANAVPbxShoWr+8VhZT/O9AEA6AqXS5q2VPL7JNMMv584ruxH6AMA0FUul+TtF1kfcVzZj+l9AAAcgjN9AADixYq7AEI4ntAHACBerLgLwN/16wuY3gcAIJasvgsglB8dl58KAIBTWXUXQJujR6QnbulSU0IfAIBYs+IugDYnA11uyvQ+AAAOQegDAOAQhD4AAA5B6AMA4BC2DP1ly5YpKytLXq9XBQUF2rhx4znb//rXv9aIESPk9Xp15ZVX6je/+U2MKgUAoPuwXeivXr1aJSUlmjdvnqqrq5WTk6OpU6fq4MGDnbb/6KOPdNttt6moqEibN2/WTTfdpJtuuknbtm2LceUAANib7UK/vLxcs2fPVmFhoUaOHKkVK1YoJSVFFRUVnbZ/6qmn9Hd/93d68MEHdfnll+vRRx/VmDFjtHTp0hhXDgCAvdnqPv2WlhZt2rRJpaWlwX1ut1tTpkzRhg0bOj1mw4YNKikpabdv6tSpeuONNzpt7/f75ff7g9vHjh2TJB09EttnGkMKGKZ8viYdSWqVx+2KdzmOwbjHB+MeP4k+9r6jRyVJZhcW+rFV6B86dEiBQEAZGRnt9mdkZGjHjh2dHlNXV9dp+7q6uk7bl5WVaf78+R32D500PcyqAQCIv+PHj6tfv3Mv+GOr0I+F0tLSdjMDR48e1SWXXKLa2trzDhas5fP5lJmZqX379ik1NTXe5TgG4x4fjHv8JPrYm6ap48ePa9CgQedta6vQT09Pl8fjUX19fbv99fX1GjhwYKfHDBw4MKT2ycnJSk5O7rC/X79+CfnL0B2kpqYy9nHAuMcH4x4/iTz2XT1ptdWFfElJScrLy1NlZWVwn2EYqqys1Pjx4zs9Zvz48e3aS9K777571vYAADiVrc70JamkpESzZs3S2LFjlZ+fryVLlqipqUmFhYWSpJkzZ2rw4MEqKyuTJN13332aNGmSFi9erOuvv16rVq3S//3f/+nnP/95PN8GAAC2Y7vQnz59uhoaGjR37lzV1dUpNzdXa9euDV6sV1tbK7f7LxMUEyZM0CuvvKJ///d/149+9CNddtlleuONN3TFFVd06eclJydr3rx5nU75I7oY+/hg3OODcY8fxv4vXGZXrvEHAADdnq0+0wcAANFD6AMA4BCEPgAADkHoAwDgEI4IfR7VGz+hjP3KlSvlcrnafXm93hhWmxh+97vf6YYbbtCgQYPkcrnO+hyKr6qqqtKYMWOUnJys7OxsrVy5Mup1JppQx72qqqrD77vL5TrrEuLoXFlZmb7xjW+ob9++GjBggG666Sbt3LnzvMc59f98woc+j+qNn1DHXjq9YtaBAweCX3v37o1hxYmhqalJOTk5WrZsWZfa//GPf9T111+vq6++Wlu2bNGcOXN055136u23345ypYkl1HFvs3Pnzna/8wMGDIhShYnpvffe0z333KOPP/5Y7777rlpbW3XttdeqqanprMc4+v+8meDy8/PNe+65J7gdCATMQYMGmWVlZZ22/4d/+Afz+uuvb7evoKDA/Od//ueo1pmIQh37F1980ezXr1+MqnMGSebrr79+zjYPPfSQOWrUqHb7pk+fbk6dOjWKlSW2roz7+vXrTUnmkSNHYlKTUxw8eNCUZL733n
tnbePk//MJfabf9qjeKVOmBPd15VG9X20vnX5U79nao3PhjL0kNTY26pJLLlFmZqZuvPFGbd++PRblOhq/8/GVm5uriy++WNdcc40+/PDDeJfT7bU9Lr1///5nbePk3/mEDv1zPar3bJ+bhfqoXnQunLEfPny4Kioq9F//9V96+eWXZRiGJkyYoP3798eiZMc62++8z+dTc3NznKpKfBdffLFWrFihNWvWaM2aNcrMzNTkyZNVXV0d79K6LcMwNGfOHH3zm98856qsTv4/b7tleOFc48ePb/egpAkTJujyyy/Xs88+q0cffTSOlQHWGz58uIYPHx7cnjBhgnbv3q0nn3xSL730Uhwr677uuecebdu2TR988EG8S7GthD7Tj8WjetG5cMb+r/Xs2VNXXXWVampqolEizjjb73xqaqp69eoVp6qcKT8/n9/3MBUXF+t//ud/tH79eg0ZMuScbZ38fz6hQ59H9cZPOGP/1wKBgD799FNdfPHF0SoT4nfeTrZs2cLve4hM01RxcbFef/11rVu3Tpdeeul5j3H073y8rySMtlWrVpnJycnmypUrzc8++8z8wQ9+YF5wwQVmXV2daZqmeccdd5gPP/xwsP2HH35o9ujRw1y0aJH5+eefm/PmzTN79uxpfvrpp/F6C91WqGM/f/588+233zZ3795tbtq0yZwxY4bp9XrN7du3x+stdEvHjx83N2/ebG7evNmUZJaXl5ubN2829+7da5qmaT788MPmHXfcEWz/xRdfmCkpKeaDDz5ofv755+ayZctMj8djrl27Nl5voVsKddyffPJJ84033jB37dplfvrpp+Z9991nut1u87e//W283kK3dNddd5n9+vUzq6qqzAMHDgS/Tpw4EWzD//m/SPjQN03TfPrpp82vfe1rZlJSkpmfn29+/PHHwe9NmjTJnDVrVrv2v/rVr8yvf/3rZlJSkjlq1CjzrbfeinHFiSOUsZ8zZ06wbUZGhvn3f//3ZnV1dRyq7t7abgX766+2sZ41a5Y5adKkDsfk5uaaSUlJ5tChQ80XX3wx5nV3d6GO+4IFC8xhw4aZXq/X7N+/vzl58mRz3bp18Sm+G+tszCW1+x3m//xf8GhdAAAcIqE/0wcAAH9B6AMA4BCEPgAADkHoAwDgEIQ+AAAOQegDAOAQhD4AAA5B6AMA4BCEPgAADkHoAwDgEIQ+gLBUVVUpKyvL9n0C+AtCHwAAhyD0AQBwCEIfgCWGDBmiZ555pt2+jz76SCkpKdq7d2+cqgLwVYQ+AEsUFBTok08+CW6bpqk5c+bo/vvv1yWXXBLHygC0IfQBWGLcuHHtQv+ll17Svn37VFpaKkk6fvy47r77bm3durXTbQDRR+gDsMS4ceP0+eefq7GxUU1NTfrRj36kxx57TH369JEkLV++XH6/Xxs3bux0G0D0EfoALJGXlye3263q6motWLBAF110kQoLC4PfX7dunbKyspSbm9vpNoDoI/QBWCIlJUVXXnml1qxZo0WLFunJJ5+U2336X8zJkyfldrv12WefKS8vr8M2gNgg9AFYZty4cXr66ac1depUTZ48Obh/165dampqUl5enlwuV4dtALFB6AOwTE5Ojnr27KmFCxe229/Q0KAvvvhCd911V6fbAGKjR7wLAJA4Vq1apeLiYmVnZ7fbf+DAAd16661qbm6WYRgdtvv27RunigFn4UwfQEQMw1B9fb0ef/xx7dq1S/PmzWv3/UAgoOrqau3bt0/33ntv8GK/tu2ePXvGqXLAeTjTBxCR3/3ud/qbv/kbjRgxQmvWrFFqamq773s8Hi1evLjdvr/eBhAbhD6AsGRlZWnOnDmaPHmyDMOwtE8A0eEyTdOMdxEAACD6+EwfAACHIPQBAHAIQh8AAIcg9AEAcAhCHwAAhyD0AQBwCEIfAACHIPQBAHAIQh8AAIf4/5bmCcvpjkZOAAAAAElFTkSuQmCC", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAf0AAAFvCAYAAABAYhLAAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAsVUlEQVR4nO3df3RU9Z3/8dfMQDIECAYjQSA1QiwISiKhCdBScBdlXStq9btgXaHZSHfVVDGrrmm3sKg1UiDiEQSrRlpdhfagrrt2URuI9QcWl8BBUCjBQqCSEMqPISFMwtz7/QMyNU2AzMydmZu5z8c5OWfuzed+8p7PSfI69zP3fq7LNE1TAAAg4bnjXQAAAIgNQh8AAIcg9AEAcAhCHwAAhyD0AQBwCEIfAACHIPQBAHCIHvEuIN4Mw9CXX36pvn37yuVyxbscAABCYpqmjh8/rkGDBsntPve5vOND/8svv1RmZma8ywAAICL79u3TkCFDztnG8aHft29fSdKePXuUlpYW52qcJRAIaPfu3Ro2bJg8Hk+8y3EMxj0+GPf4SfSx9/l8yszMDObZuTg+9Num9FNTU5WamhrnapwlEAioT58+Sk1NTcg/RLti3OODcY8fp4x9Vz6i5kI+AAAcgtAHAMAhCH0AAByC0AcAwCEIfQAAHILQBwDAIQh9AAAcgtAHAMAhCH0AAByC0AcAwCEcvwxvUEuT1JIUeT89UySe1gcAsCFC/wzPkpFSsgVhPSRfKnqH4AcA2A7T+1bbv1FqPhLvKgAA6IAz/TMCt7wopfUPv4NTJ6VXZ5x+bRjWFAUAgIUI/TbJqVKvC8I/vrXZslIAAIgGQt8JTFNqPWFdf1ysCADdEqGf6ExTqpgq7fu9dX1ysSIAdEtcyJfoWk9YG/gSFysCQDfFmb6T/L9fSt7U8I/nYkUA6NYIfSfxcrEiADgZ0/sAADgEoQ8AgEMQ+gAAOAShDwCAQxD6AAA4BKEPAIBDcMueXVm1dG6LhcvvAgC6NULfjqKxdC4AwPGY3rejaCydm/51qWcva/sEAHQrnOnbXaRL57bp2UvqkRx5PwCAbovQt7tIl84FAOAMpvcBAHAIQh8AAIcg9AEAcAg+00d4Wk9ILU2R9REInL49EQAQE4R+NEQaiN1hQZ2nroy4C4+kr6WPli5bH3k9AIDzIvSjwYJAtKUeXiljlFS/3bIuUw5tVaD5qNQ33bI+AQCdI/StEoVAtN2COi6XNG2p5PdFPi1/6qT06ozTrw0j8toAAOdF6FvFykBsY8cFdVwuydsv8n5amyPvAwAQEkLfSlYFIgAAUcAtewAAOARn+og/K27/65lyeqYFAHBWtgz9ZcuWaeHChaqrq1NOTo6efvpp5efnd9p25cqVKiwsbLcvOTlZJ0+ejEWpsIBnaU7knQzJl4reIfgB4BxsN72/evVqlZSUaN68eaqurlZOTo6mTp2qgwcPnvWY1NRUHThwIPi1d+/eGFaMsPTwyhwwyrr+9m+Umo9Y1x8AJCDbnemXl5dr9uzZwbP3FStW6K233lJFRYUefvjhTo9xuVwaOHBgLMtEpFwuGTc8rZraA8oekCKPO8wzdG79A4Aus1Xot7S0aNOmTSotLQ3uc7vdmjJlijZs2HDW4xobG3XJJZfIMAyNGTNGjz/+uEaN6vws0u/3y+/3B7d9Pp8kKWCYChgsCRtLAVMK9ExRILmfFG7ou5PkaesvYJxe2hfnFAgEZBiGAoxVTDHu8ZPoYx/K+7JV6B86dEiBQEAZGRnt9mdkZGjHjh2dHjN8+HBVVFRo9OjROnbsmBYtWqQJEyZo+/btGjJkSIf2ZWVlmj9/fof9Xxw6odSWY9a8EXSJYUqHj/tVI1/Yme86dVLDz7yu2bNHZvJhy+pLVIZh6PDhw6qpqZHbbbtP+BIW4x4/iT72jY2NXW5rq9APx/jx4zV+/Pjg9oQJE3T55Zfr2Wef1aOPPtqhfWlpqUpKSoLbPp9PmZmZGpqeorQ
Lucc+lgKGqRqZyh6YGv70fmtS8GV2VpbU50JriktggUBANTU1ys7OlsfjOf8BsATjHj+JPvZtM9ZdYavQT09Pl8fjUX19fbv99fX1Xf7MvmfPnrrqqqtUU1PT6feTk5OVnNxxlTuP2xV+8CBsbpcrsrH/ynEej1tKwD/oaHC73fJ4PAn5D9DOGPf4SeSxD+U92WqeIykpSXl5eaqsrAzuMwxDlZWV7c7mzyUQCOjTTz/VxRdfHK0yAQDolmx1pi9JJSUlmjVrlsaOHav8/HwtWbJETU1Nwav5Z86cqcGDB6usrEyS9Mgjj2jcuHHKzs7W0aNHtXDhQu3du1d33nlnPN8GAAC2Y7vQnz59uhoaGjR37lzV1dUpNzdXa9euDV7cV1tb2+5CjCNHjmj27Nmqq6tTWlqa8vLy9NFHH2nkyJHxegsAANiS7UJfkoqLi1VcXNzp96qqqtptP/nkk3ryySdjUBUAAN2brT7TBwAA0UPoAwDgELac3gfCYsXT+iSe2AcgYRH6SBxPXWlNPzyxD0CCYnof3VsPr5Rh4dP6JJ7YByBhcaaP7s3lkqYtlfw+yYzwgUk8sQ9AgiP00f25XJLXgucmtDZH3gcA2BjT+wAAOAShDwCAQxD6AAA4BKEPAIBDEPoAADgEoQ8AgEMQ+gAAOAShDwCAQxD6AAA4BCvyAZ3hiX0AEhChD3SGJ/YBSEBM7wNteGIfgATHmT7Qhif2AUhwhD7wVdF4Yh/XBwCwCUIfiDauDwBgE3ymD0QD1wcAsCHO9IFo4PoAADZE6APREo3rAwAgAkzvAwDgEIQ+AAAOQegDAOAQhD4AAA5B6AMA4BCEPgAADsEte0B3YsWSvoFA5GsHAOiWCH2gO7FgSV+PpK+lj5YuWx95PQC6Fab3AbuLwpK+KYe2Ss1HLe0TgP1xpg/YHUv6ArAIoQ90ByzpC8ACTO8DAOAQhD4AAA5B6AMA4BCEPgAADmHL0F+2bJmysrLk9XpVUFCgjRs3dum4VatWyeVy6aabbopugQAAdEO2C/3Vq1erpKRE8+bNU3V1tXJycjR16lQdPHjwnMft2bNHDzzwgCZOnBijSgEA6F5sd8teeXm5Zs+ercLCQknSihUr9NZbb6miokIPP/xwp8cEAgHdfvvtmj9/vt5//30dPXr0rP37/X75/f7gts/nO92HYSpgsDRpLAUMU4bJuMeUYcpz5mXAME4vyYuYCAQCMgxDAcY85hJ97EN5X7YK/ZaWFm3atEmlpaXBfW63W1OmTNGGDRvOetwjjzyiAQMGqKioSO+///45f0ZZWZnmz5/fYf8Xh04oteVY+MUjZIYpHT7uV418crviXY0zuE6d1PAzr3fv2StXr6PxLMdRDMPQ4cOHVVNTI7fbdpOsCS3Rx76xsbHLbW0V+ocOHVIgEFBGRka7/RkZGdqxY0enx3zwwQd64YUXtGXLli79jNLSUpWUlAS3fT6fMjMzNTQ9RWkXWrD4CbosYJiqkansganykPqx0ZoUfDks6xJ5Ui+KYzHOEggEVFNTo+zsbHk8nvMfAMsk+ti3zVh3ha1CP1THjx/XHXfcoeeee07p6eldOiY5OVnJyckd9nvcLoInDtwuF2MfS18ZZ4/bnZD/AO3MfWbMGffYS+SxD+U92Sr009PT5fF4VF9f325/fX29Bg4c2KH97t27tWfPHt1www3BfcaZ9cR79OihnTt3atiwYdEtGgCAbsJWH24kJSUpLy9PlZWVwX2GYaiyslLjx4/v0H7EiBH69NNPtWXLluDXtGnTdPXVV2vLli3KzMyMZfkAANiarc70JamkpESzZs3S2LFjlZ+fryVLlqipqSl4Nf/MmTM1ePBglZWVyev16oorrmh3/AUXXCBJHfYDAOB0tgv96dOnq6GhQXPnzlVdXZ1yc3O1du3a4MV9tbW1CXn1JQAA0Wa70Jek4uJiFRcXd/q9qqqqcx67cuVK6wsCACABcMoMAIBDhHSm/+abb4b8A66
55hr16tUr5OMAAIC1Qgr9UB9k43K5tGvXLg0dOjSk4wAAgPVCnt6vq6uTYRhd+kpJSYlGzQAAIAwhhf6sWbNCmqr/x3/8R6WmpoZcFAAAsF5I0/svvvhiSJ0vX748pPYAACB6LLl63zRNmSaPRwUAwM4iCv0XXnhBV1xxhbxeb3B1vOeff96q2gAAgIXCXpxn7ty5Ki8v1w9/+MPguvgbNmzQ/fffr9raWj3yyCOWFQkAACIXdugvX75czz33nG677bbgvmnTpmn06NH64Q9/SOgDAGAzYU/vt7a2auzYsR325+Xl6dSpUxEVBQAArBd26N9xxx2dXp3/85//XLfffntERQGIgdYTUktT5F9cxAt0GyFN75eUlARfu1wuPf/883rnnXc0btw4SdLvf/971dbWaubMmdZWCcBynqU51nQ0JF8qekdyuazpD0DUhBT6mzdvbredl5cnSdq9e7ckKT09Xenp6dq+fbtF5QGwVA+vzAGj5Dpo4d/o/o1S8xEppb91fQKIipBCf/369dGqA0AsuFwybnhaNbUHlD0gRR53BGfnp05Kr844/dowrKkPQFSFffX+V7UtzONieg+wP5dLZlJvqVc/KZLQb222riYAMcHiPAAAOASL8wAA4BAszgMAgEOwOA8AAA7B4jwAADhERFfvv/DCC2ddnOerC/mUl5dHViUAAIhY2KG/bds2jRkzRlLHxXm2bdsWbMdtfAAA2EPYoc9CPQAAdC8hfaa/detWGSGsvLV9+3Yu6gMAwCZCCv2rrrpKf/7zn7vcfvz48aqtrQ25KAAAYL2QpvdN09RPfvITpaSkdKl9S0tLWEUBAADrhRT63/72t7Vz584utx8/frx69eoVclEAAMB6IYV+VVVVlMoAAADRZslT9gA4XOsJqaUp8n56pkjc5gtEDaEPIHJPXWlNP0PypaJ3CH4gSiJ6tC4AB+vhlTJGWdvn/o1S8xFr+wQQxJk+gPC4XNK0pZLfJ5lmZH2dOim9OuP06xDWAgEQGkIfQPhcLsnbL/J+Wpsj7wPAeTG9DwCAQ4R0pn/ppZeG9QCdOXPm6N577w35OAAAYJ2QQn/lypVh/ZCsrKywjgMAANYJKfQnTZoUrToAAECU8Zk+AAAOEXHob9u2TRUVFaqurm63v7GxMewH7ixbtkxZWVnyer0qKCjQxo0bz9r2tdde09ixY3XBBReod+/eys3N1UsvvRTWzwUAIJFFHPrLly/Xhg0b9L//+7+6/fbbVV5erubmZvn9fv3TP/1TyP2tXr1aJSUlmjdvnqqrq5WTk6OpU6fq4MGDnbbv37+/fvzjH2vDhg3aunWrCgsLVVhYqLfffjvStwYAQEKJOPQXLFigr3/96/rggw909OhRvfnmmxoxYoQef/zxsM70y8vLNXv2bBUWFmrkyJFasWKFUlJSVFFR0Wn7yZMn6+abb9bll1+uYcOG6b777tPo0aP1wQcfRPrWAABIKBEvztOnTx89+OCDevDBB+X3+7Vr1y41NDTowIEDId/e19LSok2bNqm0tDS4z+12a8qUKdqwYcN5jzdNU+vWrdPOnTu1YMGCTtv4/X75/f7gts/nkyQFDFMBI8JVxRCSgGHKMBn3WLPluBumPGdeBgKGFAjEtZxoCAQCMgxDgQR8b3aX6GMfyvuKOPS3bdumjRs3Kjc3V2PGjNEVV1wh6fRn+jfffHNIfR06dEiBQEAZGRnt9mdkZGjHjh1nPe7YsWMaPHiw/H6/PB6PnnnmGV1zzTWdti0rK9P8+fM77P/i0AmlthwLqV5ExjClw8f9qpFPbp6vEjN2HHfXqZMafuZ1zZ49MpMPx7WeaDAMQ4cPH1ZNTY3cbq6hjqVEH/vGxsYut4049JcvX66WlhYdOHBAixcvVl5enu666y75/X7dd999evnllyP9EefVt29fbdmyRY2NjaqsrFRJSYmGDh2qyZMnd2hbWlqqkpKS4LbP51NmZqaGpqco7UILlhNFlwUMUzU
ylT0wVR67pI8D2HLcW5OCL7OzsqQ+F8avligJBAKqqalRdna2PB7P+Q+AZRJ97NtmrLsi4tBfsGCBli9frnXr1sntduvNN9/UU089pVtvvTXkz/TT09Pl8XhUX1/fbn99fb0GDhx41uPcbreys7MlSbm5ufr8889VVlbWaegnJycrOTm5w36P22Wff4AO4na5GPs4sN24f6UOj3FSCpyMvM+eKbZ7RK/b7ZbH40nI4LG7RB77UN5T2KG/b98+ZWZmWvqZflJSkvLy8lRZWambbrpJ0ulpmcrKShUXF3e5H8Mw2n1uD6AbeepKa/oZki8VvWO74AfiKezQHzFihP71X/9VDz/8sFJSUiSdPotu+0xfkmbMmBFyvyUlJZo1a5bGjh2r/Px8LVmyRE1NTSosLJQkzZw5U4MHD1ZZWZmk05/Rjx07VsOGDZPf79dvfvMbvfTSS1q+fHm4bw1ArPXwShmjpPrt1vW5f6PUfERK6W9dn0A3F3bov/vuu7r//vv1wgsv6Kc//am+//3vd2gTzgUT06dPV0NDg+bOnau6ujrl5uZq7dq1wYv7amtr2/Xb1NSku+++W/v371evXr00YsQIvfzyy5o+fXq4bw1ArLlc0rSlkt8nmRHeVXDqpPTqmRMOw4i8NiCBuEwzsr+wX/7yl/rxj3+sAQMGaMmSJZo4caJVtcWEz+dTv379dHjrO0q7MD3e5ThKwDC168AxXXZxP/t8tuwACT/urc3Si9edfv3AbqmPPf6uA4GAdu3apcsuuywhP1e2s0Qf+7YcO3bsmFJTU8/ZNuJ7F2bOnKmdO3fq+uuv13XXXadbb71Vf/zjHyPtFgAAWMyyGxavvfZa3XnnnXr99dc1cuRIPfTQQyHdOwgAAKIr7M/0V6xYoU8++USffPKJPv/8c7ndbl1xxRX6l3/5F+Xk5GjVqlUaOXJk8IE4AAAgvsIO/Z/+9KcqKCjQzJkzNW7cOOXl5alXr17B7//gBz/Q448/ru9///vatm2bJcUCAIDwRXSf/vkUFRXpJz/5Sbg/AgAAWCiqixBnZGRo3bp10fwRAACgi0I607/00ktDXmVPkubMmaN777035OMAAIB1Qgr9lStXhvVDsrKywjoOAABYJ6TQnzRpUrTqAAAAUZZ4DxYGAACdCulM/6vPoT+f8vLykIsBAADRE1Lob968ud12dXW1Tp06peHDh0uS/vCHP8jj8SgvL8+6CgEAgCVCCv3169cHX5eXl6tv3776xS9+obS0NEnSkSNHVFhY2O0eugMAgBOE/Zn+4sWLVVZWFgx8SUpLS9Njjz2mxYsXW1IcAACwTtih7/P51NDQ0GF/Q0ODjh8/HlFRAADAemGH/s0336zCwkK99tpr2r9/v/bv3681a9aoqKhI3/3ud62sEQAAWCCip+w98MAD+t73vqfW1tbTnfXooaKiIi1cuNCyAgEgbK0npJamyPromSKFsRIpYEdhh35KSoqeeeYZLVy4ULt375YkDRs2TL1797asOACIyFNXRt7HkHyp6B2CHwkh5On9uXPnatOmTcHt3r17a/To0Ro9ejSBDyD+eniljFHW9bd/o9R8xLr+gDgK+Ux///79uu6665SUlKQbbrhB06ZN09/+7d8qKSkpGvUBQGhcLmnaUsnvk0wz/H5OnZRenXH6tWFYUxsQZyGHfkVFhQzD0Icffqj//u//1pw5c3TgwAFdc801uvHGG/Wd73xH/fv3j0atANA1Lpfk7RdZH63N1tQC2EhYV++73W5NnDhRP/vZz7Rz5079/ve/V0FBgZ599lkNGjRI3/72t7Vo0SL96U9/srpeAAAQprBv2WtsbAy+vvzyy/XQQw/pww8/1L59+zRr1iy9//77evXVVy0pEgAARC7sq/f79eunX/3qV7rlllva7b/oootUVFSkoqKiiIsDAADWCftM3zRNPfvss/rmN7+pb33rW5ozZ44++eQTK2sDAAAWCjv0pdNP3RszZoy+9a1vafv
27Zo4caIeeOABq2oDAAAWCnt6X5JeeeUVXXPNNcHtrVu36sYbb9TgwYN1//33R1wcAACwTthn+v3791dmZma7faNHj9bSpUu1fPnyiAsDAADWCjv0c3Nz9eKLL3bYn52drdra2oiKAgAA1gt7ev+xxx7T1VdfrS+//FJ33323Ro8eraamJj3++OO69NJLrawRAABYIOzQHzdunD7++GPde++9mjhxoswzy116vV79+te/tqxAAABgjYgu5MvJydF7772ngwcPatOmTTIMQwUFBUpPT7eqPgAAYJGQQr+kpOS8bSorKyVJ5eXl4VUEAACiIqTQ37x5c7vt6upqnTp1SsOHD5ck/eEPf5DH41FeXp51FQIAAEuEFPrr168Pvi4vL1ffvn31i1/8QmlpaZKkI0eOqLCwUBMnTrS2SgAAELGwb9lbvHixysrKgoEvSWlpaXrssce0ePFiS4oDAADWCTv0fT6fGhoaOuxvaGjQ8ePHIyoKAABYL+zQv/nmm1VYWKjXXntN+/fv1/79+7VmzRoVFRXpu9/9rpU1AgAAC4R9y96KFSv0wAMP6Hvf+55aW1tPd9ajh4qKirRw4ULLCgQAANYI+0w/JSVFzzzzjP785z9r8+bN2rx5sw4fPqxnnnlGvXv3jqioZcuWKSsrS16vVwUFBdq4ceNZ2z733HOaOHGi0tLSlJaWpilTppyzPQCErPWE1NIU+deZRcyAeIlocR5J6t27t0aPHm1FLZKk1atXq6SkRCtWrFBBQYGWLFmiqVOnaufOnRowYECH9lVVVbrttts0YcIEeb1eLViwQNdee622b9+uwYMHW1YXAAd76sqIu/BI+lr6aOmy9edtC0RL2Gf60VJeXq7Zs2ersLBQI0eO1IoVK5SSkqKKiopO2//nf/6n7r77buXm5mrEiBF6/vnnZRhGcJEgAAhLD6+UMcrSLlMObZWaj1raJxCKiM/0rdTS0qJNmzaptLQ0uM/tdmvKlCnasGFDl/o4ceKEWltb1b9//06/7/f75ff7g9s+n0+SFDBMBQym3mIpYJgyTMY91hj3EHznacl/XDKNyPo5dVKe1bdJkgKnTkmBgAXFoasCgYAMw1AgQcc9lPdlq9A/dOiQAoGAMjIy2u3PyMjQjh07utTHv/3bv2nQoEGaMmVKp98vKyvT/PnzO+z/4tAJpbYcC71ohM0wpcPH/aqRT25XvKtxDsY9HJENlOuUS8PPvN69Z69cvY5GXBG6zjAMHT58WDU1NXK7bTfBHbHGxsYut7VV6EfqiSee0KpVq1RVVSWv19tpm9LS0nbPEPD5fMrMzNTQ9BSlXdgvVqVCp884a2Qqe2CqPKRPzDDucdCaFHw5LOsSeVIvimMxzhMIBFRTU6Ps7Gx5PJ54l2O5thnrrrBV6Kenp8vj8ai+vr7d/vr6eg0cOPCcxy5atEhPPPGEfvvb357zwsLk5GQlJyd32O9xu/gHGAdul4uxjwPGPca+Ms4etzshg8fu3GfGPRHHPpT3ZKt5jqSkJOXl5bW7CK/torzx48ef9bif/exnevTRR7V27VqNHTs2FqUCANDt2OpMXzr9+N5Zs2Zp7Nixys/P15IlS9TU1KTCwkJJ0syZMzV48GCVlZVJkhYsWKC5c+fqlVdeUVZWlurq6iRJffr0UZ8+feL2PgAAsBvbhf706dPV0NCguXPnqq6uTrm5uVq7dm3w4r7a2tp2F2IsX75cLS0tuvXWW9v1M2/ePP3Hf/xHLEsHAMDWbBf6klRcXKzi4uJOv1dVVdVue8+ePdEvCACs0ra6X6R6pkgurslAaGwZ+gCQqDxLc6zpaEi+VPQOwY+Q2OpCPgBISD28MgdYu7qf9m+Umo9Y2ycSHmf6ABBtLpeMG55WTe0BZQ9IiexWyVMnpVdnnH5tRLhSIByH0AeAWHC5ZCb1lnr1U0RLIbY2W1cTHIfpfQAAHILQBwDAIQh9AAAcgtAHAMAhCH0AAByC0AcAwCEIfQAAHIL
QBwDAIQh9AAAcgtAHAMAhCH0AAByC0AcAwCEIfQAAHILQBwDAIQh9AAAcgtAHAMAhesS7AABAmFpPSC1NkffTM0VyuSLvB7ZH6ANAd/XUldb0MyRfKnqH4HcApvcBoDvp4ZUyRlnb5/6NUvMRa/uELXGmDwDdicslTVsq+X2SaUbW16mT0qszTr82jMhrg+0R+gDQ3bhckrdf5P20NkfeB7oVpvcBAHAIQh8AAIcg9AEAcAhCHwAAhyD0AQBwCEIfAACH4JY9AABL+joEoQ8AYElfh2B6HwCciiV9HYczfQBwKpb0dRxCHwCcjCV9HYXpfQAAHILQBwDAIQh9AAAcwnahv2zZMmVlZcnr9aqgoEAbN248a9vt27frlltuUVZWllwul5YsWRK7QgEA6GZsFfqrV69WSUmJ5s2bp+rqauXk5Gjq1Kk6ePBgp+1PnDihoUOH6oknntDAgQNjXC0AAN2LrUK/vLxcs2fPVmFhoUaOHKkVK1YoJSVFFRUVnbb/xje+oYULF2rGjBlKTk6OcbUAAHQvtrllr6WlRZs2bVJpaWlwn9vt1pQpU7RhwwbLfo7f75ff7w9u+3w+SVLAMBUwIrxPFSEJGKYMk3GPNcY9PhJ+3A1TnjMvAwFDCgTiWs5XBQIBGYahgI1qslIo78s2oX/o0CEFAgFlZGS025+RkaEdO3ZY9nPKyso0f/78Dvu/OHRCqS3HLPs5OD/DlA4f96tGPrlZsTNmGPf4SPRxd506qeFnXtfs2SMz+XBc6/kqwzB0+PBh1dTUyO221QS3JRobG7vc1jahHyulpaUqKSkJbvt8PmVmZmpoeorSLrRggQp0WcAwVSNT2QNT5UnE/4I2xbjHR8KPe2tS8GV2VpbU58L41fJXAoGAampqlJ2dLY/Hc/4Dupm2GeuusE3op6eny+PxqL6+vt3++vp6Sy/SS05O7vTzf4/blZh/iDbndrkY+zhg3OMjocf9K+/J43FLNgtXt9stj8eTkKEfynuyzTxHUlKS8vLyVFlZGdxnGIYqKys1fvz4OFYGAAhJ22N6I/2K9HkA6MA2Z/qSVFJSolmzZmns2LHKz8/XkiVL1NTUpMLCQknSzJkzNXjwYJWVlUk6ffHfZ599Fnz9pz/9SVu2bFGfPn2UnZ0dt/cBAI7GY3pty1ahP336dDU0NGju3Lmqq6tTbm6u1q5dG7y4r7a2tt1FGF9++aWuuuqq4PaiRYu0aNEiTZo0SVVVVbEuHwCcq+0xvfXbreuz7TG9Kf2t69PhbBX6klRcXKzi4uJOv/fXQZ6VlSWT6R8AiD8e09st2C70AQDdFI/ptT3bXMgHAACii9AHAMAhCH0AAByC0AcAwCG4kA8AYF9tC/1EIhBgoZ8zCH0AgH1ZsNCPR9LX0kdLl62PvJ5ujul9AIC9tC30Y6GUQ1ul5qOW9tkdcaYPALAXFvqJGkIfAGA/LPQTFUzvAwDgEIQ+AAAOQegDAOAQhD4AAA5B6AMA4BBcvQ8AcAYrVveTpJ4pp+8u6IYIfQCAI3iW5ljT0ZB8qeidbhn8TO8DABJXD6/MAdau7qf9G6XmI9b2GSOc6QMAEpfLJeOGp1VTe0DZA1LkcUdwdp4Aq/sR+gCAxOZyyUzqLfXqJ0US+gmwuh/T+wAAOAShDwCAQxD6AAA4BKEPAIBDEPoAADgEV+8DABAqK1b3i8PKfoQ+AACheurKyPuIw8p+TO8DANAVPbxShoWr+8VhZT/O9AEA6AqXS5q2VPL7JNMMv584ruxH6AMA0FUul+TtF1kfcVzZj+l9AAAcgjN9AADixYq7AEI4ntAHACBerLgLwN/16wuY3gcAIJasvgsglB8dl58KAIBTWXUXQJujR6QnbulSU0IfAIBYs+IugDYnA11uyvQ+AAAOQegDAOAQhD4AAA5B6AMA4BC2DP1ly5YpKytLXq9XBQUF2rhx4znb//rXv9aIESPk9Xp15ZVX6je/+U2MKgUAoPuwXeivXr1aJSU
lmjdvnqqrq5WTk6OpU6fq4MGDnbb/6KOPdNttt6moqEibN2/WTTfdpJtuuknbtm2LceUAANib7UK/vLxcs2fPVmFhoUaOHKkVK1YoJSVFFRUVnbZ/6qmn9Hd/93d68MEHdfnll+vRRx/VmDFjtHTp0hhXDgCAvdnqPv2WlhZt2rRJpaWlwX1ut1tTpkzRhg0bOj1mw4YNKikpabdv6tSpeuONNzpt7/f75ff7g9vHjh2TJB09EttnGkMKGKZ8viYdSWqVx+2KdzmOwbjHB+MeP4k+9r6jRyVJZhcW+rFV6B86dEiBQEAZGRnt9mdkZGjHjh2dHlNXV9dp+7q6uk7bl5WVaf78+R32D500PcyqAQCIv+PHj6tfv3Mv+GOr0I+F0tLSdjMDR48e1SWXXKLa2trzDhas5fP5lJmZqX379ik1NTXe5TgG4x4fjHv8JPrYm6ap48ePa9CgQedta6vQT09Pl8fjUX19fbv99fX1GjhwYKfHDBw4MKT2ycnJSk5O7rC/X79+CfnL0B2kpqYy9nHAuMcH4x4/iTz2XT1ptdWFfElJScrLy1NlZWVwn2EYqqys1Pjx4zs9Zvz48e3aS9K777571vYAADiVrc70JamkpESzZs3S2LFjlZ+fryVLlqipqUmFhYWSpJkzZ2rw4MEqKyuTJN13332aNGmSFi9erOuvv16rVq3S//3f/+nnP/95PN8GAAC2Y7vQnz59uhoaGjR37lzV1dUpNzdXa9euDV6sV1tbK7f7LxMUEyZM0CuvvKJ///d/149+9CNddtlleuONN3TFFVd06eclJydr3rx5nU75I7oY+/hg3OODcY8fxv4vXGZXrvEHAADdnq0+0wcAANFD6AMA4BCEPgAADkHoAwDgEI4IfR7VGz+hjP3KlSvlcrnafXm93hhWmxh+97vf6YYbbtCgQYPkcrnO+hyKr6qqqtKYMWOUnJys7OxsrVy5Mup1JppQx72qqqrD77vL5TrrEuLoXFlZmb7xjW+ob9++GjBggG666Sbt3LnzvMc59f98woc+j+qNn1DHXjq9YtaBAweCX3v37o1hxYmhqalJOTk5WrZsWZfa//GPf9T111+vq6++Wlu2bNGcOXN055136u23345ypYkl1HFvs3Pnzna/8wMGDIhShYnpvffe0z333KOPP/5Y7777rlpbW3XttdeqqanprMc4+v+8meDy8/PNe+65J7gdCATMQYMGmWVlZZ22/4d/+Afz+uuvb7evoKDA/Od//ueo1pmIQh37F1980ezXr1+MqnMGSebrr79+zjYPPfSQOWrUqHb7pk+fbk6dOjWKlSW2roz7+vXrTUnmkSNHYlKTUxw8eNCUZL733ntnbePk//MJfabf9qjeKVOmBPd15VG9X20vnX5U79nao3PhjL0kNTY26pJLLlFmZqZuvPFGbd++PRblOhq/8/GVm5uriy++WNdcc40+/PDDeJfT7bU9Lr1///5nbePk3/mEDv1zPar3bJ+bhfqoXnQunLEfPny4Kioq9F//9V96+eWXZRiGJkyYoP3798eiZMc62++8z+dTc3NznKpKfBdffLFWrFihNWvWaM2aNcrMzNTkyZNVXV0d79K6LcMwNGfOHH3zm98856qsTv4/b7tleOFc48ePb/egpAkTJujyyy/Xs88+q0cffTSOlQHWGz58uIYPHx7cnjBhgnbv3q0nn3xSL730Uhwr677uuecebdu2TR988EG8S7GthD7Tj8WjetG5cMb+r/Xs2VNXXXWVampqolEizjjb73xqaqp69eoVp6qcKT8/n9/3MBUXF+t//ud/tH79eg0ZMuScbZ38fz6hQ59H9cZPOGP/1wKBgD799FNdfPHF0SoT4nfeTrZs2cLve4hM01RxcbFef/11rVu3Tpdeeul5j3H073y8rySMtlWrVpnJycnmypUrzc8++8z8wQ9+YF5wwQVmXV2daZqmeccdd5gPP/xwsP2HH35o9ujRw1y0aJH5+eefm/P
mzTN79uxpfvrpp/F6C91WqGM/f/588+233zZ3795tbtq0yZwxY4bp9XrN7du3x+stdEvHjx83N2/ebG7evNmUZJaXl5ubN2829+7da5qmaT788MPmHXfcEWz/xRdfmCkpKeaDDz5ofv755+ayZctMj8djrl27Nl5voVsKddyffPJJ84033jB37dplfvrpp+Z9991nut1u87e//W283kK3dNddd5n9+vUzq6qqzAMHDgS/Tpw4EWzD//m/SPjQN03TfPrpp82vfe1rZlJSkpmfn29+/PHHwe9NmjTJnDVrVrv2v/rVr8yvf/3rZlJSkjlq1CjzrbfeinHFiSOUsZ8zZ06wbUZGhvn3f//3ZnV1dRyq7t7abgX766+2sZ41a5Y5adKkDsfk5uaaSUlJ5tChQ80XX3wx5nV3d6GO+4IFC8xhw4aZXq/X7N+/vzl58mRz3bp18Sm+G+tszCW1+x3m//xf8GhdAAAcIqE/0wcAAH9B6AMA4BCEPgAADkHoAwDgEIQ+AAAOQegDAOAQhD4AAA5B6AMA4BCEPgAADkHoAwDgEIQ+gLBUVVUpKyvL9n0C+AtCHwAAhyD0AQBwCEIfgCWGDBmiZ555pt2+jz76SCkpKdq7d2+cqgLwVYQ+AEsUFBTok08+CW6bpqk5c+bo/vvv1yWXXBLHygC0IfQBWGLcuHHtQv+ll17Svn37VFpaKkk6fvy47r77bm3durXTbQDRR+gDsMS4ceP0+eefq7GxUU1NTfrRj36kxx57TH369JEkLV++XH6/Xxs3bux0G0D0EfoALJGXlye3263q6motWLBAF110kQoLC4PfX7dunbKyspSbm9vpNoDoI/QBWCIlJUVXXnml1qxZo0WLFunJJ5+U2336X8zJkyfldrv12WefKS8vr8M2gNgg9AFYZty4cXr66ac1depUTZ48Obh/165dampqUl5enlwuV4dtALFB6AOwTE5Ojnr27KmFCxe229/Q0KAvvvhCd911V6fbAGKjR7wLAJA4Vq1apeLiYmVnZ7fbf+DAAd16661qbm6WYRgdtvv27RunigFn4UwfQEQMw1B9fb0ef/xx7dq1S/PmzWv3/UAgoOrqau3bt0/33ntv8GK/tu2ePXvGqXLAeTjTBxCR3/3ud/qbv/kbjRgxQmvWrFFqamq773s8Hi1evLjdvr/eBhAbhD6AsGRlZWnOnDmaPHmyDMOwtE8A0eEyTdOMdxEAACD6+EwfAACHIPQBAHAIQh8AAIcg9AEAcAhCHwAAhyD0AQBwCEIfAACHIPQBAHAIQh8AAIf4/5bmCcvpjkZOAAAAAElFTkSuQmCC", "text/plain": [ "
" ] @@ -427,9 +526,9 @@ ], "metadata": { "kernelspec": { - "display_name": "PineAPPL", + "display_name": "Python 3 (ipykernel)", "language": "python", - "name": "pineappl" + "name": "python3" }, "language_info": { "codemirror_mode": { @@ -441,7 +540,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.6" + "version": "3.11.9" } }, "nbformat": 4, diff --git a/pineappl_py/docs/source/introduction.ipynb b/pineappl_py/docs/source/introduction.ipynb index ce1c12288..7b51ae337 100644 --- a/pineappl_py/docs/source/introduction.ipynb +++ b/pineappl_py/docs/source/introduction.ipynb @@ -26,7 +26,7 @@ }, { "cell_type": "raw", - "id": "ecb04362-1f02-4d0b-81f6-df1b95134c67", + "id": "e904212d-ee57-4664-8e78-fd0e8f2a665e", "metadata": {}, "source": [ "pip install pineappl" @@ -116,7 +116,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "LHAPDF 6.5.4 loading /home/felix/local/share/LHAPDF/NNPDF40_nnlo_as_01180/NNPDF40_nnlo_as_01180_0000.dat\n", + "LHAPDF 6.5.4 loading /home/tanjona/LHAPDF_PATH/NNPDF40_nnlo_as_01180/NNPDF40_nnlo_as_01180_0000.dat\n", "NNPDF40_nnlo_as_01180 PDF set, member #0, version 1; LHAPDF ID = 331100\n" ] } @@ -140,12 +140,38 @@ "id": "640a1efd-94bb-4c22-a6d7-785e940a0013", "metadata": {}, "source": [ - "Our grid can now be convolved with our PDF set using the `convolve_with_one()` function:" + "In order to convolve a grid, we need to specify the types of convolutions that are required.\n", + "This includes the polarization and the PDG IDs of the involved hadrons, as well as wether or\n", + "not the hadron is in the initial- or final-state.\n", + "\n", + "In our example, the grid involves two initial-state unpolarized protons. 
We can therefore\n", + "construct the convolution types as follows:" ] }, { "cell_type": "code", "execution_count": 5, + "id": "a10346c8-cda8-4d71-9b10-518cab0ec38b", + "metadata": {}, + "outputs": [], + "source": [ + "from pineappl.convolutions import Conv, ConvType\n", + "\n", + "conv_type = ConvType(polarized=False, time_like=False)\n", + "conv_object = Conv(conv_type=conv_type, pid=2212)" + ] + }, + { + "cell_type": "markdown", + "id": "813d3ba0-df32-4f2d-8364-73c625855bce", + "metadata": {}, + "source": [ + "Our grid can now be convolved with our PDF set using the `convolve()` function." + ] + }, + { + "cell_type": "code", + "execution_count": 6, "id": "d41e391c-affd-49ad-8d70-483bf952d4f3", "metadata": {}, "outputs": [ @@ -182,13 +208,17 @@ "└──────┴─────────────┘" ] }, - "execution_count": 5, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "predictions = grid.convolve_with_one(2212, pdf.xfxQ2, pdf.alphasQ2)\n", + "predictions = grid.convolve(\n", + " pdg_convs=[conv_object, conv_object], # Similar convolutions for symmetric protons\n", + " xfxs=[pdf.xfxQ2, pdf.xfxQ2], # Similar PDF sets for symmetric protons\n", + " alphas=pdf.alphasQ2,\n", + ")\n", "df_preds = pl.DataFrame(\n", " {\n", " \"bins\": range(predictions.size),\n", @@ -198,6 +228,34 @@ "df_preds" ] }, + { + "cell_type": "markdown", + "id": "6ad4724b-e303-4200-a212-c24c81cb1d3f", + "metadata": {}, + "source": [ + "We can see that `convolve()` can perform convolutions with an arbitrary number of distributions. 
This is\n", + "why `pdf_convs` and `xfxs` are lists that respectively take all the types of convolutions and distributions\n", + "corresponding to the involved hadrons.\n", + "\n", + "**NOTE:** If the hadrons have the same type of convolutions and require the convolution to the same distribution,\n", + "then only one single element can be passed to the list:" + ] + }, + { + "cell_type": "markdown", + "id": "1cb9a279-cb32-4817-8d5a-0fde5213085b", + "metadata": {}, + "source": [ + "```python\n", + "# Pass the shared convolution type and distribution to all hadrons\n", + "predictions = grid.convolve(\n", + " pdg_convs=[conv_object],\n", + " xfxs=[pdf.xfxQ2],\n", + " alphas=pdf.alphasQ2,\n", + ")\n", + "```" + ] + }, { "cell_type": "markdown", "id": "231dd694-e068-4e62-8e19-b93c96f4d937", @@ -215,7 +273,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "id": "fee97bfd-e392-4a78-8850-a4093bbcb330", "metadata": {}, "outputs": [ @@ -297,9 +355,10 @@ "id": "ba2b1e4c-bc60-48dd-9b04-d349b4118707", "metadata": {}, "source": [ - "**NOTE:** If the two initial state hadrons are different (as is the case in\n", - "$pp$ collisions in which one of the protons is polarized), then one can convolve\n", - "the grid with **two** different PDF sets using the `convolve_with_two()` function:" + "**NOTE:** As mentioned before, if the two initial state hadrons are different \n", + "(as is the case in $pp$ collisions in which one of the protons is polarized),\n", + "then one can convolve the grid with **two** different PDF sets using the `convolve()` \n", + "function:" ] }, { @@ -308,8 +367,19 @@ "metadata": {}, "source": [ "```python\n", + "# Define the convolution types for each of the (un)polarized hadrons\n", + "pol_type = ConvType(polarized=True, time_like=False) # `polarized = True`\n", + "pol_object = Conv(conv_type=pol_type, pid=2212)\n", + "\n", + "unpol_type = ConvType(polarized=False, time_like=False)\n", + "unpol_object = Conv(conv_type=conv_type, 
pid=2212)\n", + "\n", "# Convolve the two initial state hadrons with different PDF sets\n", - "predictions = grid.convolve_with_two(2212, polarized_pdf.xfxQ2, 2212, unpolarized_pdf.xfxQ2, unpolarized_pdf.alphasQ2)\n", + "predictions = predictions = grid.convolve(\n", + " pdg_convs=[pol_object, conv_object],\n", + " xfxs=[polarized_pdf.xfxQ2, 2212, unpolarized_pdf.xfxQ2],\n", + " alphas=pdf.alphasQ2,\n", + ")\n", "```" ] }, @@ -318,7 +388,7 @@ "id": "3d56b36c-b888-4f2d-b3fb-2f8f689ff003", "metadata": {}, "source": [ - "**NOTE:** The same functions `convolve_with_one()` and `convolve_with_two()` also work for convolving FK tables with PDF sets." + "**NOTE:** The same function `convolve` also works for convolving FK tables with PDF sets." ] }, { @@ -342,7 +412,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "id": "b0d1a772-3be5-47df-99e2-96daa5ebf380", "metadata": {}, "outputs": [ @@ -350,24 +420,24 @@ "name": "stdout", "output_type": "stream", "text": [ - "0: [(2, -2, 1.0), (4, -4, 1.0)]\n", - "1: [(0, -4, 1.0), (0, -2, 1.0)]\n", - "2: [(22, -4, 1.0), (22, -2, 1.0)]\n", - "3: [(2, 0, 1.0), (4, 0, 1.0)]\n", - "4: [(2, 22, 1.0), (4, 22, 1.0)]\n", - "5: [(1, -1, 1.0), (3, -3, 1.0)]\n", - "6: [(0, -3, 1.0), (0, -1, 1.0)]\n", - "7: [(22, -3, 1.0), (22, -1, 1.0)]\n", - "8: [(1, 0, 1.0), (3, 0, 1.0)]\n", - "9: [(1, 22, 1.0), (3, 22, 1.0)]\n", - "10: [(5, -5, 1.0)]\n", - "11: [(0, -5, 1.0)]\n", - "12: [(22, -5, 1.0)]\n", - "13: [(5, 0, 1.0)]\n", - "14: [(5, 22, 1.0)]\n", - "15: [(22, 22, 1.0)]\n", - "16: [(-5, 22, 1.0), (-3, 22, 1.0), (-1, 22, 1.0)]\n", - "17: [(1, 22, 1.0), (3, 22, 1.0), (5, 22, 1.0)]\n" + "0: [([2, -2], 1.0), ([4, -4], 1.0)]\n", + "1: [([0, -4], 1.0), ([0, -2], 1.0)]\n", + "2: [([22, -4], 1.0), ([22, -2], 1.0)]\n", + "3: [([2, 0], 1.0), ([4, 0], 1.0)]\n", + "4: [([2, 22], 1.0), ([4, 22], 1.0)]\n", + "5: [([1, -1], 1.0), ([3, -3], 1.0)]\n", + "6: [([0, -3], 1.0), ([0, -1], 1.0)]\n", + "7: [([22, -3], 1.0), ([22, -1], 1.0)]\n", 
+ "8: [([1, 0], 1.0), ([3, 0], 1.0)]\n", + "9: [([1, 22], 1.0), ([3, 22], 1.0)]\n", + "10: [([5, -5], 1.0)]\n", + "11: [([0, -5], 1.0)]\n", + "12: [([22, -5], 1.0)]\n", + "13: [([5, 0], 1.0)]\n", + "14: [([5, 22], 1.0)]\n", + "15: [([22, 22], 1.0)]\n", + "16: [([-5, 22], 1.0), ([-3, 22], 1.0), ([-1, 22], 1.0)]\n", + "17: [([1, 22], 1.0), ([3, 22], 1.0), ([5, 22], 1.0)]\n" ] } ], @@ -401,7 +471,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 9, "id": "6658a6c3-bb88-42fa-a567-9f5b6cebb1ac", "metadata": {}, "outputs": [ @@ -415,26 +485,26 @@ " white-space: pre-wrap;\n", "}\n", "\n", - "shape: (7, 5)
indexasalflr
u32i64i64i64i64
00200
11200
21210
31201
40300
50310
60301
" + "shape: (7, 6)
indexasalflrla
u32i64i64i64i64i64
002000
112000
212100
312010
403000
503100
603010
" ], "text/plain": [ - "shape: (7, 5)\n", - "┌───────┬─────┬─────┬─────┬─────┐\n", - "│ index ┆ as ┆ a ┆ lf ┆ lr │\n", - "│ --- ┆ --- ┆ --- ┆ --- ┆ --- │\n", - "│ u32 ┆ i64 ┆ i64 ┆ i64 ┆ i64 │\n", - "╞═══════╪═════╪═════╪═════╪═════╡\n", - "│ 0 ┆ 0 ┆ 2 ┆ 0 ┆ 0 │\n", - "│ 1 ┆ 1 ┆ 2 ┆ 0 ┆ 0 │\n", - "│ 2 ┆ 1 ┆ 2 ┆ 1 ┆ 0 │\n", - "│ 3 ┆ 1 ┆ 2 ┆ 0 ┆ 1 │\n", - "│ 4 ┆ 0 ┆ 3 ┆ 0 ┆ 0 │\n", - "│ 5 ┆ 0 ┆ 3 ┆ 1 ┆ 0 │\n", - "│ 6 ┆ 0 ┆ 3 ┆ 0 ┆ 1 │\n", - "└───────┴─────┴─────┴─────┴─────┘" + "shape: (7, 6)\n", + "┌───────┬─────┬─────┬─────┬─────┬─────┐\n", + "│ index ┆ as ┆ a ┆ lf ┆ lr ┆ la │\n", + "│ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- │\n", + "│ u32 ┆ i64 ┆ i64 ┆ i64 ┆ i64 ┆ i64 │\n", + "╞═══════╪═════╪═════╪═════╪═════╪═════╡\n", + "│ 0 ┆ 0 ┆ 2 ┆ 0 ┆ 0 ┆ 0 │\n", + "│ 1 ┆ 1 ┆ 2 ┆ 0 ┆ 0 ┆ 0 │\n", + "│ 2 ┆ 1 ┆ 2 ┆ 1 ┆ 0 ┆ 0 │\n", + "│ 3 ┆ 1 ┆ 2 ┆ 0 ┆ 1 ┆ 0 │\n", + "│ 4 ┆ 0 ┆ 3 ┆ 0 ┆ 0 ┆ 0 │\n", + "│ 5 ┆ 0 ┆ 3 ┆ 1 ┆ 0 ┆ 0 │\n", + "│ 6 ┆ 0 ┆ 3 ┆ 0 ┆ 1 ┆ 0 │\n", + "└───────┴─────┴─────┴─────┴─────┴─────┘" ] }, - "execution_count": 8, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } @@ -445,7 +515,7 @@ "for idx, o in enumerate(grid.orders()):\n", " orders.append(o.as_tuple())\n", "\n", - "df_orders = pl.DataFrame(np.array(orders), schema=[\"as\", \"a\", \"lf\", \"lr\"])\n", + "df_orders = pl.DataFrame(np.array(orders), schema=[\"as\", \"a\", \"lf\", \"lr\", \"la\"])\n", "df_orders.with_row_index()" ] }, @@ -456,7 +526,8 @@ "source": [ "The table above lists the perturbative orders contained in the\n", "grid where the powers of the strong coupling $a_s$, the electroweak\n", - "coupling $a$, the factorization $\\ell_F = \\log(\\mu_F^2/Q^2)$ and renormalization $\\ell_R=\\log(\\mu_R^2/Q^2)$ \n", + "coupling $a$, the factorization $\\ell_F = \\log(\\mu_F^2/Q^2)$, renormalization $\\ell_R=\\log(\\mu_R^2/Q^2)$,\n", + "and fragmentation $\\ell_A=\\log(\\mu_A^2/Q^2)$\n", "logs are shown. 
For instance, the first index shows that the grid \n", "contains a leading-order (LO) which has the coupling $a_s^2$." ] @@ -481,7 +552,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 10, "id": "4514380c-65e3-4116-a835-238212d68d10", "metadata": {}, "outputs": [ @@ -518,7 +589,7 @@ "└────────────┴─────────────┘" ] }, - "execution_count": 9, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -564,7 +635,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 11, "id": "16f768ea-4007-4ee9-be97-c862fbb8dfab", "metadata": {}, "outputs": [ @@ -601,7 +672,7 @@ "└───────┴───────────────────┘" ] }, - "execution_count": 10, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -623,7 +694,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 12, "id": "60745b45-0cb5-413d-8f84-cea84e557998", "metadata": {}, "outputs": [], @@ -657,7 +728,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 13, "id": "82efebec-966b-4a8f-89b1-c78077857752", "metadata": {}, "outputs": [ @@ -694,7 +765,7 @@ "└───────┴───────────────────┘" ] }, - "execution_count": 12, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -741,9 +812,9 @@ ], "metadata": { "kernelspec": { - "display_name": "PineAPPL", + "display_name": "nnpdf", "language": "python", - "name": "pineappl" + "name": "nnpdf" }, "language_info": { "codemirror_mode": { @@ -755,7 +826,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.6" + "version": "3.11.9" } }, "nbformat": 4, diff --git a/pineappl_py/pyproject.toml b/pineappl_py/pyproject.toml index f8e19cb3d..37adb15e0 100644 --- a/pineappl_py/pyproject.toml +++ b/pineappl_py/pyproject.toml @@ -7,7 +7,7 @@ name = "pineappl" # due to a bug in warehouse, https://github.com/pypi/warehouse/issues/8090, this file must be the # same across all wheels of a single version 
and therefore `requires-python` must give the minimum # Python version that we support -requires-python = ">=3.6" +requires-python = ">=3.7" classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Science/Research", @@ -21,7 +21,7 @@ classifiers = [ "Topic :: Scientific/Engineering :: Physics", ] -dependencies = ["numpy>=1.16.0,<2.0.0"] +dependencies = ["numpy>=1.16.0"] [project.optional-dependencies] cli = ["pineappl-cli"] diff --git a/pineappl_py/src/bin.rs b/pineappl_py/src/bin.rs index f4447b36a..c0752f498 100644 --- a/pineappl_py/src/bin.rs +++ b/pineappl_py/src/bin.rs @@ -13,7 +13,12 @@ pub struct PyBinRemapper { #[pymethods] impl PyBinRemapper { - /// Constructor. + /// Constructor for remapping bin limits. + /// + /// # Panics + /// + /// Panics when `bin_limits` does not have the same length as `normalizations` or the bins or + /// if the `bin_limits` edges are not within the observable bins. /// /// Parameters /// ---------- @@ -30,6 +35,10 @@ impl PyBinRemapper { } /// Register submodule in parent. +/// +/// # Errors +/// +/// Raises an error if (sub)module is not found. pub fn register(parent_module: &Bound<'_, PyModule>) -> PyResult<()> { let m = PyModule::new_bound(parent_module.py(), "bin")?; m.setattr(pyo3::intern!(m.py(), "__doc__"), "Binning interface.")?; diff --git a/pineappl_py/src/boc.rs b/pineappl_py/src/boc.rs index 343aa71bb..5dd10804b 100644 --- a/pineappl_py/src/boc.rs +++ b/pineappl_py/src/boc.rs @@ -1,16 +1,15 @@ //! Interface for bins, orders and channels. use numpy::{IntoPyArray, PyArray1}; -use pineappl::boc::{Channel, Order}; +use pineappl::boc::{Channel, Kinematics, Order, ScaleFuncForm, Scales}; use pyo3::prelude::*; /// PyO3 wrapper to :rustdoc:`pineappl::boc::Channel `. /// /// Each entry consists of a tuple, which contains, in the following order: /// -/// 1. the PDG id of the first incoming parton -/// 2. the PDG id of the second parton -/// 3. 
a numerical factor that will multiply the result for this specific combination. +/// 1. a list containing the PDG value of the 1st, 2nd, and etc. of the incoming parton +/// 2. a numerical factor that will multiply the result for this specific combination. #[pyclass(name = "Channel")] #[repr(transparent)] pub struct PyChannel { @@ -23,10 +22,11 @@ impl PyChannel { /// /// Parameters /// ---------- - /// entry: list(tuple(int, int, float)) + /// entry: list(tuple(list(int),float)) /// channel configuration #[new] - pub fn new(entry: Vec<(i32, i32, f64)>) -> Self { + #[must_use] + pub fn new(entry: Vec<(Vec, f64)>) -> Self { Self { entry: Channel::new(entry), } @@ -36,27 +36,129 @@ impl PyChannel { /// /// Returns /// ------- - /// list(tuple(int,int,float)) : + /// list(tuple(list(int),float)) : /// list representation - pub fn into_array(&self) -> Vec<(i32, i32, f64)> { + #[must_use] + pub fn into_array(&self) -> Vec<(Vec, f64)> { self.entry.entry().to_vec() } } -/// Register submodule in parent. -pub fn register(parent_module: &Bound<'_, PyModule>) -> PyResult<()> { - let m = PyModule::new_bound(parent_module.py(), "boc")?; - m.setattr( - pyo3::intern!(m.py(), "__doc__"), - "Interface for bins, orders and channels.", - )?; - pyo3::py_run!( - parent_module.py(), - m, - "import sys; sys.modules['pineappl.channel'] = m" - ); - m.add_class::()?; - parent_module.add_submodule(&m) +/// PyO3 wrapper to :rustdoc:`pineappl::boc::Kinematics `. +#[pyclass(name = "Kinematics")] +#[derive(Clone)] +pub enum PyKinematics { + /// map to Kinematics::Scale + Scale(usize), + /// map to Kinematics::X + X(usize), +} + +impl From for Kinematics { + fn from(item: PyKinematics) -> Self { + match item { + PyKinematics::X(v) => Self::X(v), + PyKinematics::Scale(v) => Self::Scale(v), + } + } +} + +/// PyO3 wrapper to :rustdoc:`pineappl::boc::ScaleFuncForm `. 
+#[pyclass(name = "ScaleFuncForm")] +#[derive(Clone)] +pub enum PyScaleFuncForm { + /// map to ScaleFuncForm::NoScale + /// NOTE No variant is not supported in complex enums + NoScale(usize), + /// map to ScaleFuncForm::Scale + Scale(usize), + /// map to ScaleFuncForm::QuadraticSum + QuadraticSum(usize, usize), + /// map to ScaleFuncForm::QuadraticMean + QuadraticMean(usize, usize), + /// map to ScaleFuncForm::QuadraticSumOver4 + QuadraticSumOver4(usize, usize), + /// map to ScaleFuncForm::LinearMean + LinearMean(usize, usize), + /// map to ScaleFuncForm::LinearSum + LinearSum(usize, usize), + /// map to ScaleFuncForm::ScaleMax + ScaleMax(usize, usize), + /// map to ScaleFuncForm::ScaleMin + ScaleMin(usize, usize), + /// map to ScaleFuncForm::Prod + Prod(usize, usize), + /// map to ScaleFuncForm::S2plusS1half + S2plusS1half(usize, usize), + /// map to ScaleFuncForm::Pow4Sum + Pow4Sum(usize, usize), + /// map to ScaleFuncForm::WgtAvg + WgtAvg(usize, usize), + /// map to ScaleFuncForm::S2plusS1fourth + S2plusS1fourth(usize, usize), + /// map to ScaleFuncForm::ExpProd2 + ExpProd2(usize, usize), +} + +impl From for ScaleFuncForm { + fn from(item: PyScaleFuncForm) -> Self { + match item { + PyScaleFuncForm::NoScale(_) => Self::NoScale, + PyScaleFuncForm::Scale(v) => Self::Scale(v), + PyScaleFuncForm::QuadraticSum(v1, v2) => Self::QuadraticSum(v1, v2), + PyScaleFuncForm::QuadraticMean(v1, v2) => Self::QuadraticMean(v1, v2), + PyScaleFuncForm::QuadraticSumOver4(v1, v2) => Self::QuadraticSumOver4(v1, v2), + PyScaleFuncForm::LinearMean(v1, v2) => Self::LinearMean(v1, v2), + PyScaleFuncForm::LinearSum(v1, v2) => Self::LinearSum(v1, v2), + PyScaleFuncForm::ScaleMax(v1, v2) => Self::ScaleMax(v1, v2), + PyScaleFuncForm::ScaleMin(v1, v2) => Self::ScaleMin(v1, v2), + PyScaleFuncForm::Prod(v1, v2) => Self::Prod(v1, v2), + PyScaleFuncForm::S2plusS1half(v1, v2) => Self::S2plusS1half(v1, v2), + PyScaleFuncForm::Pow4Sum(v1, v2) => Self::Pow4Sum(v1, v2), + PyScaleFuncForm::WgtAvg(v1, 
v2) => Self::WgtAvg(v1, v2), + PyScaleFuncForm::S2plusS1fourth(v1, v2) => Self::S2plusS1fourth(v1, v2), + PyScaleFuncForm::ExpProd2(v1, v2) => Self::ExpProd2(v1, v2), + } + } +} + +/// PyO3 wrapper to :rustdoc:`pineappl::boc::Scales `. +#[pyclass(name = "Scales")] +pub struct PyScales { + pub(crate) scales: Scales, +} + +impl PyScales { + pub(crate) const fn new(scales: Scales) -> Self { + Self { scales } + } +} + +impl Default for PyScales { + fn default() -> Self { + Self::new(Scales { + ren: ScaleFuncForm::Scale(0), + fac: ScaleFuncForm::Scale(0), + frg: ScaleFuncForm::NoScale, + }) + } +} + +#[pymethods] +impl PyScales { + /// Constructor for `Scales` + #[new] + #[must_use] + pub fn news_scales( + ren: PyRef, + fac: PyRef, + frg: PyRef, + ) -> Self { + let ren = ren.clone().into(); + let fac = fac.clone().into(); + let frg = frg.clone().into(); + Self::new(Scales { ren, fac, frg }) + } } /// PyO3 wrapper to :rustdoc:`pineappl::boc::Order `. @@ -67,7 +169,7 @@ pub struct PyOrder { } impl PyOrder { - pub(crate) fn new(order: Order) -> Self { + pub(crate) const fn new(order: Order) -> Self { Self { order } } } @@ -86,9 +188,12 @@ impl PyOrder { /// power of :math:`\ln(\xi_r)` /// logxif : int /// power of :math:`\ln(\xi_f)` + /// logxia : int + /// power of :math:`\ln(\xi_a)` #[new] - pub fn new_order(alphas: u32, alpha: u32, logxir: u32, logxif: u32) -> Self { - Self::new(Order::new(alphas, alpha, logxir, logxif)) + #[must_use] + pub const fn new_order(alphas: u8, alpha: u8, logxir: u8, logxif: u8, logxia: u8) -> Self { + Self::new(Order::new(alphas, alpha, logxir, logxif, logxia)) } /// Tuple representation. 
@@ -103,12 +208,16 @@ impl PyOrder { /// power of :math:`\ln(\xi_r)` /// logxif : int /// power of :math:`\ln(\xi_f)` - pub fn as_tuple(&self) -> (u32, u32, u32, u32) { + /// logxia : int + /// power of :math:`\ln(\xi_a)` + #[must_use] + pub const fn as_tuple(&self) -> (u8, u8, u8, u8, u8) { ( self.order.alphas, self.order.alpha, self.order.logxir, self.order.logxif, + self.order.logxia, ) } @@ -126,10 +235,11 @@ impl PyOrder { /// numpy.ndarray(bool) /// boolean array, to be used as orders' mask #[staticmethod] + #[must_use] pub fn create_mask<'py>( orders: Vec>, - max_as: u32, - max_al: u32, + max_as: u8, + max_al: u8, logs: bool, py: Python<'py>, ) -> Bound<'py, PyArray1> { @@ -142,3 +252,27 @@ impl PyOrder { .into_pyarray_bound(py) } } + +/// Register submodule in parent. +/// +/// # Errors +/// +/// Raises an error if (sub)module is not found. +pub fn register(parent_module: &Bound<'_, PyModule>) -> PyResult<()> { + let m = PyModule::new_bound(parent_module.py(), "boc")?; + m.setattr( + pyo3::intern!(m.py(), "__doc__"), + "Interface for bins, orders and channels.", + )?; + pyo3::py_run!( + parent_module.py(), + m, + "import sys; sys.modules['pineappl.boc'] = m" + ); + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + parent_module.add_submodule(&m) +} diff --git a/pineappl_py/src/convolutions.rs b/pineappl_py/src/convolutions.rs new file mode 100644 index 000000000..57251c375 --- /dev/null +++ b/pineappl_py/src/convolutions.rs @@ -0,0 +1,101 @@ +//! Convolution interface. + +use pineappl::convolutions::{Conv, ConvType}; +use pyo3::prelude::*; + +/// PyO3 wrapper to :rustdoc:`pineappl::convolutions::ConvType `. +#[pyclass(name = "ConvType")] +#[repr(transparent)] +pub struct PyConvType { + pub(crate) convtype: ConvType, +} + +impl PyConvType { + pub(crate) const fn new(convtype: ConvType) -> Self { + Self { convtype } + } +} + +#[pymethods] +impl PyConvType { + /// Constructor. 
+ #[new] + #[must_use] + pub const fn new_convtype(polarized: bool, time_like: bool) -> Self { + Self::new(ConvType::new(polarized, time_like)) + } + + /// Returns a boolean on whether or not the convolution type is polarized + #[getter] + #[must_use] + pub const fn polarized(&self) -> bool { + matches!(self.convtype, ConvType::PolPDF | ConvType::PolFF) + } + + /// Returns a boolean on whether or not the convolution type is timelike + #[getter] + #[must_use] + pub const fn time_like(&self) -> bool { + matches!(self.convtype, ConvType::UnpolFF | ConvType::PolFF) + } +} + +/// PyO3 wrapper to :rustdoc:`pineappl::convolutions::Conv `. +#[pyclass(name = "Conv")] +#[repr(transparent)] +pub struct PyConv { + pub(crate) conv: Conv, +} + +impl PyConv { + pub(crate) const fn new(conv: Conv) -> Self { + Self { conv } + } +} + +#[pymethods] +impl PyConv { + /// Constructor. + #[new] + #[must_use] + pub fn new_conv(conv_type: PyRef, pid: i32) -> Self { + Self::new(Conv::new(conv_type.convtype, pid)) + } + + /// Return the convolution type of this convolution. + #[getter] + #[must_use] + pub const fn conv_type(&self) -> PyConvType { + PyConvType { + convtype: self.conv.conv_type(), + } + } + + /// Return the PID of this convolution. + #[getter] + #[must_use] + pub const fn pid(&self) -> i32 { + self.conv.pid() + } +} + +/// Register submodule in parent. +/// +/// # Errors +/// +/// Raises an error if (sub)module is not found. 
+pub fn register(parent_module: &Bound<'_, PyModule>) -> PyResult<()> { + let m = PyModule::new_bound(parent_module.py(), "convolutions")?; + m.setattr( + pyo3::intern!(m.py(), "__doc__"), + "Define the type of convolutions.", + )?; + pyo3::py_run!( + parent_module.py(), + m, + "import sys; sys.modules['pineappl.convolutions'] = m" + ); + m.add_class::()?; + m.add_class::()?; + parent_module.add_submodule(&m) +} diff --git a/pineappl_py/src/evolution.rs b/pineappl_py/src/evolution.rs index 4a28a3de6..3580a396c 100644 --- a/pineappl_py/src/evolution.rs +++ b/pineappl_py/src/evolution.rs @@ -1,5 +1,6 @@ //! Evolution interface. +use super::convolutions::PyConvType; use super::pids::PyPidBasis; use numpy::{IntoPyArray, PyArray1}; use pineappl::evolution::{EvolveInfo, OperatorSliceInfo}; @@ -33,7 +34,10 @@ impl PyOperatorSliceInfo { /// x-grid at the final scale /// pid_basis : PyPidBasis /// flavor basis reprentation at the initial scale + /// conv_type : PyConvType + /// the type of convolution required #[new] + #[must_use] pub fn new( fac0: f64, pids0: Vec, @@ -42,6 +46,7 @@ impl PyOperatorSliceInfo { pids1: Vec, x1: Vec, pid_basis: PyPidBasis, + conv_type: PyRef, ) -> Self { Self { info: OperatorSliceInfo { @@ -52,6 +57,7 @@ impl PyOperatorSliceInfo { pids1, x1, pid_basis: pid_basis.into(), + conv_type: conv_type.convtype, }, } } @@ -66,6 +72,20 @@ pub struct PyEvolveInfo { #[pymethods] impl PyEvolveInfo { + /// Constructor. + #[new] + #[must_use] + pub const fn new(fac1: Vec, pids1: Vec, x1: Vec, ren1: Vec) -> Self { + Self { + evolve_info: EvolveInfo { + fac1, + pids1, + x1, + ren1, + }, + } + } + /// Squared factorization scales of the `Grid`. #[getter] fn fac1<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1> { @@ -92,6 +112,10 @@ impl PyEvolveInfo { } /// Register submodule in parent. +/// +/// # Errors +/// +/// Raises an error if (sub)module is not found. 
pub fn register(parent_module: &Bound<'_, PyModule>) -> PyResult<()> { let m = PyModule::new_bound(parent_module.py(), "evolution")?; m.setattr(pyo3::intern!(m.py(), "__doc__"), "Evolution interface.")?; diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index 7061186a8..a96e6f5fd 100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -1,24 +1,18 @@ //! FK table interface. +use super::convolutions::PyConv; use super::grid::PyGrid; -use numpy::{IntoPyArray, PyArray1, PyArray4}; -use pineappl::convolutions::LumiCache; +use numpy::{IntoPyArray, PyArray1, PyArrayDyn}; +use pineappl::convolutions::ConvolutionCache; use pineappl::fk_table::{FkAssumptions, FkTable}; use pineappl::grid::Grid; use pyo3::prelude::*; -use std::collections::HashMap; +use std::collections::BTreeMap; use std::fs::File; use std::io::BufReader; use std::path::PathBuf; use std::str::FromStr; -/// PyO3 wrapper to :rustdoc:`pineappl::fk_table::FkTable `. -#[pyclass(name = "FkTable")] -#[repr(transparent)] -pub struct PyFkTable { - pub(crate) fk_table: FkTable, -} - /// PyO3 wrapper to :rustdoc:`pineappl::fk_table::FkAssumptions `. #[pyclass(name = "FkAssumptions")] #[repr(transparent)] @@ -29,25 +23,50 @@ pub struct PyFkAssumptions { #[pymethods] impl PyFkAssumptions { /// Constructor. + /// + /// # Panics + /// + /// Panics if the `assumption` is not one of the possibilities. #[new] + #[must_use] pub fn new(assumption: &str) -> Self { - PyFkAssumptions { + Self { fk_assumptions: FkAssumptions::from_str(assumption).unwrap(), } } } +/// PyO3 wrapper to :rustdoc:`pineappl::fk_table::FkTable `. +#[pyclass(name = "FkTable")] +#[repr(transparent)] +pub struct PyFkTable { + pub(crate) fk_table: FkTable, +} + #[pymethods] impl PyFkTable { /// Constructor from an existing grid. + /// + /// # Panics + /// TODO #[new] + #[must_use] pub fn new(grid: PyGrid) -> Self { Self { fk_table: FkTable::try_from(grid.grid).unwrap(), } } - /// Read from given path. 
+ /// Read an FK Table from given path. + /// + /// # Panics + /// TODO + /// + /// Parameteters + /// ------------ + /// path : str + /// path to the FK table + #[must_use] #[staticmethod] pub fn read(path: PathBuf) -> Self { Self { @@ -60,20 +79,40 @@ impl PyFkTable { /// Get cross section tensor. /// + /// # Errors + /// TODO + /// /// Returns /// ------- /// numpy.ndarray : /// 4-dimensional tensor with indixes: bin, channel, x1, x2 - pub fn table<'py>(&self, py: Python<'py>) -> PyResult>> { + pub fn table<'py>(&self, py: Python<'py>) -> PyResult>> { Ok(self.fk_table.table().into_pyarray_bound(py)) } + /// Get the type(s) of convolution(s) for the current FK table. + /// + /// Returns + /// list(PyConv): + /// list of convolution type with the corresponding PIDs + #[getter] + #[must_use] + pub fn convolutions(&self) -> Vec { + self.fk_table + .grid() + .convolutions() + .iter() + .map(|conv| PyConv { conv: conv.clone() }) + .collect() + } + /// Get number of bins. /// /// Returns /// ------- /// int : /// number of bins + #[must_use] pub fn bins(&self) -> usize { self.fk_table.grid().bin_info().bins() } @@ -84,6 +123,7 @@ impl PyFkTable { /// ------- /// numpy.ndarray /// bin normalizations + #[must_use] pub fn bin_normalizations<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1> { self.fk_table .grid() @@ -100,6 +140,7 @@ impl PyFkTable { /// ------- /// int : /// bin dimension + #[must_use] pub fn bin_dimensions(&self) -> usize { self.fk_table.grid().bin_info().dimensions() } @@ -115,6 +156,7 @@ impl PyFkTable { /// ------- /// numpy.ndarray(float) : /// left edges of bins + #[must_use] pub fn bin_left<'py>(&self, dimension: usize, py: Python<'py>) -> Bound<'py, PyArray1> { self.fk_table .grid() @@ -134,6 +176,7 @@ impl PyFkTable { /// ------- /// numpy.ndarray(float) : /// right edges of bins + #[must_use] pub fn bin_right<'py>(&self, dimension: usize, py: Python<'py>) -> Bound<'py, PyArray1> { self.fk_table .grid() @@ -142,35 +185,14 @@ impl PyFkTable { 
.into_pyarray_bound(py) } - /// Get metadata values. - /// - /// Returns - /// ------- - /// dict : - /// key, value map - pub fn key_values(&self) -> HashMap { - self.fk_table.grid().key_values().unwrap().clone() - } - - /// Set a metadata key-value pair. - /// - /// Parameters - /// ---------- - /// key : str - /// key - /// value : str - /// value - pub fn set_key_value(&mut self, key: &str, value: &str) { - self.fk_table.set_key_value(key, value); - } - /// Get channels. /// /// Returns /// ------- /// list(tuple(float,float)) : /// channel functions as pid tuples - pub fn channels(&self) -> Vec<(i32, i32)> { + #[must_use] + pub fn channels(&self) -> Vec> { self.fk_table.channels() } @@ -180,6 +202,7 @@ impl PyFkTable { /// ------- /// float : /// reference scale + #[must_use] pub fn muf2(&self) -> f64 { self.fk_table.muf2() } @@ -190,12 +213,17 @@ impl PyFkTable { /// ------- /// x_grid : numpy.ndarray(float) /// interpolation grid + #[must_use] pub fn x_grid<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1> { self.fk_table.x_grid().into_pyarray_bound(py) } /// Write to file. /// + /// # Panics + /// + /// Panics if the specified path is non-writeable (non-existent or missing permissions). + /// /// Parameters /// ---------- /// path : str @@ -209,6 +237,10 @@ impl PyFkTable { /// Write to file using lz4. /// + /// # Panics + /// + /// Panics if the specified path is non-writeable (non-existent or missing permissions). + /// /// Parameters /// ---------- /// path : str @@ -220,73 +252,83 @@ impl PyFkTable { .unwrap(); } - /// Convolve with a single distribution. + /// Set a metadata key-value pair in the FK Table. 
/// /// Parameters /// ---------- - /// pdg_id : integer - /// PDG Monte Carlo ID of the hadronic particle - /// xfx : callable - /// lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x`-grid + /// key : str + /// key + /// value : str + /// value + pub fn set_key_value(&mut self, key: &str, value: &str) { + self.fk_table.set_key_value(key, value); + } + + /// Get metadata values stored in the grid. + /// /// /// Returns /// ------- - /// numpy.ndarray(float) : - /// cross sections for all bins - #[pyo3(signature = (pdg_id, xfx, bin_indices = None, channel_mask= None))] - pub fn convolve_with_one<'py>( - &self, - pdg_id: i32, - xfx: &Bound<'py, PyAny>, - bin_indices: Option>, - channel_mask: Option>, - py: Python<'py>, - ) -> Bound<'py, PyArray1> { - let mut xfx = |id, x, q2| xfx.call1((id, x, q2)).unwrap().extract().unwrap(); - let mut alphas = |_| 1.0; - let mut lumi_cache = LumiCache::with_one(pdg_id, &mut xfx, &mut alphas); - self.fk_table - .convolve( - &mut lumi_cache, - &bin_indices.unwrap_or_default(), - &channel_mask.unwrap_or_default(), - ) - .into_pyarray_bound(py) + /// dict : + /// key, value map + #[getter] + #[must_use] + pub fn key_values(&self) -> BTreeMap { + self.fk_table.grid().metadata().clone() } - /// Convoluve grid with two different distribution. + /// Convolve the FK table with as many distributions. 
+ /// + /// # Panics + /// TODO /// /// Parameters /// ---------- - /// pdg_id1 : integer - /// PDG Monte Carlo ID of the first hadronic particle - /// xfx1 : callable - /// lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x`-grid - /// pdg_id2 : integer - /// PDG Monte Carlo ID of the second hadronic particle - /// xfx2 : callable - /// lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x`-grid + /// pdg_convs : list(PyConv) + /// list containing the types of convolutions and PID + /// xfxs : list(callable) + /// list of lhapdf-like callable with arguments `pid, x, Q2` returning x*pdf + /// bin_indices : numpy.ndarray(int) + /// A list with the indices of the corresponding bins that should be calculated. An + /// empty list means that all bins should be calculated. + /// channel_mask : numpy.ndarray(bool) + /// Mask for selecting specific channels. The value `True` means the + /// corresponding channel is included. An empty list corresponds to all channels being + /// enabled. 
/// /// Returns /// ------- /// numpy.ndarray(float) : /// cross sections for all bins - #[pyo3(signature = (pdg_id1, xfx1, pdg_id2, xfx2, bin_indices = None, channel_mask= None))] - pub fn convolve_with_two<'py>( + #[must_use] + #[pyo3(signature = (pdg_convs, xfxs, bin_indices = None, channel_mask= None))] + pub fn convolve<'py>( &self, - pdg_id1: i32, - xfx1: &Bound<'py, PyAny>, - pdg_id2: i32, - xfx2: &Bound<'py, PyAny>, + pdg_convs: Vec>, + xfxs: Vec, bin_indices: Option>, channel_mask: Option>, py: Python<'py>, ) -> Bound<'py, PyArray1> { - let mut xfx1 = |id, x, q2| xfx1.call1((id, x, q2)).unwrap().extract().unwrap(); - let mut xfx2 = |id, x, q2| xfx2.call1((id, x, q2)).unwrap().extract().unwrap(); + let mut xfx_funcs: Vec<_> = xfxs + .iter() + .map(|xfx| { + move |id: i32, x: f64, q2: f64| { + xfx.call1(py, (id, x, q2)).unwrap().extract(py).unwrap() + } + }) + .collect(); + let mut alphas = |_| 1.0; - let mut lumi_cache = - LumiCache::with_two(pdg_id1, &mut xfx1, pdg_id2, &mut xfx2, &mut alphas); + let mut lumi_cache = ConvolutionCache::new( + pdg_convs.into_iter().map(|pdg| pdg.conv.clone()).collect(), + xfx_funcs + .iter_mut() + .map(|fx| fx as &mut dyn FnMut(i32, f64, f64) -> f64) + .collect(), + &mut alphas, + ); + self.fk_table .convolve( &mut lumi_cache, @@ -307,11 +349,14 @@ impl PyFkTable { /// assumptions about the FkTable properties, declared by the user, deciding which /// optimizations are possible pub fn optimize(&mut self, assumptions: PyRef) { - self.fk_table.optimize(assumptions.fk_assumptions) + self.fk_table.optimize(assumptions.fk_assumptions); } } /// Register submodule in parent. +/// # Errors +/// +/// Raises an error if (sub)module is not found. 
pub fn register(parent_module: &Bound<'_, PyModule>) -> PyResult<()> { let m = PyModule::new_bound(parent_module.py(), "fk_table")?; m.setattr(pyo3::intern!(m.py(), "__doc__"), "FK table interface.")?; diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 2fb37c628..429105716 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -1,20 +1,24 @@ //! Grid interface. use super::bin::PyBinRemapper; -use super::boc::{PyChannel, PyOrder}; +use super::boc::{PyChannel, PyKinematics, PyOrder, PyScales}; +use super::convolutions::PyConv; use super::evolution::{PyEvolveInfo, PyOperatorSliceInfo}; use super::fk_table::PyFkTable; -use super::subgrid::{PySubgridEnum, PySubgridParams}; +use super::interpolation::PyInterp; +use super::pids::PyPidBasis; +use super::subgrid::PySubgridEnum; use itertools::izip; use ndarray::CowArray; -use numpy::{IntoPyArray, PyArray1, PyReadonlyArray4}; -use pineappl::convolutions::LumiCache; +use numpy::{IntoPyArray, PyArray1, PyArrayDyn, PyReadonlyArray4}; +use pineappl::boc::Kinematics; +use pineappl::convolutions::ConvolutionCache; use pineappl::evolution::AlphasTable; -use pineappl::grid::{Grid, Ntuple}; +use pineappl::grid::Grid; +use pineappl::pids::PidBasis; use pyo3::exceptions::PyValueError; use pyo3::prelude::*; -use pyo3::types::PyIterator; -use std::collections::HashMap; +use std::collections::BTreeMap; use std::fs::File; use std::io::BufReader; use std::path::PathBuf; @@ -29,31 +33,63 @@ pub struct PyGrid { #[pymethods] impl PyGrid { - /// Constructor. + /// Constructor to instantiate a new PineAPPL Grid. + /// + /// # Panics + /// + /// Panics when the number of PIDs in `channels` is not equal to `convolutions.len()`, or + /// `interps` and `kinematics` have different lengths or if `kinematics` are not compatible + /// with `scales`. 
/// /// Parameters /// ---------- + /// pid_basis : PidBasis + /// choice of basis which can be `Evol` or `Pdg` /// channels : list(PyChannel) /// channels /// orders : list(PyOrder) /// orders /// bin_limits : list(float) /// bin configurations - /// subgrid_params : PySubgridParams - /// subgrid parameters + /// convolutions : list(PyConv) + /// contains the types of convolution + /// interpolations : list(PyInterp) + /// types of interpolations required by each kinematic + /// kinematics : list(PyKinematics) + /// list of kinematics + /// scale_funcs : PyScales + /// `Scales` object #[new] + #[must_use] pub fn new_grid( + pid_basis: PyPidBasis, channels: Vec>, orders: Vec>, bin_limits: Vec, - subgrid_params: PySubgridParams, + convolutions: Vec>, + interpolations: Vec>, + kinematics: Vec>, + scale_funcs: PyRef, ) -> Self { Self { grid: Grid::new( - channels.iter().map(|pyc| pyc.entry.clone()).collect(), - orders.iter().map(|pyo| pyo.order.clone()).collect(), + pid_basis.into(), + channels.into_iter().map(|pyc| pyc.entry.clone()).collect(), + orders.into_iter().map(|pyo| pyo.order.clone()).collect(), bin_limits, - subgrid_params.subgrid_params, + convolutions + .into_iter() + .map(|pyx| pyx.conv.clone()) + .collect(), + interpolations + .into_iter() + .map(|pyi| pyi.interp.clone()) + .collect(), + kinematics + .into_iter() + .map(|pyk| pyk.clone().into()) + .collect(), + scale_funcs.scales.clone(), ), } } @@ -62,36 +98,33 @@ impl PyGrid { /// /// Parameters /// ---------- - /// x1 : float - /// first momentum fraction - /// x2 : float - /// second momentum fraction - /// q2 : float - /// process scale /// order : int /// order index /// observable : float /// reference point (to be binned) /// channel : int /// channel index + /// ntuple: list(float) + /// list containing information on kinematics /// weight : float /// cross section weight pub fn fill( &mut self, - x1: f64, - x2: f64, - q2: f64, order: usize, observable: f64, channel: usize, + ntuple: Vec, 
weight: f64, ) { - self.grid.fill( - order, - observable, - channel, - &Ntuple:: { x1, x2, q2, weight }, - ); + self.grid.fill(order, observable, channel, &ntuple, weight); + } + + /// Retrieve a subgrid. + #[must_use] + pub fn subgrid(&self, order: usize, bin: usize, channel: usize) -> PySubgridEnum { + PySubgridEnum { + subgrid_enum: self.grid.subgrids()[[order, bin, channel]].clone(), + } } /// Add an array to the grid. @@ -100,43 +133,28 @@ impl PyGrid { /// /// Parameters /// ---------- - /// x1s : np.array(float) - /// first momentum fraction of all events - /// x2s : np.array(float) - /// second momentum fraction of all events - /// x1s : np.array(float) - /// process scale of all events /// order : int /// order index - /// observable : float - /// reference point (to be binned) + /// observables : list(float) + /// list of reference point (to be binned) /// channel : int /// channel index + /// ntuples: list(list(float)) + /// list of `ntuple` kinematics /// weights : np.array(float) /// cross section weight for all events pub fn fill_array( &mut self, - x1s: Vec, - x2s: Vec, - q2s: Vec, order: usize, observables: Vec, channel: usize, + ntuples: Vec>, weights: Vec, ) { - for (&x1, &x2, &q2, &observable, &weight) in izip!( - x1s.iter(), - x2s.iter(), - q2s.iter(), - observables.iter(), - weights.iter(), - ) { - self.grid.fill( - order, - observable, - channel, - &Ntuple:: { x1, x2, q2, weight }, - ); + for (ntuple, &observable, &weight) in + izip!(ntuples.iter(), observables.iter(), weights.iter()) + { + self.grid.fill(order, observable, channel, ntuple, weight); } } @@ -144,71 +162,32 @@ impl PyGrid { /// /// Parameters /// ---------- - /// x1 : float - /// first momentum fraction - /// x2 : float - /// second momentum fraction - /// q2 : float - /// process scale /// order : int /// order index /// observable : float /// reference point (to be binned) + /// ntuple: list(float) + /// list containing information on kinematics /// weights : np.array(float) 
/// cross section weights, one for each channels - pub fn fill_all( - &mut self, - x1: f64, - x2: f64, - q2: f64, - order: usize, - observable: f64, - weights: Vec, - ) { - self.grid.fill_all( - order, - observable, - &Ntuple::<()> { - x1, - x2, - q2, - weight: (), - }, - &weights, - ); - } - - /// Get metadata values stored in the grid. - /// - /// - /// Returns - /// ------- - /// dict : - /// key, value map - pub fn key_values(&self) -> HashMap { - self.grid.key_values().unwrap().clone() + pub fn fill_all(&mut self, order: usize, observable: f64, ntuple: Vec, weights: Vec) { + for (channel, &weight) in weights.iter().enumerate() { + self.grid.fill(order, observable, channel, &ntuple, weight); + } } - /// Set a metadata key-value pair in the grid. + /// Set a subgrid. /// /// Parameters /// ---------- - /// key : str - /// key - /// value : str - /// value - pub fn set_key_value(&mut self, key: &str, value: &str) { - self.grid.set_key_value(key, value); - } - - /// Retrieve a subgrid. - pub fn subgrid(&self, order: usize, bin: usize, channel: usize) -> PySubgridEnum { - PySubgridEnum { - subgrid_enum: self.grid.subgrids()[[order, bin, channel]].clone(), - } - } - - /// Set a subgrid. + /// order : int + /// order index + /// bin : int + /// bin index + /// channel : int + /// channel index + /// subgrid : PySubgridEnum + /// subgrid object pub fn set_subgrid( &mut self, order: usize, @@ -221,6 +200,10 @@ impl PyGrid { /// Set the bin normalizations. /// + /// # Panics + /// + /// Panics if the size of the bins in the grid and in the `remapper` are not consistent. + /// /// Parameters /// ---------- /// remapper: BinRemapper @@ -229,14 +212,47 @@ impl PyGrid { self.grid.set_remapper(remapper.bin_remapper).unwrap(); } - /// Convolve with a single distribution. + /// Set a metadata key-value pair in the grid. 
+ /// + /// # Panics + /// TODO + /// + /// Parameters + /// ---------- + /// key : str + /// key + /// value : str + /// value + pub fn set_key_value(&mut self, key: &str, value: &str) { + self.grid + .metadata_mut() + .insert(key.to_owned(), value.to_owned()); + } + + /// Get metadata values stored in the grid. + /// + /// + /// Returns + /// ------- + /// dict : + /// key, value map + #[getter] + #[must_use] + pub fn key_values(&self) -> BTreeMap { + self.grid.metadata().clone() + } + + /// Convolve the grid with as many distributions. + /// + /// # Panics + /// TODO /// /// Parameters /// ---------- - /// pdg_id : int - /// PDG Monte Carlo ID of the hadronic particle - /// xfx : callable - /// lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x`-grid + /// pdg_convs : list(PyConv) + /// list containing the types of convolutions and PID + /// xfxs : list(callable) + /// list of lhapdf-like callable with arguments `pid, x, Q2` returning x*pdf /// alphas : callable /// lhapdf like callable with arguments `Q2` returning :math:`\alpha_s` /// order_mask : numpy.ndarray(bool) @@ -261,102 +277,111 @@ impl PyGrid { /// numpy.ndarray(float) : /// cross sections for all bins, for each scale-variation tuple (first all bins, then /// the scale variation) - #[pyo3(signature = (pdg_id, xfx, alphas, order_mask = None, bin_indices = None, channel_mask = None, xi = None))] - pub fn convolve_with_one<'py>( + #[must_use] + #[pyo3(signature = (pdg_convs, xfxs, alphas, order_mask = None, bin_indices = None, channel_mask = None, xi = None))] + pub fn convolve<'py>( &self, - pdg_id: i32, - xfx: &Bound<'py, PyAny>, - alphas: &Bound<'py, PyAny>, + pdg_convs: Vec>, + xfxs: Vec, + alphas: PyObject, order_mask: Option>, bin_indices: Option>, channel_mask: Option>, - xi: Option>, + xi: Option>, py: Python<'py>, ) -> Bound<'py, PyArray1> { - let mut xfx = |id, x, q2| xfx.call1((id, x, q2)).unwrap().extract().unwrap(); - // `(q2, )` must have the comma to make it a 
Rust tuple - let mut alphas = |q2| alphas.call1((q2,)).unwrap().extract().unwrap(); - let mut lumi_cache = LumiCache::with_one(pdg_id, &mut xfx, &mut alphas); + let mut alphas = |q2: f64| { + let result: f64 = alphas.call1(py, (q2,)).unwrap().extract(py).unwrap(); + result + }; + + let mut xfx_funcs: Vec<_> = xfxs + .iter() + .map(|xfx| { + move |id: i32, x: f64, q2: f64| { + xfx.call1(py, (id, x, q2)).unwrap().extract(py).unwrap() + } + }) + .collect(); + + let mut convolution_cache = ConvolutionCache::new( + pdg_convs.into_iter().map(|pdg| pdg.conv.clone()).collect(), + xfx_funcs + .iter_mut() + .map(|fx| fx as &mut dyn FnMut(i32, f64, f64) -> f64) + .collect(), + &mut alphas, + ); + self.grid .convolve( - &mut lumi_cache, + &mut convolution_cache, &order_mask.unwrap_or_default(), &bin_indices.unwrap_or_default(), &channel_mask.unwrap_or_default(), - &xi.unwrap_or(vec![(1.0, 1.0)]), + &xi.unwrap_or_else(|| vec![(1.0, 1.0, 0.0)]), ) .into_pyarray_bound(py) } - /// Convolve with two distributions. + /// Convolve a single subgrid `(order, bin, channel)` with the distributions. /// - /// Parameters - /// ---------- - /// pdg_id1 : int - /// PDG Monte Carlo ID of the first hadronic particle - /// xfx1 : callable - /// lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x`-grid - /// pdg_id2 : int - /// PDG Monte Carlo ID of the second hadronic particle - /// xfx2 : callable - /// lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x`-grid - /// alphas : callable - /// lhapdf like callable with arguments `Q2` returning :math:`\alpha_s` - /// order_mask : numpy.ndarray(bool) - /// Mask for selecting specific orders. The value `True` means the corresponding order - /// is included. An empty list corresponds to all orders being enabled. - /// bin_indices : numpy.ndarray(int) - /// A list with the indices of the corresponding bins that should be calculated. An - /// empty list means that all bins should be calculated. 
- /// channel_mask : numpy.ndarray(bool) - /// Mask for selecting specific channels. The value `True` means the - /// corresponding channel is included. An empty list corresponds to all channels being - /// enabled. - /// xi : list((float, float)) - /// A list with the scale variation factors that should be used to calculate - /// scale-varied results. The first entry of a tuple corresponds to the variation of - /// the renormalization scale, the second entry to the variation of the factorization - /// scale. If only results for the central scale are need the list should contain - /// `(1.0, 1.0)`. + /// # Panics /// - /// Returns - /// ------- - /// numpy.ndarray(float) : - /// cross sections for all bins, for each scale-variation tuple (first all bins, then - /// the scale variation) - #[pyo3(signature = (pdg_id1, xfx1, pdg_id2, xfx2, alphas, order_mask = None, bin_indices = None, channel_mask = None, xi = None))] - pub fn convolve_with_two<'py>( + /// TODO + #[must_use] + #[pyo3(signature = (pdg_convs, xfxs, alphas, ord, bin, channel, xi = None))] + pub fn convolve_subgrid<'py>( &self, - pdg_id1: i32, - xfx1: &Bound<'py, PyAny>, - pdg_id2: i32, - xfx2: &Bound<'py, PyAny>, - alphas: &Bound<'py, PyAny>, - order_mask: Option>, - bin_indices: Option>, - channel_mask: Option>, - xi: Option>, + pdg_convs: Vec>, + xfxs: Vec, + alphas: PyObject, + ord: usize, + bin: usize, + channel: usize, + xi: Option<(f64, f64, f64)>, py: Python<'py>, - ) -> Bound<'py, PyArray1> { - let mut xfx1 = |id, x, q2| xfx1.call1((id, x, q2)).unwrap().extract().unwrap(); - let mut xfx2 = |id, x, q2| xfx2.call1((id, x, q2)).unwrap().extract().unwrap(); - // `(q2, )` must have the comma to make it a Rust tuple - let mut alphas = |q2| alphas.call1((q2,)).unwrap().extract().unwrap(); - let mut lumi_cache = - LumiCache::with_two(pdg_id1, &mut xfx1, pdg_id2, &mut xfx2, &mut alphas); + ) -> Bound<'py, PyArrayDyn> { + let mut alphas = |q2: f64| { + let result: f64 = alphas.call1(py, 
(q2,)).unwrap().extract(py).unwrap(); + result + }; + + let mut xfx_funcs: Vec<_> = xfxs + .iter() + .map(|xfx| { + move |id: i32, x: f64, q2: f64| { + xfx.call1(py, (id, x, q2)).unwrap().extract(py).unwrap() + } + }) + .collect(); + + let mut convolution_cache = ConvolutionCache::new( + pdg_convs.into_iter().map(|pdg| pdg.conv.clone()).collect(), + xfx_funcs + .iter_mut() + .map(|fx| fx as &mut dyn FnMut(i32, f64, f64) -> f64) + .collect(), + &mut alphas, + ); + self.grid - .convolve( - &mut lumi_cache, - &order_mask.unwrap_or_default(), - &bin_indices.unwrap_or_default(), - &channel_mask.unwrap_or_default(), - &xi.unwrap_or(vec![(1.0, 1.0)]), + .convolve_subgrid( + &mut convolution_cache, + ord, + bin, + channel, + xi.unwrap_or((1.0, 1.0, 0.0)), ) .into_pyarray_bound(py) } /// Collect information for convolution with an evolution operator. /// + /// # Panics + /// + /// TODO + /// /// Parameters /// ---------- /// order_mask : numpy.ndarray(bool) @@ -365,71 +390,31 @@ impl PyGrid { /// Returns /// ------- /// PyEvolveInfo : - /// evolution information + /// evolution informations + #[must_use] pub fn evolve_info(&self, order_mask: Vec) -> PyEvolveInfo { PyEvolveInfo { evolve_info: self.grid.evolve_info(order_mask.as_slice()), } } - /// Convolve with uniform evolution operator slices. + /// Evolve the grid with as many EKOs as Convolutions. 
/// - /// Parameters - /// ---------- - /// slices : Iterable - /// list of (PyOperatorSliceInfo, 5D array) describing each convolution - /// order_mask : numpy.ndarray(bool) - /// boolean mask to activate orders - /// xi : (float, float) - /// factorization and renormalization variation - /// ren1 : numpy.ndarray(float) - /// list of renormalization scales - /// alphas : numpy.ndarray(float) - /// list with :math:`\alpha_s(Q2)` for the process scales + /// # Panics /// - /// Returns - /// ------- - /// PyFkTable : - /// produced FK table - pub fn evolve_with_slice_iter<'py>( - &self, - slices: &Bound<'py, PyIterator>, - order_mask: Vec, - xi: (f64, f64), - ren1: Vec, - alphas: Vec, - ) -> PyResult { - Ok(self - .grid - .evolve_with_slice_iter( - slices.into_iter().map(|slice| { - let (info, op) = slice - .unwrap() - .extract::<(PyOperatorSliceInfo, PyReadonlyArray4)>() - .unwrap(); - Ok::<_, std::io::Error>(( - info.info, - // TODO: avoid copying - CowArray::from(op.as_array().to_owned()), - )) - }), - &order_mask, - xi, - &AlphasTable { ren1, alphas }, - ) - .map(|fk_table| PyFkTable { fk_table }) - // TODO: avoid unwrap and convert `Result` into `PyResult` - .unwrap()) - } - - /// Convolve with two different evolution operator slices. + /// Panics when the operators returned by either slice have different dimensions than promised + /// by the corresponding [`OperatorSliceInfo`]. + /// + /// # Errors + /// + /// Raises error if either the `operator` or its `info` is incompatible with the Grid. + /// Another error is raised if the iterator from `slices` themselves return an error. 
/// /// Parameters /// ---------- - /// slices_a : Iterable - /// list of (PyOperatorSliceInfo, 5D array) describing the first convolution - /// slices_b : Iterable - /// list of (PyOperatorSliceInfo, 5D array) describing the second convolution + /// slices : list(list(tuple(PyOperatorSliceInfo, PyReadOnlyArray4))) + /// list of EKOs where each element is a list of (PyOperatorSliceInfo, 4D array) + /// describing each convolution /// order_mask : numpy.ndarray(bool) /// boolean mask to activate orders /// xi : (float, float) @@ -443,40 +428,29 @@ impl PyGrid { /// ------- /// PyFkTable : /// produced FK table - pub fn evolve_with_slice_iter2<'py>( + pub fn evolve( &self, - slices_a: &Bound<'py, PyIterator>, - slices_b: &Bound<'py, PyIterator>, + slices: Vec)>>, order_mask: Vec, - xi: (f64, f64), + xi: (f64, f64, f64), ren1: Vec, alphas: Vec, ) -> PyResult { Ok(self .grid - .evolve_with_slice_iter2( - slices_a.into_iter().map(|slice| { - let (info, op) = slice - .unwrap() - .extract::<(PyOperatorSliceInfo, PyReadonlyArray4)>() - .unwrap(); - Ok::<_, std::io::Error>(( - info.info, - // TODO: avoid copying - CowArray::from(op.as_array().to_owned()), - )) - }), - slices_b.into_iter().map(|slice| { - let (info, op) = slice - .unwrap() - .extract::<(PyOperatorSliceInfo, PyReadonlyArray4)>() - .unwrap(); - Ok::<_, std::io::Error>(( - info.info, - // TODO: avoid copying - CowArray::from(op.as_array().to_owned()), - )) - }), + .evolve( + slices + .iter() + .map(|subslice| { + subslice.iter().map(|(info, op)| { + Ok::<_, std::io::Error>(( + info.info.clone(), + // TODO: avoid copying + CowArray::from(op.as_array().to_owned()), + )) + }) + }) + .collect(), &order_mask, xi, &AlphasTable { ren1, alphas }, @@ -488,6 +462,10 @@ impl PyGrid { /// Load from file. /// + /// # Panics + /// + /// Panics if the grid specified by the path is non-existent. 
+ /// /// Parameters /// ---------- /// path : str @@ -497,6 +475,7 @@ impl PyGrid { /// ------- /// PyGrid : /// grid + #[must_use] #[staticmethod] pub fn read(path: PathBuf) -> Self { Self { @@ -506,6 +485,10 @@ impl PyGrid { /// Write to file. /// + /// # Panics + /// + /// Panics if the specified path to write the grid is non-existent or requires permission. + /// /// Parameters /// ---------- /// path : str @@ -516,6 +499,10 @@ impl PyGrid { /// Write to compressed file. /// + /// # Panics + /// + /// Panics if the specified path to write the grid is non-existent or requires permission. + /// /// Parameters /// ---------- /// path : str @@ -524,16 +511,58 @@ impl PyGrid { self.grid.write_lz4(File::create(path).unwrap()).unwrap(); } - /// Optimize content. + /// Return the convention by which the channels' PIDS are encoded. + #[getter] + #[must_use] + pub const fn pid_basis(&self) -> PyPidBasis { + match self.grid.pid_basis() { + PidBasis::Pdg => PyPidBasis::Pdg, + PidBasis::Evol => PyPidBasis::Evol, + } + } + + /// Return the convention by which the Kinematics are encoded. + #[getter] + #[must_use] + pub fn kinematics(&self) -> Vec { + self.grid + .kinematics() + .iter() + .map(|&kin| match kin { + Kinematics::X(v) => PyKinematics::X(v), + Kinematics::Scale(v) => PyKinematics::Scale(v), + }) + .collect() + } + + /// Return the convention by which the Scales are encoded. + #[getter] + #[must_use] + pub fn scales(&self) -> PyScales { + PyScales { + scales: self.grid.scales().clone(), + } + } + + /// Optimize the contents of the Grid. pub fn optimize(&mut self) { self.grid.optimize(); } /// Merge with another grid. + /// + /// # Panics + /// + /// TODO + /// + /// # Errors + /// + /// If the bin limits of `self` and `other` are different and if the bin limits of `other` can + /// not be merged with `self` an error is returned. 
pub fn merge(&mut self, other: Self) -> PyResult<()> { match self.grid.merge(other.grid) { Ok(()) => Ok(()), - Err(x) => Err(PyValueError::new_err(format!("{:?}", x))), + Err(x) => Err(PyValueError::new_err(format!("{x:?}"))), } } @@ -545,6 +574,7 @@ impl PyGrid { /// ------- /// int : /// bin dimension + #[must_use] pub fn bin_dimensions(&self) -> usize { self.grid.bin_info().dimensions() } @@ -555,6 +585,7 @@ impl PyGrid { /// ------- /// np.ndarray /// bin normalizations + #[must_use] pub fn bin_normalizations<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1> { self.grid.bin_info().normalizations().into_pyarray_bound(py) } @@ -570,6 +601,7 @@ impl PyGrid { /// ------- /// numpy.ndarray(float) : /// left edges of bins + #[must_use] pub fn bin_left<'py>(&self, dimension: usize, py: Python<'py>) -> Bound<'py, PyArray1> { self.grid.bin_info().left(dimension).into_pyarray_bound(py) } @@ -585,6 +617,7 @@ impl PyGrid { /// ------- /// numpy.ndarray(float) : /// right edges of bins + #[must_use] pub fn bin_right<'py>(&self, dimension: usize, py: Python<'py>) -> Bound<'py, PyArray1> { self.grid.bin_info().right(dimension).into_pyarray_bound(py) } @@ -595,6 +628,7 @@ impl PyGrid { /// ------- /// int : /// Number of bins + #[must_use] pub fn bins(&self) -> usize { self.grid.bin_info().bins() } @@ -605,6 +639,7 @@ impl PyGrid { /// ------- /// list(PyOrder) : /// list with perturbative orders and scale variations + #[must_use] pub fn orders(&self) -> Vec { self.grid .orders() @@ -615,14 +650,47 @@ impl PyGrid { .collect() } + /// Get the type(s) of convolution(s) for the current Grid. + /// + /// Returns + /// list(PyConv): + /// list of convolution type with the corresponding PIDs + #[getter] + #[must_use] + pub fn convolutions(&self) -> Vec { + self.grid + .convolutions() + .iter() + .map(|conv| PyConv { conv: conv.clone() }) + .collect() + } + + /// Get the interpolation specifications for the current grid. 
+ /// + /// Returns + /// list(PyInterp): + /// list of interpolation specifications + #[getter] + #[must_use] + pub fn interpolations(&mut self) -> Vec { + self.grid + .interpolations() + .iter() + .map(|interp| PyInterp { + interp: interp.clone(), + }) + .collect() + } + /// Extract channels. /// /// Returns /// ------- - /// list(list(tuple(float,float,int))) : - /// channels as tuples (pid, pid, factor) (multiple tuples can be associated to the same - /// contribution) - pub fn channels(&self) -> Vec> { + /// list(list(tuple(list[float],int))) : + /// channels as tuples (List of PIDs, factor) (multiple tuples can be associated + /// to the same contribution) + #[must_use] + pub fn channels(&self) -> Vec, f64)>> { self.grid .channels() .iter() @@ -630,6 +698,16 @@ impl PyGrid { .collect() } + /// Rotate the Grid into the specified basis + /// + /// Parameters + /// ---------- + /// pid_basis: PyPidBasis + /// PID basis of the resulting Grid + pub fn rotate_pid_basis(&mut self, pid_basis: PyPidBasis) { + self.grid.rotate_pid_basis(pid_basis.into()); + } + /// Scale all subgrids. /// /// Parameters @@ -642,28 +720,67 @@ impl PyGrid { /// Scale subgrids bin by bin. /// + /// # Panics + /// + /// TODO + /// /// Parameters /// ---------- - /// factors : numpy.ndarray[float] + /// factors : list[float] /// bin-dependent factors by which to scale pub fn scale_by_bin(&mut self, factors: Vec) { self.grid.scale_by_bin(&factors); } + /// Delete orders with the corresponding `order_indices`. Repeated indices and indices larger + /// or equal than the number of orders are ignored. + /// + /// Parameters + /// ---------- + /// order_indices : list[int] + /// list of indices of orders to be removed + pub fn delete_orders(&mut self, order_indices: Vec) { + self.grid.delete_orders(&order_indices); + } + /// Delete bins. /// + /// # Panics + /// + /// TODO + /// /// Repeated bins and those exceeding the length are ignored. 
/// /// Parameters /// ---------- - /// bin_indices : numpy.ndarray[int] - /// list of indices of bins to removed + /// bin_indices : list[int] + /// list of indices of bins to be removed pub fn delete_bins(&mut self, bin_indices: Vec) { - self.grid.delete_bins(&bin_indices) + self.grid.delete_bins(&bin_indices); + } + + /// Deletes channels with the corresponding `channel_indices`. Repeated indices and indices + /// larger or equal than the number of channels are ignored. + /// + /// Parameters + /// ---------- + /// bin_indices : list[int] + /// list of indices of bins to be removed + pub fn delete_channels(&mut self, channel_indices: Vec) { + self.grid.delete_channels(&channel_indices); + } + + /// Splits the grid such that each channel contains only a single tuple of PIDs. + pub fn split_channels(&mut self) { + self.grid.split_channels(); } } /// Register submodule in parent. +/// +/// # Errors +/// +/// Raises an error if (sub)module is not found. pub fn register(parent_module: &Bound<'_, PyModule>) -> PyResult<()> { let m = PyModule::new_bound(parent_module.py(), "grid")?; m.setattr(pyo3::intern!(m.py(), "__doc__"), "Grid interface.")?; @@ -673,6 +790,5 @@ pub fn register(parent_module: &Bound<'_, PyModule>) -> PyResult<()> { "import sys; sys.modules['pineappl.grid'] = m" ); m.add_class::()?; - m.add_class::()?; parent_module.add_submodule(&m) } diff --git a/pineappl_py/src/import_only_subgrid.rs b/pineappl_py/src/import_only_subgrid.rs deleted file mode 100644 index 3d12f58c8..000000000 --- a/pineappl_py/src/import_only_subgrid.rs +++ /dev/null @@ -1,161 +0,0 @@ -//! PyImportOnlySubgrid* interface. - -use super::subgrid::PySubgridEnum; -use numpy::PyReadonlyArray3; -use pineappl::import_only_subgrid::ImportOnlySubgridV1; -use pineappl::import_only_subgrid::ImportOnlySubgridV2; -use pineappl::sparse_array3::SparseArray3; -use pineappl::subgrid::Mu2; -use pyo3::prelude::*; - -/// PyO3 wrapper to :rustdoc:`pineappl::import_only_subgrid::ImportOnlySubgridV2 `. 
-#[pyclass(name = "ImportOnlySubgridV2")] -#[derive(Clone)] -#[repr(transparent)] -pub struct PyImportOnlySubgridV2 { - pub(crate) import_only_subgrid: ImportOnlySubgridV2, -} - -#[pymethods] -impl PyImportOnlySubgridV2 { - /// Constructor. - /// - /// Parameters - /// ---------- - /// array : numpy.ndarray(float) - /// 3D array with all weights - /// mu2_grid : list(tuple(float, float)) - /// scales grid - /// x1_grid : list(float) - /// first momentum fraction grid - /// x2_grid : list(float) - /// second momentum fraction grid - #[new] - pub fn new( - array: PyReadonlyArray3, - mu2_grid: Vec<(f64, f64)>, - x1_grid: Vec, - x2_grid: Vec, - ) -> Self { - let mut sparse_array = SparseArray3::new(mu2_grid.len(), x1_grid.len(), x2_grid.len()); - - for ((imu2, ix1, ix2), value) in array - .as_array() - .indexed_iter() - .filter(|((_, _, _), value)| **value != 0.0) - { - sparse_array[[imu2, ix1, ix2]] = *value; - } - Self { - import_only_subgrid: ImportOnlySubgridV2::new( - sparse_array, - mu2_grid - .iter() - .map(|(ren, fac)| Mu2 { - ren: *ren, - fac: *fac, - }) - .collect(), - x1_grid, - x2_grid, - ), - } - } - - /// Wrapper to match :meth:`pineappl.pineappl.PyGrid.set_subgrid()`. - /// - /// Returns - /// ------- - /// PySubgridEnum : - /// casted object - pub fn into(&self) -> PySubgridEnum { - PySubgridEnum { - subgrid_enum: self.import_only_subgrid.clone().into(), - } - } -} - -/// PyO3 wrapper to :rustdoc:`pineappl::import_only_subgrid::ImportOnlySubgridV1 `. -#[pyclass(name = "ImportOnlySubgridV1")] -#[derive(Clone)] -#[repr(transparent)] -pub struct PyImportOnlySubgridV1 { - pub(crate) import_only_subgrid: ImportOnlySubgridV1, -} - -impl PyImportOnlySubgridV1 { - pub(crate) fn new(import_only_subgrid: ImportOnlySubgridV1) -> Self { - Self { - import_only_subgrid, - } - } -} - -#[pymethods] -impl PyImportOnlySubgridV1 { - /// Constructor. 
- /// - /// Parameters - /// ---------- - /// array : numpy.ndarray(float) - /// 3D array with all weights - /// mu2_grid : list(tuple(float, float)) - /// scales grid - /// x1_grid : list(float) - /// first momentum fraction grid - /// x2_grid : list(float) - /// second momentum fraction grid - #[new] - pub fn new_import_only_subgrid( - array: PyReadonlyArray3, - q2_grid: Vec, - x1_grid: Vec, - x2_grid: Vec, - ) -> Self { - let mut sparse_array = SparseArray3::new(q2_grid.len(), x1_grid.len(), x2_grid.len()); - - for ((iq2, ix1, ix2), value) in array - .as_array() - .indexed_iter() - .filter(|((_, _, _), value)| **value != 0.0) - { - sparse_array[[iq2, ix1, ix2]] = *value; - } - - Self::new(ImportOnlySubgridV1::new( - sparse_array, - q2_grid, - x1_grid, - x2_grid, - )) - } - - /// Wrapper to match :meth:`pineappl.pineappl.PyGrid.set_subgrid()`. - /// - /// Returns - /// ------- - /// PySubgridEnum : - /// casted object - pub fn into(&self) -> PySubgridEnum { - PySubgridEnum { - subgrid_enum: self.import_only_subgrid.clone().into(), - } - } -} - -/// Register submodule in parent. -pub fn register(parent_module: &Bound<'_, PyModule>) -> PyResult<()> { - let m = PyModule::new_bound(parent_module.py(), "import_only_subgrid")?; - m.setattr( - pyo3::intern!(m.py(), "__doc__"), - "ImportOnlySubgrid* interface.", - )?; - pyo3::py_run!( - parent_module.py(), - m, - "import sys; sys.modules['pineappl.import_only_subgrid'] = m" - ); - m.add_class::()?; - m.add_class::()?; - parent_module.add_submodule(&m) -} diff --git a/pineappl_py/src/import_subgrid.rs b/pineappl_py/src/import_subgrid.rs new file mode 100644 index 000000000..de6cb64ed --- /dev/null +++ b/pineappl_py/src/import_subgrid.rs @@ -0,0 +1,78 @@ +//! PyPackedSubgrid* interface. 
+ +use super::subgrid::PySubgridEnum; +use ndarray::Dimension; +use numpy::PyReadonlyArrayDyn; +use pineappl::import_subgrid::ImportSubgridV1; +use pineappl::packed_array::PackedArray; +use pyo3::prelude::*; + +/// PyO3 wrapper to :rustdoc:`pineappl::import_subgrid::ImportSubgridV1 `. +#[pyclass(name = "ImportSubgridV1")] +#[derive(Clone)] +#[repr(transparent)] +pub struct PyImportSubgridV1 { + pub(crate) import_subgrid: ImportSubgridV1, +} + +#[pymethods] +impl PyImportSubgridV1 { + /// Constructor. + /// + /// # Panics + /// TODO + /// + /// Parameters + /// ---------- + /// array : numpy.ndarray(float) + /// `N`-dimensional array with all weights + /// node_values: list(list(float)) + /// list containing the arrays of energy scales {q1, ..., qn} and momentum fractions + /// {x1, ..., xn}. + #[new] + #[must_use] + pub fn new(array: PyReadonlyArrayDyn, node_values: Vec>) -> Self { + let mut sparse_array: PackedArray = + PackedArray::new(node_values.iter().map(Vec::len).collect()); + + for (index, value) in array + .as_array() + .indexed_iter() + .filter(|(_, value)| **value != 0.0) + { + sparse_array[index.as_array_view().to_slice().unwrap()] = *value; + } + + Self { + import_subgrid: ImportSubgridV1::new(sparse_array, node_values), + } + } + + /// Ensures that the subgrid has type `PySubgridEnum`. + #[must_use] + pub fn into(&self) -> PySubgridEnum { + PySubgridEnum { + subgrid_enum: self.import_subgrid.clone().into(), + } + } +} + +/// Register submodule in parent. +/// +/// # Errors +/// +/// Raises an error if (sub)module is not found. 
+pub fn register(parent_module: &Bound<'_, PyModule>) -> PyResult<()> { + let m = PyModule::new_bound(parent_module.py(), "import_subgrid")?; + m.setattr( + pyo3::intern!(m.py(), "__doc__"), + "Interface for packed subgrid specs.", + )?; + pyo3::py_run!( + parent_module.py(), + m, + "import sys; sys.modules['pineappl.import_subgrid'] = m" + ); + m.add_class::()?; + parent_module.add_submodule(&m) +} diff --git a/pineappl_py/src/interpolation.rs b/pineappl_py/src/interpolation.rs new file mode 100644 index 000000000..fb4e9060f --- /dev/null +++ b/pineappl_py/src/interpolation.rs @@ -0,0 +1,141 @@ +//! Interpolation interface. + +use pineappl::interpolation::{Interp, InterpMeth, Map, ReweightMeth}; +use pyo3::prelude::*; + +/// PyO3 wrapper to :rustdoc:`pineappl::interpolation::Interp `. +#[pyclass(name = "Interp")] +#[repr(transparent)] +pub struct PyInterp { + pub(crate) interp: Interp, +} + +impl PyInterp { + pub(crate) const fn new(interp: Interp) -> Self { + Self { interp } + } +} + +/// PyO3 wrapper to :rustdoc:`pineappl::interpolation::ReweightMeth `. +#[pyclass(eq, eq_int, name = "ReweightingMethod")] +#[derive(Clone, PartialEq, Eq)] +pub enum PyReweightingMethod { + /// map to ReweightMeth::NoReweight + NoReweight, + /// map to ReweightMeth::ApplGridX + ApplGridX, +} + +impl From for ReweightMeth { + fn from(value: PyReweightingMethod) -> Self { + match value { + PyReweightingMethod::NoReweight => Self::NoReweight, + PyReweightingMethod::ApplGridX => Self::ApplGridX, + } + } +} + +/// PyO3 wrapper to :rustdoc:`pineappl::interpolation::Map `. 
+#[pyclass(eq, eq_int, name = "MappingMethod")] +#[derive(Clone, PartialEq, Eq)] +pub enum PyMappingMethod { + /// map to Map::ApplGridF2 + ApplGridF2, + /// map to Map::ApplGridH0 + ApplGridH0, +} + +impl From for Map { + fn from(value: PyMappingMethod) -> Self { + match value { + PyMappingMethod::ApplGridF2 => Self::ApplGridF2, + PyMappingMethod::ApplGridH0 => Self::ApplGridH0, + } + } +} + +/// PyO3 wrapper to :rustdoc:`pineappl::interpolation::InterpMeth `. +#[pyclass(eq, eq_int, name = "InterpolationMethod")] +#[derive(Clone, PartialEq, Eq)] +pub enum PyInterpolationMethod { + /// map to InterpMeth::Lagrange + Lagrange, +} + +impl From for InterpMeth { + fn from(value: PyInterpolationMethod) -> Self { + match value { + PyInterpolationMethod::Lagrange => Self::Lagrange, + } + } +} + +#[pymethods] +impl PyInterp { + /// Constructor. + /// + /// Parameteters + /// ------------ + /// min : float + /// minimum value of the node + /// max : float + /// maximum value of the node + /// nodes : int + /// number of nodes + /// order : int + /// order of the interpolation + /// reweght_meth : Optional[PyReweightingMethod] + /// re-weighting method to be used + /// map : Optional[PyMappingMethod] + /// the type of mapping to be used + /// interpolation_meth : Optional[PyInterpolationMethod] + /// the type of interpolation to be used + #[new] + #[must_use] + #[pyo3(signature = (min, max, nodes = None, order = None, reweight_meth = None, map = None, interpolation_meth = None))] + pub fn new_interp( + min: f64, + max: f64, + nodes: Option, + order: Option, + reweight_meth: Option, + map: Option, + interpolation_meth: Option, + ) -> Self { + let default_nodes: usize = 50; + let default_order: usize = 3; + let reweight = reweight_meth.unwrap_or(PyReweightingMethod::NoReweight); + let mapping = map.unwrap_or(PyMappingMethod::ApplGridF2); + let interp_method = interpolation_meth.unwrap_or(PyInterpolationMethod::Lagrange); + + Self::new(Interp::new( + min, + max, + 
nodes.unwrap_or(default_nodes), + order.unwrap_or(default_order), + reweight.into(), + mapping.into(), + interp_method.into(), + )) + } +} + +/// Register submodule in parent. +/// +/// # Errors +/// +/// Raises an error if (sub)module is not found. +pub fn register(parent_module: &Bound<'_, PyModule>) -> PyResult<()> { + let m = PyModule::new_bound(parent_module.py(), "interpolation")?; + m.setattr(pyo3::intern!(m.py(), "__doc__"), "Interpolation submodule.")?; + pyo3::py_run!( + parent_module.py(), + m, + "import sys; sys.modules['pineappl.interpolation'] = m" + ); + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + parent_module.add_submodule(&m) +} diff --git a/pineappl_py/src/lib.rs b/pineappl_py/src/lib.rs index 5c0c5e1d2..fac23ad4e 100644 --- a/pineappl_py/src/lib.rs +++ b/pineappl_py/src/lib.rs @@ -6,10 +6,12 @@ use pyo3::prelude::*; pub mod bin; pub mod boc; +pub mod convolutions; pub mod evolution; pub mod fk_table; pub mod grid; -pub mod import_only_subgrid; +pub mod import_subgrid; +pub mod interpolation; pub mod pids; pub mod subgrid; @@ -18,10 +20,12 @@ pub mod subgrid; fn pineappl(m: &Bound<'_, PyModule>) -> PyResult<()> { bin::register(m)?; boc::register(m)?; - grid::register(m)?; - import_only_subgrid::register(m)?; + convolutions::register(m)?; evolution::register(m)?; fk_table::register(m)?; + grid::register(m)?; + interpolation::register(m)?; + import_subgrid::register(m)?; pids::register(m)?; subgrid::register(m)?; m.add("version", env!("CARGO_PKG_VERSION"))?; diff --git a/pineappl_py/src/pids.rs b/pineappl_py/src/pids.rs index 81b3b67e4..85609d166 100644 --- a/pineappl_py/src/pids.rs +++ b/pineappl_py/src/pids.rs @@ -4,8 +4,8 @@ use pineappl::pids::PidBasis; use pyo3::prelude::*; /// PyO3 wrapper to :rustdoc:`pineappl::pids::PidBasis `. -#[pyclass(name = "PidBasis")] -#[derive(Clone)] +#[pyclass(eq, eq_int, name = "PidBasis")] +#[derive(Clone, PartialEq)] pub enum PyPidBasis { /// PDG Monte Carlo IDs. 
Pdg, @@ -23,6 +23,10 @@ impl From for PidBasis { } /// Register submodule in parent. +/// +/// # Errors +/// +/// Raises an error if (sub)module is not found. pub fn register(parent_module: &Bound<'_, PyModule>) -> PyResult<()> { let m = PyModule::new_bound(parent_module.py(), "pids")?; m.setattr(pyo3::intern!(m.py(), "__doc__"), "PIDs interface.")?; diff --git a/pineappl_py/src/subgrid.rs b/pineappl_py/src/subgrid.rs index 5eb89a78a..a6b3f7d9c 100644 --- a/pineappl_py/src/subgrid.rs +++ b/pineappl_py/src/subgrid.rs @@ -1,167 +1,10 @@ //! Subgrid interface. -use ndarray::Array3; -use numpy::{IntoPyArray, PyArray1, PyArray3}; -use pineappl::subgrid::Mu2; -use pineappl::subgrid::{Subgrid, SubgridEnum, SubgridParams}; +use ndarray::ArrayD; +use numpy::{IntoPyArray, PyArrayDyn}; +use pineappl::subgrid::{Subgrid, SubgridEnum}; use pyo3::prelude::*; -/// PyO3 wrapper to :rustdoc:`pineappl::subgrid::SubgridParams ` -#[pyclass(name = "SubgridParams")] -#[derive(Clone)] -#[repr(transparent)] -pub struct PySubgridParams { - pub(crate) subgrid_params: SubgridParams, -} - -#[pymethods] -impl PySubgridParams { - /// Constructor using the defaults. - #[new] - pub fn default() -> Self { - Self { - subgrid_params: SubgridParams::default(), - } - } - - /// Set number of :math:`Q^2` bins. - /// - /// Parameters - /// ---------- - /// q2_bins : int - /// number of bins - pub fn set_q2_bins(&mut self, q2_bins: usize) { - self.subgrid_params.set_q2_bins(q2_bins); - } - - /// Set the upper limit for :math:`Q^2`. - /// - /// Parameters - /// ---------- - /// q2_max: float - /// new `q2_max` - pub fn set_q2_max(&mut self, q2_max: f64) { - self.subgrid_params.set_q2_max(q2_max); - } - - /// Set the lower limit for :math:`Q^2`. - /// - /// Parameters - /// ---------- - /// q2_min: float - /// new `q2_min` - pub fn set_q2_min(&mut self, q2_min: f64) { - self.subgrid_params.set_q2_min(q2_min); - } - - /// Set interpolation order for :math:`Q^2_{grid}`. 
- /// - /// Parameters - /// ---------- - /// q2_order : float - /// new `q2_order` - pub fn set_q2_order(&mut self, q2_order: usize) { - self.subgrid_params.set_q2_order(q2_order); - } - - /// Set reweighting. - /// - /// Parameters - /// ---------- - /// reweight : bool - /// apply reweighting? - pub fn set_reweight(&mut self, reweight: bool) { - self.subgrid_params.set_reweight(reweight); - } - - /// Set number of x bins. - /// - /// Parameters - /// ---------- - /// x_bins : int - /// number of bins - pub fn set_x_bins(&mut self, x_bins: usize) { - self.subgrid_params.set_x_bins(x_bins); - } - - /// Set :math:`x_{max}`. - /// - /// Parameters - /// ---------- - /// x_max : float - /// new `x_max` - pub fn set_x_max(&mut self, x_max: f64) { - self.subgrid_params.set_x_max(x_max); - } - - /// Set :math:`x_{min}`. - /// - /// Parameters - /// ---------- - /// x_min : float - /// new `x_min` - pub fn set_x_min(&mut self, x_min: f64) { - self.subgrid_params.set_x_min(x_min); - } - - /// Set interpolation order for :math:`x_{grid}`. - /// - /// Parameters - /// ---------- - /// x_order : float - /// new `x_order` - pub fn set_x_order(&mut self, x_order: usize) { - self.subgrid_params.set_x_order(x_order); - } -} - -/// PyO3 wrapper to :rustdoc:`pineappl::subgrid::Mu2 ` -#[pyclass(name = "Mu2")] -#[repr(transparent)] -pub struct PyMu2 { - pub(crate) mu2: Mu2, -} - -#[pymethods] -impl PyMu2 { - /// Constructor. 
- /// - /// Parameters - /// ---------- - /// ren : float - /// renormalization scale - /// fac : float - /// factorization scale - #[new] - pub fn new(ren: f64, fac: f64) -> Self { - Self { - mu2: Mu2 { ren, fac }, - } - } - - #[getter] - fn ren(&self) -> PyResult { - Ok(self.mu2.ren) - } - - #[setter] - fn set_ren(&mut self, value: f64) -> PyResult<()> { - self.mu2.ren = value; - Ok(()) - } - - #[getter] - fn fac(&self) -> PyResult { - Ok(self.mu2.fac) - } - - #[setter] - fn set_fac(&mut self, value: f64) -> PyResult<()> { - self.mu2.fac = value; - Ok(()) - } -} - /// PyO3 wrapper to :rustdoc:`pineappl::subgrid::SubgridEnum ` #[pyclass(name = "SubgridEnum")] #[derive(Clone)] @@ -182,38 +25,45 @@ impl PySubgridEnum { self.subgrid_enum.scale(factor); } - /// Return the dense array of the subgrid. - pub fn to_array3<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray3> { - Array3::from(&self.subgrid_enum).into_pyarray_bound(py) - } - - /// Clone. - pub fn into(&self) -> Self { - self.clone() + /// Get the values of nodes used for the subgrids + #[getter] + pub fn node_values(&mut self) -> Vec> { + self.subgrid_enum.node_values() } - /// Return the array of mu2 objects. - pub fn mu2_grid(&self) -> Vec { - self.subgrid_enum - .mu2_grid() - .iter() - .cloned() - .map(|mu2| PyMu2 { mu2 }) - .collect() + /// Get the shape of the subgrids + #[getter] + pub fn shape(&mut self) -> Vec { + self.subgrid_enum.shape().to_vec() } - /// Return the array of x1. - pub fn x1_grid<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1> { - PyArray1::from_slice_bound(py, &self.subgrid_enum.x1_grid()) + /// Return the dense array of the subgrid. + #[must_use] + pub fn to_array<'py>( + &mut self, + py: Python<'py>, + shape: Vec, + ) -> Bound<'py, PyArrayDyn> { + let mut array_subgrid = ArrayD::::zeros(shape); + + for (index, value) in self.subgrid_enum.indexed_iter() { + array_subgrid[index.as_slice()] = value; + } + array_subgrid.into_pyarray_bound(py) } - /// Return the array of x2. 
- pub fn x2_grid<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1> { - PyArray1::from_slice_bound(py, &self.subgrid_enum.x2_grid()) + /// Clone. + #[must_use] + pub fn into(&self) -> Self { + self.clone() } } /// Register submodule in parent. +/// +/// # Errors +/// +/// Raises Errors if (sub-)module is not found. pub fn register(parent_module: &Bound<'_, PyModule>) -> PyResult<()> { let m = PyModule::new_bound(parent_module.py(), "subgrid")?; m.setattr(pyo3::intern!(m.py(), "__doc__"), "Subgrid interface.")?; @@ -223,7 +73,5 @@ pub fn register(parent_module: &Bound<'_, PyModule>) -> PyResult<()> { "import sys; sys.modules['pineappl.subgrid'] = m" ); m.add_class::()?; - m.add_class::()?; - m.add_class::()?; parent_module.add_submodule(&m) } diff --git a/pineappl_py/tests/conftest.py b/pineappl_py/tests/conftest.py index 12887d41b..1d861840c 100644 --- a/pineappl_py/tests/conftest.py +++ b/pineappl_py/tests/conftest.py @@ -1,9 +1,22 @@ +import numpy as np import pytest +import subprocess +from typing import List + +from pineappl.boc import Channel, Kinematics, ScaleFuncForm, Scales, Order +from pineappl.convolutions import Conv +from pineappl.grid import Grid +from pineappl.interpolation import ( + Interp, + InterpolationMethod, + MappingMethod, + ReweightingMethod, +) +from pineappl.pids import PidBasis class PDF: - def xfxQ(self, pid, x, q): - return self.xfxQ2(pid, x, q**2) + """PDF class whose attributes are some toy PDF functions.""" def xfxQ2(self, pid, x, q2): if pid in range(-6, 6): @@ -11,10 +24,144 @@ def xfxQ2(self, pid, x, q2): else: return 0.0 + def xfxQ(self, pid, x, q): + return self.xfxQ2(pid, x, q**2) + def alphasQ(self, q): return 1.0 + # Define the Toy Polarized PDF set + def polarized_pdf(self, pid, x, q2): + return 2.0 + + # Define the Toy Unpolarized PDF set + def unpolarized_pdf(self, pid, x, q2): + return 1.0 + + +class FakeGrid: + """Class that mocks a PineAPPL grid. 
This should contain functions + that return all the possible number of convolutions. + + TODO: Expose the index that defines the `ScaleFuncForm`. + """ + + def grid_with_generic_convolution( + self, + nb_convolutions: int, + channels: List[Channel], + orders: List[Order], + convolutions: List[Conv], + bins: List[float] = [1e-7, 1e-3, 1], + q2min: float = 1e2, + q2max: float = 1e8, + q2nodes: int = 40, + xmin: float = 2e-7, + xmax: float = 1, + xnodes: int = 50, + ) -> Grid: + """A function to generate fake GRIDs that can take any number of convolutions. + Note that the `nb_convolutions` can be different from the number of convolution + types passed to `convolutions`. Indeed, if all the types of convolutions are + the same, then only one single element can be passed to `convolutions`. + """ + kinematics = [ + Kinematics.Scale(0), # Scale + Kinematics.X(0), # momentum fraction x + ] + # Define the interpolation specs for each item of the Kinematics + interpolations = [ + Interp( + min=q2min, + max=q2max, + nodes=q2nodes, + order=3, + reweight_meth=ReweightingMethod.NoReweight, + map=MappingMethod.ApplGridH0, + interpolation_meth=InterpolationMethod.Lagrange, + ), # Interpolation on the Scale + Interp( + min=xmin, + max=xmax, + nodes=xnodes, + order=3, + reweight_meth=ReweightingMethod.ApplGridX, + map=MappingMethod.ApplGridF2, + interpolation_meth=InterpolationMethod.Lagrange, + ), # Interpolation on momentum fraction x + ] + + # Extend the Kinematics and Interpolations + if nb_convolutions > 1: + for i in range(1, nb_convolutions): + kinematics.append(Kinematics.X(i)) + interpolations.append( + Interp( + min=xmin, + max=xmax, + nodes=xnodes, + order=3, + reweight_meth=ReweightingMethod.ApplGridX, + map=MappingMethod.ApplGridF2, + interpolation_meth=InterpolationMethod.Lagrange, + ) + ) + + # Construct the `Scales` object + fragmentation_scale = ( + ScaleFuncForm.Scale(0) if nb_convolutions >= 3 else ScaleFuncForm.NoScale(0) + ) + scale_funcs = Scales( + 
ren=ScaleFuncForm.Scale(0), + fac=ScaleFuncForm.Scale(0), + frg=fragmentation_scale, + ) + + return Grid( + pid_basis=PidBasis.Evol, + channels=channels, + orders=orders, + bin_limits=np.array(bins), + convolutions=convolutions, + interpolations=interpolations, + kinematics=kinematics, + scale_funcs=scale_funcs, + ) + @pytest.fixture def pdf(): return PDF() + + +@pytest.fixture +def fake_grids(): + return FakeGrid() + + +@pytest.fixture +def download_objects(tmp_path_factory): + def _download_fk(objname: str) -> None: + download_dir = tmp_path_factory.mktemp("data") + file_path = download_dir / f"{objname}" + args = [ + "wget", + "--no-verbose", + "--no-clobber", + "-P", + f"{download_dir}", + f"https://data.nnpdf.science/pineappl/test-data/{objname}", + ] + + try: + _ = subprocess.run( + args, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + return file_path + except OSError as error: + msg = f"Failed to execute the command {args}." + raise EnvironmentError(msg) from error + + return _download_fk diff --git a/pineappl_py/tests/test_bin.py b/pineappl_py/tests/test_bin.py index 66044c782..1bf06a8ea 100644 --- a/pineappl_py/tests/test_bin.py +++ b/pineappl_py/tests/test_bin.py @@ -1,13 +1,50 @@ import numpy as np -import pineappl import pytest +from pineappl.boc import Channel, Order +from pineappl.bin import BinRemapper +from pineappl.convolutions import Conv, ConvType + class TestBinRemapper: def test_init(self): - br = pineappl.bin.BinRemapper(np.array([1.0]), [(2, 3)]) + br = BinRemapper(np.array([1.0]), [(2, 3)]) - assert isinstance(br, pineappl.bin.BinRemapper) + assert isinstance(br, BinRemapper) with pytest.raises(AttributeError): br._bla() + + def test_binremapper(self, fake_grids): + h = ConvType(polarized=True, time_like=False) + h_conv = Conv(conv_type=h, pid=2212) + convolutions = [h_conv] + + down_channel = [([1], 1.0)] # DIS-case + up_channel = [([2], 1.0)] # DIS-case + channels = [Channel(down_channel), Channel(up_channel)] + + orders = 
[Order(3, 0, 0, 0, 0)] + g = fake_grids.grid_with_generic_convolution( + nb_convolutions=1, + channels=channels, + orders=orders, + convolutions=convolutions, + bins=np.linspace(1e-2, 1, num=20), + ) + + # Extract the left & right bin limits and redefine the normalization + bin_dims = g.bin_dimensions() + bin_limits = [ + (left, right) + for left, right in zip(g.bin_left(bin_dims - 1), g.bin_right(bin_dims - 1)) + ] + normalizations = [10.0 for _ in g.bin_normalizations()] + + remapper = BinRemapper(np.array(normalizations), bin_limits) + # Modify the bin normalization + g.set_remapper(remapper) + new_normalizations = g.bin_normalizations() + + # Check that the bin normalizations have been updated + np.testing.assert_allclose(new_normalizations, normalizations) diff --git a/pineappl_py/tests/test_boc.py b/pineappl_py/tests/test_boc.py index 9b00171d5..5a1991cb6 100644 --- a/pineappl_py/tests/test_boc.py +++ b/pineappl_py/tests/test_boc.py @@ -1,7 +1,72 @@ -import pineappl +import numpy as np +import pytest +from pineappl.boc import Channel, Kinematics, Order, ScaleFuncForm class TestChannel: def test_init(self): - le = pineappl.boc.Channel([(2, 2, 0.5)]) - assert isinstance(le, pineappl.boc.Channel) + le = Channel([([2, 2], 0.5)]) + assert isinstance(le, Channel) + assert le.into_array() == [([2, 2], 0.5)] + + +class TestKinematics: + @pytest.mark.parametrize( + "kintype, argument", + [ + ("Scale", 0), + ("Scale", 1), + ("Scale", 2), + ("X", 0), + ("X", 1), + ("X", 2), + ], + ) + def test_init(self, kintype: str, argument: int): + kin_method = getattr(Kinematics, kintype) + result = kin_method(argument) + assert isinstance(result, Kinematics) + + +class TestScaleFuncForm: + @pytest.mark.parametrize( + "scaletype, argument", + [ + ("NoScale", [0]), + ("Scale", [0]), + ("QuadraticSum", [0, 1]), + ("QuadraticMean", [0, 1]), + ("QuadraticSumOver4", [0, 1]), + ("LinearMean", [0, 1]), + ("LinearSum", [0, 1]), + ("ScaleMax", [0, 1]), + ("ScaleMin", [0, 1]), + ("Prod", [0, 
1]), + ("S2plusS1half", [0, 1]), + ("Pow4Sum", [0, 1]), + ("WgtAvg", [0, 1]), + ("S2plusS1fourth", [0, 1]), + ("ExpProd2", [0, 1]), + ], + ) + def test_init(self, scaletype: ScaleFuncForm, argument: list): + scale_method = getattr(ScaleFuncForm, scaletype) + result = scale_method(*argument) + assert isinstance(result, ScaleFuncForm) + + +class TestOrder: + def create_order(self, args: tuple = (2, 1, 0, 1, 0)) -> Order: + return Order(*args) + + def test_init(self): + args = (2, 1, 0, 1, 0) + o = self.create_order(args=args) + + assert isinstance(o, Order) + assert o.as_tuple() == args + + def test_mask(self): + o = self.create_order() + mask = o.create_mask(orders=[o], max_as=2, max_al=1, logs=True) + assert np.all(mask) diff --git a/pineappl_py/tests/test_convolutions.py b/pineappl_py/tests/test_convolutions.py new file mode 100644 index 000000000..6f1e2cd4a --- /dev/null +++ b/pineappl_py/tests/test_convolutions.py @@ -0,0 +1,26 @@ +import pytest +from pineappl.convolutions import Conv, ConvType + + +class TestConvolutions: + """Test that the getter methods are returning the exptected values. + For more realistic tests, see `test_grid`. + """ + + @pytest.mark.parametrize( + "polarized, time_like", + [ + (False, False), + (True, True), + (True, False), + (False, True), + ], + ) + def test_init(self, polarized: bool, time_like: bool): + conv_type = ConvType(polarized=polarized, time_like=time_like) + convolutions = Conv(conv_type=conv_type, pid=2212) + + assert conv_type.polarized == polarized + assert conv_type.time_like == time_like + assert convolutions.conv_type.polarized == polarized + assert convolutions.conv_type.time_like == time_like diff --git a/pineappl_py/tests/test_evolution.py b/pineappl_py/tests/test_evolution.py new file mode 100644 index 000000000..d5ab7e664 --- /dev/null +++ b/pineappl_py/tests/test_evolution.py @@ -0,0 +1,38 @@ +"""Test module for the interface of the `evolution`. 
+ +It checks the cases in which we have evolve with one, +two, and three (general) EKOs. +""" + +import numpy as np +from pineappl.convolutions import ConvType +from pineappl.evolution import EvolveInfo, OperatorSliceInfo +from pineappl.pids import PidBasis + + +class TestEvolution: + def test_evolveinfo(self): + evinfo = EvolveInfo( + fac1=[0.5, 1.0, 2.0], + pids1=[-2, 0, 2], + x1=[1e-3, 0.5, 1], + ren1=[0.5, 1.0, 2.0], + ) + np.testing.assert_array_equal(evinfo.fac1, [0.5, 1.0, 2.0]) + np.testing.assert_array_equal(evinfo.pids1, [-2, 0, 2]) + np.testing.assert_array_equal(evinfo.x1, [1e-3, 0.5, 1.0]) + np.testing.assert_array_equal(evinfo.fac1, [0.5, 1.0, 2.0]) + + def test_init_operatorsliceinfo(self): + info = OperatorSliceInfo( + fac0=1.0, + pids0=[], + x0=[], + fac1=1.0, + pids1=[], + x1=[], + pid_basis=PidBasis.Pdg, + conv_type=ConvType(polarized=False, time_like=False), + ) + + assert isinstance(info, OperatorSliceInfo) diff --git a/pineappl_py/tests/test_fk_table.py b/pineappl_py/tests/test_fk_table.py index 5adccf11c..8a7b7cdb9 100644 --- a/pineappl_py/tests/test_fk_table.py +++ b/pineappl_py/tests/test_fk_table.py @@ -1,84 +1,174 @@ +"""Test module for the interface of the `fk_table`. + +It checks the cases in which we have one, two, and +three (general) convolutions. 
+""" + import numpy as np +import tempfile -import pineappl +from pineappl.boc import Channel, Order +from pineappl.convolutions import Conv, ConvType +from pineappl.fk_table import FkAssumptions, FkTable +from pineappl.import_subgrid import ImportSubgridV1 class TestFkTable: - def fake_grid(self, bins=None): - channels = [pineappl.boc.Channel([(1, 21, 1.0)])] - orders = [pineappl.grid.Order(0, 0, 0, 0)] - bin_limits = np.array([1e-7, 1e-3, 1] if bins is None else bins, dtype=float) - subgrid_params = pineappl.subgrid.SubgridParams() - g = pineappl.grid.Grid(channels, orders, bin_limits, subgrid_params) - return g - - def test_convolve_with_one(self): - g = self.fake_grid() + def test_convolve(self, fake_grids): + # Define convolution types and the initial state hadrons + # We consider an initial state Polarized Proton + h = ConvType(polarized=True, time_like=False) + h_conv = Conv(conv_type=h, pid=2212) + # The length of the convolutions has to match the nb of hadrons + convolutions = [h_conv] + # We define the PIDs of the partons out of the Proton + down_channel = [([1], 1.0)] # DIS-case + up_channel = [([2], 1.0)] # DIS-case + channels = [Channel(down_channel), Channel(up_channel)] + # Now we define the perturbative orders + orders = [Order(0, 0, 0, 0, 0)] + g = fake_grids.grid_with_generic_convolution( + nb_convolutions=1, + channels=channels, + orders=orders, + convolutions=convolutions, + ) # DIS grid xs = np.linspace(0.5, 1.0, 5) vs = xs.copy() - subgrid = pineappl.import_only_subgrid.ImportOnlySubgridV1( - vs[np.newaxis, :, np.newaxis], - np.array([90.0]), - xs, - np.array([1.0]), + q2_values = np.array([90.0]) + subgrid = ImportSubgridV1( + array=vs[np.newaxis, :], # DIS shape: (len(q2), len(x_grid)) + node_values=[q2_values, xs], ) g.set_subgrid(0, 0, 0, subgrid.into()) - fk = pineappl.fk_table.FkTable(g) + + # Convert the Grid -> FkTable + fk = FkTable(g) + + # Test a simple convolution of the FK table np.testing.assert_allclose( - 
fk.convolve_with_one(2212, lambda pid, x, q2: 0.0), + fk.convolve( + pdg_convs=[h_conv], + xfxs=[lambda pid, x, q2: 0.0], + ), [0.0] * 2, ) np.testing.assert_allclose( - fk.convolve_with_one(2212, lambda pid, x, q2: 1), + fk.convolve( + pdg_convs=[h_conv], + xfxs=[lambda pid, x, q2: 1.0], + ), [5e7 / 9999, 0.0], ) - info = pineappl.evolution.OperatorSliceInfo( - 1.0, [], [], 1.0, [], [], pineappl.pids.PidBasis.Pdg + # Test writing/dumping the FK table into disk + with tempfile.TemporaryDirectory() as tmpdir: + fk.write(f"{tmpdir}/toy_fktable.pineappl") + fk.write_lz4(f"{tmpdir}/toy_fktable.pineappl.lz4") + + def test_fktable( + self, + download_objects, + fkname: str = "FKTABLE_CMSTTBARTOT8TEV-TOPDIFF8TEVTOT.pineappl.lz4", + ): + fk_table = download_objects(f"{fkname}") + fk = FkTable.read(fk_table) + + assert fk.table().shape == (1, 51, 34, 34) + np.testing.assert_allclose(fk.muf2(), 2.7224999999999997) + + # Check the various aspects of the Bins + assert fk.bins() == 1 + np.testing.assert_allclose(fk.bin_normalizations(), [1.0]) + np.testing.assert_allclose(fk.bin_right(dimension=0), [1.0]) + np.testing.assert_allclose(fk.bin_left(dimension=0), [0.0]) + + # Check the various aspects of the Channels + channels = fk.channels() + assert len(channels) == 51 + assert [21, 200] in channels + + # Check the contents of the x-grid + x_grid = fk.x_grid() + assert x_grid.size == 34 + np.testing.assert_allclose(x_grid[0], 1.57456056e-04) + + # Test FK optimization + assumption = FkAssumptions("Nf6Sym") + fk.optimize(assumption) + + def test_unpolarized_convolution( + self, + pdf, + download_objects, + fkname: str = "FKTABLE_CMSTTBARTOT8TEV-TOPDIFF8TEVTOT.pineappl.lz4", + ): + """Check the convolution of an actual FK table that involves two + symmetrical unpolarized protons: + """ + expected_results = [3.72524538e04] + fk_table = download_objects(f"{fkname}") + fk = FkTable.read(fk_table) + + # Convolution object of the 1st hadron - Polarized + h = ConvType(polarized=False, 
time_like=False) + h_conv = Conv(conv_type=h, pid=2212) + + np.testing.assert_allclose( + fk.convolve( + pdg_convs=[h_conv, h_conv], + xfxs=[pdf.unpolarized_pdf, pdf.unpolarized_pdf], + ), + expected_results, ) - # TODO: write a better test - try: - g.evolve_with_slice_iter( - iter( - [ - (info, np.ndarray([0, 0, 0, 0])), - (info, np.ndarray([0, 0, 0, 0])), - ] - ), - np.array([], dtype=bool), - (1.0, 1.0), - [], - [], - ) - - assert False - except: # noqa: E722 - assert True - - # TODO: write a better test - try: - g.evolve_with_slice_iter2( - iter( - [ - (info, np.ndarray([0, 0, 0, 0])), - (info, np.ndarray([0, 0, 0, 0])), - ] - ), - iter( - [ - (info, np.ndarray([0, 0, 0, 0])), - (info, np.ndarray([0, 0, 0, 0])), - ] - ), - np.array([], dtype=bool), - (1.0, 1.0), - [], - [], - ) - - assert False - except: # noqa: E722 - assert True + def test_polarized_convolution( + self, + pdf, + download_objects, + fkname: str = "FKTABLE_STAR_WMWP_510GEV_WM-AL-POL.pineappl.lz4", + ): + """Check the convolution of an actual FK table that involves two + different initial states: + - 1st hadron: polarized proton + - 2nd hadron: unpolarized proton + """ + expected_results = [ + -1.00885071e6, + -2.40862657e5, + -1.66407218e5, + -2.96098362e5, + -5.67594297e5, + +6.59245015e4, + ] + fk_table = download_objects(f"{fkname}") + fk = FkTable.read(fk_table) + + # Check the FK table convolutions + convolutions = fk.convolutions + assert len(convolutions) == 2 + assert convolutions[0].conv_type.polarized + assert not convolutions[0].conv_type.time_like + assert not convolutions[1].conv_type.polarized + assert not convolutions[1].conv_type.time_like + # Check that the initial states are protons + assert convolutions[0].pid == 2212 + assert convolutions[1].pid == 2212 + + # Convolution object of the 1st hadron - Polarized + h1 = ConvType(polarized=True, time_like=False) + h1_conv = Conv(conv_type=h1, pid=2212) + + # Convolution object of the 2nd hadron - Unpolarized + h2 = 
ConvType(polarized=False, time_like=False) + h2_conv = Conv(conv_type=h2, pid=2212) + + np.testing.assert_allclose( + fk.convolve( + pdg_convs=[h1_conv, h2_conv], + xfxs=[pdf.polarized_pdf, pdf.unpolarized_pdf], + ), + expected_results, + ) diff --git a/pineappl_py/tests/test_grid.py b/pineappl_py/tests/test_grid.py index fbc4d17dd..32d84fdbf 100644 --- a/pineappl_py/tests/test_grid.py +++ b/pineappl_py/tests/test_grid.py @@ -1,77 +1,211 @@ +import itertools import numpy as np import pytest +import tempfile -import pineappl +from numpy.random import Generator, PCG64 +from typing import List +from pineappl.bin import BinRemapper +from pineappl.boc import Channel, Kinematics, Scales, Order +from pineappl.convolutions import Conv, ConvType +from pineappl.evolution import OperatorSliceInfo +from pineappl.fk_table import FkTable +from pineappl.grid import Grid +from pineappl.import_subgrid import ImportSubgridV1 +from pineappl.pids import PidBasis -class TestOrder: - def test_init(self): - args = (2, 1, 0, 1) - o = pineappl.grid.Order(*args) +# Construct the type of convolutions and the convolution object +# We assume unpolarized protons in the initial state +TYPECONV = ConvType(polarized=False, time_like=False) +CONVOBJECT = Conv(conv_type=TYPECONV, pid=2212) - assert isinstance(o, pineappl.grid.Order) - assert o.as_tuple() == args +# Construct the Channel and Order objetcs +UP_ANTIUP_CHANNEL = [([2, -2], 0.1)] +CHANNELS = [Channel(UP_ANTIUP_CHANNEL)] +ORDERS = [Order(3, 0, 0, 0, 0)] + +# Testing specs for Convolution checks. Each element of the list is +# a tuple with two elements where the first element is a dictionary +# whose keys are the arguments of the `convolve` function and the +# second element is the expected results. 
+REF_VALUE = 5e6 / 9999 +TESTING_SPECS = [ + ( + { + "pdg_convs": [CONVOBJECT, CONVOBJECT], + "xfxs": [lambda pid, x, q2: 0.0, lambda pid, x, q2: 0.0], + "alphas": lambda q2: 0.0, + }, + [0.0] * 2, + ), # fixed alphas(Q2) == 0.0 + ( + { + "pdg_convs": [CONVOBJECT, CONVOBJECT], + "xfxs": [lambda pid, x, q2: 1.0, lambda pid, x, q2: 1.0], + "alphas": lambda q2: 1.0, + }, + [REF_VALUE, 0.0], + ), # fixed alphas(Q2) == 1.0 + ( + { + "pdg_convs": [CONVOBJECT, CONVOBJECT], + "xfxs": [lambda pid, x, q2: 1.0, lambda pid, x, q2: 1.0], + "alphas": lambda q2: 2.0, + }, + [2**3 * REF_VALUE, 0.0], + ), # fixed alphas(Q2) == 2.0 + ( + { + "pdg_convs": [CONVOBJECT, CONVOBJECT], + "xfxs": [lambda pid, x, q2: 1.0, lambda pid, x, q2: 1.0], + "alphas": lambda q2: 1.0, + "bin_indices": [0], + }, + [REF_VALUE], + ), # block first Bin without argument + ( + { + "pdg_convs": [CONVOBJECT, CONVOBJECT], + "xfxs": [lambda pid, x, q2: 1.0, lambda pid, x, q2: 1.0], + "alphas": lambda q2: 1.0, + "bin_indices": [0], + "order_mask": [False], + }, + [0.0], + ), # block first Bin with order_mask + ( + { + "pdg_convs": [CONVOBJECT, CONVOBJECT], + "xfxs": [lambda pid, x, q2: 1.0, lambda pid, x, q2: 1.0], + "alphas": lambda q2: 1.0, + "bin_indices": [0], + "channel_mask": [False], + }, + [0.0], + ), # block first Bin with channel_mask + ( + { + "pdg_convs": [CONVOBJECT, CONVOBJECT], + "xfxs": [lambda pid, x, q2: 1.0, lambda pid, x, q2: 1.0], + "alphas": lambda q2: 1.0, + "bin_indices": [1], + }, + [0.0], + ), # second Bin is empty +] + +# Define the raw and target PIDS for testing the Evolution +EVOL_BASIS_PIDS = ( + 22, + 100, + 21, + 200, + 203, + 208, + 215, + 224, + 235, + 103, + 108, + 115, + 124, + 135, +) + +TARGET_PIDS = [22, -6, -5, -4, -3, -2, -1, 21, 1, 2, 3, 4, 5, 6] + +# Results from consecutively filling and convolving grids +FILL_CONV_RESUTLS = [ + 3.88554594e3, + 3.97251851e3, + 4.09227318e3, +] + +# Define some default kinematics +XGRID = np.geomspace(1e-5, 1, 20) +Q2GRID = 
np.geomspace(1e3, 1e5, 10) class TestGrid: - def fake_grid(self, bins=None): - channels = [pineappl.boc.Channel([(1, 21, 0.1)])] - orders = [pineappl.grid.Order(3, 0, 0, 0)] - bin_limits = np.array([1e-7, 1e-3, 1] if bins is None else bins, dtype=float) - subgrid_params = pineappl.subgrid.SubgridParams() - g = pineappl.grid.Grid(channels, orders, bin_limits, subgrid_params) - return g - - def test_init(self): - g = self.fake_grid() - assert isinstance(g, pineappl.grid.Grid) - # orders + def test_init(self, fake_grids): + g = fake_grids.grid_with_generic_convolution( + nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + ) + assert isinstance(g, Grid) assert len(g.orders()) == 1 - assert g.orders()[0].as_tuple() == (3, 0, 0, 0) + assert g.orders()[0].as_tuple() == (3, 0, 0, 0, 0) + + def test_channels(self, fake_grids): + g = fake_grids.grid_with_generic_convolution( + nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + ) + assert len(g.channels()) == 1 + assert g.channels()[0] == UP_ANTIUP_CHANNEL + + def test_write(self, fake_grids): + g = fake_grids.grid_with_generic_convolution( + nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + ) - def test_set_subgrid(self): - g = self.fake_grid() + # Test writing/dumping the FK table into disk + with tempfile.TemporaryDirectory() as tmpdir: + g.write(f"{tmpdir}/toy_grid.pineappl") + g.write_lz4(f"{tmpdir}/toy_grid.pineappl.lz4") + + def test_set_subgrid(self, fake_grids): + # Test a proper DIS-case + g = fake_grids.grid_with_generic_convolution( + nb_convolutions=1, + channels=[Channel([([2], 0.1)])], + orders=ORDERS, + convolutions=[CONVOBJECT], + ) - # DIS grid xs = np.linspace(0.1, 1.0, 5) vs = np.random.rand(len(xs)) - subgrid = pineappl.import_only_subgrid.ImportOnlySubgridV1( - vs[np.newaxis, :, np.newaxis], - np.array([90.0]), - np.array(xs), - np.array([1.0]), + subgrid = 
ImportSubgridV1( + array=vs[np.newaxis, :], + node_values=[np.array([90.0]), xs], ) g.set_subgrid(0, 0, 0, subgrid.into()) - # let's mix it for fun with an hadronic one - x1s = np.linspace(0.1, 1, 2) - x2s = np.linspace(0.5, 1, 2) + xs = np.linspace(0.1, 1, 2) Q2s = np.linspace(10, 20, 2) - subgrid = pineappl.import_only_subgrid.ImportOnlySubgridV1( - np.random.rand(len(Q2s), len(x1s), len(x2s)), Q2s, x1s, x2s + subgrid = ImportSubgridV1( + array=np.random.rand(len(Q2s), len(xs)), + node_values=[Q2s, xs], ) g.set_subgrid(0, 1, 0, subgrid.into()) g.optimize() - def test_set_key_value(self): - g = self.fake_grid() - g.set_key_value("bla", "blub") - g.set_key_value('"', "'") - g.set_key_value("äöü", "ß\\") - - def test_bins(self): - g = self.fake_grid() + def test_bins(self, fake_grids): + g = fake_grids.grid_with_generic_convolution( + nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + ) # 1D normalizations = np.array([1.0, 1.0]) limits = [(1, 1), (2, 2)] - remapper = pineappl.bin.BinRemapper(normalizations, limits) + remapper = BinRemapper(normalizations, limits) g.set_remapper(remapper) assert g.bin_dimensions() == 1 np.testing.assert_allclose(g.bin_left(0), [1, 2]) np.testing.assert_allclose(g.bin_right(0), [1, 2]) # 2D limits = [(1, 2), (2, 3), (2, 4), (3, 5)] - remapper = pineappl.bin.BinRemapper(normalizations, limits) + remapper = BinRemapper(normalizations, limits) g.set_remapper(remapper) assert g.bin_dimensions() == 2 np.testing.assert_allclose(g.bin_left(0), [1, 2]) @@ -79,121 +213,582 @@ def test_bins(self): np.testing.assert_allclose(g.bin_left(1), [2, 3]) np.testing.assert_allclose(g.bin_right(1), [3, 5]) - def test_convolve_with_one(self): - g = self.fake_grid() + def test_rotate_pidbasis( + self, + pdf, + download_objects, + gridname: str = "GRID_STAR_WMWP_510GEV_WP-AL-POL.pineappl.lz4", + target_basis: PidBasis = PidBasis.Evol, + ): + grid = download_objects(f"{gridname}") + g = Grid.read(grid) + + 
conv_ref = g.convolve( + pdg_convs=g.convolutions, + xfxs=[pdf.polarized_pdf, pdf.unpolarized_pdf], + alphas=pdf.alphasQ, + ) + assert g.pid_basis == PidBasis.Pdg + + # Rotate the Grid into the PDG basis + g.rotate_pid_basis(target_basis) + assert g.pid_basis == target_basis + + conv_rot = g.convolve( + pdg_convs=g.convolutions, + xfxs=[pdf.polarized_pdf, pdf.unpolarized_pdf], + alphas=pdf.alphasQ, + ) + np.testing.assert_allclose(conv_ref, conv_rot) + + def test_delete_orders( + self, + download_objects, + gridname: str = "GRID_STAR_WMWP_510GEV_WP-AL-POL.pineappl.lz4", + order_indices: List[int] = [1], + ): + grid = download_objects(f"{gridname}") + g = Grid.read(grid) + orders = [o.as_tuple() for o in g.orders()] + g.delete_orders(order_indices) + for idx in order_indices: + assert orders[idx] not in g.orders() + + def test_delete_channels( + self, + download_objects, + gridname: str = "GRID_STAR_WMWP_510GEV_WP-AL-POL.pineappl.lz4", + channel_indices: List[int] = [1, 4, 5], + ): + grid = download_objects(f"{gridname}") + g = Grid.read(grid) + channels = g.channels() + g.delete_channels(channel_indices) + for idx in channel_indices: + assert channels[idx] not in g.channels() + + def test_split_channels( + self, + pdf, + download_objects, + gridname: str = "GRID_DYE906R_D_bin_1.pineappl.lz4", + ): + grid = download_objects(f"{gridname}") + g = Grid.read(grid) + assert len(g.channels()) == 15 + g.split_channels() + assert len(g.channels()) == 170 + + def test_grid( + self, + download_objects, + gridname: str = "GRID_STAR_WMWP_510GEV_WP-AL-POL.pineappl.lz4", + ): + grid = download_objects(f"{gridname}") + g = Grid.read(grid) + + # Get the types of convolutions for this grid + for conv in g.convolutions: + assert isinstance(conv, Conv) - # DIS grid + # Check that the scalings work, ie run without error + # TODO: implement method to check the actual values + g.scale(factor=10.0) + g.scale_by_bin(factors=[10.0, 20.0]) + g.delete_bins(bin_indices=[0, 1, 2]) + + def 
test_incosistent_convolutions( + self, + pdf, + download_objects, + gridname: str = "GRID_DYE906R_D_bin_1.pineappl.lz4", + ): + """Check that if the passed convolution types do not match the + information in the grid the fail with `PanicException`. + """ + grid = download_objects(f"{gridname}") + g = Grid.read(grid) + + # The following grid has UNPOLARIZED proton, ie should be + # `polarized=False`. + h = ConvType(polarized=True, time_like=False) + h_conv = Conv(conv_type=h, pid=2212) + + with pytest.raises(BaseException) as err_func: + g.convolve( + pdg_convs=[h_conv], # Requires ONE single convolutions + xfxs=[pdf.polarized_pdf], # Requires ONE single PDF + alphas=pdf.alphasQ, + ) + assert "called `Option::unwrap()` on a `None` value" == str(err_func.value) + + @pytest.mark.parametrize("params,expected", TESTING_SPECS) + def test_toy_convolution(self, fake_grids, params, expected): + g = fake_grids.grid_with_generic_convolution( + nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + ) + + # Fill the subgrid-part of the GRID object xs = np.linspace(0.5, 1.0, 5) vs = xs.copy() - subgrid = pineappl.import_only_subgrid.ImportOnlySubgridV1( - vs[np.newaxis, :, np.newaxis], - np.array([90.0]), - xs, - np.array([1.0]), + subgrid = ImportSubgridV1( + array=vs[np.newaxis, :, np.newaxis], + node_values=[np.array([90.0]), xs, np.array([1.0])], ) g.set_subgrid(0, 0, 0, subgrid.into()) + + # Check the convolutions of the GRID + np.testing.assert_allclose(g.convolve(**params), expected) + + def test_unpolarized_convolution( + self, + pdf, + download_objects, + gridname: str = "GRID_DYE906R_D_bin_1.pineappl.lz4", + ): + """Test convolution with an actual Grid. In the following example, + it is a Fixed-target DY grid involving two hadrons in the initial + state. 
+ """ + expected_results = [ + +3.71019208e4, + +3.71019208e4, + +2.13727492e4, + -1.83941398e3, + +3.22728612e3, + +5.45646897e4, + ] # Numbers computed using `v0.8.6` + + grid = download_objects(f"{gridname}") + g = Grid.read(grid) + + # Convolution object of the Unpolarized proton. Given that the two + # initial state hadrons are both Unpolarized Proton, we can pass ONE + # single convolution type and ONE singe PDF set. + h = ConvType(polarized=False, time_like=False) + h_conv = Conv(conv_type=h, pid=2212) + np.testing.assert_allclose( - g.convolve_with_one(2212, lambda pid, x, q2: 0.0, lambda q2: 0.0), - [0.0] * 2, - ) - v = 5e6 / 9999 - np.testing.assert_allclose( - g.convolve_with_one(2212, lambda pid, x, q2: 1, lambda q2: 1.0), - [v, 0.0], - ) - np.testing.assert_allclose( - g.convolve_with_one( - 2212, lambda pid, x, q2: 1, lambda q2: 1.0, bin_indices=[0] + g.convolve( + pdg_convs=[h_conv], # need only to pass ONE convtype + xfxs=[pdf.polarized_pdf], # need only to pass ONE PDF + alphas=pdf.alphasQ, ), - [v], + expected_results, ) - # block first bins with additional args + + def test_polarized_convolution( + self, + pdf, + download_objects, + gridname: str = "GRID_STAR_WMWP_510GEV_WP-AL-POL.pineappl.lz4", + ): + expected_results = [ + +5.50006832e6, + +1.68117895e6, + +3.08224445e5, + -2.65602464e5, + -1.04664085e6, + -5.19002089e6, + ] # Numbers computed using `v0.8.6` + + grid = download_objects(f"{gridname}") + g = Grid.read(grid) + + # Check the Grid convolutions - can be used to construct `grid.convolve` + convolutions = g.convolutions + assert len(convolutions) == 2 + assert convolutions[0].conv_type.polarized + assert not convolutions[0].conv_type.time_like + assert not convolutions[1].conv_type.polarized + assert not convolutions[1].conv_type.time_like + # Check that the initial states are protons + assert convolutions[0].pid == 2212 + assert convolutions[1].pid == 2212 + + # Convolution object of the 1st hadron - Polarized + h1 = 
ConvType(polarized=True, time_like=False) + h1_conv = Conv(conv_type=h1, pid=2212) + + # Convolution object of the 2nd hadron - Unpolarized + h2 = ConvType(polarized=False, time_like=False) + h2_conv = Conv(conv_type=h2, pid=2212) + np.testing.assert_allclose( - g.convolve_with_one( - 2212, - lambda pid, x, q2: 1, - lambda q2: 1.0, - bin_indices=[0], - order_mask=[False], + g.convolve( + pdg_convs=[h1_conv, h2_conv], + xfxs=[pdf.polarized_pdf, pdf.unpolarized_pdf], + alphas=pdf.alphasQ, ), - [0.0], + expected_results, ) - np.testing.assert_allclose( - g.convolve_with_one( - 2212, - lambda pid, x, q2: 1, - lambda q2: 1.0, - bin_indices=[0], - channel_mask=[False], - ), - [0.0], + + def test_convolve_subgrid(self, fake_grids): + binning = [1e-2, 1e-1, 0.5, 1] + g = fake_grids.grid_with_generic_convolution( + nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + bins=binning, ) - # second bin is empty - np.testing.assert_allclose( - g.convolve_with_one( - 2212, lambda pid, x, q2: 1, lambda q2: 1.0, bin_indices=[1] - ), - [0.0], + + # Fill the grid with `fill_array` + rndgen = Generator(PCG64(seed=1234)) + ntuples = [ + np.array([q2, x1, x2]) + for q2, x1, x2 in itertools.product(Q2GRID, XGRID, XGRID) + ] + obs = [rndgen.uniform(binning[0], binning[-1]) for _ in ntuples] + for pto in range(len(ORDERS)): + for channel_id in range(len(CHANNELS)): + g.fill_array( + order=pto, + observables=obs, + channel=channel_id, + ntuples=ntuples, + weights=np.repeat(10, len(obs)), + ) + + ptos_res = [] + for pto in range(len(g.orders())): + res_by_bin = [] + for bin in range(g.bins()): + res_by_channel = 0 + for channel in range(len(g.channels())): + res_by_channel += g.convolve_subgrid( + pdg_convs=[CONVOBJECT, CONVOBJECT], + xfxs=[lambda pid, x, q2: x, lambda pid, x, q2: x], + alphas=lambda q2: 1.0, + ord=pto, + bin=bin, + channel=channel, + ).sum() + res_by_bin.append(res_by_channel) + ptos_res.append(res_by_bin) + + 
np.testing.assert_allclose(ptos_res, [FILL_CONV_RESUTLS]) + + def test_many_convolutions(self, fake_grids, pdf, nb_convolutions: int = 3): + """Test for fun many convolutions.""" + expected_results = [ + 5.87361800e0, + 4.35570600e1, + 4.94878400e1, + ] + binning = [1e-2, 1e-1, 0.5, 1] + rndgen = Generator(PCG64(seed=1234)) + rbools = rndgen.choice(a=[True, False], size=(nb_convolutions, 2)) + + # Define the convolutions + convtypes = [ConvType(polarized=p, time_like=t) for p, t in rbools] + convolutions = [Conv(conv_type=c, pid=2212) for c in convtypes] + + # Define the channel combinations + pids = rndgen.choice( + a=[i for i in range(-5, 5) if i != 0], size=nb_convolutions ) - np.testing.assert_allclose( - g.convolve_with_one(2212, lambda pid, x, q2: 1, lambda q2: 2.0), - [2**3 * v, 0.0], + channels = [Channel([(pids.tolist(), 1.0)])] + + g = fake_grids.grid_with_generic_convolution( + nb_convolutions=nb_convolutions, + channels=channels, + orders=ORDERS, + convolutions=convolutions, + bins=binning, ) - def test_io(self, tmp_path): - g = self.fake_grid() + # Fill the grid with `fill_array` + _q2grid = np.geomspace(1e3, 1e5, 5) + _xgrid = np.geomspace(1e-5, 1, 4) + comb_nodes = [_q2grid] + [_xgrid for _ in range(nb_convolutions)] + ntuples = [np.array(list(kins)) for kins in itertools.product(*comb_nodes)] + obs = [rndgen.uniform(binning[0], binning[-1]) for _ in ntuples] + for pto in range(len(ORDERS)): + for channel_id in range(len(channels)): + g.fill_array( + order=pto, + observables=obs, + channel=channel_id, + ntuples=ntuples, + weights=np.repeat(1, len(obs)), + ) + + results = g.convolve( + pdg_convs=convolutions, + xfxs=[pdf.polarized_pdf for _ in range(nb_convolutions)], + alphas=pdf.alphasQ, + ) + + np.testing.assert_allclose(results / 1e15, expected_results) + + def test_evolve_with_two_ekos( + self, + pdf, + download_objects, + gridname: str = "GRID_STAR_WMWP_510GEV_WP-AL-POL.pineappl.lz4", + ): + """Test the evolution on a grid that contains two 
different convolutions, + ie. requires two different EKOs. + + TODO: Test again convolved numerical values. + """ + grid = download_objects(f"{gridname}") + g = Grid.read(grid) + + # Extract oder mask + order_mask = Order.create_mask(g.orders(), 2, 2, True) + evinfo = g.evolve_info(order_mask=order_mask) + + # Define the convolution types objects + h1 = ConvType(polarized=True, time_like=False) + h2 = ConvType(polarized=False, time_like=False) + conv_type = [h1, h2] + + input_xgrid = np.geomspace(2e-7, 1, num=50) + slices = [] + for conv_id, cvtype in enumerate(conv_type): + sub_slices = [] + for q2 in evinfo.fac1: + info = OperatorSliceInfo( + fac0=1.0, + fac1=q2, + x0=input_xgrid, + x1=evinfo.x1, + pids0=EVOL_BASIS_PIDS, + pids1=TARGET_PIDS, + pid_basis=PidBasis.Evol, + conv_type=cvtype, + ) + op = np.random.uniform( + low=1, + high=10, + size=( + len(TARGET_PIDS), + evinfo.x1.size, + len(EVOL_BASIS_PIDS), + input_xgrid.size, + ), + ) + sub_slices.append((info, op)) + slices.append(sub_slices) + + fktable = g.evolve( + slices=slices, + order_mask=order_mask, + xi=(1.0, 1.0, 1.0), + ren1=evinfo.fac1, + alphas=[0.12029247510152144], + ) + assert isinstance(fktable, FkTable) + + def test_io(self, tmp_path, fake_grids): + g = fake_grids.grid_with_generic_convolution( + nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + ) p = tmp_path / "test.pineappl" p.write_text("") g.write(str(p)) - gg = pineappl.grid.Grid.read(p) - assert isinstance(gg, pineappl.grid.Grid) - _ = pineappl.grid.Grid.read(str(p)) - - def test_fill(self): - g = self.fake_grid() - g.fill(0.5, 0.5, 10.0, 0, 0.01, 0, 10.0) - res = g.convolve_with_one(2212, lambda pid, x, q2: x, lambda q2: 1.0) - pytest.approx(res) == 0.0 - - def test_fill_array(self): - g = self.fake_grid() - g.fill_array( - np.array([0.5, 1.0]), - np.array([0.5, 1.0]), - np.array([0.5, 1.0]), - 0, - np.array([1e-3, 1e-2]), - 0, - np.array([10.0, 100.0]), - ) - res = 
g.convolve_with_one(2212, lambda pid, x, q2: x, lambda q2: 1.0) - pytest.approx(res) == 0.0 - - def test_fill_all(self): - g = self.fake_grid() - g.fill_all(1.0, 1.0, 1.0, 0, 1e-2, np.array([10.0])) - res = g.convolve_with_one(2212, lambda pid, x, q2: x, lambda q2: 1.0) - pytest.approx(res) == 0.0 - - def test_merge(self): - g = self.fake_grid([1, 2, 3]) - g1 = self.fake_grid([3, 4, 5]) - assert g.bins() == 2 - assert g1.bins() == 2 + gg = Grid.read(p) + assert isinstance(gg, Grid) + _ = Grid.read(str(p)) - g.merge(g1) - assert g.bins() == 4 + def test_set_key_value(self, fake_grids): + g = fake_grids.grid_with_generic_convolution( + nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + ) + g.set_key_value("bla", "blub") + g.set_key_value('"', "'") + g.set_key_value("äöü", "ß\\") - g2 = self.fake_grid([1, 2, 3]) - g3 = self.fake_grid([1, 2, 3]) + def test_pid_basis(self, fake_grids): + g = fake_grids.grid_with_generic_convolution( + nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + ) + assert g.pid_basis == PidBasis.Evol + + def test_bocs(self, fake_grids): + g = fake_grids.grid_with_generic_convolution( + nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + ) + for kin in g.kinematics: + assert isinstance(kin, Kinematics) + assert isinstance(g.scales, Scales) + + def test_fill(self, fake_grids): + binning = [1e-2, 1e-1, 0.5, 1] + g = fake_grids.grid_with_generic_convolution( + nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + bins=binning, + ) + + # Fill the Grid with some values + rndgen = Generator(PCG64(seed=1234)) + for pto in range(len(ORDERS)): + for channel_id in range(len(CHANNELS)): + for q2, x1, x2 in itertools.product(Q2GRID, XGRID, XGRID): + n_tuple = [q2, x1, x2] + obs = rndgen.uniform(binning[0], binning[-1]) + g.fill( + order=pto, + observable=obs, + 
channel=channel_id, + ntuple=n_tuple, + weight=10, + ) + + # Peform convolutions using Toy LHPDF & AlphasQ2 functions + res = g.convolve( + pdg_convs=[CONVOBJECT, CONVOBJECT], + xfxs=[lambda pid, x, q2: x, lambda pid, x, q2: x], + alphas=lambda q2: 1.0, + ) + np.testing.assert_allclose(res, FILL_CONV_RESUTLS) + + def test_fill_array(self, fake_grids): + """Test filling the Grid using array, should yield the same result as + `Grid.fill` above. + """ + binning = [1e-2, 1e-1, 0.5, 1] + g = fake_grids.grid_with_generic_convolution( + nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + bins=binning, + ) + + # Fill the grid with arrays instead of looping on them + rndgen = Generator(PCG64(seed=1234)) + ntuples = [ + np.array([q2, x1, x2]) + for q2, x1, x2 in itertools.product(Q2GRID, XGRID, XGRID) + ] + obs = [rndgen.uniform(binning[0], binning[-1]) for _ in ntuples] + for pto in range(len(ORDERS)): + for channel_id in range(len(CHANNELS)): + g.fill_array( + order=pto, + observables=obs, + channel=channel_id, + ntuples=ntuples, + weights=np.repeat(10, len(obs)), + ) + + # Convolution of two symmetrical hadrons + res = g.convolve( + pdg_convs=[CONVOBJECT, CONVOBJECT], + xfxs=[lambda pid, x, q2: x, lambda pid, x, q2: x], + alphas=lambda q2: 1.0, + ) + np.testing.assert_allclose(res, FILL_CONV_RESUTLS) + + def test_fill_all(self, fake_grids): + """Test filling the Grid by filling at once the kinematics and the observable, + should yield the same result as `Grid.fill` above. 
+ """ + binning = [1e-2, 1e-1, 0.5, 1] + g = fake_grids.grid_with_generic_convolution( + nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + bins=binning, + ) + + # Add a point to the grid for all channels (and loop over the points) + rndgen = Generator(PCG64(seed=1234)) + for pto in range(len(ORDERS)): + for q2, x1, x2 in itertools.product(Q2GRID, XGRID, XGRID): + n_tuple = [q2, x1, x2] + obs = rndgen.uniform(binning[0], binning[-1]) + g.fill_all( + order=pto, + observable=obs, + ntuple=n_tuple, + weights=np.array([10.0]), + ) + + # Convolution of two symmetrical hadrons + res = g.convolve( + pdg_convs=[CONVOBJECT, CONVOBJECT], + xfxs=[lambda pid, x, q2: x, lambda pid, x, q2: x], + alphas=lambda q2: 1.0, + ) + np.testing.assert_allclose(res, FILL_CONV_RESUTLS) + + def test_merge(self, fake_grids): + g0 = fake_grids.grid_with_generic_convolution( + nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + bins=[1, 2, 3], + ) + g1 = fake_grids.grid_with_generic_convolution( + nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + bins=[3, 4, 5], + ) + assert g0.bins() == 2 + assert g1.bins() == 2 + g0.merge(g1) + assert g0.bins() == 4 + + g2 = fake_grids.grid_with_generic_convolution( + nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + bins=[1, 2, 3], + ) + g3 = fake_grids.grid_with_generic_convolution( + nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + bins=[1, 2, 3], + ) assert g2.bins() == 2 assert g3.bins() == 2 g2.merge(g3) assert g2.bins() == 2 - g4 = self.fake_grid([2, 3, 4]) - g5 = self.fake_grid([4, 5, 6]) + g4 = fake_grids.grid_with_generic_convolution( + nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + bins=[2, 3, 4], + ) + g5 = fake_grids.grid_with_generic_convolution( + 
nb_convolutions=2, + channels=CHANNELS, + orders=ORDERS, + convolutions=[CONVOBJECT, CONVOBJECT], + bins=[4, 5, 6], + ) assert g4.bins() == 2 assert g5.bins() == 2 @@ -202,3 +797,18 @@ def test_merge(self): with pytest.raises(ValueError, match="NonConsecutiveBins"): g2.merge(g5) + + def test_evolveinfo( + self, + download_objects, + gridname: str = "GRID_STAR_WMWP_510GEV_WP-AL-POL.pineappl.lz4", + ): + grid = download_objects(f"{gridname}") + g = Grid.read(grid) + g_evinfo = g.evolve_info(order_mask=[True, False, False, False]) + + np.testing.assert_allclose(g_evinfo.fac1, [6463.838404]) + np.testing.assert_allclose(g_evinfo.ren1, [6463.838404]) + np.testing.assert_allclose(g_evinfo.pids1, [-5, -3, -1, 2, 4]) + assert g_evinfo.x1.size == 23 + np.testing.assert_allclose(g_evinfo.x1[0], 0.01437507) diff --git a/pineappl_py/tests/test_subgrid.py b/pineappl_py/tests/test_subgrid.py new file mode 100644 index 000000000..573faaa5e --- /dev/null +++ b/pineappl_py/tests/test_subgrid.py @@ -0,0 +1,122 @@ +import numpy as np +import pytest + +from dataclasses import dataclass +from typing import List, Tuple + +from pineappl.boc import Channel, Order +from pineappl.convolutions import Conv, ConvType +from pineappl.grid import Grid +from pineappl.import_subgrid import ImportSubgridV1 +from pineappl.subgrid import SubgridEnum + +# Define some default for the minimum value of `Q2` +Q2_MIN = 1e2 + +# See `test_grid.py` for more detailed information +TYPECONV = ConvType(polarized=False, time_like=False) +CONVOBJECT = Conv(conv_type=TYPECONV, pid=2212) + + +@dataclass +class OperatorInfo: + x_grids: List[np.ndarray] + scale: List[float] + array: np.ndarray + + +def test_issue_164(pdf, fake_grids): + # https://github.com/NNPDF/pineappl/issues/164 + # DIS-like convolution now ONLY requires one entry of `PID` + channels = [Channel([([2], 1.0)])] # DIS-case + orders = [Order(0, 0, 0, 0, 0)] + + def convolve_grid(q2_min: float = Q2_MIN) -> np.ndarray: + grid = 
fake_grids.grid_with_generic_convolution( + nb_convolutions=1, + orders=orders, + channels=channels, + convolutions=[CONVOBJECT], + q2min=q2_min, + bins=np.array([0.0, 0.1]), + ) + # Fill the Grid with some values + grid.fill( + order=0, + observable=0.5, + channel=0, + ntuple=[10.0, 0.2, 0.2], + weight=0.5, + ) + return grid.convolve( + pdg_convs=[CONVOBJECT], + xfxs=[pdf.xfxQ], + alphas=pdf.alphasQ, + ) + + # Using default minimum + res = convolve_grid() + assert res == 0.0 + # lower minimum to q2=1 + res = convolve_grid(q2_min=1.0) + assert res == 0.0 + + +class TestSubgrid: + def fake_grid(self, fake_grids) -> Grid: + channels = [Channel([([2], 1.0)]), Channel([([3], 0.5)])] + orders = [Order(0, 0, 0, 0, 0)] + return fake_grids.grid_with_generic_convolution( + nb_convolutions=1, + orders=orders, + channels=channels, + convolutions=[CONVOBJECT], + ) + + def fake_importonlysubgrid( + self, nb_xdim: int = 1 + ) -> Tuple[ImportSubgridV1, OperatorInfo]: + x_grids = [np.linspace(0.1, 1, 2) for _ in range(nb_xdim)] + xgrid_size = [x.size for x in x_grids] + Q2s = np.linspace(10, 20, 2) + scale = [q2 for q2 in Q2s] # One single scale Q2 + array = np.random.rand(len(Q2s), *xgrid_size) + infos = OperatorInfo(x_grids, scale, array) + subgrid = ImportSubgridV1(array=array, node_values=[scale, *x_grids]) + return subgrid, infos + + def test_subgrid_methods(self, fake_grids): + grid = self.fake_grid(fake_grids) + test_subgrid, infos = self.fake_importonlysubgrid() + grid.set_subgrid(0, 0, 0, test_subgrid.into()) + extr_subgrid = grid.subgrid(0, 0, 0) + assert isinstance(extr_subgrid, SubgridEnum) + + # Check that the subgrid can be scaled + extr_subgrid.scale(factor=100) + assert isinstance(extr_subgrid.into(), SubgridEnum) + + @pytest.mark.parametrize("nb_xdim", [1, 2, 3, 4]) + def test_subgrid_arrays(self, nb_xdim: int): + """This simply checks that the commands run without raising any + errors and that the objects have been succesfully instantiated. 
+ """ + subgrid, info = self.fake_importonlysubgrid(nb_xdim=nb_xdim) + assert isinstance(subgrid, ImportSubgridV1) + + def test_to_array(self, fake_grids): + grid = self.fake_grid(fake_grids) + test_subgrid, infos = self.fake_importonlysubgrid() + grid.set_subgrid(0, 0, 0, test_subgrid.into()) + extr_subgrid = grid.subgrid(0, 0, 0) + + # Check that the shape of the subgrid matches specs + extr_subgrid_shape = extr_subgrid.shape + assert tuple(extr_subgrid_shape) == infos.array.shape + + # Check that the `node_values` correspond with the Kinematics + node_values = extr_subgrid.node_values + np.testing.assert_allclose(node_values, [infos.scale, *infos.x_grids]) + + test_array = extr_subgrid.to_array(shape=extr_subgrid_shape) + np.testing.assert_allclose(test_array, infos.array) diff --git a/pineappl_py/tests/test_sugrid.py b/pineappl_py/tests/test_sugrid.py deleted file mode 100644 index 91a0d423b..000000000 --- a/pineappl_py/tests/test_sugrid.py +++ /dev/null @@ -1,76 +0,0 @@ -import pineappl -import pytest - -import numpy as np - - -class TestSubgridParams: - def test_init(self): - sp = pineappl.subgrid.SubgridParams() - assert isinstance(sp, pineappl.subgrid.SubgridParams) - - -def test_issue_164(pdf): - channels = [pineappl.boc.Channel([(1, 2, 1.0)])] - orders = [pineappl.grid.Order(0, 0, 0, 0)] - params = pineappl.subgrid.SubgridParams() - - def convolve_grid(): - bin_limits = np.array([0.0, 1.0]) - grid = pineappl.grid.Grid(channels, orders, bin_limits, params) - grid.fill(0.2, 0.2, 10, 0, 0.5, 0, 0.5) - return grid.convolve_with_one(2212, pdf.xfxQ, pdf.alphasQ) - - # default minimum is q2=100 - res = convolve_grid() - assert res == 0.0 - - # lower minimum to q2=1 - params.set_q2_min(1.0) - res = convolve_grid() - assert pytest.approx(res) != 0.0 - - -class TestSubgrid: - def fake_grid(self): - channels = [pineappl.boc.Channel([(1, 2, 1.0)])] - orders = [pineappl.grid.Order(0, 0, 0, 0)] - params = pineappl.subgrid.SubgridParams() - bin_limits = np.array([0.0, 
1.0]) - grid = pineappl.grid.Grid(channels, orders, bin_limits, params) - return grid - - def fake_importonlysubgrid(self): - x1s = np.linspace(0.1, 1, 2) - x2s = np.linspace(0.5, 1, 2) - Q2s = np.linspace(10, 20, 2) - mu2s = [tuple([q2, q2]) for q2 in Q2s] - array = np.random.rand(len(Q2s), len(x1s), len(x2s)) - subgrid = pineappl.import_only_subgrid.ImportOnlySubgridV2( - array, mu2s, x1s, x2s - ) - return subgrid, [x1s, x2s, mu2s, array] - - def test_subgrid_methods(self): - grid = self.fake_grid() - test_subgrid, infos = self.fake_importonlysubgrid() - x1s, x2s, mu2s, _ = (obj for obj in infos) - grid.set_subgrid(0, 0, 0, test_subgrid.into()) - extr_subgrid = grid.subgrid(0, 0, 0) - facgrid = np.array([mu2.fac for mu2 in extr_subgrid.mu2_grid()]) - rengrid = np.array([mu2.ren for mu2 in extr_subgrid.mu2_grid()]) - np.testing.assert_allclose([mu2[0] for mu2 in mu2s], rengrid) - np.testing.assert_allclose([mu2[1] for mu2 in mu2s], facgrid) - np.testing.assert_allclose(extr_subgrid.x1_grid(), x1s) - np.testing.assert_allclose(extr_subgrid.x2_grid(), x2s) - - def test_to_array3(self): - grid = self.fake_grid() - test_subgrid, infos = self.fake_importonlysubgrid() - _, _, _, array = (obj for obj in infos) - grid.set_subgrid(0, 0, 0, test_subgrid.into()) - extr_subgrid = grid.subgrid(0, 0, 0) - test_array = extr_subgrid.to_array3() - print(test_array) - print(array) - np.testing.assert_allclose(test_array, array) diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml index 908d0fc67..78d343eb9 100644 --- a/xtask/Cargo.toml +++ b/xtask/Cargo.toml @@ -21,4 +21,4 @@ clap_mangen = "0.2.18" enum_dispatch = "0.3.7" #git2 = "0.17.2" #semver = "1.0.17" -pineappl_cli = { path = "../pineappl_cli", version = "=0.8.2" } +pineappl_cli = { path = "../pineappl_cli", version = "=1.0.0-alpha1" }