diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index d2dd409e..2f90f0b4 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -23,7 +23,7 @@ jobs: uses: actions/cache@v3 with: path: test-data - key: test-data-v8 + key: test-data-v11 - name: Download test data if: steps.cache-test-data.outputs.cache-hit != 'true' run: | @@ -42,6 +42,8 @@ jobs: curl -s -C - -O 'https://data.nnpdf.science/pineappl/test-data/LHCB_WP_7TEV_old.pineappl.lz4' curl -s -C - -O 'https://data.nnpdf.science/pineappl/test-data/LHCB_WP_7TEV.pineappl.lz4' curl -s -C - -O 'https://data.nnpdf.science/pineappl/test-data/LHCB_WP_7TEV.tar' + curl -s -C - -O 'https://data.nnpdf.science/pineappl/test-data/LHCB_WP_7TEV_v2.tar' + curl -s -C - -O 'https://data.nnpdf.science/pineappl/test-data/LHCB_WP_7TEV_v2_xif_2.tar' curl -s -C - -O 'https://data.nnpdf.science/pineappl/test-data/NJetEvents_0-0-2.tab.gz' curl -s -C - -O 'https://data.nnpdf.science/pineappl/test-data/NUTEV_CC_NU_FE_SIGMARED.pineappl.lz4' curl -s -C - -O 'https://data.nnpdf.science/pineappl/test-data/NUTEV_CC_NU_FE_SIGMARED.tar' diff --git a/CHANGELOG.md b/CHANGELOG.md index dc9c5a15..eb8620f8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Added + +- added `Grid::evolve_with_slice_iter`, `AlphasTable` and `OperatorSliceInfo`, + which define a new interface supporting very large evolution kernels that + have been introduced in EKO v0.13. 
This interface will replace `Grid::evolve` + +### Changed + +- `Grid::evolve` has now been marked deprecated + ## [0.6.2] - 09/10/2023 ### Added diff --git a/maintainer/generate-coverage.sh b/maintainer/generate-coverage.sh index 8f0c3bdc..bf071037 100755 --- a/maintainer/generate-coverage.sh +++ b/maintainer/generate-coverage.sh @@ -17,6 +17,7 @@ wget --no-verbose --no-clobber -P test-data 'https://data.nnpdf.science/pineappl wget --no-verbose --no-clobber -P test-data 'https://data.nnpdf.science/pineappl/test-data/LHCB_WP_7TEV_old.pineappl.lz4' wget --no-verbose --no-clobber -P test-data 'https://data.nnpdf.science/pineappl/test-data/LHCB_WP_7TEV.pineappl.lz4' wget --no-verbose --no-clobber -P test-data 'https://data.nnpdf.science/pineappl/test-data/LHCB_WP_7TEV.tar' +wget --no-verbose --no-clobber -P test-data 'https://data.nnpdf.science/pineappl/test-data/LHCB_WP_7TEV_v2.tar' wget --no-verbose --no-clobber -P test-data 'https://data.nnpdf.science/pineappl/test-data/NJetEvents_0-0-2.tab.gz' wget --no-verbose --no-clobber -P test-data 'https://data.nnpdf.science/pineappl/test-data/NUTEV_CC_NU_FE_SIGMARED.pineappl.lz4' wget --no-verbose --no-clobber -P test-data 'https://data.nnpdf.science/pineappl/test-data/NUTEV_CC_NU_FE_SIGMARED.tar' diff --git a/pineappl/Cargo.toml b/pineappl/Cargo.toml index 280fe4c4..8c6df8a8 100644 --- a/pineappl/Cargo.toml +++ b/pineappl/Cargo.toml @@ -13,6 +13,7 @@ rust-version.workspace = true version.workspace = true [dependencies] +anyhow = "1.0.48" arrayvec = "0.7.2" bincode = "1.3.3" bitflags = "1.3.2" diff --git a/pineappl/src/evolution.rs b/pineappl/src/evolution.rs index c93b5603..070e96b4 100644 --- a/pineappl/src/evolution.rs +++ b/pineappl/src/evolution.rs @@ -8,7 +8,8 @@ use super::sparse_array3::SparseArray3; use super::subgrid::{Mu2, Subgrid, SubgridEnum}; use float_cmp::approx_eq; use itertools::Itertools; -use ndarray::{s, Array1, Array2, Array3, ArrayView1, ArrayView5, Axis}; +use ndarray::linalg; +use ndarray::{s, 
Array1, Array2, Array3, ArrayView1, ArrayView4, Axis}; use std::iter; /// Number of ULPS used to de-duplicate grid values in [`Grid::evolve_info`]. @@ -88,36 +89,105 @@ pub struct OperatorInfo { pub lumi_id_types: String, } -fn gluon_has_pid_zero(grid: &Grid) -> bool { - grid.key_values() - .and_then(|key_values| key_values.get("lumi_id_types")) - .and_then(|value| { - (value == "pdg_mc_ids").then(|| { - grid.lumi() +/// Information about the evolution kernel operator slice (EKO) passed to +/// [`Grid::evolve_with_slice_iter`](super::grid::Grid::evolve_with_slice_iter) as `operator`, +/// which is used to convert a [`Grid`] into an [`FkTable`](super::fk_table::FkTable). The +/// dimensions of the EKO must correspond to the values given in [`fac1`](Self::fac1), +/// [`pids0`](Self::pids0), [`x0`](Self::x0), [`pids1`](Self::pids1) and [`x1`](Self::x1), exactly +/// in this order. Members with a `1` are defined at the squared factorization scale given as +/// `fac1` (often called process scale) and are found in the [`Grid`] that +/// `Grid::evolve_with_slice_iter` is called with. Members with a `0` are defined at the squared +/// factorization scale [`fac0`](Self::fac0) (often called fitting scale or starting scale) and are +/// found in the `FkTable` resulting from [`Grid::evolve`]. +/// +/// The EKO slice may convert a `Grid` from a basis given by the particle identifiers `pids1` to a +/// possibly different basis given by `pids0`. This basis must also be identified using +/// [`lumi_id_types`](Self::lumi_id_types), which tells +/// [`FkTable::convolute`](super::fk_table::FkTable::convolute) how to perform a convolution. +#[derive(Clone)] +pub struct OperatorSliceInfo { + /// Squared factorization scale of the `FkTable`. + pub fac0: f64, + /// Particle identifiers of the `FkTable`. + pub pids0: Vec, + /// `x`-grid coordinates of the `FkTable` + pub x0: Vec, + /// Squared factorization scale of the slice of `Grid` that should be evolved. 
+ pub fac1: f64, + /// Particle identifiers of the `Grid`. If the `Grid` contains more particle identifiers than + /// given here, the contributions of them are silently ignored. + pub pids1: Vec, + /// `x`-grid coordinates of the `Grid`. + pub x1: Vec, + + /// Identifier of the particle basis for the `FkTable`. + pub lumi_id_types: String, +} + +/// A mapping of squared renormalization scales in `ren1` to strong couplings in `alphas`. The +/// ordering of both members defines the mapping. +pub struct AlphasTable { + /// Renormalization scales of the `Grid`. + pub ren1: Vec, + /// Strong couplings corresponding to the order given in [`ren1`](Self::ren1). + pub alphas: Vec, +} + +impl AlphasTable { + /// Create an `AlphasTable` for `grid`, varying the renormalization scale by `xir` for the + /// strong couplings given by `alphas`. The only argument of `alphas` must be the squared + /// renormalization scale. + pub fn from_grid(grid: &Grid, xir: f64, alphas: &dyn Fn(f64) -> f64) -> Self { + let mut ren1: Vec<_> = grid + .subgrids() + .iter() + .flat_map(|subgrid| { + subgrid + .mu2_grid() .iter() - .any(|entry| entry.entry().iter().any(|&(a, b, _)| (a == 0) || (b == 0))) + .map(|Mu2 { ren, .. }| xir * xir * ren) + .collect::>() }) - }) - .unwrap_or(false) + .collect(); + // UNWRAP: if we can't sort numbers the grid is fishy + ren1.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); + ren1.dedup(); + let ren1 = ren1; + let alphas: Vec<_> = ren1.iter().map(|&mur2| alphas(mur2)).collect(); + + Self { ren1, alphas } + } +} + +fn gluon_has_pid_zero(grid: &Grid) -> bool { + // if there are any PID zero particles ... 
+ grid.lumi() + .iter() + .any(|entry| entry.entry().iter().any(|&(a, b, _)| (a == 0) || (b == 0))) + // and if lumi_id_types = pdg_mc_ids or if the key-value pair doesn't exist + && grid + .key_values() + .and_then(|key_values| key_values.get("lumi_id_types")) + .map_or(true, |value| value == "pdg_mc_ids") } type Pid01IndexTuples = Vec<(usize, usize)>; type Pid01Tuples = Vec<(i32, i32)>; -pub(crate) fn pids( - operator: &ArrayView5, - info: &OperatorInfo, +fn pid_slices( + operator: &ArrayView4, + info: &OperatorSliceInfo, gluon_has_pid_zero: bool, pid1_nonzero: &dyn Fn(i32) -> bool, ) -> Result<(Pid01IndexTuples, Pid01Tuples), GridError> { // list of all non-zero PID indices - let pid_indices: Vec<_> = (0..operator.dim().3) - .cartesian_product(0..operator.dim().1) + let pid_indices: Vec<_> = (0..operator.dim().2) + .cartesian_product(0..operator.dim().0) .filter(|&(pid0_idx, pid1_idx)| { // 1) at least one element of the operator must be non-zero, and 2) the pid must be // contained in the lumi somewhere operator - .slice(s![.., pid1_idx, .., pid0_idx, ..]) + .slice(s![pid1_idx, .., pid0_idx, ..]) .iter() .any(|&value| value != 0.0) && pid1_nonzero(if gluon_has_pid_zero && info.pids1[pid1_idx] == 21 { @@ -152,7 +222,7 @@ pub(crate) fn pids( Ok((pid_indices, pids)) } -pub(crate) fn lumi0_with_one(pids: &[(i32, i32)]) -> Vec { +fn lumi0_with_one(pids: &[(i32, i32)]) -> Vec { let mut pids0: Vec<_> = pids.iter().map(|&(pid0, _)| pid0).collect(); pids0.sort_unstable(); pids0.dedup(); @@ -160,7 +230,7 @@ pub(crate) fn lumi0_with_one(pids: &[(i32, i32)]) -> Vec { pids0 } -pub(crate) fn lumi0_with_two(pids_a: &[(i32, i32)], pids_b: &[(i32, i32)]) -> Vec<(i32, i32)> { +fn lumi0_with_two(pids_a: &[(i32, i32)], pids_b: &[(i32, i32)]) -> Vec<(i32, i32)> { let mut pids0_a: Vec<_> = pids_a.iter().map(|&(pid0, _)| pid0).collect(); pids0_a.sort_unstable(); pids0_a.dedup(); @@ -175,27 +245,12 @@ pub(crate) fn lumi0_with_two(pids_a: &[(i32, i32)], pids_b: &[(i32, i32)]) -> Ve 
.collect() } -pub(crate) fn operators( - operator: &ArrayView5, - info: &OperatorInfo, - fac1: &[f64], +fn operator_slices( + operator: &ArrayView4, + info: &OperatorSliceInfo, pid_indices: &[(usize, usize)], x1: &[f64], -) -> Result>, GridError> { - // permutation between the grid fac1 values and the operator fac1 values - let fac1_indices: Vec<_> = fac1 - .iter() - .map(|&fac1p| { - info.fac1 - .iter() - .position(|&fac1| approx_eq!(f64, fac1p, fac1, ulps = EVOLUTION_TOL_ULPS)) - .ok_or_else(|| { - GridError::EvolutionFailure(format!("no operator for muf2 = {fac1p} found")) - }) - }) - // TODO: use `try_collect` once stabilized - .collect::>()?; - +) -> Result>, GridError> { // permutation between the grid x values and the operator x1 values let x1_indices: Vec<_> = x1 .iter() @@ -215,10 +270,9 @@ pub(crate) fn operators( .iter() .map(|&(pid0_idx, pid1_idx)| { operator - .slice(s![.., pid1_idx, .., pid0_idx, ..]) - .select(Axis(0), &fac1_indices) - .select(Axis(1), &x1_indices) - .permuted_axes([0, 2, 1]) + .slice(s![pid1_idx, .., pid0_idx, ..]) + .select(Axis(0), &x1_indices) + .reversed_axes() .as_standard_layout() .into_owned() }) @@ -227,28 +281,18 @@ pub(crate) fn operators( Ok(operators) } -type Fac1X1aX1bOp3Tuple = (Vec, Vec, Vec, Array3); +type X1aX1bOp2Tuple = (Vec, Vec, Array2); -pub(crate) fn ndarray_from_subgrid_orders( - info: &OperatorInfo, +fn ndarray_from_subgrid_orders_slice( + info: &OperatorSliceInfo, subgrids: &ArrayView1, orders: &[Order], order_mask: &[bool], -) -> Result { + (xir, xif): (f64, f64), + alphas_table: &AlphasTable, +) -> Result { // TODO: skip empty subgrids - let mut fac1: Vec<_> = subgrids - .iter() - .enumerate() - .filter(|(index, _)| order_mask.get(*index).copied().unwrap_or(true)) - .flat_map(|(_, subgrid)| { - subgrid - .mu2_grid() - .iter() - .map(|mu2| info.xif * info.xif * mu2.fac) - .collect::>() - }) - .collect(); let mut x1_a: Vec<_> = subgrids .iter() .enumerate() @@ -262,14 +306,12 @@ pub(crate) fn 
ndarray_from_subgrid_orders( .flat_map(|(_, subgrid)| subgrid.x2_grid().into_owned()) .collect(); - fac1.sort_by(f64::total_cmp); - fac1.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLUTION_TOL_ULPS)); x1_a.sort_by(f64::total_cmp); x1_a.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLUTION_TOL_ULPS)); x1_b.sort_by(f64::total_cmp); x1_b.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLUTION_TOL_ULPS)); - let mut array = Array3::::zeros((fac1.len(), x1_a.len(), x1_b.len())); + let mut array = Array2::::zeros((x1_a.len(), x1_b.len())); // add subgrids for different orders, but the same bin and lumi, using the right // couplings @@ -284,40 +326,22 @@ pub(crate) fn ndarray_from_subgrid_orders( let mut logs = 1.0; if order.logxir > 0 { - if approx_eq!(f64, info.xir, 1.0, ulps = 4) { + if approx_eq!(f64, xir, 1.0, ulps = 4) { continue; } - logs *= (info.xir * info.xir).ln(); + logs *= (xir * xir).ln(); } if order.logxif > 0 { - if approx_eq!(f64, info.xif, 1.0, ulps = 4) { + if approx_eq!(f64, xif, 1.0, ulps = 4) { continue; } - logs *= (info.xif * info.xif).ln(); + logs *= (xif * xif).ln(); } // TODO: use `try_collect` once stabilized - let fac1_indices: Vec<_> = subgrid - .mu2_grid() - .iter() - .map(|&Mu2 { fac, .. 
}| { - fac1.iter() - .position(|&scale| { - approx_eq!( - f64, - info.xif * info.xif * fac, - scale, - ulps = EVOLUTION_TOL_ULPS - ) - }) - .ok_or_else(|| { - GridError::EvolutionFailure(format!("no operator for muf2 = {fac} found")) - }) - }) - .collect::>()?; let xa_indices: Vec<_> = subgrid .x1_grid() .iter() @@ -342,17 +366,23 @@ pub(crate) fn ndarray_from_subgrid_orders( .collect::>()?; for ((ifac1, ix1, ix2), value) in subgrid.indexed_iter() { - let mur2 = info.xir * info.xir * subgrid.mu2_grid()[ifac1].ren; + let Mu2 { ren, fac } = subgrid.mu2_grid()[ifac1]; + + if !approx_eq!(f64, xif * xif * fac, info.fac1, ulps = EVOLUTION_TOL_ULPS) { + continue; + } + + let mur2 = xir * xir * ren; let als = if order.alphas == 0 { 1.0 - } else if let Some(alphas) = - info.ren1 - .iter() - .zip(info.alphas.iter()) - .find_map(|(&ren1, &alphas)| { - approx_eq!(f64, ren1, mur2, ulps = EVOLUTION_TOL_ULPS).then(|| alphas) - }) + } else if let Some(alphas) = alphas_table + .ren1 + .iter() + .zip(alphas_table.alphas.iter()) + .find_map(|(&ren1, &alphas)| { + approx_eq!(f64, ren1, mur2, ulps = EVOLUTION_TOL_ULPS).then(|| alphas) + }) { alphas.powi(order.alphas.try_into().unwrap()) } else { @@ -361,23 +391,25 @@ pub(crate) fn ndarray_from_subgrid_orders( ))); }; - array[[fac1_indices[ifac1], xa_indices[ix1], xb_indices[ix2]]] += als * logs * value; + array[[xa_indices[ix1], xb_indices[ix2]]] += als * logs * value; } } - Ok((fac1, x1_a, x1_b, array)) + Ok((x1_a, x1_b, array)) } -pub(crate) fn evolve_with_one( +pub(crate) fn evolve_slice_with_one( grid: &Grid, - operator: &ArrayView5, - info: &OperatorInfo, + operator: &ArrayView4, + info: &OperatorSliceInfo, order_mask: &[bool], + xi: (f64, f64), + alphas_table: &AlphasTable, ) -> Result<(Array3, Vec), GridError> { let gluon_has_pid_zero = gluon_has_pid_zero(grid); let has_pdf1 = grid.has_pdf1(); - let (pid_indices, pids) = pids(operator, info, gluon_has_pid_zero, &|pid| { + let (pid_indices, pids) = pid_slices(operator, info, 
gluon_has_pid_zero, &|pid| { grid.lumi() .iter() .flat_map(LumiEntry::entry) @@ -389,15 +421,20 @@ pub(crate) fn evolve_with_one( let new_axis = if has_pdf1 { 2 } else { 1 }; let mut last_x1 = Vec::new(); - let mut last_fac1 = Vec::new(); let mut ops = Vec::new(); for subgrids_ol in grid.subgrids().axis_iter(Axis(1)) { let mut tables = vec![Array1::zeros(info.x0.len()); lumi0.len()]; - for (lumi1, subgrids_o) in subgrids_ol.axis_iter(Axis(1)).enumerate() { - let (fac1, x1_a, x1_b, array) = - ndarray_from_subgrid_orders(info, &subgrids_o, grid.orders(), order_mask)?; + for (subgrids_o, lumi1) in subgrids_ol.axis_iter(Axis(1)).zip(grid.lumi()) { + let (x1_a, x1_b, array) = ndarray_from_subgrid_orders_slice( + info, + &subgrids_o, + grid.orders(), + order_mask, + xi, + alphas_table, + )?; let x1 = if has_pdf1 { x1_a } else { x1_b }; @@ -405,33 +442,20 @@ pub(crate) fn evolve_with_one( continue; } - if (last_fac1.len() != fac1.len()) - || last_fac1 - .iter() - .zip(fac1.iter()) - .any(|(&lhs, &rhs)| !approx_eq!(f64, lhs, rhs, ulps = EVOLUTION_TOL_ULPS)) - || (last_x1.len() != x1.len()) + if (last_x1.len() != x1.len()) || last_x1 .iter() .zip(x1.iter()) .any(|(&lhs, &rhs)| !approx_eq!(f64, lhs, rhs, ulps = EVOLUTION_TOL_ULPS)) { - ops = operators(operator, info, &fac1, &pid_indices, &x1)?; - last_fac1 = fac1; + ops = operator_slices(operator, info, &pid_indices, &x1)?; last_x1 = x1; } - // TODO: get rid of array-index access - for (&pid1, &factor) in - grid.lumi()[lumi1].entry().iter().map( - |(a, b, f)| { - if has_pdf1 { - (a, f) - } else { - (b, f) - } - }, - ) + for (&pid1, &factor) in lumi1 + .entry() + .iter() + .map(|(a, b, f)| if has_pdf1 { (a, f) } else { (b, f) }) { for (fk_table, op) in lumi0 @@ -446,19 +470,7 @@ pub(crate) fn evolve_with_one( .map(|op| (fk_table, op)) }) { - let mut result = Array1::zeros(info.x0.len()); - - for imu2 in 0..array.dim().0 { - let op = op.index_axis(Axis(0), imu2); - - result += &op.dot( - &array - .index_axis(Axis(0), imu2) - 
.index_axis(Axis(new_axis - 1), 0), - ); - } - - fk_table.scaled_add(factor, &result); + fk_table.scaled_add(factor, &op.dot(&array.index_axis(Axis(new_axis - 1), 0))); } } } @@ -509,21 +521,23 @@ pub(crate) fn evolve_with_one( )) } -pub(crate) fn evolve_with_two( +pub(crate) fn evolve_slice_with_two( grid: &Grid, - operator: &ArrayView5, - info: &OperatorInfo, + operator: &ArrayView4, + info: &OperatorSliceInfo, order_mask: &[bool], + xi: (f64, f64), + alphas_table: &AlphasTable, ) -> Result<(Array3, Vec), GridError> { let gluon_has_pid_zero = gluon_has_pid_zero(grid); - let (pid_indices_a, pids_a) = pids(operator, info, gluon_has_pid_zero, &|pid1| { + let (pid_indices_a, pids_a) = pid_slices(operator, info, gluon_has_pid_zero, &|pid1| { grid.lumi() .iter() .flat_map(LumiEntry::entry) .any(|&(a, _, _)| a == pid1) })?; - let (pid_indices_b, pids_b) = pids(operator, info, gluon_has_pid_zero, &|pid1| { + let (pid_indices_b, pids_b) = pid_slices(operator, info, gluon_has_pid_zero, &|pid1| { grid.lumi() .iter() .flat_map(LumiEntry::entry) @@ -533,7 +547,6 @@ pub(crate) fn evolve_with_two( let lumi0 = lumi0_with_two(&pids_a, &pids_b); let mut sub_fk_tables = Vec::with_capacity(grid.bin_info().bins() * lumi0.len()); - let mut last_fac1 = Vec::new(); let mut last_x1a = Vec::new(); let mut last_x1b = Vec::new(); let mut operators_a = Vec::new(); @@ -542,44 +555,39 @@ pub(crate) fn evolve_with_two( for subgrids_ol in grid.subgrids().axis_iter(Axis(1)) { let mut tables = vec![Array2::zeros((info.x0.len(), info.x0.len())); lumi0.len()]; - for (lumi1, subgrids_o) in subgrids_ol.axis_iter(Axis(1)).enumerate() { - let (fac1, x1_a, x1_b, array) = - ndarray_from_subgrid_orders(info, &subgrids_o, grid.orders(), order_mask)?; - - let fac1_diff = (last_fac1.len() != fac1.len()) - || last_fac1 - .iter() - .zip(fac1.iter()) - .any(|(&lhs, &rhs)| !approx_eq!(f64, lhs, rhs, ulps = EVOLUTION_TOL_ULPS)); - - if fac1_diff - || (last_x1a.len() != x1_a.len()) + for (subgrids_o, lumi1) in 
subgrids_ol.axis_iter(Axis(1)).zip(grid.lumi()) { + let (x1_a, x1_b, array) = ndarray_from_subgrid_orders_slice( + info, + &subgrids_o, + grid.orders(), + order_mask, + xi, + alphas_table, + )?; + + if (last_x1a.len() != x1_a.len()) || last_x1a .iter() .zip(x1_a.iter()) .any(|(&lhs, &rhs)| !approx_eq!(f64, lhs, rhs, ulps = EVOLUTION_TOL_ULPS)) { - operators_a = operators(operator, info, &fac1, &pid_indices_a, &x1_a)?; + operators_a = operator_slices(operator, info, &pid_indices_a, &x1_a)?; last_x1a = x1_a; } - if fac1_diff - || (last_x1b.len() != x1_b.len()) + if (last_x1b.len() != x1_b.len()) || last_x1b .iter() .zip(x1_b.iter()) .any(|(&lhs, &rhs)| !approx_eq!(f64, lhs, rhs, ulps = EVOLUTION_TOL_ULPS)) { - operators_b = operators(operator, info, &fac1, &pid_indices_b, &x1_b)?; + operators_b = operator_slices(operator, info, &pid_indices_b, &x1_b)?; last_x1b = x1_b; } - if fac1_diff { - last_fac1 = fac1; - }; + let mut tmp = Array2::zeros((last_x1a.len(), info.x0.len())); - // TODO: get rid of array-index access - for &(pida1, pidb1, factor) in grid.lumi()[lumi1].entry() { + for &(pida1, pidb1, factor) in lumi1.entry() { for (fk_table, opa, opb) in lumi0 .iter() @@ -588,25 +596,19 @@ pub(crate) fn evolve_with_two( pids_a .iter() .zip(operators_a.iter()) - .cartesian_product(pids_b.iter().zip(operators_b.iter())) - .find_map(|((&(pa0, pa1), opa), (&(pb0, pb1), opb))| { - (pa0 == pida0 && pa1 == pida1 && pb0 == pidb0 && pb1 == pidb1) - .then_some((opa, opb)) + .find_map(|(&(pa0, pa1), opa)| { + (pa0 == pida0 && pa1 == pida1).then_some(opa) }) + .zip(pids_b.iter().zip(operators_b.iter()).find_map( + |(&(pb0, pb1), opb)| { + (pb0 == pidb0 && pb1 == pidb1).then_some(opb) + }, + )) .map(|(opa, opb)| (fk_table, opa, opb)) }) { - let mut result = Array2::zeros((info.x0.len(), info.x0.len())); - - for imu2 in 0..array.dim().0 { - let opa = opa.index_axis(Axis(0), imu2); - let opb = opb.index_axis(Axis(0), imu2); - let arr = array.index_axis(Axis(0), imu2); - - result += 
&opa.dot(&arr.dot(&opb.t())); - } - - fk_table.scaled_add(factor, &result); + linalg::general_mat_mul(1.0, &array, &opb.t(), 0.0, &mut tmp); + linalg::general_mat_mul(factor, opa, &tmp, 1.0, fk_table); } } } diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 3a785fb8..4b5fbdde 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -2,7 +2,7 @@ use super::bin::{BinInfo, BinLimits, BinRemapper}; use super::empty_subgrid::EmptySubgridV1; -use super::evolution::{self, EvolveInfo, OperatorInfo}; +use super::evolution::{self, AlphasTable, EvolveInfo, OperatorInfo, OperatorSliceInfo}; use super::fk_table::FkTable; use super::import_only_subgrid::ImportOnlySubgridV2; use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, LagrangeSubgridV2}; @@ -18,7 +18,7 @@ use git_version::git_version; use indicatif::{ProgressBar, ProgressStyle}; use itertools::Itertools; use lz4_flex::frame::{FrameDecoder, FrameEncoder}; -use ndarray::{s, Array3, Array5, ArrayView5, Axis, Dimension}; +use ndarray::{s, Array3, Array5, ArrayView5, Axis, CowArray, Dimension, Ix4}; use serde::{Deserialize, Serialize}; use std::borrow::Cow; use std::cmp::Ordering; @@ -280,6 +280,9 @@ pub enum GridError { /// Returned from [`Grid::evolve`] if the evolution failed. #[error("failed to evolve grid: {0}")] EvolutionFailure(String), + /// Errors that do no originate from this crate itself. + #[error(transparent)] + Other(#[from] anyhow::Error), } #[derive(Clone, Deserialize, Serialize)] @@ -1902,46 +1905,138 @@ impl Grid { /// /// Returns a [`GridError::EvolutionFailure`] if either the `operator` or its `info` is /// incompatible with this `Grid`. 
+ #[deprecated(since = "0.7.4", note = "use evolve_with_slice_iter instead")] pub fn evolve( &self, operator: ArrayView5, info: &OperatorInfo, order_mask: &[bool], ) -> Result { - let op_info_dim = ( - info.fac1.len(), - info.pids1.len(), - info.x1.len(), - info.pids0.len(), - info.x0.len(), - ); + self.evolve_with_slice_iter( + info.fac1 + .iter() + .zip(operator.axis_iter(Axis(0))) + .map(|(&fac1, op)| { + Ok::<_, GridError>(( + OperatorSliceInfo { + fac0: info.fac0.clone(), + pids0: info.pids0.clone(), + x0: info.x0.clone(), + fac1, + pids1: info.pids1.clone(), + x1: info.x1.clone(), + lumi_id_types: info.lumi_id_types.clone(), + }, + CowArray::from(op), + )) + }), + order_mask, + (info.xir, info.xif), + &AlphasTable { + ren1: info.ren1.clone(), + alphas: info.alphas.clone(), + }, + ) + } + + // TODO: + // - try to find a better solution than to require that E must be convertible into + // anyhow::Error + + /// Converts this `Grid` into an [`FkTable`] using `slices` that must iterate over a [`Result`] + /// of tuples of an [`OperatorSliceInfo`] and the corresponding sliced operator. The parameter + /// `order_mask` can be used to include or exclude orders from this operation, and must + /// correspond to the ordering given by [`Grid::orders`]. Orders that are not given are + /// enabled, and in particular if `order_mask` is empty all orders are activated. + /// + /// # Errors + /// + /// Returns a [`GridError::EvolutionFailure`] if either the `operator` or its `info` is + /// incompatible with this `Grid`. Returns a [`GridError::Other`] if the iterator from `slices` + /// return an error. 
+ pub fn evolve_with_slice_iter<'a, E: Into>( + &self, + slices: impl IntoIterator), E>>, + order_mask: &[bool], + xi: (f64, f64), + alphas_table: &AlphasTable, + ) -> Result { + use super::evolution::EVOLVE_INFO_TOL_ULPS; + + let mut lhs: Option = None; + let mut fac1 = Vec::new(); + + for result in slices { + let (info, operator) = result.map_err(|err| GridError::Other(err.into()))?; + + let op_info_dim = ( + info.pids1.len(), + info.x1.len(), + info.pids0.len(), + info.x0.len(), + ); + + if operator.dim() != op_info_dim { + return Err(GridError::EvolutionFailure(format!( + "operator information {:?} does not match the operator's dimensions: {:?}", + op_info_dim, + operator.dim(), + ))); + } - if operator.dim() != op_info_dim { + let view = operator.view(); + + let (subgrids, lumi) = if self.has_pdf1() && self.has_pdf2() { + evolution::evolve_slice_with_two(self, &view, &info, order_mask, xi, alphas_table) + } else { + evolution::evolve_slice_with_one(self, &view, &info, order_mask, xi, alphas_table) + }?; + + let mut rhs = Self { + subgrids, + lumi, + bin_limits: self.bin_limits.clone(), + orders: vec![Order::new(0, 0, 0, 0)], + subgrid_params: SubgridParams::default(), + more_members: self.more_members.clone(), + }; + + // write additional metadata + rhs.set_key_value("lumi_id_types", &info.lumi_id_types); + + if let Some(lhs) = &mut lhs { + lhs.merge(rhs)?; + } else { + lhs = Some(rhs); + } + + fac1.push(info.fac1); + } + + // UNWRAP: if we can't compare two numbers there's a bug + fac1.sort_by(|a, b| a.partial_cmp(b).unwrap()); + + // make sure we've evolved all slices + if let Some(muf2) = self + .evolve_info(&order_mask) + .fac1 + .into_iter() + .map(|mu2| xi.1 * xi.1 * mu2) + .find(|&grid_mu2| { + !fac1 + .iter() + .any(|&eko_mu2| approx_eq!(f64, grid_mu2, eko_mu2, ulps = EVOLVE_INFO_TOL_ULPS)) + }) + { return Err(GridError::EvolutionFailure(format!( - "operator information {:?} does not match the operator's dimensions: {:?}", - op_info_dim, - 
operator.dim(), + "no operator for muf2 = {muf2} found in {fac1:?}" ))); } - let (subgrids, lumi) = if self.has_pdf1() && self.has_pdf2() { - evolution::evolve_with_two(self, &operator, info, order_mask) - } else { - evolution::evolve_with_one(self, &operator, info, order_mask) - }?; - - let mut grid = Self { - subgrids, - lumi, - bin_limits: self.bin_limits.clone(), - orders: vec![Order::new(0, 0, 0, 0)], - subgrid_params: SubgridParams::default(), - more_members: self.more_members.clone(), - }; - - // write additional metadata - grid.set_key_value("lumi_id_types", &info.lumi_id_types); + // UNWRAP: should panic when all subgrids are empty + let grid = lhs.unwrap(); + // UNWRAP: merging evolved slices should be a proper FkTable again Ok(FkTable::try_from(grid).unwrap_or_else(|_| unreachable!())) } diff --git a/pineappl_cli/src/evolve.rs b/pineappl_cli/src/evolve.rs index 5c79e869..8d5a7a73 100644 --- a/pineappl_cli/src/evolve.rs +++ b/pineappl_cli/src/evolve.rs @@ -10,22 +10,27 @@ use std::process::ExitCode; #[cfg(feature = "evolve")] mod eko { - use anyhow::{bail, Result}; + use anyhow::{anyhow, Result}; use base64::alphabet::URL_SAFE; use base64::engine::general_purpose::PAD; use base64::engine::GeneralPurpose; use base64::Engine; use either::Either; use lz4_flex::frame::FrameDecoder; - use ndarray::{Array4, Array5, Axis}; + use ndarray::iter::AxisIter; + use ndarray::{Array4, Array5, Axis, CowArray, Ix4}; use ndarray_npy::{NpzReader, ReadNpyExt}; - use pineappl::evolution::OperatorInfo; + use pineappl::evolution::OperatorSliceInfo; use pineappl::pids; use serde::Deserialize; + use std::collections::HashMap; + use std::ffi::{OsStr, OsString}; use std::fs::File; - use std::io::BufReader; + use std::io::{self, BufReader, Cursor}; + use std::iter::Zip; use std::path::Path; - use tar::Archive; + use std::slice::Iter; + use tar::{Archive, Entries}; #[derive(Deserialize)] struct MetadataV0 { @@ -63,118 +68,356 @@ mod eko { enum Metadata { V0(MetadataV0), 
V1(MetadataV1), + V2(MetadataV2), } - pub fn read(eko: &Path) -> Result<(OperatorInfo, Array5)> { - let mut archive = Archive::new(File::open(eko)?); + const BASES_V1_DEFAULT_PIDS: [i32; 14] = [22, -6, -5, -4, -3, -2, -1, 21, 1, 2, 3, 4, 5, 6]; - let mut metadata = None; - let mut operator = None; - let mut operators = Vec::new(); - let mut fac1 = Vec::new(); + #[derive(Deserialize)] + struct OperatorV1 { + mu0: f64, + } - let base64 = GeneralPurpose::new(&URL_SAFE, PAD); + #[derive(Deserialize)] + struct OperatorInfoV1 { + scale: f64, + } - for entry in archive.entries()? { - let file = entry?; - let path = file.header().path()?; + #[derive(Deserialize)] + struct BasesV1 { + inputgrid: Option>, + inputpids: Option>>, + targetgrid: Option>, + targetpids: Option>, + xgrid: Vec, + } - if let Some(file_name) = path.file_name() { - // TODO: get rid of the unwrap - match file_name.to_str().unwrap() { - "metadata.yaml" => metadata = Some(serde_yaml::from_reader(file)?), - "operators.npy.lz4" => { - operator = Some(Array5::::read_npy(FrameDecoder::new( - BufReader::new(file), - ))?); - } - x if x.ends_with(".npz.lz4") => { - let name = x.strip_suffix(".npz.lz4").unwrap(); - let bytes = base64.decode(name.as_bytes())?; - let array: [u8; 8] = bytes.as_slice().try_into().unwrap(); - let muf2 = f64::from_le_bytes(array); - - let mut reader = BufReader::new(FrameDecoder::new(BufReader::new(file))); - let mut buffer = Vec::new(); - std::io::copy(&mut reader, &mut buffer)?; - let mut npz = NpzReader::new(std::io::Cursor::new(buffer))?; - let operator: Array4 = npz.by_name("operator.npy")?; - - fac1.push(muf2); - operators.push(operator); - } - _ => {} + #[derive(Deserialize)] + struct MetadataV2 { + bases: BasesV1, + } + + pub enum EkoSlices { + V0 { + fac1: Vec, + info: OperatorSliceInfo, + operator: Array5, + }, + // V1 is a special case of V2 + V2 { + fac1: HashMap, + info: OperatorSliceInfo, + archive: Archive, + }, + } + + impl EkoSlices { + /// Read the EKO at `eko_path` 
and return the contents of the `metadata.yaml` file + /// deserialized into a [`Metadata`] object. + fn read_metadata(eko_path: &Path) -> Result { + for entry in Archive::new(File::open(eko_path)?).entries_with_seek()? { + let entry = entry?; + let path = entry.path()?; + + if path.ends_with("metadata.yaml") { + return Ok(serde_yaml::from_reader(entry)?); } } + + Err(anyhow!("no file 'metadata.yaml' in EKO archive found")) } - if !operators.is_empty() { - let ops: Vec<_> = operators.iter().map(Array4::view).collect(); - operator = Some(ndarray::stack(Axis(0), &ops).unwrap()); + pub fn new(eko_path: &Path) -> Result { + let metadata = Self::read_metadata(eko_path)?; + + match metadata { + Metadata::V0(v0) => Self::with_v0(v0, eko_path), + Metadata::V1(v1) => Self::with_v1(v1, eko_path), + Metadata::V2(v2) => Self::with_v2(v2, eko_path), + } + } + + fn with_v0(metadata: MetadataV0, eko_path: &Path) -> Result { + let mut operator = None; + + for entry in Archive::new(File::open(eko_path)?).entries_with_seek()? 
{ + let entry = entry?; + let path = entry.path()?; + + if path.ends_with("operators.npy.lz4") { + operator = Some(Array5::read_npy(FrameDecoder::new(BufReader::new(entry)))?); + } + } + + let operator = + operator.ok_or_else(|| anyhow!("no file 'operators.npy.lz4' in EKO archive found"))?; + + Ok(Self::V0 { + fac1: metadata.q2_grid, + info: OperatorSliceInfo { + lumi_id_types: pids::determine_lumi_id_types(&metadata.inputpids), + fac0: metadata.q2_ref, + pids0: metadata.inputpids, + x0: metadata.inputgrid, + fac1: 0.0, + pids1: metadata.targetpids, + x1: metadata.targetgrid, + }, + operator, + }) } - let mut info = match metadata { - Some(Metadata::V0(metadata)) => OperatorInfo { - fac1: metadata.q2_grid.clone(), - pids0: metadata.inputpids, - x0: metadata.inputgrid, - pids1: metadata.targetpids, - x1: metadata.targetgrid, - fac0: metadata.q2_ref, - ren1: vec![], - alphas: vec![], - xir: 1.0, - xif: 1.0, - lumi_id_types: String::new(), - }, - Some(Metadata::V1(metadata)) => OperatorInfo { + fn with_v1(metadata: MetadataV1, eko_path: &Path) -> Result { + let mut fac1 = HashMap::new(); + let base64 = GeneralPurpose::new(&URL_SAFE, PAD); + + for entry in Archive::new(File::open(eko_path)?).entries_with_seek()? 
{ + let entry = entry?; + let path = entry.path()?; + + if path.starts_with("./operators") + && (path.extension() == Some(OsStr::new("lz4"))) + && (path.with_extension("").extension() == Some(OsStr::new("npz"))) + { + // TODO: use let-else when available in MSRV + let file_stem = if let Some(file_stem) = path.with_extension("").file_stem() { + file_stem.to_os_string() + } else { + continue; + }; + + let bytes = base64.decode(file_stem.to_string_lossy().as_bytes())?; + // UNWRAP: we assume that the filenames represent exactly 8 bytes + let array: [u8; 8] = bytes.as_slice().try_into().unwrap(); + let scale = f64::from_le_bytes(array); + + fac1.insert(file_stem, scale); + } + } + + let pids0 = metadata.rotations.inputpids.map_or_else( + || metadata.rotations.pids.clone(), + |either| { + either.right_or_else(|basis| { + basis + .into_iter() + .map(|factors| { + let tuples: Vec<_> = metadata + .rotations + .pids + .iter() + .copied() + .zip(factors) + .collect(); + + // UNWRAP: we assume that an evolution basis is specified, if + // that's not the case we must make the algorithm more generic + pids::pdg_mc_ids_to_evol(&tuples).unwrap() + }) + .collect() + }) + }, + ); + + Ok(Self::V2 { fac1, - pids0: metadata.rotations.inputpids.map_or_else( - || metadata.rotations.pids.clone(), - |either| { - either.right_or_else(|basis| { - basis - .into_iter() - .map(|factors| { - let tuples: Vec<_> = metadata - .rotations - .pids - .iter() - .copied() - .zip(factors.into_iter()) - .collect(); - - pids::pdg_mc_ids_to_evol(&tuples).unwrap() - }) - .collect() + info: OperatorSliceInfo { + lumi_id_types: pids::determine_lumi_id_types(&pids0), + fac0: metadata.mu20, + pids0, + x0: metadata + .rotations + .inputgrid + .unwrap_or_else(|| metadata.rotations.xgrid.clone()), + fac1: 0.0, + pids1: metadata + .rotations + .targetpids + .unwrap_or(metadata.rotations.pids), + x1: metadata + .rotations + .targetgrid + .unwrap_or(metadata.rotations.xgrid), + }, + archive: 
Archive::new(File::open(eko_path)?), + }) + } + + fn with_v2(metadata: MetadataV2, eko_path: &Path) -> Result { + let mut fac1 = HashMap::new(); + let mut operator: Option = None; + + for entry in Archive::new(File::open(eko_path)?).entries_with_seek()? { + let entry = entry?; + let path = entry.path()?; + + if path.starts_with("./operators") && (path.extension() == Some(OsStr::new("yaml"))) + { + // TODO: use let-else when available in MSRV + let file_stem = if let Some(file_stem) = path.file_stem() { + file_stem.to_os_string() + } else { + continue; + }; + + let op_info: OperatorInfoV1 = serde_yaml::from_reader(entry)?; + fac1.insert(file_stem, op_info.scale); + } else if path.as_os_str() == OsStr::new("./operator.yaml") { + operator = Some(serde_yaml::from_reader(entry)?); + } + } + + let operator = + operator.ok_or_else(|| anyhow!("no file 'operator.yaml' in EKO archive found"))?; + + let pids0 = metadata.bases.inputpids.map_or_else( + || BASES_V1_DEFAULT_PIDS.to_vec(), + |basis| { + basis + .into_iter() + .map(|factors| { + let tuples: Vec<_> = + BASES_V1_DEFAULT_PIDS.iter().copied().zip(factors).collect(); + + // UNWRAP: we assume that an evolution basis is specified, if that's + // not the case we must make the algorithm more generic + pids::pdg_mc_ids_to_evol(&tuples).unwrap() }) - }, - ), - x0: metadata - .rotations - .inputgrid - .unwrap_or_else(|| metadata.rotations.xgrid.clone()), - pids1: metadata - .rotations - .targetpids - .unwrap_or(metadata.rotations.pids), - x1: metadata - .rotations - .targetgrid - .unwrap_or(metadata.rotations.xgrid), - fac0: metadata.mu20, - ren1: vec![], - alphas: vec![], - xir: 1.0, - xif: 1.0, - lumi_id_types: String::new(), - }, - None => bail!("no `metadata.yaml` file found"), - }; - - info.lumi_id_types = pids::determine_lumi_id_types(&info.pids0); - - Ok((info, operator.unwrap())) + .collect() + }, + ); + + Ok(Self::V2 { + fac1, + info: OperatorSliceInfo { + lumi_id_types: pids::determine_lumi_id_types(&pids0), + fac0: 
operator.mu0 * operator.mu0, + pids0, + x0: metadata + .bases + .inputgrid + .unwrap_or_else(|| metadata.bases.xgrid.clone()), + fac1: 0.0, + pids1: metadata + .bases + .targetpids + .unwrap_or_else(|| BASES_V1_DEFAULT_PIDS.to_vec()), + x1: metadata + .bases + .targetgrid + .unwrap_or_else(|| metadata.bases.xgrid.clone()), + }, + archive: Archive::new(File::open(eko_path)?), + }) + } + + pub fn iter_mut(&mut self) -> EkoSlicesIter { + match self { + Self::V0 { + fac1, + info, + operator, + } => EkoSlicesIter::V0 { + info: info.clone(), + iter: fac1.iter().zip(operator.axis_iter(Axis(0))), + }, + Self::V2 { + fac1, + info, + archive, + } => { + EkoSlicesIter::V2 { + fac1: fac1.clone(), + info: info.clone(), + // UNWRAP: short of changing the return type of this method we can't + // propagate the error, so we must panic here + entries: archive.entries_with_seek().unwrap(), + } + } + } + } + } + + impl<'a> IntoIterator for &'a mut EkoSlices { + type Item = Result<(OperatorSliceInfo, CowArray<'a, f64, Ix4>)>; + type IntoIter = EkoSlicesIter<'a>; + + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } + } + + pub enum EkoSlicesIter<'a> { + V0 { + info: OperatorSliceInfo, + iter: Zip, AxisIter<'a, f64, Ix4>>, + }, + V2 { + fac1: HashMap, + info: OperatorSliceInfo, + entries: Entries<'a, File>, + }, + } + + impl<'a> Iterator for EkoSlicesIter<'a> { + type Item = Result<(OperatorSliceInfo, CowArray<'a, f64, Ix4>)>; + + fn next(&mut self) -> Option { + match self { + Self::V0 { info, iter } => { + if let Some((fac1, operator)) = iter.next() { + let mut info = info.clone(); + info.fac1 = *fac1; + + Some(Ok((info, CowArray::from(operator)))) + } else { + None + } + } + Self::V2 { + fac1, + info, + entries, + } => { + let fun = || { + for entry in entries { + let entry = entry?; + let path = entry.path()?; + + // here we're only interested in the operators themselves + if path.starts_with("./operators") + && (path.extension() == Some(OsStr::new("lz4"))) + && 
(path.with_extension("").extension() == Some(OsStr::new("npz"))) + { + // TODO: use let-else when available in MSRV + let file_stem = + if let Some(file_stem) = path.with_extension("").file_stem() { + file_stem.to_os_string() + } else { + continue; + }; + + let mut reader = + BufReader::new(FrameDecoder::new(BufReader::new(entry))); + let mut buffer = Vec::new(); + io::copy(&mut reader, &mut buffer)?; + let mut npz = NpzReader::new(Cursor::new(buffer))?; + let operator: Array4 = npz.by_name("operator.npy")?; + + let mut info = info.clone(); + info.fac1 = fac1.get(&file_stem).copied().ok_or_else(|| anyhow!("file '{}.yaml' not found, could not determine the operator's factorization scale", file_stem.to_string_lossy()))?; + + return Ok(Some((info, CowArray::from(operator)))); + } + } + + Ok(None) + }; + + fun().transpose() + } + } + } } } @@ -186,34 +429,12 @@ fn evolve_grid( orders: &[(u32, u32)], xir: f64, xif: f64, + use_old_evolve: bool, ) -> Result { - use pineappl::subgrid::{Mu2, Subgrid}; - - let (mut info, operator) = eko::read(eko)?; - - // TODO: the following should probably be a method of `Grid` - let mut ren1: Vec<_> = grid - .subgrids() - .iter() - .flat_map(|subgrid| { - subgrid - .mu2_grid() - .iter() - .map(|Mu2 { ren, .. }| xir * xir * ren) - .collect::>() - }) - .collect(); - ren1.sort_by(|a, b| a.partial_cmp(b).unwrap()); - ren1.dedup(); - let ren1 = ren1; - let alphas: Vec<_> = ren1.iter().map(|&mur2| pdf.alphas_q2(mur2)).collect(); - - info.ren1 = ren1; - info.alphas = alphas; - info.xir = xir; - info.xif = xif; + use eko::EkoSlices; + use pineappl::evolution::{AlphasTable, OperatorInfo}; - let orders: Vec<_> = grid + let order_mask: Vec<_> = grid .orders() .iter() .map(|order| { @@ -224,11 +445,50 @@ fn evolve_grid( }) .collect(); - Ok(grid.evolve(operator.view(), &info, &orders)?) 
+ let mut eko_slices = EkoSlices::new(eko)?; + let alphas_table = AlphasTable::from_grid(grid, xir, &|q2| pdf.alphas_q2(q2)); + + if use_old_evolve { + if let EkoSlices::V0 { + fac1, + info, + operator, + } = eko_slices + { + let op_info = OperatorInfo { + fac0: info.fac0, + pids0: info.pids0.clone(), + x0: info.x0.clone(), + fac1: fac1.clone(), + pids1: info.pids1.clone(), + x1: info.x1.clone(), + ren1: alphas_table.ren1, + alphas: alphas_table.alphas, + xir, + xif, + lumi_id_types: info.lumi_id_types, + }; + + #[allow(deprecated)] + Ok(grid.evolve(operator.view(), &op_info, &order_mask)?) + } else { + unimplemented!(); + } + } else { + Ok(grid.evolve_with_slice_iter(&mut eko_slices, &order_mask, (xir, xif), &alphas_table)?) + } } #[cfg(not(feature = "evolve"))] -fn evolve_grid(_: &Grid, _: &Path, _: &Pdf, _: &[(u32, u32)], _: f64, _: f64) -> Result { +fn evolve_grid( + _: &Grid, + _: &Path, + _: &Pdf, + _: &[(u32, u32)], + _: f64, + _: f64, + _: bool, +) -> Result { Err(anyhow!( "you need to install `pineappl` with feature `evolve`" )) @@ -273,6 +533,8 @@ pub struct Opts { /// Rescale the factorization scale with this factor. #[arg(default_value_t = 1.0, long)] xif: f64, + #[arg(hide = true, long)] + use_old_evolve: bool, } impl Subcommand for Opts { @@ -292,7 +554,15 @@ impl Subcommand for Opts { cfg.force_positive, ); - let fk_table = evolve_grid(&grid, &self.eko, &pdf, &self.orders, self.xir, self.xif)?; + let fk_table = evolve_grid( + &grid, + &self.eko, + &pdf, + &self.orders, + self.xir, + self.xif, + self.use_old_evolve, + )?; let evolved_results = helpers::convolute_scales( fk_table.grid(), &mut pdf, diff --git a/pineappl_cli/tests/evolve.rs b/pineappl_cli/tests/evolve.rs index a220cecc..65ca27bd 100644 --- a/pineappl_cli/tests/evolve.rs +++ b/pineappl_cli/tests/evolve.rs @@ -26,11 +26,8 @@ Options: const E906NLO_BIN_00_STR: &str = "b Grid FkTable rel. 
diff -+------------+------------+------------- 0 1.0659807e-1 1.0657904e-1 -1.7851986e-4 -1 1.0659807e-1 1.0657904e-1 -1.7851986e-4 -2 1.0659807e-1 1.0657904e-1 -1.7851986e-4 -3 1.0659807e-1 1.0657904e-1 -1.7851986e-4 -4 3.2698655e0 3.2711890e0 4.0477586e-4 -5 1.6039253e0 1.6047566e0 5.1825508e-4 +1 3.2698655e0 3.2711890e0 4.0477586e-4 +2 1.6039253e0 1.6047566e0 5.1825508e-4 "; const LHCB_DY_8TEV_STR: &str = "b Grid FkTable rel. diff @@ -67,6 +64,46 @@ const LHCB_WP_7TEV_STR: &str = "b Grid FkTable rel. diff 7 2.9272102e1 2.9268451e1 -1.2474967e-4 "; +const LHCB_WP_7TEV_V2_STR: &str = "b Grid FkTable rel. diff +-+--------------------+--------------------+---------------------- +0 7.8752126798068639e2 7.8731064380928558e2 -2.6745204220435248e-4 +1 7.1872113080347663e2 7.1853123147848032e2 -2.6421836906898033e-4 +2 6.2322357391848550e2 6.2306009928459093e2 -2.6230495882362259e-4 +3 5.0216762988872915e2 5.0203737363369049e2 -2.5938799573266280e-4 +4 3.7314505699003126e2 3.7305089832847733e2 -2.5233795755852384e-4 +5 2.5302044227292129e2 2.5295968261889854e2 -2.4013733229188983e-4 +6 1.1971045984774410e2 1.1968525412249538e2 -2.1055574659711862e-4 +7 2.9272102213930090e1 2.9268443366651141e1 -1.2499434622803562e-4 +"; + +const LHCB_WP_7TEV_V2_XIR2_STR: &str = "b Grid FkTable rel. diff +-+--------------------+--------------------+---------------------- +0 7.7634833292737017e2 7.7614037816519419e2 -2.6786270203205120e-4 +1 7.0866199875124983e2 7.0847444839781781e2 -2.6465417048249229e-4 +2 6.1427556024981789e2 6.1411417374531106e2 -2.6272655946324441e-4 +3 4.9482819982783724e2 4.9469964081143053e2 -2.5980535557890150e-4 +4 3.6756257449354945e2 3.6746967569489709e2 -2.5274281196974169e-4 +5 2.4912642701834142e2 2.4906651029915440e2 -2.4050727939273209e-4 +6 1.1776254040032327e2 1.1773772039493417e2 -2.1076316207790935e-4 +7 2.8749891297668260e1 2.8746299479656258e1 -1.2493327278395583e-4 +"; + +const LHCB_WP_7TEV_V2_XIF_2_STR: &str = + "b Grid FkTable rel. 
diff +-+--------------------+--------------------+---------------------- +0 8.0902449713533758e2 8.0880109089579207e2 -2.7614273774967391e-4 +1 7.3869242569893402e2 7.3849113100483919e2 -2.7250136469769703e-4 +2 6.4102495904778243e2 6.4085178025871448e2 -2.7015919836448354e-4 +3 5.1668563837653949e2 5.1654786167667771e2 -2.6665478896348294e-4 +4 3.8405066991124284e2 3.8395127677619655e2 -2.5880213949180941e-4 +5 2.6047697125229388e2 2.6041295913273854e2 -2.4574963094659008e-4 +6 1.2324364745022301e2 1.2321715784184289e2 -2.1493690691698486e-4 +7 3.0134629982656573e1 3.0130872371345841e1 -1.2469412476256991e-4 +"; + +const LHCB_WP_7TEV_V2_XIF_2_ERROR_STR: &str = "Error: failed to evolve grid: no operator for muf2 = 25825.775616000003 found in [6456.443904000001] +"; + const LHCB_WP_7TEV_OPTIMIZED_STR: &str = "b etal dsig/detal [] [pb] -+----+----+----------- @@ -191,16 +228,203 @@ fn lhcb_wp_7tev() { .stdout(LHCB_WP_7TEV_OPTIMIZED_STR); } +#[test] +fn lhcb_wp_7tev_use_old_evolve() { + let output = NamedTempFile::new("fktable1c.lz4").unwrap(); + + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "--silence-lhapdf", + "evolve", + "../test-data/LHCB_WP_7TEV.pineappl.lz4", + "../test-data/LHCB_WP_7TEV.tar", + output.path().to_str().unwrap(), + "NNPDF40_nlo_as_01180", + "--orders=a2,as1a2", + "--use-old-evolve", + ]) + .assert() + .success() + .stdout(LHCB_WP_7TEV_STR); +} + +#[test] +fn lhcb_wp_7tev_v2() { + let input = NamedTempFile::new("optimized.pineappl.lz4").unwrap(); + + // we first need to optimize the grid, to strip empty x-grid values not contained in the EKO + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "write", + "--optimize", + "../test-data/LHCB_WP_7TEV.pineappl.lz4", + input.path().to_str().unwrap(), + ]) + .assert() + .success() + .stdout(""); + + let output = NamedTempFile::new("fktable2a.lz4").unwrap(); + + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "--silence-lhapdf", + "evolve", + "--digits-abs=16", + "--digits-rel=16", + 
input.path().to_str().unwrap(), + "../test-data/LHCB_WP_7TEV_v2.tar", + output.path().to_str().unwrap(), + "NNPDF40_nlo_as_01180", + "--orders=a2,as1a2", + ]) + .assert() + .success() + .stdout(LHCB_WP_7TEV_V2_STR); +} + +#[test] +fn lhcb_wp_7tev_v2_xir_2() { + let input = NamedTempFile::new("optimized.pineappl.lz4").unwrap(); + + // we first need to optimize the grid, to strip empty x-grid values not contained in the EKO + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "write", + "--optimize", + "../test-data/LHCB_WP_7TEV.pineappl.lz4", + input.path().to_str().unwrap(), + ]) + .assert() + .success() + .stdout(""); + + let output = NamedTempFile::new("fktable2b.lz4").unwrap(); + + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "--silence-lhapdf", + "evolve", + "--digits-abs=16", + "--digits-rel=16", + input.path().to_str().unwrap(), + "../test-data/LHCB_WP_7TEV_v2.tar", + output.path().to_str().unwrap(), + "NNPDF40_nlo_as_01180", + "--orders=a2,as1a2", + "--xir=2", + ]) + .assert() + .success() + .stdout(LHCB_WP_7TEV_V2_XIR2_STR); +} + +#[test] +fn lhcb_wp_7tev_v2_xif_2() { + let input = NamedTempFile::new("optimized.pineappl.lz4").unwrap(); + + // we first need to optimize the grid, to strip empty x-grid values not contained in the EKO + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "write", + "--optimize", + "../test-data/LHCB_WP_7TEV.pineappl.lz4", + input.path().to_str().unwrap(), + ]) + .assert() + .success() + .stdout(""); + + let output = NamedTempFile::new("fktable2c.lz4").unwrap(); + + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "--silence-lhapdf", + "evolve", + "--digits-abs=16", + "--digits-rel=16", + input.path().to_str().unwrap(), + "../test-data/LHCB_WP_7TEV_v2_xif_2.tar", + output.path().to_str().unwrap(), + "NNPDF40_nlo_as_01180", + "--orders=a2,as1a2", + "--xif=2", + ]) + .assert() + .success() + .stdout(LHCB_WP_7TEV_V2_XIF_2_STR); +} + +#[test] +fn lhcb_wp_7tev_v2_xif_2_error() { + let input = 
NamedTempFile::new("optimized.pineappl.lz4").unwrap(); + + // we first need to optimize the grid, to strip empty x-grid values not contained in the EKO + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "write", + "--optimize", + "../test-data/LHCB_WP_7TEV.pineappl.lz4", + input.path().to_str().unwrap(), + ]) + .assert() + .success() + .stdout(""); + + let output = NamedTempFile::new("fktable2c.lz4").unwrap(); + + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "--silence-lhapdf", + "evolve", + "--digits-abs=16", + "--digits-rel=16", + input.path().to_str().unwrap(), + "../test-data/LHCB_WP_7TEV_v2.tar", + output.path().to_str().unwrap(), + "NNPDF40_nlo_as_01180", + "--orders=a2,as1a2", + "--xif=2", + ]) + .assert() + .failure() + .stderr(LHCB_WP_7TEV_V2_XIF_2_ERROR_STR) + .stdout(""); +} + #[test] fn e906nlo_bin_00() { + let input = NamedTempFile::new("E906nlo_bin_00_unique_bin_limits.pineappl.lz4").unwrap(); let output = NamedTempFile::new("fktable2.lz4").unwrap(); + // we need to delete bins with the same bin limits for `Grid::merge` to work properly + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "write", + "--delete-bins=1-3", + "../test-data/E906nlo_bin_00.pineappl.lz4", + input.path().to_str().unwrap(), + ]) + .assert() + .success() + .stdout(""); + Command::cargo_bin("pineappl") .unwrap() .args([ "--silence-lhapdf", "evolve", - "../test-data/E906nlo_bin_00.pineappl.lz4", + input.path().to_str().unwrap(), "../test-data/E906nlo_bin_00.tar", output.path().to_str().unwrap(), "NNPDF40_nlo_as_01180", diff --git a/pineappl_py/src/evolution.rs b/pineappl_py/src/evolution.rs index b554aa1d..c1c83992 100644 --- a/pineappl_py/src/evolution.rs +++ b/pineappl_py/src/evolution.rs @@ -1,5 +1,5 @@ use numpy::{IntoPyArray, PyArray1}; -use pineappl::evolution::EvolveInfo; +use pineappl::evolution::{EvolveInfo, OperatorSliceInfo}; use pyo3::prelude::*; @@ -36,3 +36,11 @@ impl PyEvolveInfo { self.evolve_info.ren1.clone().into_pyarray(py) } } + +/// TODO 
+#[pyclass] +#[repr(transparent)] +#[derive(Clone)] +pub struct PyOperatorSliceInfo { + pub(crate) slice_info: OperatorSliceInfo, +} diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 102b65f4..d50312de 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -1,4 +1,4 @@ -use pineappl::evolution::OperatorInfo; +use pineappl::evolution::{AlphasTable, OperatorInfo}; use pineappl::grid::{Grid, Ntuple, Order}; #[allow(deprecated)] @@ -7,13 +7,13 @@ use pineappl::grid::{EkoInfo, GridAxes}; use pineappl::lumi::LumiCache; use super::bin::PyBinRemapper; -use super::evolution::PyEvolveInfo; +use super::evolution::{PyEvolveInfo, PyOperatorSliceInfo}; use super::fk_table::PyFkTable; use super::lumi::PyLumiEntry; use super::subgrid::{PySubgridEnum, PySubgridParams}; use itertools::izip; -use numpy::{IntoPyArray, PyArray1, PyReadonlyArray1, PyReadonlyArray5}; +use numpy::{IntoPyArray, PyArray1, PyReadonlyArray1, PyReadonlyArray4, PyReadonlyArray5}; use std::collections::HashMap; use std::fs::File; @@ -22,6 +22,9 @@ use std::path::PathBuf; use pyo3::exceptions::PyValueError; use pyo3::prelude::*; +use pyo3::types::{PyIterator, PyTuple}; + +use ndarray::CowArray; /// PyO3 wrapper to :rustdoc:`pineappl::grid::Order ` /// @@ -562,6 +565,51 @@ impl PyGrid { } } + /// TODO + /// + /// Parameters + /// ---------- + /// slices : TODO + /// order_mask : TODO + /// + /// Returns + /// ------- + /// TODO + pub fn evolve_with_slice_iter( + &self, + slices: &PyIterator, + order_mask: PyReadonlyArray1, + xi: (f64, f64), + ren1: Vec, + alphas: Vec, + ) -> PyResult { + Ok(self + .grid + .evolve_with_slice_iter( + slices.map(|result| { + // TODO: check whether we can avoid the `.unwrap` calls + let any = result.unwrap(); + let tuple = any.downcast::().unwrap(); + let item0 = tuple.get_item(0).unwrap(); + let item1 = tuple.get_item(1).unwrap(); + let slice_info = item0.extract::().unwrap(); + let operator = item1.extract::>().unwrap(); + // TODO: can we get rid of 
the `into_owned` call? + let array = CowArray::from(operator.as_array().into_owned()); + + // TODO: change `PyErr` into something appropriate + Ok::<_, PyErr>((slice_info.slice_info, array)) + }), + // TODO: what if it's non-contiguous? + order_mask.as_slice().unwrap(), + xi, + &AlphasTable { ren1, alphas }, + ) + .map(|fk_table| PyFkTable { fk_table }) + // TODO: get rid of this `.unwrap` call + .unwrap()) + } + /// Load grid from file. /// /// **Usage:** `pineko`, FKTable generation