This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Fix panic in case of a missing model #7698

Merged · 1 commit · Dec 9, 2020
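Judging from the diff below, the panic came from calling `.unwrap()` on an `Option<RegressionModel>` that is `None` when the regression produces no model; the fix replaces those unwraps with a helper that falls back to zero errors. A minimal illustration of the old failure mode, using `Option<Vec<f64>>` as a stand-in for the real model type:

    fn main() {
        // Stand-in for `Option<RegressionModel>` when the analysis yields no model.
        let model: Option<Vec<f64>> = None;
        // The old writer code unwrapped the model to reach its standard errors,
        // which panics as soon as the model is missing.
        let _errors = model.unwrap(); // panics: called `Option::unwrap()` on a `None` value
    }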
4 changes: 3 additions & 1 deletion frame/benchmarking/src/analysis.rs
@@ -18,9 +18,11 @@
 //! Tools for analyzing the benchmark results.
 
 use std::collections::BTreeMap;
-use linregress::{FormulaRegressionBuilder, RegressionDataBuilder, RegressionModel};
+use linregress::{FormulaRegressionBuilder, RegressionDataBuilder};
 use crate::BenchmarkResults;
 
+pub use linregress::RegressionModel;
+
 pub struct Analysis {
 	pub base: u128,
 	pub slopes: Vec<u128>,
2 changes: 1 addition & 1 deletion frame/benchmarking/src/lib.rs
@@ -26,7 +26,7 @@ mod analysis;
 
 pub use utils::*;
 #[cfg(feature = "std")]
-pub use analysis::{Analysis, BenchmarkSelector};
+pub use analysis::{Analysis, BenchmarkSelector, RegressionModel};
 #[doc(hidden)]
 pub use sp_io::storage::root as storage_root;
 pub use sp_runtime::traits::Zero;
25 changes: 18 additions & 7 deletions utils/frame/benchmarking-cli/src/writer.rs
@@ -24,7 +24,7 @@ use std::path::PathBuf;
 use serde::Serialize;
 
 use crate::BenchmarkCmd;
-use frame_benchmarking::{BenchmarkBatch, BenchmarkSelector, Analysis};
+use frame_benchmarking::{BenchmarkBatch, BenchmarkSelector, Analysis, RegressionModel};
 use sp_runtime::traits::Zero;
 
 const VERSION: &'static str = env!("CARGO_PKG_VERSION");
@@ -134,6 +134,17 @@ fn map_results(batches: &[BenchmarkBatch]) -> Result<HashMap<String, Vec<Benchma
 	Ok(all_benchmarks)
 }
 
+// Get an iterator of errors from a model. If the model is `None` all errors are zero.
+fn extract_errors(model: &Option<RegressionModel>) -> impl Iterator<Item=u128> + '_ {
+	let mut errors = model.as_ref().map(|m| m.se.regressor_values.iter());
+	std::iter::from_fn(move || {
+		match &mut errors {
+			Some(model) => model.next().map(|val| *val as u128),
+			_ => Some(0),
+		}
+	})
+}
+
 // Analyze and return the relevant results for a given benchmark.
 fn get_benchmark_data(batch: &BenchmarkBatch) -> BenchmarkData {
 	// Analyze benchmarks to get the linear regression.
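The helper added above is what removes the panic: where the old code unwrapped the model to read its standard errors, `extract_errors` yields zeros when no model exists. A minimal, self-contained sketch of that behaviour — the `Stats` struct below is a stand-in for the real `RegressionModel`, and the numbers are made up for illustration:

    // Stand-in for the regression model: just a list of standard errors.
    struct Stats { regressor_errors: Vec<f64> }

    // Same shape as the PR's `extract_errors`: yield each error as u128,
    // or an endless stream of zeros when no model was produced.
    fn extract_errors(model: &Option<Stats>) -> impl Iterator<Item = u128> + '_ {
        let mut errors = model.as_ref().map(|m| m.regressor_errors.iter());
        std::iter::from_fn(move || match &mut errors {
            Some(iter) => iter.next().map(|e| *e as u128),
            None => Some(0),
        })
    }

    fn main() {
        let slopes = vec![7u128, 0, 3];

        // With a model: each slope is paired with its (truncated) error.
        let with_model = Some(Stats { regressor_errors: vec![1.4, 0.2, 0.9] });
        let paired: Vec<(u128, u128)> =
            slopes.iter().copied().zip(extract_errors(&with_model)).collect();
        assert_eq!(paired, vec![(7, 1), (0, 0), (3, 0)]);

        // Without a model: the old code called `.unwrap()` here and panicked;
        // now every slope is simply paired with a zero error.
        let no_model: Option<Stats> = None;
        let paired: Vec<(u128, u128)> =
            slopes.iter().copied().zip(extract_errors(&no_model)).collect();
        assert_eq!(paired, vec![(7, 0), (0, 0), (3, 0)]);
    }

Because the zip with the slopes drives iteration, the endless zeros from the `None` branch stop being consumed as soon as the slopes run out.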
@@ -149,40 +160,40 @@ fn get_benchmark_data(batch: &BenchmarkBatch) -> BenchmarkData {
 
 	extrinsic_time.slopes.into_iter()
 		.zip(extrinsic_time.names.iter())
-		.zip(extrinsic_time.model.unwrap().se.regressor_values.iter())
+		.zip(extract_errors(&extrinsic_time.model))
 		.for_each(|((slope, name), error)| {
 			if !slope.is_zero() {
 				if !used_components.contains(&name) { used_components.push(name); }
 				used_extrinsic_time.push(ComponentSlope {
 					name: name.clone(),
 					slope: slope.saturating_mul(1000),
-					error: (*error as u128).saturating_mul(1000),
+					error: error.saturating_mul(1000),
 				});
 			}
 		});
 	reads.slopes.into_iter()
 		.zip(reads.names.iter())
-		.zip(reads.model.unwrap().se.regressor_values.iter())
+		.zip(extract_errors(&reads.model))
 		.for_each(|((slope, name), error)| {
 			if !slope.is_zero() {
 				if !used_components.contains(&name) { used_components.push(name); }
 				used_reads.push(ComponentSlope {
 					name: name.clone(),
 					slope,
-					error: *error as u128,
+					error,
 				});
 			}
 		});
 	writes.slopes.into_iter()
 		.zip(writes.names.iter())
-		.zip(writes.model.unwrap().se.regressor_values.iter())
+		.zip(extract_errors(&writes.model))
 		.for_each(|((slope, name), error)| {
 			if !slope.is_zero() {
 				if !used_components.contains(&name) { used_components.push(name); }
 				used_writes.push(ComponentSlope {
 					name: name.clone(),
 					slope,
-					error: *error as u128,
+					error,
 				});
 			}
 		});