This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

bench-cli: Support JSON output #10771

Merged 7 commits on Feb 2, 2022
2 changes: 2 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 2 additions & 0 deletions frame/benchmarking/Cargo.toml
@@ -27,6 +27,7 @@ sp-storage = { version = "4.0.0", path = "../../primitives/storage", default-fea
frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" }
frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" }
log = { version = "0.4.14", default-features = false }
serde = { version = "1.0.132", optional = true }

[dev-dependencies]
hex-literal = "0.3.4"
@@ -37,6 +38,7 @@ default = ["std"]
std = [
"codec/std",
"scale-info/std",
"serde",
"sp-runtime-interface/std",
"sp-runtime/std",
"sp-api/std",
25 changes: 25 additions & 0 deletions frame/benchmarking/src/utils.rs
@@ -22,12 +22,15 @@ use frame_support::{
pallet_prelude::*,
traits::StorageInfo,
};
#[cfg(feature = "std")]
use serde::Serialize;
use sp_io::hashing::blake2_256;
use sp_runtime::traits::TrailingZeroInput;
use sp_std::{prelude::Box, vec::Vec};
use sp_storage::TrackedStorageKey;

/// An alphabet of possible parameters to use for benchmarking.
#[cfg_attr(feature = "std", derive(Serialize))]
#[derive(Encode, Decode, Clone, Copy, PartialEq, Debug)]
#[allow(missing_docs)]
#[allow(non_camel_case_types)]
@@ -68,27 +71,35 @@ impl std::fmt::Display for BenchmarkParameter {
}

/// The results of a single benchmark.
#[cfg_attr(feature = "std", derive(Serialize))]
#[derive(Encode, Decode, Clone, PartialEq, Debug)]
pub struct BenchmarkBatch {
/// The pallet containing this benchmark.
#[cfg_attr(feature = "std", serde(with = "serde_as_str"))]
pub pallet: Vec<u8>,
/// The instance of this pallet being benchmarked.
#[cfg_attr(feature = "std", serde(with = "serde_as_str"))]
pub instance: Vec<u8>,
/// The extrinsic (or benchmark name) of this benchmark.
#[cfg_attr(feature = "std", serde(with = "serde_as_str"))]
pub benchmark: Vec<u8>,
/// The results from this benchmark.
pub results: Vec<BenchmarkResult>,
}

// TODO: could probably make API cleaner here.
/// The results of a single benchmark, where time and db results are separated.
#[cfg_attr(feature = "std", derive(Serialize))]
#[derive(Encode, Decode, Clone, PartialEq, Debug)]
pub struct BenchmarkBatchSplitResults {
/// The pallet containing this benchmark.
#[cfg_attr(feature = "std", serde(with = "serde_as_str"))]
pub pallet: Vec<u8>,
/// The instance of this pallet being benchmarked.
#[cfg_attr(feature = "std", serde(with = "serde_as_str"))]
pub instance: Vec<u8>,
/// The extrinsic (or benchmark name) of this benchmark.
#[cfg_attr(feature = "std", serde(with = "serde_as_str"))]
pub benchmark: Vec<u8>,
/// The extrinsic timing results from this benchmark.
pub time_results: Vec<BenchmarkResult>,
@@ -99,6 +110,7 @@ pub struct BenchmarkBatchSplitResults {
/// Result from running benchmarks on a FRAME pallet.
/// Contains duration of the function call in nanoseconds along with the benchmark parameters
/// used for that benchmark result.
#[cfg_attr(feature = "std", derive(Serialize))]
#[derive(Encode, Decode, Default, Clone, PartialEq, Debug)]
pub struct BenchmarkResult {
pub components: Vec<(BenchmarkParameter, u32)>,
@@ -109,6 +121,7 @@ pub struct BenchmarkResult {
pub writes: u32,
pub repeat_writes: u32,
pub proof_size: u32,
#[cfg_attr(feature = "std", serde(skip_serializing))]
pub keys: Vec<(Vec<u8>, u32, u32, bool)>,
}

@@ -118,6 +131,18 @@ impl BenchmarkResult {
}
}

/// Helper module to make serde serialize `Vec<u8>` as strings.
#[cfg(feature = "std")]
mod serde_as_str {
pub fn serialize<S>(value: &Vec<u8>, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let s = std::str::from_utf8(value).map_err(serde::ser::Error::custom)?;
serializer.collect_str(s)
}
}

/// Possible errors returned from the benchmarking pipeline.
#[derive(Clone, PartialEq, Debug)]
pub enum BenchmarkError {
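For reviewers, a minimal standalone sketch of what the `serde_as_str` helper above buys: pairing it with `#[serde(with = "serde_as_str")]` turns a UTF-8 `Vec<u8>` field into a JSON string instead of an array of numbers. The example struct, field value, and the serde/serde_json dependencies are assumptions for illustration, not code from this diff.

use serde::Serialize;

mod serde_as_str {
    // Same body as the helper added in utils.rs above.
    pub fn serialize<S>(value: &Vec<u8>, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let s = std::str::from_utf8(value).map_err(serde::ser::Error::custom)?;
        serializer.collect_str(s)
    }
}

#[derive(Serialize)]
struct Example {
    #[serde(with = "serde_as_str")]
    pallet: Vec<u8>,
}

fn main() {
    let e = Example { pallet: b"pallet_balances".to_vec() };
    // Prints {"pallet":"pallet_balances"}; non-UTF-8 bytes would surface as a serialization error.
    println!("{}", serde_json::to_string(&e).unwrap());
}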
1 change: 1 addition & 0 deletions utils/frame/benchmarking-cli/Cargo.toml
@@ -28,6 +28,7 @@ codec = { version = "2.0.0", package = "parity-scale-codec" }
clap = { version = "3.0", features = ["derive"] }
chrono = "0.4"
serde = "1.0.132"
serde_json = "1.0.74"
handlebars = "4.1.6"
Inflector = "0.11.4"
linked-hash-map = "0.5.4"
82 changes: 43 additions & 39 deletions utils/frame/benchmarking-cli/src/command.rs
@@ -35,7 +35,7 @@ use sp_externalities::Extensions;
use sp_keystore::{testing::KeyStore, KeystoreExt, SyncCryptoStorePtr};
use sp_runtime::traits::{Block as BlockT, Header as HeaderT};
use sp_state_machine::StateMachine;
use std::{fmt::Debug, sync::Arc, time};
use std::{fmt::Debug, fs, sync::Arc, time};

// This takes multiple benchmark batches and combines all the results where the pallet, instance,
// and benchmark are the same.
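The `combine_batches` implementation is outside this hunk; as a rough, standalone sketch of the grouping the comment above describes (stand-in types only, not the real `BenchmarkBatch`/`BenchmarkBatchSplitResults`):

use std::collections::BTreeMap;

// (pallet, instance, benchmark) identifies a benchmark; results sharing that
// triple are merged into one entry, with time and db results kept separate.
type Key = (String, String, String);

fn combine(time: Vec<(Key, u64)>, db: Vec<(Key, u64)>) -> BTreeMap<Key, (Vec<u64>, Vec<u64>)> {
    let mut out: BTreeMap<Key, (Vec<u64>, Vec<u64>)> = BTreeMap::new();
    for (key, result) in time {
        out.entry(key).or_default().0.push(result);
    }
    for (key, result) in db {
        out.entry(key).or_default().1.push(result);
    }
    out
}

fn main() {
    let key: Key = ("pallet_balances".into(), "pallet_balances".into(), "transfer".into());
    let combined = combine(vec![(key.clone(), 120)], vec![(key, 45)]);
    println!("{:?}", combined);
}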
@@ -357,55 +357,61 @@ impl BenchmarkCmd {
// are together.
let batches: Vec<BenchmarkBatchSplitResults> = combine_batches(batches, batches_db);

// Create the weights.rs file.
if let Some(output_path) = &self.output {
crate::writer::write_results(&batches, &storage_info, output_path, self)?;
}

// Jsonify the result and write it to a file or stdout if desired.
if !self.jsonify(&batches)? {
// Print the summary only if `jsonify` did not write to stdout.
self.print_summary(&batches, &storage_info)
}
Ok(())
}

/// Jsonifies the passed batches and writes them to stdout or into a file.
/// Can be configured via `--json` and `--json-file`.
/// Returns whether it wrote to stdout.
fn jsonify(&self, batches: &Vec<BenchmarkBatchSplitResults>) -> Result<bool> {
if self.json_output || self.json_file.is_some() {
let json = serde_json::to_string_pretty(&batches)
.map_err(|e| format!("Serializing into JSON: {:?}", e))?;

if let Some(path) = &self.json_file {
fs::write(path, json)?;
} else {
println!("{}", json);
return Ok(true)
}
}

Ok(false)
}

/// Prints the results as a human-readable summary without raw timing data.
fn print_summary(
&self,
batches: &Vec<BenchmarkBatchSplitResults>,
storage_info: &Vec<StorageInfo>,
) {
for batch in batches.into_iter() {
// Print benchmark metadata
println!(
"Pallet: {:?}, Extrinsic: {:?}, Lowest values: {:?}, Highest values: {:?}, Steps: {:?}, Repeat: {:?}",
String::from_utf8(batch.pallet).expect("Encoded from String; qed"),
String::from_utf8(batch.benchmark).expect("Encoded from String; qed"),
self.lowest_range_values,
self.highest_range_values,
self.steps,
self.repeat,
);
"Pallet: {:?}, Extrinsic: {:?}, Lowest values: {:?}, Highest values: {:?}, Steps: {:?}, Repeat: {:?}",
String::from_utf8(batch.pallet.clone()).expect("Encoded from String; qed"),
String::from_utf8(batch.benchmark.clone()).expect("Encoded from String; qed"),
self.lowest_range_values,
self.highest_range_values,
self.steps,
self.repeat,
);

// Skip raw data + analysis if there are no results
if batch.time_results.is_empty() {
continue
}

if self.raw_data {
// Print the table header
batch.time_results[0]
.components
.iter()
.for_each(|param| print!("{:?},", param.0));

print!("extrinsic_time_ns,storage_root_time_ns,reads,repeat_reads,writes,repeat_writes,proof_size_bytes\n");
// Print the values
batch.time_results.iter().for_each(|result| {
let parameters = &result.components;
parameters.iter().for_each(|param| print!("{:?},", param.1));
// Print extrinsic time and storage root time
print!(
"{:?},{:?},{:?},{:?},{:?},{:?},{:?}\n",
result.extrinsic_time,
result.storage_root_time,
result.reads,
result.repeat_reads,
result.writes,
result.repeat_writes,
result.proof_size,
);
});

println!();
}

if !self.no_storage_info {
let mut comments: Vec<String> = Default::default();
crate::writer::add_storage_comments(
@@ -460,8 +466,6 @@ impl BenchmarkCmd {
println!("");
}
}

Ok(())
}
}

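End to end, the new path is: `--json` pretty-prints the serialized `Vec<BenchmarkBatchSplitResults>` to stdout, while `--json-file` writes the same JSON to disk and still prints the summary. A sketch of reading that file back (the file name `raw.json` and the consumer below are assumptions, not part of the PR):

use std::fs;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Assumes a prior run with `--json-file=raw.json`.
    let raw = fs::read_to_string("raw.json")?;
    // Top level mirrors the serialized Vec<BenchmarkBatchSplitResults>: pallet/instance/benchmark
    // arrive as strings (via serde_as_str), results as arrays of objects, and `keys` is skipped.
    let batches: serde_json::Value = serde_json::from_str(&raw)?;

    if let Some(list) = batches.as_array() {
        for batch in list {
            let pallet = batch["pallet"].as_str().unwrap_or("?");
            let name = batch["benchmark"].as_str().unwrap_or("?");
            let time_results = batch["time_results"].as_array().map_or(0, |a| a.len());
            println!("{}::{} -> {} time results", pallet, name, time_results);
        }
    }
    Ok(())
}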
18 changes: 11 additions & 7 deletions utils/frame/benchmarking-cli/src/lib.rs
@@ -19,7 +19,7 @@ mod command;
mod writer;

use sc_cli::{ExecutionStrategy, WasmExecutionMethod};
use std::fmt::Debug;
use std::{fmt::Debug, path::PathBuf};

// Add a more relaxed parsing for pallet names by allowing pallet directory names with `-` to be
// used like crate names with `_`
@@ -60,9 +60,13 @@ pub struct BenchmarkCmd {
#[clap(long, default_value = "1")]
pub external_repeat: u32,

/// Print the raw results.
#[clap(long = "raw")]
pub raw_data: bool,
/// Print the raw results in JSON format.
#[clap(long = "json")]
pub json_output: bool,

/// Write the raw results in JSON format into the given file.
#[clap(long, conflicts_with = "json-output")]
pub json_file: Option<PathBuf>,

/// Don't print the median-slopes linear regression analysis.
#[clap(long)]
@@ -74,15 +78,15 @@ pub struct BenchmarkCmd {

/// Output the benchmarks to a Rust file at the given path.
#[clap(long)]
pub output: Option<std::path::PathBuf>,
pub output: Option<PathBuf>,

/// Add a header file to your outputted benchmarks
#[clap(long)]
pub header: Option<std::path::PathBuf>,
pub header: Option<PathBuf>,

/// Path to Handlebars template file used for outputting benchmark results. (Optional)
#[clap(long)]
pub template: Option<std::path::PathBuf>,
pub template: Option<PathBuf>,

/// Which analysis function to use when outputting benchmarks:
/// * min-squares (default)
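As a standalone sanity check of the new flag pairing (a sketch assuming the clap 3.0-era derive used here, where argument IDs are kebab-cased, hence `conflicts_with = "json-output"`; not code from this repo):

use clap::Parser;
use std::path::PathBuf;

#[derive(Parser, Debug)]
struct Cli {
    /// Print the raw results in JSON format.
    #[clap(long = "json")]
    json_output: bool,

    /// Write the raw results in JSON format into the given file.
    #[clap(long, conflicts_with = "json-output")]
    json_file: Option<PathBuf>,
}

fn main() {
    // `--json` alone prints JSON to stdout, `--json-file=out.json` alone writes a file,
    // and passing both is rejected at parse time because of `conflicts_with`.
    let cli = Cli::parse();
    println!("{:?}", cli);
}

Because `jsonify` only reports writing to stdout for `--json`, the human-readable summary is still printed when `--json-file` is used.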