diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 5e2f95e5..60dd5732 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -1,11 +1,10 @@
 on:
   push:
     branches:
-      - master
+      - main
   pull_request:
     branches:
-      - master
-      - version-0.4
+      - main
 
 name: tests
 env:
diff --git a/Cargo.toml b/Cargo.toml
index 51d8ead3..2c8776bd 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -9,56 +9,56 @@ name = "criterion2"
 #
 # * Update version numbers in the book;
 version = "0.5.1"
-edition = "2018"
+edition = "2021"
 description = "Statistics-driven micro-benchmarking library"
-homepage = "https://bheisler.github.io/criterion.rs/book/index.html"
-repository = "https://github.com/bheisler/criterion.rs"
-readme = "README.md"
-keywords = ["criterion", "benchmark"]
-categories = ["development-tools::profiling"]
-license = "Apache-2.0 OR MIT"
-exclude = ["book/*"]
+homepage = "https://bheisler.github.io/criterion.rs/book/index.html"
+repository = "https://github.com/bheisler/criterion.rs"
+readme = "README.md"
+keywords = ["criterion", "benchmark"]
+categories = ["development-tools::profiling"]
+license = "Apache-2.0 OR MIT"
+exclude = ["book/*"]
 
 [dependencies]
-anes = "0.1.4"
-once_cell = "1.14"
+anes = "0.1.6"
+once_cell = "1.19"
 criterion-plot = { path = "plot", version = "0.5.0" }
-itertools = ">=0.10, <=0.12"
-serde = "1.0"
-serde_json = "1.0"
-serde_derive = "1.0"
-ciborium = "0.2.0"
-clap = { version = "4", default-features = false, features = ["std", "help"] }
-walkdir = "2.3"
-tinytemplate = "1.1"
-cast = "0.3"
-num-traits = { version = "0.2", default-features = false, features = ["std"] }
-oorandom = "11.1"
-regex = { version = "1.5", default-features = false, features = ["std"] }
+itertools = ">=0.10, <=0.12"
+serde = "1.0"
+serde_json = "1.0"
+serde_derive = "1.0"
+ciborium = "0.2.2"
+clap = { version = "4", default-features = false, features = ["std", "help"] }
+walkdir = "2.5"
+tinytemplate = "1.2"
+cast = "0.3"
+num-traits = { version = "0.2", default-features = false, features = ["std"] }
+oorandom = "11.1"
+regex = { version = "1.10", default-features = false, features = ["std"] }
 
 # Optional dependencies
-rayon = { version = "1.3", optional = true }
-csv = { version = "1.1", optional = true }
+rayon = { version = "1.9", optional = true }
+csv = { version = "1.3", optional = true }
 futures = { version = "0.3", default_features = false, optional = true }
-smol = { version = "1.2", default-features = false, optional = true }
-tokio = { version = "1.0", default-features = false, features = [
+smol = { version = "1.3", default-features = false, optional = true }
+tokio = { version = "1.36", default-features = false, features = [
     "rt",
 ], optional = true }
-async-std = { version = "1.9", optional = true }
+async-std = { version = "1.12", optional = true }
 
 [dependencies.plotters]
-version = "^0.3.1"
-optional = true
+version = "^0.3.5"
+optional = true
 default-features = false
-features = ["svg_backend", "area_series", "line_series"]
+features = ["svg_backend", "area_series", "line_series"]
 
 [dev-dependencies]
-tempfile = "3.5.0"
-approx = "0.5.0"
+tempfile = "3.10.1"
+approx = "0.5.1"
 quickcheck = { version = "1.0", default-features = false }
-rand = "0.8"
-futures = { version = "0.3", default_features = false, features = ["executor"] }
+rand = "0.8"
+futures = { version = "0.3", default_features = false, features = ["executor"] }
 
 [badges]
 maintenance = { status = "passively-maintained" }
@@ -83,9 +83,9 @@ async = []
 # These features enable built-in support for running async benchmarks on each different async
 # runtime.
 async_futures = ["futures/executor", "async"]
-async_smol = ["smol", "async"]
-async_tokio = ["tokio", "async"]
-async_std = ["async-std", "async"]
+async_smol = ["smol", "async"]
+async_tokio = ["tokio", "async"]
+async_std = ["async-std", "async"]
 
 # This feature _currently_ does nothing except disable a warning message, but in 0.4.0 it will be
 # required in order to have Criterion.rs generate its own plots (as opposed to using cargo-criterion)
@@ -104,7 +104,7 @@ csv_output = ["csv"]
 exclude = ["cargo-criterion"]
 
 [[bench]]
-name = "bench_main"
+name = "bench_main"
 harness = false
 
 [lib]
diff --git a/bencher_compat/Cargo.toml b/bencher_compat/Cargo.toml
index c901ca72..78dacc51 100644
--- a/bencher_compat/Cargo.toml
+++ b/bencher_compat/Cargo.toml
@@ -23,4 +23,4 @@ default = []
 name = "bencher_example"
 harness = false
 
-[workspace]
\ No newline at end of file
+[workspace]
diff --git a/bencher_compat/benches/bencher_example.rs b/bencher_compat/benches/bencher_example.rs
index c45d246e..276ff41b 100644
--- a/bencher_compat/benches/bencher_example.rs
+++ b/bencher_compat/benches/bencher_example.rs
@@ -1,22 +1,15 @@
-#[macro_use]
-extern crate criterion_bencher_compat;
-
-use criterion_bencher_compat::Bencher;
-
-fn a(bench: &mut Bencher) {
-    bench.iter(|| {
-        (0..1000).fold(0, |x, y| x + y)
-    })
-}
-
-fn b(bench: &mut Bencher) {
-    const N: usize = 1024;
-    bench.iter(|| {
-        vec![0u8; N]
-    });
-
-    bench.bytes = N as u64;
-}
-
-benchmark_group!(benches, a, b);
-benchmark_main!(benches);
\ No newline at end of file
+use criterion_bencher_compat::Bencher;
+
+fn a(bench: &mut Bencher) {
+    bench.iter(|| (0..1000).fold(0, |x, y| x + y))
+}
+
+fn b(bench: &mut Bencher) {
+    const N: usize = 1024;
+    bench.iter(|| vec![0u8; N]);
+
+    bench.bytes = N as u64;
+}
+
+benchmark_group!(benches, a, b);
+benchmark_main!(benches);
diff --git a/bencher_compat/src/lib.rs b/bencher_compat/src/lib.rs
index 1ddf6756..6c23a1d2 100644
--- a/bencher_compat/src/lib.rs
+++ b/bencher_compat/src/lib.rs
@@ -1,8 +1,6 @@
-extern crate criterion;
-
-pub use criterion::Criterion;
 pub use criterion::black_box;
 use criterion::measurement::WallTime;
+pub use criterion::Criterion;
 
 /// Stand-in for `bencher::Bencher` which uses Criterion.rs to perform the benchmark instead.
 pub struct Bencher<'a, 'b> {
@@ -12,7 +10,8 @@ pub struct Bencher<'a, 'b> {
 impl<'a, 'b> Bencher<'a, 'b> {
     /// Callback for benchmark functions to run to perform the benchmark
     pub fn iter<T, F>(&mut self, inner: F)
-    where F: FnMut() -> T
+    where
+        F: FnMut() -> T,
     {
         self.bencher.iter(inner);
     }
@@ -60,4 +59,4 @@ macro_rules! benchmark_main {
     ($($group_name:path,)+) => {
         benchmark_main!($($group_name),+);
     };
-}
\ No newline at end of file
+}
diff --git a/benches/benchmarks/async_measurement_overhead.rs b/benches/benchmarks/async_measurement_overhead.rs
index 0c9605a1..9330ace6 100644
--- a/benches/benchmarks/async_measurement_overhead.rs
+++ b/benches/benchmarks/async_measurement_overhead.rs
@@ -4,36 +4,28 @@ fn some_benchmark(c: &mut Criterion) {
     let mut group = c.benchmark_group("async overhead");
     group.bench_function("iter", |b| b.to_async(FuturesExecutor).iter(|| async { 1 }));
     group.bench_function("iter_with_setup", |b| {
-        b.to_async(FuturesExecutor)
-            .iter_with_setup(|| (), |_| async { 1 })
+        b.to_async(FuturesExecutor).iter_with_setup(|| (), |_| async { 1 })
     });
     group.bench_function("iter_with_large_setup", |b| {
-        b.to_async(FuturesExecutor)
-            .iter_with_large_setup(|| (), |_| async { 1 })
+        b.to_async(FuturesExecutor).iter_with_large_setup(|| (), |_| async { 1 })
     });
     group.bench_function("iter_with_large_drop", |b| {
-        b.to_async(FuturesExecutor)
-            .iter_with_large_drop(|| async { 1 })
+        b.to_async(FuturesExecutor).iter_with_large_drop(|| async { 1 })
     });
     group.bench_function("iter_batched_small_input", |b| {
-        b.to_async(FuturesExecutor)
-            .iter_batched(|| (), |_| async { 1 }, BatchSize::SmallInput)
+        b.to_async(FuturesExecutor).iter_batched(|| (), |_| async { 1 }, BatchSize::SmallInput)
     });
     group.bench_function("iter_batched_large_input", |b| {
-        b.to_async(FuturesExecutor)
-            .iter_batched(|| (), |_| async { 1 }, BatchSize::LargeInput)
+        b.to_async(FuturesExecutor).iter_batched(|| (), |_| async { 1 }, BatchSize::LargeInput)
     });
     group.bench_function("iter_batched_per_iteration", |b| {
-        b.to_async(FuturesExecutor)
-            .iter_batched(|| (), |_| async { 1 }, BatchSize::PerIteration)
+        b.to_async(FuturesExecutor).iter_batched(|| (), |_| async { 1 }, BatchSize::PerIteration)
    });
     group.bench_function("iter_batched_ref_small_input", |b| {
-        b.to_async(FuturesExecutor)
-            .iter_batched_ref(|| (), |_| async { 1 }, BatchSize::SmallInput)
+        b.to_async(FuturesExecutor).iter_batched_ref(|| (), |_| async { 1 }, BatchSize::SmallInput)
     });
     group.bench_function("iter_batched_ref_large_input", |b| {
-        b.to_async(FuturesExecutor)
-            .iter_batched_ref(|| (), |_| async { 1 }, BatchSize::LargeInput)
+        b.to_async(FuturesExecutor).iter_batched_ref(|| (), |_| async { 1 }, BatchSize::LargeInput)
     });
     group.bench_function("iter_batched_ref_per_iteration", |b| {
         b.to_async(FuturesExecutor).iter_batched_ref(
diff --git a/benches/benchmarks/custom_measurement.rs b/benches/benchmarks/custom_measurement.rs
index 449f9030..c4e3199a 100644
--- a/benches/benchmarks/custom_measurement.rs
+++ b/benches/benchmarks/custom_measurement.rs
@@ -17,10 +17,9 @@ impl ValueFormatter for HalfSecFormatter {
             Throughput::Bytes(bytes) | Throughput::BytesDecimal(bytes) => {
                 format!("{} b/s/2", (bytes as f64) / (value * 2f64 * 10f64.powi(-9)))
             }
-            Throughput::Elements(elems) => format!(
-                "{} elem/s/2",
-                (elems as f64) / (value * 2f64 * 10f64.powi(-9))
-            ),
+            Throughput::Elements(elems) => {
+                format!("{} elem/s/2", (elems as f64) / (value * 2f64 * 10f64.powi(-9)))
+            }
         }
     }
diff --git a/benches/benchmarks/external_process.rs b/benches/benchmarks/external_process.rs
index 7667a53b..b0d38bea 100644
--- a/benches/benchmarks/external_process.rs
+++ b/benches/benchmarks/external_process.rs
@@ -8,9 +8,7 @@ use std::{
 
 fn create_command() -> Command {
     let mut command = Command::new("python3");
-    command
-        .arg("benches/benchmarks/external_process.py")
-        .arg("10");
+ command.arg("benches/benchmarks/external_process.py").arg("10"); command } @@ -29,21 +27,15 @@ fn python_fibonacci(c: &mut Criterion) { .spawn() .expect("Unable to start python process"); - let mut stdin = process - .stdin - .expect("Unable to get stdin for child process"); - let stdout = process - .stdout - .expect("Unable to get stdout for child process"); + let mut stdin = process.stdin.expect("Unable to get stdin for child process"); + let stdout = process.stdout.expect("Unable to get stdout for child process"); let mut stdout = BufReader::new(stdout); c.bench_function("fibonacci-python", |b| { b.iter_custom(|iters| { writeln!(stdin, "{}", iters) .expect("Unable to send iteration count to child process"); let mut line = String::new(); - stdout - .read_line(&mut line) - .expect("Unable to read time from child process"); + stdout.read_line(&mut line).expect("Unable to read time from child process"); let nanoseconds: u64 = u64::from_str(line.trim()).expect("Unable to parse time from child process"); Duration::from_nanos(nanoseconds) diff --git a/benches/benchmarks/sampling_mode.rs b/benches/benchmarks/sampling_mode.rs index af761273..0ffbf868 100644 --- a/benches/benchmarks/sampling_mode.rs +++ b/benches/benchmarks/sampling_mode.rs @@ -1,26 +1,20 @@ -use criterion::{criterion_group, Criterion, SamplingMode}; -use std::thread::sleep; -use std::time::Duration; - -fn sampling_mode_tests(c: &mut Criterion) { - let mut group = c.benchmark_group("sampling_mode"); - - group.sampling_mode(SamplingMode::Auto); - group.bench_function("Auto", |bencher| { - bencher.iter(|| sleep(Duration::from_millis(0))) - }); - - group.sampling_mode(SamplingMode::Linear); - group.bench_function("Linear", |bencher| { - bencher.iter(|| sleep(Duration::from_millis(0))) - }); - - group.sampling_mode(SamplingMode::Flat); - group.bench_function("Flat", |bencher| { - bencher.iter(|| sleep(Duration::from_millis(10))) - }); - - group.finish(); -} - -criterion_group!(benches, sampling_mode_tests,); +use criterion::{criterion_group, Criterion, SamplingMode}; +use std::thread::sleep; +use std::time::Duration; + +fn sampling_mode_tests(c: &mut Criterion) { + let mut group = c.benchmark_group("sampling_mode"); + + group.sampling_mode(SamplingMode::Auto); + group.bench_function("Auto", |bencher| bencher.iter(|| sleep(Duration::from_millis(0)))); + + group.sampling_mode(SamplingMode::Linear); + group.bench_function("Linear", |bencher| bencher.iter(|| sleep(Duration::from_millis(0)))); + + group.sampling_mode(SamplingMode::Flat); + group.bench_function("Flat", |bencher| bencher.iter(|| sleep(Duration::from_millis(10)))); + + group.finish(); +} + +criterion_group!(benches, sampling_mode_tests,); diff --git a/book/book.toml b/book/book.toml index c9d3fcc6..e3912efd 100644 --- a/book/book.toml +++ b/book/book.toml @@ -1,10 +1,10 @@ -[book] -title = "Criterion.rs Documentation" -description = "User Guide and Other Prose Documentation For Criterion.rs" -author = "Brook Heisler" - -[output.html] - -[output.linkcheck] -#follow-web-links = true -exclude = [ 'crates\.io' ] \ No newline at end of file +[book] +title = "Criterion.rs Documentation" +description = "User Guide and Other Prose Documentation For Criterion.rs" +author = "Brook Heisler" + +[output.html] + +[output.linkcheck] +#follow-web-links = true +exclude = ['crates\.io'] diff --git a/macro/Cargo.toml b/macro/Cargo.toml index b8adf2d4..3924e1b6 100644 --- a/macro/Cargo.toml +++ b/macro/Cargo.toml @@ -25,4 +25,4 @@ criterion = { version = "0.4.0", path = "..", default-features 
 
 [[bench]]
 name = "test_macro_bench"
-[workspace]
\ No newline at end of file
+[workspace]
diff --git a/macro/src/lib.rs b/macro/src/lib.rs
index 6297a172..7ba02443 100644
--- a/macro/src/lib.rs
+++ b/macro/src/lib.rs
@@ -1,56 +1,54 @@
-extern crate proc_macro;
-use proc_macro::TokenStream;
-use proc_macro2::{Ident, TokenTree};
-use quote::quote_spanned;
-
-#[proc_macro_attribute]
-pub fn criterion(attr: TokenStream, item: TokenStream) -> TokenStream {
-    let attr = proc_macro2::TokenStream::from(attr);
-    let item = proc_macro2::TokenStream::from(item);
-
-    let span = proc_macro2::Span::call_site();
-
-    let init = if stream_length(attr.clone()) != 0 {
-        attr
-    }
-    else {
-        quote_spanned!(span=> criterion::Criterion::default())
-    };
-
-    let function_name = find_name(item.clone());
-    let wrapped_name = Ident::new(&format!("criterion_wrapped_{}", function_name.to_string()), span);
-
-    let output = quote_spanned!(span=>
-        #[test_case]
-        pub fn #wrapped_name() {
-            #item
-
-            let mut c = #init.configure_from_args();
-            #function_name(&mut c);
-        }
-    );
-
-    output.into()
-}
-
-fn stream_length(stream: proc_macro2::TokenStream) -> usize {
-    stream.into_iter().count()
-}
-
-fn find_name(stream: proc_macro2::TokenStream) -> Ident {
-    let mut iter = stream.into_iter();
-    while let Some(tok) = iter.next() {
-        if let TokenTree::Ident(ident) = tok {
-            if ident == "fn" {
-                break;
-            }
-        }
-    }
-
-    if let Some(TokenTree::Ident(name)) = iter.next() {
-        name
-    }
-    else {
-        panic!("Unable to find function name")
-    }
-}
\ No newline at end of file
+use proc_macro::TokenStream;
+use proc_macro2::{Ident, TokenTree};
+use quote::quote_spanned;
+
+#[proc_macro_attribute]
+pub fn criterion(attr: TokenStream, item: TokenStream) -> TokenStream {
+    let attr = proc_macro2::TokenStream::from(attr);
+    let item = proc_macro2::TokenStream::from(item);
+
+    let span = proc_macro2::Span::call_site();
+
+    let init = if stream_length(attr.clone()) != 0 {
+        attr
+    } else {
+        quote_spanned!(span=> criterion::Criterion::default())
+    };
+
+    let function_name = find_name(item.clone());
+    let wrapped_name =
+        Ident::new(&format!("criterion_wrapped_{}", function_name.to_string()), span);
+
+    let output = quote_spanned!(span=>
+        #[test_case]
+        pub fn #wrapped_name() {
+            #item
+
+            let mut c = #init.configure_from_args();
+            #function_name(&mut c);
+        }
+    );
+
+    output.into()
+}
+
+fn stream_length(stream: proc_macro2::TokenStream) -> usize {
+    stream.into_iter().count()
+}
+
+fn find_name(stream: proc_macro2::TokenStream) -> Ident {
+    let mut iter = stream.into_iter();
+    while let Some(tok) = iter.next() {
+        if let TokenTree::Ident(ident) = tok {
+            if ident == "fn" {
+                break;
+            }
+        }
+    }
+
+    if let Some(TokenTree::Ident(name)) = iter.next() {
+        name
+    } else {
+        panic!("Unable to find function name")
+    }
+}
diff --git a/plot/Cargo.toml b/plot/Cargo.toml
index f7ef2023..e93c4199 100644
--- a/plot/Cargo.toml
+++ b/plot/Cargo.toml
@@ -1,5 +1,8 @@
 [package]
-authors = ["Jorge Aparicio ", "Brook Heisler "]
+authors = [
+    "Jorge Aparicio ",
+    "Brook Heisler ",
+]
 name = "criterion-plot"
 version = "0.5.0"
 edition = "2018"
diff --git a/plot/src/candlestick.rs b/plot/src/candlestick.rs
index e0a5cbeb..921bffa8 100644
--- a/plot/src/candlestick.rs
+++ b/plot/src/candlestick.rs
@@ -3,6 +3,8 @@
 use std::borrow::Cow;
 use std::iter::IntoIterator;
 
+use itertools::izip;
+
 use crate::data::Matrix;
 use crate::traits::{self, Data, Set};
 use crate::{Color, Default, Display, Figure, Label, LineType, LineWidth, Plot, Script};
@@ -17,12 +19,7 @@ pub struct Properties {
 impl Default for Properties {
     fn default() -> Properties {
-        Properties {
-            color: None,
-            label: None,
-            line_type: LineType::Solid,
-            linewidth: None,
-        }
+        Properties { color: None, label: None, line_type: LineType::Solid, linewidth: None }
     }
 }
@@ -132,20 +129,13 @@ where
         F: FnOnce(&mut Properties) -> &mut Properties,
     {
         let (x_factor, y_factor) = crate::scale_factor(&self.axes, crate::Axes::BottomXLeftY);
-        let Candlesticks {
-            x,
-            whisker_min,
-            box_min,
-            box_high,
-            whisker_high,
-        } = candlesticks;
+        let Candlesticks { x, whisker_min, box_min, box_high, whisker_high } = candlesticks;
 
         let data = Matrix::new(
             izip!(x, box_min, whisker_min, whisker_high, box_high),
             (x_factor, y_factor, y_factor, y_factor, y_factor),
         );
-        self.plots
-            .push(Plot::new(data, configure(&mut Default::default())));
+        self.plots.push(Plot::new(data, configure(&mut Default::default())));
         self
     }
 }
diff --git a/plot/src/curve.rs b/plot/src/curve.rs
index bbddeff1..92c00131 100644
--- a/plot/src/curve.rs
+++ b/plot/src/curve.rs
@@ -3,6 +3,8 @@
 use std::borrow::Cow;
 use std::iter::IntoIterator;
 
+use itertools::izip;
+
 use crate::data::Matrix;
 use crate::traits::{self, Data, Set};
 use crate::{
diff --git a/plot/src/data.rs b/plot/src/data.rs
index 20ed3d41..9ab92859 100644
--- a/plot/src/data.rs
+++ b/plot/src/data.rs
@@ -47,11 +47,7 @@ impl Matrix {
             row.append_to(&mut bytes, scale);
         }
 
-        Matrix {
-            bytes,
-            ncols,
-            nrows,
-        }
+        Matrix { bytes, ncols, nrows }
     }
 
     pub fn bytes(&self) -> &[u8] {
diff --git a/plot/src/errorbar.rs b/plot/src/errorbar.rs
index 7efd23e3..9265f6f2 100644
--- a/plot/src/errorbar.rs
+++ b/plot/src/errorbar.rs
@@ -3,6 +3,8 @@
 use std::borrow::Cow;
 use std::iter::IntoIterator;
 
+use itertools::izip;
+
 use crate::data::Matrix;
 use crate::traits::{self, Data, Set};
 use crate::{
@@ -234,39 +236,14 @@ where
         let style = e.style();
 
         let (x, y, length, height, e_factor) = match e {
-            ErrorBar::XErrorBars {
-                x,
-                y,
-                x_low,
-                x_high,
-            }
-            | ErrorBar::XErrorLines {
-                x,
-                y,
-                x_low,
-                x_high,
-            } => (x, y, x_low, x_high, x_factor),
-            ErrorBar::YErrorBars {
-                x,
-                y,
-                y_low,
-                y_high,
-            }
-            | ErrorBar::YErrorLines {
-                x,
-                y,
-                y_low,
-                y_high,
-            } => (x, y, y_low, y_high, y_factor),
+            ErrorBar::XErrorBars { x, y, x_low, x_high }
+            | ErrorBar::XErrorLines { x, y, x_low, x_high } => (x, y, x_low, x_high, x_factor),
+            ErrorBar::YErrorBars { x, y, y_low, y_high }
+            | ErrorBar::YErrorLines { x, y, y_low, y_high } => (x, y, y_low, y_high, y_factor),
         };
-        let data = Matrix::new(
-            izip!(x, y, length, height),
-            (x_factor, y_factor, e_factor, e_factor),
-        );
-        self.plots.push(Plot::new(
-            data,
-            configure(&mut ErrorBarDefault::default(style)),
-        ));
+        let data =
+            Matrix::new(izip!(x, y, length, height), (x_factor, y_factor, e_factor, e_factor));
+        self.plots.push(Plot::new(data, configure(&mut ErrorBarDefault::default(style))));
         self
     }
 }
diff --git a/plot/src/filledcurve.rs b/plot/src/filledcurve.rs
index f79dbddc..db63eb13 100644
--- a/plot/src/filledcurve.rs
+++ b/plot/src/filledcurve.rs
@@ -3,6 +3,8 @@
 use std::borrow::Cow;
 use std::iter::IntoIterator;
 
+use itertools::izip;
+
 use crate::data::Matrix;
 use crate::traits::{self, Data, Set};
 use crate::{Axes, Color, Default, Display, Figure, Label, Opacity, Plot, Script};
@@ -17,12 +19,7 @@ pub struct Properties {
 impl Default for Properties {
     fn default() -> Properties {
-        Properties {
-            axes: None,
-            color: None,
-            label: None,
-            opacity: None,
-        }
+        Properties { axes: None, color: None, label: None, opacity: None }
     }
 }
diff --git a/plot/src/lib.rs b/plot/src/lib.rs
index 174765e6..d77932c0 100644
--- a/plot/src/lib.rs
+++ b/plot/src/lib.rs
@@ -371,10 +371,6 @@
 #![cfg_attr(feature = "cargo-clippy", allow(clippy::doc_markdown))]
 #![cfg_attr(feature = "cargo-clippy", allow(clippy::many_single_char_names))]
 
-extern crate cast;
-#[macro_use]
-extern crate itertools;
-
 use std::borrow::Cow;
 use std::fmt;
 use std::fs::File;
@@ -912,10 +908,7 @@ impl Plot {
     where
         S: Script,
     {
-        Plot {
-            data,
-            script: script.script(),
-        }
+        Plot { data, script: script.script() }
     }
 
     fn data(&self) -> &Matrix {
@@ -947,11 +940,9 @@ impl fmt::Display for VersionError {
                 write!(f, "`gnuplot --version` failed with error message:\n{}", msg)
             }
             VersionError::OutputError => write!(f, "`gnuplot --version` returned invalid utf-8"),
-            VersionError::ParseError(msg) => write!(
-                f,
-                "`gnuplot --version` returned an unparseable version string: {}",
-                msg
-            ),
+            VersionError::ParseError(msg) => {
+                write!(f, "`gnuplot --version` returned an unparseable version string: {}", msg)
+            }
         }
     }
 }
@@ -965,7 +956,7 @@ impl ::std::error::Error for VersionError {
         }
     }
 
-    fn cause(&self) -> Option<&dyn ::std::error::Error> {
+    fn cause(&self) -> Option<&dyn::std::error::Error> {
         match self {
             VersionError::Exec(err) => Some(err),
             _ => None,
@@ -985,10 +976,8 @@ pub struct Version {
 
 /// Returns `gnuplot` version
 pub fn version() -> Result<Version, VersionError> {
-    let command_output = Command::new("gnuplot")
-        .arg("--version")
-        .output()
-        .map_err(VersionError::Exec)?;
+    let command_output =
+        Command::new("gnuplot").arg("--version").output().map_err(VersionError::Exec)?;
     if !command_output.status.success() {
         let error =
             String::from_utf8(command_output.stderr).map_err(|_| VersionError::OutputError)?;
@@ -1007,11 +996,7 @@ fn parse_version(version_str: &str) -> Result<Version, Option<ParseIntError>> {
     let minor = version.next().ok_or(None)?.parse()?;
     let patchlevel = words.nth(1).ok_or(None)?.to_owned();
 
-    Ok(Version {
-        major,
-        minor,
-        patch: patchlevel,
-    })
+    Ok(Version { major, minor, patch: patchlevel })
 }
 
 fn scale_factor(map: &map::axis::Map, axes: Axes) -> (f64, f64) {
diff --git a/plot/src/map.rs b/plot/src/map.rs
index 7099a96b..92d28a9d 100644
--- a/plot/src/map.rs
+++ b/plot/src/map.rs
@@ -64,10 +64,7 @@ pub mod axis {
         }
 
         pub fn iter(&self) -> Items<T> {
-            Items {
-                map: self,
-                state: Some(Axis::BottomX),
-            }
+            Items { map: self, state: Some(Axis::BottomX) }
         }
     }
 
@@ -76,12 +73,7 @@ pub mod axis {
         T: Clone,
     {
         fn clone(&self) -> Map<T> {
-            Map([
-                self.0[0].clone(),
-                self.0[1].clone(),
-                self.0[2].clone(),
-                self.0[3].clone(),
-            ])
+            Map([self.0[0].clone(), self.0[1].clone(), self.0[2].clone(), self.0[3].clone()])
         }
     }
 }
@@ -144,10 +136,7 @@ pub mod grid {
         }
 
         pub fn iter(&self) -> Items<T> {
-            Items {
-                map: self,
-                state: Some(Grid::Major),
-            }
+            Items { map: self, state: Some(Grid::Major) }
         }
     }
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
new file mode 100644
index 00000000..06c74581
--- /dev/null
+++ b/rust-toolchain.toml
@@ -0,0 +1,3 @@
+[toolchain]
+channel = "1.76.0"
+profile = "default"
diff --git a/rustfmt.toml b/rustfmt.toml
new file mode 100644
index 00000000..7996b44e
--- /dev/null
+++ b/rustfmt.toml
@@ -0,0 +1,2 @@
+use_small_heuristics = "Max"
+use_field_init_shorthand = true
diff --git a/src/analysis/compare.rs b/src/analysis/compare.rs
index a49407d8..53e570f1 100644
--- a/src/analysis/compare.rs
+++ b/src/analysis/compare.rs
@@ -41,11 +41,8 @@ pub(crate) fn common(
     estimates_file.push("estimates.json");
     let base_estimates: Estimates = fs::load(&estimates_file)?;
 
-    let base_avg_times: Vec<f64> = iters
-        .iter()
-        .zip(times.iter())
-        .map(|(iters, elapsed)| elapsed / iters)
-        .collect();
+    let base_avg_times: Vec<f64> =
+        iters.iter().zip(times.iter()).map(|(iters, elapsed)| elapsed / iters).collect();
     let base_avg_time_sample = Sample::new(&base_avg_times);
 
     let mut change_dir = criterion.output_directory.clone();
@@ -106,10 +103,7 @@ fn estimates(
     criterion: &Criterion,
 ) -> (ChangeEstimates, ChangeDistributions) {
     fn stats(a: &Sample<f64>, b: &Sample<f64>) -> (f64, f64) {
-        (
-            a.mean() / b.mean() - 1.,
-            a.percentiles().median() / b.percentiles().median() - 1.,
-        )
+        (a.mean() / b.mean() - 1., a.percentiles().median() / b.percentiles().median() - 1.)
     }
 
     let cl = config.confidence_level;
@@ -120,10 +114,7 @@ fn estimates(
         univariate::bootstrap(avg_times, base_avg_times, nresamples, stats)
     );
 
-    let distributions = ChangeDistributions {
-        mean: dist_mean,
-        median: dist_median,
-    };
+    let distributions = ChangeDistributions { mean: dist_mean, median: dist_median };
 
     let (mean, median) = stats(avg_times, base_avg_times);
     let points = ChangePointEstimates { mean, median };
diff --git a/src/analysis/mod.rs b/src/analysis/mod.rs
index 1851d718..5fe4fcc7 100644
--- a/src/analysis/mod.rs
+++ b/src/analysis/mod.rs
@@ -23,11 +23,7 @@ macro_rules! elapsed {
         let out = $block;
         let elapsed = &start.elapsed();
 
-        info!(
-            "{} took {}",
-            $msg,
-            crate::format::time(elapsed.as_nanos() as f64)
-        );
+        info!("{} took {}", $msg, crate::format::time(elapsed.as_nanos() as f64));
 
         out
     }};
@@ -48,11 +44,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
     criterion.report.benchmark_start(id, report_context);
 
     if let Baseline::CompareStrict = criterion.baseline {
-        if !base_dir_exists(
-            id,
-            &criterion.baseline_directory,
-            &criterion.output_directory,
-        ) {
+        if !base_dir_exists(id, &criterion.baseline_directory, &criterion.output_directory) {
             panic!(
                 "Baseline '{base}' must exist before comparison is allowed; try --save-baseline {base}",
                 base=criterion.baseline_directory,
@@ -103,8 +95,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
             })
             .unwrap();
 
-            conn.serve_value_formatter(criterion.measurement.formatter())
-                .unwrap();
+            conn.serve_value_formatter(criterion.measurement.formatter()).unwrap();
             return;
         }
     }
@@ -180,46 +171,43 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
         });
     }
 
-    let compare_data = if base_dir_exists(
-        id,
-        &criterion.baseline_directory,
-        &criterion.output_directory,
-    ) {
-        let result = compare::common(id, avg_times, config, criterion);
-        match result {
-            Ok((
-                t_value,
-                t_distribution,
-                relative_estimates,
-                relative_distributions,
-                base_iter_counts,
-                base_sample_times,
-                base_avg_times,
-                base_estimates,
-            )) => {
-                let p_value = t_distribution.p_value(t_value, &Tails::Two);
-                Some(crate::report::ComparisonData {
-                    p_value,
-                    t_distribution,
+    let compare_data =
+        if base_dir_exists(id, &criterion.baseline_directory, &criterion.output_directory) {
+            let result = compare::common(id, avg_times, config, criterion);
+            match result {
+                Ok((
                     t_value,
+                    t_distribution,
                     relative_estimates,
                     relative_distributions,
-                    significance_threshold: config.significance_level,
-                    noise_threshold: config.noise_threshold,
                     base_iter_counts,
                     base_sample_times,
                     base_avg_times,
                     base_estimates,
-                })
-            }
-            Err(e) => {
-                crate::error::log_error(&e);
-                None
+                )) => {
+                    let p_value = t_distribution.p_value(t_value, &Tails::Two);
+                    Some(crate::report::ComparisonData {
+                        p_value,
+                        t_distribution,
+                        t_value,
+                        relative_estimates,
+                        relative_distributions,
+                        significance_threshold: config.significance_level,
+                        noise_threshold: config.noise_threshold,
+                        base_iter_counts,
+                        base_sample_times,
+                        base_avg_times,
+                        base_estimates,
+                    })
+                }
+                Err(e) => {
+                    crate::error::log_error(&e);
+                    None
+                }
             }
-        }
-    } else {
-        None
-    };
+        } else {
+            None
+        };
 
     let measurement_data = crate::report::MeasurementData {
         data: Data::new(&iters, &times),
@@ -311,17 +299,10 @@ fn estimates(avg_times: &Sample<f64>, config: &BenchmarkConfig) -> (Distribution
     let nresamples = config.nresamples;
 
     let (mean, std_dev, median, mad) = stats(avg_times);
-    let points = PointEstimates {
-        mean,
-        median,
-        std_dev,
-        median_abs_dev: mad,
-    };
+    let points = PointEstimates { mean, median, std_dev, median_abs_dev: mad };
 
-    let (dist_mean, dist_stddev, dist_median, dist_mad) = elapsed!(
-        "Bootstrapping the absolute statistics.",
-        avg_times.bootstrap(nresamples, stats)
-    );
+    let (dist_mean, dist_stddev, dist_median, dist_mad) =
+        elapsed!("Bootstrapping the absolute statistics.", avg_times.bootstrap(nresamples, stats));
 
     let distributions = Distributions {
         mean: dist_mean,
@@ -349,22 +330,10 @@ fn copy_new_dir_to_base(id: &str, baseline: &str, output_directory: &Path) {
     }
 
     // TODO: consider using walkdir or similar to generically copy.
-    try_else_return!(fs::cp(
-        &new_dir.join("estimates.json"),
-        &base_dir.join("estimates.json")
-    ));
-    try_else_return!(fs::cp(
-        &new_dir.join("sample.json"),
-        &base_dir.join("sample.json")
-    ));
-    try_else_return!(fs::cp(
-        &new_dir.join("tukey.json"),
-        &base_dir.join("tukey.json")
-    ));
-    try_else_return!(fs::cp(
-        &new_dir.join("benchmark.json"),
-        &base_dir.join("benchmark.json")
-    ));
+    try_else_return!(fs::cp(&new_dir.join("estimates.json"), &base_dir.join("estimates.json")));
+    try_else_return!(fs::cp(&new_dir.join("sample.json"), &base_dir.join("sample.json")));
+    try_else_return!(fs::cp(&new_dir.join("tukey.json"), &base_dir.join("tukey.json")));
+    try_else_return!(fs::cp(&new_dir.join("benchmark.json"), &base_dir.join("benchmark.json")));
 
     #[cfg(feature = "csv_output")]
     try_else_return!(fs::cp(&new_dir.join("raw.csv"), &base_dir.join("raw.csv")));
diff --git a/src/bencher.rs b/src/bencher.rs
index 5baebc40..492fdb60 100644
--- a/src/bencher.rs
+++ b/src/bencher.rs
@@ -1,764 +1,743 @@
-use std::iter::IntoIterator;
-use std::time::Duration;
-use std::time::Instant;
-
-use crate::black_box;
-use crate::measurement::{Measurement, WallTime};
-use crate::BatchSize;
-
-#[cfg(feature = "async")]
-use std::future::Future;
-
-#[cfg(feature = "async")]
-use crate::async_executor::AsyncExecutor;
-
-// ================================== MAINTENANCE NOTE =============================================
-// Any changes made to either Bencher or AsyncBencher will have to be replicated to the other!
-// ================================== MAINTENANCE NOTE =============================================
-
-/// Timer struct used to iterate a benchmarked function and measure the runtime.
-///
-/// This struct provides different timing loops as methods. Each timing loop provides a different
-/// way to time a routine and each has advantages and disadvantages.
-///
-/// * If you want to do the iteration and measurement yourself (eg. passing the iteration count
-///   to a separate process), use `iter_custom`.
-/// * If your routine requires no per-iteration setup and returns a value with an expensive `drop`
-///   method, use `iter_with_large_drop`.
-/// * If your routine requires some per-iteration setup that shouldn't be timed, use `iter_batched`
-///   or `iter_batched_ref`. See [`BatchSize`](enum.BatchSize.html) for a discussion of batch sizes.
-///   If the setup value implements `Drop` and you don't want to include the `drop` time in the
-///   measurement, use `iter_batched_ref`, otherwise use `iter_batched`. These methods are also
-///   suitable for benchmarking routines which return a value with an expensive `drop` method,
-///   but are more complex than `iter_with_large_drop`.
-/// * Otherwise, use `iter`.
-pub struct Bencher<'a, M: Measurement = WallTime> {
-    pub(crate) iterated: bool,         // Have we iterated this benchmark?
-    pub(crate) iters: u64,             // Number of times to iterate this benchmark
-    pub(crate) value: M::Value,        // The measured value
-    pub(crate) measurement: &'a M,     // Reference to the measurement object
-    pub(crate) elapsed_time: Duration, // How much time did it take to perform the iteration? Used for the warmup period.
-}
-impl<'a, M: Measurement> Bencher<'a, M> {
-    /// Times a `routine` by executing it many times and timing the total elapsed time.
-    ///
-    /// Prefer this timing loop when `routine` returns a value that doesn't have a destructor.
-    ///
-    /// # Timing model
-    ///
-    /// Note that the `Bencher` also times the time required to destroy the output of `routine()`.
-    /// Therefore prefer this timing loop when the runtime of `mem::drop(O)` is negligible compared
-    /// to the runtime of the `routine`.
-    ///
-    /// ```text
-    /// elapsed = Instant::now + iters * (routine + mem::drop(O) + Range::next)
-    /// ```
-    ///
-    /// # Example
-    ///
-    /// ```rust
-    /// #[macro_use] extern crate criterion;
-    ///
-    /// use criterion::*;
-    ///
-    /// // The function to benchmark
-    /// fn foo() {
-    ///     // ...
-    /// }
-    ///
-    /// fn bench(c: &mut Criterion) {
-    ///     c.bench_function("iter", move |b| {
-    ///         b.iter(|| foo())
-    ///     });
-    /// }
-    ///
-    /// criterion_group!(benches, bench);
-    /// criterion_main!(benches);
-    /// ```
-    ///
-    #[inline(never)]
-    pub fn iter<O, R>(&mut self, mut routine: R)
-    where
-        R: FnMut() -> O,
-    {
-        self.iterated = true;
-        let time_start = Instant::now();
-        let start = self.measurement.start();
-        for _ in 0..self.iters {
-            black_box(routine());
-        }
-        self.value = self.measurement.end(start);
-        self.elapsed_time = time_start.elapsed();
-    }
-
-    /// Times a `routine` by executing it many times and relying on `routine` to measure its own execution time.
-    ///
-    /// Prefer this timing loop in cases where `routine` has to do its own measurements to
-    /// get accurate timing information (for example in multi-threaded scenarios where you spawn
-    /// and coordinate with multiple threads).
-    ///
-    /// # Timing model
-    /// Custom, the timing model is whatever is returned as the Duration from `routine`.
-    ///
-    /// # Example
-    /// ```rust
-    /// #[macro_use] extern crate criterion;
-    /// use criterion::*;
-    /// use criterion::black_box;
-    /// use std::time::Instant;
-    ///
-    /// fn foo() {
-    ///     // ...
-    /// }
-    ///
-    /// fn bench(c: &mut Criterion) {
-    ///     c.bench_function("iter", move |b| {
-    ///         b.iter_custom(|iters| {
-    ///             let start = Instant::now();
-    ///             for _i in 0..iters {
-    ///                 black_box(foo());
-    ///             }
-    ///             start.elapsed()
-    ///         })
-    ///     });
-    /// }
-    ///
-    /// criterion_group!(benches, bench);
-    /// criterion_main!(benches);
-    /// ```
-    ///
-    #[inline(never)]
-    pub fn iter_custom<R>(&mut self, mut routine: R)
-    where
-        R: FnMut(u64) -> M::Value,
-    {
-        self.iterated = true;
-        let time_start = Instant::now();
-        self.value = routine(self.iters);
-        self.elapsed_time = time_start.elapsed();
-    }
-
-    #[doc(hidden)]
-    pub fn iter_with_setup<I, O, S, R>(&mut self, setup: S, routine: R)
-    where
-        S: FnMut() -> I,
-        R: FnMut(I) -> O,
-    {
-        self.iter_batched(setup, routine, BatchSize::PerIteration);
-    }
-
-    /// Times a `routine` by collecting its output on each iteration. This avoids timing the
-    /// destructor of the value returned by `routine`.
-    ///
-    /// WARNING: This requires `O(iters * mem::size_of::<O>())` of memory, and `iters` is not under the
-    /// control of the caller. If this causes out-of-memory errors, use `iter_batched` instead.
-    ///
-    /// # Timing model
-    ///
-    /// ``` text
-    /// elapsed = Instant::now + iters * (routine) + Iterator::collect::<Vec<O>>
-    /// ```
-    ///
-    /// # Example
-    ///
-    /// ```rust
-    /// #[macro_use] extern crate criterion;
-    ///
-    /// use criterion::*;
-    ///
-    /// fn create_vector() -> Vec<u64> {
-    ///     # vec![]
-    ///     // ...
-    /// }
-    ///
-    /// fn bench(c: &mut Criterion) {
-    ///     c.bench_function("with_drop", move |b| {
-    ///         // This will avoid timing the Vec::drop.
-    ///         b.iter_with_large_drop(|| create_vector())
-    ///     });
-    /// }
-    ///
-    /// criterion_group!(benches, bench);
-    /// criterion_main!(benches);
-    /// ```
-    ///
-    pub fn iter_with_large_drop<O, R>(&mut self, mut routine: R)
-    where
-        R: FnMut() -> O,
-    {
-        self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput);
-    }
-
-    /// Times a `routine` that requires some input by generating a batch of input, then timing the
-    /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for
-    /// details on choosing the batch size. Use this when the routine must consume its input.
-    ///
-    /// For example, use this loop to benchmark sorting algorithms, because they require unsorted
-    /// data on each iteration.
-    ///
-    /// # Timing model
-    ///
-    /// ```text
-    /// elapsed = (Instant::now * num_batches) + (iters * (routine + O::drop)) + Vec::extend
-    /// ```
-    ///
-    /// # Example
-    ///
-    /// ```rust
-    /// #[macro_use] extern crate criterion;
-    ///
-    /// use criterion::*;
-    ///
-    /// fn create_scrambled_data() -> Vec<u64> {
-    ///     # vec![]
-    ///     // ...
-    /// }
-    ///
-    /// // The sorting algorithm to test
-    /// fn sort(data: &mut [u64]) {
-    ///     // ...
-    /// }
-    ///
-    /// fn bench(c: &mut Criterion) {
-    ///     let data = create_scrambled_data();
-    ///
-    ///     c.bench_function("with_setup", move |b| {
-    ///         // This will avoid timing the clone call.
-    ///         b.iter_batched(|| data.clone(), |mut data| sort(&mut data), BatchSize::SmallInput)
-    ///     });
-    /// }
-    ///
-    /// criterion_group!(benches, bench);
-    /// criterion_main!(benches);
-    /// ```
-    ///
-    #[inline(never)]
-    pub fn iter_batched<I, O, S, R>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
-    where
-        S: FnMut() -> I,
-        R: FnMut(I) -> O,
-    {
-        self.iterated = true;
-        let batch_size = size.iters_per_batch(self.iters);
-        assert!(batch_size != 0, "Batch size must not be zero.");
-        let time_start = Instant::now();
-        self.value = self.measurement.zero();
-
-        if batch_size == 1 {
-            for _ in 0..self.iters {
-                let input = black_box(setup());
-
-                let start = self.measurement.start();
-                let output = routine(input);
-                let end = self.measurement.end(start);
-                self.value = self.measurement.add(&self.value, &end);
-
-                drop(black_box(output));
-            }
-        } else {
-            let mut iteration_counter = 0;
-
-            while iteration_counter < self.iters {
-                let batch_size = ::std::cmp::min(batch_size, self.iters - iteration_counter);
-
-                let inputs = black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
-                let mut outputs = Vec::with_capacity(batch_size as usize);
-
-                let start = self.measurement.start();
-                outputs.extend(inputs.into_iter().map(&mut routine));
-                let end = self.measurement.end(start);
-                self.value = self.measurement.add(&self.value, &end);
-
-                black_box(outputs);
-
-                iteration_counter += batch_size;
-            }
-        }
-
-        self.elapsed_time = time_start.elapsed();
-    }
-
-    /// Times a `routine` that requires some input by generating a batch of input, then timing the
-    /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for
-    /// details on choosing the batch size. Use this when the routine should accept the input by
-    /// mutable reference.
-    ///
-    /// For example, use this loop to benchmark sorting algorithms, because they require unsorted
-    /// data on each iteration.
-    ///
-    /// # Timing model
-    ///
-    /// ```text
-    /// elapsed = (Instant::now * num_batches) + (iters * routine) + Vec::extend
-    /// ```
-    ///
-    /// # Example
-    ///
-    /// ```rust
-    /// #[macro_use] extern crate criterion;
-    ///
-    /// use criterion::*;
-    ///
-    /// fn create_scrambled_data() -> Vec<u64> {
-    ///     # vec![]
-    ///     // ...
-    /// }
-    ///
-    /// // The sorting algorithm to test
-    /// fn sort(data: &mut [u64]) {
-    ///     // ...
-    /// }
-    ///
-    /// fn bench(c: &mut Criterion) {
-    ///     let data = create_scrambled_data();
-    ///
-    ///     c.bench_function("with_setup", move |b| {
-    ///         // This will avoid timing the clone call.
-    ///         b.iter_batched(|| data.clone(), |mut data| sort(&mut data), BatchSize::SmallInput)
-    ///     });
-    /// }
-    ///
-    /// criterion_group!(benches, bench);
-    /// criterion_main!(benches);
-    /// ```
-    ///
-    #[inline(never)]
-    pub fn iter_batched_ref<I, O, S, R>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
-    where
-        S: FnMut() -> I,
-        R: FnMut(&mut I) -> O,
-    {
-        self.iterated = true;
-        let batch_size = size.iters_per_batch(self.iters);
-        assert!(batch_size != 0, "Batch size must not be zero.");
-        let time_start = Instant::now();
-        self.value = self.measurement.zero();
-
-        if batch_size == 1 {
-            for _ in 0..self.iters {
-                let mut input = black_box(setup());
-
-                let start = self.measurement.start();
-                let output = routine(&mut input);
-                let end = self.measurement.end(start);
-                self.value = self.measurement.add(&self.value, &end);
-
-                drop(black_box(output));
-                drop(black_box(input));
-            }
-        } else {
-            let mut iteration_counter = 0;
-
-            while iteration_counter < self.iters {
-                let batch_size = ::std::cmp::min(batch_size, self.iters - iteration_counter);
-
-                let mut inputs = black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
-                let mut outputs = Vec::with_capacity(batch_size as usize);
-
-                let start = self.measurement.start();
-                outputs.extend(inputs.iter_mut().map(&mut routine));
-                let end = self.measurement.end(start);
-                self.value = self.measurement.add(&self.value, &end);
-
-                black_box(outputs);
-
-                iteration_counter += batch_size;
-            }
-        }
-        self.elapsed_time = time_start.elapsed();
-    }
-
-    // Benchmarks must actually call one of the iter methods. This causes benchmarks to fail loudly
-    // if they don't.
-    pub(crate) fn assert_iterated(&mut self) {
-        assert!(
-            self.iterated,
-            "Benchmark function must call Bencher::iter or related method."
-        );
-        self.iterated = false;
-    }
-
-    /// Convert this bencher into an AsyncBencher, which enables async/await support.
-    #[cfg(feature = "async")]
-    pub fn to_async<'b, A: AsyncExecutor>(&'b mut self, runner: A) -> AsyncBencher<'a, 'b, A, M> {
-        AsyncBencher { b: self, runner }
-    }
-}
-
-/// Async/await variant of the Bencher struct.
-#[cfg(feature = "async")]
-pub struct AsyncBencher<'a, 'b, A: AsyncExecutor, M: Measurement = WallTime> {
-    b: &'b mut Bencher<'a, M>,
-    runner: A,
-}
-#[cfg(feature = "async")]
-impl<'a, 'b, A: AsyncExecutor, M: Measurement> AsyncBencher<'a, 'b, A, M> {
-    /// Times a `routine` by executing it many times and timing the total elapsed time.
-    ///
-    /// Prefer this timing loop when `routine` returns a value that doesn't have a destructor.
-    ///
-    /// # Timing model
-    ///
-    /// Note that the `AsyncBencher` also times the time required to destroy the output of `routine()`.
-    /// Therefore prefer this timing loop when the runtime of `mem::drop(O)` is negligible compared
-    /// to the runtime of the `routine`.
-    ///
-    /// ```text
-    /// elapsed = Instant::now + iters * (routine + mem::drop(O) + Range::next)
-    /// ```
-    ///
-    /// # Example
-    ///
-    /// ```rust
-    /// #[macro_use] extern crate criterion;
-    ///
-    /// use criterion::*;
-    /// use criterion::async_executor::FuturesExecutor;
-    ///
-    /// // The function to benchmark
-    /// async fn foo() {
-    ///     // ...
-    /// }
-    ///
-    /// fn bench(c: &mut Criterion) {
-    ///     c.bench_function("iter", move |b| {
-    ///         b.to_async(FuturesExecutor).iter(|| async { foo().await } )
-    ///     });
-    /// }
-    ///
-    /// criterion_group!(benches, bench);
-    /// criterion_main!(benches);
-    /// ```
-    ///
-    #[inline(never)]
-    pub fn iter<O, R, F>(&mut self, mut routine: R)
-    where
-        R: FnMut() -> F,
-        F: Future<Output = O>,
-    {
-        let AsyncBencher { b, runner } = self;
-        runner.block_on(async {
-            b.iterated = true;
-            let time_start = Instant::now();
-            let start = b.measurement.start();
-            for _ in 0..b.iters {
-                black_box(routine().await);
-            }
-            b.value = b.measurement.end(start);
-            b.elapsed_time = time_start.elapsed();
-        });
-    }
-
-    /// Times a `routine` by executing it many times and relying on `routine` to measure its own execution time.
-    ///
-    /// Prefer this timing loop in cases where `routine` has to do its own measurements to
-    /// get accurate timing information (for example in multi-threaded scenarios where you spawn
-    /// and coordinate with multiple threads).
-    ///
-    /// # Timing model
-    /// Custom, the timing model is whatever is returned as the Duration from `routine`.
-    ///
-    /// # Example
-    /// ```rust
-    /// #[macro_use] extern crate criterion;
-    /// use criterion::*;
-    /// use criterion::black_box;
-    /// use criterion::async_executor::FuturesExecutor;
-    /// use std::time::Instant;
-    ///
-    /// async fn foo() {
-    ///     // ...
-    /// }
-    ///
-    /// fn bench(c: &mut Criterion) {
-    ///     c.bench_function("iter", move |b| {
-    ///         b.to_async(FuturesExecutor).iter_custom(|iters| {
-    ///             async move {
-    ///                 let start = Instant::now();
-    ///                 for _i in 0..iters {
-    ///                     black_box(foo().await);
-    ///                 }
-    ///                 start.elapsed()
-    ///             }
-    ///         })
-    ///     });
-    /// }
-    ///
-    /// criterion_group!(benches, bench);
-    /// criterion_main!(benches);
-    /// ```
-    ///
-    #[inline(never)]
-    pub fn iter_custom<R, F>(&mut self, mut routine: R)
-    where
-        R: FnMut(u64) -> F,
-        F: Future<Output = M::Value>,
-    {
-        let AsyncBencher { b, runner } = self;
-        runner.block_on(async {
-            b.iterated = true;
-            let time_start = Instant::now();
-            b.value = routine(b.iters).await;
-            b.elapsed_time = time_start.elapsed();
-        })
-    }
-
-    #[doc(hidden)]
-    pub fn iter_with_setup<I, O, S, R, F>(&mut self, setup: S, routine: R)
-    where
-        S: FnMut() -> I,
-        R: FnMut(I) -> F,
-        F: Future<Output = O>,
-    {
-        self.iter_batched(setup, routine, BatchSize::PerIteration);
-    }
-
-    /// Times a `routine` by collecting its output on each iteration. This avoids timing the
-    /// destructor of the value returned by `routine`.
-    ///
-    /// WARNING: This requires `O(iters * mem::size_of::<O>())` of memory, and `iters` is not under the
-    /// control of the caller. If this causes out-of-memory errors, use `iter_batched` instead.
-    ///
-    /// # Timing model
-    ///
-    /// ``` text
-    /// elapsed = Instant::now + iters * (routine) + Iterator::collect::<Vec<O>>
-    /// ```
-    ///
-    /// # Example
-    ///
-    /// ```rust
-    /// #[macro_use] extern crate criterion;
-    ///
-    /// use criterion::*;
-    /// use criterion::async_executor::FuturesExecutor;
-    ///
-    /// async fn create_vector() -> Vec<u64> {
-    ///     # vec![]
-    ///     // ...
-    /// }
-    ///
-    /// fn bench(c: &mut Criterion) {
-    ///     c.bench_function("with_drop", move |b| {
-    ///         // This will avoid timing the Vec::drop.
-    ///         b.to_async(FuturesExecutor).iter_with_large_drop(|| async { create_vector().await })
-    ///     });
-    /// }
-    ///
-    /// criterion_group!(benches, bench);
-    /// criterion_main!(benches);
-    /// ```
-    ///
-    pub fn iter_with_large_drop<O, R, F>(&mut self, mut routine: R)
-    where
-        R: FnMut() -> F,
-        F: Future<Output = O>,
-    {
-        self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput);
-    }
-
-    #[doc(hidden)]
-    pub fn iter_with_large_setup<I, O, S, R, F>(&mut self, setup: S, routine: R)
-    where
-        S: FnMut() -> I,
-        R: FnMut(I) -> F,
-        F: Future<Output = O>,
-    {
-        self.iter_batched(setup, routine, BatchSize::NumBatches(1));
-    }
-
-    /// Times a `routine` that requires some input by generating a batch of input, then timing the
-    /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for
-    /// details on choosing the batch size. Use this when the routine must consume its input.
-    ///
-    /// For example, use this loop to benchmark sorting algorithms, because they require unsorted
-    /// data on each iteration.
-    ///
-    /// # Timing model
-    ///
-    /// ```text
-    /// elapsed = (Instant::now * num_batches) + (iters * (routine + O::drop)) + Vec::extend
-    /// ```
-    ///
-    /// # Example
-    ///
-    /// ```rust
-    /// #[macro_use] extern crate criterion;
-    ///
-    /// use criterion::*;
-    /// use criterion::async_executor::FuturesExecutor;
-    ///
-    /// fn create_scrambled_data() -> Vec<u64> {
-    ///     # vec![]
-    ///     // ...
-    /// }
-    ///
-    /// // The sorting algorithm to test
-    /// async fn sort(data: &mut [u64]) {
-    ///     // ...
-    /// }
-    ///
-    /// fn bench(c: &mut Criterion) {
-    ///     let data = create_scrambled_data();
-    ///
-    ///     c.bench_function("with_setup", move |b| {
-    ///         // This will avoid timing the clone call.
-    ///         b.iter_batched(|| data.clone(), |mut data| async move { sort(&mut data).await }, BatchSize::SmallInput)
-    ///     });
-    /// }
-    ///
-    /// criterion_group!(benches, bench);
-    /// criterion_main!(benches);
-    /// ```
-    ///
-    #[inline(never)]
-    pub fn iter_batched<I, O, S, R, F>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
-    where
-        S: FnMut() -> I,
-        R: FnMut(I) -> F,
-        F: Future<Output = O>,
-    {
-        let AsyncBencher { b, runner } = self;
-        runner.block_on(async {
-            b.iterated = true;
-            let batch_size = size.iters_per_batch(b.iters);
-            assert!(batch_size != 0, "Batch size must not be zero.");
-            let time_start = Instant::now();
-            b.value = b.measurement.zero();
-
-            if batch_size == 1 {
-                for _ in 0..b.iters {
-                    let input = black_box(setup());
-
-                    let start = b.measurement.start();
-                    let output = routine(input).await;
-                    let end = b.measurement.end(start);
-                    b.value = b.measurement.add(&b.value, &end);
-
-                    drop(black_box(output));
-                }
-            } else {
-                let mut iteration_counter = 0;
-
-                while iteration_counter < b.iters {
-                    let batch_size = ::std::cmp::min(batch_size, b.iters - iteration_counter);
-
-                    let inputs = black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
-                    let mut outputs = Vec::with_capacity(batch_size as usize);
-
-                    let start = b.measurement.start();
-                    // Can't use .extend here like the sync version does
-                    for input in inputs {
-                        outputs.push(routine(input).await);
-                    }
-                    let end = b.measurement.end(start);
-                    b.value = b.measurement.add(&b.value, &end);
-
-                    black_box(outputs);
-
-                    iteration_counter += batch_size;
-                }
-            }
-
-            b.elapsed_time = time_start.elapsed();
-        })
-    }
-
-    /// Times a `routine` that requires some input by generating a batch of input, then timing the
-    /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for
-    /// details on choosing the batch size. Use this when the routine should accept the input by
-    /// mutable reference.
-    ///
-    /// For example, use this loop to benchmark sorting algorithms, because they require unsorted
-    /// data on each iteration.
-    ///
-    /// # Timing model
-    ///
-    /// ```text
-    /// elapsed = (Instant::now * num_batches) + (iters * routine) + Vec::extend
-    /// ```
-    ///
-    /// # Example
-    ///
-    /// ```rust
-    /// #[macro_use] extern crate criterion;
-    ///
-    /// use criterion::*;
-    /// use criterion::async_executor::FuturesExecutor;
-    ///
-    /// fn create_scrambled_data() -> Vec<u64> {
-    ///     # vec![]
-    ///     // ...
-    /// }
-    ///
-    /// // The sorting algorithm to test
-    /// async fn sort(data: &mut [u64]) {
-    ///     // ...
-    /// }
-    ///
-    /// fn bench(c: &mut Criterion) {
-    ///     let data = create_scrambled_data();
-    ///
-    ///     c.bench_function("with_setup", move |b| {
-    ///         // This will avoid timing the clone call.
-    ///         b.iter_batched(|| data.clone(), |mut data| async move { sort(&mut data).await }, BatchSize::SmallInput)
-    ///     });
-    /// }
-    ///
-    /// criterion_group!(benches, bench);
-    /// criterion_main!(benches);
-    /// ```
-    ///
-    #[inline(never)]
-    pub fn iter_batched_ref<I, O, S, R, F>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
-    where
-        S: FnMut() -> I,
-        R: FnMut(&mut I) -> F,
-        F: Future<Output = O>,
-    {
-        let AsyncBencher { b, runner } = self;
-        runner.block_on(async {
-            b.iterated = true;
-            let batch_size = size.iters_per_batch(b.iters);
-            assert!(batch_size != 0, "Batch size must not be zero.");
-            let time_start = Instant::now();
-            b.value = b.measurement.zero();
-
-            if batch_size == 1 {
-                for _ in 0..b.iters {
-                    let mut input = black_box(setup());
-
-                    let start = b.measurement.start();
-                    let output = routine(&mut input).await;
-                    let end = b.measurement.end(start);
-                    b.value = b.measurement.add(&b.value, &end);
-
-                    drop(black_box(output));
-                    drop(black_box(input));
-                }
-            } else {
-                let mut iteration_counter = 0;
-
-                while iteration_counter < b.iters {
-                    let batch_size = ::std::cmp::min(batch_size, b.iters - iteration_counter);
-
-                    let inputs = black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
-                    let mut outputs = Vec::with_capacity(batch_size as usize);
-
-                    let start = b.measurement.start();
-                    // Can't use .extend here like the sync version does
-                    for mut input in inputs {
-                        outputs.push(routine(&mut input).await);
-                    }
-                    let end = b.measurement.end(start);
-                    b.value = b.measurement.add(&b.value, &end);
-
-                    black_box(outputs);
-
-                    iteration_counter += batch_size;
-                }
-            }
-            b.elapsed_time = time_start.elapsed();
-        });
-    }
-}
+use std::iter::IntoIterator;
+use std::time::Duration;
+use std::time::Instant;
+
+use crate::black_box;
+use crate::measurement::{Measurement, WallTime};
+use crate::BatchSize;
+
+#[cfg(feature = "async")]
+use std::future::Future;
+
+#[cfg(feature = "async")]
+use crate::async_executor::AsyncExecutor;
+
+// ================================== MAINTENANCE NOTE =============================================
+// Any changes made to either Bencher or AsyncBencher will have to be replicated to the other!
+// ================================== MAINTENANCE NOTE =============================================
+
+/// Timer struct used to iterate a benchmarked function and measure the runtime.
+///
+/// This struct provides different timing loops as methods. Each timing loop provides a different
+/// way to time a routine and each has advantages and disadvantages.
+///
+/// * If you want to do the iteration and measurement yourself (eg. passing the iteration count
+///   to a separate process), use `iter_custom`.
+/// * If your routine requires no per-iteration setup and returns a value with an expensive `drop`
+///   method, use `iter_with_large_drop`.
+/// * If your routine requires some per-iteration setup that shouldn't be timed, use `iter_batched`
+///   or `iter_batched_ref`. See [`BatchSize`](enum.BatchSize.html) for a discussion of batch sizes.
+///   If the setup value implements `Drop` and you don't want to include the `drop` time in the
+///   measurement, use `iter_batched_ref`, otherwise use `iter_batched`. These methods are also
+///   suitable for benchmarking routines which return a value with an expensive `drop` method,
+///   but are more complex than `iter_with_large_drop`.
+/// * Otherwise, use `iter`.
+pub struct Bencher<'a, M: Measurement = WallTime> {
+    pub(crate) iterated: bool,         // Have we iterated this benchmark?
+    pub(crate) iters: u64,             // Number of times to iterate this benchmark
+    pub(crate) value: M::Value,        // The measured value
+    pub(crate) measurement: &'a M,     // Reference to the measurement object
+    pub(crate) elapsed_time: Duration, // How much time did it take to perform the iteration? Used for the warmup period.
+}
+impl<'a, M: Measurement> Bencher<'a, M> {
+    /// Times a `routine` by executing it many times and timing the total elapsed time.
+    ///
+    /// Prefer this timing loop when `routine` returns a value that doesn't have a destructor.
+    ///
+    /// # Timing model
+    ///
+    /// Note that the `Bencher` also times the time required to destroy the output of `routine()`.
+    /// Therefore prefer this timing loop when the runtime of `mem::drop(O)` is negligible compared
+    /// to the runtime of the `routine`.
+    ///
+    /// ```text
+    /// elapsed = Instant::now + iters * (routine + mem::drop(O) + Range::next)
+    /// ```
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use criterion2::*;
+    ///
+    /// // The function to benchmark
+    /// fn foo() {
+    ///     // ...
+    /// }
+    ///
+    /// fn bench(c: &mut Criterion) {
+    ///     c.bench_function("iter", move |b| {
+    ///         b.iter(|| foo())
+    ///     });
+    /// }
+    ///
+    /// criterion_group!(benches, bench);
+    /// criterion_main!(benches);
+    /// ```
+    ///
+    #[inline(never)]
+    pub fn iter<O, R>(&mut self, mut routine: R)
+    where
+        R: FnMut() -> O,
+    {
+        self.iterated = true;
+        let time_start = Instant::now();
+        let start = self.measurement.start();
+        for _ in 0..self.iters {
+            black_box(routine());
+        }
+        self.value = self.measurement.end(start);
+        self.elapsed_time = time_start.elapsed();
+    }
+
+    /// Times a `routine` by executing it many times and relying on `routine` to measure its own execution time.
+    ///
+    /// Prefer this timing loop in cases where `routine` has to do its own measurements to
+    /// get accurate timing information (for example in multi-threaded scenarios where you spawn
+    /// and coordinate with multiple threads).
+    ///
+    /// # Timing model
+    /// Custom, the timing model is whatever is returned as the Duration from `routine`.
+    ///
+    /// # Example
+    /// ```rust
+    /// use criterion2::*;
+    /// use criterion2::black_box;
+    /// use std::time::Instant;
+    ///
+    /// fn foo() {
+    ///     // ...
+    /// }
+    ///
+    /// fn bench(c: &mut Criterion) {
+    ///     c.bench_function("iter", move |b| {
+    ///         b.iter_custom(|iters| {
+    ///             let start = Instant::now();
+    ///             for _i in 0..iters {
+    ///                 black_box(foo());
+    ///             }
+    ///             start.elapsed()
+    ///         })
+    ///     });
+    /// }
+    ///
+    /// criterion_group!(benches, bench);
+    /// criterion_main!(benches);
+    /// ```
+    ///
+    #[inline(never)]
+    pub fn iter_custom<R>(&mut self, mut routine: R)
+    where
+        R: FnMut(u64) -> M::Value,
+    {
+        self.iterated = true;
+        let time_start = Instant::now();
+        self.value = routine(self.iters);
+        self.elapsed_time = time_start.elapsed();
+    }
+
+    #[doc(hidden)]
+    pub fn iter_with_setup<I, O, S, R>(&mut self, setup: S, routine: R)
+    where
+        S: FnMut() -> I,
+        R: FnMut(I) -> O,
+    {
+        self.iter_batched(setup, routine, BatchSize::PerIteration);
+    }
+
+    /// Times a `routine` by collecting its output on each iteration. This avoids timing the
+    /// destructor of the value returned by `routine`.
+    ///
+    /// WARNING: This requires `O(iters * mem::size_of::<O>())` of memory, and `iters` is not under the
+    /// control of the caller. If this causes out-of-memory errors, use `iter_batched` instead.
+    ///
+    /// # Timing model
+    ///
+    /// ``` text
+    /// elapsed = Instant::now + iters * (routine) + Iterator::collect::<Vec<O>>
+    /// ```
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use criterion2::*;
+    ///
+    /// fn create_vector() -> Vec<u64> {
+    ///     # vec![]
+    ///     // ...
+    /// }
+    ///
+    /// fn bench(c: &mut Criterion) {
+    ///     c.bench_function("with_drop", move |b| {
+    ///         // This will avoid timing the Vec::drop.
+    ///         b.iter_with_large_drop(|| create_vector())
+    ///     });
+    /// }
+    ///
+    /// criterion_group!(benches, bench);
+    /// criterion_main!(benches);
+    /// ```
+    ///
+    pub fn iter_with_large_drop<O, R>(&mut self, mut routine: R)
+    where
+        R: FnMut() -> O,
+    {
+        self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput);
+    }
+
+    /// Times a `routine` that requires some input by generating a batch of input, then timing the
+    /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for
+    /// details on choosing the batch size. Use this when the routine must consume its input.
+    ///
+    /// For example, use this loop to benchmark sorting algorithms, because they require unsorted
+    /// data on each iteration.
+    ///
+    /// # Timing model
+    ///
+    /// ```text
+    /// elapsed = (Instant::now * num_batches) + (iters * (routine + O::drop)) + Vec::extend
+    /// ```
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use criterion2::*;
+    ///
+    /// fn create_scrambled_data() -> Vec<u64> {
+    ///     # vec![]
+    ///     // ...
+    /// }
+    ///
+    /// // The sorting algorithm to test
+    /// fn sort(data: &mut [u64]) {
+    ///     // ...
+    /// }
+    ///
+    /// fn bench(c: &mut Criterion) {
+    ///     let data = create_scrambled_data();
+    ///
+    ///     c.bench_function("with_setup", move |b| {
+    ///         // This will avoid timing the clone call.
+
+    /// Times a `routine` by executing it many times and relying on `routine` to measure its own execution time.
+    ///
+    /// Prefer this timing loop in cases where `routine` has to do its own measurements to
+    /// get accurate timing information (for example in multi-threaded scenarios where you spawn
+    /// and coordinate with multiple threads).
+    ///
+    /// # Timing model
+    /// Custom, the timing model is whatever is returned as the Duration from `routine`.
+    ///
+    /// # Example
+    /// ```rust
+    /// use criterion2::*;
+    /// use criterion2::black_box;
+    /// use std::time::Instant;
+    ///
+    /// fn foo() {
+    ///     // ...
+    /// }
+    ///
+    /// fn bench(c: &mut Criterion) {
+    ///     c.bench_function("iter", move |b| {
+    ///         b.iter_custom(|iters| {
+    ///             let start = Instant::now();
+    ///             for _i in 0..iters {
+    ///                 black_box(foo());
+    ///             }
+    ///             start.elapsed()
+    ///         })
+    ///     });
+    /// }
+    ///
+    /// criterion_group!(benches, bench);
+    /// criterion_main!(benches);
+    /// ```
+    ///
+    #[inline(never)]
+    pub fn iter_custom<R>(&mut self, mut routine: R)
+    where
+        R: FnMut(u64) -> M::Value,
+    {
+        self.iterated = true;
+        let time_start = Instant::now();
+        self.value = routine(self.iters);
+        self.elapsed_time = time_start.elapsed();
+    }
+
+    #[doc(hidden)]
+    pub fn iter_with_setup<I, O, S, R>(&mut self, setup: S, routine: R)
+    where
+        S: FnMut() -> I,
+        R: FnMut(I) -> O,
+    {
+        self.iter_batched(setup, routine, BatchSize::PerIteration);
+    }
+
+    /// Times a `routine` by collecting its output on each iteration. This avoids timing the
+    /// destructor of the value returned by `routine`.
+    ///
+    /// WARNING: This requires `O(iters * mem::size_of::<O>())` of memory, and `iters` is not under the
+    /// control of the caller. If this causes out-of-memory errors, use `iter_batched` instead.
+    ///
+    /// # Timing model
+    ///
+    /// ``` text
+    /// elapsed = Instant::now + iters * (routine) + Iterator::collect::<Vec<O>>
+    /// ```
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use criterion2::*;
+    ///
+    /// fn create_vector() -> Vec<u64> {
+    ///     # vec![]
+    ///     // ...
+    /// }
+    ///
+    /// fn bench(c: &mut Criterion) {
+    ///     c.bench_function("with_drop", move |b| {
+    ///         // This will avoid timing the Vec::drop.
+    ///         b.iter_with_large_drop(|| create_vector())
+    ///     });
+    /// }
+    ///
+    /// criterion_group!(benches, bench);
+    /// criterion_main!(benches);
+    /// ```
+    ///
+    pub fn iter_with_large_drop<O, R>(&mut self, mut routine: R)
+    where
+        R: FnMut() -> O,
+    {
+        self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput);
+    }
+
+    /// Times a `routine` that requires some input by generating a batch of input, then timing the
+    /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for
+    /// details on choosing the batch size. Use this when the routine must consume its input.
+    ///
+    /// For example, use this loop to benchmark sorting algorithms, because they require unsorted
+    /// data on each iteration.
+    ///
+    /// # Timing model
+    ///
+    /// ```text
+    /// elapsed = (Instant::now * num_batches) + (iters * (routine + O::drop)) + Vec::extend
+    /// ```
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use criterion2::*;
+    ///
+    /// fn create_scrambled_data() -> Vec<u64> {
+    ///     # vec![]
+    ///     // ...
+    /// }
+    ///
+    /// // The sorting algorithm to test
+    /// fn sort(data: &mut [u64]) {
+    ///     // ...
+    /// }
+    ///
+    /// fn bench(c: &mut Criterion) {
+    ///     let data = create_scrambled_data();
+    ///
+    ///     c.bench_function("with_setup", move |b| {
+    ///         // This will avoid timing the clone call.
+    ///         b.iter_batched(|| data.clone(), |mut data| sort(&mut data), BatchSize::SmallInput)
+    ///     });
+    /// }
+    ///
+    /// criterion_group!(benches, bench);
+    /// criterion_main!(benches);
+    /// ```
+    ///
+    #[inline(never)]
+    pub fn iter_batched<I, O, S, R>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
+    where
+        S: FnMut() -> I,
+        R: FnMut(I) -> O,
+    {
+        self.iterated = true;
+        let batch_size = size.iters_per_batch(self.iters);
+        assert!(batch_size != 0, "Batch size must not be zero.");
+        let time_start = Instant::now();
+        self.value = self.measurement.zero();
+
+        if batch_size == 1 {
+            for _ in 0..self.iters {
+                let input = black_box(setup());
+
+                let start = self.measurement.start();
+                let output = routine(input);
+                let end = self.measurement.end(start);
+                self.value = self.measurement.add(&self.value, &end);
+
+                drop(black_box(output));
+            }
+        } else {
+            let mut iteration_counter = 0;
+
+            while iteration_counter < self.iters {
+                let batch_size = ::std::cmp::min(batch_size, self.iters - iteration_counter);
+
+                let inputs = black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
+                let mut outputs = Vec::with_capacity(batch_size as usize);
+
+                let start = self.measurement.start();
+                outputs.extend(inputs.into_iter().map(&mut routine));
+                let end = self.measurement.end(start);
+                self.value = self.measurement.add(&self.value, &end);
+
+                black_box(outputs);
+
+                iteration_counter += batch_size;
+            }
+        }
+
+        self.elapsed_time = time_start.elapsed();
+    }
+
+    /// Times a `routine` that requires some input by generating a batch of input, then timing the
+    /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for
+    /// details on choosing the batch size. Use this when the routine should accept the input by
+    /// mutable reference.
+    ///
+    /// For example, use this loop to benchmark sorting algorithms, because they require unsorted
+    /// data on each iteration.
+    ///
+    /// # Timing model
+    ///
+    /// ```text
+    /// elapsed = (Instant::now * num_batches) + (iters * routine) + Vec::extend
+    /// ```
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use criterion2::*;
+    ///
+    /// fn create_scrambled_data() -> Vec<u64> {
+    ///     # vec![]
+    ///     // ...
+    /// }
+    ///
+    /// // The sorting algorithm to test
+    /// fn sort(data: &mut [u64]) {
+    ///     // ...
+    /// }
+    ///
+    /// fn bench(c: &mut Criterion) {
+    ///     let data = create_scrambled_data();
+    ///
+    ///     c.bench_function("with_setup", move |b| {
+    ///         // This will avoid timing the clone call.
+    ///         b.iter_batched_ref(|| data.clone(), |data| sort(data), BatchSize::SmallInput)
+    ///     });
+    /// }
+    ///
+    /// criterion_group!(benches, bench);
+    /// criterion_main!(benches);
+    /// ```
+    ///
+    #[inline(never)]
+    pub fn iter_batched_ref<I, O, S, R>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
+    where
+        S: FnMut() -> I,
+        R: FnMut(&mut I) -> O,
+    {
+        self.iterated = true;
+        let batch_size = size.iters_per_batch(self.iters);
+        assert!(batch_size != 0, "Batch size must not be zero.");
+        let time_start = Instant::now();
+        self.value = self.measurement.zero();
+
+        if batch_size == 1 {
+            for _ in 0..self.iters {
+                let mut input = black_box(setup());
+
+                let start = self.measurement.start();
+                let output = routine(&mut input);
+                let end = self.measurement.end(start);
+                self.value = self.measurement.add(&self.value, &end);
+
+                drop(black_box(output));
+                drop(black_box(input));
+            }
+        } else {
+            let mut iteration_counter = 0;
+
+            while iteration_counter < self.iters {
+                let batch_size = ::std::cmp::min(batch_size, self.iters - iteration_counter);
+
+                let mut inputs = black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
+                let mut outputs = Vec::with_capacity(batch_size as usize);
+
+                let start = self.measurement.start();
+                outputs.extend(inputs.iter_mut().map(&mut routine));
+                let end = self.measurement.end(start);
+                self.value = self.measurement.add(&self.value, &end);
+
+                black_box(outputs);
+
+                iteration_counter += batch_size;
+            }
+        }
+        self.elapsed_time = time_start.elapsed();
+    }
+
+    // Benchmarks must actually call one of the iter methods. This causes benchmarks to fail loudly
+    // if they don't.
+    pub(crate) fn assert_iterated(&mut self) {
+        assert!(self.iterated, "Benchmark function must call Bencher::iter or related method.");
+        self.iterated = false;
+    }
+
+    /// Convert this bencher into an AsyncBencher, which enables async/await support.
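+    ///
+    /// # Example
+    ///
+    /// A minimal sketch (assuming the `async_futures` feature, which provides the bundled
+    /// `FuturesExecutor`; any other `AsyncExecutor` works the same way):
+    ///
+    /// ```rust
+    /// use criterion2::*;
+    /// use criterion2::async_executor::FuturesExecutor;
+    ///
+    /// async fn foo() {
+    ///     // ...
+    /// }
+    ///
+    /// fn bench(c: &mut Criterion) {
+    ///     c.bench_function("to_async", move |b| {
+    ///         // Wrap the synchronous bencher, then use the usual iter methods.
+    ///         b.to_async(FuturesExecutor).iter(|| async { foo().await })
+    ///     });
+    /// }
+    ///
+    /// criterion_group!(benches, bench);
+    /// criterion_main!(benches);
+    /// ```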
+    #[cfg(feature = "async")]
+    pub fn to_async<'b, A: AsyncExecutor>(&'b mut self, runner: A) -> AsyncBencher<'a, 'b, A, M> {
+        AsyncBencher { b: self, runner }
+    }
+}
+
+/// Async/await variant of the Bencher struct.
+#[cfg(feature = "async")]
+pub struct AsyncBencher<'a, 'b, A: AsyncExecutor, M: Measurement = WallTime> {
+    b: &'b mut Bencher<'a, M>,
+    runner: A,
+}
+#[cfg(feature = "async")]
+impl<'a, 'b, A: AsyncExecutor, M: Measurement> AsyncBencher<'a, 'b, A, M> {
+    /// Times a `routine` by executing it many times and timing the total elapsed time.
+    ///
+    /// Prefer this timing loop when `routine` returns a value that doesn't have a destructor.
+    ///
+    /// # Timing model
+    ///
+    /// Note that the `AsyncBencher` also times the time required to destroy the output of `routine()`.
+    /// Therefore prefer this timing loop when the runtime of `mem::drop(O)` is negligible compared
+    /// to the runtime of the `routine`.
+    ///
+    /// ```text
+    /// elapsed = Instant::now + iters * (routine + mem::drop(O) + Range::next)
+    /// ```
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use criterion2::*;
+    /// use criterion2::async_executor::FuturesExecutor;
+    ///
+    /// // The function to benchmark
+    /// async fn foo() {
+    ///     // ...
+    /// }
+    ///
+    /// fn bench(c: &mut Criterion) {
+    ///     c.bench_function("iter", move |b| {
+    ///         b.to_async(FuturesExecutor).iter(|| async { foo().await } )
+    ///     });
+    /// }
+    ///
+    /// criterion_group!(benches, bench);
+    /// criterion_main!(benches);
+    /// ```
+    ///
+    #[inline(never)]
+    pub fn iter<O, R, F>(&mut self, mut routine: R)
+    where
+        R: FnMut() -> F,
+        F: Future<Output = O>,
+    {
+        let AsyncBencher { b, runner } = self;
+        runner.block_on(async {
+            b.iterated = true;
+            let time_start = Instant::now();
+            let start = b.measurement.start();
+            for _ in 0..b.iters {
+                black_box(routine().await);
+            }
+            b.value = b.measurement.end(start);
+            b.elapsed_time = time_start.elapsed();
+        });
+    }
+
+    /// Times a `routine` by executing it many times and relying on `routine` to measure its own execution time.
+    ///
+    /// Prefer this timing loop in cases where `routine` has to do its own measurements to
+    /// get accurate timing information (for example in multi-threaded scenarios where you spawn
+    /// and coordinate with multiple threads).
+    ///
+    /// # Timing model
+    /// Custom, the timing model is whatever is returned as the Duration from `routine`.
+    ///
+    /// # Example
+    /// ```rust
+    /// use criterion2::*;
+    /// use criterion2::black_box;
+    /// use criterion2::async_executor::FuturesExecutor;
+    /// use std::time::Instant;
+    ///
+    /// async fn foo() {
+    ///     // ...
+    /// }
+    ///
+    /// fn bench(c: &mut Criterion) {
+    ///     c.bench_function("iter", move |b| {
+    ///         b.to_async(FuturesExecutor).iter_custom(|iters| {
+    ///             async move {
+    ///                 let start = Instant::now();
+    ///                 for _i in 0..iters {
+    ///                     black_box(foo().await);
+    ///                 }
+    ///                 start.elapsed()
+    ///             }
+    ///         })
+    ///     });
+    /// }
+    ///
+    /// criterion_group!(benches, bench);
+    /// criterion_main!(benches);
+    /// ```
+    ///
+    #[inline(never)]
+    pub fn iter_custom<R, F>(&mut self, mut routine: R)
+    where
+        R: FnMut(u64) -> F,
+        F: Future<Output = M::Value>,
+    {
+        let AsyncBencher { b, runner } = self;
+        runner.block_on(async {
+            b.iterated = true;
+            let time_start = Instant::now();
+            b.value = routine(b.iters).await;
+            b.elapsed_time = time_start.elapsed();
+        })
+    }
+
+    #[doc(hidden)]
+    pub fn iter_with_setup<I, O, S, R, F>(&mut self, setup: S, routine: R)
+    where
+        S: FnMut() -> I,
+        R: FnMut(I) -> F,
+        F: Future<Output = O>,
+    {
+        self.iter_batched(setup, routine, BatchSize::PerIteration);
+    }
+
+    /// Times a `routine` by collecting its output on each iteration. This avoids timing the
+    /// destructor of the value returned by `routine`.
+    ///
+    /// WARNING: This requires `O(iters * mem::size_of::<O>())` of memory, and `iters` is not under the
+    /// control of the caller. If this causes out-of-memory errors, use `iter_batched` instead.
+    ///
+    /// # Timing model
+    ///
+    /// ``` text
+    /// elapsed = Instant::now + iters * (routine) + Iterator::collect::<Vec<O>>
+    /// ```
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use criterion2::*;
+    /// use criterion2::async_executor::FuturesExecutor;
+    ///
+    /// async fn create_vector() -> Vec<u64> {
+    ///     # vec![]
+    ///     // ...
+    /// }
+    ///
+    /// fn bench(c: &mut Criterion) {
+    ///     c.bench_function("with_drop", move |b| {
+    ///         // This will avoid timing the Vec::drop.
+    ///         b.to_async(FuturesExecutor).iter_with_large_drop(|| async { create_vector().await })
+    ///     });
+    /// }
+    ///
+    /// criterion_group!(benches, bench);
+    /// criterion_main!(benches);
+    /// ```
+    ///
+    pub fn iter_with_large_drop<O, R, F>(&mut self, mut routine: R)
+    where
+        R: FnMut() -> F,
+        F: Future<Output = O>,
+    {
+        self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput);
+    }
+
+    #[doc(hidden)]
+    pub fn iter_with_large_setup<I, O, S, R, F>(&mut self, setup: S, routine: R)
+    where
+        S: FnMut() -> I,
+        R: FnMut(I) -> F,
+        F: Future<Output = O>,
+    {
+        self.iter_batched(setup, routine, BatchSize::NumBatches(1));
+    }
+
+    /// Times a `routine` that requires some input by generating a batch of input, then timing the
+    /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for
+    /// details on choosing the batch size. Use this when the routine must consume its input.
+    ///
+    /// For example, use this loop to benchmark sorting algorithms, because they require unsorted
+    /// data on each iteration.
+    ///
+    /// # Timing model
+    ///
+    /// ```text
+    /// elapsed = (Instant::now * num_batches) + (iters * (routine + O::drop)) + Vec::extend
+    /// ```
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use criterion2::*;
+    /// use criterion2::async_executor::FuturesExecutor;
+    ///
+    /// fn create_scrambled_data() -> Vec<u64> {
+    ///     # vec![]
+    ///     // ...
+    /// }
+    ///
+    /// // The sorting algorithm to test
+    /// async fn sort(data: &mut [u64]) {
+    ///     // ...
+    /// }
+    ///
+    /// fn bench(c: &mut Criterion) {
+    ///     let data = create_scrambled_data();
+    ///
+    ///     c.bench_function("with_setup", move |b| {
+    ///         // This will avoid timing the clone call.
+    ///         b.to_async(FuturesExecutor).iter_batched(|| data.clone(), |mut data| async move { sort(&mut data).await }, BatchSize::SmallInput)
+    ///     });
+    /// }
+    ///
+    /// criterion_group!(benches, bench);
+    /// criterion_main!(benches);
+    /// ```
+    ///
+    #[inline(never)]
+    pub fn iter_batched<I, O, S, R, F>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
+    where
+        S: FnMut() -> I,
+        R: FnMut(I) -> F,
+        F: Future<Output = O>,
+    {
+        let AsyncBencher { b, runner } = self;
+        runner.block_on(async {
+            b.iterated = true;
+            let batch_size = size.iters_per_batch(b.iters);
+            assert!(batch_size != 0, "Batch size must not be zero.");
+            let time_start = Instant::now();
+            b.value = b.measurement.zero();
+
+            if batch_size == 1 {
+                for _ in 0..b.iters {
+                    let input = black_box(setup());
+
+                    let start = b.measurement.start();
+                    let output = routine(input).await;
+                    let end = b.measurement.end(start);
+                    b.value = b.measurement.add(&b.value, &end);
+
+                    drop(black_box(output));
+                }
+            } else {
+                let mut iteration_counter = 0;
+
+                while iteration_counter < b.iters {
+                    let batch_size = ::std::cmp::min(batch_size, b.iters - iteration_counter);
+
+                    let inputs = black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
+                    let mut outputs = Vec::with_capacity(batch_size as usize);
+
+                    let start = b.measurement.start();
+                    // Can't use .extend here like the sync version does
+                    for input in inputs {
+                        outputs.push(routine(input).await);
+                    }
+                    let end = b.measurement.end(start);
+                    b.value = b.measurement.add(&b.value, &end);
+
+                    black_box(outputs);
+
+                    iteration_counter += batch_size;
+                }
+            }
+
+            b.elapsed_time = time_start.elapsed();
+        })
+    }
+
+    /// Times a `routine` that requires some input by generating a batch of input, then timing the
+    /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for
+    /// details on choosing the batch size. Use this when the routine should accept the input by
+    /// mutable reference.
+    ///
+    /// For example, use this loop to benchmark sorting algorithms, because they require unsorted
+    /// data on each iteration.
+    ///
+    /// # Timing model
+    ///
+    /// ```text
+    /// elapsed = (Instant::now * num_batches) + (iters * routine) + Vec::extend
+    /// ```
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use criterion2::*;
+    /// use criterion2::async_executor::FuturesExecutor;
+    ///
+    /// fn create_scrambled_data() -> Vec<u64> {
+    ///     # vec![]
+    ///     // ...
+    /// }
+    ///
+    /// // The sorting algorithm to test
+    /// async fn sort(data: &mut [u64]) {
+    ///     // ...
+    /// }
+    ///
+    /// fn bench(c: &mut Criterion) {
+    ///     let data = create_scrambled_data();
+    ///
+    ///     c.bench_function("with_setup", move |b| {
+    ///         // This will avoid timing the clone call.
+    ///         b.to_async(FuturesExecutor).iter_batched(|| data.clone(), |mut data| async move { sort(&mut data).await }, BatchSize::SmallInput)
+    ///     });
+    /// }
+    ///
+    /// criterion_group!(benches, bench);
+    /// criterion_main!(benches);
+    /// ```
+    ///
+    #[inline(never)]
+    pub fn iter_batched_ref<I, O, S, R, F>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
+    where
+        S: FnMut() -> I,
+        R: FnMut(&mut I) -> F,
+        F: Future<Output = O>,
+    {
+        let AsyncBencher { b, runner } = self;
+        runner.block_on(async {
+            b.iterated = true;
+            let batch_size = size.iters_per_batch(b.iters);
+            assert!(batch_size != 0, "Batch size must not be zero.");
+            let time_start = Instant::now();
+            b.value = b.measurement.zero();
+
+            if batch_size == 1 {
+                for _ in 0..b.iters {
+                    let mut input = black_box(setup());
+
+                    let start = b.measurement.start();
+                    let output = routine(&mut input).await;
+                    let end = b.measurement.end(start);
+                    b.value = b.measurement.add(&b.value, &end);
+
+                    drop(black_box(output));
+                    drop(black_box(input));
+                }
+            } else {
+                let mut iteration_counter = 0;
+
+                while iteration_counter < b.iters {
+                    let batch_size = ::std::cmp::min(batch_size, b.iters - iteration_counter);
+
+                    let inputs = black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
+                    let mut outputs = Vec::with_capacity(batch_size as usize);
+
+                    let start = b.measurement.start();
+                    // Can't use .extend here like the sync version does
+                    for mut input in inputs {
+                        outputs.push(routine(&mut input).await);
+                    }
+                    let end = b.measurement.end(start);
+                    b.value = b.measurement.add(&b.value, &end);
+
+                    black_box(outputs);
+
+                    iteration_counter += batch_size;
+                }
+            }
+            b.elapsed_time = time_start.elapsed();
+        });
+    }
+}
diff --git a/src/benchmark.rs b/src/benchmark.rs
index 3a1cb001..2d9bee21 100644
--- a/src/benchmark.rs
+++ b/src/benchmark.rs
@@ -39,9 +39,7 @@ impl PartialBenchmarkConfig {
             noise_threshold: self.noise_threshold.unwrap_or(defaults.noise_threshold),
             nresamples: self.nresamples.unwrap_or(defaults.nresamples),
             sample_size: self.sample_size.unwrap_or(defaults.sample_size),
-            significance_level: self
-                .significance_level
-                .unwrap_or(defaults.significance_level),
+            significance_level: self.significance_level.unwrap_or(defaults.significance_level),
             warm_up_time: self.warm_up_time.unwrap_or(defaults.warm_up_time),
             sampling_mode: self.sampling_mode.unwrap_or(defaults.sampling_mode),
             quick_mode: self.quick_mode.unwrap_or(defaults.quick_mode),
diff --git a/src/benchmark_group.rs b/src/benchmark_group.rs
index 687fb2f2..3c3b67c9 100644
--- a/src/benchmark_group.rs
+++ b/src/benchmark_group.rs
@@ -16,8 +16,7 @@ use std::time::Duration;
 /// # Examples:
 ///
 /// ```no_run
-/// #[macro_use] extern crate criterion;
-/// use self::criterion::*;
+/// use self::criterion2::*;
 /// use std::time::Duration;
 ///
 /// fn bench_simple(c: &mut
Criterion) { @@ -26,7 +25,7 @@ use std::time::Duration; /// // Now we can perform benchmarks with this group /// group.bench_function("Bench 1", |b| b.iter(|| 1 )); /// group.bench_function("Bench 2", |b| b.iter(|| 2 )); -/// +/// /// // It's recommended to call group.finish() explicitly at the end, but if you don't it will /// // be called automatically when the group is dropped. /// group.finish(); @@ -46,13 +45,13 @@ use std::time::Duration; /// |b, (p_x, p_y)| b.iter(|| p_x * p_y)); /// } /// } -/// +/// /// group.finish(); /// } /// /// fn bench_throughput(c: &mut Criterion) { /// let mut group = c.benchmark_group("Summation"); -/// +/// /// for size in [1024, 2048, 4096].iter() { /// // Generate input of an appropriate size... /// let input = vec![1u64, *size]; @@ -295,9 +294,7 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> { ); id.ensure_directory_name_unique(&self.criterion.all_directories); - self.criterion - .all_directories - .insert(id.as_directory_name().to_owned()); + self.criterion.all_directories.insert(id.as_directory_name().to_owned()); id.ensure_title_unique(&self.criterion.all_titles); self.criterion.all_titles.insert(id.as_title().to_owned()); @@ -371,13 +368,10 @@ impl<'a, M: Measurement> Drop for BenchmarkGroup<'a, M> { // I don't really like having a bunch of non-trivial code in drop, but this is the only way // to really write linear types like this in Rust... if let Some(conn) = &mut self.criterion.connection { - conn.send(&OutgoingMessage::FinishedBenchmarkGroup { - group: &self.group_name, - }) - .unwrap(); - - conn.serve_value_formatter(self.criterion.measurement.formatter()) + conn.send(&OutgoingMessage::FinishedBenchmarkGroup { group: &self.group_name }) .unwrap(); + + conn.serve_value_formatter(self.criterion.measurement.formatter()).unwrap(); } if self.all_ids.len() > 1 && self.any_matched && self.criterion.mode.is_benchmark() { @@ -415,7 +409,7 @@ impl BenchmarkId { /// /// # Examples /// ``` - /// # use criterion::{BenchmarkId, Criterion}; + /// # use criterion2::{BenchmarkId, Criterion}; /// // A basic benchmark ID is typically constructed from a constant string and a simple /// // parameter /// let basic_id = BenchmarkId::new("my_id", 5); @@ -448,24 +442,15 @@ impl BenchmarkId { /// Construct a new benchmark ID from just a parameter value. Use this when benchmarking a /// single function with a variety of different inputs. pub fn from_parameter(parameter: P) -> BenchmarkId { - BenchmarkId { - function_name: None, - parameter: Some(format!("{}", parameter)), - } + BenchmarkId { function_name: None, parameter: Some(format!("{}", parameter)) } } pub(crate) fn no_function() -> BenchmarkId { - BenchmarkId { - function_name: None, - parameter: None, - } + BenchmarkId { function_name: None, parameter: None } } pub(crate) fn no_function_with_input(parameter: P) -> BenchmarkId { - BenchmarkId { - function_name: None, - parameter: Some(format!("{}", parameter)), - } + BenchmarkId { function_name: None, parameter: Some(format!("{}", parameter)) } } } @@ -487,14 +472,8 @@ impl IntoBenchmarkId for BenchmarkId { impl> IntoBenchmarkId for S { fn into_benchmark_id(self) -> BenchmarkId { let function_name = self.into(); - assert!( - !function_name.is_empty(), - "Function name must not be empty." 
- ); + assert!(!function_name.is_empty(), "Function name must not be empty."); - BenchmarkId { - function_name: Some(function_name), - parameter: None, - } + BenchmarkId { function_name: Some(function_name), parameter: None } } } diff --git a/src/connection.rs b/src/connection.rs index 53706d60..4f85912b 100644 --- a/src/connection.rs +++ b/src/connection.rs @@ -1,385 +1,345 @@ -use crate::report::BenchmarkId as InternalBenchmarkId; -use crate::Throughput; -use std::cell::RefCell; -use std::convert::TryFrom; -use std::io::{Read, Write}; -use std::mem::size_of; -use std::net::TcpStream; - -#[derive(Debug)] -pub enum MessageError { - Deserialization(ciborium::de::Error), - Serialization(ciborium::ser::Error), - Io(std::io::Error), -} -impl From> for MessageError { - fn from(other: ciborium::de::Error) -> Self { - MessageError::Deserialization(other) - } -} -impl From> for MessageError { - fn from(other: ciborium::ser::Error) -> Self { - MessageError::Serialization(other) - } -} -impl From for MessageError { - fn from(other: std::io::Error) -> Self { - MessageError::Io(other) - } -} -impl std::fmt::Display for MessageError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - MessageError::Deserialization(error) => write!( - f, - "Failed to deserialize message to Criterion.rs benchmark:\n{}", - error - ), - MessageError::Serialization(error) => write!( - f, - "Failed to serialize message to Criterion.rs benchmark:\n{}", - error - ), - MessageError::Io(error) => write!( - f, - "Failed to read or write message to Criterion.rs benchmark:\n{}", - error - ), - } - } -} -impl std::error::Error for MessageError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - MessageError::Deserialization(err) => Some(err), - MessageError::Serialization(err) => Some(err), - MessageError::Io(err) => Some(err), - } - } -} - -// Use str::len as a const fn once we bump MSRV over 1.39. -const RUNNER_MAGIC_NUMBER: &str = "cargo-criterion"; -const RUNNER_HELLO_SIZE: usize = 15 //RUNNER_MAGIC_NUMBER.len() // magic number - + (size_of::() * 3); // version number - -const BENCHMARK_MAGIC_NUMBER: &str = "Criterion"; -const BENCHMARK_HELLO_SIZE: usize = 9 //BENCHMARK_MAGIC_NUMBER.len() // magic number - + (size_of::() * 3) // version number - + size_of::() // protocol version - + size_of::(); // protocol format -const PROTOCOL_VERSION: u16 = 1; -const PROTOCOL_FORMAT: u16 = 1; - -#[derive(Debug)] -struct InnerConnection { - socket: TcpStream, - receive_buffer: Vec, - send_buffer: Vec, - // runner_version: [u8; 3], -} -impl InnerConnection { - pub fn new(mut socket: TcpStream) -> Result { - // read the runner-hello - let mut hello_buf = [0u8; RUNNER_HELLO_SIZE]; - socket.read_exact(&mut hello_buf)?; - assert_eq!( - &hello_buf[0..RUNNER_MAGIC_NUMBER.len()], - RUNNER_MAGIC_NUMBER.as_bytes(), - "Not connected to cargo-criterion." 
- ); - - let i = RUNNER_MAGIC_NUMBER.len(); - let runner_version = [hello_buf[i], hello_buf[i + 1], hello_buf[i + 2]]; - - info!("Runner version: {:?}", runner_version); - - // now send the benchmark-hello - let mut hello_buf = [0u8; BENCHMARK_HELLO_SIZE]; - hello_buf[0..BENCHMARK_MAGIC_NUMBER.len()] - .copy_from_slice(BENCHMARK_MAGIC_NUMBER.as_bytes()); - let mut i = BENCHMARK_MAGIC_NUMBER.len(); - hello_buf[i] = env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(); - hello_buf[i + 1] = env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(); - hello_buf[i + 2] = env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(); - i += 3; - hello_buf[i..i + 2].clone_from_slice(&PROTOCOL_VERSION.to_be_bytes()); - i += 2; - hello_buf[i..i + 2].clone_from_slice(&PROTOCOL_FORMAT.to_be_bytes()); - - socket.write_all(&hello_buf)?; - - Ok(InnerConnection { - socket, - receive_buffer: vec![], - send_buffer: vec![], - // runner_version, - }) - } - - #[allow(dead_code)] - pub fn recv(&mut self) -> Result { - let mut length_buf = [0u8; 4]; - self.socket.read_exact(&mut length_buf)?; - let length = u32::from_be_bytes(length_buf); - self.receive_buffer.resize(length as usize, 0u8); - self.socket.read_exact(&mut self.receive_buffer)?; - let value = ciborium::de::from_reader(&self.receive_buffer[..])?; - Ok(value) - } - - pub fn send(&mut self, message: &OutgoingMessage) -> Result<(), MessageError> { - self.send_buffer.truncate(0); - ciborium::ser::into_writer(message, &mut self.send_buffer)?; - let size = u32::try_from(self.send_buffer.len()).unwrap(); - let length_buf = size.to_be_bytes(); - self.socket.write_all(&length_buf)?; - self.socket.write_all(&self.send_buffer)?; - Ok(()) - } -} - -/// This is really just a holder to allow us to send messages through a shared reference to the -/// connection. -#[derive(Debug)] -pub struct Connection { - inner: RefCell, -} -impl Connection { - pub fn new(socket: TcpStream) -> Result { - Ok(Connection { - inner: RefCell::new(InnerConnection::new(socket)?), - }) - } - - #[allow(dead_code)] - pub fn recv(&self) -> Result { - self.inner.borrow_mut().recv() - } - - pub fn send(&self, message: &OutgoingMessage) -> Result<(), MessageError> { - self.inner.borrow_mut().send(message) - } - - pub fn serve_value_formatter( - &self, - formatter: &dyn crate::measurement::ValueFormatter, - ) -> Result<(), MessageError> { - loop { - let response = match self.recv()? 
{ - IncomingMessage::FormatValue { value } => OutgoingMessage::FormattedValue { - value: formatter.format_value(value), - }, - IncomingMessage::FormatThroughput { value, throughput } => { - OutgoingMessage::FormattedValue { - value: formatter.format_throughput(&throughput, value), - } - } - IncomingMessage::ScaleValues { - typical_value, - mut values, - } => { - let unit = formatter.scale_values(typical_value, &mut values); - OutgoingMessage::ScaledValues { - unit, - scaled_values: values, - } - } - IncomingMessage::ScaleThroughputs { - typical_value, - throughput, - mut values, - } => { - let unit = formatter.scale_throughputs(typical_value, &throughput, &mut values); - OutgoingMessage::ScaledValues { - unit, - scaled_values: values, - } - } - IncomingMessage::ScaleForMachines { mut values } => { - let unit = formatter.scale_for_machines(&mut values); - OutgoingMessage::ScaledValues { - unit, - scaled_values: values, - } - } - IncomingMessage::Continue => break, - _ => panic!(), - }; - self.send(&response)?; - } - Ok(()) - } -} - -/// Enum defining the messages we can receive -#[derive(Debug, Deserialize)] -pub enum IncomingMessage { - // Value formatter requests - FormatValue { - value: f64, - }, - FormatThroughput { - value: f64, - throughput: Throughput, - }, - ScaleValues { - typical_value: f64, - values: Vec, - }, - ScaleThroughputs { - typical_value: f64, - values: Vec, - throughput: Throughput, - }, - ScaleForMachines { - values: Vec, - }, - Continue, - - __Other, -} - -/// Enum defining the messages we can send -#[derive(Debug, Serialize)] -pub enum OutgoingMessage<'a> { - BeginningBenchmarkGroup { - group: &'a str, - }, - FinishedBenchmarkGroup { - group: &'a str, - }, - BeginningBenchmark { - id: RawBenchmarkId, - }, - SkippingBenchmark { - id: RawBenchmarkId, - }, - Warmup { - id: RawBenchmarkId, - nanos: f64, - }, - MeasurementStart { - id: RawBenchmarkId, - sample_count: u64, - estimate_ns: f64, - iter_count: u64, - }, - MeasurementComplete { - id: RawBenchmarkId, - iters: &'a [f64], - times: &'a [f64], - plot_config: PlotConfiguration, - sampling_method: SamplingMethod, - benchmark_config: BenchmarkConfig, - }, - // value formatter responses - FormattedValue { - value: String, - }, - ScaledValues { - scaled_values: Vec, - unit: &'a str, - }, -} - -// Also define serializable variants of certain things, either to avoid leaking -// serializability into the public interface or because the serialized form -// is a bit different from the regular one. 
- -#[derive(Debug, Serialize)] -pub struct RawBenchmarkId { - group_id: String, - function_id: Option, - value_str: Option, - throughput: Vec, -} -impl From<&InternalBenchmarkId> for RawBenchmarkId { - fn from(other: &InternalBenchmarkId) -> RawBenchmarkId { - RawBenchmarkId { - group_id: other.group_id.clone(), - function_id: other.function_id.clone(), - value_str: other.value_str.clone(), - throughput: other.throughput.iter().cloned().collect(), - } - } -} - -#[derive(Debug, Serialize)] -pub enum AxisScale { - Linear, - Logarithmic, -} -impl From for AxisScale { - fn from(other: crate::AxisScale) -> Self { - match other { - crate::AxisScale::Linear => AxisScale::Linear, - crate::AxisScale::Logarithmic => AxisScale::Logarithmic, - } - } -} - -#[derive(Debug, Serialize)] -pub struct PlotConfiguration { - summary_scale: AxisScale, -} -impl From<&crate::PlotConfiguration> for PlotConfiguration { - fn from(other: &crate::PlotConfiguration) -> Self { - PlotConfiguration { - summary_scale: other.summary_scale.into(), - } - } -} - -#[derive(Debug, Serialize)] -struct Duration { - secs: u64, - nanos: u32, -} -impl From for Duration { - fn from(other: std::time::Duration) -> Self { - Duration { - secs: other.as_secs(), - nanos: other.subsec_nanos(), - } - } -} - -#[derive(Debug, Serialize)] -pub struct BenchmarkConfig { - confidence_level: f64, - measurement_time: Duration, - noise_threshold: f64, - nresamples: usize, - sample_size: usize, - significance_level: f64, - warm_up_time: Duration, -} -impl From<&crate::benchmark::BenchmarkConfig> for BenchmarkConfig { - fn from(other: &crate::benchmark::BenchmarkConfig) -> Self { - BenchmarkConfig { - confidence_level: other.confidence_level, - measurement_time: other.measurement_time.into(), - noise_threshold: other.noise_threshold, - nresamples: other.nresamples, - sample_size: other.sample_size, - significance_level: other.significance_level, - warm_up_time: other.warm_up_time.into(), - } - } -} - -/// Currently not used; defined for forwards compatibility with cargo-criterion. 
-#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
-pub enum SamplingMethod {
-    Linear,
-    Flat,
-}
-impl From<crate::ActualSamplingMode> for SamplingMethod {
-    fn from(other: crate::ActualSamplingMode) -> Self {
-        match other {
-            crate::ActualSamplingMode::Flat => SamplingMethod::Flat,
-            crate::ActualSamplingMode::Linear => SamplingMethod::Linear,
-        }
-    }
-}
+use std::cell::RefCell;
+use std::convert::TryFrom;
+use std::io::{Read, Write};
+use std::mem::size_of;
+use std::net::TcpStream;
+
+use serde::{Deserialize, Serialize};
+
+use crate::report::BenchmarkId as InternalBenchmarkId;
+use crate::Throughput;
+
+#[derive(Debug)]
+pub enum MessageError {
+    Deserialization(ciborium::de::Error<std::io::Error>),
+    Serialization(ciborium::ser::Error<std::io::Error>),
+    Io(std::io::Error),
+}
+impl From<ciborium::de::Error<std::io::Error>> for MessageError {
+    fn from(other: ciborium::de::Error<std::io::Error>) -> Self {
+        MessageError::Deserialization(other)
+    }
+}
+impl From<ciborium::ser::Error<std::io::Error>> for MessageError {
+    fn from(other: ciborium::ser::Error<std::io::Error>) -> Self {
+        MessageError::Serialization(other)
+    }
+}
+impl From<std::io::Error> for MessageError {
+    fn from(other: std::io::Error) -> Self {
+        MessageError::Io(other)
+    }
+}
+impl std::fmt::Display for MessageError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            MessageError::Deserialization(error) => {
+                write!(f, "Failed to deserialize message to Criterion.rs benchmark:\n{}", error)
+            }
+            MessageError::Serialization(error) => {
+                write!(f, "Failed to serialize message to Criterion.rs benchmark:\n{}", error)
+            }
+            MessageError::Io(error) => {
+                write!(f, "Failed to read or write message to Criterion.rs benchmark:\n{}", error)
+            }
+        }
+    }
+}
+impl std::error::Error for MessageError {
+    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+        match self {
+            MessageError::Deserialization(err) => Some(err),
+            MessageError::Serialization(err) => Some(err),
+            MessageError::Io(err) => Some(err),
+        }
+    }
+}
+
+// Use str::len as a const fn once we bump MSRV over 1.39.
+const RUNNER_MAGIC_NUMBER: &str = "cargo-criterion";
+const RUNNER_HELLO_SIZE: usize = 15 //RUNNER_MAGIC_NUMBER.len() // magic number
+    + (size_of::<u8>() * 3); // version number
+
+const BENCHMARK_MAGIC_NUMBER: &str = "Criterion";
+const BENCHMARK_HELLO_SIZE: usize = 9 //BENCHMARK_MAGIC_NUMBER.len() // magic number
+    + (size_of::<u8>() * 3) // version number
+    + size_of::<u16>() // protocol version
+    + size_of::<u16>(); // protocol format
+const PROTOCOL_VERSION: u16 = 1;
+const PROTOCOL_FORMAT: u16 = 1;
+
+#[derive(Debug)]
+struct InnerConnection {
+    socket: TcpStream,
+    receive_buffer: Vec<u8>,
+    send_buffer: Vec<u8>,
+    // runner_version: [u8; 3],
+}
+impl InnerConnection {
+    pub fn new(mut socket: TcpStream) -> Result<Self, MessageError> {
+        // read the runner-hello
+        let mut hello_buf = [0u8; RUNNER_HELLO_SIZE];
+        socket.read_exact(&mut hello_buf)?;
+        assert_eq!(
+            &hello_buf[0..RUNNER_MAGIC_NUMBER.len()],
+            RUNNER_MAGIC_NUMBER.as_bytes(),
+            "Not connected to cargo-criterion."
+        );
+
+        let i = RUNNER_MAGIC_NUMBER.len();
+        let runner_version = [hello_buf[i], hello_buf[i + 1], hello_buf[i + 2]];
+
+        info!("Runner version: {:?}", runner_version);
+
+        // now send the benchmark-hello
+        let mut hello_buf = [0u8; BENCHMARK_HELLO_SIZE];
+        hello_buf[0..BENCHMARK_MAGIC_NUMBER.len()]
+            .copy_from_slice(BENCHMARK_MAGIC_NUMBER.as_bytes());
+        let mut i = BENCHMARK_MAGIC_NUMBER.len();
+        hello_buf[i] = env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap();
+        hello_buf[i + 1] = env!("CARGO_PKG_VERSION_MINOR").parse().unwrap();
+        hello_buf[i + 2] = env!("CARGO_PKG_VERSION_PATCH").parse().unwrap();
+        i += 3;
+        hello_buf[i..i + 2].clone_from_slice(&PROTOCOL_VERSION.to_be_bytes());
+        i += 2;
+        hello_buf[i..i + 2].clone_from_slice(&PROTOCOL_FORMAT.to_be_bytes());
+
+        socket.write_all(&hello_buf)?;
+
+        Ok(InnerConnection {
+            socket,
+            receive_buffer: vec![],
+            send_buffer: vec![],
+            // runner_version,
+        })
+    }
+
+    #[allow(dead_code)]
+    pub fn recv(&mut self) -> Result<IncomingMessage, MessageError> {
+        let mut length_buf = [0u8; 4];
+        self.socket.read_exact(&mut length_buf)?;
+        let length = u32::from_be_bytes(length_buf);
+        self.receive_buffer.resize(length as usize, 0u8);
+        self.socket.read_exact(&mut self.receive_buffer)?;
+        let value = ciborium::de::from_reader(&self.receive_buffer[..])?;
+        Ok(value)
+    }
+
+    pub fn send(&mut self, message: &OutgoingMessage) -> Result<(), MessageError> {
+        self.send_buffer.truncate(0);
+        ciborium::ser::into_writer(message, &mut self.send_buffer)?;
+        let size = u32::try_from(self.send_buffer.len()).unwrap();
+        let length_buf = size.to_be_bytes();
+        self.socket.write_all(&length_buf)?;
+        self.socket.write_all(&self.send_buffer)?;
+        Ok(())
+    }
+}
+
+/// This is really just a holder to allow us to send messages through a shared reference to the
+/// connection.
+#[derive(Debug)]
+pub struct Connection {
+    inner: RefCell<InnerConnection>,
+}
+impl Connection {
+    pub fn new(socket: TcpStream) -> Result<Self, MessageError> {
+        Ok(Connection { inner: RefCell::new(InnerConnection::new(socket)?) })
+    }
+
+    #[allow(dead_code)]
+    pub fn recv(&self) -> Result<IncomingMessage, MessageError> {
+        self.inner.borrow_mut().recv()
+    }
+
+    pub fn send(&self, message: &OutgoingMessage) -> Result<(), MessageError> {
+        self.inner.borrow_mut().send(message)
+    }
+
+    pub fn serve_value_formatter(
+        &self,
+        formatter: &dyn crate::measurement::ValueFormatter,
+    ) -> Result<(), MessageError> {
+        loop {
+            let response = match self.recv()? {
+                IncomingMessage::FormatValue { value } => {
+                    OutgoingMessage::FormattedValue { value: formatter.format_value(value) }
+                }
+                IncomingMessage::FormatThroughput { value, throughput } => {
+                    OutgoingMessage::FormattedValue {
+                        value: formatter.format_throughput(&throughput, value),
+                    }
+                }
+                IncomingMessage::ScaleValues { typical_value, mut values } => {
+                    let unit = formatter.scale_values(typical_value, &mut values);
+                    OutgoingMessage::ScaledValues { unit, scaled_values: values }
+                }
+                IncomingMessage::ScaleThroughputs { typical_value, throughput, mut values } => {
+                    let unit = formatter.scale_throughputs(typical_value, &throughput, &mut values);
+                    OutgoingMessage::ScaledValues { unit, scaled_values: values }
+                }
+                IncomingMessage::ScaleForMachines { mut values } => {
+                    let unit = formatter.scale_for_machines(&mut values);
+                    OutgoingMessage::ScaledValues { unit, scaled_values: values }
+                }
+                IncomingMessage::Continue => break,
+                _ => panic!(),
+            };
+            self.send(&response)?;
+        }
+        Ok(())
+    }
+}
+
+/// Enum defining the messages we can receive
+#[derive(Debug, Deserialize)]
+pub enum IncomingMessage {
+    // Value formatter requests
+    FormatValue { value: f64 },
+    FormatThroughput { value: f64, throughput: Throughput },
+    ScaleValues { typical_value: f64, values: Vec<f64> },
+    ScaleThroughputs { typical_value: f64, values: Vec<f64>, throughput: Throughput },
+    ScaleForMachines { values: Vec<f64> },
+    Continue,
+
+    __Other,
+}
+
+/// Enum defining the messages we can send
+#[derive(Debug, Serialize)]
+pub enum OutgoingMessage<'a> {
+    BeginningBenchmarkGroup {
+        group: &'a str,
+    },
+    FinishedBenchmarkGroup {
+        group: &'a str,
+    },
+    BeginningBenchmark {
+        id: RawBenchmarkId,
+    },
+    SkippingBenchmark {
+        id: RawBenchmarkId,
+    },
+    Warmup {
+        id: RawBenchmarkId,
+        nanos: f64,
+    },
+    MeasurementStart {
+        id: RawBenchmarkId,
+        sample_count: u64,
+        estimate_ns: f64,
+        iter_count: u64,
+    },
+    MeasurementComplete {
+        id: RawBenchmarkId,
+        iters: &'a [f64],
+        times: &'a [f64],
+        plot_config: PlotConfiguration,
+        sampling_method: SamplingMethod,
+        benchmark_config: BenchmarkConfig,
+    },
+    // value formatter responses
+    FormattedValue {
+        value: String,
+    },
+    ScaledValues {
+        scaled_values: Vec<f64>,
+        unit: &'a str,
+    },
+}
+
+// Also define serializable variants of certain things, either to avoid leaking
+// serializability into the public interface or because the serialized form
+// is a bit different from the regular one.
+
+#[derive(Debug, Serialize)]
+pub struct RawBenchmarkId {
+    group_id: String,
+    function_id: Option<String>,
+    value_str: Option<String>,
+    throughput: Vec<Throughput>,
+}
+impl From<&InternalBenchmarkId> for RawBenchmarkId {
+    fn from(other: &InternalBenchmarkId) -> RawBenchmarkId {
+        RawBenchmarkId {
+            group_id: other.group_id.clone(),
+            function_id: other.function_id.clone(),
+            value_str: other.value_str.clone(),
+            throughput: other.throughput.iter().cloned().collect(),
+        }
+    }
+}
+
+#[derive(Debug, Serialize)]
+pub enum AxisScale {
+    Linear,
+    Logarithmic,
+}
+impl From<crate::AxisScale> for AxisScale {
+    fn from(other: crate::AxisScale) -> Self {
+        match other {
+            crate::AxisScale::Linear => AxisScale::Linear,
+            crate::AxisScale::Logarithmic => AxisScale::Logarithmic,
+        }
+    }
+}
+
+#[derive(Debug, Serialize)]
+pub struct PlotConfiguration {
+    summary_scale: AxisScale,
+}
+impl From<&crate::PlotConfiguration> for PlotConfiguration {
+    fn from(other: &crate::PlotConfiguration) -> Self {
+        PlotConfiguration { summary_scale: other.summary_scale.into() }
+    }
+}
+
+#[derive(Debug, Serialize)]
+struct Duration {
+    secs: u64,
+    nanos: u32,
+}
+impl From<std::time::Duration> for Duration {
+    fn from(other: std::time::Duration) -> Self {
+        Duration { secs: other.as_secs(), nanos: other.subsec_nanos() }
+    }
+}
+
+#[derive(Debug, Serialize)]
+pub struct BenchmarkConfig {
+    confidence_level: f64,
+    measurement_time: Duration,
+    noise_threshold: f64,
+    nresamples: usize,
+    sample_size: usize,
+    significance_level: f64,
+    warm_up_time: Duration,
+}
+impl From<&crate::benchmark::BenchmarkConfig> for BenchmarkConfig {
+    fn from(other: &crate::benchmark::BenchmarkConfig) -> Self {
+        BenchmarkConfig {
+            confidence_level: other.confidence_level,
+            measurement_time: other.measurement_time.into(),
+            noise_threshold: other.noise_threshold,
+            nresamples: other.nresamples,
+            sample_size: other.sample_size,
+            significance_level: other.significance_level,
+            warm_up_time: other.warm_up_time.into(),
+        }
+    }
+}
+
+/// Currently not used; defined for forwards compatibility with cargo-criterion.
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+pub enum SamplingMethod {
+    Linear,
+    Flat,
+}
+impl From<crate::ActualSamplingMode> for SamplingMethod {
+    fn from(other: crate::ActualSamplingMode) -> Self {
+        match other {
+            crate::ActualSamplingMode::Flat => SamplingMethod::Flat,
+            crate::ActualSamplingMode::Linear => SamplingMethod::Linear,
+        }
+    }
+}
diff --git a/src/csv_report.rs b/src/csv_report.rs
index 18c608b1..b427288c 100644
--- a/src/csv_report.rs
+++ b/src/csv_report.rs
@@ -1,10 +1,13 @@
+use std::io::Write;
+use std::path::Path;
+
+use csv::Writer;
+use serde::Serialize;
+
 use crate::error::Result;
 use crate::measurement::ValueFormatter;
 use crate::report::{BenchmarkId, MeasurementData, Report, ReportContext};
 use crate::Throughput;
-use csv::Writer;
-use std::io::Write;
-use std::path::Path;
 
 #[derive(Serialize)]
 struct CsvRow<'a> {
diff --git a/src/estimate.rs b/src/estimate.rs
index 8a79d27a..299bac09 100644
--- a/src/estimate.rs
+++ b/src/estimate.rs
@@ -1,3 +1,4 @@
+use serde::{Deserialize, Serialize};
 use std::fmt;
 
 use crate::stats::Distribution;
diff --git a/src/format.rs b/src/format.rs
index 74047643..45314055 100644
--- a/src/format.rs
+++ b/src/format.rs
@@ -61,15 +61,9 @@ pub fn iter_count(iterations: u64) -> String {
     } else if iterations < 1_000_000_000 {
         format!("{:.0}M iterations", (iterations as f64) / (1000.0 * 1000.0))
     } else if iterations < 10_000_000_000 {
-        format!(
-            "{:.1}B iterations",
-            (iterations as f64) / (1000.0 * 1000.0 * 1000.0)
-        )
+        format!("{:.1}B iterations", (iterations as f64) / (1000.0 * 1000.0 * 1000.0))
     } else {
-        format!(
-            "{:.0}B iterations",
-            (iterations as f64) / (1000.0 * 1000.0 * 1000.0)
-        )
+        format!("{:.0}B iterations", (iterations as f64) / (1000.0 * 1000.0 * 1000.0))
     }
 }
diff --git a/src/fs.rs b/src/fs.rs
index f47508be..c11f1a77 100644
--- a/src/fs.rs
+++ b/src/fs.rs
@@ -15,16 +15,12 @@ where
     P: AsRef<Path>,
 {
     let path = path.as_ref();
-    let mut f = File::open(path).map_err(|inner| Error::AccessError {
-        inner,
-        path: path.to_owned(),
-    })?;
+    let mut f =
+        File::open(path).map_err(|inner| Error::AccessError { inner, path: path.to_owned() })?;
     let mut string = String::new();
    let _ = f.read_to_string(&mut string);
-    let result: A = serde_json::from_str(string.as_str()).map_err(|inner| Error::SerdeError {
-        inner,
-        path: path.to_owned(),
-    })?;
+    let result: A = serde_json::from_str(string.as_str())
+        .map_err(|inner| Error::SerdeError { inner, path: path.to_owned() })?;
 
     Ok(result)
 }
@@ -41,10 +37,8 @@ pub fn mkdirp<P>
(path: &P) -> Result<()> where P: AsRef, { - fs::create_dir_all(path.as_ref()).map_err(|inner| Error::AccessError { - inner, - path: path.as_ref().to_owned(), - })?; + fs::create_dir_all(path.as_ref()) + .map_err(|inner| Error::AccessError { inner, path: path.as_ref().to_owned() })?; Ok(()) } @@ -62,10 +56,8 @@ where D: Serialize, P: AsRef, { - let buf = serde_json::to_string(&data).map_err(|inner| Error::SerdeError { - path: path.as_ref().to_owned(), - inner, - })?; + let buf = serde_json::to_string(&data) + .map_err(|inner| Error::SerdeError { path: path.as_ref().to_owned(), inner })?; save_string(&buf, path) } @@ -77,10 +69,7 @@ where File::create(path) .and_then(|mut f| f.write_all(data.as_bytes())) - .map_err(|inner| Error::AccessError { - inner, - path: path.as_ref().to_owned(), - })?; + .map_err(|inner| Error::AccessError { inner, path: path.as_ref().to_owned() })?; Ok(()) } diff --git a/src/html/mod.rs b/src/html/mod.rs index 687cd232..7543c1aa 100644 --- a/src/html/mod.rs +++ b/src/html/mod.rs @@ -102,10 +102,7 @@ struct Plot { } impl Plot { fn new(name: &str, url: &str) -> Plot { - Plot { - name: name.to_owned(), - url: url.to_owned(), - } + Plot { name: name.to_owned(), url: url.to_owned() } } } @@ -139,38 +136,26 @@ impl<'a> ReportLink<'a> { fn group(output_directory: &Path, group_id: &'a str) -> ReportLink<'a> { let path = PathBuf::from(make_filename_safe(group_id)); - ReportLink { - name: group_id, - path: if_exists(output_directory, &path), - } + ReportLink { name: group_id, path: if_exists(output_directory, &path) } } fn function(output_directory: &Path, group_id: &str, function_id: &'a str) -> ReportLink<'a> { let mut path = PathBuf::from(make_filename_safe(group_id)); path.push(make_filename_safe(function_id)); - ReportLink { - name: function_id, - path: if_exists(output_directory, &path), - } + ReportLink { name: function_id, path: if_exists(output_directory, &path) } } fn value(output_directory: &Path, group_id: &str, value_str: &'a str) -> ReportLink<'a> { let mut path = PathBuf::from(make_filename_safe(group_id)); path.push(make_filename_safe(value_str)); - ReportLink { - name: value_str, - path: if_exists(output_directory, &path), - } + ReportLink { name: value_str, path: if_exists(output_directory, &path) } } fn individual(output_directory: &Path, id: &'a BenchmarkId) -> ReportLink<'a> { let path = PathBuf::from(id.as_directory_name()); - ReportLink { - name: id.as_title(), - path: if_exists(output_directory, &path), - } + ReportLink { name: id.as_title(), path: if_exists(output_directory, &path) } } } @@ -254,12 +239,7 @@ impl<'a> BenchmarkGroup<'a> { .map(|os| os.map(|s| ReportLink::value(output_directory, group_id, s))) .collect::>>(); - BenchmarkGroup { - group_report, - function_ids, - values, - individual_links: value_groups, - } + BenchmarkGroup { group_report, function_ids, values, individual_links: value_groups } } } @@ -335,32 +315,22 @@ impl Report for Html { additional_plots.push(Plot::new("Slope", "slope.svg")); } - let throughput = measurements - .throughput - .as_ref() - .map(|thr| ConfidenceInterval { - lower: formatter - .format_throughput(thr, typical_estimate.confidence_interval.upper_bound), - upper: formatter - .format_throughput(thr, typical_estimate.confidence_interval.lower_bound), - point: formatter.format_throughput(thr, typical_estimate.point_estimate), - }); + let throughput = measurements.throughput.as_ref().map(|thr| ConfidenceInterval { + lower: formatter + .format_throughput(thr, typical_estimate.confidence_interval.upper_bound), + 
upper: formatter + .format_throughput(thr, typical_estimate.confidence_interval.lower_bound), + point: formatter.format_throughput(thr, typical_estimate.point_estimate), + }); let context = Context { title: id.as_title().to_owned(), - confidence: format!( - "{:.2}", - typical_estimate.confidence_interval.confidence_level - ), + confidence: format!("{:.2}", typical_estimate.confidence_interval.confidence_level), thumbnail_width: THUMBNAIL_SIZE.unwrap().0, thumbnail_height: THUMBNAIL_SIZE.unwrap().1, - slope: measurements - .absolute_estimates - .slope - .as_ref() - .map(time_interval), + slope: measurements.absolute_estimates.slope.as_ref().map(time_interval), mean: time_interval(&measurements.absolute_estimates.mean), median: time_interval(&measurements.absolute_estimates.median), mad: time_interval(&measurements.absolute_estimates.median_abs_dev), @@ -376,10 +346,7 @@ impl Report for Html { "{:0.7}", Slope(typical_estimate.confidence_interval.upper_bound).r_squared(&data) ), - point: format!( - "{:0.7}", - Slope(typical_estimate.point_estimate).r_squared(&data) - ), + point: format!("{:0.7}", Slope(typical_estimate.point_estimate).r_squared(&data)), }, additional_plots, @@ -491,9 +458,8 @@ impl Report for Html { // First sort the ids/data by value. // If all of the value strings can be parsed into a number, sort/dedupe // numerically. Otherwise sort lexicographically. - let all_values_numeric = all_data - .iter() - .all(|(id, _)| id.value_str.as_deref().and_then(try_parse).is_some()); + let all_values_numeric = + all_data.iter().all(|(id, _)| id.value_str.as_deref().and_then(try_parse).is_some()); if all_values_numeric { all_data.sort_unstable_by(|(a, _), (b, _)| { let num1 = a.value_str.as_deref().and_then(try_parse); @@ -547,10 +513,8 @@ impl Report for Html { debug_context(&report_path, &context); - let text = self - .templates - .render("index", &context) - .expect("Failed to render index template"); + let text = + self.templates.render("index", &context).expect("Failed to render index template"); try_else_return!(fs::save_string(&text, &report_path,)); } } @@ -624,18 +588,9 @@ impl Html { formatter: &dyn ValueFormatter, measurements: &MeasurementData<'_>, ) { - let plot_ctx = PlotContext { - id, - context, - size: None, - is_thumbnail: false, - }; + let plot_ctx = PlotContext { id, context, size: None, is_thumbnail: false }; - let plot_data = PlotData { - measurements, - formatter, - comparison: None, - }; + let plot_data = PlotData { measurements, formatter, comparison: None }; let plot_ctx_small = plot_ctx.thumbnail(true).size(THUMBNAIL_SIZE); @@ -643,21 +598,13 @@ impl Html { self.plotter.borrow_mut().pdf(plot_ctx_small, plot_data); if measurements.absolute_estimates.slope.is_some() { self.plotter.borrow_mut().regression(plot_ctx, plot_data); - self.plotter - .borrow_mut() - .regression(plot_ctx_small, plot_data); + self.plotter.borrow_mut().regression(plot_ctx_small, plot_data); } else { - self.plotter - .borrow_mut() - .iteration_times(plot_ctx, plot_data); - self.plotter - .borrow_mut() - .iteration_times(plot_ctx_small, plot_data); + self.plotter.borrow_mut().iteration_times(plot_ctx, plot_data); + self.plotter.borrow_mut().iteration_times(plot_ctx_small, plot_data); } - self.plotter - .borrow_mut() - .abs_distributions(plot_ctx, plot_data); + self.plotter.borrow_mut().abs_distributions(plot_ctx, plot_data); if let Some(ref comp) = measurements.comparison { try_else_return!({ @@ -684,21 +631,13 @@ impl Html { && comp.base_estimates.slope.is_some() { 
self.plotter.borrow_mut().regression(plot_ctx, comp_data); - self.plotter - .borrow_mut() - .regression(plot_ctx_small, comp_data); + self.plotter.borrow_mut().regression(plot_ctx_small, comp_data); } else { - self.plotter - .borrow_mut() - .iteration_times(plot_ctx, comp_data); - self.plotter - .borrow_mut() - .iteration_times(plot_ctx_small, comp_data); + self.plotter.borrow_mut().iteration_times(plot_ctx, comp_data); + self.plotter.borrow_mut().iteration_times(plot_ctx_small, comp_data); } self.plotter.borrow_mut().t_test(plot_ctx, comp_data); - self.plotter - .borrow_mut() - .rel_distributions(plot_ctx, comp_data); + self.plotter.borrow_mut().rel_distributions(plot_ctx, comp_data); } self.plotter.borrow_mut().wait(); @@ -735,12 +674,7 @@ impl Html { formatter: &dyn ValueFormatter, full_summary: bool, ) { - let plot_ctx = PlotContext { - id, - context: report_context, - size: None, - is_thumbnail: false, - }; + let plot_ctx = PlotContext { id, context: report_context, size: None, is_thumbnail: false }; try_else_return!( { diff --git a/src/lib.rs b/src/lib.rs index 5df25a34..ddd13bea 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -30,19 +30,9 @@ #[cfg(all(feature = "rayon", target_arch = "wasm32"))] compile_error!("Rayon cannot be used when targeting wasi32. Try disabling default features."); -#[cfg(test)] -extern crate approx; - -#[cfg(test)] -extern crate quickcheck; - use regex::Regex; -#[cfg(feature = "real_blackbox")] -extern crate test; - -#[macro_use] -extern crate serde_derive; +use serde::{Deserialize, Serialize}; // Needs to be declared before other modules // in order to be usable there. @@ -110,10 +100,7 @@ static DEFAULT_PLOTTING_BACKEND: Lazy = Lazy::new(|| match &*GN Err(e) => { match e { VersionError::Exec(_) => eprintln!("Gnuplot not found, using plotters backend"), - e => eprintln!( - "Gnuplot not found or not usable, using plotters backend\n{}", - e - ), + e => eprintln!("Gnuplot not found or not usable, using plotters backend\n{}", e), }; PlottingBackend::Plotters } @@ -386,16 +373,14 @@ fn cargo_target_directory() -> Option { target_directory: PathBuf, } - env::var_os("CARGO_TARGET_DIR") - .map(PathBuf::from) - .or_else(|| { - let output = Command::new(env::var_os("CARGO")?) - .args(["metadata", "--format-version", "1"]) - .output() - .ok()?; - let metadata: Metadata = serde_json::from_slice(&output.stdout).ok()?; - Some(metadata.target_directory) - }) + env::var_os("CARGO_TARGET_DIR").map(PathBuf::from).or_else(|| { + let output = Command::new(env::var_os("CARGO")?) + .args(["metadata", "--format-version", "1"]) + .output() + .ok()?; + let metadata: Metadata = serde_json::from_slice(&output.stdout).ok()?; + Some(metadata.target_directory) + }) } impl Default for Criterion { @@ -442,9 +427,7 @@ impl Default for Criterion { all_titles: HashSet::new(), measurement: WallTime, profiler: Box::new(RefCell::new(ExternalProfiler)), - connection: CARGO_CRITERION_CONNECTION - .as_ref() - .map(|mtx| mtx.lock().unwrap()), + connection: CARGO_CRITERION_CONNECTION.as_ref().map(|mtx| mtx.lock().unwrap()), mode: Mode::Benchmark, }; @@ -485,10 +468,7 @@ impl Criterion { /// Changes the internal profiler for benchmarks run with this runner. See /// the Profiler trait for more details. pub fn with_profiler(self, p: P) -> Criterion { - Criterion { - profiler: Box::new(RefCell::new(p)), - ..self - } + Criterion { profiler: Box::new(RefCell::new(p)), ..self } } #[must_use] @@ -682,11 +662,7 @@ impl Criterion { /// Names an explicit baseline and disables overwriting the previous results. 
pub fn retain_baseline(mut self, baseline: String, strict: bool) -> Criterion { self.baseline_directory = baseline; - self.baseline = if strict { - Baseline::CompareStrict - } else { - Baseline::CompareLenient - }; + self.baseline = if strict { Baseline::CompareStrict } else { Baseline::CompareLenient }; self } @@ -698,10 +674,7 @@ impl Criterion { pub fn with_filter>(mut self, filter: S) -> Criterion { let filter_text = filter.into(); let filter = Regex::new(&filter_text).unwrap_or_else(|err| { - panic!( - "Unable to parse '{}' as a regular expression: {}", - filter_text, err - ) + panic!("Unable to parse '{}' as a regular expression: {}", filter_text, err) }); self.filter = BenchmarkFilter::Regex(filter); @@ -951,9 +924,7 @@ https://bheisler.github.io/criterion.rs/book/faq.html } if matches.contains_id("baseline") - || matches - .get_one::("save-baseline") - .map_or(false, |base| base != "base") + || matches.get_one::("save-baseline").map_or(false, |base| base != "base") || matches.contains_id("load-baseline") { eprintln!("Error: baselines are not supported when running with cargo-criterion."); @@ -1009,10 +980,7 @@ https://bheisler.github.io/criterion.rs/book/faq.html BenchmarkFilter::Exact(filter.to_owned()) } else { let regex = Regex::new(filter).unwrap_or_else(|err| { - panic!( - "Unable to parse '{}' as a regular expression: {}", - filter, err - ) + panic!("Unable to parse '{}' as a regular expression: {}", filter, err) }); BenchmarkFilter::Regex(regex) } @@ -1162,8 +1130,7 @@ https://bheisler.github.io/criterion.rs/book/faq.html /// # Examples: /// /// ```rust - /// #[macro_use] extern crate criterion; - /// use self::criterion::*; + /// use self::criterion2::*; /// /// fn bench_simple(c: &mut Criterion) { /// let mut group = c.benchmark_group("My Group"); @@ -1171,7 +1138,7 @@ https://bheisler.github.io/criterion.rs/book/faq.html /// // Now we can perform benchmarks with this group /// group.bench_function("Bench 1", |b| b.iter(|| 1 )); /// group.bench_function("Bench 2", |b| b.iter(|| 2 )); - /// + /// /// group.finish(); /// } /// criterion_group!(benches, bench_simple); @@ -1184,8 +1151,7 @@ https://bheisler.github.io/criterion.rs/book/faq.html assert!(!group_name.is_empty(), "Group name must not be empty."); if let Some(conn) = &self.connection { - conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: &group_name }) - .unwrap(); + conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: &group_name }).unwrap(); } BenchmarkGroup::new(self, group_name) @@ -1200,8 +1166,7 @@ where /// # Example /// /// ```rust - /// #[macro_use] extern crate criterion; - /// use self::criterion::*; + /// use self::criterion2::*; /// /// fn bench(c: &mut Criterion) { /// // Setup (construct data, allocate memory, etc) @@ -1220,8 +1185,7 @@ where where F: FnMut(&mut Bencher<'_, M>), { - self.benchmark_group(id) - .bench_function(BenchmarkId::no_function(), f); + self.benchmark_group(id).bench_function(BenchmarkId::no_function(), f); self } @@ -1231,8 +1195,7 @@ where /// # Example /// /// ```rust - /// #[macro_use] extern crate criterion; - /// use self::criterion::*; + /// use self::criterion2::*; /// /// fn bench(c: &mut Criterion) { /// // Setup (construct data, allocate memory, etc) @@ -1307,7 +1270,7 @@ pub enum AxisScale { /// or benchmark group. 
/// /// ```rust -/// use self::criterion::{Bencher, Criterion, PlotConfiguration, AxisScale}; +/// use self::criterion2::{Bencher, Criterion, PlotConfiguration, AxisScale}; /// /// let plot_config = PlotConfiguration::default() /// .summary_scale(AxisScale::Logarithmic); @@ -1325,9 +1288,7 @@ pub struct PlotConfiguration { impl Default for PlotConfiguration { fn default() -> PlotConfiguration { - PlotConfiguration { - summary_scale: AxisScale::Linear, - } + PlotConfiguration { summary_scale: AxisScale::Linear } } } diff --git a/src/macros.rs b/src/macros.rs index df7a44d9..cb4e677d 100644 --- a/src/macros.rs +++ b/src/macros.rs @@ -17,9 +17,7 @@ /// Complete form: /// /// ``` -/// # #[macro_use] -/// # extern crate criterion; -/// # use criterion::Criterion; +/// # use criterion2::{Criterion, criterion_group}; /// # fn bench_method1(c: &mut Criterion) { /// # } /// # @@ -43,9 +41,7 @@ /// Compact Form: /// /// ``` -/// # #[macro_use] -/// # extern crate criterion; -/// # use criterion::Criterion; +/// # use criterion2::{Criterion, criterion_group}; /// # fn bench_method1(c: &mut Criterion) { /// # } /// # @@ -100,9 +96,7 @@ macro_rules! criterion_group { /// Since we've disabled the default benchmark harness, we need to add our own: /// /// ```ignore -/// #[macro_use] -/// extern crate criterion; -/// use criterion::Criterion; +/// use criterion2::Criterion; /// fn bench_method1(c: &mut Criterion) { /// } /// diff --git a/src/plot/gnuplot_backend/distributions.rs b/src/plot/gnuplot_backend/distributions.rs index 1ccbc1a2..7eb4242c 100644 --- a/src/plot/gnuplot_backend/distributions.rs +++ b/src/plot/gnuplot_backend/distributions.rs @@ -35,29 +35,14 @@ fn abs_distribution( let (kde_xs, ys) = kde::sweep(scaled_xs_sample, KDE_POINTS, Some((start, end))); // interpolate between two points of the KDE sweep to find the Y position at the point estimate. 
- let n_point = kde_xs - .iter() - .position(|&x| x >= point) - .unwrap_or(kde_xs.len() - 1) - .max(1); // Must be at least the second element or this will panic + let n_point = kde_xs.iter().position(|&x| x >= point).unwrap_or(kde_xs.len() - 1).max(1); // Must be at least the second element or this will panic let slope = (ys[n_point] - ys[n_point - 1]) / (kde_xs[n_point] - kde_xs[n_point - 1]); let y_point = ys[n_point - 1] + (slope * (point - kde_xs[n_point - 1])); let zero = iter::repeat(0); - let start = kde_xs - .iter() - .enumerate() - .find(|&(_, &x)| x >= lb) - .unwrap() - .0; - let end = kde_xs - .iter() - .enumerate() - .rev() - .find(|&(_, &x)| x <= ub) - .unwrap() - .0; + let start = kde_xs.iter().enumerate().find(|&(_, &x)| x >= lb).unwrap().0; + let end = kde_xs.iter().enumerate().rev().find(|&(_, &x)| x <= ub).unwrap().0; let len = end - start; let kde_xs_sample = Sample::new(&kde_xs); @@ -66,11 +51,7 @@ fn abs_distribution( figure .set(Font(DEFAULT_FONT)) .set(size.unwrap_or(SIZE)) - .set(Title(format!( - "{}: {}", - gnuplot_escape(id.as_title()), - statistic - ))) + .set(Title(format!("{}: {}", gnuplot_escape(id.as_title()), statistic))) .configure(Axis::BottomX, |a| { a.set(Label(format!("Average time ({})", unit))) .set(Range::Limits(kde_xs_sample.min(), kde_xs_sample.max())) @@ -81,42 +62,23 @@ fn abs_distribution( .set(Order::SampleText) .set(Position::Outside(Vertical::Top, Horizontal::Right)) }) - .plot( - Lines { - x: &*kde_xs, - y: &*ys, - }, - |c| { - c.set(DARK_BLUE) - .set(LINEWIDTH) - .set(Label("Bootstrap distribution")) - .set(LineType::Solid) - }, - ) + .plot(Lines { x: &*kde_xs, y: &*ys }, |c| { + c.set(DARK_BLUE) + .set(LINEWIDTH) + .set(Label("Bootstrap distribution")) + .set(LineType::Solid) + }) .plot( FilledCurve { x: kde_xs.iter().skip(start).take(len), y1: ys.iter().skip(start), y2: zero, }, - |c| { - c.set(DARK_BLUE) - .set(Label("Confidence interval")) - .set(Opacity(0.25)) - }, + |c| c.set(DARK_BLUE).set(Label("Confidence interval")).set(Opacity(0.25)), ) - .plot( - Lines { - x: &[point, point], - y: &[0., y_point], - }, - |c| { - c.set(DARK_BLUE) - .set(LINEWIDTH) - .set(Label("Point estimate")) - .set(LineType::Dash) - }, - ); + .plot(Lines { x: &[point, point], y: &[0., y_point] }, |c| { + c.set(DARK_BLUE).set(LINEWIDTH).set(Label("Point estimate")).set(LineType::Dash) + }); let path = context.report_path(id, &format!("{}.svg", statistic)); debug_script(&path, &figure); @@ -134,22 +96,11 @@ pub(crate) fn abs_distributions( .iter() .filter_map(|stat| { measurements.distributions.get(*stat).and_then(|dist| { - measurements - .absolute_estimates - .get(*stat) - .map(|est| (*stat, dist, est)) + measurements.absolute_estimates.get(*stat).map(|est| (*stat, dist, est)) }) }) .map(|(statistic, distribution, estimate)| { - abs_distribution( - id, - context, - formatter, - statistic, - distribution, - estimate, - size, - ) + abs_distribution(id, context, formatter, statistic, distribution, estimate, size) }) .collect::<Vec<_>>() } @@ -173,11 +124,7 @@ fn rel_distribution( // interpolate between two points of the KDE sweep to find the Y position at the point estimate.
let point = estimate.point_estimate; - let n_point = xs - .iter() - .position(|&x| x >= point) - .unwrap_or(ys.len() - 1) - .max(1); + let n_point = xs.iter().position(|&x| x >= point).unwrap_or(ys.len() - 1).max(1); let slope = (ys[n_point] - ys[n_point - 1]) / (xs[n_point] - xs[n_point - 1]); let y_point = ys[n_point - 1] + (slope * (point - xs[n_point - 1])); @@ -185,13 +132,7 @@ fn rel_distribution( let zero = iter::repeat(0); let start = xs.iter().enumerate().find(|&(_, &x)| x >= lb).unwrap().0; - let end = xs - .iter() - .enumerate() - .rev() - .find(|&(_, &x)| x <= ub) - .unwrap() - .0; + let end = xs.iter().enumerate().rev().find(|&(_, &x)| x <= ub).unwrap().0; let len = end - start; let x_min = xs_.min(); @@ -203,16 +144,8 @@ fn rel_distribution( (middle, middle) } else { ( - if -noise_threshold < x_min { - x_min - } else { - -noise_threshold - }, - if noise_threshold > x_max { - x_max - } else { - noise_threshold - }, + if -noise_threshold < x_min { x_min } else { -noise_threshold }, + if noise_threshold > x_max { x_max } else { noise_threshold }, ) }; @@ -227,11 +160,7 @@ fn rel_distribution( .set(Order::SampleText) .set(Position::Outside(Vertical::Top, Horizontal::Right)) }) - .set(Title(format!( - "{}: {}", - gnuplot_escape(id.as_title()), - statistic - ))) + .set(Title(format!("{}: {}", gnuplot_escape(id.as_title()), statistic))) .configure(Axis::BottomX, |a| { a.set(Label("Relative change (%)")) .set(Range::Limits(x_min * 100., x_max * 100.)) @@ -249,37 +178,14 @@ fn rel_distribution( y1: ys.iter().skip(start), y2: zero.clone(), }, - |c| { - c.set(DARK_BLUE) - .set(Label("Confidence interval")) - .set(Opacity(0.25)) - }, - ) - .plot( - Lines { - x: &[point, point], - y: &[0., y_point], - }, - |c| { - c.set(DARK_BLUE) - .set(LINEWIDTH) - .set(Label("Point estimate")) - .set(LineType::Dash) - }, + |c| c.set(DARK_BLUE).set(Label("Confidence interval")).set(Opacity(0.25)), ) - .plot( - FilledCurve { - x: &[fc_start, fc_end], - y1: one, - y2: zero, - }, - |c| { - c.set(Axes::BottomXRightY) - .set(DARK_RED) - .set(Label("Noise threshold")) - .set(Opacity(0.1)) - }, - ); + .plot(Lines { x: &[point, point], y: &[0., y_point] }, |c| { + c.set(DARK_BLUE).set(LINEWIDTH).set(Label("Point estimate")).set(LineType::Dash) + }) + .plot(FilledCurve { x: &[fc_start, fc_end], y1: one, y2: zero }, |c| { + c.set(Axes::BottomXRightY).set(DARK_RED).set(Label("Noise threshold")).set(Opacity(0.1)) + }); let path = context.report_path(id, &format!("change/{}.svg", statistic)); debug_script(&path, &figure); diff --git a/src/plot/gnuplot_backend/iteration_times.rs b/src/plot/gnuplot_backend/iteration_times.rs index 4db4de8d..c243b646 100644 --- a/src/plot/gnuplot_backend/iteration_times.rs +++ b/src/plot/gnuplot_backend/iteration_times.rs @@ -22,24 +22,14 @@ fn iteration_times_figure( figure .set(Font(DEFAULT_FONT)) .set(size.unwrap_or(SIZE)) - .configure(Axis::BottomX, |a| { - a.configure(Grid::Major, |g| g.show()).set(Label("Sample")) - }) + .configure(Axis::BottomX, |a| a.configure(Grid::Major, |g| g.show()).set(Label("Sample"))) .configure(Axis::LeftY, |a| { a.configure(Grid::Major, |g| g.show()) .set(Label(format!("Average Iteration Time ({})", unit))) }) - .plot( - Points { - x: 1..(data.len() + 1), - y: scaled_y.as_ref(), - }, - |c| { - c.set(DARK_BLUE) - .set(PointSize(0.5)) - .set(PointType::FilledCircle) - }, - ); + .plot(Points { x: 1..(data.len() + 1), y: scaled_y.as_ref() }, |c| { + c.set(DARK_BLUE).set(PointSize(0.5)).set(PointType::FilledCircle) + }); figure } @@ -101,9 +91,7 @@ fn 
iteration_times_comparison_figure( figure .set(Font(DEFAULT_FONT)) .set(size.unwrap_or(SIZE)) - .configure(Axis::BottomX, |a| { - a.configure(Grid::Major, |g| g.show()).set(Label("Sample")) - }) + .configure(Axis::BottomX, |a| a.configure(Grid::Major, |g| g.show()).set(Label("Sample"))) .configure(Axis::LeftY, |a| { a.configure(Grid::Major, |g| g.show()) .set(Label(format!("Average Iteration Time ({})", unit))) @@ -113,30 +101,12 @@ fn iteration_times_comparison_figure( .set(Order::SampleText) .set(Position::Inside(Vertical::Top, Horizontal::Left)) }) - .plot( - Points { - x: 1..(current_data.len() + 1), - y: scaled_base_y.as_ref(), - }, - |c| { - c.set(DARK_RED) - .set(Label("Base")) - .set(PointSize(0.5)) - .set(PointType::FilledCircle) - }, - ) - .plot( - Points { - x: 1..(current_data.len() + 1), - y: scaled_current_y.as_ref(), - }, - |c| { - c.set(DARK_BLUE) - .set(Label("Current")) - .set(PointSize(0.5)) - .set(PointType::FilledCircle) - }, - ); + .plot(Points { x: 1..(current_data.len() + 1), y: scaled_base_y.as_ref() }, |c| { + c.set(DARK_RED).set(Label("Base")).set(PointSize(0.5)).set(PointType::FilledCircle) + }) + .plot(Points { x: 1..(current_data.len() + 1), y: scaled_current_y.as_ref() }, |c| { + c.set(DARK_BLUE).set(Label("Current")).set(PointSize(0.5)).set(PointType::FilledCircle) + }); figure } diff --git a/src/plot/gnuplot_backend/mod.rs b/src/plot/gnuplot_backend/mod.rs index 27cc48be..63c044c7 100644 --- a/src/plot/gnuplot_backend/mod.rs +++ b/src/plot/gnuplot_backend/mod.rs @@ -88,14 +88,7 @@ impl Plotter for Gnuplot { pdf_small(ctx.id, ctx.context, data.formatter, data.measurements, size) } } else if let Some(cmp) = data.comparison { - pdf_comparison( - ctx.id, - ctx.context, - data.formatter, - data.measurements, - cmp, - size, - ) + pdf_comparison(ctx.id, ctx.context, data.formatter, data.measurements, cmp, size) } else { pdf(ctx.id, ctx.context, data.formatter, data.measurements, size) }); @@ -192,8 +185,7 @@ impl Plotter for Gnuplot { fn t_test(&mut self, ctx: PlotContext<'_>, data: PlotData<'_>) { let size = ctx.size.map(|(w, h)| Size(w, h)); if let Some(cmp) = data.comparison { - self.process_list - .push(t_test(ctx.id, ctx.context, data.measurements, cmp, size)); + self.process_list.push(t_test(ctx.id, ctx.context, data.measurements, cmp, size)); } else { error!("Comparison data is not provided for t_test plot"); } diff --git a/src/plot/gnuplot_backend/pdf.rs b/src/plot/gnuplot_backend/pdf.rs index a0b85c7a..904d2f89 100644 --- a/src/plot/gnuplot_backend/pdf.rs +++ b/src/plot/gnuplot_backend/pdf.rs @@ -20,10 +20,7 @@ pub(crate) fn pdf( let mean = scaled_avg_times.mean(); let iter_counts = measurements.iter_counts(); - let &max_iters = iter_counts - .iter() - .max_by_key(|&&iters| iters as u64) - .unwrap(); + let &max_iters = iter_counts.iter().max_by_key(|&&iters| iters as u64).unwrap(); let exponent = (max_iters.log10() / 3.).floor() as i32 * 3; let y_scale = 10f64.powi(-exponent); @@ -62,57 +59,28 @@ pub(crate) fn pdf( .set(Order::SampleText) .set(Position::Outside(Vertical::Top, Horizontal::Right)) }) - .plot( - FilledCurve { - x: &*xs, - y1: &*ys, - y2: zeros, - }, - |c| { - c.set(Axes::BottomXRightY) - .set(DARK_BLUE) - .set(Label("PDF")) - .set(Opacity(0.25)) - }, - ) - .plot( - Lines { - x: &[mean, mean], - y: vertical, - }, - |c| { - c.set(DARK_BLUE) - .set(LINEWIDTH) - .set(LineType::Dash) - .set(Label("Mean")) - }, - ) + .plot(FilledCurve { x: &*xs, y1: &*ys, y2: zeros }, |c| { + 
c.set(Axes::BottomXRightY).set(DARK_BLUE).set(Label("PDF")).set(Opacity(0.25)) + }) + .plot(Lines { x: &[mean, mean], y: vertical }, |c| { + c.set(DARK_BLUE).set(LINEWIDTH).set(LineType::Dash).set(Label("Mean")) + }) .plot( Points { - x: avg_times - .iter() - .zip(scaled_avg_times.iter()) - .filter_map( - |((_, label), t)| { - if label.is_outlier() { - None - } else { - Some(t) - } - }, - ), - y: avg_times - .iter() - .zip(iter_counts.iter()) - .filter_map( - |((_, label), i)| { - if label.is_outlier() { - None - } else { - Some(i) - } - }, - ), + x: avg_times.iter().zip(scaled_avg_times.iter()).filter_map(|((_, label), t)| { + if label.is_outlier() { + None + } else { + Some(t) + } + }), + y: avg_times.iter().zip(iter_counts.iter()).filter_map(|((_, label), i)| { + if label.is_outlier() { + None + } else { + Some(i) + } + }), }, |c| { c.set(DARK_BLUE) @@ -123,30 +91,20 @@ pub(crate) fn pdf( ) .plot( Points { - x: avg_times - .iter() - .zip(scaled_avg_times.iter()) - .filter_map( - |((_, label), t)| { - if label.is_mild() { - Some(t) - } else { - None - } - }, - ), - y: avg_times - .iter() - .zip(iter_counts.iter()) - .filter_map( - |((_, label), i)| { - if label.is_mild() { - Some(i) - } else { - None - } - }, - ), + x: avg_times.iter().zip(scaled_avg_times.iter()).filter_map(|((_, label), t)| { + if label.is_mild() { + Some(t) + } else { + None + } + }), + y: avg_times.iter().zip(iter_counts.iter()).filter_map(|((_, label), i)| { + if label.is_mild() { + Some(i) + } else { + None + } + }), }, |c| { c.set(DARK_ORANGE) @@ -157,30 +115,20 @@ pub(crate) fn pdf( ) .plot( Points { - x: avg_times - .iter() - .zip(scaled_avg_times.iter()) - .filter_map( - |((_, label), t)| { - if label.is_severe() { - Some(t) - } else { - None - } - }, - ), - y: avg_times - .iter() - .zip(iter_counts.iter()) - .filter_map( - |((_, label), i)| { - if label.is_severe() { - Some(i) - } else { - None - } - }, - ), + x: avg_times.iter().zip(scaled_avg_times.iter()).filter_map(|((_, label), t)| { + if label.is_severe() { + Some(t) + } else { + None + } + }), + y: avg_times.iter().zip(iter_counts.iter()).filter_map(|((_, label), i)| { + if label.is_severe() { + Some(i) + } else { + None + } + }), }, |c| { c.set(DARK_RED) @@ -189,34 +137,18 @@ pub(crate) fn pdf( .set(PointType::FilledCircle) }, ) - .plot( - Lines { - x: &[lomt, lomt], - y: vertical, - }, - |c| c.set(DARK_ORANGE).set(LINEWIDTH).set(LineType::Dash), - ) - .plot( - Lines { - x: &[himt, himt], - y: vertical, - }, - |c| c.set(DARK_ORANGE).set(LINEWIDTH).set(LineType::Dash), - ) - .plot( - Lines { - x: &[lost, lost], - y: vertical, - }, - |c| c.set(DARK_RED).set(LINEWIDTH).set(LineType::Dash), - ) - .plot( - Lines { - x: &[hist, hist], - y: vertical, - }, - |c| c.set(DARK_RED).set(LINEWIDTH).set(LineType::Dash), - ); + .plot(Lines { x: &[lomt, lomt], y: vertical }, |c| { + c.set(DARK_ORANGE).set(LINEWIDTH).set(LineType::Dash) + }) + .plot(Lines { x: &[himt, himt], y: vertical }, |c| { + c.set(DARK_ORANGE).set(LINEWIDTH).set(LineType::Dash) + }) + .plot(Lines { x: &[lost, lost], y: vertical }, |c| { + c.set(DARK_RED).set(LINEWIDTH).set(LineType::Dash) + }) + .plot(Lines { x: &[hist, hist], y: vertical }, |c| { + c.set(DARK_RED).set(LINEWIDTH).set(LineType::Dash) + }); figure.set(Title(gnuplot_escape(id.as_title()))); let path = context.report_path(id, "pdf.svg"); @@ -253,32 +185,15 @@ pub(crate) fn pdf_small( a.set(Label(format!("Average time ({})", unit))) .set(Range::Limits(xs_.min(), xs_.max())) }) - .configure(Axis::LeftY, |a| { - a.set(Label("Density 
(a.u.)")) - .set(Range::Limits(0., y_limit)) - }) + .configure(Axis::LeftY, |a| a.set(Label("Density (a.u.)")).set(Range::Limits(0., y_limit))) .configure(Axis::RightY, |a| a.hide()) .configure(Key, |k| k.hide()) - .plot( - FilledCurve { - x: &*xs, - y1: &*ys, - y2: zeros, - }, - |c| { - c.set(Axes::BottomXRightY) - .set(DARK_BLUE) - .set(Label("PDF")) - .set(Opacity(0.25)) - }, - ) - .plot( - Lines { - x: &[mean, mean], - y: &[0., mean_y], - }, - |c| c.set(DARK_BLUE).set(LINEWIDTH).set(Label("Mean")), - ); + .plot(FilledCurve { x: &*xs, y1: &*ys, y2: zeros }, |c| { + c.set(Axes::BottomXRightY).set(DARK_BLUE).set(Label("PDF")).set(Opacity(0.25)) + }) + .plot(Lines { x: &[mean, mean], y: &[0., mean_y] }, |c| { + c.set(DARK_BLUE).set(LINEWIDTH).set(Label("Mean")) + }); let path = context.report_path(id, "pdf_small.svg"); debug_script(&path, &figure); @@ -297,10 +212,8 @@ fn pdf_comparison_figure( let unit = formatter.scale_values(typical, &mut scaled_base_avg_times); let scaled_base_avg_times = Sample::new(&scaled_base_avg_times); - let mut scaled_new_avg_times: Vec = (&measurements.avg_times as &Sample) - .iter() - .cloned() - .collect(); + let mut scaled_new_avg_times: Vec = + (&measurements.avg_times as &Sample).iter().cloned().collect(); let _ = formatter.scale_values(typical, &mut scaled_new_avg_times); let scaled_new_avg_times = Sample::new(&scaled_new_avg_times); @@ -318,9 +231,7 @@ fn pdf_comparison_figure( figure .set(Font(DEFAULT_FONT)) .set(size.unwrap_or(SIZE)) - .configure(Axis::BottomX, |a| { - a.set(Label(format!("Average time ({})", unit))) - }) + .configure(Axis::BottomX, |a| a.set(Label(format!("Average time ({})", unit)))) .configure(Axis::LeftY, |a| a.set(Label("Density (a.u.)"))) .configure(Axis::RightY, |a| a.hide()) .configure(Key, |k| { @@ -328,36 +239,18 @@ fn pdf_comparison_figure( .set(Order::SampleText) .set(Position::Outside(Vertical::Top, Horizontal::Right)) }) - .plot( - FilledCurve { - x: &*base_xs, - y1: &*base_ys, - y2: zeros.clone(), - }, - |c| c.set(DARK_RED).set(Label("Base PDF")).set(Opacity(0.5)), - ) - .plot( - Lines { - x: &[base_mean, base_mean], - y: &[0., base_y_mean], - }, - |c| c.set(DARK_RED).set(Label("Base Mean")).set(LINEWIDTH), - ) - .plot( - FilledCurve { - x: &*xs, - y1: &*ys, - y2: zeros, - }, - |c| c.set(DARK_BLUE).set(Label("New PDF")).set(Opacity(0.5)), - ) - .plot( - Lines { - x: &[new_mean, new_mean], - y: &[0., y_mean], - }, - |c| c.set(DARK_BLUE).set(Label("New Mean")).set(LINEWIDTH), - ); + .plot(FilledCurve { x: &*base_xs, y1: &*base_ys, y2: zeros.clone() }, |c| { + c.set(DARK_RED).set(Label("Base PDF")).set(Opacity(0.5)) + }) + .plot(Lines { x: &[base_mean, base_mean], y: &[0., base_y_mean] }, |c| { + c.set(DARK_RED).set(Label("Base Mean")).set(LINEWIDTH) + }) + .plot(FilledCurve { x: &*xs, y1: &*ys, y2: zeros }, |c| { + c.set(DARK_BLUE).set(Label("New PDF")).set(Opacity(0.5)) + }) + .plot(Lines { x: &[new_mean, new_mean], y: &[0., y_mean] }, |c| { + c.set(DARK_BLUE).set(Label("New Mean")).set(LINEWIDTH) + }); figure } diff --git a/src/plot/gnuplot_backend/regression.rs b/src/plot/gnuplot_backend/regression.rs index 82de357c..2bc10e9c 100644 --- a/src/plot/gnuplot_backend/regression.rs +++ b/src/plot/gnuplot_backend/regression.rs @@ -46,50 +46,21 @@ fn regression_figure( .set(Font(DEFAULT_FONT)) .set(size.unwrap_or(SIZE)) .configure(Axis::BottomX, |a| { - a.configure(Grid::Major, |g| g.show()) - .set(Label(x_label)) - .set(ScaleFactor(x_scale)) + a.configure(Grid::Major, |g| 
g.show()).set(Label(x_label)).set(ScaleFactor(x_scale)) }) .configure(Axis::LeftY, |a| { a.configure(Grid::Major, |g| g.show()) .set(Label(format!("Total sample time ({})", unit))) }) - .plot( - Points { - x: data.x().as_ref(), - y: scaled_y.as_ref(), - }, - |c| { - c.set(DARK_BLUE) - .set(Label("Sample")) - .set(PointSize(0.5)) - .set(PointType::FilledCircle) - }, - ) - .plot( - Lines { - x: &[0., max_iters], - y: &[0., point], - }, - |c| { - c.set(DARK_BLUE) - .set(LINEWIDTH) - .set(Label("Linear regression")) - .set(LineType::Solid) - }, - ) - .plot( - FilledCurve { - x: &[0., max_iters], - y1: &[0., lb], - y2: &[0., ub], - }, - |c| { - c.set(DARK_BLUE) - .set(Label("Confidence interval")) - .set(Opacity(0.25)) - }, - ); + .plot(Points { x: data.x().as_ref(), y: scaled_y.as_ref() }, |c| { + c.set(DARK_BLUE).set(Label("Sample")).set(PointSize(0.5)).set(PointType::FilledCircle) + }) + .plot(Lines { x: &[0., max_iters], y: &[0., point] }, |c| { + c.set(DARK_BLUE).set(LINEWIDTH).set(Label("Linear regression")).set(LineType::Solid) + }) + .plot(FilledCurve { x: &[0., max_iters], y1: &[0., lb], y2: &[0., ub] }, |c| { + c.set(DARK_BLUE).set(Label("Confidence interval")).set(Opacity(0.25)) + }); figure } @@ -149,23 +120,13 @@ fn regression_comparison_figure( }; let Estimate { - confidence_interval: - ConfidenceInterval { - lower_bound: base_lb, - upper_bound: base_ub, - .. - }, + confidence_interval: ConfidenceInterval { lower_bound: base_lb, upper_bound: base_ub, .. }, point_estimate: base_point, .. } = comparison.base_estimates.slope.as_ref().unwrap(); let Estimate { - confidence_interval: - ConfidenceInterval { - lower_bound: lb, - upper_bound: ub, - .. - }, + confidence_interval: ConfidenceInterval { lower_bound: lb, upper_bound: ub, .. }, point_estimate: point, .. 
} = measurements.absolute_estimates.slope.as_ref().unwrap(); @@ -186,9 +147,7 @@ fn regression_comparison_figure( .set(Font(DEFAULT_FONT)) .set(size.unwrap_or(SIZE)) .configure(Axis::BottomX, |a| { - a.configure(Grid::Major, |g| g.show()) - .set(Label(x_label)) - .set(ScaleFactor(x_scale)) + a.configure(Grid::Major, |g| g.show()).set(Label(x_label)).set(ScaleFactor(x_scale)) }) .configure(Axis::LeftY, |a| { a.configure(Grid::Major, |g| g.show()) @@ -199,46 +158,18 @@ fn regression_comparison_figure( .set(Order::SampleText) .set(Position::Inside(Vertical::Top, Horizontal::Left)) }) - .plot( - FilledCurve { - x: &[0., max_iters], - y1: &[0., base_lb], - y2: &[0., base_ub], - }, - |c| c.set(DARK_RED).set(Opacity(0.25)), - ) - .plot( - FilledCurve { - x: &[0., max_iters], - y1: &[0., lb], - y2: &[0., ub], - }, - |c| c.set(DARK_BLUE).set(Opacity(0.25)), - ) - .plot( - Lines { - x: &[0., max_iters], - y: &[0., base_point], - }, - |c| { - c.set(DARK_RED) - .set(LINEWIDTH) - .set(Label("Base sample")) - .set(LineType::Solid) - }, - ) - .plot( - Lines { - x: &[0., max_iters], - y: &[0., point], - }, - |c| { - c.set(DARK_BLUE) - .set(LINEWIDTH) - .set(Label("New sample")) - .set(LineType::Solid) - }, - ); + .plot(FilledCurve { x: &[0., max_iters], y1: &[0., base_lb], y2: &[0., base_ub] }, |c| { + c.set(DARK_RED).set(Opacity(0.25)) + }) + .plot(FilledCurve { x: &[0., max_iters], y1: &[0., lb], y2: &[0., ub] }, |c| { + c.set(DARK_BLUE).set(Opacity(0.25)) + }) + .plot(Lines { x: &[0., max_iters], y: &[0., base_point] }, |c| { + c.set(DARK_RED).set(LINEWIDTH).set(Label("Base sample")).set(LineType::Solid) + }) + .plot(Lines { x: &[0., max_iters], y: &[0., point] }, |c| { + c.set(DARK_BLUE).set(LINEWIDTH).set(Label("New sample")).set(LineType::Solid) + }); figure } diff --git a/src/plot/gnuplot_backend/summary.rs b/src/plot/gnuplot_backend/summary.rs index e5d2ab6b..d2a0a37a 100644 --- a/src/plot/gnuplot_backend/summary.rs +++ b/src/plot/gnuplot_backend/summary.rs @@ -59,8 +59,7 @@ pub fn line_comparison( }) .set(Title(format!("{}: Comparison", gnuplot_escape(title)))) .configure(Axis::BottomX, |a| { - a.set(Label(format!("Input{}", input_suffix))) - .set(axis_scale.to_gnuplot()) + a.set(Label(format!("Input{}", input_suffix))).set(axis_scale.to_gnuplot()) }); let mut i = 0; @@ -104,14 +103,10 @@ pub fn line_comparison( if let Some(name) = function_name { c.set(Label(name)); } - c.set(LINEWIDTH) - .set(LineType::Solid) - .set(COMPARISON_COLORS[i % NUM_COLORS]) + c.set(LINEWIDTH).set(LineType::Solid).set(COMPARISON_COLORS[i % NUM_COLORS]) }) .plot(Points { x: &xs, y: &ys }, |p| { - p.set(PointType::FilledCircle) - .set(POINT_SIZE) - .set(COMPARISON_COLORS[i % NUM_COLORS]) + p.set(PointType::FilledCircle).set(POINT_SIZE).set(COMPARISON_COLORS[i % NUM_COLORS]) }); i += 1; @@ -176,14 +171,10 @@ pub fn violin( .set(axis_scale.to_gnuplot()) }) .configure(Axis::LeftY, |a| { - a.set(Label("Input")) - .set(Range::Limits(0., all_curves.len() as f64)) - .set(TicLabels { - positions: tics(), - labels: all_curves - .iter() - .map(|&&(id, _)| gnuplot_escape(id.as_title())), - }) + a.set(Label("Input")).set(Range::Limits(0., all_curves.len() as f64)).set(TicLabels { + positions: tics(), + labels: all_curves.iter().map(|&&(id, _)| gnuplot_escape(id.as_title())), + }) }); let mut is_first = true; diff --git a/src/plot/gnuplot_backend/t_test.rs b/src/plot/gnuplot_backend/t_test.rs index 47b4a110..ba714660 100644 --- a/src/plot/gnuplot_backend/t_test.rs +++ b/src/plot/gnuplot_backend/t_test.rs @@ -22,10 +22,7 @@ pub(crate) 
fn t_test( figure .set(Font(DEFAULT_FONT)) .set(size.unwrap_or(SIZE)) - .set(Title(format!( - "{}: Welch t test", - gnuplot_escape(id.as_title()) - ))) + .set(Title(format!("{}: Welch t test", gnuplot_escape(id.as_title())))) .configure(Axis::BottomX, |a| a.set(Label("t score"))) .configure(Axis::LeftY, |a| a.set(Label("Density"))) .configure(Key, |k| { @@ -33,31 +30,16 @@ pub(crate) fn t_test( .set(Order::SampleText) .set(Position::Outside(Vertical::Top, Horizontal::Right)) }) - .plot( - FilledCurve { - x: &*xs, - y1: &*ys, - y2: zero, - }, - |c| { - c.set(DARK_BLUE) - .set(Label("t distribution")) - .set(Opacity(0.25)) - }, - ) - .plot( - Lines { - x: &[t, t], - y: &[0, 1], - }, - |c| { - c.set(Axes::BottomXRightY) - .set(DARK_BLUE) - .set(LINEWIDTH) - .set(Label("t statistic")) - .set(LineType::Solid) - }, - ); + .plot(FilledCurve { x: &*xs, y1: &*ys, y2: zero }, |c| { + c.set(DARK_BLUE).set(Label("t distribution")).set(Opacity(0.25)) + }) + .plot(Lines { x: &[t, t], y: &[0, 1] }, |c| { + c.set(Axes::BottomXRightY) + .set(DARK_BLUE) + .set(LINEWIDTH) + .set(Label("t statistic")) + .set(LineType::Solid) + }); let path = context.report_path(id, "change/t-test.svg"); debug_script(&path, &figure); diff --git a/src/plot/plotters_backend/distributions.rs b/src/plot/plotters_backend/distributions.rs index 8de11405..6a3d4e39 100644 --- a/src/plot/plotters_backend/distributions.rs +++ b/src/plot/plotters_backend/distributions.rs @@ -28,27 +28,12 @@ fn abs_distribution( let (kde_xs, ys) = kde::sweep(scaled_xs_sample, KDE_POINTS, Some((start, end))); // interpolate between two points of the KDE sweep to find the Y position at the point estimate. - let n_point = kde_xs - .iter() - .position(|&x| x >= point) - .unwrap_or(kde_xs.len() - 1) - .max(1); // Must be at least the second element or this will panic + let n_point = kde_xs.iter().position(|&x| x >= point).unwrap_or(kde_xs.len() - 1).max(1); // Must be at least the second element or this will panic let slope = (ys[n_point] - ys[n_point - 1]) / (kde_xs[n_point] - kde_xs[n_point - 1]); let y_point = ys[n_point - 1] + (slope * (point - kde_xs[n_point - 1])); - let start = kde_xs - .iter() - .enumerate() - .find(|&(_, &x)| x >= lb) - .unwrap() - .0; - let end = kde_xs - .iter() - .enumerate() - .rev() - .find(|&(_, &x)| x <= ub) - .unwrap() - .0; + let start = kde_xs.iter().enumerate().find(|&(_, &x)| x >= lb).unwrap().0; + let end = kde_xs.iter().enumerate().rev().find(|&(_, &x)| x <= ub).unwrap().0; let len = end - start; let kde_xs_sample = Sample::new(&kde_xs); @@ -63,10 +48,7 @@ fn abs_distribution( let mut chart = ChartBuilder::on(&root_area) .margin((5).percent()) - .caption( - format!("{}:{}", id.as_title(), statistic), - (DEFAULT_FONT, 20), - ) + .caption(format!("{}:{}", id.as_title(), statistic), (DEFAULT_FONT, 20)) .set_label_area_size(LabelAreaPosition::Left, (5).percent_width().min(60)) .set_label_area_size(LabelAreaPosition::Bottom, (5).percent_height().min(40)) .build_cartesian_2d(x_range, y_range) @@ -93,12 +75,7 @@ fn abs_distribution( chart .draw_series(AreaSeries::new( - kde_xs - .iter() - .zip(ys.iter()) - .skip(start) - .take(len) - .map(|(&x, &y)| (x, y)), + kde_xs.iter().zip(ys.iter()).skip(start).take(len).map(|(&x, &y)| (x, y)), 0.0, DARK_BLUE.mix(0.25).filled().stroke_width(3), )) @@ -117,11 +94,7 @@ fn abs_distribution( .label("Point estimate") .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], DARK_BLUE)); - chart - .configure_series_labels() - .position(SeriesLabelPosition::UpperRight) - .draw() - .unwrap(); 
+ chart.configure_series_labels().position(SeriesLabelPosition::UpperRight).draw().unwrap(); } pub(crate) fn abs_distributions( @@ -135,22 +108,11 @@ pub(crate) fn abs_distributions( .iter() .filter_map(|stat| { measurements.distributions.get(*stat).and_then(|dist| { - measurements - .absolute_estimates - .get(*stat) - .map(|est| (*stat, dist, est)) + measurements.absolute_estimates.get(*stat).map(|est| (*stat, dist, est)) }) }) .for_each(|(statistic, distribution, estimate)| { - abs_distribution( - id, - context, - formatter, - statistic, - distribution, - estimate, - size, - ) + abs_distribution(id, context, formatter, statistic, distribution, estimate, size) }) } @@ -173,22 +135,12 @@ fn rel_distribution( // interpolate between two points of the KDE sweep to find the Y position at the point estimate. let point = estimate.point_estimate; - let n_point = xs - .iter() - .position(|&x| x >= point) - .unwrap_or(ys.len() - 1) - .max(1); + let n_point = xs.iter().position(|&x| x >= point).unwrap_or(ys.len() - 1).max(1); let slope = (ys[n_point] - ys[n_point - 1]) / (xs[n_point] - xs[n_point - 1]); let y_point = ys[n_point - 1] + (slope * (point - xs[n_point - 1])); let start = xs.iter().enumerate().find(|&(_, &x)| x >= lb).unwrap().0; - let end = xs - .iter() - .enumerate() - .rev() - .find(|&(_, &x)| x <= ub) - .unwrap() - .0; + let end = xs.iter().enumerate().rev().find(|&(_, &x)| x <= ub).unwrap().0; let len = end - start; let x_min = xs_.min(); @@ -200,16 +152,8 @@ fn rel_distribution( (middle, middle) } else { ( - if -noise_threshold < x_min { - x_min - } else { - -noise_threshold - }, - if noise_threshold > x_max { - x_max - } else { - noise_threshold - }, + if -noise_threshold < x_min { x_min } else { -noise_threshold }, + if noise_threshold > x_max { x_max } else { noise_threshold }, ) }; let y_range = plotters::data::fitting_range(ys.iter()); @@ -218,10 +162,7 @@ fn rel_distribution( let mut chart = ChartBuilder::on(&root_area) .margin((5).percent()) - .caption( - format!("{}:{}", id.as_title(), statistic), - (DEFAULT_FONT, 20), - ) + .caption(format!("{}:{}", id.as_title(), statistic), (DEFAULT_FONT, 20)) .set_label_area_size(LabelAreaPosition::Left, (5).percent_width().min(60)) .set_label_area_size(LabelAreaPosition::Bottom, (5).percent_height().min(40)) .build_cartesian_2d(x_min..x_max, y_range.clone()) @@ -238,21 +179,14 @@ fn rel_distribution( .unwrap(); chart - .draw_series(LineSeries::new( - xs.iter().zip(ys.iter()).map(|(x, y)| (*x, *y)), - DARK_BLUE, - )) + .draw_series(LineSeries::new(xs.iter().zip(ys.iter()).map(|(x, y)| (*x, *y)), DARK_BLUE)) .unwrap() .label("Bootstrap distribution") .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], DARK_BLUE)); chart .draw_series(AreaSeries::new( - xs.iter() - .zip(ys.iter()) - .skip(start) - .take(len) - .map(|(x, y)| (*x, *y)), + xs.iter().zip(ys.iter()).skip(start).take(len).map(|(x, y)| (*x, *y)), 0.0, DARK_BLUE.mix(0.25).filled().stroke_width(3), )) @@ -281,11 +215,7 @@ fn rel_distribution( .legend(|(x, y)| { Rectangle::new([(x, y - 5), (x + 20, y + 5)], DARK_RED.mix(0.25).filled()) }); - chart - .configure_series_labels() - .position(SeriesLabelPosition::UpperRight) - .draw() - .unwrap(); + chart.configure_series_labels().position(SeriesLabelPosition::UpperRight).draw().unwrap(); } pub(crate) fn rel_distributions( diff --git a/src/plot/plotters_backend/iteration_times.rs b/src/plot/plotters_backend/iteration_times.rs index 3ac4f1cc..2bb59172 100644 --- a/src/plot/plotters_backend/iteration_times.rs +++ 
b/src/plot/plotters_backend/iteration_times.rs @@ -52,11 +52,7 @@ pub(crate) fn iteration_times_figure( .legend(|(x, y)| Circle::new((x + 10, y), POINT_SIZE, DARK_BLUE.filled())); if title.is_some() { - chart - .configure_series_labels() - .position(SeriesLabelPosition::UpperLeft) - .draw() - .unwrap(); + chart.configure_series_labels().position(SeriesLabelPosition::UpperLeft).draw().unwrap(); } } @@ -129,10 +125,6 @@ pub(crate) fn iteration_times_comparison_figure( .legend(|(x, y)| Circle::new((x + 10, y), POINT_SIZE, DARK_RED.filled())); if title.is_some() { - chart - .configure_series_labels() - .position(SeriesLabelPosition::UpperLeft) - .draw() - .unwrap(); + chart.configure_series_labels().position(SeriesLabelPosition::UpperLeft).draw().unwrap(); } } diff --git a/src/plot/plotters_backend/mod.rs b/src/plot/plotters_backend/mod.rs index 4cd1b183..053f8c73 100644 --- a/src/plot/plotters_backend/mod.rs +++ b/src/plot/plotters_backend/mod.rs @@ -38,15 +38,9 @@ impl Plotter for PlottersBackend { fn pdf(&mut self, ctx: PlotContext<'_>, data: PlotData<'_>) { if let Some(cmp) = data.comparison { let (path, title) = if ctx.is_thumbnail { - ( - ctx.context.report_path(ctx.id, "relative_pdf_small.svg"), - None, - ) + (ctx.context.report_path(ctx.id, "relative_pdf_small.svg"), None) } else { - ( - ctx.context.report_path(ctx.id, "both/pdf.svg"), - Some(ctx.id.as_title()), - ) + (ctx.context.report_path(ctx.id, "both/pdf.svg"), Some(ctx.id.as_title())) }; pdf::pdf_comparison_figure( path.as_ref(), @@ -79,23 +73,16 @@ impl Plotter for PlottersBackend { fn regression(&mut self, ctx: PlotContext<'_>, data: PlotData<'_>) { let (title, path) = match (data.comparison.is_some(), ctx.is_thumbnail) { - (true, true) => ( - None, - ctx.context - .report_path(ctx.id, "relative_regression_small.svg"), - ), - (true, false) => ( - Some(ctx.id.as_title()), - ctx.context.report_path(ctx.id, "both/regression.svg"), - ), - (false, true) => ( - None, - ctx.context.report_path(ctx.id, "regression_small.svg"), - ), - (false, false) => ( - Some(ctx.id.as_title()), - ctx.context.report_path(ctx.id, "regression.svg"), - ), + (true, true) => { + (None, ctx.context.report_path(ctx.id, "relative_regression_small.svg")) + } + (true, false) => { + (Some(ctx.id.as_title()), ctx.context.report_path(ctx.id, "both/regression.svg")) + } + (false, true) => (None, ctx.context.report_path(ctx.id, "regression_small.svg")), + (false, false) => { + (Some(ctx.id.as_title()), ctx.context.report_path(ctx.id, "regression.svg")) + } }; if let Some(cmp) = data.comparison { @@ -122,23 +109,17 @@ impl Plotter for PlottersBackend { fn iteration_times(&mut self, ctx: PlotContext<'_>, data: PlotData<'_>) { let (title, path) = match (data.comparison.is_some(), ctx.is_thumbnail) { - (true, true) => ( - None, - ctx.context - .report_path(ctx.id, "relative_iteration_times_small.svg"), - ), + (true, true) => { + (None, ctx.context.report_path(ctx.id, "relative_iteration_times_small.svg")) + } (true, false) => ( Some(ctx.id.as_title()), ctx.context.report_path(ctx.id, "both/iteration_times.svg"), ), - (false, true) => ( - None, - ctx.context.report_path(ctx.id, "iteration_times_small.svg"), - ), - (false, false) => ( - Some(ctx.id.as_title()), - ctx.context.report_path(ctx.id, "iteration_times.svg"), - ), + (false, true) => (None, ctx.context.report_path(ctx.id, "iteration_times_small.svg")), + (false, false) => { + (Some(ctx.id.as_title()), ctx.context.report_path(ctx.id, "iteration_times.svg")) + } }; if let Some(cmp) = data.comparison { @@ -220,12 
+201,7 @@ impl Plotter for PlottersBackend { fn t_test(&mut self, ctx: PlotContext<'_>, data: PlotData<'_>) { let title = ctx.id.as_title(); let path = ctx.context.report_path(ctx.id, "change/t-test.svg"); - t_test::t_test( - path.as_path(), - title, - data.comparison.unwrap(), - convert_size(ctx.size), - ); + t_test::t_test(path.as_path(), title, data.comparison.unwrap(), convert_size(ctx.size)); } fn wait(&mut self) {} diff --git a/src/plot/plotters_backend/pdf.rs b/src/plot/plotters_backend/pdf.rs index e55de4e6..ef5dd691 100644 --- a/src/plot/plotters_backend/pdf.rs +++ b/src/plot/plotters_backend/pdf.rs @@ -19,10 +19,8 @@ pub(crate) fn pdf_comparison_figure( let unit = formatter.scale_values(typical, &mut scaled_base_avg_times); let scaled_base_avg_times = Sample::new(&scaled_base_avg_times); - let mut scaled_new_avg_times: Vec<f64> = (&measurements.avg_times as &Sample<f64>) - .iter() - .cloned() - .collect(); + let mut scaled_new_avg_times: Vec<f64> = + (&measurements.avg_times as &Sample<f64>).iter().cloned().collect(); let _ = formatter.scale_values(typical, &mut scaled_new_avg_times); let scaled_new_avg_times = Sample::new(&scaled_new_avg_times); @@ -184,10 +182,7 @@ pub(crate) fn pdf( let mean = scaled_avg_times.mean(); let iter_counts = measurements.iter_counts(); - let &max_iters = iter_counts - .iter() - .max_by_key(|&&iters| iters as u64) - .unwrap(); + let &max_iters = iter_counts.iter().max_by_key(|&&iters| iters as u64).unwrap(); let exponent = (max_iters.log10() / 3.).floor() as i32 * 3; let y_scale = 10f64.powi(-exponent); @@ -271,37 +266,28 @@ pub(crate) fn pdf( .unwrap(); use crate::stats::univariate::outliers::tukey::Label; - let mut draw_data_point_series = - |filter: &dyn Fn(&Label) -> bool, color: RGBAColor, name: &str| { - chart - .draw_series( - avg_times - .iter() - .zip(scaled_avg_times.iter()) - .zip(iter_counts.iter()) - .filter_map(|(((_, label), t), i)| { - if filter(&label) { - Some(Circle::new((*t, *i), POINT_SIZE, color.filled())) - } else { - None - } - }), - ) - .unwrap() - .label(name) - .legend(move |(x, y)| Circle::new((x + 10, y), POINT_SIZE, color.filled())); - }; - - draw_data_point_series( - &|l| !l.is_outlier(), - DARK_BLUE.to_rgba(), - "\"Clean\" sample", - ); - draw_data_point_series( - &|l| l.is_mild(), - RGBColor(255, 127, 0).to_rgba(), - "Mild outliers", - ); + let mut draw_data_point_series = |filter: &dyn Fn(&Label) -> bool, + color: RGBAColor, + name: &str| { + chart + .draw_series( + avg_times.iter().zip(scaled_avg_times.iter()).zip(iter_counts.iter()).filter_map( + |(((_, label), t), i)| { + if filter(&label) { + Some(Circle::new((*t, *i), POINT_SIZE, color.filled())) + } else { + None + } + }, + ), + ) + .unwrap() + .label(name) + .legend(move |(x, y)| Circle::new((x + 10, y), POINT_SIZE, color.filled())); + }; + + draw_data_point_series(&|l| !l.is_outlier(), DARK_BLUE.to_rgba(), "\"Clean\" sample"); + draw_data_point_series(&|l| l.is_mild(), RGBColor(255, 127, 0).to_rgba(), "Mild outliers"); draw_data_point_series(&|l| l.is_severe(), DARK_RED.to_rgba(), "Severe outliers"); chart.configure_series_labels().draw().unwrap(); } diff --git a/src/plot/plotters_backend/regression.rs b/src/plot/plotters_backend/regression.rs index 1a9adece..a1a873ed 100644 --- a/src/plot/plotters_backend/regression.rs +++ b/src/plot/plotters_backend/regression.rs @@ -84,10 +84,7 @@ pub(crate) fn regression_figure( .unwrap() .label("Linear regression") .legend(|(x, y)| { - PathElement::new( - vec![(x, y), (x + 20, y)], - DARK_BLUE.filled().stroke_width(2), - ) + 
PathElement::new(vec![(x, y), (x + 20, y)], DARK_BLUE.filled().stroke_width(2)) }); chart @@ -102,11 +99,7 @@ pub(crate) fn regression_figure( }); if title.is_some() { - chart - .configure_series_labels() - .position(SeriesLabelPosition::UpperLeft) - .draw() - .unwrap(); + chart.configure_series_labels().position(SeriesLabelPosition::UpperLeft).draw().unwrap(); } } @@ -133,23 +126,13 @@ pub(crate) fn regression_comparison_figure( }; let Estimate { - confidence_interval: - ConfidenceInterval { - lower_bound: base_lb, - upper_bound: base_ub, - .. - }, + confidence_interval: ConfidenceInterval { lower_bound: base_lb, upper_bound: base_ub, .. }, point_estimate: base_point, .. } = comparison.base_estimates.slope.as_ref().unwrap(); let Estimate { - confidence_interval: - ConfidenceInterval { - lower_bound: lb, - upper_bound: ub, - .. - }, + confidence_interval: ConfidenceInterval { lower_bound: lb, upper_bound: ub, .. }, point_estimate: point, .. } = measurements.absolute_estimates.slope.as_ref().unwrap(); @@ -218,17 +201,10 @@ pub(crate) fn regression_comparison_figure( .unwrap() .label("New Sample") .legend(|(x, y)| { - PathElement::new( - vec![(x, y), (x + 20, y)], - DARK_BLUE.filled().stroke_width(2), - ) + PathElement::new(vec![(x, y), (x + 20, y)], DARK_BLUE.filled().stroke_width(2)) }); if title.is_some() { - chart - .configure_series_labels() - .position(SeriesLabelPosition::UpperLeft) - .draw() - .unwrap(); + chart.configure_series_labels().position(SeriesLabelPosition::UpperLeft).draw().unwrap(); } } diff --git a/src/plot/plotters_backend/summary.rs b/src/plot/plotters_backend/summary.rs index 12cd8474..22791856 100644 --- a/src/plot/plotters_backend/summary.rs +++ b/src/plot/plotters_backend/summary.rs @@ -106,11 +106,7 @@ fn draw_line_comarision_figure<XR: AsRangedCoord<Value = f64>, YR: AsRangedCoord } } - chart - .configure_series_labels() - .position(SeriesLabelPosition::UpperLeft) - .draw() - .unwrap(); + chart.configure_series_labels().position(SeriesLabelPosition::UpperLeft).draw().unwrap(); } #[allow(clippy::type_complexity)] @@ -174,10 +170,7 @@ pub fn violin( }) .collect::<Vec<_>>(); - let mut xs = kdes - .iter() - .flat_map(|(_, x, _)| x.iter()) - .filter(|&&x| x > 0.); + let mut xs = kdes.iter().flat_map(|(_, x, _)| x.iter()).filter(|&&x| x > 0.); let (mut min, mut max) = { let &first = xs.next().unwrap(); (first, first) } diff --git a/src/plot/plotters_backend/t_test.rs b/src/plot/plotters_backend/t_test.rs index c575c2ff..b01742d8 100644 --- a/src/plot/plotters_backend/t_test.rs +++ b/src/plot/plotters_backend/t_test.rs @@ -26,13 +26,7 @@ pub(crate) fn t_test( .build_cartesian_2d(x_range, y_range.clone()) .unwrap(); - chart - .configure_mesh() - .disable_mesh() - .y_desc("Density") - .x_desc("t score") - .draw() - .unwrap(); + chart.configure_mesh().disable_mesh().y_desc("Density").x_desc("t score").draw().unwrap(); chart .draw_series(AreaSeries::new( diff --git a/src/report.rs b/src/report.rs index c5448fdb..c4525242 100644 --- a/src/report.rs +++ b/src/report.rs @@ -11,6 +11,7 @@ use crate::stats::univariate::Sample; use crate::stats::Distribution; use crate::{PlotConfiguration, Throughput}; use anes::{Attribute, ClearLine, Color, ResetAttributes, SetAttribute, SetForegroundColor}; +use serde::{Deserialize, Serialize}; use std::cmp; use std::collections::HashSet; use std::fmt; @@ -81,10 +82,7 @@ fn truncate_to_character_boundary(s: &mut String, max_len: usize) { } pub fn make_filename_safe(string: &str) -> String { - let mut string = string.replace( - &['?', '"', '/', '\\', '*', '<', '>', ':', '|', '^'][..], 
- "_", - ); + let mut string = string.replace(&['?', '"', '/', '\\', '*', '<', '>', ':', '|', '^'][..], "_"); // Truncate to last character boundary before max length... truncate_to_character_boundary(&mut string, MAX_DIRECTORY_NAME_LEN); @@ -135,28 +133,16 @@ impl BenchmarkId { make_filename_safe(func), make_filename_safe(val) ), - (Some(func), &None) => format!( - "{}/{}", - make_filename_safe(&group_id), - make_filename_safe(func) - ), - (&None, Some(val)) => format!( - "{}/{}", - make_filename_safe(&group_id), - make_filename_safe(val) - ), + (Some(func), &None) => { + format!("{}/{}", make_filename_safe(&group_id), make_filename_safe(func)) + } + (&None, Some(val)) => { + format!("{}/{}", make_filename_safe(&group_id), make_filename_safe(val)) + } (&None, &None) => make_filename_safe(&group_id), }; - BenchmarkId { - group_id, - function_id, - value_str, - throughput, - full_id, - directory_name, - title, - } + BenchmarkId { group_id, function_id, value_str, throughput, full_id, directory_name, title } } pub fn id(&self) -> &str { @@ -176,10 +162,7 @@ impl BenchmarkId { Some(Throughput::Bytes(n)) | Some(Throughput::Elements(n)) | Some(Throughput::BytesDecimal(n)) => Some(n as f64), - None => self - .value_str - .as_ref() - .and_then(|string| string.parse::().ok()), + None => self.value_str.as_ref().and_then(|string| string.parse::().ok()), } } @@ -386,11 +369,7 @@ impl CliReport { enable_text_coloring: bool, verbosity: CliVerbosity, ) -> CliReport { - CliReport { - enable_text_overwrite, - enable_text_coloring, - verbosity, - } + CliReport { enable_text_overwrite, enable_text_coloring, verbosity } } fn text_overwrite(&self) { @@ -636,15 +615,9 @@ impl Report for CliReport { println!( "{}time: [{} {} {}] (p = {:.2} {} {:.2})", " ".repeat(24), - self.faint(format::change( - mean_est.confidence_interval.lower_bound, - true - )), + self.faint(format::change(mean_est.confidence_interval.lower_bound, true)), point_estimate_str, - self.faint(format::change( - mean_est.confidence_interval.upper_bound, - true - )), + self.faint(format::change(mean_est.confidence_interval.upper_bound, true)), comp.p_value, if different_mean { "<" } else { ">" }, comp.significance_threshold @@ -666,15 +639,9 @@ impl Report for CliReport { println!( "{}change: [{} {} {}] (p = {:.2} {} {:.2})", " ".repeat(24), - self.faint(format::change( - mean_est.confidence_interval.lower_bound, - true - )), + self.faint(format::change(mean_est.confidence_interval.lower_bound, true)), point_estimate_str, - self.faint(format::change( - mean_est.confidence_interval.upper_bound, - true - )), + self.faint(format::change(mean_est.confidence_interval.upper_bound, true)), comp.p_value, if different_mean { "<" } else { ">" }, comp.significance_threshold diff --git a/src/routine.rs b/src/routine.rs index 88e4318b..2c7be299 100644 --- a/src/routine.rs +++ b/src/routine.rs @@ -32,9 +32,7 @@ pub(crate) trait Routine { time: Duration, parameter: &T, ) { - criterion - .report - .profile(id, report_context, time.as_nanos() as f64); + criterion.report.profile(id, report_context, time.as_nanos() as f64); let mut profile_path = report_context.output_directory.clone(); if (*crate::CARGO_CRITERION_CONNECTION).is_some() { @@ -46,10 +44,7 @@ pub(crate) trait Routine { profile_path.push(id.as_directory_name()); profile_path.push("profile"); } - criterion - .profiler - .borrow_mut() - .start_profiling(id.id(), &profile_path); + criterion.profiler.borrow_mut().start_profiling(id.id(), &profile_path); let time = time.as_nanos() as u64; @@ -71,10 +66,7 @@ 
pub(crate) trait Routine { self.bench(measurement, &[iters], parameter); } - criterion - .profiler - .borrow_mut() - .stop_profiling(id.id(), &profile_path); + criterion.profiler.borrow_mut().stop_profiling(id.id(), &profile_path); criterion.report.terminated(id, report_context); } @@ -110,10 +102,7 @@ pub(crate) trait Routine { // Main data collection loop. loop { - let t_now = *self - .bench(measurement, &[n * 2], parameter) - .first() - .unwrap(); + let t_now = *self.bench(measurement, &[n * 2], parameter).first().unwrap(); let t = (t_prev + 2. * t_now) / 5.; let stdev = (sq(t_prev - t) + sq(t_now - 2. * t)).sqrt(); // println!("Sample: {} {:.2}", n, stdev / t); @@ -132,16 +121,11 @@ pub(crate) trait Routine { let wu = config.warm_up_time; let m_ns = config.measurement_time.as_nanos(); - criterion - .report - .warmup(id, report_context, wu.as_nanos() as f64); + criterion.report.warmup(id, report_context, wu.as_nanos() as f64); if let Some(conn) = &criterion.connection { - conn.send(&OutgoingMessage::Warmup { - id: id.into(), - nanos: wu.as_nanos() as f64, - }) - .unwrap(); + conn.send(&OutgoingMessage::Warmup { id: id.into(), nanos: wu.as_nanos() as f64 }) + .unwrap(); } let (wu_elapsed, wu_iters) = self.warm_up(measurement, wu, parameter); @@ -159,17 +143,11 @@ pub(crate) trait Routine { let n = config.sample_size as u64; - let actual_sampling_mode = config - .sampling_mode - .choose_sampling_mode(met, n, m_ns as f64); + let actual_sampling_mode = config.sampling_mode.choose_sampling_mode(met, n, m_ns as f64); let m_iters = actual_sampling_mode.iteration_counts(met, n, &config.measurement_time); - let expected_ns = m_iters - .iter() - .copied() - .map(|count| count as f64 * met) - .sum(); + let expected_ns = m_iters.iter().copied().map(|count| count as f64 * met).sum(); // Use saturating_add to handle overflow. 
let mut total_iters = 0u64; @@ -177,9 +155,7 @@ total_iters = total_iters.saturating_add(count); } - criterion - .report - .measurement_start(id, report_context, n, expected_ns, total_iters); + criterion.report.measurement_start(id, report_context, n, expected_ns, total_iters); if let Some(conn) = &criterion.connection { conn.send(&OutgoingMessage::MeasurementStart { @@ -195,11 +171,7 @@ let m_iters_f: Vec<f64> = m_iters.iter().map(|&x| x as f64).collect(); - ( - actual_sampling_mode, - m_iters_f.into_boxed_slice(), - m_elapsed.into_boxed_slice(), - ) + (actual_sampling_mode, m_iters_f.into_boxed_slice(), m_elapsed.into_boxed_slice()) } } @@ -219,11 +191,7 @@ where T: ?Sized, { pub fn new(f: F) -> Function<F, M, T> { - Function { - f, - _phantom: PhantomData, - _phamtom2: PhantomData, - } + Function { f, _phantom: PhantomData, _phamtom2: PhantomData } } } diff --git a/src/stats/bivariate/mod.rs b/src/stats/bivariate/mod.rs index 2351c9ef..98b07ed3 100644 --- a/src/stats/bivariate/mod.rs +++ b/src/stats/bivariate/mod.rs @@ -36,10 +36,7 @@ impl<'a, X, Y> Data<'a, X, Y> { /// Iterate over the data set pub fn iter(&self) -> Pairs<'a, X, Y> { - Pairs { - data: *self, - state: 0, - } + Pairs { data: *self, state: 0 } } } @@ -77,10 +74,7 @@ where { (0..nresamples) .into_par_iter() - .map_init( - || Resamples::new(*self), - |resamples, _| statistic(resamples.next()), - ) + .map_init(|| Resamples::new(*self), |resamples, _| statistic(resamples.next())) .fold( || T::Builder::new(0), |mut sub_distributions, sample| { diff --git a/src/stats/bivariate/resamples.rs b/src/stats/bivariate/resamples.rs index e254dc79..0f4b4dd1 100644 --- a/src/stats/bivariate/resamples.rs +++ b/src/stats/bivariate/resamples.rs @@ -19,11 +19,7 @@ where Y: 'a + Float, { pub fn new(data: Data<'a, X, Y>) -> Resamples<'a, X, Y> { - Resamples { - rng: new_rng(), - data: (data.x(), data.y()), - stage: None, - } + Resamples { rng: new_rng(), data: (data.x(), data.y()), stage: None } } pub fn next(&mut self) -> Data<'_, X, Y> { diff --git a/src/stats/mod.rs b/src/stats/mod.rs index 4f926deb..a09c32a1 100644 --- a/src/stats/mod.rs +++ b/src/stats/mod.rs @@ -97,9 +97,7 @@ fn dot<A>(xs: &[A], ys: &[A]) -> A where A: Float, { - xs.iter() - .zip(ys) - .fold(A::cast(0), |acc, (&x, &y)| acc + x * y) + xs.iter().zip(ys).fold(A::cast(0), |acc, (&x, &y)| acc + x * y) } fn sum<A>(xs: &[A]) -> A diff --git a/src/stats/tuple.rs b/src/stats/tuple.rs index 1c075159..c019b129 100644 --- a/src/stats/tuple.rs +++ b/src/stats/tuple.rs @@ -110,10 +110,7 @@ where } fn complete(self) -> (Distribution<A>, Distribution<B>) { - ( - Distribution(self.0.into_boxed_slice()), - Distribution(self.1.into_boxed_slice()), - ) + (Distribution(self.0.into_boxed_slice()), Distribution(self.1.into_boxed_slice())) } } @@ -144,11 +141,7 @@ where type Item = (A, B, C); fn new(size: usize) -> (Vec<A>, Vec<B>, Vec<C>) { - ( - Vec::with_capacity(size), - Vec::with_capacity(size), - Vec::with_capacity(size), - ) + (Vec::with_capacity(size), Vec::with_capacity(size), Vec::with_capacity(size)) } fn push(&mut self, tuple: (A, B, C)) { @@ -179,22 +172,12 @@ where C: Copy, D: Copy, { - type Distributions = ( - Distribution<A>, - Distribution<B>, - Distribution<C>, - Distribution<D>, - ); + type Distributions = (Distribution<A>, Distribution<B>, Distribution<C>, Distribution<D>); type Builder = (Vec<A>, Vec<B>, Vec<C>, Vec<D>); } impl<A, B, C, D> TupledDistributions - for ( - Distribution<A>, - Distribution<B>, - Distribution<C>, - Distribution<D>, - ) + for (Distribution<A>, Distribution<B>, Distribution<C>, Distribution<D>) where 
A: Copy, B: Copy, @@ -235,14 +218,7 @@ where (self.3).append(&mut other.3); } - fn complete( - self, - ) -> ( - Distribution<A>, - Distribution<B>, - Distribution<C>, - Distribution<D>, - ) { + fn complete(self) -> (Distribution<A>, Distribution<B>, Distribution<C>, Distribution<D>) { ( Distribution(self.0.into_boxed_slice()), Distribution(self.1.into_boxed_slice()), diff --git a/src/stats/univariate/kde/mod.rs b/src/stats/univariate/kde/mod.rs index c54de55a..fb38c497 100644 --- a/src/stats/univariate/kde/mod.rs +++ b/src/stats/univariate/kde/mod.rs @@ -27,11 +27,7 @@ where /// Creates a new kernel density estimator from the `sample`, using a kernel and estimating /// the bandwidth using the method `bw` pub fn new(sample: &'a Sample<A>, kernel: K, bw: Bandwidth) -> Kde<'a, A, K> { - Kde { - bandwidth: bw.estimate(sample), - kernel, - sample, - } + Kde { bandwidth: bw.estimate(sample), kernel, sample } } /// Returns the bandwidth used by the estimator @@ -49,9 +45,7 @@ where #[cfg(not(feature = "rayon"))] let iter = xs.iter(); - iter.map(|&x| self.estimate(x)) - .collect::<Vec<_>>() - .into_boxed_slice() + iter.map(|&x| self.estimate(x)).collect::<Vec<_>>().into_boxed_slice() } /// Estimates the probability density of `x` @@ -61,9 +55,7 @@ where let h = self.bandwidth; let n = A::cast(slice.len()); - let sum = slice - .iter() - .fold(_0, |acc, &x_i| acc + self.kernel.evaluate((x - x_i) / h)); + let sum = slice.iter().fold(_0, |acc, &x_i| acc + self.kernel.evaluate((x - x_i) / h)); sum / (h * n) } diff --git a/src/stats/univariate/outliers/tukey.rs b/src/stats/univariate/outliers/tukey.rs index 12ed304b..4f024fc6 100644 --- a/src/stats/univariate/outliers/tukey.rs +++ b/src/stats/univariate/outliers/tukey.rs @@ -104,10 +104,7 @@ where /// Returns an iterator over the labeled data pub fn iter(&self) -> Iter<'a, A> { - Iter { - fences: self.fences, - iter: self.sample.iter(), - } + Iter { fences: self.fences, iter: self.sample.iter() } } } @@ -265,12 +262,7 @@ where let k_s = A::cast(3); LabeledSample { - fences: ( - q1 - k_s * iqr, - q1 - k_m * iqr, - q3 + k_m * iqr, - q3 + k_s * iqr, - ), + fences: (q1 - k_s * iqr, q1 - k_m * iqr, q3 + k_m * iqr, q3 + k_s * iqr), sample, } } diff --git a/src/stats/univariate/percentiles.rs b/src/stats/univariate/percentiles.rs index 39def18e..62573999 100644 --- a/src/stats/univariate/percentiles.rs +++ b/src/stats/univariate/percentiles.rs @@ -67,10 +67,6 @@ where /// Returns the 25th, 50th and 75th percentiles pub fn quartiles(&self) -> (A, A, A) { - ( - self.at(A::cast(25)), - self.at(A::cast(50)), - self.at(A::cast(75)), - ) + (self.at(A::cast(25)), self.at(A::cast(50)), self.at(A::cast(75))) } } diff --git a/src/stats/univariate/resamples.rs b/src/stats/univariate/resamples.rs index 923669d5..398357fa 100644 --- a/src/stats/univariate/resamples.rs +++ b/src/stats/univariate/resamples.rs @@ -21,11 +21,7 @@ where pub fn new(sample: &'a Sample<A>) -> Resamples<'a, A> { let slice = sample; - Resamples { - rng: new_rng(), - sample: slice, - stage: None, - } + Resamples { rng: new_rng(), sample: slice, stage: None } } pub fn next(&mut self) -> &Sample<A> { diff --git a/src/stats/univariate/sample.rs b/src/stats/univariate/sample.rs index 6fbb4fb2..51f5cb61 100644 --- a/src/stats/univariate/sample.rs +++ b/src/stats/univariate/sample.rs @@ -190,10 +190,7 @@ where let mean = mean.unwrap_or_else(|| self.mean()); let slice = self; - let sum = slice - .iter() - .map(|&x| (x - mean).powi(2)) - .fold(A::cast(0), Add::add); + let sum = slice.iter().map(|&x| (x - mean).powi(2)).fold(A::cast(0), Add::add); sum / 
A::cast(slice.len() - 1) } @@ -215,10 +212,7 @@ where { (0..nresamples) .into_par_iter() - .map_init( - || Resamples::new(self), - |resamples, _| statistic(resamples.next()), - ) + .map_init(|| Resamples::new(self), |resamples, _| statistic(resamples.next())) .fold( || T::Builder::new(0), |mut sub_distributions, sample| { diff --git a/tests/criterion_tests.rs b/tests/criterion_tests.rs index 6a009fa5..eef8526e 100644 --- a/tests/criterion_tests.rs +++ b/tests/criterion_tests.rs @@ -1,6 +1,6 @@ #[cfg(feature = "plotters")] -use criterion::SamplingMode; -use criterion::{ +use criterion2::SamplingMode; +use criterion2::{ criterion_group, criterion_main, profiler::Profiler, BatchSize, BenchmarkId, Criterion, }; use serde_json::value::Value; @@ -46,19 +46,13 @@ impl Counter { } impl Default for Counter { fn default() -> Counter { - Counter { - counter: Rc::new(RefCell::new(0)), - } + Counter { counter: Rc::new(RefCell::new(0)) } } } fn verify_file(dir: &Path, path: &str) -> PathBuf { let full_path = dir.join(path); - assert!( - full_path.is_file(), - "File {:?} does not exist or is not a file", - full_path - ); + assert!(full_path.is_file(), "File {:?} does not exist or is not a file", full_path); let metadata = full_path.metadata().unwrap(); assert!(metadata.len() > 0); full_path @@ -117,9 +111,7 @@ fn test_creates_directory() { #[test] fn test_without_plots() { let dir = temp_dir(); - short_benchmark(&dir) - .without_plots() - .bench_function("test_without_plots", |b| b.iter(|| 10)); + short_benchmark(&dir).without_plots().bench_function("test_without_plots", |b| b.iter(|| 10)); for entry in WalkDir::new(dir.path().join("test_without_plots")) { let entry = entry.ok(); @@ -193,12 +185,10 @@ fn test_sample_size() { let counter = Counter::default(); let clone = counter.clone(); - short_benchmark(&dir) - .sample_size(50) - .bench_function("test_sample_size", move |b| { - clone.count(); - b.iter(|| 10) - }); + short_benchmark(&dir).sample_size(50).bench_function("test_sample_size", move |b| { + clone.count(); + b.iter(|| 10) + }); // This function will be called more than sample_size times because of the // warmup. 
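The `test_sample_size` hunk above uses a shared counter to observe how often Criterion invokes a benchmark routine. A minimal self-contained sketch of that pattern follows, assuming only the `criterion2` builder API exercised in this diff (`sample_size`, `bench_function`); the `"counted"` benchmark name is illustrative, not part of the test suite:

```rust
// Sketch of the shared-counter pattern from test_sample_size; assumes the
// public criterion2 API shown in this diff. "counted" is a made-up name.
use std::cell::RefCell;
use std::rc::Rc;

use criterion2::Criterion;

fn main() {
    let counter = Rc::new(RefCell::new(0u64));
    let clone = Rc::clone(&counter);

    Criterion::default().sample_size(50).bench_function("counted", move |b| {
        // Runs once per warm-up/measurement pass, not once per iteration,
        // so the final count exceeds what sample_size alone suggests.
        *clone.borrow_mut() += 1;
        b.iter(|| 10)
    });

    assert!(*counter.borrow() >= 1);
}
```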
@@ -211,21 +201,23 @@ fn test_warmup_time() { let counter1 = Counter::default(); let clone = counter1.clone(); - short_benchmark(&dir) - .warm_up_time(Duration::from_millis(100)) - .bench_function("test_warmup_time_1", move |b| { + short_benchmark(&dir).warm_up_time(Duration::from_millis(100)).bench_function( + "test_warmup_time_1", + move |b| { clone.count(); b.iter(|| 10) - }); + }, + ); let counter2 = Counter::default(); let clone = counter2.clone(); - short_benchmark(&dir) - .warm_up_time(Duration::from_millis(2000)) - .bench_function("test_warmup_time_2", move |b| { + short_benchmark(&dir).warm_up_time(Duration::from_millis(2000)).bench_function( + "test_warmup_time_2", + move |b| { clone.count(); b.iter(|| 10) - }); + }, + ); assert!(counter1.read() < counter2.read()); } @@ -274,15 +266,11 @@ fn test_timing_loops() { let dir = temp_dir(); let mut c = short_benchmark(&dir); let mut group = c.benchmark_group("test_timing_loops"); - group.bench_function("iter_with_setup", |b| { - b.iter_with_setup(|| vec![10], |v| v[0]) - }); + group.bench_function("iter_with_setup", |b| b.iter_with_setup(|| vec![10], |v| v[0])); group.bench_function("iter_with_large_setup", |b| { b.iter_batched(|| vec![10], |v| v[0], BatchSize::NumBatches(1)) }); - group.bench_function("iter_with_large_drop", |b| { - b.iter_with_large_drop(|| vec![10; 100]) - }); + group.bench_function("iter_with_large_drop", |b| b.iter_with_large_drop(|| vec![10; 100])); group.bench_function("iter_batched_small", |b| { b.iter_batched(|| vec![10], |v| v[0], BatchSize::SmallInput) }); @@ -460,9 +448,7 @@ fn test_benchmark_group_without_input() { fn test_criterion_doesnt_panic_if_measured_time_is_zero() { let dir = temp_dir(); let mut c = short_benchmark(&dir); - c.bench_function("zero_time", |bencher| { - bencher.iter_custom(|_iters| Duration::new(0, 0)) - }); + c.bench_function("zero_time", |bencher| bencher.iter_custom(|_iters| Duration::new(0, 0))); } mod macros { @@ -545,14 +531,10 @@ impl Profiler for TestProfiler { fn test_profiler_called() { let started = Rc::new(Cell::new(0u32)); let stopped = Rc::new(Cell::new(0u32)); - let profiler = TestProfiler { - started: started.clone(), - stopped: stopped.clone(), - }; + let profiler = TestProfiler { started: started.clone(), stopped: stopped.clone() }; let dir = temp_dir(); - let mut criterion = short_benchmark(&dir) - .with_profiler(profiler) - .profile_time(Some(Duration::from_secs(1))); + let mut criterion = + short_benchmark(&dir).with_profiler(profiler).profile_time(Some(Duration::from_secs(1))); criterion.bench_function("profile_test", |b| b.iter(|| 10)); assert_eq!(1, started.get()); assert_eq!(1, stopped.get());
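The closing hunks wire a `TestProfiler` into Criterion via `with_profiler` and `profile_time`. As a usage sketch, here is a hypothetical `PrintingProfiler` built the same way; the `Profiler` trait methods and the builder calls mirror the test above, while the type and the `"profiled"` benchmark name are invented for illustration:

```rust
// Sketch of a user-supplied profiler, mirroring TestProfiler from the test
// above. PrintingProfiler is a hypothetical example type.
use std::path::Path;
use std::time::Duration;

use criterion2::{profiler::Profiler, Criterion};

struct PrintingProfiler;

impl Profiler for PrintingProfiler {
    fn start_profiling(&mut self, benchmark_id: &str, benchmark_dir: &Path) {
        // Called once before each benchmark's profiled run.
        println!("start: {} -> {}", benchmark_id, benchmark_dir.display());
    }

    fn stop_profiling(&mut self, benchmark_id: &str, _benchmark_dir: &Path) {
        // Called once after the profiled run finishes.
        println!("stop: {}", benchmark_id);
    }
}

fn main() {
    // With profile_time set, each benchmark runs for the given duration under
    // the profiler instead of going through normal statistics collection.
    Criterion::default()
        .with_profiler(PrintingProfiler)
        .profile_time(Some(Duration::from_secs(1)))
        .bench_function("profiled", |b| b.iter(|| 10));
}
```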