From 21d4850dfc6e5541a401cc6fd0d51ec2bfad1da8 Mon Sep 17 00:00:00 2001 From: Samuel Tardieu Date: Fri, 29 Mar 2024 06:57:14 +0100 Subject: [PATCH 1/7] Remove redundant imports --- src/bencher.rs | 1 - src/lib.rs | 3 +-- src/plot/gnuplot_backend/distributions.rs | 7 +------ src/plot/gnuplot_backend/iteration_times.rs | 6 +----- src/plot/gnuplot_backend/pdf.rs | 4 +--- src/plot/gnuplot_backend/regression.rs | 7 +------ src/plot/gnuplot_backend/t_test.rs | 5 +---- src/plot/plotters_backend/distributions.rs | 3 +-- src/plot/plotters_backend/pdf.rs | 4 +--- src/plot/plotters_backend/regression.rs | 1 - src/plot/plotters_backend/t_test.rs | 1 - src/stats/test.rs | 3 +-- src/stats/univariate/outliers/tukey.rs | 1 - src/stats/univariate/percentiles.rs | 2 +- 14 files changed, 10 insertions(+), 38 deletions(-) diff --git a/src/bencher.rs b/src/bencher.rs index 5baebc40f..b2bd971a8 100644 --- a/src/bencher.rs +++ b/src/bencher.rs @@ -1,4 +1,3 @@ -use std::iter::IntoIterator; use std::time::Duration; use std::time::Instant; diff --git a/src/lib.rs b/src/lib.rs index 5df25a345..5ebfae1a5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -74,7 +74,6 @@ mod stats; use std::cell::RefCell; use std::collections::HashSet; -use std::default::Default; use std::env; use std::io::{stdout, IsTerminal}; use std::net::TcpStream; @@ -1171,7 +1170,7 @@ https://bheisler.github.io/criterion.rs/book/faq.html /// // Now we can perform benchmarks with this group /// group.bench_function("Bench 1", |b| b.iter(|| 1 )); /// group.bench_function("Bench 2", |b| b.iter(|| 2 )); - /// + /// /// group.finish(); /// } /// criterion_group!(benches, bench_simple); diff --git a/src/plot/gnuplot_backend/distributions.rs b/src/plot/gnuplot_backend/distributions.rs index 1ccbc1a25..bf3dd2d92 100644 --- a/src/plot/gnuplot_backend/distributions.rs +++ b/src/plot/gnuplot_backend/distributions.rs @@ -1,7 +1,3 @@ -use std::iter; -use std::process::Child; - -use crate::stats::univariate::Sample; use crate::stats::Distribution; use criterion_plot::prelude::*; @@ -9,8 +5,7 @@ use super::*; use crate::estimate::Estimate; use crate::estimate::Statistic; use crate::kde; -use crate::measurement::ValueFormatter; -use crate::report::{BenchmarkId, ComparisonData, MeasurementData, ReportContext}; +use crate::report::{ComparisonData, MeasurementData, ReportContext}; fn abs_distribution( id: &BenchmarkId, diff --git a/src/plot/gnuplot_backend/iteration_times.rs b/src/plot/gnuplot_backend/iteration_times.rs index 4db4de8d5..4d3547063 100644 --- a/src/plot/gnuplot_backend/iteration_times.rs +++ b/src/plot/gnuplot_backend/iteration_times.rs @@ -1,11 +1,7 @@ -use std::process::Child; - use criterion_plot::prelude::*; use super::*; -use crate::report::{BenchmarkId, ComparisonData, MeasurementData, ReportContext}; - -use crate::measurement::ValueFormatter; +use crate::report::{ComparisonData, MeasurementData, ReportContext}; fn iteration_times_figure( formatter: &dyn ValueFormatter, diff --git a/src/plot/gnuplot_backend/pdf.rs b/src/plot/gnuplot_backend/pdf.rs index a0b85c7aa..385d7a5dc 100644 --- a/src/plot/gnuplot_backend/pdf.rs +++ b/src/plot/gnuplot_backend/pdf.rs @@ -1,8 +1,6 @@ use super::*; use crate::kde; -use crate::measurement::ValueFormatter; -use crate::report::{BenchmarkId, ComparisonData, MeasurementData, ReportContext}; -use std::process::Child; +use crate::report::{ComparisonData, MeasurementData, ReportContext}; pub(crate) fn pdf( id: &BenchmarkId, diff --git a/src/plot/gnuplot_backend/regression.rs b/src/plot/gnuplot_backend/regression.rs index 
82de357c4..d06306923 100644 --- a/src/plot/gnuplot_backend/regression.rs +++ b/src/plot/gnuplot_backend/regression.rs @@ -1,16 +1,11 @@ -use std::process::Child; - use crate::stats::bivariate::regression::Slope; use criterion_plot::prelude::*; use super::*; -use crate::report::{BenchmarkId, ComparisonData, MeasurementData, ReportContext}; -use crate::stats::bivariate::Data; +use crate::report::{ComparisonData, MeasurementData, ReportContext}; use crate::estimate::{ConfidenceInterval, Estimate}; -use crate::measurement::ValueFormatter; - fn regression_figure( formatter: &dyn ValueFormatter, measurements: &MeasurementData<'_>, diff --git a/src/plot/gnuplot_backend/t_test.rs b/src/plot/gnuplot_backend/t_test.rs index 47b4a110e..51ed567ad 100644 --- a/src/plot/gnuplot_backend/t_test.rs +++ b/src/plot/gnuplot_backend/t_test.rs @@ -1,11 +1,8 @@ -use std::iter; -use std::process::Child; - use criterion_plot::prelude::*; use super::*; use crate::kde; -use crate::report::{BenchmarkId, ComparisonData, MeasurementData, ReportContext}; +use crate::report::{ComparisonData, MeasurementData, ReportContext}; pub(crate) fn t_test( id: &BenchmarkId, diff --git a/src/plot/plotters_backend/distributions.rs b/src/plot/plotters_backend/distributions.rs index 8de114058..04f7d42c8 100644 --- a/src/plot/plotters_backend/distributions.rs +++ b/src/plot/plotters_backend/distributions.rs @@ -1,8 +1,7 @@ use super::*; use crate::estimate::Estimate; use crate::estimate::Statistic; -use crate::measurement::ValueFormatter; -use crate::report::{BenchmarkId, MeasurementData, ReportContext}; +use crate::report::ReportContext; use crate::stats::Distribution; fn abs_distribution( diff --git a/src/plot/plotters_backend/pdf.rs b/src/plot/plotters_backend/pdf.rs index e55de4e6e..e1f06edfc 100644 --- a/src/plot/plotters_backend/pdf.rs +++ b/src/plot/plotters_backend/pdf.rs @@ -1,8 +1,6 @@ use super::*; -use crate::measurement::ValueFormatter; -use crate::report::{BenchmarkId, ComparisonData, MeasurementData, ReportContext}; +use crate::report::ReportContext; use plotters::data; -use plotters::style::RGBAColor; use std::path::Path; pub(crate) fn pdf_comparison_figure( diff --git a/src/plot/plotters_backend/regression.rs b/src/plot/plotters_backend/regression.rs index 1a9adece0..ba1ad7ca3 100644 --- a/src/plot/plotters_backend/regression.rs +++ b/src/plot/plotters_backend/regression.rs @@ -4,7 +4,6 @@ use std::path::Path; use crate::estimate::{ConfidenceInterval, Estimate}; use crate::stats::bivariate::regression::Slope; -use crate::stats::bivariate::Data; pub(crate) fn regression_figure( title: Option<&str>, diff --git a/src/plot/plotters_backend/t_test.rs b/src/plot/plotters_backend/t_test.rs index c575c2ff6..9f29ead54 100644 --- a/src/plot/plotters_backend/t_test.rs +++ b/src/plot/plotters_backend/t_test.rs @@ -1,5 +1,4 @@ use super::*; -use crate::report::ComparisonData; use std::path::Path; pub(crate) fn t_test( diff --git a/src/stats/test.rs b/src/stats/test.rs index 9e13f3084..cc7e3fabb 100644 --- a/src/stats/test.rs +++ b/src/stats/test.rs @@ -1,6 +1,5 @@ -use rand::distributions::{Distribution, Standard}; +use rand::distributions::Standard; use rand::prelude::*; -use rand::rngs::StdRng; pub fn vec(size: usize, start: usize) -> Option> where diff --git a/src/stats/univariate/outliers/tukey.rs b/src/stats/univariate/outliers/tukey.rs index 12ed304bd..9e8ad0eca 100644 --- a/src/stats/univariate/outliers/tukey.rs +++ b/src/stats/univariate/outliers/tukey.rs @@ -37,7 +37,6 @@ //! x: "severe" outlier //! 
``` -use std::iter::IntoIterator; use std::ops::{Deref, Index}; use std::slice; diff --git a/src/stats/univariate/percentiles.rs b/src/stats/univariate/percentiles.rs index 39def18e7..ba103ad0b 100644 --- a/src/stats/univariate/percentiles.rs +++ b/src/stats/univariate/percentiles.rs @@ -1,5 +1,5 @@ use crate::stats::float::Float; -use cast::{self, usize}; +use cast::usize; /// A "view" into the percentiles of a sample pub struct Percentiles(Box<[A]>) From 267f947834c5d51677a0a8d3412bc482c4443259 Mon Sep 17 00:00:00 2001 From: Samuel Tardieu Date: Fri, 29 Mar 2024 07:01:48 +0100 Subject: [PATCH 2/7] Remove unused trait --- src/plot/gnuplot_backend/mod.rs | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/src/plot/gnuplot_backend/mod.rs b/src/plot/gnuplot_backend/mod.rs index 27cc48be3..e7c7656fe 100644 --- a/src/plot/gnuplot_backend/mod.rs +++ b/src/plot/gnuplot_backend/mod.rs @@ -52,20 +52,6 @@ fn debug_script(path: &Path, figure: &Figure) { } } -/// Private -trait Append { - /// Private - fn append_(self, item: T) -> Self; -} - -// NB I wish this was in the standard library -impl Append for Vec { - fn append_(mut self, item: T) -> Vec { - self.push(item); - self - } -} - #[derive(Default)] pub(crate) struct Gnuplot { process_list: Vec, From e5937e42dafe24e0ed64d6fbfdbbccac3504f205 Mon Sep 17 00:00:00 2001 From: Samuel Tardieu Date: Fri, 29 Mar 2024 07:11:47 +0100 Subject: [PATCH 3/7] Do not use deprecated "cargo-clippy" feature check Lints starting with "clippy::" are already recognized as being applicable to Clippy only. --- plot/src/data.rs | 2 +- plot/src/lib.rs | 6 +++--- plot/src/proxy.rs | 8 ++++---- src/analysis/compare.rs | 2 +- src/lib.rs | 7 ++----- src/plot/gnuplot_backend/summary.rs | 2 +- src/report.rs | 2 +- src/stats/bivariate/mod.rs | 2 +- src/stats/bivariate/resamples.rs | 2 +- src/stats/univariate/mod.rs | 2 +- src/stats/univariate/outliers/tukey.rs | 6 +++--- src/stats/univariate/resamples.rs | 2 +- src/stats/univariate/sample.rs | 2 +- 13 files changed, 21 insertions(+), 24 deletions(-) diff --git a/plot/src/data.rs b/plot/src/data.rs index 20ed3d41d..054e1f379 100644 --- a/plot/src/data.rs +++ b/plot/src/data.rs @@ -155,7 +155,7 @@ where { type Scale = (f64, f64, f64, f64, f64); - #[cfg_attr(feature = "cargo-clippy", allow(clippy::many_single_char_names))] + #[allow(clippy::many_single_char_names)] fn append_to(self, buffer: &mut Vec, scale: (f64, f64, f64, f64, f64)) { let (a, b, c, d, e) = self; diff --git a/plot/src/lib.rs b/plot/src/lib.rs index 174765e68..c863e04cf 100644 --- a/plot/src/lib.rs +++ b/plot/src/lib.rs @@ -366,10 +366,10 @@ #![deny(bare_trait_objects)] // This lint has lots of false positives ATM, see // https://github.com/Manishearth/rust-clippy/issues/761 -#![cfg_attr(feature = "cargo-clippy", allow(clippy::new_without_default))] +#![allow(clippy::new_without_default)] // False positives with images -#![cfg_attr(feature = "cargo-clippy", allow(clippy::doc_markdown))] -#![cfg_attr(feature = "cargo-clippy", allow(clippy::many_single_char_names))] +#![allow(clippy::doc_markdown)] +#![allow(clippy::many_single_char_names)] extern crate cast; #[macro_use] diff --git a/plot/src/proxy.rs b/plot/src/proxy.rs index 401b7f923..f74830896 100644 --- a/plot/src/proxy.rs +++ b/plot/src/proxy.rs @@ -7,7 +7,7 @@ use std::borrow::Cow; use std::path::Path; /// Generic constructor for `Font` -#[cfg_attr(feature = "cargo-clippy", allow(clippy::inline_always))] +#[allow(clippy::inline_always)] #[inline(always)] pub fn Font(string: S) -> FontType where @@ 
-17,7 +17,7 @@ where } /// Generic constructor for `Label` -#[cfg_attr(feature = "cargo-clippy", allow(clippy::inline_always))] +#[allow(clippy::inline_always)] #[inline(always)] pub fn Label(string: S) -> LabelType where @@ -27,7 +27,7 @@ where } /// Generic constructor for `Title` -#[cfg_attr(feature = "cargo-clippy", allow(clippy::inline_always))] +#[allow(clippy::inline_always)] #[inline(always)] pub fn Title(string: S) -> TitleType where @@ -37,7 +37,7 @@ where } /// Generic constructor for `Output` -#[cfg_attr(feature = "cargo-clippy", allow(clippy::inline_always))] +#[allow(clippy::inline_always)] #[inline(always)] pub fn Output
(path: P) -> OutputType where diff --git a/src/analysis/compare.rs b/src/analysis/compare.rs index a49407d85..ab8c9d406 100644 --- a/src/analysis/compare.rs +++ b/src/analysis/compare.rs @@ -12,7 +12,7 @@ use crate::report::BenchmarkId; use crate::{fs, Criterion, SavedSample}; // Common comparison procedure -#[cfg_attr(feature = "cargo-clippy", allow(clippy::type_complexity))] +#[allow(clippy::type_complexity)] pub(crate) fn common( id: &BenchmarkId, avg_times: &Sample, diff --git a/src/lib.rs b/src/lib.rs index 5ebfae1a5..d23107fd5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -18,13 +18,10 @@ #![warn(missing_docs)] #![warn(bare_trait_objects)] #![cfg_attr(feature = "real_blackbox", feature(test))] -#![cfg_attr( - feature = "cargo-clippy", - allow( +#![allow( clippy::just_underscores_and_digits, // Used in the stats code clippy::transmute_ptr_to_ptr, // Used in the stats code clippy::manual_non_exhaustive, // Remove when MSRV bumped above 1.40 - ) )] #[cfg(all(feature = "rayon", target_arch = "wasm32"))] @@ -763,7 +760,7 @@ impl Criterion { /// Configure this criterion struct based on the command-line arguments to /// this process. #[must_use] - #[cfg_attr(feature = "cargo-clippy", allow(clippy::cognitive_complexity))] + #[allow(clippy::cognitive_complexity)] pub fn configure_from_args(mut self) -> Criterion { use clap::{value_parser, Arg, Command}; let matches = Command::new("Criterion Benchmark") diff --git a/src/plot/gnuplot_backend/summary.rs b/src/plot/gnuplot_backend/summary.rs index e5d2ab6be..39193c216 100644 --- a/src/plot/gnuplot_backend/summary.rs +++ b/src/plot/gnuplot_backend/summary.rs @@ -32,7 +32,7 @@ impl AxisScale { } } -#[cfg_attr(feature = "cargo-clippy", allow(clippy::explicit_counter_loop))] +#[allow(clippy::explicit_counter_loop)] pub fn line_comparison( formatter: &dyn ValueFormatter, title: &str, diff --git a/src/report.rs b/src/report.rs index c5448fdbb..0e14696be 100644 --- a/src/report.rs +++ b/src/report.rs @@ -400,7 +400,7 @@ impl CliReport { } // Passing a String is the common case here. 
- #[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] + #[allow(clippy::needless_pass_by_value)] fn print_overwritable(&self, s: String) { if self.enable_text_overwrite { eprint!("{}", s); diff --git a/src/stats/bivariate/mod.rs b/src/stats/bivariate/mod.rs index 2351c9ef6..7e0f8a9c6 100644 --- a/src/stats/bivariate/mod.rs +++ b/src/stats/bivariate/mod.rs @@ -21,7 +21,7 @@ pub struct Data<'a, X, Y>(&'a [X], &'a [Y]); impl<'a, X, Y> Copy for Data<'a, X, Y> {} -#[cfg_attr(feature = "cargo-clippy", allow(clippy::expl_impl_clone_on_copy))] +#[allow(clippy::expl_impl_clone_on_copy)] impl<'a, X, Y> Clone for Data<'a, X, Y> { fn clone(&self) -> Data<'a, X, Y> { *self diff --git a/src/stats/bivariate/resamples.rs b/src/stats/bivariate/resamples.rs index e254dc792..8672be074 100644 --- a/src/stats/bivariate/resamples.rs +++ b/src/stats/bivariate/resamples.rs @@ -12,7 +12,7 @@ where stage: Option<(Vec, Vec)>, } -#[cfg_attr(feature = "cargo-clippy", allow(clippy::should_implement_trait))] +#[allow(clippy::should_implement_trait)] impl<'a, X, Y> Resamples<'a, X, Y> where X: 'a + Float, diff --git a/src/stats/univariate/mod.rs b/src/stats/univariate/mod.rs index 5b221272d..5ded0afe7 100644 --- a/src/stats/univariate/mod.rs +++ b/src/stats/univariate/mod.rs @@ -25,7 +25,7 @@ pub use self::sample::Sample; /// - Multithreaded /// - Time: `O(nresamples)` /// - Memory: `O(nresamples)` -#[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_lossless))] +#[allow(clippy::cast_lossless)] pub fn bootstrap( a: &Sample, b: &Sample, diff --git a/src/stats/univariate/outliers/tukey.rs b/src/stats/univariate/outliers/tukey.rs index 9e8ad0eca..0f13b41bf 100644 --- a/src/stats/univariate/outliers/tukey.rs +++ b/src/stats/univariate/outliers/tukey.rs @@ -69,7 +69,7 @@ where /// Returns the number of data points per label /// /// - Time: `O(length)` - #[cfg_attr(feature = "cargo-clippy", allow(clippy::similar_names))] + #[allow(clippy::similar_names)] pub fn count(&self) -> (usize, usize, usize, usize, usize) { let (mut los, mut lom, mut noa, mut him, mut his) = (0, 0, 0, 0, 0); @@ -128,7 +128,7 @@ where { type Output = Label; - #[cfg_attr(feature = "cargo-clippy", allow(clippy::similar_names))] + #[allow(clippy::similar_names)] fn index(&self, i: usize) -> &Label { static LOW_SEVERE: Label = LowSevere; static LOW_MILD: Label = LowMild; @@ -180,7 +180,7 @@ where { type Item = (A, Label); - #[cfg_attr(feature = "cargo-clippy", allow(clippy::similar_names))] + #[allow(clippy::similar_names)] fn next(&mut self) -> Option<(A, Label)> { self.iter.next().map(|&x| { let (lost, lomt, himt, hist) = self.fences; diff --git a/src/stats/univariate/resamples.rs b/src/stats/univariate/resamples.rs index 923669d59..b42c83cc3 100644 --- a/src/stats/univariate/resamples.rs +++ b/src/stats/univariate/resamples.rs @@ -13,7 +13,7 @@ where stage: Option>, } -#[cfg_attr(feature = "cargo-clippy", allow(clippy::should_implement_trait))] +#[allow(clippy::should_implement_trait)] impl<'a, A> Resamples<'a, A> where A: 'a + Float, diff --git a/src/stats/univariate/sample.rs b/src/stats/univariate/sample.rs index 6fbb4fb2d..f952cec98 100644 --- a/src/stats/univariate/sample.rs +++ b/src/stats/univariate/sample.rs @@ -26,7 +26,7 @@ where /// # Panics /// /// Panics if `slice` contains any `NaN` or if `slice` has less than two elements - #[cfg_attr(feature = "cargo-clippy", allow(clippy::new_ret_no_self))] + #[allow(clippy::new_ret_no_self)] pub fn new(slice: &[A]) -> &Sample { assert!(slice.len() > 1 && slice.iter().all(|x| 
!x.is_nan())); From 88b517ad18e1ac6c1c6938de276b312e99cc2ff0 Mon Sep 17 00:00:00 2001 From: Samuel Tardieu Date: Fri, 29 Mar 2024 07:15:16 +0100 Subject: [PATCH 4/7] Replace objects instead of recreating them `.clone_into()` avoids a whole object deallocation and reallocation. Flagged by recent Clippy. --- src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index d23107fd5..e3f70f492 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -725,7 +725,7 @@ impl Criterion { #[must_use] #[doc(hidden)] pub fn output_directory(mut self, path: &Path) -> Criterion { - self.output_directory = path.to_owned(); + path.clone_into(&mut self.output_directory); self } @@ -1031,18 +1031,18 @@ https://bheisler.github.io/criterion.rs/book/faq.html if let Some(dir) = matches.get_one::("save-baseline") { self.baseline = Baseline::Save; - self.baseline_directory = dir.to_owned() + dir.clone_into(&mut self.baseline_directory) } if matches.get_flag("discard-baseline") { self.baseline = Baseline::Discard; } if let Some(dir) = matches.get_one::("baseline") { self.baseline = Baseline::CompareStrict; - self.baseline_directory = dir.to_owned(); + dir.clone_into(&mut self.baseline_directory); } if let Some(dir) = matches.get_one::("baseline-lenient") { self.baseline = Baseline::CompareLenient; - self.baseline_directory = dir.to_owned(); + dir.clone_into(&mut self.baseline_directory); } if self.connection.is_some() { From 4d50bf1dd9144510cef51bc2f6fa0cc1195f1251 Mon Sep 17 00:00:00 2001 From: Samuel Tardieu Date: Fri, 29 Mar 2024 07:17:06 +0100 Subject: [PATCH 5/7] Define generic parameter bound in one place only Flagged by recent Clippy. --- src/fs.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/fs.rs b/src/fs.rs index f47508be7..dcf263583 100644 --- a/src/fs.rs +++ b/src/fs.rs @@ -9,10 +9,10 @@ use walkdir::{DirEntry, WalkDir}; use crate::error::{Error, Result}; use crate::report::BenchmarkId; -pub fn load(path: &P) -> Result +pub fn load(path: &P) -> Result where A: DeserializeOwned, - P: AsRef, + P: AsRef + ?Sized, { let path = path.as_ref(); let mut f = File::open(path).map_err(|inner| Error::AccessError { From a0cc158827e0606551dac9975a2989e6061b318e Mon Sep 17 00:00:00 2001 From: Samuel Tardieu Date: Fri, 29 Mar 2024 07:20:37 +0100 Subject: [PATCH 6/7] Use Unix end-of-line convention for Rust source files --- bencher_compat/benches/bencher_example.rs | 42 +- benches/benchmarks/sampling_mode.rs | 52 +- macro/benches/test_macro_bench.rs | 52 +- macro/src/lib.rs | 110 +- src/async_executor.rs | 132 +- src/bencher.rs | 1526 ++++++++++----------- src/connection.rs | 770 +++++------ 7 files changed, 1342 insertions(+), 1342 deletions(-) diff --git a/bencher_compat/benches/bencher_example.rs b/bencher_compat/benches/bencher_example.rs index c45d246eb..5339836bd 100644 --- a/bencher_compat/benches/bencher_example.rs +++ b/bencher_compat/benches/bencher_example.rs @@ -1,22 +1,22 @@ -#[macro_use] -extern crate criterion_bencher_compat; - -use criterion_bencher_compat::Bencher; - -fn a(bench: &mut Bencher) { - bench.iter(|| { - (0..1000).fold(0, |x, y| x + y) - }) -} - -fn b(bench: &mut Bencher) { - const N: usize = 1024; - bench.iter(|| { - vec![0u8; N] - }); - - bench.bytes = N as u64; -} - -benchmark_group!(benches, a, b); +#[macro_use] +extern crate criterion_bencher_compat; + +use criterion_bencher_compat::Bencher; + +fn a(bench: &mut Bencher) { + bench.iter(|| { + (0..1000).fold(0, |x, y| x + y) + }) +} + +fn b(bench: &mut Bencher) { + const 
N: usize = 1024; + bench.iter(|| { + vec![0u8; N] + }); + + bench.bytes = N as u64; +} + +benchmark_group!(benches, a, b); benchmark_main!(benches); \ No newline at end of file diff --git a/benches/benchmarks/sampling_mode.rs b/benches/benchmarks/sampling_mode.rs index af761273a..c7ac7bfa8 100644 --- a/benches/benchmarks/sampling_mode.rs +++ b/benches/benchmarks/sampling_mode.rs @@ -1,26 +1,26 @@ -use criterion::{criterion_group, Criterion, SamplingMode}; -use std::thread::sleep; -use std::time::Duration; - -fn sampling_mode_tests(c: &mut Criterion) { - let mut group = c.benchmark_group("sampling_mode"); - - group.sampling_mode(SamplingMode::Auto); - group.bench_function("Auto", |bencher| { - bencher.iter(|| sleep(Duration::from_millis(0))) - }); - - group.sampling_mode(SamplingMode::Linear); - group.bench_function("Linear", |bencher| { - bencher.iter(|| sleep(Duration::from_millis(0))) - }); - - group.sampling_mode(SamplingMode::Flat); - group.bench_function("Flat", |bencher| { - bencher.iter(|| sleep(Duration::from_millis(10))) - }); - - group.finish(); -} - -criterion_group!(benches, sampling_mode_tests,); +use criterion::{criterion_group, Criterion, SamplingMode}; +use std::thread::sleep; +use std::time::Duration; + +fn sampling_mode_tests(c: &mut Criterion) { + let mut group = c.benchmark_group("sampling_mode"); + + group.sampling_mode(SamplingMode::Auto); + group.bench_function("Auto", |bencher| { + bencher.iter(|| sleep(Duration::from_millis(0))) + }); + + group.sampling_mode(SamplingMode::Linear); + group.bench_function("Linear", |bencher| { + bencher.iter(|| sleep(Duration::from_millis(0))) + }); + + group.sampling_mode(SamplingMode::Flat); + group.bench_function("Flat", |bencher| { + bencher.iter(|| sleep(Duration::from_millis(10))) + }); + + group.finish(); +} + +criterion_group!(benches, sampling_mode_tests,); diff --git a/macro/benches/test_macro_bench.rs b/macro/benches/test_macro_bench.rs index 40a3ab198..7369fd27b 100644 --- a/macro/benches/test_macro_bench.rs +++ b/macro/benches/test_macro_bench.rs @@ -1,27 +1,27 @@ -#![feature(custom_test_frameworks)] -#![test_runner(criterion::runner)] - -use criterion::{Criterion, black_box}; -use criterion_macro::criterion; - -fn fibonacci(n: u64) -> u64 { - match n { - 0 | 1 => 1, - n => fibonacci(n - 1) + fibonacci(n - 2), - } -} - -fn custom_criterion() -> Criterion { - Criterion::default() - .sample_size(50) -} - -#[criterion] -fn bench_simple(c: &mut Criterion) { - c.bench_function("Fibonacci-Simple", |b| b.iter(|| fibonacci(black_box(10)))); -} - -#[criterion(custom_criterion())] -fn bench_custom(c: &mut Criterion) { - c.bench_function("Fibonacci-Custom", |b| b.iter(|| fibonacci(black_box(20)))); +#![feature(custom_test_frameworks)] +#![test_runner(criterion::runner)] + +use criterion::{Criterion, black_box}; +use criterion_macro::criterion; + +fn fibonacci(n: u64) -> u64 { + match n { + 0 | 1 => 1, + n => fibonacci(n - 1) + fibonacci(n - 2), + } +} + +fn custom_criterion() -> Criterion { + Criterion::default() + .sample_size(50) +} + +#[criterion] +fn bench_simple(c: &mut Criterion) { + c.bench_function("Fibonacci-Simple", |b| b.iter(|| fibonacci(black_box(10)))); +} + +#[criterion(custom_criterion())] +fn bench_custom(c: &mut Criterion) { + c.bench_function("Fibonacci-Custom", |b| b.iter(|| fibonacci(black_box(20)))); } \ No newline at end of file diff --git a/macro/src/lib.rs b/macro/src/lib.rs index 6297a172e..360919362 100644 --- a/macro/src/lib.rs +++ b/macro/src/lib.rs @@ -1,56 +1,56 @@ -extern crate proc_macro; -use 
proc_macro::TokenStream; -use proc_macro2::{Ident, TokenTree}; -use quote::quote_spanned; - -#[proc_macro_attribute] -pub fn criterion(attr: TokenStream, item: TokenStream) -> TokenStream { - let attr = proc_macro2::TokenStream::from(attr); - let item = proc_macro2::TokenStream::from(item); - - let span = proc_macro2::Span::call_site(); - - let init = if stream_length(attr.clone()) != 0 { - attr - } - else { - quote_spanned!(span=> criterion::Criterion::default()) - }; - - let function_name = find_name(item.clone()); - let wrapped_name = Ident::new(&format!("criterion_wrapped_{}", function_name.to_string()), span); - - let output = quote_spanned!(span=> - #[test_case] - pub fn #wrapped_name() { - #item - - let mut c = #init.configure_from_args(); - #function_name(&mut c); - } - ); - - output.into() -} - -fn stream_length(stream: proc_macro2::TokenStream) -> usize { - stream.into_iter().count() -} - -fn find_name(stream: proc_macro2::TokenStream) -> Ident { - let mut iter = stream.into_iter(); - while let Some(tok) = iter.next() { - if let TokenTree::Ident(ident) = tok { - if ident == "fn" { - break; - } - } - } - - if let Some(TokenTree::Ident(name)) = iter.next() { - name - } - else { - panic!("Unable to find function name") - } +extern crate proc_macro; +use proc_macro::TokenStream; +use proc_macro2::{Ident, TokenTree}; +use quote::quote_spanned; + +#[proc_macro_attribute] +pub fn criterion(attr: TokenStream, item: TokenStream) -> TokenStream { + let attr = proc_macro2::TokenStream::from(attr); + let item = proc_macro2::TokenStream::from(item); + + let span = proc_macro2::Span::call_site(); + + let init = if stream_length(attr.clone()) != 0 { + attr + } + else { + quote_spanned!(span=> criterion::Criterion::default()) + }; + + let function_name = find_name(item.clone()); + let wrapped_name = Ident::new(&format!("criterion_wrapped_{}", function_name.to_string()), span); + + let output = quote_spanned!(span=> + #[test_case] + pub fn #wrapped_name() { + #item + + let mut c = #init.configure_from_args(); + #function_name(&mut c); + } + ); + + output.into() +} + +fn stream_length(stream: proc_macro2::TokenStream) -> usize { + stream.into_iter().count() +} + +fn find_name(stream: proc_macro2::TokenStream) -> Ident { + let mut iter = stream.into_iter(); + while let Some(tok) = iter.next() { + if let TokenTree::Ident(ident) = tok { + if ident == "fn" { + break; + } + } + } + + if let Some(TokenTree::Ident(name)) = iter.next() { + name + } + else { + panic!("Unable to find function name") + } } \ No newline at end of file diff --git a/src/async_executor.rs b/src/async_executor.rs index d51626448..58877d54b 100644 --- a/src/async_executor.rs +++ b/src/async_executor.rs @@ -1,66 +1,66 @@ -//! This module defines a trait that can be used to plug in different Futures executors into -//! Criterion.rs' async benchmarking support. -//! -//! Implementations are provided for: -//! * Tokio (implemented directly for `tokio::Runtime`) -//! * Async-std -//! * Smol -//! * The Futures crate -//! -//! Please note that async benchmarks will have a small amount of measurement overhead relative -//! to synchronous benchmarks. It is recommended to use synchronous benchmarks where possible, to -//! improve measurement accuracy. - -use std::future::Future; - -/// Plugin trait used to allow benchmarking on multiple different async runtimes. 
-/// -/// Smol, Tokio and Async-std are supported out of the box, as is the current-thread runner from the -/// Futures crate; it is recommended to use whichever runtime you use in production. -pub trait AsyncExecutor { - /// Spawn the given future onto this runtime and block until it's complete, returning the result. - fn block_on(&self, future: impl Future) -> T; -} - -/// Runs futures on the 'futures' crate's built-in current-thread executor -#[cfg(feature = "async_futures")] -pub struct FuturesExecutor; -#[cfg(feature = "async_futures")] -impl AsyncExecutor for FuturesExecutor { - fn block_on(&self, future: impl Future) -> T { - futures::executor::block_on(future) - } -} - -/// Runs futures on the 'smol' crate's global executor -#[cfg(feature = "async_smol")] -pub struct SmolExecutor; -#[cfg(feature = "async_smol")] -impl AsyncExecutor for SmolExecutor { - fn block_on(&self, future: impl Future) -> T { - smol::block_on(future) - } -} - -#[cfg(feature = "async_tokio")] -impl AsyncExecutor for tokio::runtime::Runtime { - fn block_on(&self, future: impl Future) -> T { - self.block_on(future) - } -} -#[cfg(feature = "async_tokio")] -impl AsyncExecutor for &tokio::runtime::Runtime { - fn block_on(&self, future: impl Future) -> T { - (*self).block_on(future) - } -} - -/// Runs futures on the 'async-std' crate's global executor -#[cfg(feature = "async_std")] -pub struct AsyncStdExecutor; -#[cfg(feature = "async_std")] -impl AsyncExecutor for AsyncStdExecutor { - fn block_on(&self, future: impl Future) -> T { - async_std::task::block_on(future) - } -} +//! This module defines a trait that can be used to plug in different Futures executors into +//! Criterion.rs' async benchmarking support. +//! +//! Implementations are provided for: +//! * Tokio (implemented directly for `tokio::Runtime`) +//! * Async-std +//! * Smol +//! * The Futures crate +//! +//! Please note that async benchmarks will have a small amount of measurement overhead relative +//! to synchronous benchmarks. It is recommended to use synchronous benchmarks where possible, to +//! improve measurement accuracy. + +use std::future::Future; + +/// Plugin trait used to allow benchmarking on multiple different async runtimes. +/// +/// Smol, Tokio and Async-std are supported out of the box, as is the current-thread runner from the +/// Futures crate; it is recommended to use whichever runtime you use in production. +pub trait AsyncExecutor { + /// Spawn the given future onto this runtime and block until it's complete, returning the result. 
+ fn block_on(&self, future: impl Future) -> T; +} + +/// Runs futures on the 'futures' crate's built-in current-thread executor +#[cfg(feature = "async_futures")] +pub struct FuturesExecutor; +#[cfg(feature = "async_futures")] +impl AsyncExecutor for FuturesExecutor { + fn block_on(&self, future: impl Future) -> T { + futures::executor::block_on(future) + } +} + +/// Runs futures on the 'smol' crate's global executor +#[cfg(feature = "async_smol")] +pub struct SmolExecutor; +#[cfg(feature = "async_smol")] +impl AsyncExecutor for SmolExecutor { + fn block_on(&self, future: impl Future) -> T { + smol::block_on(future) + } +} + +#[cfg(feature = "async_tokio")] +impl AsyncExecutor for tokio::runtime::Runtime { + fn block_on(&self, future: impl Future) -> T { + self.block_on(future) + } +} +#[cfg(feature = "async_tokio")] +impl AsyncExecutor for &tokio::runtime::Runtime { + fn block_on(&self, future: impl Future) -> T { + (*self).block_on(future) + } +} + +/// Runs futures on the 'async-std' crate's global executor +#[cfg(feature = "async_std")] +pub struct AsyncStdExecutor; +#[cfg(feature = "async_std")] +impl AsyncExecutor for AsyncStdExecutor { + fn block_on(&self, future: impl Future) -> T { + async_std::task::block_on(future) + } +} diff --git a/src/bencher.rs b/src/bencher.rs index b2bd971a8..a508fd3e4 100644 --- a/src/bencher.rs +++ b/src/bencher.rs @@ -1,763 +1,763 @@ -use std::time::Duration; -use std::time::Instant; - -use crate::black_box; -use crate::measurement::{Measurement, WallTime}; -use crate::BatchSize; - -#[cfg(feature = "async")] -use std::future::Future; - -#[cfg(feature = "async")] -use crate::async_executor::AsyncExecutor; - -// ================================== MAINTENANCE NOTE ============================================= -// Any changes made to either Bencher or AsyncBencher will have to be replicated to the other! -// ================================== MAINTENANCE NOTE ============================================= - -/// Timer struct used to iterate a benchmarked function and measure the runtime. -/// -/// This struct provides different timing loops as methods. Each timing loop provides a different -/// way to time a routine and each has advantages and disadvantages. -/// -/// * If you want to do the iteration and measurement yourself (eg. passing the iteration count -/// to a separate process), use `iter_custom`. -/// * If your routine requires no per-iteration setup and returns a value with an expensive `drop` -/// method, use `iter_with_large_drop`. -/// * If your routine requires some per-iteration setup that shouldn't be timed, use `iter_batched` -/// or `iter_batched_ref`. See [`BatchSize`](enum.BatchSize.html) for a discussion of batch sizes. -/// If the setup value implements `Drop` and you don't want to include the `drop` time in the -/// measurement, use `iter_batched_ref`, otherwise use `iter_batched`. These methods are also -/// suitable for benchmarking routines which return a value with an expensive `drop` method, -/// but are more complex than `iter_with_large_drop`. -/// * Otherwise, use `iter`. -pub struct Bencher<'a, M: Measurement = WallTime> { - pub(crate) iterated: bool, // Have we iterated this benchmark? - pub(crate) iters: u64, // Number of times to iterate this benchmark - pub(crate) value: M::Value, // The measured value - pub(crate) measurement: &'a M, // Reference to the measurement object - pub(crate) elapsed_time: Duration, // How much time did it take to perform the iteration? Used for the warmup period. 
-} -impl<'a, M: Measurement> Bencher<'a, M> { - /// Times a `routine` by executing it many times and timing the total elapsed time. - /// - /// Prefer this timing loop when `routine` returns a value that doesn't have a destructor. - /// - /// # Timing model - /// - /// Note that the `Bencher` also times the time required to destroy the output of `routine()`. - /// Therefore prefer this timing loop when the runtime of `mem::drop(O)` is negligible compared - /// to the runtime of the `routine`. - /// - /// ```text - /// elapsed = Instant::now + iters * (routine + mem::drop(O) + Range::next) - /// ``` - /// - /// # Example - /// - /// ```rust - /// #[macro_use] extern crate criterion; - /// - /// use criterion::*; - /// - /// // The function to benchmark - /// fn foo() { - /// // ... - /// } - /// - /// fn bench(c: &mut Criterion) { - /// c.bench_function("iter", move |b| { - /// b.iter(|| foo()) - /// }); - /// } - /// - /// criterion_group!(benches, bench); - /// criterion_main!(benches); - /// ``` - /// - #[inline(never)] - pub fn iter(&mut self, mut routine: R) - where - R: FnMut() -> O, - { - self.iterated = true; - let time_start = Instant::now(); - let start = self.measurement.start(); - for _ in 0..self.iters { - black_box(routine()); - } - self.value = self.measurement.end(start); - self.elapsed_time = time_start.elapsed(); - } - - /// Times a `routine` by executing it many times and relying on `routine` to measure its own execution time. - /// - /// Prefer this timing loop in cases where `routine` has to do its own measurements to - /// get accurate timing information (for example in multi-threaded scenarios where you spawn - /// and coordinate with multiple threads). - /// - /// # Timing model - /// Custom, the timing model is whatever is returned as the Duration from `routine`. - /// - /// # Example - /// ```rust - /// #[macro_use] extern crate criterion; - /// use criterion::*; - /// use criterion::black_box; - /// use std::time::Instant; - /// - /// fn foo() { - /// // ... - /// } - /// - /// fn bench(c: &mut Criterion) { - /// c.bench_function("iter", move |b| { - /// b.iter_custom(|iters| { - /// let start = Instant::now(); - /// for _i in 0..iters { - /// black_box(foo()); - /// } - /// start.elapsed() - /// }) - /// }); - /// } - /// - /// criterion_group!(benches, bench); - /// criterion_main!(benches); - /// ``` - /// - #[inline(never)] - pub fn iter_custom(&mut self, mut routine: R) - where - R: FnMut(u64) -> M::Value, - { - self.iterated = true; - let time_start = Instant::now(); - self.value = routine(self.iters); - self.elapsed_time = time_start.elapsed(); - } - - #[doc(hidden)] - pub fn iter_with_setup(&mut self, setup: S, routine: R) - where - S: FnMut() -> I, - R: FnMut(I) -> O, - { - self.iter_batched(setup, routine, BatchSize::PerIteration); - } - - /// Times a `routine` by collecting its output on each iteration. This avoids timing the - /// destructor of the value returned by `routine`. - /// - /// WARNING: This requires `O(iters * mem::size_of::())` of memory, and `iters` is not under the - /// control of the caller. If this causes out-of-memory errors, use `iter_batched` instead. - /// - /// # Timing model - /// - /// ``` text - /// elapsed = Instant::now + iters * (routine) + Iterator::collect::> - /// ``` - /// - /// # Example - /// - /// ```rust - /// #[macro_use] extern crate criterion; - /// - /// use criterion::*; - /// - /// fn create_vector() -> Vec { - /// # vec![] - /// // ... 
- /// } - /// - /// fn bench(c: &mut Criterion) { - /// c.bench_function("with_drop", move |b| { - /// // This will avoid timing the Vec::drop. - /// b.iter_with_large_drop(|| create_vector()) - /// }); - /// } - /// - /// criterion_group!(benches, bench); - /// criterion_main!(benches); - /// ``` - /// - pub fn iter_with_large_drop(&mut self, mut routine: R) - where - R: FnMut() -> O, - { - self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput); - } - - /// Times a `routine` that requires some input by generating a batch of input, then timing the - /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for - /// details on choosing the batch size. Use this when the routine must consume its input. - /// - /// For example, use this loop to benchmark sorting algorithms, because they require unsorted - /// data on each iteration. - /// - /// # Timing model - /// - /// ```text - /// elapsed = (Instant::now * num_batches) + (iters * (routine + O::drop)) + Vec::extend - /// ``` - /// - /// # Example - /// - /// ```rust - /// #[macro_use] extern crate criterion; - /// - /// use criterion::*; - /// - /// fn create_scrambled_data() -> Vec { - /// # vec![] - /// // ... - /// } - /// - /// // The sorting algorithm to test - /// fn sort(data: &mut [u64]) { - /// // ... - /// } - /// - /// fn bench(c: &mut Criterion) { - /// let data = create_scrambled_data(); - /// - /// c.bench_function("with_setup", move |b| { - /// // This will avoid timing the clone call. - /// b.iter_batched(|| data.clone(), |mut data| sort(&mut data), BatchSize::SmallInput) - /// }); - /// } - /// - /// criterion_group!(benches, bench); - /// criterion_main!(benches); - /// ``` - /// - #[inline(never)] - pub fn iter_batched(&mut self, mut setup: S, mut routine: R, size: BatchSize) - where - S: FnMut() -> I, - R: FnMut(I) -> O, - { - self.iterated = true; - let batch_size = size.iters_per_batch(self.iters); - assert!(batch_size != 0, "Batch size must not be zero."); - let time_start = Instant::now(); - self.value = self.measurement.zero(); - - if batch_size == 1 { - for _ in 0..self.iters { - let input = black_box(setup()); - - let start = self.measurement.start(); - let output = routine(input); - let end = self.measurement.end(start); - self.value = self.measurement.add(&self.value, &end); - - drop(black_box(output)); - } - } else { - let mut iteration_counter = 0; - - while iteration_counter < self.iters { - let batch_size = ::std::cmp::min(batch_size, self.iters - iteration_counter); - - let inputs = black_box((0..batch_size).map(|_| setup()).collect::>()); - let mut outputs = Vec::with_capacity(batch_size as usize); - - let start = self.measurement.start(); - outputs.extend(inputs.into_iter().map(&mut routine)); - let end = self.measurement.end(start); - self.value = self.measurement.add(&self.value, &end); - - black_box(outputs); - - iteration_counter += batch_size; - } - } - - self.elapsed_time = time_start.elapsed(); - } - - /// Times a `routine` that requires some input by generating a batch of input, then timing the - /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for - /// details on choosing the batch size. Use this when the routine should accept the input by - /// mutable reference. - /// - /// For example, use this loop to benchmark sorting algorithms, because they require unsorted - /// data on each iteration. 
- /// - /// # Timing model - /// - /// ```text - /// elapsed = (Instant::now * num_batches) + (iters * routine) + Vec::extend - /// ``` - /// - /// # Example - /// - /// ```rust - /// #[macro_use] extern crate criterion; - /// - /// use criterion::*; - /// - /// fn create_scrambled_data() -> Vec { - /// # vec![] - /// // ... - /// } - /// - /// // The sorting algorithm to test - /// fn sort(data: &mut [u64]) { - /// // ... - /// } - /// - /// fn bench(c: &mut Criterion) { - /// let data = create_scrambled_data(); - /// - /// c.bench_function("with_setup", move |b| { - /// // This will avoid timing the clone call. - /// b.iter_batched(|| data.clone(), |mut data| sort(&mut data), BatchSize::SmallInput) - /// }); - /// } - /// - /// criterion_group!(benches, bench); - /// criterion_main!(benches); - /// ``` - /// - #[inline(never)] - pub fn iter_batched_ref(&mut self, mut setup: S, mut routine: R, size: BatchSize) - where - S: FnMut() -> I, - R: FnMut(&mut I) -> O, - { - self.iterated = true; - let batch_size = size.iters_per_batch(self.iters); - assert!(batch_size != 0, "Batch size must not be zero."); - let time_start = Instant::now(); - self.value = self.measurement.zero(); - - if batch_size == 1 { - for _ in 0..self.iters { - let mut input = black_box(setup()); - - let start = self.measurement.start(); - let output = routine(&mut input); - let end = self.measurement.end(start); - self.value = self.measurement.add(&self.value, &end); - - drop(black_box(output)); - drop(black_box(input)); - } - } else { - let mut iteration_counter = 0; - - while iteration_counter < self.iters { - let batch_size = ::std::cmp::min(batch_size, self.iters - iteration_counter); - - let mut inputs = black_box((0..batch_size).map(|_| setup()).collect::>()); - let mut outputs = Vec::with_capacity(batch_size as usize); - - let start = self.measurement.start(); - outputs.extend(inputs.iter_mut().map(&mut routine)); - let end = self.measurement.end(start); - self.value = self.measurement.add(&self.value, &end); - - black_box(outputs); - - iteration_counter += batch_size; - } - } - self.elapsed_time = time_start.elapsed(); - } - - // Benchmarks must actually call one of the iter methods. This causes benchmarks to fail loudly - // if they don't. - pub(crate) fn assert_iterated(&mut self) { - assert!( - self.iterated, - "Benchmark function must call Bencher::iter or related method." - ); - self.iterated = false; - } - - /// Convert this bencher into an AsyncBencher, which enables async/await support. - #[cfg(feature = "async")] - pub fn to_async<'b, A: AsyncExecutor>(&'b mut self, runner: A) -> AsyncBencher<'a, 'b, A, M> { - AsyncBencher { b: self, runner } - } -} - -/// Async/await variant of the Bencher struct. -#[cfg(feature = "async")] -pub struct AsyncBencher<'a, 'b, A: AsyncExecutor, M: Measurement = WallTime> { - b: &'b mut Bencher<'a, M>, - runner: A, -} -#[cfg(feature = "async")] -impl<'a, 'b, A: AsyncExecutor, M: Measurement> AsyncBencher<'a, 'b, A, M> { - /// Times a `routine` by executing it many times and timing the total elapsed time. - /// - /// Prefer this timing loop when `routine` returns a value that doesn't have a destructor. - /// - /// # Timing model - /// - /// Note that the `AsyncBencher` also times the time required to destroy the output of `routine()`. - /// Therefore prefer this timing loop when the runtime of `mem::drop(O)` is negligible compared - /// to the runtime of the `routine`. 
- /// - /// ```text - /// elapsed = Instant::now + iters * (routine + mem::drop(O) + Range::next) - /// ``` - /// - /// # Example - /// - /// ```rust - /// #[macro_use] extern crate criterion; - /// - /// use criterion::*; - /// use criterion::async_executor::FuturesExecutor; - /// - /// // The function to benchmark - /// async fn foo() { - /// // ... - /// } - /// - /// fn bench(c: &mut Criterion) { - /// c.bench_function("iter", move |b| { - /// b.to_async(FuturesExecutor).iter(|| async { foo().await } ) - /// }); - /// } - /// - /// criterion_group!(benches, bench); - /// criterion_main!(benches); - /// ``` - /// - #[inline(never)] - pub fn iter(&mut self, mut routine: R) - where - R: FnMut() -> F, - F: Future, - { - let AsyncBencher { b, runner } = self; - runner.block_on(async { - b.iterated = true; - let time_start = Instant::now(); - let start = b.measurement.start(); - for _ in 0..b.iters { - black_box(routine().await); - } - b.value = b.measurement.end(start); - b.elapsed_time = time_start.elapsed(); - }); - } - - /// Times a `routine` by executing it many times and relying on `routine` to measure its own execution time. - /// - /// Prefer this timing loop in cases where `routine` has to do its own measurements to - /// get accurate timing information (for example in multi-threaded scenarios where you spawn - /// and coordinate with multiple threads). - /// - /// # Timing model - /// Custom, the timing model is whatever is returned as the Duration from `routine`. - /// - /// # Example - /// ```rust - /// #[macro_use] extern crate criterion; - /// use criterion::*; - /// use criterion::black_box; - /// use criterion::async_executor::FuturesExecutor; - /// use std::time::Instant; - /// - /// async fn foo() { - /// // ... - /// } - /// - /// fn bench(c: &mut Criterion) { - /// c.bench_function("iter", move |b| { - /// b.to_async(FuturesExecutor).iter_custom(|iters| { - /// async move { - /// let start = Instant::now(); - /// for _i in 0..iters { - /// black_box(foo().await); - /// } - /// start.elapsed() - /// } - /// }) - /// }); - /// } - /// - /// criterion_group!(benches, bench); - /// criterion_main!(benches); - /// ``` - /// - #[inline(never)] - pub fn iter_custom(&mut self, mut routine: R) - where - R: FnMut(u64) -> F, - F: Future, - { - let AsyncBencher { b, runner } = self; - runner.block_on(async { - b.iterated = true; - let time_start = Instant::now(); - b.value = routine(b.iters).await; - b.elapsed_time = time_start.elapsed(); - }) - } - - #[doc(hidden)] - pub fn iter_with_setup(&mut self, setup: S, routine: R) - where - S: FnMut() -> I, - R: FnMut(I) -> F, - F: Future, - { - self.iter_batched(setup, routine, BatchSize::PerIteration); - } - - /// Times a `routine` by collecting its output on each iteration. This avoids timing the - /// destructor of the value returned by `routine`. - /// - /// WARNING: This requires `O(iters * mem::size_of::())` of memory, and `iters` is not under the - /// control of the caller. If this causes out-of-memory errors, use `iter_batched` instead. - /// - /// # Timing model - /// - /// ``` text - /// elapsed = Instant::now + iters * (routine) + Iterator::collect::> - /// ``` - /// - /// # Example - /// - /// ```rust - /// #[macro_use] extern crate criterion; - /// - /// use criterion::*; - /// use criterion::async_executor::FuturesExecutor; - /// - /// async fn create_vector() -> Vec { - /// # vec![] - /// // ... 
- /// } - /// - /// fn bench(c: &mut Criterion) { - /// c.bench_function("with_drop", move |b| { - /// // This will avoid timing the Vec::drop. - /// b.to_async(FuturesExecutor).iter_with_large_drop(|| async { create_vector().await }) - /// }); - /// } - /// - /// criterion_group!(benches, bench); - /// criterion_main!(benches); - /// ``` - /// - pub fn iter_with_large_drop(&mut self, mut routine: R) - where - R: FnMut() -> F, - F: Future, - { - self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput); - } - - #[doc(hidden)] - pub fn iter_with_large_setup(&mut self, setup: S, routine: R) - where - S: FnMut() -> I, - R: FnMut(I) -> F, - F: Future, - { - self.iter_batched(setup, routine, BatchSize::NumBatches(1)); - } - - /// Times a `routine` that requires some input by generating a batch of input, then timing the - /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for - /// details on choosing the batch size. Use this when the routine must consume its input. - /// - /// For example, use this loop to benchmark sorting algorithms, because they require unsorted - /// data on each iteration. - /// - /// # Timing model - /// - /// ```text - /// elapsed = (Instant::now * num_batches) + (iters * (routine + O::drop)) + Vec::extend - /// ``` - /// - /// # Example - /// - /// ```rust - /// #[macro_use] extern crate criterion; - /// - /// use criterion::*; - /// use criterion::async_executor::FuturesExecutor; - /// - /// fn create_scrambled_data() -> Vec { - /// # vec![] - /// // ... - /// } - /// - /// // The sorting algorithm to test - /// async fn sort(data: &mut [u64]) { - /// // ... - /// } - /// - /// fn bench(c: &mut Criterion) { - /// let data = create_scrambled_data(); - /// - /// c.bench_function("with_setup", move |b| { - /// // This will avoid timing the clone call. 
- /// b.iter_batched(|| data.clone(), |mut data| async move { sort(&mut data).await }, BatchSize::SmallInput) - /// }); - /// } - /// - /// criterion_group!(benches, bench); - /// criterion_main!(benches); - /// ``` - /// - #[inline(never)] - pub fn iter_batched(&mut self, mut setup: S, mut routine: R, size: BatchSize) - where - S: FnMut() -> I, - R: FnMut(I) -> F, - F: Future, - { - let AsyncBencher { b, runner } = self; - runner.block_on(async { - b.iterated = true; - let batch_size = size.iters_per_batch(b.iters); - assert!(batch_size != 0, "Batch size must not be zero."); - let time_start = Instant::now(); - b.value = b.measurement.zero(); - - if batch_size == 1 { - for _ in 0..b.iters { - let input = black_box(setup()); - - let start = b.measurement.start(); - let output = routine(input).await; - let end = b.measurement.end(start); - b.value = b.measurement.add(&b.value, &end); - - drop(black_box(output)); - } - } else { - let mut iteration_counter = 0; - - while iteration_counter < b.iters { - let batch_size = ::std::cmp::min(batch_size, b.iters - iteration_counter); - - let inputs = black_box((0..batch_size).map(|_| setup()).collect::>()); - let mut outputs = Vec::with_capacity(batch_size as usize); - - let start = b.measurement.start(); - // Can't use .extend here like the sync version does - for input in inputs { - outputs.push(routine(input).await); - } - let end = b.measurement.end(start); - b.value = b.measurement.add(&b.value, &end); - - black_box(outputs); - - iteration_counter += batch_size; - } - } - - b.elapsed_time = time_start.elapsed(); - }) - } - - /// Times a `routine` that requires some input by generating a batch of input, then timing the - /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for - /// details on choosing the batch size. Use this when the routine should accept the input by - /// mutable reference. - /// - /// For example, use this loop to benchmark sorting algorithms, because they require unsorted - /// data on each iteration. - /// - /// # Timing model - /// - /// ```text - /// elapsed = (Instant::now * num_batches) + (iters * routine) + Vec::extend - /// ``` - /// - /// # Example - /// - /// ```rust - /// #[macro_use] extern crate criterion; - /// - /// use criterion::*; - /// use criterion::async_executor::FuturesExecutor; - /// - /// fn create_scrambled_data() -> Vec { - /// # vec![] - /// // ... - /// } - /// - /// // The sorting algorithm to test - /// async fn sort(data: &mut [u64]) { - /// // ... - /// } - /// - /// fn bench(c: &mut Criterion) { - /// let data = create_scrambled_data(); - /// - /// c.bench_function("with_setup", move |b| { - /// // This will avoid timing the clone call. 
- /// b.iter_batched(|| data.clone(), |mut data| async move { sort(&mut data).await }, BatchSize::SmallInput) - /// }); - /// } - /// - /// criterion_group!(benches, bench); - /// criterion_main!(benches); - /// ``` - /// - #[inline(never)] - pub fn iter_batched_ref(&mut self, mut setup: S, mut routine: R, size: BatchSize) - where - S: FnMut() -> I, - R: FnMut(&mut I) -> F, - F: Future, - { - let AsyncBencher { b, runner } = self; - runner.block_on(async { - b.iterated = true; - let batch_size = size.iters_per_batch(b.iters); - assert!(batch_size != 0, "Batch size must not be zero."); - let time_start = Instant::now(); - b.value = b.measurement.zero(); - - if batch_size == 1 { - for _ in 0..b.iters { - let mut input = black_box(setup()); - - let start = b.measurement.start(); - let output = routine(&mut input).await; - let end = b.measurement.end(start); - b.value = b.measurement.add(&b.value, &end); - - drop(black_box(output)); - drop(black_box(input)); - } - } else { - let mut iteration_counter = 0; - - while iteration_counter < b.iters { - let batch_size = ::std::cmp::min(batch_size, b.iters - iteration_counter); - - let inputs = black_box((0..batch_size).map(|_| setup()).collect::>()); - let mut outputs = Vec::with_capacity(batch_size as usize); - - let start = b.measurement.start(); - // Can't use .extend here like the sync version does - for mut input in inputs { - outputs.push(routine(&mut input).await); - } - let end = b.measurement.end(start); - b.value = b.measurement.add(&b.value, &end); - - black_box(outputs); - - iteration_counter += batch_size; - } - } - b.elapsed_time = time_start.elapsed(); - }); - } -} +use std::time::Duration; +use std::time::Instant; + +use crate::black_box; +use crate::measurement::{Measurement, WallTime}; +use crate::BatchSize; + +#[cfg(feature = "async")] +use std::future::Future; + +#[cfg(feature = "async")] +use crate::async_executor::AsyncExecutor; + +// ================================== MAINTENANCE NOTE ============================================= +// Any changes made to either Bencher or AsyncBencher will have to be replicated to the other! +// ================================== MAINTENANCE NOTE ============================================= + +/// Timer struct used to iterate a benchmarked function and measure the runtime. +/// +/// This struct provides different timing loops as methods. Each timing loop provides a different +/// way to time a routine and each has advantages and disadvantages. +/// +/// * If you want to do the iteration and measurement yourself (eg. passing the iteration count +/// to a separate process), use `iter_custom`. +/// * If your routine requires no per-iteration setup and returns a value with an expensive `drop` +/// method, use `iter_with_large_drop`. +/// * If your routine requires some per-iteration setup that shouldn't be timed, use `iter_batched` +/// or `iter_batched_ref`. See [`BatchSize`](enum.BatchSize.html) for a discussion of batch sizes. +/// If the setup value implements `Drop` and you don't want to include the `drop` time in the +/// measurement, use `iter_batched_ref`, otherwise use `iter_batched`. These methods are also +/// suitable for benchmarking routines which return a value with an expensive `drop` method, +/// but are more complex than `iter_with_large_drop`. +/// * Otherwise, use `iter`. +pub struct Bencher<'a, M: Measurement = WallTime> { + pub(crate) iterated: bool, // Have we iterated this benchmark? 
+ pub(crate) iters: u64, // Number of times to iterate this benchmark + pub(crate) value: M::Value, // The measured value + pub(crate) measurement: &'a M, // Reference to the measurement object + pub(crate) elapsed_time: Duration, // How much time did it take to perform the iteration? Used for the warmup period. +} +impl<'a, M: Measurement> Bencher<'a, M> { + /// Times a `routine` by executing it many times and timing the total elapsed time. + /// + /// Prefer this timing loop when `routine` returns a value that doesn't have a destructor. + /// + /// # Timing model + /// + /// Note that the `Bencher` also times the time required to destroy the output of `routine()`. + /// Therefore prefer this timing loop when the runtime of `mem::drop(O)` is negligible compared + /// to the runtime of the `routine`. + /// + /// ```text + /// elapsed = Instant::now + iters * (routine + mem::drop(O) + Range::next) + /// ``` + /// + /// # Example + /// + /// ```rust + /// #[macro_use] extern crate criterion; + /// + /// use criterion::*; + /// + /// // The function to benchmark + /// fn foo() { + /// // ... + /// } + /// + /// fn bench(c: &mut Criterion) { + /// c.bench_function("iter", move |b| { + /// b.iter(|| foo()) + /// }); + /// } + /// + /// criterion_group!(benches, bench); + /// criterion_main!(benches); + /// ``` + /// + #[inline(never)] + pub fn iter(&mut self, mut routine: R) + where + R: FnMut() -> O, + { + self.iterated = true; + let time_start = Instant::now(); + let start = self.measurement.start(); + for _ in 0..self.iters { + black_box(routine()); + } + self.value = self.measurement.end(start); + self.elapsed_time = time_start.elapsed(); + } + + /// Times a `routine` by executing it many times and relying on `routine` to measure its own execution time. + /// + /// Prefer this timing loop in cases where `routine` has to do its own measurements to + /// get accurate timing information (for example in multi-threaded scenarios where you spawn + /// and coordinate with multiple threads). + /// + /// # Timing model + /// Custom, the timing model is whatever is returned as the Duration from `routine`. + /// + /// # Example + /// ```rust + /// #[macro_use] extern crate criterion; + /// use criterion::*; + /// use criterion::black_box; + /// use std::time::Instant; + /// + /// fn foo() { + /// // ... + /// } + /// + /// fn bench(c: &mut Criterion) { + /// c.bench_function("iter", move |b| { + /// b.iter_custom(|iters| { + /// let start = Instant::now(); + /// for _i in 0..iters { + /// black_box(foo()); + /// } + /// start.elapsed() + /// }) + /// }); + /// } + /// + /// criterion_group!(benches, bench); + /// criterion_main!(benches); + /// ``` + /// + #[inline(never)] + pub fn iter_custom(&mut self, mut routine: R) + where + R: FnMut(u64) -> M::Value, + { + self.iterated = true; + let time_start = Instant::now(); + self.value = routine(self.iters); + self.elapsed_time = time_start.elapsed(); + } + + #[doc(hidden)] + pub fn iter_with_setup(&mut self, setup: S, routine: R) + where + S: FnMut() -> I, + R: FnMut(I) -> O, + { + self.iter_batched(setup, routine, BatchSize::PerIteration); + } + + /// Times a `routine` by collecting its output on each iteration. This avoids timing the + /// destructor of the value returned by `routine`. + /// + /// WARNING: This requires `O(iters * mem::size_of::())` of memory, and `iters` is not under the + /// control of the caller. If this causes out-of-memory errors, use `iter_batched` instead. 
+ /// + /// # Timing model + /// + /// ``` text + /// elapsed = Instant::now + iters * (routine) + Iterator::collect::> + /// ``` + /// + /// # Example + /// + /// ```rust + /// #[macro_use] extern crate criterion; + /// + /// use criterion::*; + /// + /// fn create_vector() -> Vec { + /// # vec![] + /// // ... + /// } + /// + /// fn bench(c: &mut Criterion) { + /// c.bench_function("with_drop", move |b| { + /// // This will avoid timing the Vec::drop. + /// b.iter_with_large_drop(|| create_vector()) + /// }); + /// } + /// + /// criterion_group!(benches, bench); + /// criterion_main!(benches); + /// ``` + /// + pub fn iter_with_large_drop(&mut self, mut routine: R) + where + R: FnMut() -> O, + { + self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput); + } + + /// Times a `routine` that requires some input by generating a batch of input, then timing the + /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for + /// details on choosing the batch size. Use this when the routine must consume its input. + /// + /// For example, use this loop to benchmark sorting algorithms, because they require unsorted + /// data on each iteration. + /// + /// # Timing model + /// + /// ```text + /// elapsed = (Instant::now * num_batches) + (iters * (routine + O::drop)) + Vec::extend + /// ``` + /// + /// # Example + /// + /// ```rust + /// #[macro_use] extern crate criterion; + /// + /// use criterion::*; + /// + /// fn create_scrambled_data() -> Vec { + /// # vec![] + /// // ... + /// } + /// + /// // The sorting algorithm to test + /// fn sort(data: &mut [u64]) { + /// // ... + /// } + /// + /// fn bench(c: &mut Criterion) { + /// let data = create_scrambled_data(); + /// + /// c.bench_function("with_setup", move |b| { + /// // This will avoid timing the clone call. + /// b.iter_batched(|| data.clone(), |mut data| sort(&mut data), BatchSize::SmallInput) + /// }); + /// } + /// + /// criterion_group!(benches, bench); + /// criterion_main!(benches); + /// ``` + /// + #[inline(never)] + pub fn iter_batched(&mut self, mut setup: S, mut routine: R, size: BatchSize) + where + S: FnMut() -> I, + R: FnMut(I) -> O, + { + self.iterated = true; + let batch_size = size.iters_per_batch(self.iters); + assert!(batch_size != 0, "Batch size must not be zero."); + let time_start = Instant::now(); + self.value = self.measurement.zero(); + + if batch_size == 1 { + for _ in 0..self.iters { + let input = black_box(setup()); + + let start = self.measurement.start(); + let output = routine(input); + let end = self.measurement.end(start); + self.value = self.measurement.add(&self.value, &end); + + drop(black_box(output)); + } + } else { + let mut iteration_counter = 0; + + while iteration_counter < self.iters { + let batch_size = ::std::cmp::min(batch_size, self.iters - iteration_counter); + + let inputs = black_box((0..batch_size).map(|_| setup()).collect::>()); + let mut outputs = Vec::with_capacity(batch_size as usize); + + let start = self.measurement.start(); + outputs.extend(inputs.into_iter().map(&mut routine)); + let end = self.measurement.end(start); + self.value = self.measurement.add(&self.value, &end); + + black_box(outputs); + + iteration_counter += batch_size; + } + } + + self.elapsed_time = time_start.elapsed(); + } + + /// Times a `routine` that requires some input by generating a batch of input, then timing the + /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for + /// details on choosing the batch size. 
Use this when the routine should accept the input by + /// mutable reference. + /// + /// For example, use this loop to benchmark sorting algorithms, because they require unsorted + /// data on each iteration. + /// + /// # Timing model + /// + /// ```text + /// elapsed = (Instant::now * num_batches) + (iters * routine) + Vec::extend + /// ``` + /// + /// # Example + /// + /// ```rust + /// #[macro_use] extern crate criterion; + /// + /// use criterion::*; + /// + /// fn create_scrambled_data() -> Vec { + /// # vec![] + /// // ... + /// } + /// + /// // The sorting algorithm to test + /// fn sort(data: &mut [u64]) { + /// // ... + /// } + /// + /// fn bench(c: &mut Criterion) { + /// let data = create_scrambled_data(); + /// + /// c.bench_function("with_setup", move |b| { + /// // This will avoid timing the clone call. + /// b.iter_batched(|| data.clone(), |mut data| sort(&mut data), BatchSize::SmallInput) + /// }); + /// } + /// + /// criterion_group!(benches, bench); + /// criterion_main!(benches); + /// ``` + /// + #[inline(never)] + pub fn iter_batched_ref(&mut self, mut setup: S, mut routine: R, size: BatchSize) + where + S: FnMut() -> I, + R: FnMut(&mut I) -> O, + { + self.iterated = true; + let batch_size = size.iters_per_batch(self.iters); + assert!(batch_size != 0, "Batch size must not be zero."); + let time_start = Instant::now(); + self.value = self.measurement.zero(); + + if batch_size == 1 { + for _ in 0..self.iters { + let mut input = black_box(setup()); + + let start = self.measurement.start(); + let output = routine(&mut input); + let end = self.measurement.end(start); + self.value = self.measurement.add(&self.value, &end); + + drop(black_box(output)); + drop(black_box(input)); + } + } else { + let mut iteration_counter = 0; + + while iteration_counter < self.iters { + let batch_size = ::std::cmp::min(batch_size, self.iters - iteration_counter); + + let mut inputs = black_box((0..batch_size).map(|_| setup()).collect::>()); + let mut outputs = Vec::with_capacity(batch_size as usize); + + let start = self.measurement.start(); + outputs.extend(inputs.iter_mut().map(&mut routine)); + let end = self.measurement.end(start); + self.value = self.measurement.add(&self.value, &end); + + black_box(outputs); + + iteration_counter += batch_size; + } + } + self.elapsed_time = time_start.elapsed(); + } + + // Benchmarks must actually call one of the iter methods. This causes benchmarks to fail loudly + // if they don't. + pub(crate) fn assert_iterated(&mut self) { + assert!( + self.iterated, + "Benchmark function must call Bencher::iter or related method." + ); + self.iterated = false; + } + + /// Convert this bencher into an AsyncBencher, which enables async/await support. + #[cfg(feature = "async")] + pub fn to_async<'b, A: AsyncExecutor>(&'b mut self, runner: A) -> AsyncBencher<'a, 'b, A, M> { + AsyncBencher { b: self, runner } + } +} + +/// Async/await variant of the Bencher struct. +#[cfg(feature = "async")] +pub struct AsyncBencher<'a, 'b, A: AsyncExecutor, M: Measurement = WallTime> { + b: &'b mut Bencher<'a, M>, + runner: A, +} +#[cfg(feature = "async")] +impl<'a, 'b, A: AsyncExecutor, M: Measurement> AsyncBencher<'a, 'b, A, M> { + /// Times a `routine` by executing it many times and timing the total elapsed time. + /// + /// Prefer this timing loop when `routine` returns a value that doesn't have a destructor. + /// + /// # Timing model + /// + /// Note that the `AsyncBencher` also times the time required to destroy the output of `routine()`. 
+    /// Therefore prefer this timing loop when the runtime of `mem::drop(O)` is negligible compared
+    /// to the runtime of the `routine`.
+    ///
+    /// ```text
+    /// elapsed = Instant::now + iters * (routine + mem::drop(O) + Range::next)
+    /// ```
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// #[macro_use] extern crate criterion;
+    ///
+    /// use criterion::*;
+    /// use criterion::async_executor::FuturesExecutor;
+    ///
+    /// // The function to benchmark
+    /// async fn foo() {
+    ///     // ...
+    /// }
+    ///
+    /// fn bench(c: &mut Criterion) {
+    ///     c.bench_function("iter", move |b| {
+    ///         b.to_async(FuturesExecutor).iter(|| async { foo().await } )
+    ///     });
+    /// }
+    ///
+    /// criterion_group!(benches, bench);
+    /// criterion_main!(benches);
+    /// ```
+    ///
+    #[inline(never)]
+    pub fn iter<O, R, F>(&mut self, mut routine: R)
+    where
+        R: FnMut() -> F,
+        F: Future<Output = O>,
+    {
+        let AsyncBencher { b, runner } = self;
+        runner.block_on(async {
+            b.iterated = true;
+            let time_start = Instant::now();
+            let start = b.measurement.start();
+            for _ in 0..b.iters {
+                black_box(routine().await);
+            }
+            b.value = b.measurement.end(start);
+            b.elapsed_time = time_start.elapsed();
+        });
+    }
+
+    /// Times a `routine` by executing it many times and relying on `routine` to measure its own execution time.
+    ///
+    /// Prefer this timing loop in cases where `routine` has to do its own measurements to
+    /// get accurate timing information (for example in multi-threaded scenarios where you spawn
+    /// and coordinate with multiple threads).
+    ///
+    /// # Timing model
+    /// Custom, the timing model is whatever is returned as the Duration from `routine`.
+    ///
+    /// # Example
+    /// ```rust
+    /// #[macro_use] extern crate criterion;
+    /// use criterion::*;
+    /// use criterion::black_box;
+    /// use criterion::async_executor::FuturesExecutor;
+    /// use std::time::Instant;
+    ///
+    /// async fn foo() {
+    ///     // ...
+    /// }
+    ///
+    /// fn bench(c: &mut Criterion) {
+    ///     c.bench_function("iter", move |b| {
+    ///         b.to_async(FuturesExecutor).iter_custom(|iters| {
+    ///             async move {
+    ///                 let start = Instant::now();
+    ///                 for _i in 0..iters {
+    ///                     black_box(foo().await);
+    ///                 }
+    ///                 start.elapsed()
+    ///             }
+    ///         })
+    ///     });
+    /// }
+    ///
+    /// criterion_group!(benches, bench);
+    /// criterion_main!(benches);
+    /// ```
+    ///
+    #[inline(never)]
+    pub fn iter_custom<R, F>(&mut self, mut routine: R)
+    where
+        R: FnMut(u64) -> F,
+        F: Future<Output = M::Value>,
+    {
+        let AsyncBencher { b, runner } = self;
+        runner.block_on(async {
+            b.iterated = true;
+            let time_start = Instant::now();
+            b.value = routine(b.iters).await;
+            b.elapsed_time = time_start.elapsed();
+        })
+    }
+
+    #[doc(hidden)]
+    pub fn iter_with_setup<I, O, S, R, F>(&mut self, setup: S, routine: R)
+    where
+        S: FnMut() -> I,
+        R: FnMut(I) -> F,
+        F: Future<Output = O>,
+    {
+        self.iter_batched(setup, routine, BatchSize::PerIteration);
+    }
+
+    /// Times a `routine` by collecting its output on each iteration. This avoids timing the
+    /// destructor of the value returned by `routine`.
+    ///
+    /// WARNING: This requires `O(iters * mem::size_of::<O>())` of memory, and `iters` is not under the
+    /// control of the caller. If this causes out-of-memory errors, use `iter_batched` instead.
+ /// + /// # Timing model + /// + /// ``` text + /// elapsed = Instant::now + iters * (routine) + Iterator::collect::> + /// ``` + /// + /// # Example + /// + /// ```rust + /// #[macro_use] extern crate criterion; + /// + /// use criterion::*; + /// use criterion::async_executor::FuturesExecutor; + /// + /// async fn create_vector() -> Vec { + /// # vec![] + /// // ... + /// } + /// + /// fn bench(c: &mut Criterion) { + /// c.bench_function("with_drop", move |b| { + /// // This will avoid timing the Vec::drop. + /// b.to_async(FuturesExecutor).iter_with_large_drop(|| async { create_vector().await }) + /// }); + /// } + /// + /// criterion_group!(benches, bench); + /// criterion_main!(benches); + /// ``` + /// + pub fn iter_with_large_drop(&mut self, mut routine: R) + where + R: FnMut() -> F, + F: Future, + { + self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput); + } + + #[doc(hidden)] + pub fn iter_with_large_setup(&mut self, setup: S, routine: R) + where + S: FnMut() -> I, + R: FnMut(I) -> F, + F: Future, + { + self.iter_batched(setup, routine, BatchSize::NumBatches(1)); + } + + /// Times a `routine` that requires some input by generating a batch of input, then timing the + /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for + /// details on choosing the batch size. Use this when the routine must consume its input. + /// + /// For example, use this loop to benchmark sorting algorithms, because they require unsorted + /// data on each iteration. + /// + /// # Timing model + /// + /// ```text + /// elapsed = (Instant::now * num_batches) + (iters * (routine + O::drop)) + Vec::extend + /// ``` + /// + /// # Example + /// + /// ```rust + /// #[macro_use] extern crate criterion; + /// + /// use criterion::*; + /// use criterion::async_executor::FuturesExecutor; + /// + /// fn create_scrambled_data() -> Vec { + /// # vec![] + /// // ... + /// } + /// + /// // The sorting algorithm to test + /// async fn sort(data: &mut [u64]) { + /// // ... + /// } + /// + /// fn bench(c: &mut Criterion) { + /// let data = create_scrambled_data(); + /// + /// c.bench_function("with_setup", move |b| { + /// // This will avoid timing the clone call. 
+ /// b.iter_batched(|| data.clone(), |mut data| async move { sort(&mut data).await }, BatchSize::SmallInput) + /// }); + /// } + /// + /// criterion_group!(benches, bench); + /// criterion_main!(benches); + /// ``` + /// + #[inline(never)] + pub fn iter_batched(&mut self, mut setup: S, mut routine: R, size: BatchSize) + where + S: FnMut() -> I, + R: FnMut(I) -> F, + F: Future, + { + let AsyncBencher { b, runner } = self; + runner.block_on(async { + b.iterated = true; + let batch_size = size.iters_per_batch(b.iters); + assert!(batch_size != 0, "Batch size must not be zero."); + let time_start = Instant::now(); + b.value = b.measurement.zero(); + + if batch_size == 1 { + for _ in 0..b.iters { + let input = black_box(setup()); + + let start = b.measurement.start(); + let output = routine(input).await; + let end = b.measurement.end(start); + b.value = b.measurement.add(&b.value, &end); + + drop(black_box(output)); + } + } else { + let mut iteration_counter = 0; + + while iteration_counter < b.iters { + let batch_size = ::std::cmp::min(batch_size, b.iters - iteration_counter); + + let inputs = black_box((0..batch_size).map(|_| setup()).collect::>()); + let mut outputs = Vec::with_capacity(batch_size as usize); + + let start = b.measurement.start(); + // Can't use .extend here like the sync version does + for input in inputs { + outputs.push(routine(input).await); + } + let end = b.measurement.end(start); + b.value = b.measurement.add(&b.value, &end); + + black_box(outputs); + + iteration_counter += batch_size; + } + } + + b.elapsed_time = time_start.elapsed(); + }) + } + + /// Times a `routine` that requires some input by generating a batch of input, then timing the + /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for + /// details on choosing the batch size. Use this when the routine should accept the input by + /// mutable reference. + /// + /// For example, use this loop to benchmark sorting algorithms, because they require unsorted + /// data on each iteration. + /// + /// # Timing model + /// + /// ```text + /// elapsed = (Instant::now * num_batches) + (iters * routine) + Vec::extend + /// ``` + /// + /// # Example + /// + /// ```rust + /// #[macro_use] extern crate criterion; + /// + /// use criterion::*; + /// use criterion::async_executor::FuturesExecutor; + /// + /// fn create_scrambled_data() -> Vec { + /// # vec![] + /// // ... + /// } + /// + /// // The sorting algorithm to test + /// async fn sort(data: &mut [u64]) { + /// // ... + /// } + /// + /// fn bench(c: &mut Criterion) { + /// let data = create_scrambled_data(); + /// + /// c.bench_function("with_setup", move |b| { + /// // This will avoid timing the clone call. 
+    ///         b.iter_batched(|| data.clone(), |mut data| async move { sort(&mut data).await }, BatchSize::SmallInput)
+    ///     });
+    /// }
+    ///
+    /// criterion_group!(benches, bench);
+    /// criterion_main!(benches);
+    /// ```
+    ///
+    #[inline(never)]
+    pub fn iter_batched_ref<I, O, S, R, F>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
+    where
+        S: FnMut() -> I,
+        R: FnMut(&mut I) -> F,
+        F: Future<Output = O>,
+    {
+        let AsyncBencher { b, runner } = self;
+        runner.block_on(async {
+            b.iterated = true;
+            let batch_size = size.iters_per_batch(b.iters);
+            assert!(batch_size != 0, "Batch size must not be zero.");
+            let time_start = Instant::now();
+            b.value = b.measurement.zero();
+
+            if batch_size == 1 {
+                for _ in 0..b.iters {
+                    let mut input = black_box(setup());
+
+                    let start = b.measurement.start();
+                    let output = routine(&mut input).await;
+                    let end = b.measurement.end(start);
+                    b.value = b.measurement.add(&b.value, &end);
+
+                    drop(black_box(output));
+                    drop(black_box(input));
+                }
+            } else {
+                let mut iteration_counter = 0;
+
+                while iteration_counter < b.iters {
+                    let batch_size = ::std::cmp::min(batch_size, b.iters - iteration_counter);
+
+                    let mut inputs = black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
+                    let mut outputs = Vec::with_capacity(batch_size as usize);
+
+                    let start = b.measurement.start();
+                    // Can't use .extend here like the sync version does
+                    for input in inputs.iter_mut() {
+                        outputs.push(routine(input).await);
+                    }
+                    let end = b.measurement.end(start);
+                    b.value = b.measurement.add(&b.value, &end);
+
+                    black_box(outputs);
+
+                    iteration_counter += batch_size;
+                }
+            }
+            b.elapsed_time = time_start.elapsed();
+        });
+    }
+}
diff --git a/src/connection.rs b/src/connection.rs
index 53706d608..cef6db015 100644
--- a/src/connection.rs
+++ b/src/connection.rs
@@ -1,385 +1,385 @@
-use crate::report::BenchmarkId as InternalBenchmarkId;
-use crate::Throughput;
-use std::cell::RefCell;
-use std::convert::TryFrom;
-use std::io::{Read, Write};
-use std::mem::size_of;
-use std::net::TcpStream;
-
-#[derive(Debug)]
-pub enum MessageError {
-    Deserialization(ciborium::de::Error<std::io::Error>),
-    Serialization(ciborium::ser::Error<std::io::Error>),
-    Io(std::io::Error),
-}
-impl From<ciborium::de::Error<std::io::Error>> for MessageError {
-    fn from(other: ciborium::de::Error<std::io::Error>) -> Self {
-        MessageError::Deserialization(other)
-    }
-}
-impl From<ciborium::ser::Error<std::io::Error>> for MessageError {
-    fn from(other: ciborium::ser::Error<std::io::Error>) -> Self {
-        MessageError::Serialization(other)
-    }
-}
-impl From<std::io::Error> for MessageError {
-    fn from(other: std::io::Error) -> Self {
-        MessageError::Io(other)
-    }
-}
-impl std::fmt::Display for MessageError {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        match self {
-            MessageError::Deserialization(error) => write!(
-                f,
-                "Failed to deserialize message to Criterion.rs benchmark:\n{}",
-                error
-            ),
-            MessageError::Serialization(error) => write!(
-                f,
-                "Failed to serialize message to Criterion.rs benchmark:\n{}",
-                error
-            ),
-            MessageError::Io(error) => write!(
-                f,
-                "Failed to read or write message to Criterion.rs benchmark:\n{}",
-                error
-            ),
-        }
-    }
-}
-impl std::error::Error for MessageError {
-    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
-        match self {
-            MessageError::Deserialization(err) => Some(err),
-            MessageError::Serialization(err) => Some(err),
-            MessageError::Io(err) => Some(err),
-        }
-    }
-}
-
-// Use str::len as a const fn once we bump MSRV over 1.39.
-const RUNNER_MAGIC_NUMBER: &str = "cargo-criterion";
-const RUNNER_HELLO_SIZE: usize = 15 //RUNNER_MAGIC_NUMBER.len() // magic number
-    + (size_of::<u8>() * 3); // version number
-
-const BENCHMARK_MAGIC_NUMBER: &str = "Criterion";
-const BENCHMARK_HELLO_SIZE: usize = 9 //BENCHMARK_MAGIC_NUMBER.len() // magic number
-    + (size_of::<u8>() * 3) // version number
-    + size_of::<u16>() // protocol version
-    + size_of::<u16>(); // protocol format
-const PROTOCOL_VERSION: u16 = 1;
-const PROTOCOL_FORMAT: u16 = 1;
-
-#[derive(Debug)]
-struct InnerConnection {
-    socket: TcpStream,
-    receive_buffer: Vec<u8>,
-    send_buffer: Vec<u8>,
-    // runner_version: [u8; 3],
-}
-impl InnerConnection {
-    pub fn new(mut socket: TcpStream) -> Result<Self, MessageError> {
-        // read the runner-hello
-        let mut hello_buf = [0u8; RUNNER_HELLO_SIZE];
-        socket.read_exact(&mut hello_buf)?;
-        assert_eq!(
-            &hello_buf[0..RUNNER_MAGIC_NUMBER.len()],
-            RUNNER_MAGIC_NUMBER.as_bytes(),
-            "Not connected to cargo-criterion."
-        );
-
-        let i = RUNNER_MAGIC_NUMBER.len();
-        let runner_version = [hello_buf[i], hello_buf[i + 1], hello_buf[i + 2]];
-
-        info!("Runner version: {:?}", runner_version);
-
-        // now send the benchmark-hello
-        let mut hello_buf = [0u8; BENCHMARK_HELLO_SIZE];
-        hello_buf[0..BENCHMARK_MAGIC_NUMBER.len()]
-            .copy_from_slice(BENCHMARK_MAGIC_NUMBER.as_bytes());
-        let mut i = BENCHMARK_MAGIC_NUMBER.len();
-        hello_buf[i] = env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap();
-        hello_buf[i + 1] = env!("CARGO_PKG_VERSION_MINOR").parse().unwrap();
-        hello_buf[i + 2] = env!("CARGO_PKG_VERSION_PATCH").parse().unwrap();
-        i += 3;
-        hello_buf[i..i + 2].clone_from_slice(&PROTOCOL_VERSION.to_be_bytes());
-        i += 2;
-        hello_buf[i..i + 2].clone_from_slice(&PROTOCOL_FORMAT.to_be_bytes());
-
-        socket.write_all(&hello_buf)?;
-
-        Ok(InnerConnection {
-            socket,
-            receive_buffer: vec![],
-            send_buffer: vec![],
-            // runner_version,
-        })
-    }
-
-    #[allow(dead_code)]
-    pub fn recv(&mut self) -> Result<IncomingMessage, MessageError> {
-        let mut length_buf = [0u8; 4];
-        self.socket.read_exact(&mut length_buf)?;
-        let length = u32::from_be_bytes(length_buf);
-        self.receive_buffer.resize(length as usize, 0u8);
-        self.socket.read_exact(&mut self.receive_buffer)?;
-        let value = ciborium::de::from_reader(&self.receive_buffer[..])?;
-        Ok(value)
-    }
-
-    pub fn send(&mut self, message: &OutgoingMessage) -> Result<(), MessageError> {
-        self.send_buffer.truncate(0);
-        ciborium::ser::into_writer(message, &mut self.send_buffer)?;
-        let size = u32::try_from(self.send_buffer.len()).unwrap();
-        let length_buf = size.to_be_bytes();
-        self.socket.write_all(&length_buf)?;
-        self.socket.write_all(&self.send_buffer)?;
-        Ok(())
-    }
-}
-
-/// This is really just a holder to allow us to send messages through a shared reference to the
-/// connection.
-#[derive(Debug)]
-pub struct Connection {
-    inner: RefCell<InnerConnection>,
-}
-impl Connection {
-    pub fn new(socket: TcpStream) -> Result<Self, MessageError> {
-        Ok(Connection {
-            inner: RefCell::new(InnerConnection::new(socket)?),
-        })
-    }
-
-    #[allow(dead_code)]
-    pub fn recv(&self) -> Result<IncomingMessage, MessageError> {
-        self.inner.borrow_mut().recv()
-    }
-
-    pub fn send(&self, message: &OutgoingMessage) -> Result<(), MessageError> {
-        self.inner.borrow_mut().send(message)
-    }
-
-    pub fn serve_value_formatter(
-        &self,
-        formatter: &dyn crate::measurement::ValueFormatter,
-    ) -> Result<(), MessageError> {
-        loop {
-            let response = match self.recv()?
{ - IncomingMessage::FormatValue { value } => OutgoingMessage::FormattedValue { - value: formatter.format_value(value), - }, - IncomingMessage::FormatThroughput { value, throughput } => { - OutgoingMessage::FormattedValue { - value: formatter.format_throughput(&throughput, value), - } - } - IncomingMessage::ScaleValues { - typical_value, - mut values, - } => { - let unit = formatter.scale_values(typical_value, &mut values); - OutgoingMessage::ScaledValues { - unit, - scaled_values: values, - } - } - IncomingMessage::ScaleThroughputs { - typical_value, - throughput, - mut values, - } => { - let unit = formatter.scale_throughputs(typical_value, &throughput, &mut values); - OutgoingMessage::ScaledValues { - unit, - scaled_values: values, - } - } - IncomingMessage::ScaleForMachines { mut values } => { - let unit = formatter.scale_for_machines(&mut values); - OutgoingMessage::ScaledValues { - unit, - scaled_values: values, - } - } - IncomingMessage::Continue => break, - _ => panic!(), - }; - self.send(&response)?; - } - Ok(()) - } -} - -/// Enum defining the messages we can receive -#[derive(Debug, Deserialize)] -pub enum IncomingMessage { - // Value formatter requests - FormatValue { - value: f64, - }, - FormatThroughput { - value: f64, - throughput: Throughput, - }, - ScaleValues { - typical_value: f64, - values: Vec, - }, - ScaleThroughputs { - typical_value: f64, - values: Vec, - throughput: Throughput, - }, - ScaleForMachines { - values: Vec, - }, - Continue, - - __Other, -} - -/// Enum defining the messages we can send -#[derive(Debug, Serialize)] -pub enum OutgoingMessage<'a> { - BeginningBenchmarkGroup { - group: &'a str, - }, - FinishedBenchmarkGroup { - group: &'a str, - }, - BeginningBenchmark { - id: RawBenchmarkId, - }, - SkippingBenchmark { - id: RawBenchmarkId, - }, - Warmup { - id: RawBenchmarkId, - nanos: f64, - }, - MeasurementStart { - id: RawBenchmarkId, - sample_count: u64, - estimate_ns: f64, - iter_count: u64, - }, - MeasurementComplete { - id: RawBenchmarkId, - iters: &'a [f64], - times: &'a [f64], - plot_config: PlotConfiguration, - sampling_method: SamplingMethod, - benchmark_config: BenchmarkConfig, - }, - // value formatter responses - FormattedValue { - value: String, - }, - ScaledValues { - scaled_values: Vec, - unit: &'a str, - }, -} - -// Also define serializable variants of certain things, either to avoid leaking -// serializability into the public interface or because the serialized form -// is a bit different from the regular one. 
- -#[derive(Debug, Serialize)] -pub struct RawBenchmarkId { - group_id: String, - function_id: Option, - value_str: Option, - throughput: Vec, -} -impl From<&InternalBenchmarkId> for RawBenchmarkId { - fn from(other: &InternalBenchmarkId) -> RawBenchmarkId { - RawBenchmarkId { - group_id: other.group_id.clone(), - function_id: other.function_id.clone(), - value_str: other.value_str.clone(), - throughput: other.throughput.iter().cloned().collect(), - } - } -} - -#[derive(Debug, Serialize)] -pub enum AxisScale { - Linear, - Logarithmic, -} -impl From for AxisScale { - fn from(other: crate::AxisScale) -> Self { - match other { - crate::AxisScale::Linear => AxisScale::Linear, - crate::AxisScale::Logarithmic => AxisScale::Logarithmic, - } - } -} - -#[derive(Debug, Serialize)] -pub struct PlotConfiguration { - summary_scale: AxisScale, -} -impl From<&crate::PlotConfiguration> for PlotConfiguration { - fn from(other: &crate::PlotConfiguration) -> Self { - PlotConfiguration { - summary_scale: other.summary_scale.into(), - } - } -} - -#[derive(Debug, Serialize)] -struct Duration { - secs: u64, - nanos: u32, -} -impl From for Duration { - fn from(other: std::time::Duration) -> Self { - Duration { - secs: other.as_secs(), - nanos: other.subsec_nanos(), - } - } -} - -#[derive(Debug, Serialize)] -pub struct BenchmarkConfig { - confidence_level: f64, - measurement_time: Duration, - noise_threshold: f64, - nresamples: usize, - sample_size: usize, - significance_level: f64, - warm_up_time: Duration, -} -impl From<&crate::benchmark::BenchmarkConfig> for BenchmarkConfig { - fn from(other: &crate::benchmark::BenchmarkConfig) -> Self { - BenchmarkConfig { - confidence_level: other.confidence_level, - measurement_time: other.measurement_time.into(), - noise_threshold: other.noise_threshold, - nresamples: other.nresamples, - sample_size: other.sample_size, - significance_level: other.significance_level, - warm_up_time: other.warm_up_time.into(), - } - } -} - -/// Currently not used; defined for forwards compatibility with cargo-criterion. 
-#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
-pub enum SamplingMethod {
-    Linear,
-    Flat,
-}
-impl From<crate::ActualSamplingMode> for SamplingMethod {
-    fn from(other: crate::ActualSamplingMode) -> Self {
-        match other {
-            crate::ActualSamplingMode::Flat => SamplingMethod::Flat,
-            crate::ActualSamplingMode::Linear => SamplingMethod::Linear,
-        }
-    }
-}
+use crate::report::BenchmarkId as InternalBenchmarkId;
+use crate::Throughput;
+use std::cell::RefCell;
+use std::convert::TryFrom;
+use std::io::{Read, Write};
+use std::mem::size_of;
+use std::net::TcpStream;
+
+#[derive(Debug)]
+pub enum MessageError {
+    Deserialization(ciborium::de::Error<std::io::Error>),
+    Serialization(ciborium::ser::Error<std::io::Error>),
+    Io(std::io::Error),
+}
+impl From<ciborium::de::Error<std::io::Error>> for MessageError {
+    fn from(other: ciborium::de::Error<std::io::Error>) -> Self {
+        MessageError::Deserialization(other)
+    }
+}
+impl From<ciborium::ser::Error<std::io::Error>> for MessageError {
+    fn from(other: ciborium::ser::Error<std::io::Error>) -> Self {
+        MessageError::Serialization(other)
+    }
+}
+impl From<std::io::Error> for MessageError {
+    fn from(other: std::io::Error) -> Self {
+        MessageError::Io(other)
+    }
+}
+impl std::fmt::Display for MessageError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            MessageError::Deserialization(error) => write!(
+                f,
+                "Failed to deserialize message to Criterion.rs benchmark:\n{}",
+                error
+            ),
+            MessageError::Serialization(error) => write!(
+                f,
+                "Failed to serialize message to Criterion.rs benchmark:\n{}",
+                error
+            ),
+            MessageError::Io(error) => write!(
+                f,
+                "Failed to read or write message to Criterion.rs benchmark:\n{}",
+                error
+            ),
+        }
+    }
+}
+impl std::error::Error for MessageError {
+    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+        match self {
+            MessageError::Deserialization(err) => Some(err),
+            MessageError::Serialization(err) => Some(err),
+            MessageError::Io(err) => Some(err),
+        }
+    }
+}
+
+// Use str::len as a const fn once we bump MSRV over 1.39.
+const RUNNER_MAGIC_NUMBER: &str = "cargo-criterion";
+const RUNNER_HELLO_SIZE: usize = 15 //RUNNER_MAGIC_NUMBER.len() // magic number
+    + (size_of::<u8>() * 3); // version number
+
+const BENCHMARK_MAGIC_NUMBER: &str = "Criterion";
+const BENCHMARK_HELLO_SIZE: usize = 9 //BENCHMARK_MAGIC_NUMBER.len() // magic number
+    + (size_of::<u8>() * 3) // version number
+    + size_of::<u16>() // protocol version
+    + size_of::<u16>(); // protocol format
+const PROTOCOL_VERSION: u16 = 1;
+const PROTOCOL_FORMAT: u16 = 1;
+
+#[derive(Debug)]
+struct InnerConnection {
+    socket: TcpStream,
+    receive_buffer: Vec<u8>,
+    send_buffer: Vec<u8>,
+    // runner_version: [u8; 3],
+}
+impl InnerConnection {
+    pub fn new(mut socket: TcpStream) -> Result<Self, MessageError> {
+        // read the runner-hello
+        let mut hello_buf = [0u8; RUNNER_HELLO_SIZE];
+        socket.read_exact(&mut hello_buf)?;
+        assert_eq!(
+            &hello_buf[0..RUNNER_MAGIC_NUMBER.len()],
+            RUNNER_MAGIC_NUMBER.as_bytes(),
+            "Not connected to cargo-criterion."
+ ); + + let i = RUNNER_MAGIC_NUMBER.len(); + let runner_version = [hello_buf[i], hello_buf[i + 1], hello_buf[i + 2]]; + + info!("Runner version: {:?}", runner_version); + + // now send the benchmark-hello + let mut hello_buf = [0u8; BENCHMARK_HELLO_SIZE]; + hello_buf[0..BENCHMARK_MAGIC_NUMBER.len()] + .copy_from_slice(BENCHMARK_MAGIC_NUMBER.as_bytes()); + let mut i = BENCHMARK_MAGIC_NUMBER.len(); + hello_buf[i] = env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(); + hello_buf[i + 1] = env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(); + hello_buf[i + 2] = env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(); + i += 3; + hello_buf[i..i + 2].clone_from_slice(&PROTOCOL_VERSION.to_be_bytes()); + i += 2; + hello_buf[i..i + 2].clone_from_slice(&PROTOCOL_FORMAT.to_be_bytes()); + + socket.write_all(&hello_buf)?; + + Ok(InnerConnection { + socket, + receive_buffer: vec![], + send_buffer: vec![], + // runner_version, + }) + } + + #[allow(dead_code)] + pub fn recv(&mut self) -> Result { + let mut length_buf = [0u8; 4]; + self.socket.read_exact(&mut length_buf)?; + let length = u32::from_be_bytes(length_buf); + self.receive_buffer.resize(length as usize, 0u8); + self.socket.read_exact(&mut self.receive_buffer)?; + let value = ciborium::de::from_reader(&self.receive_buffer[..])?; + Ok(value) + } + + pub fn send(&mut self, message: &OutgoingMessage) -> Result<(), MessageError> { + self.send_buffer.truncate(0); + ciborium::ser::into_writer(message, &mut self.send_buffer)?; + let size = u32::try_from(self.send_buffer.len()).unwrap(); + let length_buf = size.to_be_bytes(); + self.socket.write_all(&length_buf)?; + self.socket.write_all(&self.send_buffer)?; + Ok(()) + } +} + +/// This is really just a holder to allow us to send messages through a shared reference to the +/// connection. +#[derive(Debug)] +pub struct Connection { + inner: RefCell, +} +impl Connection { + pub fn new(socket: TcpStream) -> Result { + Ok(Connection { + inner: RefCell::new(InnerConnection::new(socket)?), + }) + } + + #[allow(dead_code)] + pub fn recv(&self) -> Result { + self.inner.borrow_mut().recv() + } + + pub fn send(&self, message: &OutgoingMessage) -> Result<(), MessageError> { + self.inner.borrow_mut().send(message) + } + + pub fn serve_value_formatter( + &self, + formatter: &dyn crate::measurement::ValueFormatter, + ) -> Result<(), MessageError> { + loop { + let response = match self.recv()? 
{ + IncomingMessage::FormatValue { value } => OutgoingMessage::FormattedValue { + value: formatter.format_value(value), + }, + IncomingMessage::FormatThroughput { value, throughput } => { + OutgoingMessage::FormattedValue { + value: formatter.format_throughput(&throughput, value), + } + } + IncomingMessage::ScaleValues { + typical_value, + mut values, + } => { + let unit = formatter.scale_values(typical_value, &mut values); + OutgoingMessage::ScaledValues { + unit, + scaled_values: values, + } + } + IncomingMessage::ScaleThroughputs { + typical_value, + throughput, + mut values, + } => { + let unit = formatter.scale_throughputs(typical_value, &throughput, &mut values); + OutgoingMessage::ScaledValues { + unit, + scaled_values: values, + } + } + IncomingMessage::ScaleForMachines { mut values } => { + let unit = formatter.scale_for_machines(&mut values); + OutgoingMessage::ScaledValues { + unit, + scaled_values: values, + } + } + IncomingMessage::Continue => break, + _ => panic!(), + }; + self.send(&response)?; + } + Ok(()) + } +} + +/// Enum defining the messages we can receive +#[derive(Debug, Deserialize)] +pub enum IncomingMessage { + // Value formatter requests + FormatValue { + value: f64, + }, + FormatThroughput { + value: f64, + throughput: Throughput, + }, + ScaleValues { + typical_value: f64, + values: Vec, + }, + ScaleThroughputs { + typical_value: f64, + values: Vec, + throughput: Throughput, + }, + ScaleForMachines { + values: Vec, + }, + Continue, + + __Other, +} + +/// Enum defining the messages we can send +#[derive(Debug, Serialize)] +pub enum OutgoingMessage<'a> { + BeginningBenchmarkGroup { + group: &'a str, + }, + FinishedBenchmarkGroup { + group: &'a str, + }, + BeginningBenchmark { + id: RawBenchmarkId, + }, + SkippingBenchmark { + id: RawBenchmarkId, + }, + Warmup { + id: RawBenchmarkId, + nanos: f64, + }, + MeasurementStart { + id: RawBenchmarkId, + sample_count: u64, + estimate_ns: f64, + iter_count: u64, + }, + MeasurementComplete { + id: RawBenchmarkId, + iters: &'a [f64], + times: &'a [f64], + plot_config: PlotConfiguration, + sampling_method: SamplingMethod, + benchmark_config: BenchmarkConfig, + }, + // value formatter responses + FormattedValue { + value: String, + }, + ScaledValues { + scaled_values: Vec, + unit: &'a str, + }, +} + +// Also define serializable variants of certain things, either to avoid leaking +// serializability into the public interface or because the serialized form +// is a bit different from the regular one. 
+ +#[derive(Debug, Serialize)] +pub struct RawBenchmarkId { + group_id: String, + function_id: Option, + value_str: Option, + throughput: Vec, +} +impl From<&InternalBenchmarkId> for RawBenchmarkId { + fn from(other: &InternalBenchmarkId) -> RawBenchmarkId { + RawBenchmarkId { + group_id: other.group_id.clone(), + function_id: other.function_id.clone(), + value_str: other.value_str.clone(), + throughput: other.throughput.iter().cloned().collect(), + } + } +} + +#[derive(Debug, Serialize)] +pub enum AxisScale { + Linear, + Logarithmic, +} +impl From for AxisScale { + fn from(other: crate::AxisScale) -> Self { + match other { + crate::AxisScale::Linear => AxisScale::Linear, + crate::AxisScale::Logarithmic => AxisScale::Logarithmic, + } + } +} + +#[derive(Debug, Serialize)] +pub struct PlotConfiguration { + summary_scale: AxisScale, +} +impl From<&crate::PlotConfiguration> for PlotConfiguration { + fn from(other: &crate::PlotConfiguration) -> Self { + PlotConfiguration { + summary_scale: other.summary_scale.into(), + } + } +} + +#[derive(Debug, Serialize)] +struct Duration { + secs: u64, + nanos: u32, +} +impl From for Duration { + fn from(other: std::time::Duration) -> Self { + Duration { + secs: other.as_secs(), + nanos: other.subsec_nanos(), + } + } +} + +#[derive(Debug, Serialize)] +pub struct BenchmarkConfig { + confidence_level: f64, + measurement_time: Duration, + noise_threshold: f64, + nresamples: usize, + sample_size: usize, + significance_level: f64, + warm_up_time: Duration, +} +impl From<&crate::benchmark::BenchmarkConfig> for BenchmarkConfig { + fn from(other: &crate::benchmark::BenchmarkConfig) -> Self { + BenchmarkConfig { + confidence_level: other.confidence_level, + measurement_time: other.measurement_time.into(), + noise_threshold: other.noise_threshold, + nresamples: other.nresamples, + sample_size: other.sample_size, + significance_level: other.significance_level, + warm_up_time: other.warm_up_time.into(), + } + } +} + +/// Currently not used; defined for forwards compatibility with cargo-criterion. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub enum SamplingMethod { + Linear, + Flat, +} +impl From for SamplingMethod { + fn from(other: crate::ActualSamplingMode) -> Self { + match other { + crate::ActualSamplingMode::Flat => SamplingMethod::Flat, + crate::ActualSamplingMode::Linear => SamplingMethod::Linear, + } + } +} From d1334d3d7f080cde37e3fca8bf1248f8aed52aeb Mon Sep 17 00:00:00 2001 From: Samuel Tardieu Date: Fri, 29 Mar 2024 07:33:22 +0100 Subject: [PATCH 7/7] Add missing documentation comment --- src/estimate.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/estimate.rs b/src/estimate.rs index 8a79d27a8..1617c1c33 100644 --- a/src/estimate.rs +++ b/src/estimate.rs @@ -36,7 +36,7 @@ pub struct ConfidenceInterval { pub struct Estimate { /// The confidence interval for this estimate pub confidence_interval: ConfidenceInterval, - /// + /// The value of this estimate pub point_estimate: f64, /// The standard error of this estimate pub standard_error: f64,
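
For context, a minimal sketch of how the batched timing loops in `src/bencher.rs` above are typically driven from a benchmark target. This is not taken from the patch; it assumes `criterion` is built with its `async_futures` feature, and `create_scrambled_data`, `sort_sync`, and `sort_async` are placeholder helpers:

```rust
use criterion::async_executor::FuturesExecutor;
use criterion::{criterion_group, criterion_main, BatchSize, Criterion};

// Placeholder setup and routines for illustration only.
fn create_scrambled_data() -> Vec<u64> {
    (0..1_000u64).rev().collect()
}

fn sort_sync(data: &mut [u64]) {
    data.sort_unstable();
}

async fn sort_async(data: &mut [u64]) {
    data.sort_unstable();
}

fn bench(c: &mut Criterion) {
    let data = create_scrambled_data();

    // Sync: the setup closure runs outside the timed region, the routine receives
    // `&mut I`, and dropping the input stays outside the measurement.
    c.bench_function("sort sync", |b| {
        b.iter_batched_ref(|| data.clone(), |d| sort_sync(d), BatchSize::SmallInput)
    });

    // Async: same shape, but the routine owns its input and returns a future that
    // the chosen executor drives inside the timed region.
    c.bench_function("sort async", |b| {
        b.to_async(FuturesExecutor).iter_batched(
            || data.clone(),
            |mut d| async move { sort_async(&mut d).await },
            BatchSize::SmallInput,
        )
    });
}

criterion_group!(benches, bench);
criterion_main!(benches);
```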
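The wire format implemented by `InnerConnection::send`/`recv` in `src/connection.rs` above is a 4-byte big-endian length prefix followed by a CBOR payload. Below is a minimal sketch of that framing over an in-memory buffer rather than the `TcpStream` used above; it assumes the `ciborium` and `serde` crates, and `Ping`, `write_message`, and `read_message` are made-up names for illustration:

```rust
use std::io::{Read, Write};

use serde::{Deserialize, Serialize};

// A made-up message type standing in for IncomingMessage/OutgoingMessage.
#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct Ping {
    seq: u32,
}

fn write_message<W: Write, T: Serialize>(mut w: W, msg: &T) -> std::io::Result<()> {
    // Serialize to CBOR first so the length is known before anything hits the wire.
    let mut payload = Vec::new();
    ciborium::ser::into_writer(msg, &mut payload).expect("CBOR serialization failed");
    let len = u32::try_from(payload.len()).expect("message too large");
    w.write_all(&len.to_be_bytes())?; // 4-byte big-endian length prefix
    w.write_all(&payload) // CBOR body
}

fn read_message<R: Read, T: for<'de> Deserialize<'de>>(mut r: R) -> std::io::Result<T> {
    // Read the length prefix, then exactly that many payload bytes.
    let mut len_buf = [0u8; 4];
    r.read_exact(&mut len_buf)?;
    let len = u32::from_be_bytes(len_buf) as usize;
    let mut payload = vec![0u8; len];
    r.read_exact(&mut payload)?;
    Ok(ciborium::de::from_reader(&payload[..]).expect("CBOR deserialization failed"))
}

fn main() -> std::io::Result<()> {
    // Round-trip through an in-memory buffer.
    let mut buf = Vec::new();
    write_message(&mut buf, &Ping { seq: 1 })?;
    let echoed: Ping = read_message(&buf[..])?;
    assert_eq!(echoed, Ping { seq: 1 });
    Ok(())
}
```

The length prefix lets the receiver size its buffer before deserializing, which is why `recv` above resizes `receive_buffer` before calling `ciborium::de::from_reader`.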