From 73fd7d6229646e88e8b670ea128561cd3c15cde7 Mon Sep 17 00:00:00 2001 From: Bruce Mitchener Date: Sat, 27 Jul 2024 11:46:57 +0700 Subject: [PATCH] Improve docs. * More cross-linking. * Remove `extern crate` stuff from doc tests / snippets. --- src/bencher.rs | 72 ++++++++++++++++++------------------------ src/benchmark_group.rs | 3 +- src/lib.rs | 32 +++++++++++-------- src/macros.rs | 14 +++----- src/measurement.rs | 4 +-- 5 files changed, 56 insertions(+), 69 deletions(-) diff --git a/src/bencher.rs b/src/bencher.rs index 73c232c11..6694d2815 100644 --- a/src/bencher.rs +++ b/src/bencher.rs @@ -21,16 +21,22 @@ use crate::async_executor::AsyncExecutor; /// way to time a routine and each has advantages and disadvantages. /// /// * If you want to do the iteration and measurement yourself (eg. passing the iteration count -/// to a separate process), use `iter_custom`. +/// to a separate process), use [`iter_custom`]. /// * If your routine requires no per-iteration setup and returns a value with an expensive `drop` -/// method, use `iter_with_large_drop`. -/// * If your routine requires some per-iteration setup that shouldn't be timed, use `iter_batched` -/// or `iter_batched_ref`. See [`BatchSize`] for a discussion of batch sizes. +/// method, use [`iter_with_large_drop`]. +/// * If your routine requires some per-iteration setup that shouldn't be timed, use [`iter_batched`] +/// or [`iter_batched_ref`]. See [`BatchSize`] for a discussion of batch sizes. /// If the setup value implements `Drop` and you don't want to include the `drop` time in the -/// measurement, use `iter_batched_ref`, otherwise use `iter_batched`. These methods are also +/// measurement, use [`iter_batched_ref`], otherwise use [`iter_batched`]. These methods are also /// suitable for benchmarking routines which return a value with an expensive `drop` method, -/// but are more complex than `iter_with_large_drop`. -/// * Otherwise, use `iter`. +/// but are more complex than [`iter_with_large_drop`]. +/// * Otherwise, use [`iter`]. +/// +/// [`iter`]: Bencher::iter +/// [`iter_custom`]: Bencher::iter_custom +/// [`iter_with_large_drop`]: Bencher::iter_with_large_drop +/// [`iter_batched`]: Bencher::iter_batched +/// [`iter_batched_ref`]: Bencher::iter_batched_ref pub struct Bencher<'a, M: Measurement = WallTime> { pub(crate) iterated: bool, // Have we iterated this benchmark? pub(crate) iters: u64, // Number of times to iterate this benchmark @@ -56,9 +62,7 @@ impl<'a, M: Measurement> Bencher<'a, M> { /// # Example /// /// ```rust - /// #[macro_use] extern crate criterion; - /// - /// use criterion::*; + /// use criterion::{criterion_group, criterion_main, Criterion}; /// /// // The function to benchmark /// fn foo() { @@ -97,12 +101,11 @@ impl<'a, M: Measurement> Bencher<'a, M> { /// and coordinate with multiple threads). /// /// # Timing model - /// Custom, the timing model is whatever is returned as the Duration from `routine`. + /// Custom, the timing model is whatever is returned as the [`Duration`] from `routine`. /// /// # Example /// ```rust - /// #[macro_use] extern crate criterion; - /// use criterion::*; + /// use criterion::{criterion_group, criterion_main, Criterion}; /// use std::time::Instant; /// /// fn foo() { @@ -148,8 +151,9 @@ impl<'a, M: Measurement> Bencher<'a, M> { /// Times a `routine` by collecting its output on each iteration. This avoids timing the /// destructor of the value returned by `routine`. 
/// - /// WARNING: This requires `O(iters * mem::size_of::())` of memory, and `iters` is not under the - /// control of the caller. If this causes out-of-memory errors, use `iter_batched` instead. + /// WARNING: This requires `O(iters * mem::size_of::())` of memory, and `iters` is not + /// under the control of the caller. If this causes out-of-memory errors, use + /// [`iter_batched`](Self::iter_batched) instead. /// /// # Timing model /// @@ -160,9 +164,7 @@ impl<'a, M: Measurement> Bencher<'a, M> { /// # Example /// /// ```rust - /// #[macro_use] extern crate criterion; - /// - /// use criterion::*; + /// use criterion::{criterion_group, criterion_main, Criterion}; /// /// fn create_vector() -> Vec { /// # vec![] @@ -203,9 +205,7 @@ impl<'a, M: Measurement> Bencher<'a, M> { /// # Example /// /// ```rust - /// #[macro_use] extern crate criterion; - /// - /// use criterion::*; + /// use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; /// /// fn create_scrambled_data() -> Vec { /// # vec![] @@ -293,9 +293,7 @@ impl<'a, M: Measurement> Bencher<'a, M> { /// # Example /// /// ```rust - /// #[macro_use] extern crate criterion; - /// - /// use criterion::*; + /// use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; /// /// fn create_scrambled_data() -> Vec { /// # vec![] @@ -408,9 +406,7 @@ impl<'a, 'b, A: AsyncExecutor, M: Measurement> AsyncBencher<'a, 'b, A, M> { /// # Example /// /// ```rust - /// #[macro_use] extern crate criterion; - /// - /// use criterion::*; + /// use criterion::{criterion_group, criterion_main, Criterion}; /// use criterion::async_executor::FuturesExecutor; /// /// // The function to benchmark @@ -454,12 +450,11 @@ impl<'a, 'b, A: AsyncExecutor, M: Measurement> AsyncBencher<'a, 'b, A, M> { /// and coordinate with multiple threads). /// /// # Timing model - /// Custom, the timing model is whatever is returned as the Duration from `routine`. + /// Custom, the timing model is whatever is returned as the [`Duration`] from `routine`. /// /// # Example /// ```rust - /// #[macro_use] extern crate criterion; - /// use criterion::*; + /// use criterion::{criterion_group, criterion_main, Criterion}; /// use criterion::async_executor::FuturesExecutor; /// use std::time::Instant; /// @@ -513,8 +508,9 @@ impl<'a, 'b, A: AsyncExecutor, M: Measurement> AsyncBencher<'a, 'b, A, M> { /// Times a `routine` by collecting its output on each iteration. This avoids timing the /// destructor of the value returned by `routine`. /// - /// WARNING: This requires `O(iters * mem::size_of::())` of memory, and `iters` is not under the - /// control of the caller. If this causes out-of-memory errors, use `iter_batched` instead. + /// WARNING: This requires `O(iters * mem::size_of::())` of memory, and `iters` + /// is not under the control of the caller. If this causes out-of-memory errors, use + /// [`iter_batched`](Self::iter_batched) instead. 
/// /// # Timing model /// @@ -525,9 +521,7 @@ impl<'a, 'b, A: AsyncExecutor, M: Measurement> AsyncBencher<'a, 'b, A, M> { /// # Example /// /// ```rust - /// #[macro_use] extern crate criterion; - /// - /// use criterion::*; + /// use criterion::{criterion_group, criterion_main, Criterion}; /// use criterion::async_executor::FuturesExecutor; /// /// async fn create_vector() -> Vec { @@ -580,9 +574,7 @@ impl<'a, 'b, A: AsyncExecutor, M: Measurement> AsyncBencher<'a, 'b, A, M> { /// # Example /// /// ```rust - /// #[macro_use] extern crate criterion; - /// - /// use criterion::*; + /// use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; /// use criterion::async_executor::FuturesExecutor; /// /// fn create_scrambled_data() -> Vec { @@ -678,9 +670,7 @@ impl<'a, 'b, A: AsyncExecutor, M: Measurement> AsyncBencher<'a, 'b, A, M> { /// # Example /// /// ```rust - /// #[macro_use] extern crate criterion; - /// - /// use criterion::*; + /// use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; /// use criterion::async_executor::FuturesExecutor; /// /// fn create_scrambled_data() -> Vec { diff --git a/src/benchmark_group.rs b/src/benchmark_group.rs index 687fb2f21..7f9009b3c 100644 --- a/src/benchmark_group.rs +++ b/src/benchmark_group.rs @@ -16,8 +16,7 @@ use std::time::Duration; /// # Examples: /// /// ```no_run -/// #[macro_use] extern crate criterion; -/// use self::criterion::*; +/// use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; /// use std::time::Duration; /// /// fn bench_simple(c: &mut Criterion) { diff --git a/src/lib.rs b/src/lib.rs index 1bab5c118..547d9904f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1154,8 +1154,7 @@ https://bheisler.github.io/criterion.rs/book/faq.html /// # Examples: /// /// ```rust - /// #[macro_use] extern crate criterion; - /// use self::criterion::*; + /// use criterion::{criterion_group, criterion_main, Criterion}; /// /// fn bench_simple(c: &mut Criterion) { /// let mut group = c.benchmark_group("My Group"); @@ -1187,13 +1186,13 @@ impl Criterion where M: Measurement + 'static, { - /// Benchmarks a function. For comparing multiple functions, see `benchmark_group`. + /// Benchmarks a function. For comparing multiple functions, see + /// [`benchmark_group`](Self::benchmark_group). /// /// # Example /// /// ```rust - /// #[macro_use] extern crate criterion; - /// use self::criterion::*; + /// use criterion::{criterion_group, criterion_main, Criterion}; /// /// fn bench(c: &mut Criterion) { /// // Setup (construct data, allocate memory, etc) @@ -1218,13 +1217,12 @@ where } /// Benchmarks a function with an input. For comparing multiple functions or multiple inputs, - /// see `benchmark_group`. + /// see [`benchmark_group`](Self::benchmark_group). /// /// # Example /// /// ```rust - /// #[macro_use] extern crate criterion; - /// use self::criterion::*; + /// use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; /// /// fn bench(c: &mut Criterion) { /// // Setup (construct data, allocate memory, etc) @@ -1285,7 +1283,7 @@ pub enum Throughput { Elements(u64), } -/// Axis scaling type +/// Axis scaling type. Specified via [`PlotConfiguration::summary_scale`]. #[derive(Debug, Default, Clone, Copy)] pub enum AxisScale { /// Axes scale linearly @@ -1318,9 +1316,13 @@ pub struct PlotConfiguration { impl PlotConfiguration { #[must_use] - /// Set the axis scale (linear or logarithmic) for the summary plots. 
Typically, you would - /// set this to logarithmic if benchmarking over a range of inputs which scale exponentially. - /// Defaults to linear. + /// Set the axis scale ([linear] or [logarithmic]) for the summary plots. + /// + /// Typically, you would set this to logarithmic if benchmarking over a + /// range of inputs which scale exponentially. Defaults to [`AxisScale::Linear`]. + /// + /// [linear]: AxisScale::Linear + /// [logarithmic]: AxisScale::Logarithmic pub fn summary_scale(mut self, new_scale: AxisScale) -> PlotConfiguration { self.summary_scale = new_scale; self @@ -1328,7 +1330,7 @@ impl PlotConfiguration { } /// This enum allows the user to control how Criterion.rs chooses the iteration count when sampling. -/// The default is Auto, which will choose a method automatically based on the iteration time during +/// The default is `Auto`, which will choose a method automatically based on the iteration time during /// the warm-up phase. #[derive(Debug, Default, Clone, Copy)] pub enum SamplingMode { @@ -1343,10 +1345,11 @@ pub enum SamplingMode { /// Keep the iteration count the same for all samples. This is not recommended, as it affects /// the statistics that Criterion.rs can compute. However, it requires fewer iterations than - /// the Linear method and therefore is more suitable for very long-running benchmarks where + /// the `Linear` method and therefore is more suitable for very long-running benchmarks where /// benchmark execution time is more of a problem and statistical precision is less important. Flat, } + impl SamplingMode { pub(crate) fn choose_sampling_mode( &self, @@ -1380,6 +1383,7 @@ pub(crate) enum ActualSamplingMode { Linear, Flat, } + impl ActualSamplingMode { pub(crate) fn iteration_counts( &self, diff --git a/src/macros.rs b/src/macros.rs index 1b10051d8..55cbdc052 100644 --- a/src/macros.rs +++ b/src/macros.rs @@ -18,9 +18,7 @@ /// Complete form: /// /// ``` -/// # #[macro_use] -/// # extern crate criterion; -/// # use criterion::Criterion; +/// # use criterion::{criterion_group, Criterion}; /// # fn bench_method1(c: &mut Criterion) { /// # } /// # @@ -44,9 +42,7 @@ /// Compact Form: /// /// ``` -/// # #[macro_use] -/// # extern crate criterion; -/// # use criterion::Criterion; +/// # use criterion::{criterion_group, Criterion}; /// # fn bench_method1(c: &mut Criterion) { /// # } /// # @@ -100,10 +96,8 @@ macro_rules! criterion_group { /// /// Since we've disabled the default benchmark harness, we need to add our own: /// -/// ```ignore -/// #[macro_use] -/// extern crate criterion; -/// use criterion::Criterion; +/// ```no_run +/// use criterion::{criterion_group, criterion_main, Criterion}; /// fn bench_method1(c: &mut Criterion) { /// } /// diff --git a/src/measurement.rs b/src/measurement.rs index 415d8e768..b74a4812a 100644 --- a/src/measurement.rs +++ b/src/measurement.rs @@ -9,8 +9,8 @@ use std::time::{Duration, Instant}; /// Trait providing functions to format measured values to string so that they can be displayed on /// the command line or in the reports. The functions of this trait take measured values in f64 /// form; implementors can assume that the values are of the same scale as those produced by the -/// associated [MeasuredValue](trait.MeasuredValue.html) (eg. if your measurement produces values in -/// nanoseconds, the values passed to the formatter will be in nanoseconds). +/// associated [`Measurement`] (eg. if your measurement produces values in nanoseconds, the +/// values passed to the formatter will be in nanoseconds). 
/// /// Implementors are encouraged to format the values in a way that is intuitive for humans and /// uses the SI prefix system. For example, the format used by [`WallTime`] can display the value
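
For reference, the snippet below pulls together the pieces documented above into one self-contained benchmark written in the import style this patch standardizes on (explicit `use criterion::{...}` instead of `#[macro_use] extern crate criterion;`). It is a sketch, not code from the repository: the sorting workload, `create_scrambled_data`, and `bench_sort` are illustrative names chosen to show how the cross-linked `Bencher::iter_batched` and `BatchSize` APIs fit together.

```rust
// A minimal, self-contained benchmark in the style this patch documents:
// explicit imports instead of `#[macro_use] extern crate criterion;`, and
// `iter_batched` for routines that need fresh, untimed per-iteration setup.
// The workload (sorting a reversed Vec) is illustrative only.
use criterion::{criterion_group, criterion_main, BatchSize, Criterion};

// Untimed setup: build the input that each timed iteration will consume.
fn create_scrambled_data() -> Vec<u64> {
    (0..1_000u64).rev().collect()
}

fn bench_sort(c: &mut Criterion) {
    c.bench_function("sort_unstable", |b| {
        b.iter_batched(
            create_scrambled_data,           // per-iteration setup (not timed)
            |mut data| data.sort_unstable(), // the routine being timed
            BatchSize::SmallInput,           // see `BatchSize` for trade-offs
        )
    });
}

criterion_group!(benches, bench_sort);
criterion_main!(benches);
```

As with the doc tests touched in the diff, this assumes the benchmark target disables the default harness (`harness = false` in `Cargo.toml`) so that the `main` generated by `criterion_main!` is the one that runs.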