Skip to content

Commit

Permalink
codspeed
Browse files Browse the repository at this point in the history
  • Loading branch information
Boshen committed Mar 16, 2024
1 parent 167a893 commit 4fdfbbb
Show file tree
Hide file tree
Showing 13 changed files with 1,590 additions and 895 deletions.
3 changes: 3 additions & 0 deletions .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,9 @@ jobs:
- name: Test with minimal features
run: cargo test --no-default-features

- name: Check codspeed
run: cargo check --features codspeed

- name: Check for non-standard formatting
if: ${{ matrix.rust == 'stable' }}
run: cargo fmt --all -- --check
Expand Down
3 changes: 3 additions & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,8 @@ plotters = { version = "^0.3.5", default-features = false, features = [
"area_series",
"line_series",
], optional = true }
codspeed = { version = "2.4.0", optional = true }
colored = { version = "2.0.0", optional = true }

[dev-dependencies]
tempfile = "3.10.1"
Expand All @@ -82,6 +84,7 @@ stable = [
"async_std",
]
default = ["rayon", "plotters", "cargo_bench_support"]
codspeed = ["dep:codspeed", "dep:colored"]

# Enable use of the nightly-only test::black_box function to discourage compiler optimizations.
real_blackbox = []
Expand Down
3 changes: 2 additions & 1 deletion src/analysis/compare.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,14 @@ use crate::stats::univariate::{self, mixed};
use crate::stats::Distribution;

use crate::benchmark::BenchmarkConfig;
use crate::criterion::Criterion;
use crate::error::Result;
use crate::estimate::{
build_change_estimates, ChangeDistributions, ChangeEstimates, ChangePointEstimates, Estimates,
};
use crate::measurement::Measurement;
use crate::report::BenchmarkId;
use crate::{fs, Criterion, SavedSample};
use crate::{fs, SavedSample};

// Common comparison procedure
#[cfg_attr(feature = "cargo-clippy", allow(clippy::type_complexity))]
Expand Down
3 changes: 2 additions & 1 deletion src/analysis/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,14 +8,15 @@ use crate::stats::{Distribution, Tails};

use crate::benchmark::BenchmarkConfig;
use crate::connection::OutgoingMessage;
use crate::criterion::Criterion;
use crate::estimate::{
build_estimates, ConfidenceInterval, Distributions, Estimate, Estimates, PointEstimates,
};
use crate::fs;
use crate::measurement::Measurement;
use crate::report::{BenchmarkId, Report, ReportContext};
use crate::routine::Routine;
use crate::{Baseline, Criterion, SavedSample, Throughput};
use crate::{Baseline, SavedSample, Throughput};

macro_rules! elapsed {
($msg:expr, $block:expr) => {{
Expand Down
7 changes: 5 additions & 2 deletions src/benchmark_group.rs
Original file line number Diff line number Diff line change
@@ -1,13 +1,16 @@
use std::time::Duration;

use crate::analysis;
use crate::bencher::Bencher;
use crate::benchmark::PartialBenchmarkConfig;
use crate::connection::OutgoingMessage;
use crate::criterion::Criterion;
use crate::measurement::Measurement;
use crate::report::BenchmarkId as InternalBenchmarkId;
use crate::report::Report;
use crate::report::ReportContext;
use crate::routine::{Function, Routine};
use crate::{Bencher, Criterion, Mode, PlotConfiguration, SamplingMode, Throughput};
use std::time::Duration;
use crate::{Mode, PlotConfiguration, SamplingMode, Throughput};

/// Structure used to group together a set of related benchmarks, along with custom configuration
/// settings for groups of benchmarks. All benchmarks performed using a benchmark group will be
Expand Down
260 changes: 260 additions & 0 deletions src/codspeed/bencher.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,260 @@
#[cfg(feature = "async")]
use std::future::Future;
use std::{cell::RefCell, rc::Rc};

use codspeed::codspeed::{black_box, CodSpeed};
use colored::Colorize;

#[cfg(feature = "async")]
use crate::async_executor::AsyncExecutor;

use crate::{BatchSize, Measurement, WallTime};

/// CodSpeed-instrumented counterpart of the standard criterion `Bencher`:
/// instead of wall-clock sampling, iterations are wrapped in CodSpeed
/// start/end benchmark markers.
pub struct Bencher<'a, M: Measurement = WallTime> {
    // Shared handle to the CodSpeed instrumentation engine.
    codspeed: Rc<RefCell<CodSpeed>>,
    // Identifier under which this benchmark is reported to CodSpeed.
    uri: String,
    // Carries the `'a` / `M` parameters purely for criterion API signature
    // compatibility; no measurement data is stored here.
    _marker: std::marker::PhantomData<&'a M>,
}

impl<'a> Bencher<'a> {
    /// Creates a bencher that reports the benchmark identified by `uri`
    /// to the shared CodSpeed engine.
    pub fn new(codspeed: Rc<RefCell<CodSpeed>>, uri: String) -> Self {
        Bencher { codspeed, uri, _marker: std::marker::PhantomData }
    }

    /// Runs `routine` for `WARMUP_RUNS` unmeasured iterations, then exactly
    /// once inside a CodSpeed measurement window.
    #[inline(never)]
    pub fn iter<O, R>(&mut self, mut routine: R)
    where
        R: FnMut() -> O,
    {
        let mut codspeed = self.codspeed.borrow_mut();
        // NOTE: this structure hardens our benchmark against dead code elimination
        // https://godbolt.org/z/KnYeKMd1o
        for i in 0..codspeed::codspeed::WARMUP_RUNS + 1 {
            if i < codspeed::codspeed::WARMUP_RUNS {
                black_box(routine());
            } else {
                codspeed.start_benchmark(self.uri.as_str());
                black_box(routine());
                codspeed.end_benchmark();
            }
        }
    }

    /// Custom per-iteration timing is not supported by CodSpeed's
    /// instrumentation model, so the benchmark is skipped with a console
    /// notice instead of reporting misleading numbers.
    #[inline(never)]
    pub fn iter_custom<R, MV>(&mut self, mut _routine: R)
    where
        R: FnMut(u64) -> MV,
    {
        println!(
            "{} {} (CodSpeed doesn't support custom iterations)",
            "Skipping:".to_string().yellow(),
            self.uri.yellow(),
        );
    }

    /// Like [`Bencher::iter`], but each invocation of `routine` consumes a
    /// fresh value produced by `setup`. `_size` exists only for criterion
    /// API compatibility and is ignored.
    #[inline(never)]
    pub fn iter_batched<I, O, S, R>(&mut self, mut setup: S, mut routine: R, _size: BatchSize)
    where
        S: FnMut() -> I,
        R: FnMut(I) -> O,
    {
        let mut codspeed = self.codspeed.borrow_mut();

        for i in 0..codspeed::codspeed::WARMUP_RUNS + 1 {
            // Build the input before the measurement window so setup cost is
            // never attributed to the routine.
            let input = black_box(setup());
            let output = if i < codspeed::codspeed::WARMUP_RUNS {
                black_box(routine(input))
            } else {
                // Bug fix: `setup()` was previously invoked a second time
                // here, shadowing and discarding the input created above —
                // running setup twice on the measured iteration. Reusing the
                // single input matches `iter_batched_ref` and the async
                // implementation.
                codspeed.start_benchmark(self.uri.as_str());
                let output = black_box(routine(input));
                codspeed.end_benchmark();
                output
            };
            // Route the output through black_box before dropping so the
            // computed result cannot be optimized away.
            drop(black_box(output));
        }
    }

    /// criterion-compatible alias: per-iteration setup batching.
    pub fn iter_with_setup<I, O, S, R>(&mut self, setup: S, routine: R)
    where
        S: FnMut() -> I,
        R: FnMut(I) -> O,
    {
        self.iter_batched(setup, routine, BatchSize::PerIteration);
    }

    /// Benchmarks `routine`, deferring the drop of its (potentially large)
    /// output via the batched path.
    pub fn iter_with_large_drop<O, R>(&mut self, mut routine: R)
    where
        R: FnMut() -> O,
    {
        self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput);
    }

    /// criterion-compatible alias: single-batch setup.
    pub fn iter_with_large_setup<I, O, S, R>(&mut self, setup: S, routine: R)
    where
        S: FnMut() -> I,
        R: FnMut(I) -> O,
    {
        self.iter_batched(setup, routine, BatchSize::NumBatches(1));
    }

    /// Like `iter_batched`, but `routine` borrows the input mutably so it can
    /// be reused; both output and input are dropped outside the measured
    /// window, routed through `black_box`.
    #[inline(never)]
    pub fn iter_batched_ref<I, O, S, R>(&mut self, mut setup: S, mut routine: R, _size: BatchSize)
    where
        S: FnMut() -> I,
        R: FnMut(&mut I) -> O,
    {
        let mut codspeed = self.codspeed.borrow_mut();

        for i in 0..codspeed::codspeed::WARMUP_RUNS + 1 {
            let mut input = black_box(setup());
            let output = if i < codspeed::codspeed::WARMUP_RUNS {
                black_box(routine(&mut input))
            } else {
                codspeed.start_benchmark(self.uri.as_str());
                let output = black_box(routine(&mut input));
                codspeed.end_benchmark();
                output
            };
            drop(black_box(output));
            drop(black_box(input));
        }
    }

    /// Wraps this bencher for benchmarking futures driven by `runner`.
    #[cfg(feature = "async")]
    pub fn to_async<'b, A: AsyncExecutor>(&'b mut self, runner: A) -> AsyncBencher<'a, 'b, A> {
        AsyncBencher { b: self, runner }
    }
}

/// Async counterpart of [`Bencher`], created via `Bencher::to_async`;
/// drives benchmark futures to completion on the supplied executor.
#[cfg(feature = "async")]
pub struct AsyncBencher<'a, 'b, A: AsyncExecutor> {
    // Underlying synchronous bencher holding the CodSpeed handle and URI.
    b: &'b mut Bencher<'a>,
    // Executor used to block on the benchmark futures.
    runner: A,
}

#[cfg(feature = "async")]
impl<'a, 'b, A: AsyncExecutor> AsyncBencher<'a, 'b, A> {
    /// Async analogue of [`Bencher::iter`]: warms up `WARMUP_RUNS` times,
    /// then awaits `routine` once inside a CodSpeed measurement window.
    // The RefCell borrow is intentionally held across `.await`; the whole
    // loop runs inside a single `block_on`, so no concurrent borrow exists.
    #[allow(clippy::await_holding_refcell_ref)]
    #[inline(never)]
    pub fn iter<O, R, F>(&mut self, mut routine: R)
    where
        R: FnMut() -> F,
        F: Future<Output = O>,
    {
        let AsyncBencher { b, runner } = self;
        runner.block_on(async {
            let mut codspeed = b.codspeed.borrow_mut();
            for i in 0..codspeed::codspeed::WARMUP_RUNS + 1 {
                if i < codspeed::codspeed::WARMUP_RUNS {
                    black_box(routine().await);
                } else {
                    codspeed.start_benchmark(b.uri.as_str());
                    black_box(routine().await);
                    codspeed.end_benchmark();
                }
            }
        });
    }

    /// Custom per-iteration timing is unsupported by CodSpeed; prints a
    /// skip notice instead of running the routine.
    #[inline(never)]
    pub fn iter_custom<R, F, MV>(&mut self, mut _routine: R)
    where
        R: FnMut(u64) -> F,
        F: Future<Output = MV>,
    {
        let AsyncBencher { b, .. } = self;
        println!(
            "{} {} (CodSpeed doesn't support custom iterations)",
            "Skipping:".to_string().yellow(),
            b.uri.yellow(),
        );
    }

    /// criterion-compatible alias: per-iteration setup batching.
    #[doc(hidden)]
    pub fn iter_with_setup<I, O, S, R, F>(&mut self, setup: S, routine: R)
    where
        S: FnMut() -> I,
        R: FnMut(I) -> F,
        F: Future<Output = O>,
    {
        self.iter_batched(setup, routine, BatchSize::PerIteration);
    }

    /// Benchmarks `routine`, deferring the drop of its output via the
    /// batched path.
    pub fn iter_with_large_drop<O, R, F>(&mut self, mut routine: R)
    where
        R: FnMut() -> F,
        F: Future<Output = O>,
    {
        self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput);
    }

    /// criterion-compatible alias: single-batch setup.
    #[doc(hidden)]
    pub fn iter_with_large_setup<I, O, S, R, F>(&mut self, setup: S, routine: R)
    where
        S: FnMut() -> I,
        R: FnMut(I) -> F,
        F: Future<Output = O>,
    {
        self.iter_batched(setup, routine, BatchSize::NumBatches(1));
    }

    /// Async analogue of [`Bencher::iter_batched`]: each awaited `routine`
    /// call consumes a fresh `setup()` input; `_size` is ignored.
    #[allow(clippy::await_holding_refcell_ref)]
    #[inline(never)]
    pub fn iter_batched<I, O, S, R, F>(&mut self, mut setup: S, mut routine: R, _size: BatchSize)
    where
        S: FnMut() -> I,
        R: FnMut(I) -> F,
        F: Future<Output = O>,
    {
        let AsyncBencher { b, runner } = self;
        runner.block_on(async {
            let mut codspeed = b.codspeed.borrow_mut();

            for i in 0..codspeed::codspeed::WARMUP_RUNS + 1 {
                // Input is built outside the measurement window so setup
                // cost is never attributed to the routine.
                let input = black_box(setup());
                let output = if i < codspeed::codspeed::WARMUP_RUNS {
                    black_box(routine(input).await)
                } else {
                    codspeed.start_benchmark(b.uri.as_str());
                    let output = black_box(routine(input).await);
                    codspeed.end_benchmark();
                    output
                };
                // black_box before drop prevents dead-code elimination of
                // the benchmarked computation.
                drop(black_box(output));
            }
        })
    }

    /// Like `iter_batched`, but `routine` borrows the input mutably; both
    /// output and input are dropped outside the measured window.
    #[allow(clippy::await_holding_refcell_ref)]
    #[inline(never)]
    pub fn iter_batched_ref<I, O, S, R, F>(
        &mut self,
        mut setup: S,
        mut routine: R,
        _size: BatchSize,
    ) where
        S: FnMut() -> I,
        R: FnMut(&mut I) -> F,
        F: Future<Output = O>,
    {
        let AsyncBencher { b, runner } = self;
        runner.block_on(async {
            let mut codspeed = b.codspeed.borrow_mut();

            for i in 0..codspeed::codspeed::WARMUP_RUNS + 1 {
                let mut input = black_box(setup());
                let output = if i < codspeed::codspeed::WARMUP_RUNS {
                    black_box(routine(&mut input).await)
                } else {
                    codspeed.start_benchmark(b.uri.as_str());
                    let output = black_box(routine(&mut input).await);
                    codspeed.end_benchmark();
                    output
                };
                drop(black_box(output));
                drop(black_box(input));
            }
        });
    }
}
Loading

0 comments on commit 4fdfbbb

Please sign in to comment.