Skip to content

Commit

Permalink
perf benchmark improvements (#1071)
Browse files Browse the repository at this point in the history
- allow running locally using `--dry-run`.
- limit CI runs to `main` branch only, so that we don't pollute the
dashboard with merge queue runs. We can still trigger manually for other
branches if needed.
  • Loading branch information
OmarTawfik authored Aug 12, 2024
1 parent 34e8c4b commit 0567213
Show file tree
Hide file tree
Showing 4 changed files with 111 additions and 91 deletions.
6 changes: 4 additions & 2 deletions .github/workflows/benchmark.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,10 @@ on:
# https://docs.github.com/en/actions/managing-workflow-runs/manually-running-a-workflow
workflow_dispatch: {}

# Run on pushes to any branch:
push: {}
# Run on pushes to 'main' branch:
push:
branches:
- "main"

# Queue up benchmark workflows for the same branch, so that results are reported in order:
concurrency:
Expand Down
97 changes: 97 additions & 0 deletions crates/infra/cli/src/commands/perf/benchmark/mod.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
use std::path::Path;

use anyhow::{bail, Result};
use clap::Parser;
use infra_utils::cargo::CargoWorkspace;
use infra_utils::commands::Command;
use infra_utils::github::GitHub;
use infra_utils::paths::PathExtensions;

use crate::utils::DryRun;

// Source: https://github.com/bencherdev/bencher/blob/aa31a002842cfb0da9d6c60569396fc5261f5111/tasks/test_api/src/task/test/smoke_test.rs#L20
// NOTE(review): this is Bencher's publicly-committed smoke-test JWT, used only for
// '--dry-run' invocations so local runs don't need a real API token. It is not a
// secret — presumably it stays valid upstream; verify against the linked source if runs start failing.
const BENCHER_TEST_TOKEN: &str = "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJhdWQiOiJhcGlfa2V5IiwiZXhwIjo1OTkzNjQyMTU2LCJpYXQiOjE2OTg2NzQ4NjEsImlzcyI6Imh0dHBzOi8vZGV2ZWwtLWJlbmNoZXIubmV0bGlmeS5hcHAvIiwic3ViIjoibXVyaWVsLmJhZ2dlQG5vd2hlcmUuY29tIiwib3JnIjpudWxsfQ.9z7jmM53TcVzc1inDxTeX9_OR0PQPpZAsKsCE7lWHfo";

/// CLI options for the `perf benchmark` subcommand.
#[derive(Clone, Debug, Parser)]
pub struct BenchmarkController {
    /// Flattened shared flag: when `--dry-run` is passed, benchmarks run against a
    /// dummy Bencher token instead of requiring `BENCHER_API_TOKEN` to be set.
    #[command(flatten)]
    dry_run: DryRun,
}

impl BenchmarkController {
    /// Installs the required perf tooling, then runs every benchmark suite and
    /// reports the results to Bencher.
    ///
    /// Returns an error if tooling is missing, or if a real API token is
    /// required (non-dry-run) but not set.
    pub fn execute(&self) -> Result<()> {
        Self::install_perf_tools()?;

        // Bencher supports multiple languages/frameworks: https://bencher.dev/docs/explanation/adapters/
        // We currently only have one benchmark suite (Rust/iai), but we can add more here in the future.
        self.run_iai_bench("solidity_testing_perf", "iai")?;

        Ok(())
    }

    /// Verifies that Valgrind is available on this machine, then installs the
    /// Cargo binaries needed to run ('iai-callgrind-runner') and report
    /// ('bencher_cli') the benchmarks.
    fn install_perf_tools() -> Result<()> {
        // 'valgrind --version' prints a version string prefixed with "valgrind-".
        match Command::new("valgrind").flag("--version").evaluate() {
            Ok(output) if output.starts_with("valgrind-") => {
                // Valgrind is available
            }
            other => {
                bail!(
                    "valgrind needs to be installed to run perf tests.
It is installed by default inside our devcontainer.
Supported Platforms: https://valgrind.org/downloads/current.html
{other:?}"
                );
            }
        };

        CargoWorkspace::install_binary("iai-callgrind-runner")?;

        CargoWorkspace::install_binary("bencher_cli")?;

        Ok(())
    }

    /// Runs the iai benchmark `bench_name` from `package_name` through the
    /// Bencher CLI, which uploads the results to the dashboard.
    ///
    /// Returns an error (instead of panicking, as the previous `expect()` did)
    /// when `BENCHER_API_TOKEN` is required but not set; a missing token on a
    /// '--dry-run' is fine, since a dummy test token is substituted.
    fn run_iai_bench(&self, package_name: &str, bench_name: &str) -> Result<()> {
        let token = if self.dry_run.get() {
            // Use a dummy test token for dry runs:
            // https://github.com/bencherdev/bencher/issues/468
            BENCHER_TEST_TOKEN.to_string()
        } else {
            // Propagate a proper error rather than panicking on user environment:
            match std::env::var("BENCHER_API_TOKEN") {
                Ok(token) => token,
                Err(_) => bail!(
                    "BENCHER_API_TOKEN is not set. Either perform a '--dry-run', or set it to your Bencher API token: https://bencher.dev/console"
                ),
            }
        };

        let cargo_command = format!("cargo bench --package {package_name} --bench {bench_name}");

        // Keep CI results separate from local (dev) runs on the dashboard:
        let testbed = if GitHub::is_running_in_ci() {
            "ci"
        } else {
            "dev"
        };

        Command::new("bencher")
            .arg("run")
            .property("--project", "slang")
            .property("--adapter", "rust_iai_callgrind")
            .property("--testbed", testbed)
            .property("--token", token)
            .arg(cargo_command)
            .run();

        // iai-callgrind writes its artifacts under 'target/iai/<package>/<bench>':
        let reports_dir = Path::repo_path("target/iai")
            .join(package_name)
            .join(bench_name);

        println!("
Bencher Run is complete...
Test Results: [https://bencher.dev/console/projects/slang/reports]
Reports/Logs: {reports_dir:?}
- Callgrind flamegraphs (callgrind.*.svg) can be viewed directly in the browser.
- DHAT traces (dhat.*.out) can be viewed using the [dhat/dh_view.html] tool from the Valgrind release [https://valgrind.org/downloads/].
");

        Ok(())
    }
}
95 changes: 10 additions & 85 deletions crates/infra/cli/src/commands/perf/mod.rs
Original file line number Diff line number Diff line change
@@ -1,101 +1,26 @@
use std::path::Path;
mod benchmark;

use anyhow::{bail, Result};
use clap::{Parser, ValueEnum};
use infra_utils::cargo::CargoWorkspace;
use infra_utils::commands::Command;
use infra_utils::github::GitHub;
use infra_utils::paths::PathExtensions;
use infra_utils::terminal::Terminal;
use anyhow::Result;
use clap::{Parser, Subcommand};

use crate::utils::ClapExtensions;
use crate::commands::perf::benchmark::BenchmarkController;

/// CLI entry point for the `perf` command group; dispatches to the chosen subcommand.
#[derive(Clone, Debug, Parser)]
pub struct PerfController {
    /// The selected `perf` subcommand (currently only `benchmark`).
    #[command(subcommand)]
    command: PerfCommand,
}

#[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd, ValueEnum)]
#[derive(Clone, Debug, Subcommand)]
enum PerfCommand {
/// Run benchmark tests, and report the results to <https://bencher.dev/console>
Benchmark,
Benchmark(BenchmarkController),
}

impl PerfController {
pub fn execute(&self) -> Result<()> {
Terminal::step(format!("perf {name}", name = self.command.clap_name()));

install_perf_tools()?;

match self.command {
PerfCommand::Benchmark => {
// Bencher supports multiple languages/frameworks: https://bencher.dev/docs/explanation/adapters/
// We currently only have one benchmark suite (Rust/iai), but we can add more here in the future.

run_iai_bench("solidity_testing_perf", "iai");
}
};

Ok(())
}
}

fn install_perf_tools() -> Result<()> {
match Command::new("valgrind").flag("--version").evaluate() {
Ok(output) if output.starts_with("valgrind-") => {
// Valgrind is available
}
other => {
bail!(
"valgrind needs to be installed to run perf tests.
It is installed by default inside our devcontainer.
Supported Platforms: https://valgrind.org/downloads/current.html
{other:?}"
);
match &self.command {
PerfCommand::Benchmark(controller) => controller.execute(),
}
};

CargoWorkspace::install_binary("iai-callgrind-runner")?;

CargoWorkspace::install_binary("bencher_cli")?;

Ok(())
}

fn run_iai_bench(package_name: &str, bench_name: &str) {
assert!(
std::env::var("BENCHER_API_TOKEN").is_ok(),
"BENCHER_API_TOKEN is not set. Please set it to your Bencher API token: https://bencher.dev/console",
);

let cargo_command = format!("cargo bench --package {package_name} --bench {bench_name}");

let testbed = if GitHub::is_running_in_ci() {
"ci"
} else {
"dev"
};

Command::new("bencher")
.arg("run")
.property("--project", "slang")
.property("--adapter", "rust_iai_callgrind")
.property("--testbed", testbed)
.arg(cargo_command)
.run();

let reports_dir = Path::repo_path("target/iai")
.join(package_name)
.join(bench_name);

println!("
Bencher Run is complete...
Test Results: [https://bencher.dev/console/projects/slang/reports]
Reports/Logs: {reports_dir:?}
- Callgrind flamegraphs (callgrind.*.svg) can be viewed directly in the browser.
- DHAT traces (dhat.*.out) can be viewed using the [dhat/dh_view.html] tool from the Valgrind release [https://valgrind.org/downloads/].
");
}
}
4 changes: 0 additions & 4 deletions crates/infra/cli/src/utils.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
use anyhow::{Ok, Result};
use clap::{Parser, ValueEnum};
use infra_utils::github::GitHub;

pub trait OrderedCommand: Clone + Ord + PartialEq + ValueEnum {
fn execute(&self) -> Result<()>;
Expand Down Expand Up @@ -51,9 +50,6 @@ impl DryRun {
if self.dry_run {
println!("Performing a dry run, since it was requested on the command line.");
true
} else if !GitHub::is_running_in_ci() {
println!("Performing a dry run, since we are not running in CI.");
true
} else {
println!("Performing a full run. You can pass '--dry-run' to simulate the execution.");
false
Expand Down

0 comments on commit 0567213

Please sign in to comment.