From fa49d05f476c383a3ec6e42ab15131dc86a3d5c8 Mon Sep 17 00:00:00 2001 From: Maria Ines Parnisari Date: Fri, 20 Oct 2023 10:56:53 -0700 Subject: [PATCH 1/6] improve readme --- .github/workflows/minimal.yml | 29 ++++--- README.md | 148 +++++++++------------------------- 2 files changed, 57 insertions(+), 120 deletions(-) diff --git a/.github/workflows/minimal.yml b/.github/workflows/minimal.yml index d92abaa9f..5192b2c5d 100644 --- a/.github/workflows/minimal.yml +++ b/.github/workflows/minimal.yml @@ -4,30 +4,39 @@ on: branches: - master -permissions: - contents: write - deployments: write - jobs: benchmark: - name: Run minimal steps to run github-action-benchmark + name: Performance regression check runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: go-version: "stable" + # Run benchmark with `go test -bench` and stores the output to a file - name: Run benchmark run: cd examples/go && go test -bench 'BenchmarkFib' | tee output.txt + # Download previous benchmark result from cache - name: Download previous benchmark data - uses: actions/cache@v1 + uses: actions/cache/restore@v3 with: - path: ./cache + fail-on-cache-miss: true + path: ./cache/benchmark-data.json key: ${{ runner.os }}-benchmark - - name: Store benchmark result + - name: Compare results uses: benchmark-action/github-action-benchmark@v1 with: + # What benchmark tool the output.txt came from tool: 'go' - output-file-path: examples/go/output.txt + # Extract benchmark result from here + output-file-path: output.txt + # Where the previous data file is stored external-data-json-path: ./cache/benchmark-data.json + # Workflow will fail when an alert happens fail-on-alert: true + # Upload the updated cache file for the next job by actions/cache + - name: Save benchmark JSON + uses: actions/cache/save@v3 + with: + path: ./cache/benchmark-data.json + key: ${{ runner.os }}-benchmark \ No newline at end of file diff --git a/README.md b/README.md index 836236682..9492469f1 100644 --- a/README.md +++ b/README.md @@ -10,9 +10,14 @@ and monitor the results on GitHub Actions workflow. - This action can store collected benchmark results in [GitHub pages][gh-pages] branch and provide a chart view. Benchmark results are visualized on the GitHub pages of your project. + + ![page screenshot](https://raw.githubusercontent.com/rhysd/ss/master/github-action-benchmark/main.png) - This action can detect possible performance regressions by comparing benchmark results. When - benchmark results get worse than previous exceeding the specified threshold, it can raise an alert - via commit comment or workflow failure. + benchmark results get worse than previous exceeding the specified threshold, it can add [an alert comment][alert-comment-example] to the commit. + +![alert comment](https://raw.githubusercontent.com/rhysd/ss/master/github-action-benchmark/alert-comment.png) + +## Supported tools This action currently supports the following tools: @@ -83,61 +88,10 @@ context) properties. Like this: ] ``` -## Screenshots - -### Charts on GitHub Pages - -![page screenshot](https://raw.githubusercontent.com/rhysd/ss/master/github-action-benchmark/main.png) - -Mouseover on data point shows a tooltip. It includes - -- Commit hash -- Commit message -- Date and committer -- Benchmark value - -Clicking data point in chart opens the commit page on a GitHub repository. 
- -![tooltip](https://raw.githubusercontent.com/rhysd/ss/master/github-action-benchmark/tooltip.png) - -At bottom of the page, the download button is available for downloading benchmark results as a JSON file. - -![download button](https://raw.githubusercontent.com/rhysd/ss/master/github-action-benchmark/download.png) - - -### Alert comment on commit page - -This action can raise [an alert comment][alert-comment-example]. to the commit when its benchmark -results are worse than previous exceeding a specified threshold. - -![alert comment](https://raw.githubusercontent.com/rhysd/ss/master/github-action-benchmark/alert-comment.png) - - - -## Why? - -Since performance is important. Writing benchmarks is a popular and correct way to visualize a software -performance. Benchmarks help us to keep performance and to confirm the effects of optimizations. -For keeping the performance, it's important to monitor the benchmark results along with changes to -the software. To notice performance regression quickly, it's useful to monitor benchmarking results -continuously. - -However, there is no good free tool to watch the performance easily and continuously across languages -(as far as I looked into). So I built a new tool on top of GitHub Actions. - - - ## How to use -This action takes a file that contains benchmark output. And it outputs the results to GitHub Pages -branch and/or alert commit comment. - - ### Minimal setup -Let's start with a minimal workflow setup. For explanation, here let's say we have a Go project. But basic -setup is the same when you use other languages. For language-specific setup, please read the later section. - ```yaml name: Minimal setup on: @@ -150,41 +104,39 @@ jobs: name: Performance regression check runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: go-version: "stable" # Run benchmark with `go test -bench` and stores the output to a file - name: Run benchmark run: go test -bench 'BenchmarkFib' | tee output.txt - # Download previous benchmark result from cache (if exists) + # Download previous benchmark result from cache - name: Download previous benchmark data - uses: actions/cache@v1 + uses: actions/cache/restore@v3 with: - path: ./cache + fail-on-cache-miss: true + path: ./cache/benchmark-data.json key: ${{ runner.os }}-benchmark - # Run `github-action-benchmark` action - - name: Store benchmark result + - name: Compare results uses: benchmark-action/github-action-benchmark@v1 with: # What benchmark tool the output.txt came from tool: 'go' - # Where the output from the benchmark tool is stored + # Extract benchmark result from here output-file-path: output.txt # Where the previous data file is stored external-data-json-path: ./cache/benchmark-data.json # Workflow will fail when an alert happens fail-on-alert: true # Upload the updated cache file for the next job by actions/cache + - name: Save benchmark JSON + uses: actions/cache/save@v3 + with: + path: ./cache/benchmark-data.json + key: ${{ runner.os }}-benchmark ``` -The step which runs `github-action-benchmark` does followings: - -1. Extract benchmark result from the output in `output.txt` -2. Update the downloaded cache file with the extracted result -3. Compare the result with the previous result. If it gets worse than previous exceeding 200% threshold, - the workflow fails and the failure is notified to you - By default, this action marks the result as performance regression when it is worse than the previous exceeding 200% threshold. 
For example, if the previous benchmark result was 100 iter/ns and this time it is 230 iter/ns, it means 230% worse than the previous and an alert will happen. The threshold can @@ -193,24 +145,23 @@ be changed by `alert-threshold` input. A live workflow example is [here](.github/workflows/minimal.yml). And the results of the workflow can be seen [here][minimal-workflow-example]. - -### Commit comment - -In addition to the above setup, GitHub API token needs to be given to enable `comment-on-alert` feature. +### Comment on commit when regression is found ```yaml -- name: Store benchmark result +- name: Compare benchmarks and comment on alert uses: benchmark-action/github-action-benchmark@v1 with: tool: 'go' - output-file-path: output.txt + # Where the output from the current benchmark is stored + output-file-path: ${{ github.sha }}_bench_output.txt + # Where the previous benchmark is stored external-data-json-path: ./cache/benchmark-data.json fail-on-alert: true # GitHub API token to make a commit comment github-token: ${{ secrets.GITHUB_TOKEN }} # Enable alert commit comment comment-on-alert: true - # Mention @rhysd in the commit comment + # Mention @rhysd in the commit comment, not mandatory but highly recommended to ensure the comment is seen alert-comment-cc-users: '@rhysd' ``` @@ -220,34 +171,29 @@ performance regression. Now, in addition to making workflow fail, the step leaves a commit comment when it detects performance regression [like this][alert-comment-example]. Though `alert-comment-cc-users` input is not mandatory for -this, I recommend to set it to make sure you can notice the comment via GitHub notification. Please note +this, I recommend to set it to make sure you notice the comment via GitHub notification. Please note that this value must be quoted like `'@rhysd'` because [`@` is an indicator in YAML syntax](https://yaml.org/spec/1.2/spec.html#id2772075). A live workflow example is [here](.github/workflows/commit-comment.yml). And the results of the workflow can be seen [here][commit-comment-workflow-example]. -### PR Summary +### Leave a comment on PR Summary -Similar to the [Commit comment](#commit-comment) feature, Github Actions [Job Summaries](https://github.blog/2022-05-09-supercharging-github-actions-with-job-summaries/) are -also supported. In order to use Job Summaries, turn on the `summary-always` -option. +Github Actions [Job Summaries](https://github.blog/2022-05-09-supercharging-github-actions-with-job-summaries/) are +also supported. In order to use Job Summaries, turn on the `summary-always` option. 
```yaml -- name: Store benchmark result +- name: Compare benchmarks and leave a comment on PR summary uses: benchmark-action/github-action-benchmark@v1 with: tool: 'cargo' - output-file-path: output.txt + # Where the output from the current benchmark is stored + output-file-path: ${{ github.sha }}_bench_output.txt + # Where the previous benchmark is stored external-data-json-path: ./cache/benchmark-data.json fail-on-alert: true - # GitHub API token to make a commit comment - github-token: ${{ secrets.GITHUB_TOKEN }} - # Enable alert commit comment - comment-on-alert: true # Enable Job Summary for PRs summary-always: true - # Mention @rhysd in the commit comment - alert-comment-cc-users: '@rhysd' ``` ### Charts on GitHub Pages @@ -293,8 +239,8 @@ jobs: # Run benchmark with `go test -bench` and stores the output to a file - name: Run benchmark run: go test -bench 'BenchmarkFib' | tee output.txt - # gh-pages branch is updated and pushed automatically with extracted benchmark data - - name: Store benchmark result + + - name: Compare benchmarks and publish to github pages uses: benchmark-action/github-action-benchmark@v1 with: name: My Project Go Benchmark @@ -329,10 +275,10 @@ benchmark results identical. Please see the above ['Examples' section](#examples) to see live workflow examples for each language. -If you don't want to pass GitHub API token to this action, it's still OK. +If you don't want to pass GitHub API token to this action: ```yaml -- name: Store benchmark result +- name: Compare benchmarks uses: benchmark-action/github-action-benchmark@v1 with: name: My Project Go Benchmark @@ -341,31 +287,13 @@ If you don't want to pass GitHub API token to this action, it's still OK. # Set auto-push to false since GitHub API token is not given auto-push: false # Push gh-pages branch by yourself -- name: Push benchmark result +- name: Publish to github pages without token run: git push 'https://you:${{ secrets.GITHUB_TOKEN }}@github.com/you/repo-name.git' gh-pages:gh-pages ``` Please add a step to push the branch to the remote. -### Tool specific setup - -Please read `README.md` files at each example directory. Usually, take stdout from a benchmark tool -and store it to file. Then specify the file path to `output-file-path` input. - -- [`cargo bench` for Rust projects](./examples/rust/README.md) -- [`go test` for Go projects](./examples/go/README.md) -- [Benchmark.js for JavaScript/TypeScript projects](./examples/benchmarkjs/README.md) -- [pytest-benchmark for Python projects with pytest](./examples/pytest/README.md) -- [Google Benchmark Framework for C++ projects](./examples/cpp/README.md) -- [catch2 for C++ projects](./examples/cpp/README.md) -- [BenchmarkTools.jl for Julia projects](./examples/julia/README.md) -- [Benchmark.Net for .Net projects](./examples/benchmarkdotnet/README.md) -- [benchmarkluau for Luau projects](#) - Examples for this are still a work in progress. - -These examples are run in workflows of this repository as described in the 'Examples' section above. - - ### Action inputs Input definitions are written in [action.yml](./action.yml). 
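Both the `minimal.yml` workflow above and the README's minimal setup run `go test -bench 'BenchmarkFib'` and hand the captured output to the action with `tool: 'go'`, which reads the standard result lines (benchmark name, iteration count, `ns/op`) that `go test -bench` prints. The Go sources under `examples/go` are not included in this diff, so the snippet below is only an illustrative sketch of the kind of benchmark those steps expect; the package, file, and function names are assumptions, not the repository's actual code.

```go
// fib_test.go — illustrative only; the real examples/go sources are not shown in this patch.
package fib

import "testing"

// fib is a deliberately slow recursive implementation so the benchmark has work to measure.
func fib(n int) int {
	if n < 2 {
		return n
	}
	return fib(n-1) + fib(n-2)
}

// BenchmarkFib20 is matched by the -bench 'BenchmarkFib' filter used in the workflows above.
// `go test -bench` prints one result line per benchmark (name, iterations, ns/op), which is
// the format the action parses when tool: 'go' is configured.
func BenchmarkFib20(b *testing.B) {
	for i := 0; i < b.N; i++ {
		fib(20)
	}
}
```

Piping that output through `tee output.txt`, as the workflow steps do, keeps the results visible in the job log while also producing the file that `output-file-path` points at.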
From ca3c56183ef0bc14c918dc1dd85327fef82519d1 Mon Sep 17 00:00:00 2001 From: Maria Ines Parnisari Date: Fri, 20 Oct 2023 11:05:51 -0700 Subject: [PATCH 2/6] less permissions --- .github/workflows/minimal.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/minimal.yml b/.github/workflows/minimal.yml index 5192b2c5d..fa9011be5 100644 --- a/.github/workflows/minimal.yml +++ b/.github/workflows/minimal.yml @@ -4,6 +4,10 @@ on: branches: - master +permissions: + # we don't publish anything + contents: read + jobs: benchmark: name: Performance regression check From bb00a3fa44a4dc3b9d5ae899ceff582ab9a3f3a3 Mon Sep 17 00:00:00 2001 From: mparnisari Date: Thu, 28 Dec 2023 20:53:52 -0800 Subject: [PATCH 3/6] address some comments --- .github/workflows/minimal.yml | 16 +++++----------- README.md | 22 ++++++++++++++++++++-- 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/.github/workflows/minimal.yml b/.github/workflows/minimal.yml index fa9011be5..c3da391c8 100644 --- a/.github/workflows/minimal.yml +++ b/.github/workflows/minimal.yml @@ -13,8 +13,8 @@ jobs: name: Performance regression check runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v4 + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 with: go-version: "stable" # Run benchmark with `go test -bench` and stores the output to a file @@ -27,20 +27,14 @@ jobs: fail-on-cache-miss: true path: ./cache/benchmark-data.json key: ${{ runner.os }}-benchmark - - name: Compare results + - name: Store benchmark result uses: benchmark-action/github-action-benchmark@v1 with: # What benchmark tool the output.txt came from tool: 'go' - # Extract benchmark result from here + # Where the output from the benchmark tool is stored output-file-path: output.txt - # Where the previous data file is stored + # Write benchmarks to this file external-data-json-path: ./cache/benchmark-data.json # Workflow will fail when an alert happens fail-on-alert: true - # Upload the updated cache file for the next job by actions/cache - - name: Save benchmark JSON - uses: actions/cache/save@v3 - with: - path: ./cache/benchmark-data.json - key: ${{ runner.os }}-benchmark \ No newline at end of file diff --git a/README.md b/README.md index 9492469f1..c3df6898e 100644 --- a/README.md +++ b/README.md @@ -104,8 +104,8 @@ jobs: name: Performance regression check runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v4 + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 with: go-version: "stable" # Run benchmark with `go test -bench` and stores the output to a file @@ -134,6 +134,7 @@ jobs: uses: actions/cache/save@v3 with: path: ./cache/benchmark-data.json + # Include OS in key so that we don't compare benchmarks across different OSes key: ${{ runner.os }}-benchmark ``` @@ -293,6 +294,23 @@ If you don't want to pass GitHub API token to this action: Please add a step to push the branch to the remote. +### Tool specific setup + +Please read `README.md` files at each example directory. Usually, take stdout from a benchmark tool +and store it to file. Then specify the file path to `output-file-path` input. 
+ +- [`cargo bench` for Rust projects](./examples/rust/README.md) +- [`go test` for Go projects](./examples/go/README.md) +- [Benchmark.js for JavaScript/TypeScript projects](./examples/benchmarkjs/README.md) +- [pytest-benchmark for Python projects with pytest](./examples/pytest/README.md) +- [Google Benchmark Framework for C++ projects](./examples/cpp/README.md) +- [catch2 for C++ projects](./examples/cpp/README.md) +- [BenchmarkTools.jl for Julia projects](./examples/julia/README.md) +- [Benchmark.Net for .Net projects](./examples/benchmarkdotnet/README.md) +- [benchmarkluau for Luau projects](#) - Examples for this are still a work in progress. + +These examples are run in workflows of this repository as described in the 'Examples' section above. + ### Action inputs From f0d7aea50a80be4ebbf794d285bb6d9639b3f8f4 Mon Sep 17 00:00:00 2001 From: mparnisari Date: Thu, 28 Dec 2023 20:59:39 -0800 Subject: [PATCH 4/6] undo removal of screenshots --- README.md | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/README.md b/README.md index c3df6898e..79c390ee2 100644 --- a/README.md +++ b/README.md @@ -88,6 +88,35 @@ context) properties. Like this: ] ``` +## Screenshots + +### Charts on GitHub Pages + +![page screenshot](https://raw.githubusercontent.com/rhysd/ss/master/github-action-benchmark/main.png) + +Mouseover on data point shows a tooltip. It includes + +- Commit hash +- Commit message +- Date and committer +- Benchmark value + +Clicking data point in chart opens the commit page on a GitHub repository. + +![tooltip](https://raw.githubusercontent.com/rhysd/ss/master/github-action-benchmark/tooltip.png) + +At bottom of the page, the download button is available for downloading benchmark results as a JSON file. + +![download button](https://raw.githubusercontent.com/rhysd/ss/master/github-action-benchmark/download.png) + + +### Alert comment on commit page + +This action can raise [an alert comment][alert-comment-example]. to the commit when its benchmark +results are worse than previous exceeding a specified threshold. + +![alert comment](https://raw.githubusercontent.com/rhysd/ss/master/github-action-benchmark/alert-comment.png) + ## How to use ### Minimal setup From 75dcd0d370c91547ec0955c010f99aeaaaa6cb6a Mon Sep 17 00:00:00 2001 From: mparnisari Date: Thu, 28 Dec 2023 21:13:14 -0800 Subject: [PATCH 5/6] merge supported tools sections into one --- README.md | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 79c390ee2..0f6bcbb0c 100644 --- a/README.md +++ b/README.md @@ -19,21 +19,22 @@ and monitor the results on GitHub Actions workflow. ## Supported tools -This action currently supports the following tools: - -- [`cargo bench`][cargo-bench] for Rust projects -- `go test -bench` for Go projects -- [benchmark.js][benchmarkjs] for JavaScript/TypeScript projects -- [pytest-benchmark][] for Python projects with [pytest][] -- [Google Benchmark Framework][google-benchmark] for C++ projects -- [Catch2][catch2] for C++ projects -- [BenchmarkTools.jl][] for Julia packages -- [Benchmark.Net][benchmarkdotnet] for .Net projects -- [benchmarkluau](https://github.com/Roblox/luau/tree/master/bench) for Luau projects +This action currently supports the following tools. Please read the README file in each example directory. + +- [`cargo bench`][cargo-bench] for Rust projects. 
[Example setup](https://github.com/benchmark-action/github-action-benchmark/blob/master/examples/rust/README.md)
+- `go test -bench` for Go projects. [Example setup](https://github.com/benchmark-action/github-action-benchmark/blob/master/examples/go/README.md)
+- [benchmark.js][benchmarkjs] for JavaScript/TypeScript projects. [Example setup](https://github.com/benchmark-action/github-action-benchmark/blob/master/examples/benchmarkjs/README.md)
+- [pytest-benchmark][] for Python projects with [pytest][]. [Example setup](https://github.com/benchmark-action/github-action-benchmark/blob/master/examples/pytest/README.md)
+- [Google Benchmark Framework][google-benchmark] for C++ projects. [Example setup](https://github.com/benchmark-action/github-action-benchmark/blob/master/examples/cpp/README.md)
+- [Catch2][catch2] for C++ projects. [Example setup](https://github.com/benchmark-action/github-action-benchmark/blob/master/examples/cpp/README.md)
+- [BenchmarkTools.jl][] for Julia packages. [Example setup](https://github.com/benchmark-action/github-action-benchmark/blob/master/examples/julia/README.md)
+- [Benchmark.Net][benchmarkdotnet] for .Net projects. [Example setup](https://github.com/benchmark-action/github-action-benchmark/blob/master/examples/benchmarkdotnet/README.md)
+- [benchmarkluau](https://github.com/Roblox/luau/tree/master/bench) for Luau projects. [Example setup](https://github.com/benchmark-action/github-action-benchmark?tab=readme-ov-file#)
 - [JMH][jmh] for Java projects
 - Custom benchmarks where either 'biggerIsBetter' or 'smallerIsBetter'
 
-Multiple languages in the same repository are supported for polyglot projects.
+Multiple languages in the same repository are supported for polyglot projects. Usually, take stdout from a benchmark tool and store it to a file. Then specify the file path to the `output-file-path` input.
+
 
 [Japanese Blog post](https://rhysd.hatenablog.com/entry/2019/11/11/131505)

From b83835a2e370b9c7002e223535d96917f10e3f79 Mon Sep 17 00:00:00 2001
From: mparnisari
Date: Thu, 28 Dec 2023 21:29:29 -0800
Subject: [PATCH 6/6] condense languages

---
 README.md | 102 +++++++++--------------------------------------------
 1 file changed, 16 insertions(+), 86 deletions(-)

diff --git a/README.md b/README.md
index 0f6bcbb0c..1d4db61b8 100644
--- a/README.md
+++ b/README.md
@@ -17,51 +17,29 @@ and monitor the results on GitHub Actions workflow.
 
 ![alert comment](https://raw.githubusercontent.com/rhysd/ss/master/github-action-benchmark/alert-comment.png)
 
-## Supported tools
-
-This action currently supports the following tools. Please read the README file in each example directory.
-
-- [`cargo bench`][cargo-bench] for Rust projects. [Example setup](https://github.com/benchmark-action/github-action-benchmark/blob/master/examples/rust/README.md)
-- `go test -bench` for Go projects. [Example setup](https://github.com/benchmark-action/github-action-benchmark/blob/master/examples/go/README.md)
-- [benchmark.js][benchmarkjs] for JavaScript/TypeScript projects. [Example setup](https://github.com/benchmark-action/github-action-benchmark/blob/master/examples/benchmarkjs/README.md)
-- [pytest-benchmark][] for Python projects with [pytest][]. [Example setup](https://github.com/benchmark-action/github-action-benchmark/blob/master/examples/pytest/README.md)
-- [Google Benchmark Framework][google-benchmark] for C++ projects. [Example setup](https://github.com/benchmark-action/github-action-benchmark/blob/master/examples/cpp/README.md)
-- [Catch2][catch2] for C++ projects. 
[Example setup](https://github.com/benchmark-action/github-action-benchmark/blob/master/examples/cpp/README.md) -- [BenchmarkTools.jl][] for Julia packages. [Example setup](https://github.com/benchmark-action/github-action-benchmark/blob/master/examples/julia/README.md) -- [Benchmark.Net][benchmarkdotnet] for .Net projects. [Example setup](https://github.com/benchmark-action/github-action-benchmark/blob/master/examples/benchmarkdotnet/README.md) -- [benchmarkluau](https://github.com/Roblox/luau/tree/master/bench) for Luau projects. [Example setup](https://github.com/benchmark-action/github-action-benchmark?tab=readme-ov-file#) -- [JMH][jmh] for Java projects -- Custom benchmarks where either 'biggerIsBetter' or 'smallerIsBetter' - -Multiple languages in the same repository are supported for polyglot projects.. Usually, take stdout from a benchmark tool and store it to file. Then specify the file path to output-file-path input. - - [Japanese Blog post](https://rhysd.hatenablog.com/entry/2019/11/11/131505) - - -## Examples - -Example projects for each language are in [examples/](./examples) directory. Live example workflow -definitions are in [.github/workflows/](./.github/workflows) directory. Live workflows are: - -| Language | Workflow | Example Project | -|--------------|-----------------------------------------------------------------------------------------|------------------------------------------------| -| Rust | [![Rust Example Workflow][rust-badge]][rust-workflow-example] | [examples/rust](./examples/rust) | -| Go | [![Go Example Workflow][go-badge]][go-workflow-example] | [examples/go](./examples/go) | -| JavaScript | [![JavaScript Example Workflow][benchmarkjs-badge]][benchmarkjs-workflow-example] | [examples/benchmarkjs](./examples/benchmarkjs) | -| Python | [![pytest-benchmark Example Workflow][pytest-benchmark-badge]][pytest-workflow-example] | [examples/pytest](./examples/pytest) | -| C++ | [![C++ Example Workflow][cpp-badge]][cpp-workflow-example] | [examples/cpp](./examples/cpp) | -| C++ (Catch2) | [![C++ Catch2 Example Workflow][catch2-badge]][catch2-workflow-example] | [examples/catch2](./examples/catch2) | -| Julia | [![Julia Example][julia-badge]][julia-workflow-example] | [examples/julia](./examples/julia) | -| .Net | [![C# Benchmark.Net Example Workflow][benchmarkdotnet-badge]][benchmarkdotnet-workflow-example] | [examples/benchmarkdotnet](./examples/benchmarkdotnet) | -| Java | [![Java Example Workflow][java-badge]][java-workflow-example] | [examples/java](./examples/java) | -| Luau | Coming soon | Coming soon | +## Supported languages & examples + +| Language | Supported | Example Workflow | Example Project | +|--------------|-----------|-----------------------------------------------------------------------------|--------------------------------------------------------| +| Rust | Yes | [![cargo bench][rust-badge]][rust-workflow-example] | [examples/rust](./examples/rust) | +| Go | Yes | [![go test -bench][go-badge]][go-workflow-example] | [examples/go](./examples/go) | +| JavaScript | Yes | [![benchmark.js][benchmarkjs-badge]][benchmarkjs-workflow-example] | [examples/benchmarkjs](./examples/benchmarkjs) | +| Python | Yes | [![pytest-benchmark][pytest-benchmark-badge]][pytest-workflow-example] | [examples/pytest](./examples/pytest) | +| C++ | Yes | [![Google Benchmark Framework][cpp-badge]][cpp-workflow-example] | [examples/cpp](./examples/cpp) | +| C++ (Catch2) | Yes | [![Catch2][catch2-badge]][catch2-workflow-example] | [examples/catch2](./examples/catch2) | +| 
Julia | Yes | [![BenchmarkTools.jl][julia-badge]][julia-workflow-example] | [examples/julia](./examples/julia) | +| .Net | Yes | [![Benchmark.Net][benchmarkdotnet-badge]][benchmarkdotnet-workflow-example] | [examples/benchmarkdotnet](./examples/benchmarkdotnet) | +| Java | Yes | [![JMH][java-badge]][java-workflow-example] | [examples/java](./examples/java) | +| Luau | Yes | Coming soon for benchmarkluau | Coming soon | All benchmark charts from above workflows are gathered in GitHub pages: https://benchmark-action.github.io/github-action-benchmark/dev/bench/ +## Custom benchmarks + Additionally, even though there is no explicit example for them, you can use `customBiggerIsBetter` and `customSmallerIsBetter` to use this action and create your own graphs from your own benchmark data. The name in @@ -88,36 +66,6 @@ context) properties. Like this: } ] ``` - -## Screenshots - -### Charts on GitHub Pages - -![page screenshot](https://raw.githubusercontent.com/rhysd/ss/master/github-action-benchmark/main.png) - -Mouseover on data point shows a tooltip. It includes - -- Commit hash -- Commit message -- Date and committer -- Benchmark value - -Clicking data point in chart opens the commit page on a GitHub repository. - -![tooltip](https://raw.githubusercontent.com/rhysd/ss/master/github-action-benchmark/tooltip.png) - -At bottom of the page, the download button is available for downloading benchmark results as a JSON file. - -![download button](https://raw.githubusercontent.com/rhysd/ss/master/github-action-benchmark/download.png) - - -### Alert comment on commit page - -This action can raise [an alert comment][alert-comment-example]. to the commit when its benchmark -results are worse than previous exceeding a specified threshold. - -![alert comment](https://raw.githubusercontent.com/rhysd/ss/master/github-action-benchmark/alert-comment.png) - ## How to use ### Minimal setup @@ -324,24 +272,6 @@ If you don't want to pass GitHub API token to this action: Please add a step to push the branch to the remote. -### Tool specific setup - -Please read `README.md` files at each example directory. Usually, take stdout from a benchmark tool -and store it to file. Then specify the file path to `output-file-path` input. - -- [`cargo bench` for Rust projects](./examples/rust/README.md) -- [`go test` for Go projects](./examples/go/README.md) -- [Benchmark.js for JavaScript/TypeScript projects](./examples/benchmarkjs/README.md) -- [pytest-benchmark for Python projects with pytest](./examples/pytest/README.md) -- [Google Benchmark Framework for C++ projects](./examples/cpp/README.md) -- [catch2 for C++ projects](./examples/cpp/README.md) -- [BenchmarkTools.jl for Julia projects](./examples/julia/README.md) -- [Benchmark.Net for .Net projects](./examples/benchmarkdotnet/README.md) -- [benchmarkluau for Luau projects](#) - Examples for this are still a work in progress. - -These examples are run in workflows of this repository as described in the 'Examples' section above. - - ### Action inputs Input definitions are written in [action.yml](./action.yml).
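
For the `customBiggerIsBetter` / `customSmallerIsBetter` tools described above, the action only needs a JSON array of objects with `name`, `unit`, and `value` properties. As an illustrative sketch — not part of this patch series, and with an invented file name and numbers — a small Go program could emit such a file, and a workflow would then point `output-file-path` at it with the matching `tool` input:

```go
// gen_custom_bench.go — illustrative sketch; not part of this repository or patch series.
package main

import (
	"encoding/json"
	"log"
	"os"
)

// result mirrors the minimal schema shown in the README: name, unit, and value.
type result struct {
	Name  string  `json:"name"`
	Unit  string  `json:"unit"`
	Value float64 `json:"value"`
}

func main() {
	results := []result{
		{Name: "My Custom Smaller Is Better Benchmark - CPU Load", Unit: "Percent", Value: 50},
	}
	data, err := json.MarshalIndent(results, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	// A workflow would set output-file-path to this file and tool to 'customSmallerIsBetter'.
	if err := os.WriteFile("custom-benchmark.json", data, 0o644); err != nil {
		log.Fatal(err)
	}
}
```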