From 8ead840f6c499010d785f7e8eaaffb5d260008ef Mon Sep 17 00:00:00 2001
From: zeme-iohk
Date: Mon, 20 Mar 2023 22:57:25 +0700
Subject: [PATCH] Add workflow for longitudinal benchmarking (#5205)

* New benchmark workflow

* alert threshold

* upd

* Addressed some comments

* WIP

* Applied requested changes
---
 .github/workflows/longitudinal-benchmark.yml | 55 ++++++++++++++++++++
 scripts/format-benchmark-output.py           | 23 ++++++++
 2 files changed, 78 insertions(+)
 create mode 100644 .github/workflows/longitudinal-benchmark.yml
 create mode 100644 scripts/format-benchmark-output.py

diff --git a/.github/workflows/longitudinal-benchmark.yml b/.github/workflows/longitudinal-benchmark.yml
new file mode 100644
index 00000000000..498e6196963
--- /dev/null
+++ b/.github/workflows/longitudinal-benchmark.yml
@@ -0,0 +1,55 @@
+# Longitudinal Benchmarks
+#
+# This workflow will run the benchmarks defined in the environment variable BENCHMARKS.
+# It will collect and aggregate the benchmark output, format it, and feed it to github-action-benchmark.
+#
+# The benchmark charts are live at https://input-output-hk.github.io/plutus/dev/bench
+# The benchmark data is available at https://input-output-hk.github.io/plutus/dev/bench/data.js
+
+name: Longitudinal Benchmarks
+
+on:
+  push:
+    branches:
+      - master
+
+permissions:
+  # Deployments permission to deploy GitHub pages website
+  deployments: write
+  # Contents permission to update benchmark contents in gh-pages branch
+  contents: write
+
+jobs:
+  longitudinal-benchmarks:
+    name: Performance regression check
+    runs-on: [self-hosted, plutus-benchmark]
+    steps:
+      - uses: actions/checkout@v3.3.0
+
+      - name: Run benchmarks
+        env:
+          BENCHMARKS: "validation validation-decode"
+        run: |
+          for bench in $BENCHMARKS; do
+            cabal run "$bench" 2>&1 | tee "$bench-output.txt"
+          done
+          python ./scripts/format-benchmark-output.py
+
+
+      - name: Store benchmark result
+        uses: benchmark-action/github-action-benchmark@v1.16.2
+        with:
+          name: Plutus Benchmarks
+          tool: 'customSmallerIsBetter'
+          output-file-path: output.json
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          # Push and deploy GitHub pages branch automatically
+          auto-push: true
+          # Enable alert commit comment
+          comment-on-alert: true
+          # Mention @input-output-hk/plutus-core in the commit comment
+          alert-comment-cc-users: '@input-output-hk/plutus-core'
+          # Percentage value like "110%".
+          # It is the ratio of the current benchmark result to the previous one.
+          # For example, if we previously got 100 ns/iter and now get 110 ns/iter, the ratio is 110%.
+          alert-threshold: '105%'
diff --git a/scripts/format-benchmark-output.py b/scripts/format-benchmark-output.py
new file mode 100644
index 00000000000..0aee2bafe8f
--- /dev/null
+++ b/scripts/format-benchmark-output.py
@@ -0,0 +1,23 @@
+import json
+import os
+
+result = []
+
+for benchmark in os.getenv("BENCHMARKS").split():
+    with open(f"{benchmark}-output.txt", "r") as file:
+        name = ""
+        for line in file.readlines():
+            if line.startswith("benchmarking"):
+                name = line.split()[1]
+            elif line.startswith("mean"):
+                parts = line.split()
+                mean = parts[1]
+                unit = parts[2]
+                result.append({
+                    "name": f"{benchmark}-{name}",
+                    "unit": unit,
+                    "value": float(mean)
+                })
+
+with open("output.json", "w") as file:
+    json.dump(result, file)
\ No newline at end of file
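
Note: for reference, here is a minimal sketch of the contract between the two new files. The formatter scans each benchmark's console output for "benchmarking" and "mean" lines and turns them into entries in github-action-benchmark's customSmallerIsBetter JSON format. The case name and timings below are hypothetical, but the line shapes follow criterion's console output:

    # sketch_format_contract.py -- illustrative only; the sample lines are
    # hypothetical, but mimic the criterion output that "cabal run validation" prints.
    sample = """\
    benchmarking crowdfunding/1
    mean                 302.4 ms   (301.8 ms .. 303.1 ms)
    """

    entries = []
    name = ""
    for line in sample.splitlines():
        if line.startswith("benchmarking"):
            name = line.split()[1]        # "crowdfunding/1"
        elif line.startswith("mean"):
            parts = line.split()          # ["mean", "302.4", "ms", ...]
            entries.append({
                "name": f"validation-{name}",   # benchmark executable + case name
                "unit": parts[2],
                "value": float(parts[1]),
            })

    print(entries)
    # [{'name': 'validation-crowdfunding/1', 'unit': 'ms', 'value': 302.4}]

To try the real script locally: run "cabal run validation 2>&1 | tee validation-output.txt", set BENCHMARKS=validation, then run "python ./scripts/format-benchmark-output.py"; it writes output.json to the working directory.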
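
And a sketch of the alert-threshold arithmetic described in the workflow comment (the ns/iter numbers are the hypothetical ones from that comment):

    # Sketch of github-action-benchmark's alert rule as documented in the
    # workflow above: an alert fires when current / previous exceeds the threshold.
    previous = 100.0   # ns/iter (hypothetical value from the comment above)
    current = 110.0    # ns/iter
    ratio = current / previous   # 1.10, i.e. "110%"
    threshold = 1.05             # the '105%' configured via alert-threshold
    print(ratio > threshold)     # True -> this regression would trigger an alert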