name: Benchmark Chart Generation
# Do not run this workflow on pull request since this workflow has permission to modify contents.
on:
  push:
    branches:
      - main

permissions:
  # deployments permission to deploy GitHub pages website
  deployments: write
  # contents permission to update benchmark contents in gh-pages branch
  contents: write

env:
  CARGO_INCREMENTAL: "0"
  RUST_BACKTRACE: 1
  CLIENT_DATA_PATH: /home/runner/.local/share/safe/autonomi
  NODE_DATA_PATH: /home/runner/.local/share/safe/node
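# Note: the analysis steps at the end of the job scrape node and client logs from these two data
# paths, so they are expected to match where `safenode` and the `autonomi` client write their logs.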
jobs:
  benchmark-cli:
    if: "!startsWith(github.event.head_commit.message, 'chore(release):')"
    name: Run and log benchmark criterion results on gh-pages
    # right now only ubuntu; running on multiple systems would require many pushes...
    # perhaps this can be done with one consolidation action in the future, pulling down all results
    # and pushing once to the branch.
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
        with:
          components: rustfmt, clippy
      - uses: Swatinem/rust-cache@v2
        continue-on-error: true
      - run: cargo install cargo-criterion
      - name: Ubuntu install ripgrep
        run: sudo apt-get -y install ripgrep
      - name: Download 95 MB file to be uploaded with the safe client
        shell: bash
        run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip
      - name: Build node and cli binaries
        run: cargo build --release --features local --bin safenode --bin autonomi
        timeout-minutes: 30
      - name: Start a local network
        uses: maidsafe/sn-local-testnet-action@main
        with:
          action: start
          enable-evm-testnet: true
          node-path: target/release/safenode
          platform: ubuntu-latest
          build: true
          sn-log: "all"
      ########################
      ### Benchmark ###
      ########################
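      # A hedged note on the step below: the jq filter assumes each `benchmark-complete` message from
      # cargo-criterion exposes `.id`, `.throughput[0].per_iteration`, `.throughput[0].unit` and
      # `.mean.estimate` (nanoseconds), and folds them into one MiB/s figure per benchmark, roughly:
      #   value = (per-iteration bytes / 1 MiB) / (mean.estimate / 1e9 seconds)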
      - name: Bench `autonomi` cli
        shell: bash
        # Criterion writes the actual bench results to stderr; `2>&1 | tee -a output.txt` redirects
        # stderr into the pipe so tee both displays it in the terminal and appends it to output.txt.
        run: |
          cargo criterion --features=local --message-format=json 2>&1 -p autonomi-cli | tee -a output.txt
          cat output.txt | rg benchmark-complete | jq -s 'map({
            name: (.id | split("/"))[-1],
            unit: "MiB/s",
            value: ((if .throughput[0].unit == "KiB/s" then (.throughput[0].per_iteration / (1024*1024*1024)) else (.throughput[0].per_iteration / (1024*1024)) end) / (.mean.estimate / 1e9))
          })' > files-benchmark.json
      - name: Remove git hooks so gh-pages git commits will work
        shell: bash
        run: rm -rf .git/hooks/pre-commit
      - name: check files-benchmark.json
        shell: bash
        run: cat files-benchmark.json
      # gh-pages branch is updated and pushed automatically with extracted benchmark data
      - name: Store cli files benchmark result
        uses: benchmark-action/github-action-benchmark@v1
        with:
          name: "`safe files` benchmarks"
          tool: "customBiggerIsBetter"
          output-file-path: files-benchmark.json
          github-token: ${{ secrets.GITHUB_TOKEN }}
          auto-push: true
          max-items-in-chart: 300
      # FIXME: do this in a generic way for localtestnets
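      # The key below is the well-known first dev account private key shipped with Anvil/Hardhat-style
      # local EVM test chains (used here against the local EVM testnet); it is not a real secret.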
      - name: export default secret key
        run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV
        shell: bash
      - name: Start a client instance to compare memory usage
        shell: bash
        run: cargo run --bin autonomi --release -- --log-output-dest=data-dir file upload the-test-data.zip
        env:
          SN_LOG: "all"
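      # With `--log-output-dest=data-dir`, the upload above should write its logs under
      # $CLIENT_DATA_PATH; those logs are what the client memory analysis below scrapes.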
      #########################
      ### Stop Network ###
      #########################
      - name: Stop the local network and upload logs
        if: always()
        uses: maidsafe/sn-local-testnet-action@main
        with:
          action: stop
          platform: ubuntu-latest
          log_file_prefix: safe_test_logs_generate_bench_charts
          build: true
      #########################
      ### Mem Analysis ###
      #########################
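      # Assumed log format for the scrape below: node log lines carry a JSON-style fragment such as
      # `"memory_used_mb":123.4,`; rg extracts the fragment, awk keeps the number after the colon,
      # and `sort -n | tail -n 1` picks the peak value.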
      - name: Check node memory usage
        shell: bash
        run: |
          peak_mem_usage=$(
            rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename |
            awk -F':' '/"memory_used_mb":/{print $2}' |
            sort -n |
            tail -n 1
          )
          # Write the node memory usage to a file
          echo '[
            {
              "name": "Peak memory w/ `safe` benchmarks",
              "value": '$peak_mem_usage',
              "unit": "MB"
            }
          ]' > node_memory_usage.json
      - name: check node_memory_usage.json
        shell: bash
        run: cat node_memory_usage.json
      - name: Upload Node Memory Usage
        uses: benchmark-action/github-action-benchmark@v1
        with:
          name: "Node memory"
          tool: "customSmallerIsBetter"
          output-file-path: node_memory_usage.json
          github-token: ${{ secrets.GITHUB_TOKEN }}
          auto-push: true
          max-items-in-chart: 300
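      # Same `"memory_used_mb"` field, but scraped from the client logs; besides the peak, an average
      # is computed with bash integer arithmetic, so it is truncated to whole MB.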
      - name: Check client memory usage
        shell: bash
        run: |
          peak_mem_usage=$(
            rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs/*/*.log -o --no-line-number --no-filename |
            awk -F':' '/"memory_used_mb":/{print $2}' |
            sort -n |
            tail -n 1
          )
          total_mem=$(
            rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs/*/*.log -o --no-line-number --no-filename |
            awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}'
          )
          num_of_times=$(
            rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs/*/*.log -c --stats |
            rg "(\d+) matches" |
            rg "\d+" -o
          )
          echo "num_of_times: $num_of_times"
          echo "Total memory is: $total_mem"
          average_mem=$(($total_mem/$(($num_of_times))))
          echo "Average memory is: $average_mem"
          # Write the client memory usage to a file
          echo '[
            {
              "name": "Peak memory usage w/ upload",
              "value": '$peak_mem_usage',
              "unit": "MB"
            },
            {
              "name": "Average memory usage w/ upload",
              "value": '$average_mem',
              "unit": "MB"
            }
          ]' > client_memory_usage.json
      - name: check client_memory_usage.json
        shell: bash
        run: cat client_memory_usage.json
      - name: Upload Client Memory Usage
        uses: benchmark-action/github-action-benchmark@v1
        with:
          name: "Client memory"
          tool: "customSmallerIsBetter"
          output-file-path: client_memory_usage.json
          github-token: ${{ secrets.GITHUB_TOKEN }}
          auto-push: true
          max-items-in-chart: 300
      ###########################################
      ### Swarm_driver handling time Analysis ###
      ###########################################
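      # Assumed log format for the timing scrape below: lines of the form `SwarmCmd handled in <duration>ms: ...`
      # (and the SwarmEvent equivalent). With `awk -F' |ms:'` the duration is field 4 of the matched
      # fragment, e.g. "SwarmCmd handled in 12.3ms:" -> $4 == 12.3; totals and averages are in ms.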
      - name: Check swarm_driver handling time
        shell: bash
        run: |
          num_of_times=$(
            rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats |
            rg "(\d+) matches" |
            rg "\d+" -o
          )
          echo "Number of long cmd handling times: $num_of_times"
          total_long_handling_ms=$(
            rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename |
            awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}'
          )
          echo "Total cmd long handling time is: $total_long_handling_ms ms"
          average_handling_ms=$(($total_long_handling_ms/$(($num_of_times))))
          echo "Average cmd long handling time is: $average_handling_ms ms"
          total_long_handling=$(($total_long_handling_ms))
          total_num_of_times=$(($num_of_times))
          num_of_times=$(
            rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats |
            rg "(\d+) matches" |
            rg "\d+" -o
          )
          echo "Number of long event handling times: $num_of_times"
          total_long_handling_ms=$(
            rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename |
            awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}'
          )
          echo "Total event long handling time is: $total_long_handling_ms ms"
          average_handling_ms=$(($total_long_handling_ms/$(($num_of_times))))
          echo "Average event long handling time is: $average_handling_ms ms"
          total_long_handling=$(($total_long_handling_ms+$total_long_handling))
          total_num_of_times=$(($num_of_times+$total_num_of_times))
          average_handling_ms=$(($total_long_handling/$(($total_num_of_times))))
          echo "Total swarm_driver long handling times is: $total_num_of_times"
          echo "Total swarm_driver long handling duration is: $total_long_handling ms"
          echo "Total average swarm_driver long handling duration is: $average_handling_ms ms"
          # Write the swarm_driver long handling stats to a file
          echo '[
            {
              "name": "swarm_driver long handling times",
              "value": '$total_num_of_times',
              "unit": "hits"
            },
            {
              "name": "swarm_driver long handling total_time",
              "value": '$total_long_handling',
              "unit": "ms"
            },
            {
              "name": "swarm_driver average long handling time",
              "value": '$average_handling_ms',
              "unit": "ms"
            }
          ]' > swarm_driver_long_handlings.json
      - name: check swarm_driver_long_handlings.json
        shell: bash
        run: cat swarm_driver_long_handlings.json
      - name: Upload swarm_driver Long Handlings
        uses: benchmark-action/github-action-benchmark@v1
        with:
          name: "swarm_driver long handlings"
          tool: "customSmallerIsBetter"
          output-file-path: swarm_driver_long_handlings.json
          github-token: ${{ secrets.GITHUB_TOKEN }}
          auto-push: true
          max-items-in-chart: 300