[WIP] Parallel GH actions workflow for Nixpkgs eval
Partly taken from NixOS#352808 and NixOS#269403
infinisil committed Nov 14, 2024
1 parent f8d0c64 commit 868ad90
Showing 3 changed files with 175 additions and 0 deletions.
53 changes: 53 additions & 0 deletions .github/workflows/eval.yml
@@ -0,0 +1,53 @@
name: Eval

on: pull_request_target

permissions:
  contents: read

jobs:
  tests:
    name: eval-check
    runs-on: ubuntu-latest
    strategy:
      matrix:
        system: [x86_64-linux, aarch64-linux, aarch64-darwin, x86_64-darwin]
    steps:
      # Important: Because of `pull_request_target`, this doesn't check out the PR,
      # but rather the base branch of the PR, which is needed so we don't run untrusted code
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          path: base
          sparse-checkout: ci
      - name: Resolving the merge commit
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          if mergedSha=$(base/ci/get-merge-commit.sh ${{ github.repository }} ${{ github.event.number }}); then
            echo "Checking the merge commit $mergedSha"
            echo "mergedSha=$mergedSha" >> "$GITHUB_ENV"
          else
            # Skipping so that no notifications are sent
            echo "Skipping the rest..."
          fi
          rm -rf base
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        # Add this to _all_ subsequent steps to skip them
        if: env.mergedSha
        with:
          ref: ${{ env.mergedSha }}

      - uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
        if: env.mergedSha

      - name: Enable swap
        if: env.mergedSha
        run: |
          sudo fallocate -l 10G /swapfile
          sudo chmod 600 /swapfile
          sudo mkswap /swapfile
          sudo swapon /swapfile
      - name: Check eval
        if: env.mergedSha
        run: ./ci/eval-nixpkgs.sh --system "${{ matrix.system }}"
61 changes: 61 additions & 0 deletions ci/eval-nixpkgs.sh
@@ -0,0 +1,61 @@
#!/usr/bin/env nix-shell
#!nix-shell -i bash -p moreutils jq
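# Usage (as invoked by .github/workflows/eval.yml):
#   ./ci/eval-nixpkgs.sh --system x86_64-linux
# Prints a JSON object mapping attribute paths to output store paths.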

set -euxo pipefail

system="x86_64-linux"
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
NIXPKGS_PATH="$(readlink -f "$SCRIPT_DIR"/..)"

parseArgs() {
  while [[ $# -gt 0 ]]; do
    case $1 in
      --system)
        system=$2
        shift 2
        ;;
      *)
        echo "Unknown argument: $1"
        exit 1
        ;;
    esac
  done
}

main() {
  parseArgs "$@"
  tmpdir=$(mktemp -d)
  trap 'rm -rf "$tmpdir"' EXIT

  nix-instantiate --eval --strict --json --arg enableWarnings false "$NIXPKGS_PATH"/pkgs/top-level/release-attrpaths-superset.nix -A paths > "$tmpdir/paths.json"

  CORES=$(nproc)
  # Comment originally from @amjoseph: the number of chunks is four times
  # the number of cores -- this helps in two ways:
  # 1. Keeping cores busy while I/O operations are in flight
  # 2. Since the amount of time needed for the jobs is *not* balanced,
  #    this minimizes the "tail latency" for the very last job to finish
  #    (on one core) by making the job size smaller.
  NUM_CHUNKS=$(( 4 * CORES ))
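  # Illustrative numbers: on a 4-core runner this gives NUM_CHUNKS=16;
  # parallel still runs only 4 jobs at once, but each job evaluates roughly
  # 1/16th of the attribute paths, so the final straggler job stays short.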


  (
    set +e
    parallel -j "$CORES" \
      nix-env -qaP --no-name --out-path --arg checkMeta true --arg includeBroken true \
      --arg systems "[\"$system\"]" \
      -f "$NIXPKGS_PATH"/ci/parallel.nix --arg attrPathFile "$tmpdir"/paths.json \
      --arg numChunks "$NUM_CHUNKS" --show-trace --arg myChunk \
      -- $(seq 0 $(( NUM_CHUNKS - 1 ))) > "$tmpdir/paths"
    echo $? > "$tmpdir/exit-code"
  ) &
  pid=$!
  while kill -0 "$pid" 2>/dev/null; do
    free -g >&2
    sleep 20
  done
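  # Turn the "<attrPath> <outPath>" lines emitted by nix-env into a JSON
  # object, e.g. {"hello.x86_64-linux": "/nix/store/...-hello"} (example
  # values for illustration only).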
  jq --raw-input --slurp 'split("\n") | map(select(. != "") | split(" ") | map(select(. != "")) | { key: .[0], value: .[1] }) | from_entries' "$tmpdir/paths"
  exit "$(cat "$tmpdir/exit-code")"
}

main "$@"
61 changes: 61 additions & 0 deletions ci/parallel.nix
@@ -0,0 +1,61 @@
/*
  Invocation: see ci/eval-nixpkgs.sh, which runs one nix-env process per
  chunk. The number of chunks is four times the number of cores -- this
  helps in two ways:
  1. Keeping cores busy while I/O operations are in flight
  2. Since the amount of time needed for the jobs is *not* balanced,
     this minimizes the "tail latency" for the very last job to finish
     (on one core) by making the job size smaller.
*/
# see pkgs/top-level/nohydra
{ lib ? import ../lib
, checkMeta
, includeBroken ? true
, path ? ./..
, systems
, myChunk
, numChunks
, attrPathFile
}:

let
  attrPaths = builtins.fromJSON (builtins.readFile attrPathFile);
  chunkSize = (lib.length attrPaths) / numChunks;
  myPaths =
    let
      dropped = lib.drop (chunkSize * myChunk) attrPaths;
    in
    if myChunk == numChunks - 1
    then dropped
    else lib.take chunkSize dropped;
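  # Illustrative numbers: with 10 attribute paths and numChunks = 4,
  # chunkSize = 10 / 4 = 2, so chunks 0-2 get 2 paths each and the last
  # chunk picks up the remaining 4.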

  unfiltered = import ../pkgs/top-level/release-outpaths.nix {
    inherit checkMeta path includeBroken systems;
  };
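  # f (below) restricts the full `unfiltered` attribute set to this chunk's
  # attribute paths: it groups the paths by their component at depth i and,
  # per group, either keeps the whole subtree (when a requested path ends at
  # this depth) or recurses one level deeper; names missing from the set
  # become null.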

  f = i: m: a:
    lib.mapAttrs (name: values:
      if a ? ${name} then
        if lib.any (value: lib.length value <= i + 1) values then
          a.${name}
        else
          f (i + 1) values a.${name}
      else
        null
    ) (lib.groupBy (a: lib.elemAt a i) m);

  filtered = f 0 myPaths unfiltered;
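  # nix-env only recurses into attribute sets that set
  # recurseForDerivations = true, so recurseEverywhere (below) marks every
  # non-derivation attribute set in the filtered tree.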

  recurseEverywhere = val:
    if lib.isDerivation val || !(lib.isAttrs val)
    then val
    else (builtins.mapAttrs (_: v: recurseEverywhere v) val)
      // { recurseForDerivations = true; };

in
recurseEverywhere filtered
