diff --git a/.github/workflows/eval.yml b/.github/workflows/eval.yml
index a7c3a94a67880..3b8dc8fe2c548 100644
--- a/.github/workflows/eval.yml
+++ b/.github/workflows/eval.yml
@@ -6,12 +6,12 @@ permissions:
   contents: read
 
 jobs:
-  tests:
-    name: eval-check
+  attrs:
+    name: Attrs
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        system: [x86_64-linux, aarch64-linux, aarch64-darwin, x86_64-darwin]
+    outputs:
+      systems: ${{ steps.systems.outputs.systems }}
+      mergedSha: ${{ steps.merged.outputs.mergedSha }}
     steps:
       # Important: Because of `pull_request_target`, this doesn't check out the PR,
       # but rather the base branch of the PR, which is needed so we don't run untrusted code
@@ -20,12 +20,13 @@ jobs:
           path: base
           sparse-checkout: ci
       - name: Resolving the merge commit
+        id: merged
         env:
           GH_TOKEN: ${{ github.token }}
         run: |
           if mergedSha=$(base/ci/get-merge-commit.sh ${{ github.repository }} ${{ github.event.number }}); then
             echo "Checking the merge commit $mergedSha"
-            echo "mergedSha=$mergedSha" >> "$GITHUB_ENV"
+            echo "mergedSha=$mergedSha" >> "$GITHUB_OUTPUT"
           else
             # Skipping so that no notifications are sent
             echo "Skipping the rest..."
@@ -33,13 +34,52 @@ jobs:
           rm -rf base
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         # Add this to _all_ subsequent steps to skip them
-        if: env.mergedSha
+        if: steps.merged.outputs.mergedSha
         with:
-          ref: ${{ env.mergedSha }}
+          ref: ${{ steps.merged.outputs.mergedSha }}
+          path: nixpkgs
+
+      - uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
+        if: steps.merged.outputs.mergedSha
+
+      - id: systems
+        if: steps.merged.outputs.mergedSha
+        run: |
+          nix-build nixpkgs/ci -A eval.evalPlan
+          echo "systems=$(<result/systems.json)" >> "$GITHUB_OUTPUT"
+
+      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
+        if: steps.merged.outputs.mergedSha
+        with:
+          name: eval-plan
+          path: result/systems
+
+  eval:
+    name: Eval
+    runs-on: ubuntu-latest
+    needs: attrs
+    if: needs.attrs.outputs.mergedSha
+    strategy:
+      matrix:
+        system: ${{ fromJSON(needs.attrs.outputs.systems) }}
+    steps:
+      - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
+        with:
+          name: eval-plan
+          path: systems
+
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          ref: ${{ needs.attrs.outputs.mergedSha }}
+          path: nixpkgs
 
       - uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
-        if: env.mergedSha
       - name: Check eval
-        if: env.mergedSha
-        run: ./ci/eval-nixpkgs.sh --system "${{ matrix.system }}"
+        run: nix-build nixpkgs/ci -A eval.singleSystem --arg systemDir systems/${{ matrix.system }}
+
+      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
+        if: needs.attrs.outputs.mergedSha
+        with:
+          name: result-${{ matrix.system }}
+          path: result/paths
diff --git a/ci/default.nix b/ci/default.nix
index 02b2e948d17b8..f5a6cf7d3dd0e 100644
--- a/ci/default.nix
+++ b/ci/default.nix
@@ -26,4 +26,5 @@ in
   inherit pkgs;
   requestReviews = pkgs.callPackage ./request-reviews { };
   codeownersValidator = pkgs.callPackage ./codeowners-validator { };
+  eval = pkgs.callPackage ./eval { };
 }
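The workflow above now has two stages: an `attrs` job that resolves the merge commit, builds the evaluation plan, and exposes the system list as a job output, and a matrix `eval` job that evaluates one plan directory per system. A rough local sketch of the same two steps, assuming the `eval.evalPlan` and `eval.singleSystem` attributes added in `ci/eval/default.nix` below and a checkout at the repository root (the `eval-plan` out-link name and the `x86_64-linux` choice are only examples):

```bash
# Build the evaluation plan: per-system chunk files plus systems.json
nix-build ci -A eval.evalPlan -o eval-plan

# Evaluate every chunk for one system, mirroring the matrix job's run step
nix-build ci -A eval.singleSystem --arg systemDir ./eval-plan/systems/x86_64-linux
```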
diff --git a/ci/eval-nixpkgs.sh b/ci/eval-nixpkgs.sh
deleted file mode 100755
index ca1a22a99ac87..0000000000000
--- a/ci/eval-nixpkgs.sh
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env nix-shell
-#!nix-shell -i bash -p coreutils moreutils -I nixpkgs=channel:nixpkgs-unstable
-
-set -euxo pipefail
-
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
-NIXPKGS_PATH="$(readlink -f "$SCRIPT_DIR"/..)"
-
-system="x86_64-linux"
-quick_test=0
-CORES=$(nproc)
-
-parseArgs() {
-  while [[ $# -gt 0 ]]; do
-    arg=$1
-    shift
-    case "$arg" in
-      --system)
-        system=$1
-        shift 1
-        ;;
-      --cores)
-        CORES=$1
-        shift 1
-        ;;
-      --quick-test)
-        quick_test=1
-        ;;
-      *)
-        echo "Unknown argument: $arg"
-        exit 1
-        ;;
-    esac
-  done
-}
-
-main() {
-  parseArgs "$@"
-  tmpdir=$(mktemp -d)
-  trap 'rm -rf "$tmpdir"' EXIT
-
-  nix-instantiate --eval --strict --json --arg enableWarnings false "$NIXPKGS_PATH"/pkgs/top-level/release-attrpaths-superset.nix -A paths >"$tmpdir/paths.json"
-
-  # Originally @amjoseph: note that the number of processes spawned is four times
-  # the number of cores -- this helps in two ways:
-  # 1. Keeping cores busy while I/O operations are in flight
-  # 2. Since the amount of time needed for the jobs is *not* balanced
-  #    this minimizes the "tail latency" for the very last job to finish
-  #    (on one core) by making the job size smaller.
-  local num_chunks=$((4 * CORES))
-  local seq_end=$((num_chunks - 1))
-  if [[ $quick_test -eq 1 ]]; then
-    seq_end=0
-  fi
-
-  (
-    set +e
-    seq 0 $seq_end | xargs -P "$CORES" -I {} nix-env -qaP --no-name --out-path --arg checkMeta true --arg includeBroken true \
-      --arg systems "[\"$system\"]" \
-      -f "$NIXPKGS_PATH"/ci/parallel.nix --arg attrPathFile "$tmpdir"/paths.json \
-      --arg numChunks "$num_chunks" --show-trace --arg myChunk {} >"$tmpdir/paths"
-    echo $? >"$tmpdir/exit-code"
-  ) &
-  pid=$!
-  while kill -0 "$pid"; do
-    free -g >&2
-    sleep 20
-  done
-
-  # Extract derivation outputs for each attribute into a JSON object
-  # i.e. { "attr1": { "out": "/nix/store/...", "dev": "/nix/store/..." }, ... }
-  jq --raw-input --slurp '
-    split("\n") |
-    map(select(. != "") | split(" ") | map(select(. != ""))) |
-    map(
-      {
-        key: .[0],
-        value: .[1] | split(";") | map(split("=") |
-          if length == 1 then
-            { key: "out", value: .[0] }
-          else
-            { key: .[0], value: .[1] }
-          end) | from_entries}
-    ) | from_entries
-  ' "$tmpdir/paths"
-  exit "$(cat "$tmpdir/exit-code")"
-}
-
-main "$@"
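For reference, the script deleted above was the single entry point the old workflow called: it computed the attribute-path superset, fanned `nix-env` out over `4 * $CORES` chunks of `ci/parallel.nix`, and printed one JSON object mapping each attribute to its outputs. A hypothetical local invocation, with the flags its `parseArgs` accepted (`--quick-test` restricts the run to the first chunk):

```bash
# Old interface, now replaced by the ci/eval attributes introduced below
./ci/eval-nixpkgs.sh --system x86_64-linux --cores 8 --quick-test > outpaths.json
```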
diff --git a/ci/eval/default.nix b/ci/eval/default.nix
new file mode 100644
index 0000000000000..0f788e61db50f
--- /dev/null
+++ b/ci/eval/default.nix
@@ -0,0 +1,222 @@
+{
+  lib,
+  runCommand,
+  writeShellScript,
+  time,
+  procps,
+  nix,
+  jq,
+}:
+
+# Use the GitHub Actions cache to cache /nix/store
+# Although can't really merge..
+# Use artifacts and pass files manually, also doesn't have to repeat eval then
+let
+  nixpkgs =
+    with lib.fileset;
+    toSource {
+      root = ../..;
+      fileset = unions (
+        map (lib.path.append ../..) [
+          "default.nix"
+          "doc"
+          "lib"
+          "maintainers"
+          "nixos"
+          "ci"
+          "pkgs"
+          ".version"
+        ]
+      );
+    };
+
+  attrpathsSuperset =
+    runCommand "attrpaths-superset.json"
+      {
+        src = nixpkgs;
+        nativeBuildInputs = [
+          nix
+        ];
+      }
+      ''
+        export NIX_STATE_DIR=$(mktemp -d)
+        mkdir $out
+        nix-instantiate --eval --strict --json $src/pkgs/top-level/release-attrpaths-superset.nix -A paths > $out/paths.json
+      '';
+
+  # Takes a path to an attrpathsSuperset result and computes the result of evaluating it entirely
+  evalPlan =
+    {
+      # null means as many cores as your machine has
+      cores ? null,
+      # How many attributes to be evaluating at any single time.
+      # This effectively limits the maximum memory usage.
+      # Decrease this if too much memory is used
+      simultaneousAttrsPerSystem ? 100000,
+      checkMeta ? true,
+      includeBroken ? true,
+      # TODO
+      quickTest ? false,
+    }:
+    runCommand "eval-plan"
+      {
+        nativeBuildInputs = [
+          jq
+        ];
+        env.cores = toString cores;
+        supportedSystems = builtins.toJSON (import ../supportedSystems.nix);
+        passAsFile = [ "supportedSystems" ];
+      }
+      ''
+        if [[ -z "$cores" ]]; then
+          cores=$(nproc)
+        fi
+        echo "Cores: $cores"
+        num_attrs=$(jq length "${attrpathsSuperset}/paths.json")
+        echo "Attribute count: $num_attrs"
+        chunk_size=$(( ${toString simultaneousAttrsPerSystem} / cores ))
+        echo "Chunk size: $chunk_size"
+        # Same as `num_attrs / chunk_size` but rounded up
+        num_chunks=$(( (num_attrs - 1) / chunk_size + 1 ))
+        echo "Chunk count: $num_chunks"
+
+        mkdir -p $out/systems
+        mv "$supportedSystemsPath" $out/systems.json
+        echo "Systems: $(<$out/systems.json)"
+        for system in $(jq -r '.[]' "$out/systems.json"); do
+          mkdir -p "$out/systems/$system/chunks"
+          printf "%s" "$cores" > "$out/systems/$system/cores"
+          for chunk in $(seq -w 0 "$(( num_chunks - 1 ))"); do
+            jq '{
+              paths: .[($chunk * $chunk_size):(($chunk + 1) * $chunk_size)],
+              systems: [ $system ],
+              checkMeta: $checkMeta,
+              includeBroken: $includeBroken
+            }' \
+              --argjson chunk "$((10#$chunk))" \
+              --argjson chunk_size "$chunk_size" \
+              --arg system "$system" \
+              --argjson checkMeta "${lib.boolToString checkMeta}" \
+              --argjson includeBroken "${lib.boolToString includeBroken}" \
+              ${attrpathsSuperset}/paths.json \
+              > "$out/systems/$system/chunks/$chunk.json"
+          done
+        done
+      '';
+
+  singleSystem =
+    let
+      singleChunk = writeShellScript "chunk" ''
+        set -euo pipefail
+        chunkFile=$1
+        outputDir=$2
+
+        nix-env -f "${nixpkgs}/ci/eval/parallel.nix" \
+          --query --available \
+          --no-name --attr-path --out-path \
+          --show-trace \
+          --arg chunkFile "$chunkFile" > "$outputDir/$(basename "$chunkFile")"
+      '';
+    in
+    {
+      systemDir,
+    }:
+    runCommand "nixpkgs-eval-system-${baseNameOf systemDir}"
+      {
+        nativeBuildInputs = [
+          nix
+          time
+          procps
+        ];
+      }
+      ''
+        export NIX_STATE_DIR=$(mktemp -d)
+        nix-store --init
+
+        cores=$(<${systemDir}/cores)
+        chunkOutputDir=$(mktemp -d)
+
+        (
+          while true; do
+            free -g
+            sleep 20
+          done
+        ) &
+
+        find ${systemDir}/chunks -name '*.json' -print0 |
+          command time -v xargs -0 -t -I{} -P"$cores" \
+            ${singleChunk} {} "$chunkOutputDir"
+
+        mkdir $out
+        cat "$chunkOutputDir"/* > $out/paths
+      '';
+
+  combine =
+    {
+
+    }:
+    null;
+
+  # mkdir $out
+  # for system in ${lib.escapeShellArgs (import ../supportedSystems.nix)}; do
+
+  # done
+
+  #'';
+
+  # Files of the form
+  # $out/<system>/<chunk>.json
+  # {
+  #   paths: [ [ ... ], ... ],
+  #   system: ...,
+  # }
+
+  #attrsSuperset = import ../../pkgs/test/release {
+  #  # Important: This is used to get dependencies for the evaluation itself.
+  #  # Because we don't want it to take forever on staging PRs,
+  #  # this uses the pinned CI version instead.
+  #  inherit pkgs;
+  #};
+
+  # How to get the list of supported systems?
+  # ci/supportedSystems.nix
+
+  #chunkResult =
+  #  {
+  #    # null means all default systems
+  #    # Pass a list of strings to select specific ones
+  #    system,
+  #    attrsSupersetFile,
+  #    myChunk,
+  #  }:
+  #  pkgs.runCommand "chunk-result-${system}-${toString myChunk}" ''
+  #  '';
+
+in
+#combinedResult =
+#  {
+#    chunkResults,
+#  }:
+#  pkgs.runCommand "combined-result" {
+#    passAsFile = [ "jqScript" ];
+#    jqScript = /* jq */ ''
+#      split("\n") |
+#      map(select(. != "") | split(" ") | map(select(. != ""))) |
+#      map(
+#        {
+#          key: .[0],
+#          value: .[1] | split(";") | map(split("=") |
+#            if length == 1 then
+#              { key: "out", value: .[0] }
+#            else
+#              { key: .[0], value: .[1] }
+#            end) | from_entries}
+#      ) | from_entries
+#    '';
+#  } ''
+#    cat ${lib.escapeShellArgs chunkResults} |
+#      jq --raw-input --slurp -f "$jqScriptPath" "$tmpdir/paths" \
+#      > $out/outpaths.json
+#  '';
+{
+  inherit attrpathsSuperset evalPlan singleSystem;
+}
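The chunking arithmetic in `evalPlan` bounds peak memory: at most `simultaneousAttrsPerSystem` attributes are being evaluated at any one time, split evenly across the available cores. A worked example with the defaults above and a made-up attribute count (only `num_attrs` is hypothetical):

```bash
simultaneousAttrsPerSystem=100000   # default from evalPlan
cores=4                             # nproc on the runner, for the sake of the example
num_attrs=120000                    # hypothetical size of paths.json

chunk_size=$(( simultaneousAttrsPerSystem / cores ))  # 25000 attributes per chunk
num_chunks=$(( (num_attrs - 1) / chunk_size + 1 ))    # ceil(120000 / 25000) = 5 chunks
echo "$chunk_size $num_chunks"                        # prints: 25000 5
```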
!= ""))) | +# map( +# { +# key: .[0], +# value: .[1] | split(";") | map(split("=") | +# if length == 1 then +# { key: "out", value: .[0] } +# else +# { key: .[0], value: .[1] } +# end) | from_entries} +# ) | from_entries +# ''; +# } '' +# cat ${lib.escapeShellArgs chunkResults} | +# jq --raw-input --slurp -f "$jqScriptPath" "$tmpdir/paths" \ +# > $out/outpaths.json +# ''; +{ + inherit attrpathsSuperset evalPlan singleSystem; +} diff --git a/ci/eval/parallel.nix b/ci/eval/parallel.nix new file mode 100644 index 0000000000000..82c1b466dbbf1 --- /dev/null +++ b/ci/eval/parallel.nix @@ -0,0 +1,40 @@ +{ + lib ? import ../../lib, + path ? ../.., + chunkFile, +}: + +let + chunk = lib.importJSON chunkFile; + + unfiltered = import ../../pkgs/top-level/release-outpaths.nix { + inherit path; + inherit (chunk) checkMeta includeBroken systems; + }; + + filtered = + let + recurse = + index: paths: attrs: + lib.mapAttrs ( + name: values: + if attrs ? ${name} then + if lib.any (value: lib.length value <= index + 1) values then + attrs.${name} + else + recurse (index + 1) values attrs.${name} + else + null + ) (lib.groupBy (a: lib.elemAt a index) paths); + in + recurse 0 chunk.paths unfiltered; + + recurseEverywhere = + val: + if lib.isDerivation val || !(lib.isAttrs val) then + val + else + (lib.mapAttrs (_: v: recurseEverywhere v) val) // { recurseForDerivations = true; }; + +in +recurseEverywhere filtered diff --git a/ci/parallel.nix b/ci/parallel.nix deleted file mode 100644 index 074c545fe1170..0000000000000 --- a/ci/parallel.nix +++ /dev/null @@ -1,63 +0,0 @@ -/* - Invocation: - - Invocation; note that the number of processes spawned is four times - the number of cores -- this helps in two ways: - - 1. Keeping cores busy while I/O operations are in flight - - 2. Since the amount of time needed for the jobs is *not* balanced - this minimizes the "tail latency" for the very last job to finish - (on one core) by making the job size smaller. -*/ -# see pkgs/top-level/nohydra -{ - lib ? import ../lib, - checkMeta, - includeBroken ? true, - path ? ./.., - systems, - myChunk, - numChunks, - attrPathFile, -}: - -let - attrPaths = builtins.fromJSON (builtins.readFile attrPathFile); - chunkSize = (lib.length attrPaths) / numChunks; - myPaths = - let - dropped = lib.drop (chunkSize * myChunk) attrPaths; - in - if myChunk == numChunks - 1 then dropped else lib.take chunkSize dropped; - - unfiltered = import ../pkgs/top-level/release-outpaths.nix { - inherit - checkMeta - path - includeBroken - systems - ; - }; - - f = - i: m: a: - lib.mapAttrs ( - name: values: - if a ? ${name} then - if lib.any (value: lib.length value <= i + 1) values then a.${name} else f (i + 1) values a.${name} - else - null - ) (lib.groupBy (a: lib.elemAt a i) m); - - filtered = f 0 myPaths unfiltered; - - recurseEverywhere = - val: - if lib.isDerivation val || !(lib.isAttrs val) then - val - else - (builtins.mapAttrs (_: v: recurseEverywhere v) val) // { recurseForDerivations = true; }; - -in -recurseEverywhere filtered diff --git a/ci/supportedSystems.nix b/ci/supportedSystems.nix new file mode 100644 index 0000000000000..0036f02f6b134 --- /dev/null +++ b/ci/supportedSystems.nix @@ -0,0 +1,7 @@ +[ + "aarch64-linux" + "aarch64-darwin" + #"i686-linux" # !!! 
+ "x86_64-linux" + "x86_64-darwin" +] diff --git a/lib/tests/release.nix b/lib/tests/release.nix index 084fbd94d34c2..5334498d08449 100644 --- a/lib/tests/release.nix +++ b/lib/tests/release.nix @@ -14,19 +14,5 @@ let in pkgs.symlinkJoin { name = "nixpkgs-lib-tests"; - paths = map testWithNix nixVersions ++ - - # - # TEMPORARY MIGRATION MECHANISM - # - # This comment and the expression which follows it should be - # removed as part of resolving this issue: - # - # https://github.com/NixOS/nixpkgs/issues/272591 - # - [(import ../../pkgs/test/release { - inherit pkgs lib nix; - })] - ; - + paths = map testWithNix nixVersions; } diff --git a/pkgs/top-level/release-haskell.nix b/pkgs/top-level/release-haskell.nix index 7a5a87ccf42e4..7df396f4327fc 100644 --- a/pkgs/top-level/release-haskell.nix +++ b/pkgs/top-level/release-haskell.nix @@ -9,7 +9,7 @@ $ hydra-eval-jobs -I . pkgs/top-level/release-haskell.nix */ -{ supportedSystems ? [ "x86_64-linux" "x86_64-darwin" "aarch64-linux" "aarch64-darwin" ] }: +{ supportedSystems ? import ../../ci/supportedSystems.nix }: let diff --git a/pkgs/top-level/release-outpaths.nix b/pkgs/top-level/release-outpaths.nix index 5c433fa542e0c..fd54609efa51b 100644 --- a/pkgs/top-level/release-outpaths.nix +++ b/pkgs/top-level/release-outpaths.nix @@ -12,13 +12,7 @@ , attrNamesOnly ? false # Set this to `null` to build for builtins.currentSystem only -, systems ? [ - "aarch64-linux" - "aarch64-darwin" - #"i686-linux" # !!! - "x86_64-linux" - "x86_64-darwin" - ] +, systems ? import ../../ci/supportedSystems.nix }: let lib = import (path + "/lib"); diff --git a/pkgs/top-level/release.nix b/pkgs/top-level/release.nix index 3d9de8660282b..25c28e8ff57aa 100644 --- a/pkgs/top-level/release.nix +++ b/pkgs/top-level/release.nix @@ -12,7 +12,7 @@ , system ? builtins.currentSystem , officialRelease ? false # The platform doubles for which we build Nixpkgs. -, supportedSystems ? [ "x86_64-linux" "x86_64-darwin" "aarch64-linux" "aarch64-darwin" ] +, supportedSystems ? import ../../ci/supportedSystems.nix # The platform triples for which we build bootstrap tools. , bootstrapConfigs ? [ "aarch64-apple-darwin"