diff --git a/ci/ci-util.py b/ci/ci-util.py
index 919a49e6..1ec69b00 100755
--- a/ci/ci-util.py
+++ b/ci/ci-util.py
@@ -195,34 +195,19 @@ def locate_baseline(flags: list[str]) -> None:
         exit(1)
 
     try:
-        args = [
-            "gh",
-            "run",
-            "list",
-            "--limit=1",
-            "--status=success",
-            f"--branch={DEFAULT_BRANCH}",
-            "--json=databaseId,url,headSha,conclusion,createdAt,"
-            "status,workflowDatabaseId,workflowName",
-            f'--jq=select(.[].workflowName == "{WORKFLOW_NAME}")',
-        ]
-
-        eprint("ARGS:", args)
-
         # Locate the most recent job to complete with success on our branch
         latest_job = sp.check_output(
-            # [
-            #     "gh",
-            #     "run",
-            #     "list",
-            #     "--limit=1",
-            #     "--status=success",
-            #     f"--branch={DEFAULT_BRANCH}",
-            #     "--json=databaseId,url,headSha,conclusion,createdAt,"
-            #     "status,workflowDatabaseId,workflowName",
-            #     f'--jq=select(.[].workflowName == "{WORKFLOW_NAME}")',
-            # ],
-            args,
+            [
+                "gh",
+                "run",
+                "list",
+                "--limit=1",
+                "--status=success",
+                f"--branch={DEFAULT_BRANCH}",
+                "--json=databaseId,url,headSha,conclusion,createdAt,"
+                "status,workflowDatabaseId,workflowName",
+                f'--jq=select(.[].workflowName == "{WORKFLOW_NAME}")',
+            ],
             text=True,
         )
         eprint(f"latest: '{latest_job}'")
@@ -230,15 +215,20 @@ def locate_baseline(flags: list[str]) -> None:
         eprint(f"failed to run github command: {e}")
         return
 
-    latest = json.loads(latest_job)[0]
-    eprint("latest job: ", json.dumps(latest, indent=4))
+    try:
+        latest = json.loads(latest_job)[0]
+        eprint("latest job: ", json.dumps(latest, indent=4))
+    except json.JSONDecodeError as e:
+        eprint(f"failed to decode json '{latest_job}', {e}")
+        return
 
     if not download:
+        eprint("--download not specified, returning")
         return
 
     job_id = latest.get("databaseId")
     if job_id is None:
-        eprint("unable to find job ID")
+        eprint("skipping download step")
         return
 
     sp.run(
@@ -247,6 +237,7 @@
     )
 
     if not extract:
+        eprint("skipping extraction step")
         return
 
     # Find the baseline with the most recent timestamp. GH downloads the files to e.g.
@@ -260,6 +251,7 @@
     baseline_archive = candidate_baselines[0]
     eprint(f"extracting {baseline_archive}")
     sp.run(["tar", "xJvf", baseline_archive], check=True)
+    eprint("baseline extracted successfully")
 
 
 def check_iai_regressions(iai_home: str | None | Path):
diff --git a/ci/run.sh b/ci/run.sh
index 1d11c758..bb749b72 100755
--- a/ci/run.sh
+++ b/ci/run.sh
@@ -109,7 +109,7 @@ cmd="$cmd --exclude util --exclude libm-macros"
 
 # Test once with intrinsics enabled
 $cmd --features unstable-intrinsics
-$cmd --features unstable-intrinsics --bench random
+$cmd --features unstable-intrinsics --benches
 
 # Test the same in release mode, which also increases coverage. Also ensure
 # the soft float routines are checked.
diff --git a/crates/libm-test/Cargo.toml b/crates/libm-test/Cargo.toml
index 86188e4f..3a1ba879 100644
--- a/crates/libm-test/Cargo.toml
+++ b/crates/libm-test/Cargo.toml
@@ -55,6 +55,7 @@ libtest-mimic = "0.8.1"
 [[bench]]
 name = "icount"
 harness = false
+required-features = ["icount"]
 
 [[bench]]
 name = "random"