Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

gh-127718: Add colour to test.regrtest output #127719

Merged
merged 4 commits into from
Dec 10, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions Doc/library/test.rst
Original file line number Diff line number Diff line change
Expand Up @@ -192,6 +192,10 @@ top-level directory where Python was built. On Windows,
executing :program:`rt.bat` from your :file:`PCbuild` directory will run all
regression tests.

.. versionadded:: 3.14
Output is colorized by default and can be
:ref:`controlled using environment variables <using-on-controlling-color>`.


:mod:`test.support` --- Utilities for the Python test suite
===========================================================
Expand Down
9 changes: 8 additions & 1 deletion Lib/test/libregrtest/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
import sysconfig
import time
import trace
from _colorize import get_colors # type: ignore[import-not-found]
from typing import NoReturn

from test.support import os_helper, MS_WINDOWS, flush_std_streams
Expand Down Expand Up @@ -270,6 +271,9 @@ def _rerun_failed_tests(self, runtests: RunTests) -> RunTests:
return runtests

def rerun_failed_tests(self, runtests: RunTests) -> None:
ansi = get_colors()
red, reset = ansi.BOLD_RED, ansi.RESET

if self.python_cmd:
# Temp patch for https://github.com/python/cpython/issues/94052
self.log(
Expand All @@ -284,7 +288,10 @@ def rerun_failed_tests(self, runtests: RunTests) -> None:
rerun_runtests = self._rerun_failed_tests(runtests)

if self.results.bad:
print(count(len(self.results.bad), 'test'), "failed again:")
print(
f"{red}{count(len(self.results.bad), 'test')} "
f"failed again:{reset}"
)
printlist(self.results.bad)

self.display_result(rerun_runtests)
Expand Down
50 changes: 34 additions & 16 deletions Lib/test/libregrtest/result.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import dataclasses
import json
from _colorize import get_colors # type: ignore[import-not-found]
from typing import Any

from .utils import (
Expand Down Expand Up @@ -105,54 +106,71 @@ def is_failed(self, fail_env_changed: bool) -> bool:
return State.is_failed(self.state)

def _format_failed(self):
ansi = get_colors()
red, reset = ansi.BOLD_RED, ansi.RESET
if self.errors and self.failures:
le = len(self.errors)
lf = len(self.failures)
error_s = "error" + ("s" if le > 1 else "")
failure_s = "failure" + ("s" if lf > 1 else "")
return f"{self.test_name} failed ({le} {error_s}, {lf} {failure_s})"
return (
f"{red}{self.test_name} failed "
f"({le} {error_s}, {lf} {failure_s}){reset}"
)

if self.errors:
le = len(self.errors)
error_s = "error" + ("s" if le > 1 else "")
return f"{self.test_name} failed ({le} {error_s})"
return f"{red}{self.test_name} failed ({le} {error_s}){reset}"

if self.failures:
lf = len(self.failures)
failure_s = "failure" + ("s" if lf > 1 else "")
return f"{self.test_name} failed ({lf} {failure_s})"
return f"{red}{self.test_name} failed ({lf} {failure_s}){reset}"

return f"{self.test_name} failed"
return f"{red}{self.test_name} failed{reset}"

def __str__(self) -> str:
ansi = get_colors()
green = ansi.GREEN
red = ansi.BOLD_RED
reset = ansi.RESET
yellow = ansi.YELLOW

match self.state:
case State.PASSED:
return f"{self.test_name} passed"
return f"{green}{self.test_name} passed{reset}"
case State.FAILED:
return self._format_failed()
return f"{red}{self._format_failed()}{reset}"
case State.SKIPPED:
return f"{self.test_name} skipped"
return f"{yellow}{self.test_name} skipped{reset}"
case State.UNCAUGHT_EXC:
return f"{self.test_name} failed (uncaught exception)"
return (
f"{red}{self.test_name} failed (uncaught exception){reset}"
)
case State.REFLEAK:
return f"{self.test_name} failed (reference leak)"
return f"{red}{self.test_name} failed (reference leak){reset}"
case State.ENV_CHANGED:
return f"{self.test_name} failed (env changed)"
return f"{red}{self.test_name} failed (env changed){reset}"
case State.RESOURCE_DENIED:
return f"{self.test_name} skipped (resource denied)"
return f"{yellow}{self.test_name} skipped (resource denied){reset}"
case State.INTERRUPTED:
return f"{self.test_name} interrupted"
return f"{yellow}{self.test_name} interrupted{reset}"
case State.WORKER_FAILED:
return f"{self.test_name} worker non-zero exit code"
return (
f"{red}{self.test_name} worker non-zero exit code{reset}"
)
case State.WORKER_BUG:
return f"{self.test_name} worker bug"
return f"{red}{self.test_name} worker bug{reset}"
case State.DID_NOT_RUN:
return f"{self.test_name} ran no tests"
return f"{yellow}{self.test_name} ran no tests{reset}"
case State.TIMEOUT:
assert self.duration is not None, "self.duration is None"
return f"{self.test_name} timed out ({format_duration(self.duration)})"
case _:
raise ValueError("unknown result state: {state!r}")
raise ValueError(
f"{red}unknown result state: {{state!r}}{reset}"
)

def has_meaningful_duration(self):
    # Delegate to the State helper: whether this result's duration is
    # worth reporting depends on its end state.
    return State.has_meaningful_duration(self.state)
Expand Down
89 changes: 61 additions & 28 deletions Lib/test/libregrtest/results.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import sys
import trace
from _colorize import get_colors # type: ignore[import-not-found]
from typing import TYPE_CHECKING

from .runtests import RunTests
Expand Down Expand Up @@ -59,19 +60,24 @@ def no_tests_run(self) -> bool:

def get_state(self, fail_env_changed: bool) -> str:
state = []
ansi = get_colors()
green = ansi.GREEN
red = ansi.BOLD_RED
reset = ansi.RESET
yellow = ansi.YELLOW
if self.bad:
state.append("FAILURE")
state.append(f"{red}FAILURE{reset}")
elif fail_env_changed and self.env_changed:
state.append("ENV CHANGED")
state.append(f"{yellow}ENV CHANGED{reset}")
elif self.no_tests_run():
state.append("NO TESTS RAN")
state.append(f"{yellow}NO TESTS RAN{reset}")

if self.interrupted:
state.append("INTERRUPTED")
state.append(f"{yellow}INTERRUPTED{reset}")
if self.worker_bug:
state.append("WORKER BUG")
state.append(f"{red}WORKER BUG{reset}")
if not state:
state.append("SUCCESS")
state.append(f"{green}SUCCESS{reset}")

return ', '.join(state)

Expand Down Expand Up @@ -197,27 +203,51 @@ def write_junit(self, filename: StrPath) -> None:
f.write(s)

def display_result(self, tests: TestTuple, quiet: bool, print_slowest: bool) -> None:
ansi = get_colors()
green = ansi.GREEN
red = ansi.BOLD_RED
reset = ansi.RESET
yellow = ansi.YELLOW

if print_slowest:
self.test_times.sort(reverse=True)
print()
print("10 slowest tests:")
print(f"{yellow}10 slowest tests:{reset}")
for test_time, test in self.test_times[:10]:
print("- %s: %s" % (test, format_duration(test_time)))
print(f"- {test}: {format_duration(test_time)}")

all_tests = []
omitted = set(tests) - self.get_executed()

# less important
all_tests.append((sorted(omitted), "test", "{} omitted:"))
all_tests.append(
(sorted(omitted), "test", f"{yellow}{{}} omitted:{reset}")
)
if not quiet:
all_tests.append((self.skipped, "test", "{} skipped:"))
all_tests.append((self.resource_denied, "test", "{} skipped (resource denied):"))
all_tests.append((self.run_no_tests, "test", "{} run no tests:"))
all_tests.append(
(self.skipped, "test", f"{yellow}{{}} skipped:{reset}")
)
all_tests.append(
(
self.resource_denied,
"test",
f"{yellow}{{}} skipped (resource denied):{reset}",
)
)
all_tests.append(
(self.run_no_tests, "test", f"{yellow}{{}} run no tests:{reset}")
)

# more important
all_tests.append((self.env_changed, "test", "{} altered the execution environment (env changed):"))
all_tests.append((self.rerun, "re-run test", "{}:"))
all_tests.append((self.bad, "test", "{} failed:"))
all_tests.append(
(
self.env_changed,
"test",
f"{yellow}{{}} altered the execution environment (env changed):{reset}",
)
)
all_tests.append((self.rerun, "re-run test", f"{yellow}{{}}:{reset}"))
all_tests.append((self.bad, "test", f"{red}{{}} failed:{reset}"))

for tests_list, count_text, title_format in all_tests:
if tests_list:
Expand All @@ -229,26 +259,29 @@ def display_result(self, tests: TestTuple, quiet: bool, print_slowest: bool) ->
if self.good and not quiet:
print()
text = count(len(self.good), "test")
text = f"{text} OK."
if (self.is_all_good() and len(self.good) > 1):
text = f"{green}{text} OK.{reset}"
if self.is_all_good() and len(self.good) > 1:
text = f"All {text}"
print(text)

if self.interrupted:
print()
print("Test suite interrupted by signal SIGINT.")
print(f"{yellow}Test suite interrupted by signal SIGINT.{reset}")

def display_summary(self, first_runtests: RunTests, filtered: bool) -> None:
# Total tests
ansi = get_colors()
red, reset, yellow = ansi.RED, ansi.RESET, ansi.YELLOW

stats = self.stats
text = f'run={stats.tests_run:,}'
if filtered:
text = f"{text} (filtered)"
report = [text]
if stats.failures:
report.append(f'failures={stats.failures:,}')
report.append(f'{red}failures={stats.failures:,}{reset}')
if stats.skipped:
report.append(f'skipped={stats.skipped:,}')
report.append(f'{yellow}skipped={stats.skipped:,}{reset}')
print(f"Total tests: {' '.join(report)}")

# Total test files
Expand All @@ -263,14 +296,14 @@ def display_summary(self, first_runtests: RunTests, filtered: bool) -> None:
if filtered:
text = f"{text} (filtered)"
report = [text]
for name, tests in (
('failed', self.bad),
('env_changed', self.env_changed),
('skipped', self.skipped),
('resource_denied', self.resource_denied),
('rerun', self.rerun),
('run_no_tests', self.run_no_tests),
for name, tests, color in (
('failed', self.bad, red),
('env_changed', self.env_changed, yellow),
('skipped', self.skipped, yellow),
('resource_denied', self.resource_denied, yellow),
('rerun', self.rerun, yellow),
('run_no_tests', self.run_no_tests, yellow),
):
if tests:
report.append(f'{name}={len(tests)}')
report.append(f'{color}{name}={len(tests)}{reset}')
print(f"Total test files: {' '.join(report)}")
22 changes: 14 additions & 8 deletions Lib/test/libregrtest/single.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
import traceback
import unittest

from _colorize import get_colors # type: ignore[import-not-found]
from test import support
from test.support import threading_helper

Expand Down Expand Up @@ -161,6 +162,8 @@ def test_func():
def _runtest_env_changed_exc(result: TestResult, runtests: RunTests,
display_failure: bool = True) -> None:
# Handle exceptions, detect environment changes.
ansi = get_colors()
red, reset, yellow = ansi.RED, ansi.RESET, ansi.YELLOW

# Reset the environment_altered flag to detect if a test altered
# the environment
Expand All @@ -181,28 +184,28 @@ def _runtest_env_changed_exc(result: TestResult, runtests: RunTests,
_load_run_test(result, runtests)
except support.ResourceDenied as exc:
if not quiet and not pgo:
print(f"{test_name} skipped -- {exc}", flush=True)
print(f"{yellow}{test_name} skipped -- {exc}{reset}", flush=True)
result.state = State.RESOURCE_DENIED
return
except unittest.SkipTest as exc:
if not quiet and not pgo:
print(f"{test_name} skipped -- {exc}", flush=True)
print(f"{yellow}{test_name} skipped -- {exc}{reset}", flush=True)
result.state = State.SKIPPED
return
except support.TestFailedWithDetails as exc:
msg = f"test {test_name} failed"
msg = f"{red}test {test_name} failed{reset}"
if display_failure:
msg = f"{msg} -- {exc}"
msg = f"{red}{msg} -- {exc}{reset}"
print(msg, file=sys.stderr, flush=True)
result.state = State.FAILED
result.errors = exc.errors
result.failures = exc.failures
result.stats = exc.stats
return
except support.TestFailed as exc:
msg = f"test {test_name} failed"
msg = f"{red}test {test_name} failed{reset}"
if display_failure:
msg = f"{msg} -- {exc}"
msg = f"{red}{msg} -- {exc}{reset}"
print(msg, file=sys.stderr, flush=True)
result.state = State.FAILED
result.stats = exc.stats
Expand All @@ -217,7 +220,7 @@ def _runtest_env_changed_exc(result: TestResult, runtests: RunTests,
except:
if not pgo:
msg = traceback.format_exc()
print(f"test {test_name} crashed -- {msg}",
print(f"{red}test {test_name} crashed -- {msg}{reset}",
file=sys.stderr, flush=True)
result.state = State.UNCAUGHT_EXC
return
Expand Down Expand Up @@ -300,6 +303,9 @@ def run_single_test(test_name: TestName, runtests: RunTests) -> TestResult:
If runtests.use_junit, xml_data is a list containing each generated
testsuite element.
"""
ansi = get_colors()
red, reset, yellow = ansi.BOLD_RED, ansi.RESET, ansi.YELLOW

start_time = time.perf_counter()
result = TestResult(test_name)
pgo = runtests.pgo
Expand All @@ -308,7 +314,7 @@ def run_single_test(test_name: TestName, runtests: RunTests) -> TestResult:
except:
if not pgo:
msg = traceback.format_exc()
print(f"test {test_name} crashed -- {msg}",
print(f"{red}test {test_name} crashed -- {msg}{reset}",
file=sys.stderr, flush=True)
result.state = State.UNCAUGHT_EXC

Expand Down
Loading
Loading