diff --git a/Lib/test/libregrtest/__init__.py b/Lib/test/libregrtest/__init__.py index e69de29bb2d..8b137891791 100644 --- a/Lib/test/libregrtest/__init__.py +++ b/Lib/test/libregrtest/__init__.py @@ -0,0 +1 @@ + diff --git a/Lib/test/libregrtest/cmdline.py b/Lib/test/libregrtest/cmdline.py index 62bc36ed421..e7a12e4d0b6 100644 --- a/Lib/test/libregrtest/cmdline.py +++ b/Lib/test/libregrtest/cmdline.py @@ -44,11 +44,19 @@ doing memory analysis on the Python interpreter, which process tends to consume too many resources to run the full regression test non-stop. --S is used to continue running tests after an aborted run. It will -maintain the order a standard run (ie, this assumes -r is not used). +-S is used to resume running tests after an interrupted run. It will +maintain the order a standard run (i.e. it assumes -r is not used). This is useful after the tests have prematurely stopped for some external -reason and you want to start running from where you left off rather -than starting from the beginning. +reason and you want to resume the run from where you left off rather +than starting from the beginning. Note: this is different from --prioritize. + +--prioritize is used to influence the order of selected tests, such that +the tests listed as an argument are executed first. This is especially +useful when combined with -j and -r to pin the longest-running tests +to start at the beginning of a test run. Pass --prioritize=test_a,test_b +to make test_a run first, followed by test_b, and then the other tests. +If test_a wasn't selected for execution by regular means, --prioritize will +not make it execute. -f reads the names of tests from the file given as f's argument, one or more test names per line. Whitespace is ignored. Blank lines and @@ -87,38 +95,40 @@ The argument is a comma-separated list of words indicating the resources to test. Currently only the following are defined: - all - Enable all special resources. + all - Enable all special resources. + + none - Disable all special resources (this is the default). - none - Disable all special resources (this is the default). + audio - Tests that use the audio device. (There are known + cases of broken audio drivers that can crash Python or + even the Linux kernel.) - audio - Tests that use the audio device. (There are known - cases of broken audio drivers that can crash Python or - even the Linux kernel.) + curses - Tests that use curses and will modify the terminal's + state and output modes. - curses - Tests that use curses and will modify the terminal's - state and output modes. + largefile - It is okay to run some test that may create huge + files. These tests can take a long time and may + consume >2 GiB of disk space temporarily. - largefile - It is okay to run some test that may create huge - files. These tests can take a long time and may - consume >2 GiB of disk space temporarily. + extralargefile - Like 'largefile', but even larger (and slower). - network - It is okay to run tests that use external network - resource, e.g. testing SSL support for sockets. + network - It is okay to run tests that use external network + resource, e.g. testing SSL support for sockets. - decimal - Test the decimal module against a large suite that - verifies compliance with standards. + decimal - Test the decimal module against a large suite that + verifies compliance with standards. - cpu - Used for certain CPU-heavy tests. + cpu - Used for certain CPU-heavy tests. - walltime - Long running but not CPU-bound tests. 
+ walltime - Long running but not CPU-bound tests. - subprocess Run all tests for the subprocess module. + subprocess Run all tests for the subprocess module. - urlfetch - It is okay to download files required on testing. + urlfetch - It is okay to download files required on testing. - gui - Run tests that require a running GUI. + gui - Run tests that require a running GUI. - tzdata - Run tests that require timezone data. + tzdata - Run tests that require timezone data. To enable all resources except one, use '-uall,-'. For example, to run all the tests except for the gui tests, give the @@ -158,6 +168,7 @@ def __init__(self, **kwargs) -> None: self.print_slow = False self.random_seed = None self.use_mp = None + self.parallel_threads = None self.forever = False self.header = False self.failfast = False @@ -165,6 +176,7 @@ def __init__(self, **kwargs) -> None: self.pgo = False self.pgo_extended = False self.tsan = False + self.tsan_parallel = False self.worker_json = None self.start = None self.timeout = None @@ -232,7 +244,7 @@ def _create_parser(): help='wait for user input, e.g., allow a debugger ' 'to be attached') group.add_argument('-S', '--start', metavar='START', - help='the name of the test at which to start.' + + help='resume an interrupted run at the following test.' + more_details) group.add_argument('-p', '--python', metavar='PYTHON', help='Command to run Python test subprocesses with.') @@ -262,6 +274,10 @@ def _create_parser(): group.add_argument('--no-randomize', dest='no_randomize', action='store_true', help='do not randomize test execution order, even if ' 'it would be implied by another option') + group.add_argument('--prioritize', metavar='TEST1,TEST2,...', + action='append', type=priority_list, + help='select these tests first, even if the order is' + ' randomized.' + more_details) group.add_argument('-f', '--fromfile', metavar='FILE', help='read names of tests to run from a file.' 
+ more_details) @@ -317,6 +333,10 @@ def _create_parser(): 'a single process, ignore -jN option, ' 'and failed tests are also rerun sequentially ' 'in the same process') + group.add_argument('--parallel-threads', metavar='PARALLEL_THREADS', + type=int, + help='run copies of each test in PARALLEL_THREADS at ' + 'once') group.add_argument('-T', '--coverage', action='store_true', dest='trace', help='turn on code coverage tracing using the trace ' @@ -347,6 +367,9 @@ def _create_parser(): help='enable extended PGO training (slower training)') group.add_argument('--tsan', dest='tsan', action='store_true', help='run a subset of test cases that are proper for the TSAN test') + group.add_argument('--tsan-parallel', action='store_true', + help='run a subset of test cases that are appropriate ' + 'for TSAN with `--parallel-threads=N`') group.add_argument('--fail-env-changed', action='store_true', help='if a test file alters the environment, mark ' 'the test as failed') @@ -398,6 +421,10 @@ def resources_list(string): return u +def priority_list(string): + return string.split(",") + + def _parse_args(args, **kwargs): # Defaults ns = Namespace() @@ -549,4 +576,10 @@ def _parse_args(args, **kwargs): print(msg, file=sys.stderr, flush=True) sys.exit(2) + ns.prioritize = [ + test + for test_list in (ns.prioritize or ()) + for test in test_list + ] + return ns diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py index 3a9c06da47e..0fc2548789e 100644 --- a/Lib/test/libregrtest/main.py +++ b/Lib/test/libregrtest/main.py @@ -6,22 +6,21 @@ import sysconfig import time import trace +from _colorize import get_colors # type: ignore[import-not-found] from typing import NoReturn -from test.support import (os_helper, MS_WINDOWS, flush_std_streams, - can_use_suppress_immortalization, - suppress_immortalization) +from test.support import os_helper, MS_WINDOWS, flush_std_streams from .cmdline import _parse_args, Namespace from .findtests import findtests, split_test_packages, list_cases from .logger import Logger from .pgo import setup_pgo_tests -from .result import State, TestResult +from .result import TestResult from .results import TestResults, EXITCODE_INTERRUPTED from .runtests import RunTests, HuntRefleak from .setup import setup_process, setup_test_dir from .single import run_single_test, PROGRESS_MIN_TIME -from .tsan import setup_tsan_tests +from .tsan import setup_tsan_tests, setup_tsan_parallel_tests from .utils import ( StrPath, StrJSON, TestName, TestList, TestTuple, TestFilter, strip_py_suffix, count, format_duration, @@ -61,6 +60,7 @@ def __init__(self, ns: Namespace, _add_python_opts: bool = False): self.pgo: bool = ns.pgo self.pgo_extended: bool = ns.pgo_extended self.tsan: bool = ns.tsan + self.tsan_parallel: bool = ns.tsan_parallel # Test results self.results: TestResults = TestResults() @@ -142,6 +142,9 @@ def __init__(self, ns: Namespace, _add_python_opts: bool = False): self.random_seed = random.getrandbits(32) else: self.random_seed = ns.random_seed + self.prioritize_tests: tuple[str, ...] 
= tuple(ns.prioritize) + + self.parallel_threads = ns.parallel_threads # tests self.first_runtests: RunTests | None = None @@ -200,6 +203,9 @@ def find_tests(self, tests: TestList | None = None) -> tuple[TestTuple, TestList if self.tsan: setup_tsan_tests(self.cmdline_args) + if self.tsan_parallel: + setup_tsan_parallel_tests(self.cmdline_args) + alltests = findtests(testdir=self.test_dir, exclude=exclude_tests) @@ -236,6 +242,16 @@ def find_tests(self, tests: TestList | None = None) -> tuple[TestTuple, TestList if self.randomize: random.shuffle(selected) + for priority_test in reversed(self.prioritize_tests): + try: + selected.remove(priority_test) + except ValueError: + print(f"warning: --prioritize={priority_test} used" + f" but test not actually selected") + continue + else: + selected.insert(0, priority_test) + return (tuple(selected), tests) @staticmethod @@ -276,6 +292,9 @@ def _rerun_failed_tests(self, runtests: RunTests) -> RunTests: return runtests def rerun_failed_tests(self, runtests: RunTests) -> None: + ansi = get_colors() + red, reset = ansi.BOLD_RED, ansi.RESET + if self.python_cmd: # Temp patch for https://github.com/python/cpython/issues/94052 self.log( @@ -290,7 +309,10 @@ def rerun_failed_tests(self, runtests: RunTests) -> None: rerun_runtests = self._rerun_failed_tests(runtests) if self.results.bad: - print(count(len(self.results.bad), 'test'), "failed again:") + print( + f"{red}{count(len(self.results.bad), 'test')} " + f"failed again:{reset}" + ) printlist(self.results.bad) self.display_result(rerun_runtests) @@ -496,6 +518,7 @@ def create_run_tests(self, tests: TestTuple) -> RunTests: python_cmd=self.python_cmd, randomize=self.randomize, random_seed=self.random_seed, + parallel_threads=self.parallel_threads, ) def _run_tests(self, selected: TestTuple, tests: TestList | None) -> int: @@ -529,8 +552,7 @@ def _run_tests(self, selected: TestTuple, tests: TestList | None) -> int: use_load_tracker = False else: # WindowsLoadTracker is only needed on Windows - # use_load_tracker = MS_WINDOWS # TODO: RUSTPYTHON, investigate why this was disabled in the first place - use_load_tracker = False + use_load_tracker = MS_WINDOWS if use_load_tracker: self.logger.start_load_tracker() @@ -538,18 +560,7 @@ def _run_tests(self, selected: TestTuple, tests: TestList | None) -> int: if self.num_workers: self._run_tests_mp(runtests, self.num_workers) else: - # gh-135734: suppress_immortalization() raises SkipTest - # if _testinternalcapi is missing and the -R option is set. - if not can_use_suppress_immortalization(runtests.hunt_refleak): - print("Module '_testinternalcapi' is missing. " - "Did you disable it with --disable-test-modules?", - file=sys.stderr) - raise SystemExit(1) - - # gh-117783: don't immortalize deferred objects when tracking - # refleaks. Only relevant for the free-threaded build. 
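A minimal sketch of the --prioritize reordering implemented in find_tests() above (an editorial illustration, not part of the patch; the helper name prioritize is hypothetical). Walking the prioritized names in reverse and re-inserting each at index 0 keeps them in the order given on the command line, e.g. ./python -m test -j4 -r --prioritize=test_a,test_b:

def prioritize(selected, prioritized):
    # Hypothetical stand-alone version of the loop in find_tests().
    selected = list(selected)
    for name in reversed(prioritized):
        try:
            selected.remove(name)
        except ValueError:
            # Mirrors the patch: only already-selected tests are moved;
            # unknown names just produce a warning.
            print(f"warning: --prioritize={name} used"
                  f" but test not actually selected")
            continue
        selected.insert(0, name)
    return selected

# test_a and test_b move to the front in the listed order; test_missing
# was never selected, so it is warned about and not added.
assert prioritize(["test_x", "test_a", "test_y", "test_b"],
                  ["test_a", "test_b", "test_missing"]) == [
    "test_a", "test_b", "test_x", "test_y"]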
-            with suppress_immortalization(runtests.hunt_refleak):
-                self.run_tests_sequentially(runtests)
+            self.run_tests_sequentially(runtests)
 
         coverage = self.results.get_coverage_results()
         self.display_result(runtests)
diff --git a/Lib/test/libregrtest/parallel_case.py b/Lib/test/libregrtest/parallel_case.py
new file mode 100644
index 00000000000..8eb3c314916
--- /dev/null
+++ b/Lib/test/libregrtest/parallel_case.py
@@ -0,0 +1,78 @@
+"""Run a test case multiple times in parallel threads."""
+
+import copy
+import threading
+import unittest
+
+from unittest import TestCase
+
+
+class ParallelTestCase(TestCase):
+    def __init__(self, test_case: TestCase, num_threads: int):
+        self.test_case = test_case
+        self.num_threads = num_threads
+        self._testMethodName = test_case._testMethodName
+        self._testMethodDoc = test_case._testMethodDoc
+
+    def __str__(self):
+        return f"{str(self.test_case)} [threads={self.num_threads}]"
+
+    def run_worker(self, test_case: TestCase, result: unittest.TestResult,
+                   barrier: threading.Barrier):
+        barrier.wait()
+        test_case.run(result)
+
+    def run(self, result=None):
+        if result is None:
+            result = self.defaultTestResult()
+            startTestRun = getattr(result, 'startTestRun', None)
+            stopTestRun = getattr(result, 'stopTestRun', None)
+            if startTestRun is not None:
+                startTestRun()
+        else:
+            stopTestRun = None
+
+        # Called at the beginning of each test. See TestCase.run.
+        result.startTest(self)
+
+        cases = [copy.copy(self.test_case) for _ in range(self.num_threads)]
+        results = [unittest.TestResult() for _ in range(self.num_threads)]
+
+        barrier = threading.Barrier(self.num_threads)
+        threads = []
+        for i, (case, r) in enumerate(zip(cases, results)):
+            thread = threading.Thread(target=self.run_worker,
+                                      args=(case, r, barrier),
+                                      name=f"{str(self.test_case)}-{i}",
+                                      daemon=True)
+            threads.append(thread)
+
+        for thread in threads:
+            thread.start()
+
+        for thread in threads:
+            thread.join()
+
+        # Aggregate test results
+        if all(r.wasSuccessful() for r in results):
+            result.addSuccess(self)
+
+        # Note: We can't call result.addError, result.addFailure, etc. because
+        # we no longer have the original exception, just the string format.
+        for r in results:
+            if len(r.errors) > 0 or len(r.failures) > 0:
+                result._mirrorOutput = True
+            result.errors.extend(r.errors)
+            result.failures.extend(r.failures)
+            result.skipped.extend(r.skipped)
+            result.expectedFailures.extend(r.expectedFailures)
+            result.unexpectedSuccesses.extend(r.unexpectedSuccesses)
+            result.collectedDurations.extend(r.collectedDurations)
+
+        if any(r.shouldStop for r in results):
+            result.stop()
+
+        # Test has finished running
+        result.stopTest(self)
+        if stopTestRun is not None:
+            stopTestRun()
diff --git a/Lib/test/libregrtest/refleak.py b/Lib/test/libregrtest/refleak.py
index 75fcd118d72..5c78515506d 100644
--- a/Lib/test/libregrtest/refleak.py
+++ b/Lib/test/libregrtest/refleak.py
@@ -129,9 +129,9 @@ def get_pooled_int(value):
     xml_filename = 'refleak-xml.tmp'
     result = None
     dash_R_cleanup(fs, ps, pic, zdc, abcs, linecache_data)
-    support.gc_collect()
 
     for i in rep_range:
+        support.gc_collect()
         current = refleak_helper._hunting_for_refleaks
         refleak_helper._hunting_for_refleaks = True
         try:
@@ -253,7 +253,6 @@ def dash_R_cleanup(fs, ps, pic, zdc, abcs, linecache_data):
     zipimport._zip_directory_cache.update(zdc)
 
     # Clear ABC registries, restoring previously saved ABC registries.
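A rough usage sketch for the ParallelTestCase wrapper added above (not part of the patch; the Example class is made up). Each thread gets its own copy of the test case, all copies start together behind a barrier, and their results are folded into the single outer result, which is what --parallel-threads=N does per test method:

import unittest
from test.libregrtest.parallel_case import ParallelTestCase

class Example(unittest.TestCase):
    def test_append(self):
        items = []          # each thread works on its own copy of the case
        items.append(1)
        self.assertEqual(items, [1])

# Run four copies of the same test method concurrently.
case = ParallelTestCase(Example("test_append"), num_threads=4)
unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite([case]))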
- # ignore deprecation warning for collections.abc.ByteString abs_classes = [getattr(collections.abc, a) for a in collections.abc.__all__] abs_classes = filter(isabstract, abs_classes) for abc in abs_classes: diff --git a/Lib/test/libregrtest/result.py b/Lib/test/libregrtest/result.py index 7553efe5e8a..daf7624366e 100644 --- a/Lib/test/libregrtest/result.py +++ b/Lib/test/libregrtest/result.py @@ -1,5 +1,6 @@ import dataclasses import json +from _colorize import get_colors # type: ignore[import-not-found] from typing import Any from .utils import ( @@ -105,54 +106,71 @@ def is_failed(self, fail_env_changed: bool) -> bool: return State.is_failed(self.state) def _format_failed(self): + ansi = get_colors() + red, reset = ansi.BOLD_RED, ansi.RESET if self.errors and self.failures: le = len(self.errors) lf = len(self.failures) error_s = "error" + ("s" if le > 1 else "") failure_s = "failure" + ("s" if lf > 1 else "") - return f"{self.test_name} failed ({le} {error_s}, {lf} {failure_s})" + return ( + f"{red}{self.test_name} failed " + f"({le} {error_s}, {lf} {failure_s}){reset}" + ) if self.errors: le = len(self.errors) error_s = "error" + ("s" if le > 1 else "") - return f"{self.test_name} failed ({le} {error_s})" + return f"{red}{self.test_name} failed ({le} {error_s}){reset}" if self.failures: lf = len(self.failures) failure_s = "failure" + ("s" if lf > 1 else "") - return f"{self.test_name} failed ({lf} {failure_s})" + return f"{red}{self.test_name} failed ({lf} {failure_s}){reset}" - return f"{self.test_name} failed" + return f"{red}{self.test_name} failed{reset}" def __str__(self) -> str: + ansi = get_colors() + green = ansi.GREEN + red = ansi.BOLD_RED + reset = ansi.RESET + yellow = ansi.YELLOW + match self.state: case State.PASSED: - return f"{self.test_name} passed" + return f"{green}{self.test_name} passed{reset}" case State.FAILED: - return self._format_failed() + return f"{red}{self._format_failed()}{reset}" case State.SKIPPED: - return f"{self.test_name} skipped" + return f"{yellow}{self.test_name} skipped{reset}" case State.UNCAUGHT_EXC: - return f"{self.test_name} failed (uncaught exception)" + return ( + f"{red}{self.test_name} failed (uncaught exception){reset}" + ) case State.REFLEAK: - return f"{self.test_name} failed (reference leak)" + return f"{red}{self.test_name} failed (reference leak){reset}" case State.ENV_CHANGED: - return f"{self.test_name} failed (env changed)" + return f"{red}{self.test_name} failed (env changed){reset}" case State.RESOURCE_DENIED: - return f"{self.test_name} skipped (resource denied)" + return f"{yellow}{self.test_name} skipped (resource denied){reset}" case State.INTERRUPTED: - return f"{self.test_name} interrupted" + return f"{yellow}{self.test_name} interrupted{reset}" case State.WORKER_FAILED: - return f"{self.test_name} worker non-zero exit code" + return ( + f"{red}{self.test_name} worker non-zero exit code{reset}" + ) case State.WORKER_BUG: - return f"{self.test_name} worker bug" + return f"{red}{self.test_name} worker bug{reset}" case State.DID_NOT_RUN: - return f"{self.test_name} ran no tests" + return f"{yellow}{self.test_name} ran no tests{reset}" case State.TIMEOUT: assert self.duration is not None, "self.duration is None" return f"{self.test_name} timed out ({format_duration(self.duration)})" case _: - raise ValueError("unknown result state: {state!r}") + raise ValueError( + f"{red}unknown result state: {{state!r}}{reset}" + ) def has_meaningful_duration(self): return State.has_meaningful_duration(self.state) diff --git 
a/Lib/test/libregrtest/results.py b/Lib/test/libregrtest/results.py index 9eda926966d..a35934fc2c9 100644 --- a/Lib/test/libregrtest/results.py +++ b/Lib/test/libregrtest/results.py @@ -1,5 +1,6 @@ import sys import trace +from _colorize import get_colors # type: ignore[import-not-found] from typing import TYPE_CHECKING from .runtests import RunTests @@ -59,19 +60,24 @@ def no_tests_run(self) -> bool: def get_state(self, fail_env_changed: bool) -> str: state = [] + ansi = get_colors() + green = ansi.GREEN + red = ansi.BOLD_RED + reset = ansi.RESET + yellow = ansi.YELLOW if self.bad: - state.append("FAILURE") + state.append(f"{red}FAILURE{reset}") elif fail_env_changed and self.env_changed: - state.append("ENV CHANGED") + state.append(f"{yellow}ENV CHANGED{reset}") elif self.no_tests_run(): - state.append("NO TESTS RAN") + state.append(f"{yellow}NO TESTS RAN{reset}") if self.interrupted: - state.append("INTERRUPTED") + state.append(f"{yellow}INTERRUPTED{reset}") if self.worker_bug: - state.append("WORKER BUG") + state.append(f"{red}WORKER BUG{reset}") if not state: - state.append("SUCCESS") + state.append(f"{green}SUCCESS{reset}") return ', '.join(state) @@ -197,27 +203,51 @@ def write_junit(self, filename: StrPath) -> None: f.write(s) def display_result(self, tests: TestTuple, quiet: bool, print_slowest: bool) -> None: + ansi = get_colors() + green = ansi.GREEN + red = ansi.BOLD_RED + reset = ansi.RESET + yellow = ansi.YELLOW + if print_slowest: self.test_times.sort(reverse=True) print() - print("10 slowest tests:") + print(f"{yellow}10 slowest tests:{reset}") for test_time, test in self.test_times[:10]: - print("- %s: %s" % (test, format_duration(test_time))) + print(f"- {test}: {format_duration(test_time)}") all_tests = [] omitted = set(tests) - self.get_executed() # less important - all_tests.append((sorted(omitted), "test", "{} omitted:")) + all_tests.append( + (sorted(omitted), "test", f"{yellow}{{}} omitted:{reset}") + ) if not quiet: - all_tests.append((self.skipped, "test", "{} skipped:")) - all_tests.append((self.resource_denied, "test", "{} skipped (resource denied):")) - all_tests.append((self.run_no_tests, "test", "{} run no tests:")) + all_tests.append( + (self.skipped, "test", f"{yellow}{{}} skipped:{reset}") + ) + all_tests.append( + ( + self.resource_denied, + "test", + f"{yellow}{{}} skipped (resource denied):{reset}", + ) + ) + all_tests.append( + (self.run_no_tests, "test", f"{yellow}{{}} run no tests:{reset}") + ) # more important - all_tests.append((self.env_changed, "test", "{} altered the execution environment (env changed):")) - all_tests.append((self.rerun, "re-run test", "{}:")) - all_tests.append((self.bad, "test", "{} failed:")) + all_tests.append( + ( + self.env_changed, + "test", + f"{yellow}{{}} altered the execution environment (env changed):{reset}", + ) + ) + all_tests.append((self.rerun, "re-run test", f"{yellow}{{}}:{reset}")) + all_tests.append((self.bad, "test", f"{red}{{}} failed:{reset}")) for tests_list, count_text, title_format in all_tests: if tests_list: @@ -229,26 +259,29 @@ def display_result(self, tests: TestTuple, quiet: bool, print_slowest: bool) -> if self.good and not quiet: print() text = count(len(self.good), "test") - text = f"{text} OK." 
- if (self.is_all_good() and len(self.good) > 1): + text = f"{green}{text} OK.{reset}" + if self.is_all_good() and len(self.good) > 1: text = f"All {text}" print(text) if self.interrupted: print() - print("Test suite interrupted by signal SIGINT.") + print(f"{yellow}Test suite interrupted by signal SIGINT.{reset}") def display_summary(self, first_runtests: RunTests, filtered: bool) -> None: # Total tests + ansi = get_colors() + red, reset, yellow = ansi.RED, ansi.RESET, ansi.YELLOW + stats = self.stats text = f'run={stats.tests_run:,}' if filtered: text = f"{text} (filtered)" report = [text] if stats.failures: - report.append(f'failures={stats.failures:,}') + report.append(f'{red}failures={stats.failures:,}{reset}') if stats.skipped: - report.append(f'skipped={stats.skipped:,}') + report.append(f'{yellow}skipped={stats.skipped:,}{reset}') print(f"Total tests: {' '.join(report)}") # Total test files @@ -263,14 +296,14 @@ def display_summary(self, first_runtests: RunTests, filtered: bool) -> None: if filtered: text = f"{text} (filtered)" report = [text] - for name, tests in ( - ('failed', self.bad), - ('env_changed', self.env_changed), - ('skipped', self.skipped), - ('resource_denied', self.resource_denied), - ('rerun', self.rerun), - ('run_no_tests', self.run_no_tests), + for name, tests, color in ( + ('failed', self.bad, red), + ('env_changed', self.env_changed, yellow), + ('skipped', self.skipped, yellow), + ('resource_denied', self.resource_denied, yellow), + ('rerun', self.rerun, yellow), + ('run_no_tests', self.run_no_tests, yellow), ): if tests: - report.append(f'{name}={len(tests)}') + report.append(f'{color}{name}={len(tests)}{reset}') print(f"Total test files: {' '.join(report)}") diff --git a/Lib/test/libregrtest/run_workers.py b/Lib/test/libregrtest/run_workers.py index 3c6d13215fd..424085a0050 100644 --- a/Lib/test/libregrtest/run_workers.py +++ b/Lib/test/libregrtest/run_workers.py @@ -22,7 +22,7 @@ from .single import PROGRESS_MIN_TIME from .utils import ( StrPath, TestName, - format_duration, print_warning, count, plural, get_signal_name) + format_duration, print_warning, count, plural) from .worker import create_worker_process, USE_PROCESS_GROUP if MS_WINDOWS: @@ -145,14 +145,20 @@ def _kill(self) -> None: return self._killed = True - if USE_PROCESS_GROUP: + use_killpg = USE_PROCESS_GROUP + if use_killpg: + parent_sid = os.getsid(0) + sid = os.getsid(popen.pid) + use_killpg = (sid != parent_sid) + + if use_killpg: what = f"{self} process group" else: what = f"{self} process" print(f"Kill {what}", file=sys.stderr, flush=True) try: - if USE_PROCESS_GROUP: + if use_killpg: os.killpg(popen.pid, signal.SIGKILL) else: popen.kill() @@ -364,7 +370,7 @@ def _runtest(self, test_name: TestName) -> MultiprocessResult: err_msg=None, state=State.TIMEOUT) if retcode != 0: - name = get_signal_name(retcode) + name = support.get_signal_name(retcode) if name: retcode = f"{retcode} ({name})" raise WorkerError(self.test_name, f"Exit code {retcode}", stdout, diff --git a/Lib/test/libregrtest/runtests.py b/Lib/test/libregrtest/runtests.py index 7b607d4a559..759f24fc25e 100644 --- a/Lib/test/libregrtest/runtests.py +++ b/Lib/test/libregrtest/runtests.py @@ -100,13 +100,14 @@ class RunTests: python_cmd: tuple[str, ...] 
| None randomize: bool random_seed: int | str + parallel_threads: int | None def copy(self, **override) -> 'RunTests': state = dataclasses.asdict(self) state.update(override) return RunTests(**state) - def create_worker_runtests(self, **override) -> 'WorkerRunTests': + def create_worker_runtests(self, **override) -> WorkerRunTests: state = dataclasses.asdict(self) state.update(override) return WorkerRunTests(**state) @@ -184,6 +185,8 @@ def bisect_cmd_args(self) -> list[str]: args.extend(("--python", cmd)) if self.randomize: args.append(f"--randomize") + if self.parallel_threads: + args.append(f"--parallel-threads={self.parallel_threads}") args.append(f"--randseed={self.random_seed}") return args diff --git a/Lib/test/libregrtest/save_env.py b/Lib/test/libregrtest/save_env.py index f9bd6d87c4c..0ec12d7c475 100644 --- a/Lib/test/libregrtest/save_env.py +++ b/Lib/test/libregrtest/save_env.py @@ -105,7 +105,7 @@ def get_asyncio_events__event_loop_policy(self): return support.maybe_get_event_loop_policy() def restore_asyncio_events__event_loop_policy(self, policy): asyncio = self.get_module('asyncio') - asyncio.set_event_loop_policy(policy) + asyncio.events._set_event_loop_policy(policy) def get_sys_argv(self): return id(sys.argv), sys.argv, sys.argv[:] diff --git a/Lib/test/libregrtest/setup.py b/Lib/test/libregrtest/setup.py index 704a4ccc681..b9b76a44e3b 100644 --- a/Lib/test/libregrtest/setup.py +++ b/Lib/test/libregrtest/setup.py @@ -13,7 +13,7 @@ from .filter import set_match_tests from .runtests import RunTests from .utils import ( - setup_unraisable_hook, setup_threading_excepthook, fix_umask, + setup_unraisable_hook, setup_threading_excepthook, adjust_rlimit_nofile) @@ -28,8 +28,6 @@ def setup_test_dir(testdir: str | None) -> None: def setup_process() -> None: - fix_umask() - assert sys.__stderr__ is not None, "sys.__stderr__ is None" try: stderr_fd = sys.__stderr__.fileno() @@ -54,7 +52,7 @@ def setup_process() -> None: adjust_rlimit_nofile() - support.record_original_stdout(sys.stdout) # TODO: RUSTPYTHON, figure out why this was disabled in the first place + support.record_original_stdout(sys.stdout) # Set sys.stdout encoder error handler to backslashreplace, # similar to sys.stderr error handler, to avoid UnicodeEncodeError diff --git a/Lib/test/libregrtest/single.py b/Lib/test/libregrtest/single.py index 456eb53543e..3dfb0b01dc1 100644 --- a/Lib/test/libregrtest/single.py +++ b/Lib/test/libregrtest/single.py @@ -1,5 +1,5 @@ import faulthandler -# import gc # TODO: RUSTPYTHON, build a functional garbage collector into the interpreter +import gc import importlib import io import sys @@ -7,6 +7,7 @@ import traceback import unittest +from _colorize import get_colors # type: ignore[import-not-found] from test import support from test.support import threading_helper @@ -16,6 +17,7 @@ from .save_env import saved_test_environment from .setup import setup_tests from .testresult import get_test_runner +from .parallel_case import ParallelTestCase from .utils import ( TestName, clear_caches, remove_testfn, abs_module_name, print_warning) @@ -26,14 +28,17 @@ PROGRESS_MIN_TIME = 30.0 # seconds -def run_unittest(test_mod): +def run_unittest(test_mod, runtests: RunTests): loader = unittest.TestLoader() tests = loader.loadTestsFromModule(test_mod) + for error in loader.errors: print(error, file=sys.stderr) if loader.errors: raise Exception("errors while loading tests") _filter_suite(tests, match_test) + if runtests.parallel_threads: + _parallelize_tests(tests, runtests.parallel_threads) return 
_run_suite(tests) def _filter_suite(suite, pred): @@ -48,6 +53,28 @@ def _filter_suite(suite, pred): newtests.append(test) suite._tests = newtests +def _parallelize_tests(suite, parallel_threads: int): + def is_thread_unsafe(test): + test_method = getattr(test, test._testMethodName) + instance = test_method.__self__ + return (getattr(test_method, "__unittest_thread_unsafe__", False) or + getattr(instance, "__unittest_thread_unsafe__", False)) + + newtests: list[object] = [] + for test in suite._tests: + if isinstance(test, unittest.TestSuite): + _parallelize_tests(test, parallel_threads) + newtests.append(test) + continue + + if is_thread_unsafe(test): + # Don't parallelize thread-unsafe tests + newtests.append(test) + continue + + newtests.append(ParallelTestCase(test, parallel_threads)) + suite._tests = newtests + def _run_suite(suite): """Run tests from a unittest.TestSuite-derived class.""" runner = get_test_runner(sys.stdout, @@ -132,7 +159,7 @@ def _load_run_test(result: TestResult, runtests: RunTests) -> None: raise Exception(f"Module {test_name} defines test_main() which " f"is no longer supported by regrtest") def test_func(): - return run_unittest(test_mod) + return run_unittest(test_mod, runtests) try: regrtest_runner(result, test_func, runtests) @@ -145,15 +172,16 @@ def test_func(): remove_testfn(test_name, runtests.verbose) - # if gc.garbage: # TODO: RUSTPYTHON, build a functional garbage collector into the interpreter - # support.environment_altered = True # TODO: RUSTPYTHON, build a functional garbage collector into the interpreter - # print_warning(f"{test_name} created {len(gc.garbage)} " # TODO: RUSTPYTHON, build a functional garbage collector into the interpreter - # f"uncollectable object(s)") # TODO: RUSTPYTHON, build a functional garbage collector into the interpreter + # XXX: RUSTPYTHON, build a functional garbage collector into the interpreter + # if gc.garbage: + # support.environment_altered = True + # print_warning(f"{test_name} created {len(gc.garbage)} " + # f"uncollectable object(s)") - # # move the uncollectable objects somewhere, # TODO: RUSTPYTHON, build a functional garbage collector into the interpreter - # # so we don't see them again # TODO: RUSTPYTHON, build a functional garbage collector into the interpreter - # GC_GARBAGE.extend(gc.garbage) # TODO: RUSTPYTHON, build a functional garbage collector into the interpreter - # gc.garbage.clear() # TODO: RUSTPYTHON, build a functional garbage collector into the interpreter + # # move the uncollectable objects somewhere, + # # so we don't see them again + # GC_GARBAGE.extend(gc.garbage) + # gc.garbage.clear() support.reap_children() @@ -161,6 +189,8 @@ def test_func(): def _runtest_env_changed_exc(result: TestResult, runtests: RunTests, display_failure: bool = True) -> None: # Handle exceptions, detect environment changes. 
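A hedged sketch of the opt-out that _parallelize_tests() above checks for: a test method or class carrying the __unittest_thread_unsafe__ attribute is left unwrapped and still runs single-threaded under --parallel-threads=N. The thread_unsafe decorator below is illustrative only; the patch merely reads the attribute:

import unittest

def thread_unsafe(func):
    # Mark a test method so _parallelize_tests() leaves it alone.
    func.__unittest_thread_unsafe__ = True
    return func

class Example(unittest.TestCase):
    @thread_unsafe
    def test_touches_global_state(self):
        ...  # runs once, never wrapped in ParallelTestCase

    def test_pure_computation(self):
        ...  # wrapped and run in N threads when --parallel-threads=N is given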
+ stdout = get_colors(file=sys.stdout) + stderr = get_colors(file=sys.stderr) # Reset the environment_altered flag to detect if a test altered # the environment @@ -176,23 +206,29 @@ def _runtest_env_changed_exc(result: TestResult, runtests: RunTests, clear_caches() support.gc_collect() - with saved_test_environment(test_name, # TODO: RUSTPYTHON, figure out why this was disabled in the first place - runtests.verbose, quiet, pgo=pgo): # TODO: RUSTPYTHON, figure out why this was disabled in the first place - _load_run_test(result, runtests) # TODO: RUSTPYTHON, figure out why this was disabled in the first place + with saved_test_environment(test_name, + runtests.verbose, quiet, pgo=pgo): + _load_run_test(result, runtests) except support.ResourceDenied as exc: if not quiet and not pgo: - print(f"{test_name} skipped -- {exc}", flush=True) + print( + f"{stdout.YELLOW}{test_name} skipped -- {exc}{stdout.RESET}", + flush=True, + ) result.state = State.RESOURCE_DENIED return except unittest.SkipTest as exc: if not quiet and not pgo: - print(f"{test_name} skipped -- {exc}", flush=True) + print( + f"{stdout.YELLOW}{test_name} skipped -- {exc}{stdout.RESET}", + flush=True, + ) result.state = State.SKIPPED return except support.TestFailedWithDetails as exc: - msg = f"test {test_name} failed" + msg = f"{stderr.RED}test {test_name} failed{stderr.RESET}" if display_failure: - msg = f"{msg} -- {exc}" + msg = f"{stderr.RED}{msg} -- {exc}{stderr.RESET}" print(msg, file=sys.stderr, flush=True) result.state = State.FAILED result.errors = exc.errors @@ -200,9 +236,9 @@ def _runtest_env_changed_exc(result: TestResult, runtests: RunTests, result.stats = exc.stats return except support.TestFailed as exc: - msg = f"test {test_name} failed" + msg = f"{stderr.RED}test {test_name} failed{stderr.RESET}" if display_failure: - msg = f"{msg} -- {exc}" + msg = f"{stderr.RED}{msg} -- {exc}{stderr.RESET}" print(msg, file=sys.stderr, flush=True) result.state = State.FAILED result.stats = exc.stats @@ -217,8 +253,11 @@ def _runtest_env_changed_exc(result: TestResult, runtests: RunTests, except: if not pgo: msg = traceback.format_exc() - print(f"test {test_name} crashed -- {msg}", - file=sys.stderr, flush=True) + print( + f"{stderr.RED}test {test_name} crashed -- {msg}{stderr.RESET}", + file=sys.stderr, + flush=True, + ) result.state = State.UNCAUGHT_EXC return @@ -300,18 +339,18 @@ def run_single_test(test_name: TestName, runtests: RunTests) -> TestResult: If runtests.use_junit, xml_data is a list containing each generated testsuite element. """ + ansi = get_colors(file=sys.stderr) + red, reset, yellow = ansi.BOLD_RED, ansi.RESET, ansi.YELLOW + start_time = time.perf_counter() result = TestResult(test_name) pgo = runtests.pgo try: - # gh-117783: don't immortalize deferred objects when tracking - # refleaks. Only releveant for the free-threaded build. - with support.suppress_immortalization(runtests.hunt_refleak): - _runtest(result, runtests) + _runtest(result, runtests) except: if not pgo: msg = traceback.format_exc() - print(f"test {test_name} crashed -- {msg}", + print(f"{red}test {test_name} crashed -- {msg}{reset}", file=sys.stderr, flush=True) result.state = State.UNCAUGHT_EXC diff --git a/Lib/test/libregrtest/tsan.py b/Lib/test/libregrtest/tsan.py index 0c0ab20fa0b..d984a735bdf 100644 --- a/Lib/test/libregrtest/tsan.py +++ b/Lib/test/libregrtest/tsan.py @@ -2,10 +2,13 @@ # chosen because they use threads and run in a reasonable amount of time. 
TSAN_TESTS = [ + 'test_asyncio', # TODO: enable more of test_capi once bugs are fixed (GH-116908, GH-116909). 'test_capi.test_mem', 'test_capi.test_pyatomic', 'test_code', + 'test_ctypes', + # 'test_concurrent_futures', # gh-130605: too many data races 'test_enum', 'test_functools', 'test_httpservers', @@ -21,14 +24,28 @@ 'test_ssl', 'test_syslog', 'test_thread', + 'test_thread_local_bytecode', 'test_threadedtempfile', 'test_threading', 'test_threading_local', 'test_threadsignals', 'test_weakref', + 'test_free_threading', +] + +# Tests that should be run with `--parallel-threads=N` under TSAN. These tests +# typically do not use threads, but are run multiple times in parallel by +# the regression test runner with the `--parallel-threads` option enabled. +TSAN_PARALLEL_TESTS = [ + 'test_abc', + 'test_hashlib', ] def setup_tsan_tests(cmdline_args) -> None: if not cmdline_args: cmdline_args[:] = TSAN_TESTS[:] + +def setup_tsan_parallel_tests(cmdline_args) -> None: + if not cmdline_args: + cmdline_args[:] = TSAN_PARALLEL_TESTS[:] diff --git a/Lib/test/libregrtest/utils.py b/Lib/test/libregrtest/utils.py index 6500fd10ce0..d94fb84a743 100644 --- a/Lib/test/libregrtest/utils.py +++ b/Lib/test/libregrtest/utils.py @@ -7,7 +7,6 @@ import random import re import shlex -import signal import subprocess import sys import sysconfig @@ -242,8 +241,7 @@ def clear_caches(): except KeyError: pass else: - # struct._clearcache() # TODO: RUSTPYTHON, investigate why this was disabled in the first place - pass + struct._clearcache() try: doctest = sys.modules['doctest'] @@ -337,43 +335,11 @@ def get_build_info(): build.append('with_assert') # --enable-experimental-jit - tier2 = re.search('-D_Py_TIER2=([0-9]+)', cflags) - if tier2: - tier2 = int(tier2.group(1)) - - if not sys.flags.ignore_environment: - PYTHON_JIT = os.environ.get('PYTHON_JIT', None) - if PYTHON_JIT: - PYTHON_JIT = (PYTHON_JIT != '0') - else: - PYTHON_JIT = None - - if tier2 == 1: # =yes - if PYTHON_JIT == False: - jit = 'JIT=off' - else: - jit = 'JIT' - elif tier2 == 3: # =yes-off - if PYTHON_JIT: - jit = 'JIT' - else: - jit = 'JIT=off' - elif tier2 == 4: # =interpreter - if PYTHON_JIT == False: - jit = 'JIT-interpreter=off' - else: - jit = 'JIT-interpreter' - elif tier2 == 6: # =interpreter-off (Secret option!) - if PYTHON_JIT: - jit = 'JIT-interpreter' + if sys._jit.is_available(): + if sys._jit.is_enabled(): + build.append("JIT") else: - jit = 'JIT-interpreter=off' - elif '-D_Py_JIT' in cflags: - jit = 'JIT' - else: - jit = None - if jit: - build.append(jit) + build.append("JIT (disabled)") # --enable-framework=name framework = sysconfig.get_config_var('PYTHONFRAMEWORK') @@ -479,17 +445,6 @@ def get_temp_dir(tmp_dir: StrPath | None = None) -> StrPath: return os.path.abspath(tmp_dir) -def fix_umask() -> None: - if support.is_emscripten: - # Emscripten has default umask 0o777, which breaks some tests. - # see https://github.com/emscripten-core/emscripten/issues/17269 - old_mask = os.umask(0) - if old_mask == 0o777: - os.umask(0o027) - else: - os.umask(old_mask) - - def get_work_dir(parent_dir: StrPath, worker: bool = False) -> StrPath: # Define a writable temp dir that will be used as cwd while running # the tests. The name of the dir includes the pid to allow parallel @@ -661,12 +616,8 @@ def display_header(use_resources: tuple[str, ...], python_cmd: tuple[str, ...] 
| None) -> None: # Print basic platform information print("==", platform.python_implementation(), *sys.version.split()) - try: - print("==", platform.platform(aliased=True), - "%s-endian" % sys.byteorder) - except Exception as e: - print("==", f"Error: {e}") - print("==", "TODO: RUSTPYTHON, Need to fix platform.platform") + print("==", platform.platform(aliased=True), + "%s-endian" % sys.byteorder) print("== Python build:", ' '.join(get_build_info())) print("== cwd:", os.getcwd()) @@ -677,12 +628,8 @@ def display_header(use_resources: tuple[str, ...], if process_cpu_count and process_cpu_count != cpu_count: cpu_count = f"{process_cpu_count} (process) / {cpu_count} (system)" print("== CPU count:", cpu_count) - try: - print("== encodings: locale=%s FS=%s" - % (locale.getencoding(), sys.getfilesystemencoding())) - except Exception as e: - print("==", f"Error: {e}") - print("==", "TODO: RUSTPYTHON, Need to fix encoding stuff") + print("== encodings: locale=%s FS=%s" + % (locale.getencoding(), sys.getfilesystemencoding())) if use_resources: text = format_resources(use_resources) @@ -757,35 +704,6 @@ def cleanup_temp_dir(tmp_dir: StrPath) -> None: print("Remove file: %s" % name) os_helper.unlink(name) -WINDOWS_STATUS = { - 0xC0000005: "STATUS_ACCESS_VIOLATION", - 0xC00000FD: "STATUS_STACK_OVERFLOW", - 0xC000013A: "STATUS_CONTROL_C_EXIT", -} - -def get_signal_name(exitcode): - if exitcode < 0: - signum = -exitcode - try: - return signal.Signals(signum).name - except ValueError: - pass - - # Shell exit code (ex: WASI build) - if 128 < exitcode < 256: - signum = exitcode - 128 - try: - return signal.Signals(signum).name - except ValueError: - pass - - try: - return WINDOWS_STATUS[exitcode] - except KeyError: - pass - - return None - ILLEGAL_XML_CHARS_RE = re.compile( '[' diff --git a/Lib/test/libregrtest/worker.py b/Lib/test/libregrtest/worker.py index e0627c4adc0..1ad67e1cebf 100644 --- a/Lib/test/libregrtest/worker.py +++ b/Lib/test/libregrtest/worker.py @@ -15,6 +15,9 @@ USE_PROCESS_GROUP = (hasattr(os, "setsid") and hasattr(os, "killpg")) +NEED_TTY = { + 'test_ioctl', +} def create_worker_process(runtests: WorkerRunTests, output_fd: int, @@ -54,9 +57,21 @@ def create_worker_process(runtests: WorkerRunTests, output_fd: int, close_fds=True, cwd=work_dir, ) - if USE_PROCESS_GROUP: + + # Don't use setsid() in tests using TTY + test_name = runtests.tests[0] + if USE_PROCESS_GROUP and test_name not in NEED_TTY: kwargs['start_new_session'] = True + # Include the test name in the TSAN log file name + if 'TSAN_OPTIONS' in env: + parts = env['TSAN_OPTIONS'].split(' ') + for i, part in enumerate(parts): + if part.startswith('log_path='): + parts[i] = f'{part}.{test_name}' + break + env['TSAN_OPTIONS'] = ' '.join(parts) + # Pass json_file to the worker process json_file = runtests.json_file json_file.configure_subprocess(kwargs) diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py index 21b0edfd073..dd61b051354 100755 --- a/Lib/test/regrtest.py +++ b/Lib/test/regrtest.py @@ -6,13 +6,10 @@ Run this script with -h or --help for documentation. 
""" -# We import importlib *ASAP* in order to test #15386 -import importlib - import os import sys -from test.libregrtest import main +from test.libregrtest.main import main # Alias for backward compatibility (just in case) main_in_temp_cwd = main diff --git a/Lib/test/regrtestdata/import_from_tests/test_regrtest_a.py b/Lib/test/regrtestdata/import_from_tests/test_regrtest_a.py new file mode 100644 index 00000000000..9c3d0c7cf4b --- /dev/null +++ b/Lib/test/regrtestdata/import_from_tests/test_regrtest_a.py @@ -0,0 +1,11 @@ +import sys +import unittest +import test_regrtest_b.util + +class Test(unittest.TestCase): + def test(self): + test_regrtest_b.util # does not fail + self.assertIn('test_regrtest_a', sys.modules) + self.assertIs(sys.modules['test_regrtest_b'], test_regrtest_b) + self.assertIs(sys.modules['test_regrtest_b.util'], test_regrtest_b.util) + self.assertNotIn('test_regrtest_c', sys.modules) diff --git a/Lib/test/regrtestdata/import_from_tests/test_regrtest_b/__init__.py b/Lib/test/regrtestdata/import_from_tests/test_regrtest_b/__init__.py new file mode 100644 index 00000000000..3dfba253455 --- /dev/null +++ b/Lib/test/regrtestdata/import_from_tests/test_regrtest_b/__init__.py @@ -0,0 +1,9 @@ +import sys +import unittest + +class Test(unittest.TestCase): + def test(self): + self.assertNotIn('test_regrtest_a', sys.modules) + self.assertIn('test_regrtest_b', sys.modules) + self.assertNotIn('test_regrtest_b.util', sys.modules) + self.assertNotIn('test_regrtest_c', sys.modules) diff --git a/Lib/test/regrtestdata/import_from_tests/test_regrtest_b/util.py b/Lib/test/regrtestdata/import_from_tests/test_regrtest_b/util.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/Lib/test/regrtestdata/import_from_tests/test_regrtest_c.py b/Lib/test/regrtestdata/import_from_tests/test_regrtest_c.py new file mode 100644 index 00000000000..de80769118d --- /dev/null +++ b/Lib/test/regrtestdata/import_from_tests/test_regrtest_c.py @@ -0,0 +1,11 @@ +import sys +import unittest +import test_regrtest_b.util + +class Test(unittest.TestCase): + def test(self): + test_regrtest_b.util # does not fail + self.assertNotIn('test_regrtest_a', sys.modules) + self.assertIs(sys.modules['test_regrtest_b'], test_regrtest_b) + self.assertIs(sys.modules['test_regrtest_b.util'], test_regrtest_b.util) + self.assertIn('test_regrtest_c', sys.modules) diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py index ba038f18b4c..ee1d479b884 100644 --- a/Lib/test/test_regrtest.py +++ b/Lib/test/test_regrtest.py @@ -4,6 +4,7 @@ Note: test_regrtest cannot be run twice in parallel. """ +import _colorize import contextlib import dataclasses import glob @@ -21,10 +22,12 @@ import tempfile import textwrap import unittest +import unittest.mock from xml.etree import ElementTree from test import support -from test.support import os_helper, requires_jit_disabled +from test.support import import_helper +from test.support import os_helper from test.libregrtest import cmdline from test.libregrtest import main from test.libregrtest import setup @@ -818,7 +821,6 @@ def run_python(self, args, isolated=True, **kw): class CheckActualTests(BaseTestCase): - @unittest.expectedFailure # TODO: RUSTPYTHON def test_finds_expected_number_of_tests(self): """ Check that regrtest appears to find the expected set of tests. 
@@ -875,7 +877,6 @@ def run_tests(self, args, env=None, isolated=True): output = self.run_python(args, env=env, isolated=isolated) self.check_output(output) - @unittest.expectedFailure # TODO: RUSTPYTHON def test_script_regrtest(self): # Lib/test/regrtest.py script = os.path.join(self.testdir, 'regrtest.py') @@ -883,28 +884,27 @@ def test_script_regrtest(self): args = [*self.python_args, script, *self.regrtest_args, *self.tests] self.run_tests(args) - @unittest.skip('TODO: RUSTPYTHON flaky') + @unittest.skip("TODO: RUSTPYTHON; flaky") def test_module_test(self): # -m test args = [*self.python_args, '-m', 'test', *self.regrtest_args, *self.tests] self.run_tests(args) - @unittest.expectedFailure # TODO: RUSTPYTHON def test_module_regrtest(self): # -m test.regrtest args = [*self.python_args, '-m', 'test.regrtest', *self.regrtest_args, *self.tests] self.run_tests(args) - @unittest.skip('TODO: RUSTPYTHON flaky') + @unittest.skip("TODO: RUSTPYTHON; flaky") def test_module_autotest(self): # -m test.autotest args = [*self.python_args, '-m', 'test.autotest', *self.regrtest_args, *self.tests] self.run_tests(args) - @unittest.skip('TODO: RUSTPYTHON flaky') + @unittest.skip("TODO: RUSTPYTHON; flaky") def test_module_from_test_autotest(self): # from test import autotest code = 'from test import autotest' @@ -912,7 +912,7 @@ def test_module_from_test_autotest(self): *self.regrtest_args, *self.tests] self.run_tests(args) - @unittest.skip('TODO: RUSTPYTHON flaky') + @unittest.skip("TODO: RUSTPYTHON; flaky") def test_script_autotest(self): # Lib/test/autotest.py script = os.path.join(self.testdir, 'autotest.py') @@ -1202,7 +1202,7 @@ def test_coverage(self): output = self.run_tests("--coverage", test) self.check_executed_tests(output, [test], stats=1) regex = (r'lines +cov% +module +\(path\)\n' - r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+') + r'(?: *[0-9]+ *[0-9]{1,2}\.[0-9]% *[^ ]+ +\([^)]+\)+)+') self.check_line(output, regex) def test_wait(self): @@ -1245,7 +1245,7 @@ def test_run(self): stats=TestStats(4, 1), forever=True) - @requires_jit_disabled + @support.requires_jit_disabled def check_leak(self, code, what, *, run_workers=False): test = self.create_test('huntrleaks', code=code) @@ -1813,10 +1813,9 @@ def test_other_bug(self): @support.cpython_only def test_uncollectable(self): - try: - import _testcapi - except ImportError: - raise unittest.SkipTest("requires _testcapi") + # Skip test if _testcapi is missing + import_helper.import_module('_testcapi') + code = textwrap.dedent(r""" import _testcapi import gc @@ -2004,7 +2003,7 @@ def test_cleanup(self): for name in names: self.assertFalse(os.path.exists(name), name) - @unittest.skip('TODO: RUSTPYTHON flaky') + @unittest.skip("TODO: RUSTPYTHON; flaky") @unittest.skipIf(support.is_wasi, 'checking temp files is not implemented on WASI') def test_leak_tmp_file(self): @@ -2156,11 +2155,11 @@ def test_randint(self): def test_random_seed(self): self._check_random_seed(run_workers=False) - @unittest.skip('TODO: RUSTPYTHON flaky') + @unittest.skip("TODO: RUSTPYTHON; flaky") def test_random_seed_workers(self): self._check_random_seed(run_workers=True) - @unittest.skip('TODO: RUSTPYTHON flaky') + @unittest.skip("TODO: RUSTPYTHON; flaky") def test_python_command(self): code = textwrap.dedent(r""" import sys @@ -2182,7 +2181,6 @@ def test_dev_mode(self): self.check_executed_tests(output, tests, stats=len(tests), parallel=True) - @unittest.expectedFailure # TODO: RUSTPYTHON def test_unload_tests(self): # Test that unloading test modules does not break tests 
# that import from other tests. @@ -2204,34 +2202,34 @@ def test_unload_tests(self): def check_add_python_opts(self, option): # --fast-ci and --slow-ci add "-u -W default -bb -E" options to Python - try: - import _testinternalcapi - except ImportError: - raise unittest.SkipTest("requires _testinternalcapi") + + # Skip test if _testinternalcapi is missing + import_helper.import_module('_testinternalcapi') + code = textwrap.dedent(r""" import sys import unittest from test import support try: - from _testinternalcapi import get_config + from _testcapi import config_get except ImportError: - get_config = None + config_get = None # WASI/WASM buildbots don't use -E option use_environment = (support.is_emscripten or support.is_wasi) class WorkerTests(unittest.TestCase): - @unittest.skipUnless(get_config is None, 'need get_config()') + @unittest.skipUnless(config_get is None, 'need config_get()') def test_config(self): - config = get_config()['config'] + config = config_get() # -u option - self.assertEqual(config['buffered_stdio'], 0) + self.assertEqual(config_get('buffered_stdio'), 0) # -W default option - self.assertTrue(config['warnoptions'], ['default']) + self.assertTrue(config_get('warnoptions'), ['default']) # -bb option - self.assertTrue(config['bytes_warning'], 2) + self.assertTrue(config_get('bytes_warning'), 2) # -E option - self.assertTrue(config['use_environment'], use_environment) + self.assertTrue(config_get('use_environment'), use_environment) def test_python_opts(self): # -u option @@ -2270,10 +2268,8 @@ def test_add_python_opts(self): @unittest.skipIf(support.is_android, 'raising SIGSEGV on Android is unreliable') def test_worker_output_on_failure(self): - try: - from faulthandler import _sigsegv - except ImportError: - self.skipTest("need faulthandler._sigsegv") + # Skip test if faulthandler is missing + import_helper.import_module('faulthandler') code = textwrap.dedent(r""" import faulthandler @@ -2446,16 +2442,6 @@ def test_normalize_test_name(self): self.assertIsNone(normalize('setUpModule (test.test_x)', is_error=True)) self.assertIsNone(normalize('tearDownModule (test.test_module)', is_error=True)) - def test_get_signal_name(self): - for exitcode, expected in ( - (-int(signal.SIGINT), 'SIGINT'), - (-int(signal.SIGSEGV), 'SIGSEGV'), - (128 + int(signal.SIGABRT), 'SIGABRT'), - (3221225477, "STATUS_ACCESS_VIOLATION"), - (0xC00000FD, "STATUS_STACK_OVERFLOW"), - ): - self.assertEqual(utils.get_signal_name(exitcode), expected, exitcode) - def test_format_resources(self): format_resources = utils.format_resources ALL_RESOURCES = utils.ALL_RESOURCES @@ -2614,5 +2600,50 @@ def test_sanitize_xml(self): 'valid t\xe9xt \u20ac') +from test.libregrtest.results import TestResults + + +class TestColorized(unittest.TestCase): + def test_test_result_get_state(self): + # Arrange + green = _colorize.ANSIColors.GREEN + red = _colorize.ANSIColors.BOLD_RED + reset = _colorize.ANSIColors.RESET + yellow = _colorize.ANSIColors.YELLOW + + good_results = TestResults() + good_results.good = ["good1", "good2"] + bad_results = TestResults() + bad_results.bad = ["bad1", "bad2"] + no_results = TestResults() + no_results.bad = [] + interrupted_results = TestResults() + interrupted_results.interrupted = True + interrupted_worker_bug = TestResults() + interrupted_worker_bug.interrupted = True + interrupted_worker_bug.worker_bug = True + + for results, expected in ( + (good_results, f"{green}SUCCESS{reset}"), + (bad_results, f"{red}FAILURE{reset}"), + (no_results, f"{yellow}NO TESTS RAN{reset}"), + 
(interrupted_results, f"{yellow}INTERRUPTED{reset}"), + ( + interrupted_worker_bug, + f"{yellow}INTERRUPTED{reset}, {red}WORKER BUG{reset}", + ), + ): + with self.subTest(results=results, expected=expected): + # Act + with unittest.mock.patch( + "_colorize.can_colorize", return_value=True + ): + result = results.get_state(fail_env_changed=False) + + # Assert + self.assertEqual(result, expected) + + if __name__ == '__main__': + setup.setup_process() unittest.main() diff --git a/crates/vm/src/stdlib/winapi.rs b/crates/vm/src/stdlib/winapi.rs index 7279f9776e2..5145989e97f 100644 --- a/crates/vm/src/stdlib/winapi.rs +++ b/crates/vm/src/stdlib/winapi.rs @@ -466,7 +466,15 @@ mod _winapi { } #[pyfunction] - fn WaitForSingleObject(h: WinHandle, ms: u32, vm: &VirtualMachine) -> PyResult { + fn WaitForSingleObject(h: WinHandle, ms: i64, vm: &VirtualMachine) -> PyResult { + // Negative values (e.g., -1) map to INFINITE (0xFFFFFFFF) + let ms = if ms < 0 { + windows_sys::Win32::System::Threading::INFINITE + } else if ms > u32::MAX as i64 { + return Err(vm.new_overflow_error("timeout value is too large".to_owned())); + } else { + ms as u32 + }; let ret = unsafe { windows_sys::Win32::System::Threading::WaitForSingleObject(h.0, ms) }; if ret == windows_sys::Win32::Foundation::WAIT_FAILED { Err(vm.new_last_os_error())
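A small Python mirror of the new timeout conversion in WaitForSingleObject above (illustrative only; the function name is made up): negative values now mean wait forever, and anything above u32::MAX is rejected with an overflow error:

INFINITE = 0xFFFF_FFFF  # windows_sys INFINITE

def normalize_timeout(ms: int) -> int:
    # Mirrors the Rust conversion added to WaitForSingleObject.
    if ms < 0:
        return INFINITE
    if ms > 0xFFFF_FFFF:
        raise OverflowError("timeout value is too large")
    return ms

assert normalize_timeout(-1) == INFINITE
assert normalize_timeout(500) == 500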