Skip to content

Commit

Permalink
pythongh-109162: libregrtest: add worker.py
Browse files Browse the repository at this point in the history
Add new worker.py file:

* Move create_worker_process() and worker_process() to this file.
* Add main() function to worker.py. create_worker_process() now
  runs the command: "python -m test.libregrtest.worker JSON".
* create_worker_process() now starts the worker process in the
  current working directory. Regrtest now converts the filename of
  the -R command line option (reflog.txt) to an absolute path.
* Remove --worker-json command line option.
  Remove test_regrtest.test_worker_json().

Related changes:

* Add write_json() and from_json() methods to TestResult.
* Rename select_temp_dir() to get_temp_dir() and move it to utils.
* Rename make_temp_dir() to get_work_dir() and move it to utils.
  It no longer calls os.makedirs(): Regrtest.main() now calls it.
* Move fix_umask() to utils. The function is now called by
  setup_tests().
* Move StrPath to utils.
* Add exit_timeout() context manager to utils.
* RunTests: Replace junit_filename (StrPath) with use_junit (bool).
  • Loading branch information
vstinner committed Sep 10, 2023
1 parent e55aab9 commit 99b345b
Show file tree
Hide file tree
Showing 10 changed files with 238 additions and 213 deletions.
1 change: 0 additions & 1 deletion Lib/test/libregrtest/cmdline.py
Original file line number Diff line number Diff line change
Expand Up @@ -216,7 +216,6 @@ def _create_parser():
group.add_argument('--wait', action='store_true',
help='wait for user input, e.g., allow a debugger '
'to be attached')
group.add_argument('--worker-json', metavar='ARGS')
group.add_argument('-S', '--start', metavar='START',
help='the name of the test at which to start.' +
more_details)
Expand Down
108 changes: 16 additions & 92 deletions Lib/test/libregrtest/main.py
Original file line number Diff line number Diff line change
@@ -1,34 +1,27 @@
import faulthandler
import locale
import os
import platform
import random
import re
import sys
import sysconfig
import tempfile
import time
import unittest

from test import support
from test.support import os_helper

from test.libregrtest.cmdline import _parse_args, Namespace
from test.libregrtest.logger import Logger
from test.libregrtest.runtest import (
findtests, split_test_packages, run_single_test, abs_module_name,
PROGRESS_MIN_TIME, State, RunTests, HuntRefleak,
FilterTuple, TestList, StrPath, StrJSON, TestName)
FilterTuple, TestList, StrJSON, TestName)
from test.libregrtest.setup import setup_tests, setup_test_dir
from test.libregrtest.pgo import setup_pgo_tests
from test.libregrtest.results import TestResults
from test.libregrtest.utils import (strip_py_suffix, count, format_duration,
printlist, get_build_info)
from test import support
from test.support import os_helper
from test.support import threading_helper


# bpo-38203: Maximum delay in seconds to exit Python (call Py_Finalize()).
# Used to protect against threading._shutdown() hang.
# Must be smaller than buildbot "1200 seconds without output" limit.
EXIT_TIMEOUT = 120.0
from test.libregrtest.utils import (
strip_py_suffix, count, format_duration, StrPath,
printlist, get_build_info, get_temp_dir, get_work_dir, exit_timeout)


class Regrtest:
Expand Down Expand Up @@ -104,7 +97,9 @@ def __init__(self, ns: Namespace):
self.verbose: bool = ns.verbose
self.quiet: bool = ns.quiet
if ns.huntrleaks:
self.hunt_refleak: HuntRefleak = HuntRefleak(*ns.huntrleaks)
warmups, runs, filename = ns.huntrleaks
filename = os.path.abspath(filename)
self.hunt_refleak: HuntRefleak = HuntRefleak(warmups, runs, filename)
else:
self.hunt_refleak = None
self.test_dir: StrPath | None = ns.testdir
Expand Down Expand Up @@ -454,64 +449,6 @@ def display_summary(self):
state = self.get_state()
print(f"Result: {state}")

@staticmethod
def fix_umask():
if support.is_emscripten:
# Emscripten has default umask 0o777, which breaks some tests.
# see https://github.com/emscripten-core/emscripten/issues/17269
old_mask = os.umask(0)
if old_mask == 0o777:
os.umask(0o027)
else:
os.umask(old_mask)

@staticmethod
def select_temp_dir(tmp_dir):
if tmp_dir:
tmp_dir = os.path.expanduser(tmp_dir)
else:
# When tests are run from the Python build directory, it is best practice
# to keep the test files in a subfolder. This eases the cleanup of leftover
# files using the "make distclean" command.
if sysconfig.is_python_build():
tmp_dir = sysconfig.get_config_var('abs_builddir')
if tmp_dir is None:
# bpo-30284: On Windows, only srcdir is available. Using
# abs_builddir mostly matters on UNIX when building Python
# out of the source tree, especially when the source tree
# is read only.
tmp_dir = sysconfig.get_config_var('srcdir')
tmp_dir = os.path.join(tmp_dir, 'build')
else:
tmp_dir = tempfile.gettempdir()

return os.path.abspath(tmp_dir)

def is_worker(self):
    """Report whether this process runs as a test worker.

    Workers are spawned with worker JSON arguments; the main process
    has no worker JSON.
    """
    return self.worker_json is not None

@staticmethod
def make_temp_dir(tmp_dir: StrPath, is_worker: bool):
    """Compute a per-process working directory path under *tmp_dir*.

    Creates *tmp_dir* itself if needed, then returns the path of a
    process-specific subdirectory (which is NOT created here).  The
    directory name embeds the pid so parallel runs (-j option) do not
    collide, and a non-ASCII character to exercise non-ASCII path
    handling in the tests.
    """
    os.makedirs(tmp_dir, exist_ok=True)

    # Emscripten and WASI have stubbed getpid(), and Emscripten has only
    # millisecond clock resolution: fall back to a random nonce there.
    if sys.platform in {"emscripten", "wasi"}:
        nonce = random.randint(0, 1_000_000)
    else:
        nonce = os.getpid()

    template = 'test_python_worker_{}' if is_worker else 'test_python_{}'
    name = template.format(nonce) + os_helper.FS_NONASCII
    return os.path.join(tmp_dir, name)

@staticmethod
def cleanup_temp_dir(tmp_dir: StrPath):
import glob
Expand All @@ -534,17 +471,16 @@ def main(self, tests: TestList | None = None):

strip_py_suffix(self.cmdline_args)

self.tmp_dir = self.select_temp_dir(self.tmp_dir)

self.fix_umask()
self.tmp_dir = get_temp_dir(self.tmp_dir)

if self.want_cleanup:
self.cleanup_temp_dir(self.tmp_dir)
sys.exit(0)

work_dir = self.make_temp_dir(self.tmp_dir, self.is_worker())
os.makedirs(self.tmp_dir, exist_ok=True)
work_dir = get_work_dir(parent_dir=self.tmp_dir)

try:
with exit_timeout():
# Run the tests in a context manager that temporarily changes the
# CWD to a temporary and writable directory. If it's not possible
# to create or change the CWD, the original CWD will be used.
Expand All @@ -556,13 +492,6 @@ def main(self, tests: TestList | None = None):
# processes.

self._main()
except SystemExit as exc:
# bpo-38203: Python can hang at exit in Py_Finalize(), especially
# on threading._shutdown() call: put a timeout
if threading_helper.can_start_thread:
faulthandler.dump_traceback_later(EXIT_TIMEOUT, exit=True)

sys.exit(exc.code)

def create_run_tests(self):
return RunTests(
Expand All @@ -579,7 +508,7 @@ def create_run_tests(self):
quiet=self.quiet,
hunt_refleak=self.hunt_refleak,
test_dir=self.test_dir,
junit_filename=self.junit_filename,
use_junit=(self.junit_filename is not None),
memory_limit=self.memory_limit,
gc_threshold=self.gc_threshold,
use_resources=self.use_resources,
Expand Down Expand Up @@ -634,11 +563,6 @@ def run_tests(self) -> int:
self.fail_rerun)

def _main(self):
if self.is_worker():
from test.libregrtest.runtest_mp import worker_process
worker_process(self.worker_json)
return

if self.want_wait:
input("Press any key to continue...")

Expand Down
1 change: 0 additions & 1 deletion Lib/test/libregrtest/refleak.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,6 @@ def get_pooled_int(value):
warmups = hunt_refleak.warmups
runs = hunt_refleak.runs
filename = hunt_refleak.filename
filename = os.path.join(os_helper.SAVEDCWD, filename)
repcount = warmups + runs

# Pre-allocate to ensure that the loop doesn't allocate anything new
Expand Down
5 changes: 3 additions & 2 deletions Lib/test/libregrtest/results.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,10 @@
from test.support import TestStats

from test.libregrtest.runtest import (
TestName, TestTuple, TestList, FilterDict, StrPath, State,
TestName, TestTuple, TestList, FilterDict, State,
TestResult, RunTests)
from test.libregrtest.utils import printlist, count, format_duration
from test.libregrtest.utils import (
printlist, count, format_duration, StrPath)


EXITCODE_BAD_TEST = 2
Expand Down
39 changes: 33 additions & 6 deletions Lib/test/libregrtest/runtest.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,11 @@
from test.support import os_helper
from test.support import threading_helper
from test.libregrtest.save_env import saved_test_environment
from test.libregrtest.utils import clear_caches, format_duration, print_warning
from test.libregrtest.utils import (
clear_caches, format_duration, print_warning, StrPath)


StrJSON = str
StrPath = str
TestName = str
TestTuple = tuple[TestName, ...]
TestList = list[TestName]
Expand Down Expand Up @@ -215,6 +215,33 @@ def get_rerun_match_tests(self) -> FilterTuple | None:
return None
return tuple(match_tests)

def write_json(self, file) -> None:
    """Serialize this TestResult as JSON to the open *file* object."""
    file.write(json.dumps(self, cls=_EncodeTestResult))

@staticmethod
def from_json(worker_json) -> 'TestResult':
    """Rebuild a TestResult from a JSON string made by write_json()."""
    decoded = json.loads(worker_json, object_hook=_decode_test_result)
    return decoded


class _EncodeTestResult(json.JSONEncoder):
    """JSON encoder that knows how to serialize TestResult dataclasses."""

    def default(self, o: Any) -> dict[str, Any]:
        if not isinstance(o, TestResult):
            # Defer to the base class, which raises TypeError.
            return super().default(o)
        # Tag the dict so _decode_test_result() can recognize it.
        encoded = dataclasses.asdict(o)
        encoded["__test_result__"] = o.__class__.__name__
        return encoded


def _decode_test_result(data: dict[str, Any]) -> TestResult | dict[str, Any]:
    """json.loads() object hook: turn tagged dicts back into TestResult.

    Dicts without the "__test_result__" tag are returned unchanged.
    """
    if "__test_result__" not in data:
        return data
    data.pop('__test_result__')
    stats = data['stats']
    if stats is not None:
        # stats was serialized as a plain dict; restore the TestStats type.
        data['stats'] = TestStats(**stats)
    return TestResult(**data)


@dataclasses.dataclass(slots=True, frozen=True)
class RunTests:
Expand All @@ -234,7 +261,7 @@ class RunTests:
quiet: bool = False
hunt_refleak: HuntRefleak | None = None
test_dir: StrPath | None = None
junit_filename: StrPath | None = None
use_junit: bool = False
memory_limit: str | None = None
gc_threshold: int | None = None
use_resources: list[str] = None
Expand Down Expand Up @@ -358,7 +385,7 @@ def setup_support(runtests: RunTests):
support.set_match_tests(runtests.match_tests, runtests.ignore_tests)
support.failfast = runtests.fail_fast
support.verbose = runtests.verbose
if runtests.junit_filename:
if runtests.use_junit:
support.junit_xml_list = []
else:
support.junit_xml_list = None
Expand Down Expand Up @@ -434,8 +461,8 @@ def run_single_test(test_name: TestName, runtests: RunTests) -> TestResult:
Returns a TestResult.
If runtests.junit_filename is not None, xml_data is a list containing each
generated testsuite element.
If runtests.use_junit, xml_data is a list containing each generated
testsuite element.
"""
start_time = time.perf_counter()
result = TestResult(test_name)
Expand Down
Loading

0 comments on commit 99b345b

Please sign in to comment.