From b60d912e91806e1b7eea80e96f61e3f2fcc35cb8 Mon Sep 17 00:00:00 2001
From: Michael Orlitzky
Date: Sat, 19 Oct 2024 07:57:40 -0400
Subject: [PATCH 1/2] src/sage/parallel/decorate.py: disable a
 stress-dependent test

One test in this file sets the timeout to 100ms and expects an easy
example to complete within that amount of time. It can still fail,
though, if the machine is under load:

  File "src/sage/parallel/decorate.py", line 519, in sage.parallel.decorate.fork
  Failed example:
      g(5, m=5)
  Expected:
      8
  Got:
      Killing subprocess 62130 with input ((5,), {'m': 5}) which took too long
      8

This example isn't really the purpose of the doctest -- its intent is
to show that there are hard examples that will NOT complete in 100ms.
(That is much easier to do.) All things considered, we are better off
just dropping this one flaky example.
---
 src/sage/parallel/decorate.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/sage/parallel/decorate.py b/src/sage/parallel/decorate.py
index b23e997b7e2..fbcbada04ff 100644
--- a/src/sage/parallel/decorate.py
+++ b/src/sage/parallel/decorate.py
@@ -516,8 +516,6 @@ def fork(f=None, timeout=0, verbose=False):
 
         sage: @fork(timeout=0.1, verbose=True)
         ....: def g(n, m): return factorial(n).ndigits() + m
-        sage: g(5, m=5)
-        8
         sage: g(10^7, m=5)
         Killing subprocess ... with input ((10000000,), {'m': 5}) which took too long
         'NO DATA (timed out)'

From a1818838cb0dcb7eb278e22c75556db758f45cce Mon Sep 17 00:00:00 2001
From: Michael Orlitzky
Date: Sat, 19 Oct 2024 08:09:37 -0400
Subject: [PATCH 2/2] src/sage/parallel/map_reduce.py: reduce test dependency
 on CPU speed

The test we have for start_workers() is doing a bit more than that.
It's starting two threads that each print something, sleep for a bit,
and then print something else. To check the expected output, we are
basically guessing how long it will take these threads to run, and on
a very heavily-loaded machine, we can guess wrong:

  File "src/sage/parallel/map_reduce.py", line 1149, in sage.parallel.map_reduce.RESetMapReduce.start_workers
  Failed example:
      sleep(float(1.5))
  Expected:
      Finished: ...
      Finished: ...
  Got:
      Finished: 2

(We were expecting both threads to finish within 1.5s, but only one
did.)

To make this a bit more robust, we eliminate the sleep() and
subsequent print inside of the threads. As a result, we need only make
one guess: how long it will take the threads to start. And for that we
now go overkill and wait 5s.
---
 src/sage/parallel/map_reduce.py | 11 +----------
 1 file changed, 1 insertion(+), 10 deletions(-)

diff --git a/src/sage/parallel/map_reduce.py b/src/sage/parallel/map_reduce.py
index 239afb6df8b..8fe0898f001 100644
--- a/src/sage/parallel/map_reduce.py
+++ b/src/sage/parallel/map_reduce.py
@@ -1136,21 +1136,12 @@ def start_workers(self):
             sage: from sage.parallel.map_reduce import RESetMapReduce
             sage: def children(x):
             ....:     print(f"Starting: {x}", flush=True)
-            ....:     sleep(float(0.5))
-            ....:     print(f"Finished: {x}", flush=True)
             ....:     return []
             sage: S = RESetMapReduce(roots=[1, 2], children=children)
             sage: S.setup_workers(2)
-            sage: S.start_workers(); sleep(float(0.4))
+            sage: S.start_workers(); sleep(float(5))  # long time
             Starting: ...
             Starting: ...
-            sage: [w.is_alive() for w in S._workers]
-            [True, True]
-            sage: sleep(float(1.5))
-            Finished: ...
-            Finished: ...
-            sage: [not w.is_alive() for w in S._workers]
-            [True, True]
 
         Cleanup::
 
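
Note on the pattern behind the first patch: the doctest exercises a decorator
that runs a function in a subprocess and kills it after a timeout. The sketch
below is a minimal, hypothetical illustration of that general pattern in plain
Python; it is not Sage's @fork implementation, and the names run_with_timeout
and _call are illustrative only (error handling is omitted).

    import multiprocessing as mp

    def _call(queue, f, args, kwargs):
        # Runs in the child process: evaluate f and send the result back.
        queue.put(f(*args, **kwargs))

    def run_with_timeout(f, timeout, *args, **kwargs):
        # Run f(*args, **kwargs) in a subprocess, waiting at most `timeout` seconds.
        queue = mp.Queue()
        child = mp.Process(target=_call, args=(queue, f, args, kwargs))
        child.start()
        child.join(timeout)
        if child.is_alive():
            # The call took too long: kill the child and report the timeout.
            child.terminate()
            child.join()
            return "NO DATA (timed out)"
        return queue.get()

With a budget as small as 100ms, even a trivial call can miss the deadline on
a heavily loaded machine, which is exactly the flakiness the first patch
avoids by dropping the fast example.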
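
Note on the second patch: an alternative to guessing any fixed sleep at all is
to poll with a generous deadline. The following is only a sketch under the
assumption that the RESetMapReduce API behaves as shown in the diff
(S._workers holding workers that expose is_alive()); wait_until is a
hypothetical helper, not part of Sage.

    import time

    def wait_until(predicate, deadline=5.0, interval=0.1):
        # Poll `predicate` until it returns True or `deadline` seconds elapse.
        end = time.monotonic() + deadline
        while time.monotonic() < end:
            if predicate():
                return True
            time.sleep(interval)
        return predicate()

    # For example, after S.start_workers():
    #     wait_until(lambda: all(w.is_alive() for w in S._workers))

Polling like this only ever waits as long as it has to, so a generous deadline
costs nothing on a fast machine while tolerating a slow or loaded one.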