From 0cd2cc4e0bb810d4bd44bc61bbca5538e886fcca Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?H=C3=A5vard=20Berland?=
Date: Sun, 21 Jan 2024 20:02:42 +0100
Subject: [PATCH] Mark two scheduler tests as flaky

These tests have been seen to be flaky but pass on retries. Rerunning
instead of tuning the timing requirements keeps those requirements
easier to understand.
---
 tests/unit_tests/scheduler/test_scheduler.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/unit_tests/scheduler/test_scheduler.py b/tests/unit_tests/scheduler/test_scheduler.py
index 8516f2e2e96..892b0350db1 100644
--- a/tests/unit_tests/scheduler/test_scheduler.py
+++ b/tests/unit_tests/scheduler/test_scheduler.py
@@ -8,6 +8,7 @@

 import pytest
 from cloudevents.http import CloudEvent, from_json
+from flaky import flaky

 from ert.constant_filenames import CERT_FILE
 from ert.ensemble_evaluator._builder._realization import Realization
@@ -346,6 +347,7 @@ async def wait(iens):
     assert killed_iens == [6, 7, 8, 9]


+@flaky(max_runs=5, min_passes=1)
 @pytest.mark.parametrize(
     "submit_sleep, iens_stride, realization_runtime",
     [(0, 1, 0.1), (0.1, 1, 0.1), (0.1, 1, 0), (0.1, 2, 0)],
@@ -388,6 +390,7 @@ async def wait():
     assert max(deltas) <= submit_sleep + 0.1


+@flaky(max_runs=5, min_passes=1)
 @pytest.mark.parametrize(
     "submit_sleep, realization_max_runtime, max_running",
     [
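
Note (not part of the patch): a minimal sketch of what the @flaky decorator
from the flaky pytest plugin does, assuming pytest and flaky are installed.
The test body below is hypothetical and only illustrates the retry semantics;
in the patch itself the decorator wraps the scheduler timing tests.

# Minimal sketch: @flaky reruns a failing test up to max_runs times and
# reports success once min_passes of those runs have passed.
import random

from flaky import flaky


@flaky(max_runs=5, min_passes=1)
def test_timing_sensitive_sketch():
    # Hypothetical timing-sensitive assertion: it fails occasionally, but a
    # rerun is very likely to pass, so the test is marked flaky rather than
    # loosening the timing bound itself.
    simulated_runtime = random.uniform(0.05, 0.15)
    assert simulated_runtime <= 0.12

With max_runs=5 and min_passes=1, a single passing run out of up to five
attempts is enough for the test to be reported as passing, which matches the
commit's intent of retrying instead of relaxing the timing requirements.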