From 4a53780b64d126a36527adde531853f9ab1dfb86 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 20 Dec 2022 20:05:33 -0800 Subject: [PATCH 1/2] prerequisite cycle detection --- Makefile | 1 - ldclient/impl/evaluator.py | 63 ++++++++++++----- testing/impl/test_evaluator.py | 45 ------------ testing/impl/test_evaluator_prerequisites.py | 72 ++++++++++++++++++++ 4 files changed, 118 insertions(+), 63 deletions(-) create mode 100644 testing/impl/test_evaluator_prerequisites.py diff --git a/Makefile b/Makefile index cb0d12f2..9c511c49 100644 --- a/Makefile +++ b/Makefile @@ -24,7 +24,6 @@ TEMP_TEST_OUTPUT=/tmp/contract-test-service.log # - "evaluation/parameterized/segment recursion": Segment recursion is not yet implemented. # - "events": These test suites will be unavailable until more of the U2C implementation is done. TEST_HARNESS_PARAMS := $(TEST_HARNESS_PARAMS) \ - -skip 'evaluation/parameterized/prerequisites' \ -skip 'evaluation/parameterized/segment recursion' \ -skip 'events' diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index 78492184..fc1a6d1f 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -48,13 +48,18 @@ def _context_to_user_dict(context: Context) -> dict: # prerequisite evaluations, and the cached state of any Big Segments query that we may have # ended up having to do for the context. 
class EvalResult: + __slots__ = ['detail', 'events', 'big_segments_status', 'big_segments_membership', + 'original_flag_key', 'prereq_stack'] + def __init__(self): self.detail = None - self.events = None + self.events = None # type: Optional[List[dict]] self.big_segments_status = None # type: Optional[str] self.big_segments_membership = None # type: Optional[Dict[str, Optional[dict]]] + self.original_flag_key = None # type: Optional[str] + self.prereq_stack = None # type: Optional[List[str]] - def add_event(self, event): + def add_event(self, event: dict): if self.events is None: self.events = [] self.events.append(event) @@ -138,22 +143,46 @@ def _evaluate(self, flag: FeatureFlag, context: Context, state: EvalResult, even def _check_prerequisites(self, flag: FeatureFlag, context: Context, state: EvalResult, event_factory: _EventFactory) -> Optional[dict]: failed_prereq = None prereq_res = None - for prereq in flag.prerequisites: - prereq_flag = self.__get_flag(prereq.key) - if prereq_flag is None: - log.warning("Missing prereq flag: " + prereq.key) - failed_prereq = prereq - else: - prereq_res = self._evaluate(prereq_flag, context, state, event_factory) - # Note that if the prerequisite flag is off, we don't consider it a match no matter what its - # off variation was. But we still need to evaluate it in order to generate an event. - if (not prereq_flag.on) or prereq_res.variation_index != prereq.variation: + if len(flag.prerequisites) == 0: + return None + + try: + # We use the state object to guard against circular references in prerequisites. To avoid + # the overhead of creating the state.prereq_stack list in the most common case where + # there's only a single level of prerequisites, we treat state.original_flag_key as the first + # element in the stack. 
+ flag_key = flag.key + if flag_key != state.original_flag_key: + if state.prereq_stack is None: + state.prereq_stack = [] + state.prereq_stack.append(flag_key) + + for prereq in flag.prerequisites: + prereq_key = prereq.key + if (prereq_key == state.original_flag_key or + (flag_key != state.original_flag_key and prereq_key == flag_key) or + (state.prereq_stack is not None and prereq.key in state.prereq_stack)): + raise EvaluationException(('prerequisite relationship to "%s" caused a circular reference;' + + ' this is probably a temporary condition due to an incomplete update') % prereq_key) + + prereq_flag = self.__get_flag(prereq_key) + if prereq_flag is None: + log.warning("Missing prereq flag: " + prereq_key) failed_prereq = prereq - event = event_factory.new_eval_event(prereq_flag, _context_to_user_dict(context), prereq_res, None, flag) - state.add_event(event) - if failed_prereq: - return {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': failed_prereq.key} - return None + else: + prereq_res = self._evaluate(prereq_flag, context, state, event_factory) + # Note that if the prerequisite flag is off, we don't consider it a match no matter what its + # off variation was. But we still need to evaluate it in order to generate an event. 
+ if (not prereq_flag.on) or prereq_res.variation_index != prereq.variation: + failed_prereq = prereq + event = event_factory.new_eval_event(prereq_flag, _context_to_user_dict(context), prereq_res, None, flag) + state.add_event(event) + if failed_prereq: + return {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': failed_prereq.key} + return None + finally: + if state.prereq_stack is not None and state.prereq_stack.count != 0: + state.prereq_stack.pop() def _check_targets(self, flag: FeatureFlag, context: Context) -> Optional[EvaluationDetail]: user_targets = flag.targets diff --git a/testing/impl/test_evaluator.py b/testing/impl/test_evaluator.py index 17b3827a..bfd39b6c 100644 --- a/testing/impl/test_evaluator.py +++ b/testing/impl/test_evaluator.py @@ -29,51 +29,6 @@ def test_flag_returns_error_if_off_variation_is_negative(): detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) -def test_flag_returns_off_variation_if_prerequisite_not_found(): - flag = FlagBuilder('feature').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \ - .prerequisite('badfeature', 1).build() - evaluator = EvaluatorBuilder().with_unknown_flag('badfeature').build() - user = Context.create('x') - detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'badfeature'}) - assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, None) - -def test_flag_returns_off_variation_and_event_if_prerequisite_is_off(): - flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \ - .prerequisite('feature1', 1).build() - flag1 = FlagBuilder('feature1').version(2).on(False).off_variation(1).variations('d', 'e').fallthrough_variation(1) \ - .build() - # note that even though flag1 returns the desired variation, it is still off and therefore not a match - evaluator = 
EvaluatorBuilder().with_flag(flag1).build() - user = Context.create('x') - detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) - events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None, - 'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}] - assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) - -def test_flag_returns_off_variation_and_event_if_prerequisite_is_not_met(): - flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \ - .prerequisite('feature1', 1).build() - flag1 = FlagBuilder('feature1').version(2).on(True).off_variation(1).variations('d', 'e').fallthrough_variation(0) \ - .build() - evaluator = EvaluatorBuilder().with_flag(flag1).build() - user = Context.create('x') - detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) - events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 0, 'value': 'd', 'default': None, - 'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}] - assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) - -def test_flag_returns_fallthrough_and_event_if_prereq_is_met_and_there_are_no_rules(): - flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(0) \ - .prerequisite('feature1', 1).build() - flag1 = FlagBuilder('feature1').version(2).on(True).off_variation(1).variations('d', 'e').fallthrough_variation(1) \ - .build() - evaluator = EvaluatorBuilder().with_flag(flag1).build() - user = Context.create('x') - detail = EvaluationDetail('a', 0, {'kind': 'FALLTHROUGH'}) - events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None, - 'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}] - 
assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) - def test_flag_returns_error_if_fallthrough_variation_is_too_high(): flag = FlagBuilder('feature').on(True).variations('a', 'b', 'c').fallthrough_variation(999).build() user = Context.create('x') diff --git a/testing/impl/test_evaluator_prerequisites.py b/testing/impl/test_evaluator_prerequisites.py new file mode 100644 index 00000000..1cb99f70 --- /dev/null +++ b/testing/impl/test_evaluator_prerequisites.py @@ -0,0 +1,72 @@ +import pytest + +from testing.builders import * + +from ldclient.client import Context +from ldclient.evaluation import EvaluationDetail +from ldclient.impl.evaluator import _context_to_user_dict +from testing.builders import * +from testing.impl.evaluator_util import * + + +def test_flag_returns_off_variation_if_prerequisite_not_found(): + flag = FlagBuilder('feature').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \ + .prerequisite('badfeature', 1).build() + evaluator = EvaluatorBuilder().with_unknown_flag('badfeature').build() + user = Context.create('x') + detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'badfeature'}) + assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, None) + +def test_flag_returns_off_variation_and_event_if_prerequisite_is_off(): + flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \ + .prerequisite('feature1', 1).build() + flag1 = FlagBuilder('feature1').version(2).on(False).off_variation(1).variations('d', 'e').fallthrough_variation(1) \ + .build() + # note that even though flag1 returns the desired variation, it is still off and therefore not a match + evaluator = EvaluatorBuilder().with_flag(flag1).build() + user = Context.create('x') + detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) + events_should_be = [{'kind': 'feature', 'key': 
'feature1', 'variation': 1, 'value': 'e', 'default': None, + 'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}] + assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) + +def test_flag_returns_off_variation_and_event_if_prerequisite_is_not_met(): + flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \ + .prerequisite('feature1', 1).build() + flag1 = FlagBuilder('feature1').version(2).on(True).off_variation(1).variations('d', 'e').fallthrough_variation(0) \ + .build() + evaluator = EvaluatorBuilder().with_flag(flag1).build() + user = Context.create('x') + detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) + events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 0, 'value': 'd', 'default': None, + 'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}] + assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) + +def test_flag_returns_fallthrough_and_event_if_prereq_is_met_and_there_are_no_rules(): + flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(0) \ + .prerequisite('feature1', 1).build() + flag1 = FlagBuilder('feature1').version(2).on(True).off_variation(1).variations('d', 'e').fallthrough_variation(1) \ + .build() + evaluator = EvaluatorBuilder().with_flag(flag1).build() + user = Context.create('x') + detail = EvaluationDetail('a', 0, {'kind': 'FALLTHROUGH'}) + events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None, + 'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}] + assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) + +@pytest.mark.parametrize("depth", [1, 2, 3, 4]) +def test_prerequisite_cycle_detection(depth: int): + flag_keys = list("flagkey%d" % i for 
i in range(depth)) + flags = [] + for i in range(depth): + flags.append( + FlagBuilder(flag_keys[i]).on(True).variations(False, True).off_variation(0) \ + .prerequisite(flag_keys[(i + 1) % depth], 0) \ + .build()) + evaluator_builder = EvaluatorBuilder() + for f in flags: + evaluator_builder.with_flag(f) + evaluator = evaluator_builder.build() + context = Context.create('x') + detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) + assert_eval_result(evaluator.evaluate(flags[0], context, event_factory), detail, None) From a6b9f05090e0c88c3c3c3249859b2a824068a55e Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 21 Dec 2022 12:31:43 -0800 Subject: [PATCH 2/2] fix prereq stack logic --- ldclient/impl/evaluator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index fc1a6d1f..d08f286e 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -107,6 +107,7 @@ def __init__( def evaluate(self, flag: FeatureFlag, context: Context, event_factory: _EventFactory) -> EvalResult: state = EvalResult() + state.original_flag_key = flag.key try: state.detail = self._evaluate(flag, context, state, event_factory) except EvaluationException as e: @@ -160,7 +161,6 @@ def _check_prerequisites(self, flag: FeatureFlag, context: Context, state: EvalR for prereq in flag.prerequisites: prereq_key = prereq.key if (prereq_key == state.original_flag_key or - (flag_key != state.original_flag_key and prereq_key == flag_key) or (state.prereq_stack is not None and prereq.key in state.prereq_stack)): raise EvaluationException(('prerequisite relationship to "%s" caused a circular reference;' + ' this is probably a temporary condition due to an incomplete update') % prereq_key) @@ -181,7 +181,7 @@ def _check_prerequisites(self, flag: FeatureFlag, context: Context, state: EvalR return {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': failed_prereq.key} return None 
finally: - if state.prereq_stack is not None and state.prereq_stack.count != 0: + if state.prereq_stack is not None and len(state.prereq_stack) != 0: state.prereq_stack.pop() def _check_targets(self, flag: FeatureFlag, context: Context) -> Optional[EvaluationDetail]: