Merge pull request #202 from launchdarkly/eb/sc-180515/prereq-cycle
(U2C 14) prerequisite cycle detection
eli-darkly authored Dec 21, 2022
2 parents 700e5f7 + a6b9f05 commit bce65f0
Showing 4 changed files with 118 additions and 63 deletions.
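
In practical terms, a flag whose prerequisite chain loops back onto a flag that is already being evaluated is now reported as a malformed flag instead of recursing without bound. Below is a minimal sketch of the observable behavior, using only the FlagBuilder/EvaluatorBuilder test helpers that appear later in this diff (the flag keys are made-up examples):

from ldclient.client import Context
from ldclient.evaluation import EvaluationDetail
from testing.builders import *              # FlagBuilder
from testing.impl.evaluator_util import *   # EvaluatorBuilder, event_factory, assert_eval_result

# Two hypothetical flags that name each other as prerequisites form a cycle.
flag_a = FlagBuilder('flag-a').on(True).variations(False, True).off_variation(0) \
    .prerequisite('flag-b', 0).build()
flag_b = FlagBuilder('flag-b').on(True).variations(False, True).off_variation(0) \
    .prerequisite('flag-a', 0).build()

evaluator = EvaluatorBuilder().with_flag(flag_a).with_flag(flag_b).build()

# Evaluation fails fast with a MALFORMED_FLAG error rather than overflowing the stack.
expected = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
assert_eval_result(evaluator.evaluate(flag_a, Context.create('x'), event_factory), expected, None)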
1 change: 0 additions & 1 deletion Makefile
@@ -24,7 +24,6 @@ TEMP_TEST_OUTPUT=/tmp/contract-test-service.log
 # - "evaluation/parameterized/segment recursion": Segment recursion is not yet implemented.
 # - "events": These test suites will be unavailable until more of the U2C implementation is done.
 TEST_HARNESS_PARAMS := $(TEST_HARNESS_PARAMS) \
-  -skip 'evaluation/parameterized/prerequisites' \
   -skip 'evaluation/parameterized/segment recursion' \
   -skip 'events'

63 changes: 46 additions & 17 deletions ldclient/impl/evaluator.py
@@ -48,13 +48,18 @@ def _context_to_user_dict(context: Context) -> dict:
 # prerequisite evaluations, and the cached state of any Big Segments query that we may have
 # ended up having to do for the context.
 class EvalResult:
+    __slots__ = ['detail', 'events', 'big_segments_status', 'big_segments_membership',
+                 'original_flag_key', 'prereq_stack']
+
     def __init__(self):
         self.detail = None
-        self.events = None
+        self.events = None # type: Optional[List[dict]]
         self.big_segments_status = None # type: Optional[str]
         self.big_segments_membership = None # type: Optional[Dict[str, Optional[dict]]]
+        self.original_flag_key = None # type: Optional[str]
+        self.prereq_stack = None # type: Optional[List[str]]
 
-    def add_event(self, event):
+    def add_event(self, event: dict):
         if self.events is None:
             self.events = []
         self.events.append(event)
@@ -102,6 +107,7 @@ def __init__(
 
     def evaluate(self, flag: FeatureFlag, context: Context, event_factory: _EventFactory) -> EvalResult:
         state = EvalResult()
+        state.original_flag_key = flag.key
         try:
             state.detail = self._evaluate(flag, context, state, event_factory)
         except EvaluationException as e:
@@ -138,22 +144,45 @@ def _evaluate(self, flag: FeatureFlag, context: Context, state: EvalResult, even
     def _check_prerequisites(self, flag: FeatureFlag, context: Context, state: EvalResult, event_factory: _EventFactory) -> Optional[dict]:
         failed_prereq = None
         prereq_res = None
-        for prereq in flag.prerequisites:
-            prereq_flag = self.__get_flag(prereq.key)
-            if prereq_flag is None:
-                log.warning("Missing prereq flag: " + prereq.key)
-                failed_prereq = prereq
-            else:
-                prereq_res = self._evaluate(prereq_flag, context, state, event_factory)
-                # Note that if the prerequisite flag is off, we don't consider it a match no matter what its
-                # off variation was. But we still need to evaluate it in order to generate an event.
-                if (not prereq_flag.on) or prereq_res.variation_index != prereq.variation:
+        if len(flag.prerequisites) == 0:
+            return None
+
+        try:
+            # We use the state object to guard against circular references in prerequisites. To avoid
+            # the overhead of creating the state.prereq_stack list in the most common case where
+            # there's only a single level of prerequisites, we treat state.original_flag_key as the first
+            # element in the stack.
+            flag_key = flag.key
+            if flag_key != state.original_flag_key:
+                if state.prereq_stack is None:
+                    state.prereq_stack = []
+                state.prereq_stack.append(flag_key)
+
+            for prereq in flag.prerequisites:
+                prereq_key = prereq.key
+                if (prereq_key == state.original_flag_key or
+                        (state.prereq_stack is not None and prereq_key in state.prereq_stack)):
+                    raise EvaluationException(('prerequisite relationship to "%s" caused a circular reference;' +
+                                               ' this is probably a temporary condition due to an incomplete update') % prereq_key)
+
+                prereq_flag = self.__get_flag(prereq_key)
+                if prereq_flag is None:
+                    log.warning("Missing prereq flag: " + prereq_key)
                     failed_prereq = prereq
-                event = event_factory.new_eval_event(prereq_flag, _context_to_user_dict(context), prereq_res, None, flag)
-                state.add_event(event)
-            if failed_prereq:
-                return {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': failed_prereq.key}
-        return None
+                else:
+                    prereq_res = self._evaluate(prereq_flag, context, state, event_factory)
+                    # Note that if the prerequisite flag is off, we don't consider it a match no matter what its
+                    # off variation was. But we still need to evaluate it in order to generate an event.
+                    if (not prereq_flag.on) or prereq_res.variation_index != prereq.variation:
+                        failed_prereq = prereq
+                    event = event_factory.new_eval_event(prereq_flag, _context_to_user_dict(context), prereq_res, None, flag)
+                    state.add_event(event)
+                if failed_prereq:
+                    return {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': failed_prereq.key}
+            return None
+        finally:
+            if state.prereq_stack is not None and len(state.prereq_stack) != 0:
+                state.prereq_stack.pop()
 
     def _check_targets(self, flag: FeatureFlag, context: Context) -> Optional[EvaluationDetail]:
         user_targets = flag.targets
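
For readers skimming the diff, the guard in _check_prerequisites is easiest to follow in isolation. The following is a simplified, standalone sketch of the same bookkeeping (hypothetical function and names, not SDK code): the key of the originally requested flag acts as the implicit first element of the stack, each nested prerequisite evaluation pushes its key before recursing and pops it afterwards, and a prerequisite key that already appears in the chain is reported as a cycle.

from typing import Dict, List, Optional

def check_prereq_chain(flag_key: str, prereqs: Dict[str, List[str]],
                       original_key: Optional[str] = None,
                       stack: Optional[List[str]] = None) -> None:
    # Hypothetical standalone model of the cycle guard; raises on a circular reference.
    original_key = flag_key if original_key is None else original_key
    stack = [] if stack is None else stack
    if flag_key != original_key:
        stack.append(flag_key)  # original_key is treated as the implicit first element
    try:
        for prereq_key in prereqs.get(flag_key, []):
            if prereq_key == original_key or prereq_key in stack:
                raise RuntimeError('prerequisite cycle involving "%s"' % prereq_key)
            check_prereq_chain(prereq_key, prereqs, original_key, stack)
    finally:
        if flag_key != original_key:
            stack.pop()

check_prereq_chain('a', {'a': ['b'], 'b': ['c'], 'c': []})    # OK: no cycle
# check_prereq_chain('a', {'a': ['b'], 'b': ['a']})           # raises: 'a' -> 'b' -> 'a'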
45 changes: 0 additions & 45 deletions testing/impl/test_evaluator.py
@@ -29,51 +29,6 @@ def test_flag_returns_error_if_off_variation_is_negative():
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
     assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None)
 
-def test_flag_returns_off_variation_if_prerequisite_not_found():
-    flag = FlagBuilder('feature').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \
-        .prerequisite('badfeature', 1).build()
-    evaluator = EvaluatorBuilder().with_unknown_flag('badfeature').build()
-    user = Context.create('x')
-    detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'badfeature'})
-    assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, None)
-
-def test_flag_returns_off_variation_and_event_if_prerequisite_is_off():
-    flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \
-        .prerequisite('feature1', 1).build()
-    flag1 = FlagBuilder('feature1').version(2).on(False).off_variation(1).variations('d', 'e').fallthrough_variation(1) \
-        .build()
-    # note that even though flag1 returns the desired variation, it is still off and therefore not a match
-    evaluator = EvaluatorBuilder().with_flag(flag1).build()
-    user = Context.create('x')
-    detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'})
-    events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None,
-        'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}]
-    assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be)
-
-def test_flag_returns_off_variation_and_event_if_prerequisite_is_not_met():
-    flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \
-        .prerequisite('feature1', 1).build()
-    flag1 = FlagBuilder('feature1').version(2).on(True).off_variation(1).variations('d', 'e').fallthrough_variation(0) \
-        .build()
-    evaluator = EvaluatorBuilder().with_flag(flag1).build()
-    user = Context.create('x')
-    detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'})
-    events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 0, 'value': 'd', 'default': None,
-        'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}]
-    assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be)
-
-def test_flag_returns_fallthrough_and_event_if_prereq_is_met_and_there_are_no_rules():
-    flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(0) \
-        .prerequisite('feature1', 1).build()
-    flag1 = FlagBuilder('feature1').version(2).on(True).off_variation(1).variations('d', 'e').fallthrough_variation(1) \
-        .build()
-    evaluator = EvaluatorBuilder().with_flag(flag1).build()
-    user = Context.create('x')
-    detail = EvaluationDetail('a', 0, {'kind': 'FALLTHROUGH'})
-    events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None,
-        'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}]
-    assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be)
-
 def test_flag_returns_error_if_fallthrough_variation_is_too_high():
     flag = FlagBuilder('feature').on(True).variations('a', 'b', 'c').fallthrough_variation(999).build()
     user = Context.create('x')
72 changes: 72 additions & 0 deletions testing/impl/test_evaluator_prerequisites.py
@@ -0,0 +1,72 @@
+import pytest
+
+from testing.builders import *
+
+from ldclient.client import Context
+from ldclient.evaluation import EvaluationDetail
+from ldclient.impl.evaluator import _context_to_user_dict
+from testing.builders import *
+from testing.impl.evaluator_util import *
+
+
+def test_flag_returns_off_variation_if_prerequisite_not_found():
+    flag = FlagBuilder('feature').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \
+        .prerequisite('badfeature', 1).build()
+    evaluator = EvaluatorBuilder().with_unknown_flag('badfeature').build()
+    user = Context.create('x')
+    detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'badfeature'})
+    assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, None)
+
+def test_flag_returns_off_variation_and_event_if_prerequisite_is_off():
+    flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \
+        .prerequisite('feature1', 1).build()
+    flag1 = FlagBuilder('feature1').version(2).on(False).off_variation(1).variations('d', 'e').fallthrough_variation(1) \
+        .build()
+    # note that even though flag1 returns the desired variation, it is still off and therefore not a match
+    evaluator = EvaluatorBuilder().with_flag(flag1).build()
+    user = Context.create('x')
+    detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'})
+    events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None,
+        'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}]
+    assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be)
+
+def test_flag_returns_off_variation_and_event_if_prerequisite_is_not_met():
+    flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \
+        .prerequisite('feature1', 1).build()
+    flag1 = FlagBuilder('feature1').version(2).on(True).off_variation(1).variations('d', 'e').fallthrough_variation(0) \
+        .build()
+    evaluator = EvaluatorBuilder().with_flag(flag1).build()
+    user = Context.create('x')
+    detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'})
+    events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 0, 'value': 'd', 'default': None,
+        'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}]
+    assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be)
+
+def test_flag_returns_fallthrough_and_event_if_prereq_is_met_and_there_are_no_rules():
+    flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(0) \
+        .prerequisite('feature1', 1).build()
+    flag1 = FlagBuilder('feature1').version(2).on(True).off_variation(1).variations('d', 'e').fallthrough_variation(1) \
+        .build()
+    evaluator = EvaluatorBuilder().with_flag(flag1).build()
+    user = Context.create('x')
+    detail = EvaluationDetail('a', 0, {'kind': 'FALLTHROUGH'})
+    events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None,
+        'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}]
+    assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be)
+
+@pytest.mark.parametrize("depth", [1, 2, 3, 4])
+def test_prerequisite_cycle_detection(depth: int):
+    flag_keys = list("flagkey%d" % i for i in range(depth))
+    flags = []
+    for i in range(depth):
+        flags.append(
+            FlagBuilder(flag_keys[i]).on(True).variations(False, True).off_variation(0) \
+                .prerequisite(flag_keys[(i + 1) % depth], 0) \
+                .build())
+    evaluator_builder = EvaluatorBuilder()
+    for f in flags:
+        evaluator_builder.with_flag(f)
+    evaluator = evaluator_builder.build()
+    context = Context.create('x')
+    detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
+    assert_eval_result(evaluator.evaluate(flags[0], context, event_factory), detail, None)
