diff --git a/axelrod/strategies/grudger.py b/axelrod/strategies/grudger.py
index 2f10ccfa7..75814ef6e 100644
--- a/axelrod/strategies/grudger.py
+++ b/axelrod/strategies/grudger.py
@@ -20,7 +20,7 @@ class Grudger(Player):
 
     name = 'Grudger'
     classifier = {
-        'memory_depth': float('inf'),  # Long memory
+        'memory_depth': 1,
         'stochastic': False,
         'makes_use_of': set(),
         'long_run_time': False,
diff --git a/axelrod/tests/strategies/test_grudger.py b/axelrod/tests/strategies/test_grudger.py
index 789d9773d..1f8fdd16f 100644
--- a/axelrod/tests/strategies/test_grudger.py
+++ b/axelrod/tests/strategies/test_grudger.py
@@ -11,7 +11,7 @@ class TestGrudger(TestPlayer):
     name = "Grudger"
     player = axl.Grudger
     expected_classifier = {
-        'memory_depth': float('inf'),  # Long memory
+        'memory_depth': 1,
         'stochastic': False,
         'makes_use_of': set(),
         'long_run_time': False,
diff --git a/axelrod/tests/strategies/test_meta.py b/axelrod/tests/strategies/test_meta.py
index 6c57c061f..93b86a295 100644
--- a/axelrod/tests/strategies/test_meta.py
+++ b/axelrod/tests/strategies/test_meta.py
@@ -294,7 +294,7 @@ class TestMetaMajorityMemoryOne(TestMetaPlayer):
     }
 
     def test_strategy(self):
-        actions = [(C, C), (C, D), (C, C), (C, D), (D, C)]
+        actions = [(C, C), (C, D), (D, C), (C, D), (D, C)]
         self.versus_test(opponent=axelrod.Alternator(), expected_actions=actions)
 
 
diff --git a/axelrod/tests/unit/test_classification.py b/axelrod/tests/unit/test_classification.py
index f3df4fe12..4d4268a2f 100644
--- a/axelrod/tests/unit/test_classification.py
+++ b/axelrod/tests/unit/test_classification.py
@@ -121,7 +121,6 @@ def test_is_basic(self):
                           axl.ForgivingTitForTat,
                           axl.GoByMajority20,
                           axl.GTFT,
-                          axl.Grudger,
                           axl.Inverse,
                           axl.Random]
 
diff --git a/axelrod/tests/unit/test_strategy_transformers.py b/axelrod/tests/unit/test_strategy_transformers.py
index f6fa2bf51..75916f05e 100644
--- a/axelrod/tests/unit/test_strategy_transformers.py
+++ b/axelrod/tests/unit/test_strategy_transformers.py
@@ -269,7 +269,7 @@ def test_initial_transformer(self):
             p1.play(p2)
         self.assertEqual(p2.history, [D, D, C, D, C])
 
-        p3 = InitialTransformer([D, D])(axelrod.Grudger)()
+        p3 = InitialTransformer([D, D])(axelrod.Adaptive)()
         self.assertEqual(p3.classifier["memory_depth"], float('inf'))
 
     def test_final_transformer(self):
@@ -286,7 +286,7 @@ def test_final_transformer(self):
             p1.play(p2)
         self.assertEqual(p2.history, [C, C, C, D, D, D, C, C])
 
-        p3 = FinalTransformer([D, D])(axelrod.Grudger)()
+        p3 = FinalTransformer([D, D])(axelrod.Adaptive)()
         self.assertEqual(p3.classifier["memory_depth"], float('inf'))
 
     def test_final_transformer2(self):
diff --git a/docs/tutorials/advanced/classification_of_strategies.rst b/docs/tutorials/advanced/classification_of_strategies.rst
index a07c663f9..0b9391b16 100644
--- a/docs/tutorials/advanced/classification_of_strategies.rst
+++ b/docs/tutorials/advanced/classification_of_strategies.rst
@@ -57,7 +57,7 @@ make a decision::
     ...     }
     >>> strategies = axl.filtered_strategies(filterset)
     >>> len(strategies)
-    27
+    28
 
 Multiple filters can be specified within the filterset dictionary. To specify a
 range of memory_depth values, we can use the 'min_memory_depth' and
@@ -69,7 +69,7 @@
     ...     }
     >>> strategies = axl.filtered_strategies(filterset)
     >>> len(strategies)
-    50
+    51
 
 We can also identify strategies that make use of particular properties of the
 tournament. For example, here is the number of strategies that make use of the
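A minimal sketch of what the reclassification means in practice, assuming the patched
`axelrod` package is importable; the exact filtered-strategy counts depend on which
strategies ship with the installed version:

    import axelrod as axl

    # After this patch Grudger advertises a memory depth of 1 rather than
    # float('inf'); its behaviour can be represented by reacting only to the
    # previous round.
    print(axl.Grudger().classifier['memory_depth'])  # 1

    # Filtering on memory_depth == 1 therefore now includes Grudger, which is
    # why the doctest counts in the docs move from 27 to 28 and 50 to 51.
    filterset = {'memory_depth': 1}
    strategies = axl.filtered_strategies(filterset)
    print(axl.Grudger in strategies)  # True
    print(len(strategies))            # 28 in the version this patch targets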