Add Neg strategy and change 4 method names for consistency #748

Merged: 10 commits, Oct 24, 2016
2 changes: 2 additions & 0 deletions axelrod/strategies/_strategies.py
@@ -47,6 +47,7 @@
from .mindcontrol import MindController, MindWarper, MindBender
from .mindreader import MindReader, ProtectedMindReader, MirrorMindReader
from .mutual import Desperate, Hopeless, Willing
from .negation import Negation
from .oncebitten import OnceBitten, FoolMeOnce, ForgetfulFoolMeOnce, FoolMeForever
Review comment (Member):
The strategy also needs to be added to the all_strategies list in this file

from .prober import (Prober, Prober2, Prober3, Prober4, HardProber,
NaiveProber, RemorsefulProber)
@@ -157,6 +158,7 @@
MindReader,
MindWarper,
MirrorMindReader,
Negation,
NiceAverageCopier,
Nydegger,
OmegaTFT,
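Aside (not part of the diff): the two hunks above provide the registration the reviewer asked for, an import plus an entry in the strategy list. A minimal sketch for checking that the registration took effect, assuming the package-level `strategies` list that the library exports:

import axelrod as axl

# With the import and list entry above in place, the new strategy should be
# discoverable through the library's public strategy collection.
print(axl.Negation in axl.strategies)   # expected: True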
35 changes: 35 additions & 0 deletions axelrod/strategies/negation.py
@@ -0,0 +1,35 @@
from axelrod import Actions, Player, random_choice, flip_action

C, D = Actions.C, Actions.D

class Negation(Player):
"""
A player starts by cooperating or defecting randomly if it's their first move,
then simply doing the opposite of the opponents last move thereafter.
Names:
Negation - [http://www.prisoners-dilemma.com/competition.html]
"""

name = "Negation"
classifier = {
'memory_depth': 1,
'stochastic': True,
'makes_use_of': set(),
'long_run_time': False,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}

def strategy(self, opponent):
# Random first move
if not self.history:
return random_choice()

# Act opposite of opponent otherwise
return flip_action(opponent.history[-1])
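
Aside (not part of the diff): a minimal usage sketch of the new strategy in a short match; the opening move is random, so exact output varies between runs, and `Cooperator` here is just an illustrative opponent.

import axelrod as axl

# Negation opens randomly, then always plays the opposite of the opponent's
# previous move, so against a pure cooperator it settles into defection.
players = (axl.Negation(), axl.Cooperator())
match = axl.Match(players, turns=6)
print(match.play())   # six (player, opponent) move pairs; from round two onwards the player defects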

2 changes: 1 addition & 1 deletion axelrod/tests/unit/test_axelrod_first.py
@@ -247,7 +247,7 @@ def test_strategy(self):
# Looks like Tit-For-Tat at first
self.markov_test([C, D, C, D])

def test_affect_of_strategy(self):
def test_effect_of_strategy(self):
"""Plays a modified TFT."""
self.responses_test([C, C, C], [C, C, C], [C, C, C])
# Make sure that the retaliations are increasing
25 changes: 25 additions & 0 deletions axelrod/tests/unit/test_negation.py
@@ -0,0 +1,25 @@
"""Test for the Neg Strategy"""

import axelrod
from .test_player import TestPlayer

C, D = axelrod.Actions.C, axelrod.Actions.D

class TestNegation(TestPlayer):

name = "Negation"
player = axelrod.Negation
expected_classifier = {
'memory_depth': 1,
'stochastic': True,
'makes_use_of': set(),
'long_run_time': False,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}

def test_effect_of_strategy(self):
"""Repeats opposite of opponents last action."""
self.markov_test([D, C, D, C])
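
Aside (not part of the diff): as I read the test helper, `markov_test` feeds the player the four possible one-round histories CC, CD, DC, DD (own move, opponent's move) and asserts the listed responses in that order, so `[D, C, D, C]` encodes exactly the flip-the-opponent rule. The same behaviour checked directly with a two-turn match, as a sketch:

import axelrod as axl

C, D = axl.Actions.C, axl.Actions.D

# From round two onwards the move is always the flip of the opponent's
# previous move, whatever the random opening move was.
for Opponent, expected in [(axl.Cooperator, D), (axl.Defector, C)]:
    match = axl.Match((axl.Negation(), Opponent()), turns=2)
    moves = match.play()
    assert moves[1][0] == expected   # second move flips the opponent's first move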

6 changes: 3 additions & 3 deletions axelrod/tests/unit/test_titfortat.py
@@ -106,7 +106,7 @@ def test_strategy(self):
"""Starts by defecting"""
self.first_play_test(D)

def test_affect_of_strategy(self):
def test_effect_of_strategy(self):
"""Will do opposite of what opponent does."""
self.markov_test([D, C, D, C])

@@ -151,7 +151,7 @@ def test_strategy(self):
"""Starts by Defecting"""
self.first_play_test(D)

def test_affect_of_strategy(self):
def test_effect_of_strategy(self):
"""Plays like TFT after the first move, repeating the opponents last move."""
self.markov_test([C, D, C, D])

@@ -173,7 +173,7 @@ def test_strategy(self):
"""Starts by Cooperating"""
self.first_play_test(C)

def test_affect_of_strategy(self):
def test_effect_of_strategy(self):
"""Will do opposite of what opponent does."""
self.markov_test([D, C, D, C])

3 changes: 3 additions & 0 deletions docs/reference/all_strategies.rst
@@ -102,6 +102,9 @@ Here are the docstrings of all the strategies in the library.
.. automodule:: axelrod.strategies.mutual
:members:
:undoc-members:
.. automodule:: axelrod.strategies.negation
:members:
:undoc-members:
.. automodule:: axelrod.strategies.oncebitten
:members:
:undoc-members:
6 changes: 3 additions & 3 deletions docs/tutorials/advanced/classification_of_strategies.rst
@@ -47,7 +47,7 @@ strategies::
... }
>>> strategies = axl.filtered_strategies(filterset)
>>> len(strategies)
37
38


Or, to find out how many strategies only use 1 turn worth of memory to
@@ -58,7 +58,7 @@ make a decision::
... }
>>> strategies = axl.filtered_strategies(filterset)
>>> len(strategies)
24
25

Multiple filters can be specified within the filterset dictionary. To specify a
range of memory_depth values, we can use the 'min_memory_depth' and
@@ -70,7 +70,7 @@ range of memory_depth values, we can use the 'min_memory_depth' and
... }
>>> strategies = axl.filtered_strategies(filterset)
>>> len(strategies)
41
42
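
Aside (not part of the diff): the three counts above each move up by one because of the new strategy. A hedged doctest-style check that Negation itself is picked up by the memory-depth filters this section documents (this assumes the `import axelrod as axl` made earlier in the document; the keys of the truncated filtersets above are not shown, so only the documented min/max keys are used):

>>> filterset = {
...     'min_memory_depth': 1,
...     'max_memory_depth': 1
... }
>>> strategies = axl.filtered_strategies(filterset)
>>> axl.Negation in strategies
True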

We can also identify strategies that make use of particular properties of the
tournament. For example, here is the number of strategies that make use of the