diff --git a/axelrod/strategies/__init__.py b/axelrod/strategies/__init__.py index 5398922b3..72fb78229 100644 --- a/axelrod/strategies/__init__.py +++ b/axelrod/strategies/__init__.py @@ -63,6 +63,23 @@ # Distinguished strategy collections in addition to # `all_strategies` from _strategies.py demo_strategies = [Cooperator, Defector, TitForTat, Grudger, Random] +axelrod_first_strategies = [ + TitForTat, + FirstByTidemanAndChieruzzi, + FirstByNydegger, + FirstByGrofman, + FirstByShubik, + FirstBySteinAndRapoport, + Grudger, + FirstByDavis, + FirstByGraaskamp, + FirstByDowning, + FirstByFeld, + FirstByJoss, + FirstByTullock, + FirstByAnonymous, + Random, + ] basic_strategies = [s for s in all_strategies if is_basic(s())] strategies = [s for s in all_strategies if obey_axelrod(s())] diff --git a/axelrod/strategies/_strategies.py b/axelrod/strategies/_strategies.py index 3b33cbfdf..5c618d875 100644 --- a/axelrod/strategies/_strategies.py +++ b/axelrod/strategies/_strategies.py @@ -6,44 +6,44 @@ from .appeaser import Appeaser from .averagecopier import AverageCopier, NiceAverageCopier from .axelrod_first import ( - Davis, - Feld, - Graaskamp, - Grofman, - Joss, - Nydegger, - RevisedDowning, - Shubik, - SteinAndRapoport, - TidemanAndChieruzzi, - Tullock, - UnnamedStrategy, + FirstByDavis, + FirstByFeld, + FirstByGraaskamp, + FirstByGrofman, + FirstByJoss, + FirstByNydegger, + FirstByDowning, + FirstByShubik, + FirstBySteinAndRapoport, + FirstByTidemanAndChieruzzi, + FirstByTullock, + FirstByAnonymous, ) from .axelrod_second import ( - Appold, - Black, - Borufsen, - Cave, - Champion, - Colbert, - Eatherley, - Getzler, - Gladstein, - GraaskampKatzen, - Harrington, - Kluepfel, - Leyvraz, - Mikkelson, - MoreGrofman, - MoreTidemanAndChieruzzi, - RichardHufford, - Rowsam, - Tester, - Tranquilizer, - Weiner, - White, - WmAdams, - Yamachi, + SecondByAppold, + SecondByBlack, + SecondByBorufsen, + SecondByCave, + SecondByChampion, + SecondByColbert, + SecondByEatherley, + SecondByGetzler, + SecondByGladstein, + SecondByGraaskampKatzen, + SecondByHarrington, + SecondByKluepfel, + SecondByLeyvraz, + SecondByMikkelson, + SecondByGrofman, + SecondByTidemanAndChieruzzi, + SecondByRichardHufford, + SecondByRowsam, + SecondByTester, + SecondByTranquilizer, + SecondByWeiner, + SecondByWhite, + SecondByWmAdams, + SecondByYamachi, ) from .backstabber import BackStabber, DoubleCrosser from .better_and_better import BetterAndBetter @@ -189,6 +189,7 @@ Retaliate2, Retaliate3, ) +from .revised_downing import RevisedDowning from .selfsteem import SelfSteem from .sequence_player import SequencePlayer, ThueMorse, ThueMorseInverse from .shortmem import ShortMem @@ -254,20 +255,20 @@ APavlov2006, APavlov2011, Appeaser, - Appold, + SecondByAppold, ArrogantQLearner, AverageCopier, BackStabber, BetterAndBetter, - Black, - Borufsen, + SecondByBlack, + SecondByBorufsen, Bully, BushMosteller, Calculator, CautiousQLearner, - Cave, - Champion, - Colbert, + SecondByCave, + SecondByChampion, + SecondByColbert, CollectiveStrategy, ContriteTitForTat, Cooperator, @@ -280,7 +281,8 @@ CyclerDDC, CyclerCCCDCD, Darwin, - Davis, + FirstByDavis, + FirstByAnonymous, DBS, Defector, DefectorHunter, @@ -291,7 +293,7 @@ Doubler, DoubleResurrection, EasyGo, - Eatherley, + SecondByEatherley, EugineNier, EventualCycleHunter, EvolvedANN, @@ -303,7 +305,7 @@ EvolvedLookerUp1_1_1, EvolvedLookerUp2_2_2, EvolvedHMM5, - Feld, + FirstByFeld, FirmButFair, FoolMeOnce, ForgetfulFoolMeOnce, @@ -317,19 +319,19 @@ GellerCooperator, GellerDefector, 
GeneralSoftGrudger, - Getzler, - Gladstein, + SecondByGetzler, + SecondByGladstein, GoByMajority, GoByMajority10, GoByMajority20, GoByMajority40, GoByMajority5, Golden, - Graaskamp, - GraaskampKatzen, + FirstByGraaskamp, + SecondByGraaskampKatzen, Gradual, GradualKiller, - Grofman, + FirstByGrofman, Grudger, GrudgerAlternator, Grumpy, @@ -342,16 +344,16 @@ HardProber, HardTitFor2Tats, HardTitForTat, - Harrington, + SecondByHarrington, HesitantQLearner, Hopeless, Inverse, InversePunisher, - Joss, - Kluepfel, + FirstByJoss, + SecondByKluepfel, KnowledgeableWorseAndWorse, LevelPunisher, - Leyvraz, + SecondByLeyvraz, LimitedRetaliate, LimitedRetaliate2, LimitedRetaliate3, @@ -359,18 +361,19 @@ NaiveProber, MEM2, Michaelos, - Mikkelson, + SecondByMikkelson, MindBender, MindController, MindReader, MindWarper, MirrorMindReader, - MoreGrofman, - MoreTidemanAndChieruzzi, + RevisedDowning, + SecondByGrofman, + SecondByTidemanAndChieruzzi, Negation, NiceAverageCopier, NTitsForMTats, - Nydegger, + FirstByNydegger, OmegaTFT, OnceBitten, OppositeGrudger, @@ -396,14 +399,14 @@ Retaliate, Retaliate2, Retaliate3, - RevisedDowning, - RichardHufford, + FirstByDowning, + SecondByRichardHufford, Ripoff, RiskyQLearner, - Rowsam, + SecondByRowsam, SelfSteem, ShortMem, - Shubik, + FirstByShubik, SlowTitForTwoTats2, SneakyTitForTat, SoftGrudger, @@ -412,41 +415,41 @@ SolutionB5, SpitefulTitForTat, Stalker, - SteinAndRapoport, + FirstBySteinAndRapoport, StochasticCooperator, StochasticWSLS, SuspiciousTitForTat, - Tester, + SecondByTester, TF1, TF2, TF3, ThueMorse, ThueMorseInverse, Thumper, - TidemanAndChieruzzi, + FirstByTidemanAndChieruzzi, TitForTat, TitFor2Tats, - Tranquilizer, + SecondByTranquilizer, TrickyCooperator, TrickyDefector, TrickyLevelPunisher, - Tullock, + FirstByTullock, TwoTitsForTat, UsuallyCooperates, UsuallyDefects, VeryBad, - Weiner, - White, + SecondByWeiner, + SecondByWhite, Willing, Winner12, Winner21, WinShiftLoseStay, WinStayLoseShift, - WmAdams, + SecondByWmAdams, WorseAndWorse, WorseAndWorse2, WorseAndWorse3, - Yamachi, + SecondByYamachi, ZDExtortion, ZDExtort2, ZDExtort3, diff --git a/axelrod/strategies/axelrod_first.py b/axelrod/strategies/axelrod_first.py index d4773d26e..da39e8732 100644 --- a/axelrod/strategies/axelrod_first.py +++ b/axelrod/strategies/axelrod_first.py @@ -1,5 +1,16 @@ """ -Additional strategies from Axelrod's first tournament. +Strategies submitted to Axelrod's first tournament. All strategies in this +module are prefixed by `FirstBy` to indicate that they were submitted in +Axelrod's First tournament by the given author. + +Note that these strategies are implemented from the descriptions presented +in: + +Axelrod, R. (1980). Effective Choice in the Prisoner’s Dilemma. +Journal of Conflict Resolution, 24(1), 3–25. + +These descriptions are not always clear and/or precise and when assumptions have +been made they are explained in the strategy docstrings. """ import random @@ -16,12 +27,14 @@ C, D = Action.C, Action.D -class Davis(Player): +class FirstByDavis(Player): """ Submitted to Axelrod's first tournament by Morton Davis. - A player starts by cooperating for 10 rounds then plays Grudger, - defecting if at any point the opponent has defected. + The description written in [Axelrod1980]_ is: + + > "A player starts by cooperating for 10 rounds then plays Grudger, + > defecting if at any point the opponent has defected." This strategy came 8th in Axelrod's original tournament. 
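As a quick check of the renamed first-tournament classes and the new `axelrod_first_strategies` collection, here is a minimal usage sketch. It is not part of this patch: it assumes the public `axelrod` API (`Match`, `Tournament`) and that the new collection is re-exported at package level.

import axelrod as axl

# The renamed classes are drop-in replacements; only the names change.
players = (axl.FirstByDavis(), axl.FirstByGrofman())
match = axl.Match(players, turns=10)
interactions = match.play()  # list of (Action, Action) pairs

# The new collection gathers the fifteen first-tournament entrants.
tournament = axl.Tournament(
    [strategy() for strategy in axl.axelrod_first_strategies],
    turns=50,
    repetitions=2,
)
results = tournament.play()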
@@ -30,7 +43,7 @@ class Davis(Player):
     - Davis: [Axelrod1980]_
     """
 
-    name = "Davis"
+    name = "First by Davis"
    classifier = {
        "memory_depth": float("inf"),  # Long memory
        "stochastic": False,
@@ -56,97 +69,234 @@ def strategy(self, opponent: Player) -> Action:
        opponent ever plays D."""
        if len(self.history) < self._rounds_to_cooperate:
            return C
-        if opponent.defections:
+        if opponent.defections > 0:  # Implement Grudger
            return D
        return C


+class FirstByDowning(Player):
+    """
+    Submitted to Axelrod's first tournament by Downing.
+
+    The description written in [Axelrod1980]_ is:
+
+    > "This rule selects its choice to maximize its own longterm expected payoff on
+    > the assumption that the other rule cooperates with a fixed probability which
+    > depends only on whether the other player cooperated or defected on the previous
+    > move. These two probabilities estimates are continuously updated as the game
+    > progresses. Initially, they are both assumed to be .5, which amounts to the
+    > pessimistic assumption that the other player is not responsive. This rule is
+    > based on an outcome maximization interpretation of human performances proposed
+    > by Downing (1975)."
+
+    The Downing (1975) paper is "The Prisoner's Dilemma Game as a
+    Problem-Solving Phenomenon" [Downing1975]_ and this is used to implement the
+    strategy.
+
+    There are a number of specific points in this paper, on page 371:
+
+    > "[...] In these strategies, O's [the opponent's] response on trial N is in
+    > some way dependent or contingent on S's [the subject's] response on trial
+    > N - 1. All varieties of these lag-one matching strategies can be defined
+    > by two parameters: the conditional probability that O will choose C
+    > following C by S, P(C_o | C_s) and the conditional probability that O will
+    > choose C following D by S, P(C_o | D_s)."
+
+    Throughout the paper the strategy (S) assumes that the opponent (O) is
+    playing a reactive strategy defined by these two conditional probabilities.
+
+    The strategy aims to maximise the long run utility against such a strategy
+    and the mechanism for this is described in Appendix A (more on this later).
+
+    One final point from the main text is, on page 372:
+
+    > "For the various lag-one matching strategies of O, the maximizing
+    > strategies of S will be 100% C, or 100% D, or for some strategies all S
+    > strategies will be functionally equivalent."
+
+    This implies that the strategy S will either always cooperate or always
+    defect (or be indifferent) depending on the opponent's defining
+    probabilities.
+
+    To understand the particular mechanism that describes the strategy S, we
+    refer to Appendix A of the paper on page 389.
+
+    The stated goal of the strategy is to maximize (using the notation of the
+    paper):
+
+    EV_TOT = #CC(EV_CC) + #CD(EV_CD) + #DC(EV_DC) + #DD(EV_DD)
+
+    This differs from the more modern literature, where #CC, #CD, #DC and #DD
+    would denote counts of the joint plays (both players playing C and C, the
+    first playing C and the second D, etc.).
+    In this case the author uses an argument based on the sequence of plays by
+    the player (S), so #CC denotes the number of times the player plays C twice
+    in a row.
+
+    On the second page of the appendix, figure 4 (page 390) identifies an
+    expression for EV_TOT. A specific term is made to disappear in the case of
+    T - R = P - S (which is not the case for the standard
+    (R, P, S, T) = (3, 1, 0, 5)):
+
+    > "Where (t - r) = (p - s), EV_TOT will be a function of alpha, beta, t, r,
+    > p, s and N are known and V which is unknown."
+
+    V is the total number of cooperations of the player S (this is noted earlier
+    in the abstract) and as such the final expression (with only V as unknown)
+    can be used to decide if V should indicate that S always cooperates or not.
+
+    This final expression is used to show that EV_TOT is linear in the number of
+    cooperations by the player, thus justifying the fact that the player will
+    always cooperate or defect.
+
+    All of the above details are used to give the following interpretation of
+    the strategy:
+
+    1. On any given turn, the strategy will estimate alpha = P(C_o | C_s) and
+    beta = P(C_o | D_s).
+    2. The strategy will calculate the expected utility of always playing C OR
+    always playing D against the estimated probabilities. This corresponds to:
+
+    a. In the case of the player always cooperating:
+
+    P_CC = alpha and P_CD = 1 - alpha
+
+    b. In the case of the player always defecting:
 
-class RevisedDowning(Player):
-    """This strategy attempts to estimate the next move of the opponent by estimating
-    the probability of cooperating given that they defected (:math:`p(C|D)`) or
-    cooperated on the previous round (:math:`p(C|C)`). These probabilities are
-    continuously updated during play and the strategy attempts to maximise the long
-    term play. Note that the initial values are :math:`p(C|C)=p(C|D)=.5`.
+    P_DC = beta and P_DD = 1 - beta
 
-    Downing is implemented as `RevisedDowning`. Apparently in the first tournament
-    the strategy was implemented incorrectly and defected on the first two rounds.
-    This can be controlled by setting `revised=True` to prevent the initial defections.
-    This strategy came 10th in Axelrod's original tournament but would have won
-    if it had been implemented correctly.
+    Using this we have:
+
+    E_C = alpha R + (1 - alpha) S
+    E_D = beta T + (1 - beta) P
+
+    Thus at every turn, the strategy will calculate those two values and
+    cooperate if E_C > E_D and will defect if E_C < E_D.
+
+    In the case of E_C = E_D, the player will alternate from their previous
+    move. This is based on a specific sentence from Axelrod's original paper:
+
+    > "Under certain circumstances, DOWNING will even determine that the best
+    > strategy is to alternate cooperation and defection."
+
+    One final important point is the early game behaviour of the strategy. It
+    has been noted that this strategy was implemented in a way that assumed that
+    alpha and beta were both 1/2:
+
+    > "Initially, they are both assumed to be .5, which amounts to the
+    > pessimistic assumption that the other player is not responsive."
+
+    Note that if alpha = beta = 1 / 2 then:
+
+    E_C = alpha R + alpha S
+    E_D = alpha T + alpha P
+
+    And from the defining properties of the Prisoner's Dilemma (T > R > P > S)
+    this gives: E_D > E_C.
+    Thus, the player opens with a defection in the first two rounds. Note that
+    from the Axelrod publications alone there is nothing to indicate defections
+    on the first two rounds, although a defection in the opening round is clear.
+    However, there is a presentation available at
+    http://www.sci.brooklyn.cuny.edu/~sklar/teaching/f05/alife/notes/azhar-ipd-Oct19th.pdf
+    that clearly states that Downing defected in the first two rounds, thus this
+    is assumed to be the behaviour. Interestingly, in future tournaments this
+    strategy was revised to not defect on the opening two rounds.
+
+    It is assumed that these first two rounds are used to create an initial
+    estimate of beta = P(C_o | D_s), and we will use the opening play of the
+    player to estimate alpha = P(C_o | C_s).
+    Thus we assume that the opponent's first play is a response to a
+    cooperation "before the match starts".
+
+    So for example, if the plays are:
+
+    [(D, C), (D, C)]
+
+    Then the opponent's first cooperation counts as a cooperation in response to
+    the non-existent cooperation of round 0. The total number of cooperations in
+    response to a cooperation is 1. We need to take into account that extra
+    phantom cooperation to estimate the probability alpha=P(C_o | C_s) as
+    1 / 1 = 1.
+
+    This is an assumption with no clear indication from the literature.
+
+    --
+    This strategy came 10th in Axelrod's original tournament.
 
     Names:
 
-    - Revised Downing: [Axelrod1980]_
+    - Downing: [Axelrod1980]_
     """
 
-    name = "Revised Downing"
+    name = "First by Downing"
    classifier = {
        "memory_depth": float("inf"),
        "stochastic": False,
-        "makes_use_of": set(),
+        "makes_use_of": {"game"},
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }
 
-    def __init__(self, revised: bool = True) -> None:
+    def __init__(self) -> None:
        super().__init__()
-        self.revised = revised
-        self.good = 1.0
-        self.bad = 0.0
-        self.nice1 = 0
-        self.nice2 = 0
-        self.total_C = 0  # note the same as self.cooperations
-        self.total_D = 0  # note the same as self.defections
+        self.number_opponent_cooperations_in_response_to_C = 0
+        self.number_opponent_cooperations_in_response_to_D = 0
 
    def strategy(self, opponent: Player) -> Action:
        round_number = len(self.history) + 1
-        # According to internet sources, the original implementation defected
-        # on the first two moves. Otherwise it wins (if this code is removed
-        # and the comment restored.
-        # http://www.sci.brooklyn.cuny.edu/~sklar/teaching/f05/alife/notes/azhar-ipd-Oct19th.pdf
-
-        if self.revised:
-            if round_number == 1:
-                return C
-        elif not self.revised:
-            if round_number <= 2:
-                return D
-        # Update various counts
-        if round_number > 2:
-            if self.history[-1] == D:
-                if opponent.history[-1] == C:
-                    self.nice2 += 1
-                self.total_D += 1
-                self.bad = self.nice2 / self.total_D
-            else:
-                if opponent.history[-1] == C:
-                    self.nice1 += 1
-                self.total_C += 1
-                self.good = self.nice1 / self.total_C
-        # Make a decision based on the accrued counts
-        c = 6.0 * self.good - 8.0 * self.bad - 2
-        alt = 4.0 * self.good - 5.0 * self.bad - 1
-        if c >= 0 and c >= alt:
-            move = C
-        elif (c >= 0 and c < alt) or (alt >= 0):
-            move = self.history[-1].flip()
-        else:
-            move = D
-        return move
+        if round_number == 1:
+            return D
+        if round_number == 2:
+            if opponent.history[-1] == C:
+                self.number_opponent_cooperations_in_response_to_C += 1
+            return D
+
+        if self.history[-2] == C and opponent.history[-1] == C:
+            self.number_opponent_cooperations_in_response_to_C += 1
+        if self.history[-2] == D and opponent.history[-1] == C:
+            self.number_opponent_cooperations_in_response_to_D += 1
+        alpha = (self.number_opponent_cooperations_in_response_to_C /
+                 (self.cooperations + 1))  # Adding 1 to the count for the
+                                           # assumption that the first opponent
+                                           # move is a response to a
+                                           # cooperation. See docstring for
+                                           # more information.
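+        # Worked example (illustrative, following the docstring): after the
+        # opening plays [(D, C), (D, C)], the opponent's first C counts as a
+        # response to the phantom round 0 cooperation, so at the start of
+        # round 3 alpha = 1 / (0 + 1) = 1 and beta = 1 / 2.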
+        beta = (self.number_opponent_cooperations_in_response_to_D /
+                (self.defections))
 
-class Feld(Player):
+        R, P, S, T = self.match_attributes["game"].RPST()
+        expected_value_of_cooperating = alpha * R + (1 - alpha) * S
+        expected_value_of_defecting = beta * T + (1 - beta) * P
+
+        if expected_value_of_cooperating > expected_value_of_defecting:
+            return C
+        if expected_value_of_cooperating < expected_value_of_defecting:
+            return D
+        return self.history[-1].flip()
+
+
+class FirstByFeld(Player):
     """
     Submitted to Axelrod's first tournament by Scott Feld.
 
+    The description written in [Axelrod1980]_ is:
+
+    > "This rule starts with tit for tat and gradually lowers its probability of
+    > cooperation following the other's cooperation to .5 by the two hundredth
+    > move. It always defects after a defection by the other."
+
     This strategy plays Tit For Tat, always defecting if the opponent defects but
     cooperating when the opponent cooperates with a gradually decreasing probability
-    until it is only .5.
+    until it is only .5. Note that the description does not clearly indicate how
+    the cooperation probability should drop. This implements a linearly
+    decreasing function.
 
     This strategy came 11th in Axelrod's original tournament.
 
@@ -155,7 +305,7 @@ class Feld(Player):
     - Feld: [Axelrod1980]_
     """
 
-    name = "Feld"
+    name = "First by Feld"
    classifier = {
        "memory_depth": 200,  # Varies actually, eventually becomes depth 1
        "stochastic": True,
@@ -206,25 +356,39 @@ def strategy(self, opponent: Player) -> Action:
        return random_choice(p)
 
 
-class Graaskamp(Player):
+class FirstByGraaskamp(Player):
     """
+    Submitted to Axelrod's first tournament by James Graaskamp.
 
-    This is one of the strategies from Robert Axelrod's first tournament and is
-    described in the literature as:
+    The description written in [Axelrod1980]_ is:
+
+    > "This rule plays tit for tat for 50 moves, defects on move 51, and then
+    > plays 5 more moves of tit for tat. A check is then made to see if the player
+    > seems to be RANDOM, in which case it defects from then on. A check is also
+    > made to see if the other is TIT FOR TAT, ANALOGY (a program from the
+    > preliminary tournament), and its own twin, in which case it plays tit for
+    > tat. Otherwise it randomly defects every 5 to 15 moves, hoping that enough
+    > trust has been built up so that the other player will not notice these
+    > defections."
+
+    This is implemented as:
 
     1. Plays Tit For Tat for the first 50 rounds;
     2. Defects on round 51;
     3. Plays 5 further rounds of Tit For Tat;
     4. A check is then made to see if the opponent is playing randomly in which
-       case it defects for the rest of the game;
+       case it defects for the rest of the game. This is implemented with a
+       chi-squared test.
     5. The strategy also checks to see if the opponent is playing Tit For Tat or
-       another strategy from a preliminary tournament called ‘Analogy’ If
+       a clone of itself. If
        so it plays Tit For Tat. If not it cooperates and randomly defects every 5
       to 15 moves.
 
     Note that there is no information about 'Analogy' available thus Step 5 is
-    not implemented fully.
+    a "best possible" interpretation of the description in the paper.
+    Furthermore, the test for the clone is implemented as checking that both
+    players have played the same moves for the entire game. This is unlikely to
+    be the original approach but no further details are available.
 
     This strategy came 9th in Axelrod’s original tournament.
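The randomness checks described for FirstByGraaskamp (step 4 above) and, later, FirstBySteinAndRapoport both rely on a chi-squared test. A minimal sketch of one way such a check can be written follows; the helper name, the 5% threshold and the scipy dependency are illustrative assumptions, not taken from this patch.

from axelrod.action import Action
from scipy.stats import chisquare

C, D = Action.C, Action.D


def looks_random(opponent_history, p_value_threshold=0.05):
    """Return True if the opponent's C/D counts are consistent with a 50-50 coin."""
    observed = [opponent_history.count(C), opponent_history.count(D)]
    # chisquare defaults to uniform expected frequencies; a large p-value
    # means random play cannot be rejected.
    _, p_value = chisquare(observed)
    return p_value >= p_value_threshold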
@@ -233,7 +397,7 @@ class Graaskamp(Player): - Graaskamp: [Axelrod1980]_ """ - name = "Graaskamp" + name = "First by Graaskamp" classifier = { "memory_depth": float("inf"), "stochastic": True, @@ -277,8 +441,8 @@ def strategy(self, opponent: Player) -> Action: if all( opponent.history[i] == self.history[i - 1] for i in range(1, len(self.history)) - ): - # Check if opponent plays Tit for Tat + ) or opponent.history == self.history: + # Check if opponent plays Tit for Tat or a clone of itself. if opponent.history[-1] == D: return D return C @@ -293,14 +457,14 @@ def strategy(self, opponent: Player) -> Action: return C -class Grofman(Player): +class FirstByGrofman(Player): """ Submitted to Axelrod's first tournament by Bernard Grofman. - Cooperate on the first two rounds and - returns the opponent's last action for the next 5. For the rest of the game - Grofman cooperates if both players selected the same action in the previous - round, and otherwise cooperates randomly with probability 2/7. + The description written in [Axelrod1980]_ is: + + > "If the players did different things on the previous move, this rule + > cooperates with probability 2/7. Otherwise this rule always cooperates." This strategy came 4th in Axelrod's original tournament. @@ -309,9 +473,9 @@ class Grofman(Player): - Grofman: [Axelrod1980]_ """ - name = "Grofman" + name = "First by Grofman" classifier = { - "memory_depth": float("inf"), + "memory_depth": 1, "stochastic": True, "makes_use_of": set(), "long_run_time": False, @@ -319,24 +483,20 @@ class Grofman(Player): "manipulates_source": False, "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: - round_number = len(self.history) + 1 - if round_number < 3: - return C - if round_number < 8: - return opponent.history[-1] - if self.history[-1] == opponent.history[-1]: + if len(self.history) == 0 or self.history[-1] == opponent.history[-1]: return C return random_choice(2 / 7) -class Joss(MemoryOnePlayer): +class FirstByJoss(MemoryOnePlayer): """ Submitted to Axelrod's first tournament by Johann Joss. - Cooperates with probability 0.9 when the opponent cooperates, otherwise - emulates Tit-For-Tat. + The description written in [Axelrod1980]_ is: + + > "This rule cooperates 90% of the time after a cooperation by the other. It + > always defects after a defection by the other." This strategy came 12th in Axelrod's original tournament. @@ -346,7 +506,7 @@ class Joss(MemoryOnePlayer): - Hard Joss: [Stewart2012]_ """ - name = "Joss" + name = "First by Joss" def __init__(self, p: float = 0.9) -> None: """ @@ -361,10 +521,26 @@ def __init__(self, p: float = 0.9) -> None: super().__init__(four_vector) -class Nydegger(Player): +class FirstByNydegger(Player): """ Submitted to Axelrod's first tournament by Rudy Nydegger. + The description written in [Axelrod1980]_ is: + + > "The program begins with tit for tat for the first three moves, except + > that if it was the only one to cooperate on the first move and the only one + > to defect on the second move, it defects on the third move. After the third + > move, its choice is determined from the 3 preceding outcomes in the + > following manner. Let A be the sum formed by counting the other's defection + > as 2 points and one's own as 1 point, and giving weights of 16, 4, and 1 to + > the preceding three moves in chronological order. The choice can be + > described as defecting only when A equals 1, 6, 7, 17, 22, 23, 26, 29, 30, + > 31, 33, 38, 39, 45, 49, 54, 55, 58, or 61. 
Thus if all three preceding moves
+    > are mutual defection, A = 63 and the rule cooperates. This rule was
+    > designed for use in laboratory experiments as a stooge which had a memory
+    > and appeared to be trustworthy, potentially cooperative, but not gullible
+    > (Nydegger, 1978)."
+
     The program begins with tit for tat for the first three moves, except
     that if it was the only one to cooperate on the first move and the only
     one to defect on the second move, it defects on the third move. After the
@@ -398,7 +574,7 @@ class Nydegger(Player):
     - Nydegger: [Axelrod1980]_
     """
 
-    name = "Nydegger"
+    name = "First by Nydegger"
    classifier = {
        "memory_depth": 3,
        "stochastic": False,
@@ -410,7 +586,7 @@ class Nydegger(Player):
    }
 
    def __init__(self) -> None:
-        self.As = [1, 6, 7, 17, 22, 23, 26, 29, 30, 31, 33, 38, 39, 45, 54, 55, 58, 61]
+        self.As = [1, 6, 7, 17, 22, 23, 26, 29, 30, 31, 33, 38, 39, 45, 49, 54, 55, 58, 61]
        self.score_map = {(C, C): 0, (C, D): 2, (D, C): 1, (D, D): 3}
        super().__init__()
 
@@ -446,12 +622,37 @@ def strategy(self, opponent: Player) -> Action:
        return C
 
 
-class Shubik(Player):
+class FirstByShubik(Player):
     """
     Submitted to Axelrod's first tournament by Martin Shubik.
 
-    Plays like Tit-For-Tat with the following modification. After each
-    retaliation, the number of rounds that Shubik retaliates increases by 1.
+    The description written in [Axelrod1980]_ is:
+
+    > "This rule cooperates until the other defects, and then defects once. If
+    > the other defects again after the rule's cooperation is resumed, the rule
+    > defects twice. In general, the length of retaliation is increased by one for
+    > each departure from mutual cooperation. This rule is described with its
+    > strategic implications in Shubik (1970). Further treatment of it is given
+    > in Taylor (1976)."
+
+    There is some room for interpretation as to how the strategy reacts to a
+    defection on the turn where it starts to cooperate once more. In Shubik
+    (1970) the strategy is described as:
+
+    > "I will play my move 1 to begin with and will continue to do so, so long
+    > as my information shows that the other player has chosen his move 1. If my
+    > information tells me he has used move 2, then I will use move 2 for the
+    > immediate k subsequent periods, after which I will resume using move 1. If
+    > he uses his move 2 again after I have resumed using move 1, then I will
+    > switch to move 2 for the k + 1 immediately subsequent periods . . . and so
+    > on, increasing my retaliation by an extra period for each departure from the
+    > (1, 1) steady state."
+
+    This is interpreted as:
+
+    The player cooperates; if the opponent defects while the player is
+    cooperating, the player defects for k rounds. After those k rounds it
+    starts cooperating again, and it increments the value of k if the opponent
+    defects again.
 
     This strategy came 5th in Axelrod's original tournament.
 
@@ -460,7 +661,7 @@ class Shubik(Player):
     - Shubik: [Axelrod1980]_
     """
 
-    name = "Shubik"
+    name = "First by Shubik"
    classifier = {
        "memory_depth": float("inf"),
        "stochastic": False,
@@ -488,33 +689,41 @@ def _decrease_retaliation_counter(self):
    def strategy(self, opponent: Player) -> Action:
        if not opponent.history:
            return C
-        if opponent.history[-1] == D:
-            # Retaliate against defections
-            if self.history[-1] == C:  # it's on now!
-                # Lengthen the retaliation period
-                self.is_retaliating = True
-                self.retaliation_length += 1
-                self.retaliation_remaining = self.retaliation_length
-                self._decrease_retaliation_counter()
-                return D
-            else:
-                # Just retaliate
-                if self.is_retaliating:
-                    self._decrease_retaliation_counter()
-                    return D
+        if self.is_retaliating:  # Are we retaliating still?
            self._decrease_retaliation_counter()
            return D
+
+        if opponent.history[-1] == D and self.history[-1] == C:
+            # "If he uses his move 2 again after I have resumed using move 1,
+            # then I will switch to move 2 for the k + 1 immediately subsequent
+            # periods"
+            self.is_retaliating = True
+            self.retaliation_length += 1
+            self.retaliation_remaining = self.retaliation_length
+            self._decrease_retaliation_counter()
+            return D
        return C
 
 
-class Tullock(Player):
+class FirstByTullock(Player):
     """
     Submitted to Axelrod's first tournament by Gordon Tullock.
 
+    The description written in [Axelrod1980]_ is:
+
+    > "This rule cooperates on the first eleven moves. It then cooperates 10%
+    > less than the other player has cooperated on the preceding ten moves. This
+    > rule is based on an idea developed in Overcast and Tullock (1971). Professor
+    > Tullock was invited to specify how the idea could be implemented, and he did
+    > so out of scientific interest rather than an expectation that it would be a
+    > likely winner."
+
+    This is interpreted as:
+
     Cooperates for the first 11 rounds then randomly cooperates 10% less often
-    than the opponent has in previous rounds.
+    than the opponent has in the previous 10 rounds.
 
     This strategy came 13th in Axelrod's original tournament.
 
@@ -523,9 +732,9 @@ class Tullock(Player):
     - Tullock: [Axelrod1980]_
     """
 
-    name = "Tullock"
+    name = "First by Tullock"
    classifier = {
-        "memory_depth": 11,  # long memory, modified by init
+        "memory_depth": float("inf"),
        "stochastic": True,
        "makes_use_of": set(),
        "long_run_time": False,
@@ -534,53 +743,52 @@ class Tullock(Player):
        "manipulates_state": False,
    }
 
-    def __init__(self, rounds_to_cooperate: int = 11) -> None:
-        """
-        Parameters
-        ----------
-        rounds_to_cooperate: int, 10
-            The number of rounds to cooperate initially
-        """
+    def __init__(self) -> None:
        super().__init__()
-        self._rounds_to_cooperate = rounds_to_cooperate
-        self.memory_depth = rounds_to_cooperate
+        self._rounds_to_cooperate = 11
+        self.memory_depth = self._rounds_to_cooperate
 
    def strategy(self, opponent: Player) -> Action:
-        rounds = self._rounds_to_cooperate
-        if len(self.history) < rounds:
+        if len(self.history) < self._rounds_to_cooperate:
            return C
+        rounds = self._rounds_to_cooperate - 1
        cooperate_count = opponent.history[-rounds:].count(C)
        prop_cooperate = cooperate_count / rounds
        prob_cooperate = max(0, prop_cooperate - 0.10)
        return random_choice(prob_cooperate)
 
 
-class UnnamedStrategy(Player):
-    """Apparently written by a grad student in political science whose name was
-    withheld, this strategy cooperates with a given probability P. This
-    probability (which has initial value .3) is updated every 10 rounds based on
-    whether the opponent seems to be random, very cooperative or very
-    uncooperative. Furthermore, if after round 130 the strategy is losing then P
-    is also adjusted.
+class FirstByAnonymous(Player):
+    """
+    Submitted to Axelrod's first tournament by a graduate student whose name was
+    withheld.
 
-    Fourteenth Place with 282.2 points is a 77-line program by a graduate
-    student of political science whose dissertation is in game theory.
This rule
-    has a probability of cooperating, P, which is initially 30% and is updated
-    every 10 moves. P is adjusted if the other player seems random, very
-    cooperative, or very uncooperative. P is also adjusted after move 130 if the
-    rule has a lower score than the other player. Unfortunately, the complex
-    process of adjustment frequently left the probability of cooperation in the
-    30% to 70% range, and therefore the rule appeared random to many other players.
+    The description written in [Axelrod1980]_ is:
 
-    Names:
+    > "This rule has a probability of cooperating, P, which is initially 30% and
+    > is updated every 10 moves. P is adjusted if the other player seems random,
+    > very cooperative, or very uncooperative. P is also adjusted after move 130
+    > if the rule has a lower score than the other player. Unfortunately, the
+    > complex process of adjustment frequently left the probability of cooperation
+    > in the 30% to 70% range, and therefore the rule appeared random to many
+    > other players."
 
-    - Unnamed Strategy: [Axelrod1980]_
+    Given the lack of detail, this strategy is implemented based on the final
+    sentence of the description, which is to have a cooperation probability that
+    is uniformly random in the 30% to 70% range.
 
-    Warning: This strategy is not identical to the original strategy (source
-    unavailable) and was written based on published descriptions.
+    Names:
+
+    - (Name withheld): [Axelrod1980]_
     """
 
-    name = "Unnamed Strategy"
+    name = "First by Anonymous"
    classifier = {
        "memory_depth": 0,
        "stochastic": True,
@@ -598,14 +806,25 @@ def strategy(opponent: Player) -> Action:
 
 
 @FinalTransformer((D, D), name_prefix=None)
-class SteinAndRapoport(Player):
-    """This strategy plays a modification of Tit For Tat.
+class FirstBySteinAndRapoport(Player):
+    """
+    Submitted to Axelrod's first tournament by William Stein and Amnon Rapoport.
+
+    The description written in [Axelrod1980]_ is:
+
+    > "This rule plays tit for tat except that it cooperates on the first four
+    > moves, it defects on the last two moves, and every fifteen moves it checks
+    > to see if the opponent seems to be playing randomly. This check uses a
+    > chi-squared test of the other's transition probabilities and also checks for
+    > alternating moves of CD and DC."
+
+    This is implemented as follows:
 
     1. It cooperates for the first 4 moves.
     2. It defects on the last 2 moves.
     3. Every 15 moves it makes use of a `chi-squared test
        <http://en.wikipedia.org/wiki/Chi-squared_test>`_ to check if the
-       opponent is playing randomly.
+       opponent is playing randomly. If so it defects.
 
     This strategy came 6th in Axelrod's original tournament.
 
@@ -614,7 +833,7 @@ class SteinAndRapoport(Player):
     - SteinAndRapoport: [Axelrod1980]_
     """
 
-    name = "Stein and Rapoport"
+    name = "First by Stein and Rapoport"
    classifier = {
        "memory_depth": float("inf"),
        "stochastic": False,
@@ -658,10 +877,26 @@ def strategy(self, opponent: Player) -> Action:
        return opponent.history[-1]
 
 
-class TidemanAndChieruzzi(Player):
+@FinalTransformer((D, D), name_prefix=None)
+class FirstByTidemanAndChieruzzi(Player):
     """
-    This strategy begins by playing Tit For Tat and then follows the following
-    rules:
+    Submitted to Axelrod's first tournament by Nicolas Tideman and Paula
+    Chieruzzi.
+
+    The description written in [Axelrod1980]_ is:
+
+    > "This rule begins with cooperation and tit for tat. However, when the
+    > other player finishes his second run of defections, an extra punishment is
+    > instituted, and the number of punishing defections is increased by one with
+    > each run of the other's defections.
The other player is given a fresh start
+    > if he is 10 or more points behind, if he has not just started a run of
+    > defections, if it has been at least 20 moves since a fresh start, if there
+    > are at least 10 moves remaining, and if the number of defections differs
+    > from a 50-50 random generator by at least 3.0 standard deviations. A fresh
+    > start involves two cooperations and then play as if the game had just
+    > started. The program defects automatically on the last two moves."
+
+    This is interpreted as:
 
     1. Every run of defections played by the opponent increases the number of
     defections that this strategy retaliates with by 1.
@@ -674,8 +909,10 @@ class TidemanAndChieruzzi(Player):
       - and the total number of defections differs from a 50-50 random sample
        by at least 3.0 standard deviations.
 
-    A ‘fresh start’ is a sequence of two cooperations followed by an assumption
-    that the game has just started (everything is forgotten).
+    A ‘fresh start’ is a sequence of two cooperations followed by an assumption
+    that the game has just started (everything is forgotten).
+
+    3. The strategy defects on the last two moves.
 
     This strategy came 2nd in Axelrod’s original tournament.
 
@@ -684,7 +921,7 @@ class TidemanAndChieruzzi(Player):
     - TidemanAndChieruzzi: [Axelrod1980]_
     """
 
-    name = "Tideman and Chieruzzi"
+    name = "First by Tideman and Chieruzzi"
    classifier = {
        "memory_depth": float("inf"),
        "stochastic": False,
@@ -704,6 +941,7 @@ def __init__(self) -> None:
        self.opponent_score = 0
        self.last_fresh_start = 0
        self.fresh_start = False
+        self.remembered_number_of_opponent_defections = 0
 
    def _decrease_retaliation_counter(self):
        """Lower the remaining owed retaliation count and flip to non-retaliate
@@ -718,6 +956,7 @@ def _fresh_start(self):
        self.is_retaliating = False
        self.retaliation_length = 0
        self.retaliation_remaining = 0
+        self.remembered_number_of_opponent_defections = 0
 
@@ -732,6 +971,9 @@ def _score_last_round(self, opponent: Player):
        """Updates the scores for each player."""
 
    def strategy(self, opponent: Player) -> Action:
        if not opponent.history:
            return C
 
+        if opponent.history[-1] == D:
+            self.remembered_number_of_opponent_defections += 1
+
        # Calculate the scores.
        self._score_last_round(opponent)
 
@@ -759,7 +1001,8 @@ def strategy(self, opponent: Player) -> Action:
            std_deviation = (N ** (1 / 2)) / 2
            lower = N / 2 - 3 * std_deviation
            upper = N / 2 + 3 * std_deviation
-            if opponent.defections <= lower or opponent.defections >= upper:
+            if (self.remembered_number_of_opponent_defections <= lower or
+                    self.remembered_number_of_opponent_defections >= upper):
                # Opponent deserves a fresh start
                self.last_fresh_start = current_round
                self._fresh_start()
diff --git a/axelrod/strategies/axelrod_second.py b/axelrod/strategies/axelrod_second.py
index fb4bb07de..50e4e18ee 100644
--- a/axelrod/strategies/axelrod_second.py
+++ b/axelrod/strategies/axelrod_second.py
@@ -1,5 +1,7 @@
 """
-Additional strategies from Axelrod's second tournament.
+Strategies from Axelrod's second tournament. All strategies in this module are
+prefixed by `SecondBy` to indicate that they were submitted in Axelrod's Second
+tournament by the given author.
 """
 
 import random
@@ -15,7 +17,7 @@
 C, D = Action.C, Action.D
 
 
-class Champion(Player):
+class SecondByChampion(Player):
     """
     Strategy submitted to Axelrod's second tournament by Danny Champion.
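The fresh-start deviation test in FirstByTidemanAndChieruzzi above can be checked by hand. A small worked example follows; the concrete value N = 64 is an assumption for illustration.

N = 64                              # moves played so far
std_deviation = (N ** (1 / 2)) / 2  # sd of the defection count of a 50-50 coin
lower = N / 2 - 3 * std_deviation   # 20.0
upper = N / 2 + 3 * std_deviation   # 44.0
# A remembered opponent defection count <= 20 or >= 44, together with the
# other listed conditions, qualifies the opponent for a fresh start.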
@@ -30,7 +32,7 @@ class Champion(Player): - Champion: [Axelrod1980b]_ """ - name = "Champion" + name = "Second by Champion" classifier = { "memory_depth": float("inf"), "stochastic": True, @@ -59,8 +61,7 @@ def strategy(self, opponent: Player) -> Action: return D return C - -class Eatherley(Player): +class SecondByEatherley(Player): """ Strategy submitted to Axelrod's second tournament by Graham Eatherley. @@ -74,7 +75,7 @@ class Eatherley(Player): - Eatherley: [Axelrod1980b]_ """ - name = "Eatherley" + name = "Second by Eatherley" classifier = { "memory_depth": float("inf"), "stochastic": True, @@ -99,7 +100,7 @@ def strategy(opponent: Player) -> Action: return random_choice(1 - defection_prop) -class Tester(Player): +class SecondByTester(Player): """ Submitted to Axelrod's second tournament by David Gladstein. @@ -115,7 +116,7 @@ class Tester(Player): - Tester: [Axelrod1980b]_ """ - name = "Tester" + name = "Second by Tester" classifier = { "memory_depth": float("inf"), "stochastic": False, @@ -148,7 +149,7 @@ def strategy(self, opponent: Player) -> Action: return self.history[-1].flip() -class Gladstein(Player): +class SecondByGladstein(Player): """ Submitted to Axelrod's second tournament by David Gladstein. @@ -168,7 +169,7 @@ class Gladstein(Player): - Tester: [Axelrod1980b]_ """ - name = "Gladstein" + name = "Second by Gladstein" classifier = { "memory_depth": float("inf"), "stochastic": False, @@ -204,7 +205,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] -class Tranquilizer(Player): +class SecondByTranquilizer(Player): """ Submitted to Axelrod's second tournament by Craig Feathers @@ -316,7 +317,7 @@ class Tranquilizer(Player): - Tranquilizer: [Axelrod1980]_ """ - name = "Tranquilizer" + name = "Second by Tranquilizer" classifier = { "memory_depth": float("inf"), "stochastic": True, @@ -419,7 +420,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] -class MoreGrofman(Player): +class SecondByGrofman(Player): """ Submitted to Axelrod's second tournament by Bernard Grofman. @@ -447,7 +448,7 @@ class MoreGrofman(Player): - K86R: [Axelrod1980b]_ """ - name = "MoreGrofman" + name = "Second by Grofman" classifier = { "memory_depth": 8, "stochastic": False, @@ -477,7 +478,7 @@ def strategy(self, opponent: Player) -> Action: return D -class Kluepfel(Player): +class SecondByKluepfel(Player): """ Strategy submitted to Axelrod's second tournament by Charles Kluepfel (K32R). @@ -511,7 +512,7 @@ class Kluepfel(Player): - Kluepfel: [Axelrod1980b]_ """ - name = "Kluepfel" + name = "Second by Kluepfel" classifier = { "memory_depth": float("inf"), "stochastic": True, @@ -582,7 +583,7 @@ def strategy(self, opponent: Player) -> Action: return one_move_ago.flip() -class Borufsen(Player): +class SecondByBorufsen(Player): """ Strategy submitted to Axelrod's second tournament by Otto Borufsen (K32R), and came in third in that tournament. @@ -623,7 +624,7 @@ class Borufsen(Player): - Borufsen: [Axelrod1980b]_ """ - name = "Borufsen" + name = "Second by Borufsen" classifier = { "memory_depth": float("inf"), "stochastic": False, @@ -738,7 +739,7 @@ def strategy(self, opponent: Player) -> Action: return self.try_return(opponent.history[-1]) -class Cave(Player): +class SecondByCave(Player): """ Strategy submitted to Axelrod's second tournament by Rob Cave (K49R), and came in fourth in that tournament. 
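Because these are plain renames, downstream code that imported the old names will break. The patch's own calculator.py change (further below) shows the aliasing pattern; a hypothetical migration shim for third-party code, not part of this patch, would look like:

# Alias old second-tournament names to the renamed classes.
from axelrod.strategies.axelrod_second import (
    SecondByChampion as Champion,
    SecondByEatherley as Eatherley,
    SecondByTester as Tester,
)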
@@ -759,7 +760,7 @@ class Cave(Player): - Cave: [Axelrod1980b]_ """ - name = "Cave" + name = "Second by Cave" classifier = { "memory_depth": float("inf"), "stochastic": True, @@ -795,7 +796,7 @@ def strategy(self, opponent: Player) -> Action: return C -class WmAdams(Player): +class SecondByWmAdams(Player): """ Strategy submitted to Axelrod's second tournament by William Adams (K44R), and came in fifth in that tournament. @@ -810,7 +811,7 @@ class WmAdams(Player): - WmAdams: [Axelrod1980b]_ """ - name = "WmAdams" + name = "Second by WmAdams" classifier = { "memory_depth": float("inf"), "stochastic": True, @@ -835,7 +836,7 @@ def strategy(self, opponent: Player) -> Action: return C -class GraaskampKatzen(Player): +class SecondByGraaskampKatzen(Player): """ Strategy submitted to Axelrod's second tournament by Jim Graaskamp and Ken Katzen (K60R), and came in sixth in that tournament. @@ -857,7 +858,7 @@ class GraaskampKatzen(Player): - GraaskampKatzen: [Axelrod1980b]_ """ - name = "GraaskampKatzen" + name = "Second by GraaskampKatzen" classifier = { "memory_depth": float("inf"), "stochastic": False, @@ -908,7 +909,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] # Tit-for-Tat -class Weiner(Player): +class SecondByWeiner(Player): """ Strategy submitted to Axelrod's second tournament by Herb Weiner (K41R), and came in seventh in that tournament. @@ -940,7 +941,7 @@ class Weiner(Player): - Weiner: [Axelrod1980b]_ """ - name = "Weiner" + name = "Second by Weiner" classifier = { "memory_depth": float("inf"), "stochastic": False, @@ -1000,7 +1001,7 @@ def strategy(self, opponent: Player) -> Action: return self.try_return(opponent.history[-1]) -class Harrington(Player): +class SecondByHarrington(Player): """ Strategy submitted to Axelrod's second tournament by Paul Harrington (K75R) and came in eighth in that tournament. @@ -1093,7 +1094,7 @@ class Harrington(Player): - Harrington: [Axelrod1980b]_ """ - name = "Harrington" + name = "Second by Harrington" classifier = { "memory_depth": float("inf"), "stochastic": True, @@ -1339,7 +1340,7 @@ def strategy(self, opponent: Player) -> Action: return self.try_return(D, lower_flags=False) -class MoreTidemanAndChieruzzi(Player): +class SecondByTidemanAndChieruzzi(Player): """ Strategy submitted to Axelrod's second tournament by T. Nicolaus Tideman and Paula Chieruzzi (K84R) and came in ninth in that tournament. @@ -1364,10 +1365,10 @@ class MoreTidemanAndChieruzzi(Player): Names: - - MoreTidemanAndChieruzzi: [Axelrod1980b]_ + - TidemanAndChieruzzi: [Axelrod1980b]_ """ - name = "More Tideman and Chieruzzi" + name = "Second by Tideman and Chieruzzi" classifier = { "memory_depth": float("inf"), "stochastic": False, @@ -1450,7 +1451,7 @@ def strategy(self, opponent: Player) -> Action: return D -class Getzler(Player): +class SecondByGetzler(Player): """ Strategy submitted to Axelrod's second tournament by Abraham Getzler (K35R) and came in eleventh in that tournament. @@ -1463,7 +1464,7 @@ class Getzler(Player): - Getzler: [Axelrod1980b]_ """ - name = "Getzler" + name = "Second by Getzler" classifier = { "memory_depth": float("inf"), "stochastic": True, @@ -1488,7 +1489,7 @@ def strategy(self, opponent: Player) -> Action: return random_choice(1.0 - self.flack) -class Leyvraz(Player): +class SecondByLeyvraz(Player): """ Strategy submitted to Axelrod's second tournament by Fransois Leyvraz (K68R) and came in twelfth in that tournament. 
@@ -1507,7 +1508,7 @@ class Leyvraz(Player): - Leyvraz: [Axelrod1980b]_ """ - name = "Leyvraz" + name = "Second by Leyvraz" classifier = { "memory_depth": 3, "stochastic": True, @@ -1542,7 +1543,7 @@ def strategy(self, opponent: Player) -> Action: ) -class White(Player): +class SecondByWhite(Player): """ Strategy submitted to Axelrod's second tournament by Edward C White (K72R) and came in thirteenth in that tournament. @@ -1557,7 +1558,7 @@ class White(Player): - White: [Axelrod1980b]_ """ - name = "White" + name = "Second by White" classifier = { "memory_depth": float("inf"), "stochastic": False, @@ -1579,7 +1580,7 @@ def strategy(self, opponent: Player) -> Action: return C -class Black(Player): +class SecondByBlack(Player): """ Strategy submitted to Axelrod's second tournament by Paul E Black (K83R) and came in fifteenth in that tournament. @@ -1595,7 +1596,7 @@ class Black(Player): - Black: [Axelrod1980b]_ """ - name = "Black" + name = "Second by Black" classifier = { "memory_depth": 5, "stochastic": True, @@ -1624,7 +1625,7 @@ def strategy(self, opponent: Player) -> Action: return random_choice(self.prob_coop[number_defects]) -class RichardHufford(Player): +class SecondByRichardHufford(Player): """ Strategy submitted to Axelrod's second tournament by Richard Hufford (K47R) and came in sixteenth in that tournament. @@ -1667,7 +1668,7 @@ class RichardHufford(Player): - RichardHufford: [Axelrod1980b]_ """ - name = "RichardHufford" + name = "Second by RichardHufford" classifier = { "memory_depth": float("inf"), "stochastic": False, @@ -1738,7 +1739,7 @@ def strategy(self, opponent: Player) -> Action: return D -class Yamachi(Player): +class SecondByYamachi(Player): """ Strategy submitted to Axelrod's second tournament by Brian Yamachi (K64R) and came in seventeenth in that tournament. @@ -1764,7 +1765,7 @@ class Yamachi(Player): - Yamachi: [Axelrod1980b]_ """ - name = "Yamachi" + name = "Second by Yamachi" classifier = { "memory_depth": float("inf"), "stochastic": False, @@ -1835,7 +1836,7 @@ def strategy(self, opponent: Player) -> Action: return self.try_return(D, opponent.defections) -class Colbert(FSMPlayer): +class SecondByColbert(FSMPlayer): """ Strategy submitted to Axelrod's second tournament by William Colbert (K51R) and came in eighteenth in that tournament. @@ -1851,7 +1852,7 @@ class Colbert(FSMPlayer): - Colbert: [Axelrod1980b]_ """ - name = "Colbert" + name = "Second by Colbert" classifier = { "memory_depth": 4, "stochastic": False, @@ -1891,7 +1892,7 @@ def __init__(self) -> None: super().__init__(transitions=transitions, initial_state=0, initial_action=C) -class Mikkelson(FSMPlayer): +class SecondByMikkelson(FSMPlayer): """ Strategy submitted to Axelrod's second tournament by Ray Mikkelson (K66R) and came in twentieth in that tournament. @@ -1914,7 +1915,7 @@ class Mikkelson(FSMPlayer): - Mikkelson: [Axelrod1980b]_ """ - name = "Mikkelson" + name = "Second by Mikkelson" classifier = { "memory_depth": float("inf"), "stochastic": False, @@ -1955,7 +1956,7 @@ def strategy(self, opponent: Player) -> Action: return C -class Rowsam(Player): +class SecondByRowsam(Player): """ Strategy submitted to Axelrod's second tournament by Glen Rowsam (K58R) and came in 21st in that tournament. 
@@ -1988,7 +1989,7 @@ class Rowsam(Player):
     - Rowsam: [Axelrod1980b]_
     """
 
-    name = "Rowsam"
+    name = "Second by Rowsam"
    classifier = {
        "memory_depth": float("inf"),
        "stochastic": False,
@@ -2060,7 +2061,7 @@ def strategy(self, opponent: Player) -> Action:
        return D
 
 
-class Appold(Player):
+class SecondByAppold(Player):
     """
     Strategy submitted to Axelrod's second tournament by Scott Appold (K88R) and
     came in 22nd in that tournament.
@@ -2083,7 +2084,7 @@ class Appold(Player):
     - Appold: [Axelrod1980b]_
     """
 
-    name = "Appold"
+    name = "Second by Appold"
    classifier = {
        "memory_depth": float("inf"),
        "stochastic": True,
diff --git a/axelrod/strategies/calculator.py b/axelrod/strategies/calculator.py
index 6129bd398..8ac9b59d0 100644
--- a/axelrod/strategies/calculator.py
+++ b/axelrod/strategies/calculator.py
@@ -2,7 +2,7 @@
 from axelrod.action import Action
 from axelrod.player import Player
 
-from .axelrod_first import Joss
+from .axelrod_first import FirstByJoss as Joss
 
 C, D = Action.C, Action.D
 
diff --git a/axelrod/strategies/revised_downing.py b/axelrod/strategies/revised_downing.py
new file mode 100644
index 000000000..2304af708
--- /dev/null
+++ b/axelrod/strategies/revised_downing.py
@@ -0,0 +1,75 @@
+"""
+Revised Downing implemented from the Fortran source code for the second of
+Axelrod's tournaments.
+"""
+from axelrod.action import Action
+from axelrod.player import Player
+
+C, D = Action.C, Action.D
+
+
+class RevisedDowning(Player):
+    """
+    Strategy submitted to Axelrod's second tournament by Leslie Downing (K59R).
+
+    Revised Downing attempts to determine if players are cooperative or not.
+    If so, it cooperates with them.
+
+    This strategy is a revision of the strategy submitted by Downing to
+    Axelrod's first tournament.
+
+    Names:
+
+    - Revised Downing: [Axelrod1980]_
+    """
+
+    name = "Revised Downing"
+
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.good = 1.0
+        self.bad = 0.0
+        self.nice1 = 0
+        self.nice2 = 0
+        self.total_C = 0  # note the same as self.cooperations
+        self.total_D = 0  # note the same as self.defections
+
+    def strategy(self, opponent: Player) -> Action:
+        round_number = len(self.history) + 1
+
+        if round_number == 1:
+            return C
+
+        # Update various counts
+        if round_number > 2:
+            if self.history[-1] == D:
+                if opponent.history[-1] == C:
+                    self.nice2 += 1
+                self.total_D += 1
+                self.bad = self.nice2 / self.total_D
+            else:
+                if opponent.history[-1] == C:
+                    self.nice1 += 1
+                self.total_C += 1
+                self.good = self.nice1 / self.total_C
+        # Make a decision based on the accrued counts
+        c = 6.0 * self.good - 8.0 * self.bad - 2
+        alt = 4.0 * self.good - 5.0 * self.bad - 1
+        if c >= 0 and c >= alt:
+            move = C
+        elif (c >= 0 and c < alt) or (alt >= 0):
+            move = self.history[-1].flip()
+        else:
+            move = D
+        return move
+
diff --git a/axelrod/tests/strategies/test_axelrod_first.py b/axelrod/tests/strategies/test_axelrod_first.py
index 46dfac4a7..9198141c2 100644
--- a/axelrod/tests/strategies/test_axelrod_first.py
+++ b/axelrod/tests/strategies/test_axelrod_first.py
@@ -7,10 +7,10 @@
 C, D = axelrod.Action.C, axelrod.Action.D
 
 
-class TestDavis(TestPlayer):
+class TestFirstByDavis(TestPlayer):
 
-    name = "Davis: 10"
-    player = axelrod.Davis
+    name = "First by Davis: 10"
+    player = axelrod.FirstByDavis
    expected_classifier = {
        "memory_depth": float("inf"),
"stochastic": False, @@ -43,14 +43,14 @@ def test_strategy(self): self.versus_test(opponent, expected_actions=actions) -class TestRevisedDowning(TestPlayer): +class TestFirstByDowning(TestPlayer): - name = "Revised Downing: True" - player = axelrod.RevisedDowning + name = "First by Downing" + player = axelrod.FirstByDowning expected_classifier = { "memory_depth": float("inf"), "stochastic": False, - "makes_use_of": set(), + "makes_use_of": {"game"}, "long_run_time": False, "inspects_source": False, "manipulates_source": False, @@ -58,40 +58,33 @@ class TestRevisedDowning(TestPlayer): } def test_strategy(self): - actions = [(C, C), (C, C), (C, C)] + actions = [(D, C), (D, C), (C, C)] self.versus_test(axelrod.Cooperator(), expected_actions=actions) - actions = [(C, D), (C, D), (D, D)] + actions = [(D, D), (D, D), (D, D)] self.versus_test(axelrod.Defector(), expected_actions=actions) opponent = axelrod.MockPlayer(actions=[D, C, C]) - actions = [(C, D), (C, C), (C, C), (C, D)] + actions = [(D, D), (D, C), (D, C), (D, D)] self.versus_test(opponent, expected_actions=actions) opponent = axelrod.MockPlayer(actions=[D, D, C]) - actions = [(C, D), (C, D), (D, C), (D, D)] + actions = [(D, D), (D, D), (D, C), (D, D)] self.versus_test(opponent, expected_actions=actions) opponent = axelrod.MockPlayer(actions=[C, C, D, D, C, C]) - actions = [(C, C), (C, C), (C, D), (C, D), (D, C), (D, C), (D, C)] + actions = [(D, C), (D, C), (C, D), (D, D), (D, C), (D, C), (D, C)] self.versus_test(opponent, expected_actions=actions) opponent = axelrod.MockPlayer(actions=[C, C, C, C, D, D]) - actions = [(C, C), (C, C), (C, C), (C, C), (C, D), (C, D), (C, C)] + actions = [(D, C), (D, C), (C, C), (D, C), (D, D), (C, D), (D, C)] self.versus_test(opponent, expected_actions=actions) - def test_not_revised(self): - # Test not revised - player = self.player(revised=False) - opponent = axelrod.Cooperator() - match = axelrod.Match((player, opponent), turns=2) - self.assertEqual(match.play(), [(D, C), (D, C)]) - -class TestFeld(TestPlayer): +class TestFirstByFeld(TestPlayer): - name = "Feld: 1.0, 0.5, 200" - player = axelrod.Feld + name = "First by Feld: 1.0, 0.5, 200" + player = axelrod.FirstByFeld expected_classifier = { "memory_depth": 200, "stochastic": True, @@ -144,10 +137,10 @@ def test_strategy(self): self.versus_test(axelrod.Defector(), expected_actions=actions) -class TestGraaskamp(TestPlayer): +class TestFirstByGraaskamp(TestPlayer): - name = "Graaskamp: 0.05" - player = axelrod.Graaskamp + name = "First by Graaskamp: 0.05" + player = axelrod.FirstByGraaskamp expected_classifier = { "memory_depth": float("inf"), "stochastic": True, @@ -242,12 +235,12 @@ def test_strategy(self): ) -class TestGrofman(TestPlayer): +class TestFirstByGrofman(TestPlayer): - name = "Grofman" - player = axelrod.Grofman + name = "First by Grofman" + player = axelrod.FirstByGrofman expected_classifier = { - "memory_depth": float("inf"), + "memory_depth": 1, "stochastic": True, "makes_use_of": set(), "long_run_time": False, @@ -264,18 +257,18 @@ def test_strategy(self): self.versus_test(axelrod.Alternator(), expected_actions=actions) opponent = axelrod.MockPlayer(actions=[D] * 8) - actions = [(C, D)] * 2 + [(D, D)] * 5 + [(C, D)] + [(C, D)] + actions = [(C, D), (C, D), (D, D), (C, D), (D, D), (C, D), (C, D), (D, D)] self.versus_test(opponent, expected_actions=actions, seed=1) opponent = axelrod.MockPlayer(actions=[D] * 8) - actions = [(C, D)] * 2 + [(D, D)] * 5 + [(C, D)] + [(D, D)] + actions = [(C, D), (D, D), (C, D), (D, D), (C, D), (C, D), (C, 
D), (D, D)]
        self.versus_test(opponent, expected_actions=actions, seed=2)
 
 
-class TestJoss(TestPlayer):
+class TestFirstByJoss(TestPlayer):
 
-    name = "Joss: 0.9"
-    player = axelrod.Joss
+    name = "First by Joss: 0.9"
+    player = axelrod.FirstByJoss
    expected_classifier = {
        "memory_depth": 1,
        "stochastic": True,
@@ -304,10 +297,10 @@ def test_strategy(self):
        self.versus_test(axelrod.Defector(), expected_actions=actions, seed=2)
 
 
-class TestNydegger(TestPlayer):
+class TestFirstByNydegger(TestPlayer):
 
-    name = "Nydegger"
-    player = axelrod.Nydegger
+    name = "First by Nydegger"
+    player = axelrod.FirstByNydegger
    expected_classifier = {
        "memory_depth": 3,
        "stochastic": False,
@@ -355,10 +348,10 @@ def test_strategy(self):
        self.versus_test(opponent, expected_actions=actions)
 
 
-class TestShubik(TestPlayer):
+class TestFirstByShubik(TestPlayer):
 
-    name = "Shubik"
-    player = axelrod.Shubik
+    name = "First by Shubik"
+    player = axelrod.FirstByShubik
    expected_classifier = {
        "memory_depth": float("inf"),
        "stochastic": False,
@@ -399,17 +392,17 @@ def test_strategy(self):
            (D, D),
            (D, C),
            (D, D),
-            (D, C),
+            (C, C),
        ]
        self.versus_test(opponent, expected_actions=actions)
 
 
-class TestTullock(TestPlayer):
+class TestFirstByTullock(TestPlayer):
 
-    name = "Tullock: 11"
-    player = axelrod.Tullock
+    name = "First by Tullock"
+    player = axelrod.FirstByTullock
    expected_classifier = {
-        "memory_depth": 11,
+        "memory_depth": float("inf"),
        "stochastic": True,
        "makes_use_of": set(),
        "long_run_time": False,
@@ -448,10 +441,10 @@ def test_strategy(self):
        self.versus_test(opponent, expected_actions=actions, seed=2)
 
 
-class TestUnnamedStrategy(TestPlayer):
+class TestFirstByAnonymous(TestPlayer):
 
-    name = "Unnamed Strategy"
-    player = axelrod.UnnamedStrategy
+    name = "First by Anonymous"
+    player = axelrod.FirstByAnonymous
    expected_classifier = {
        "memory_depth": 0,
        "stochastic": True,
@@ -470,10 +463,10 @@ def test_strategy(self):
        self.versus_test(axelrod.Cooperator(), expected_actions=actions, seed=10)
 
 
-class TestSteinAndRapoport(TestPlayer):
+class TestFirstBySteinAndRapoport(TestPlayer):
 
-    name = "Stein and Rapoport: 0.05: (D, D)"
-    player = axelrod.SteinAndRapoport
+    name = "First by Stein and Rapoport: 0.05: (D, D)"
+    player = axelrod.FirstBySteinAndRapoport
    expected_classifier = {
        "memory_depth": float("inf"),
        "long_run_time": False,
@@ -553,10 +546,10 @@ def test_strategy(self):
        )
 
 
-class TestTidemanAndChieruzzi(TestPlayer):
+class TestFirstByTidemanAndChieruzzi(TestPlayer):
 
-    name = "Tideman and Chieruzzi"
-    player = axelrod.TidemanAndChieruzzi
+    name = "First by Tideman and Chieruzzi: (D, D)"
+    player = axelrod.FirstByTidemanAndChieruzzi
    expected_classifier = {
        "memory_depth": float("inf"),
        "stochastic": False,
@@ -570,9 +563,15 @@ class TestTidemanAndChieruzzi(TestPlayer):
    def test_strategy(self):
        # Cooperator Test
        opponent = axelrod.Cooperator()
-        actions = [(C, C), (C, C), (C, C), (C, C)]
+        actions = [(C, C), (C, C), (D, C), (D, C)]
        self.versus_test(opponent, expected_actions=actions)
 
+        # Cooperator test: does not defect if game length is unknown
+        opponent = axelrod.Cooperator()
+        actions = [(C, C), (C, C), (C, C), (C, C)]
+        self.versus_test(opponent, expected_actions=actions,
+                         match_attributes={"length": float("inf")})
+
        # Defector Test
        opponent = axelrod.Defector()
        actions = [(C, D), (D, D), (D, D), (D, D)]
@@ -589,7 +588,7 @@ def test_strategy(self):
            (D, C),
            (D, D),
            (D, C),
-            (C, D),
+            (D, D),
            (D, C),
        ]
        self.versus_test(
@@ -743,7 +742,7 @@ def test_strategy(self):
            (D, D),
            (D, D),
            (D, C),
-            (C, D),
+            (D, D),
            (D, 
D), ] @@ -753,7 +752,7 @@ def test_strategy(self): # Check the fresh start condition opponent = axelrod.TitForTat() - actions = [(C, C), (C, C), (C, C), (C, C)] + actions = [(C, C), (C, C), (D, C), (D, D)] self.versus_test( opponent, expected_actions=actions, attrs={"fresh_start": False} ) @@ -794,16 +793,16 @@ def test_strategy(self): (D, C), (D, C), (C, C), - (C, C), - (C, D), + (D, C), + (D, D), ] self.versus_test( opponent, expected_actions=actions, match_attributes={"length": 35}, attrs={ - "current_score": 108, - "opponent_score": 78, + "current_score": 110, + "opponent_score": 75, "last_fresh_start": 24, "retaliation_length": 2, "retaliation_remaining": 0, diff --git a/axelrod/tests/strategies/test_axelrod_second.py b/axelrod/tests/strategies/test_axelrod_second.py index 53e9994ba..5a7415f35 100644 --- a/axelrod/tests/strategies/test_axelrod_second.py +++ b/axelrod/tests/strategies/test_axelrod_second.py @@ -11,8 +11,8 @@ class TestChampion(TestPlayer): - name = "Champion" - player = axelrod.Champion + name = "Second by Champion" + player = axelrod.SecondByChampion expected_classifier = { "memory_depth": float("inf"), "stochastic": True, @@ -46,8 +46,8 @@ def test_strategy(self): class TestEatherley(TestPlayer): - name = "Eatherley" - player = axelrod.Eatherley + name = "Second by Eatherley" + player = axelrod.SecondByEatherley expected_classifier = { "memory_depth": float("inf"), "stochastic": True, @@ -83,8 +83,8 @@ def test_strategy(self): class TestTester(TestPlayer): - name = "Tester" - player = axelrod.Tester + name = "Second by Tester" + player = axelrod.SecondByTester expected_classifier = { "memory_depth": float("inf"), "stochastic": False, @@ -116,8 +116,8 @@ def test_strategy(self): class TestGladstein(TestPlayer): - name = "Gladstein" - player = axelrod.Gladstein + name = "Second by Gladstein" + player = axelrod.SecondByGladstein expected_classifier = { "memory_depth": float("inf"), "stochastic": False, @@ -159,8 +159,8 @@ def test_strategy(self): class TestTranquilizer(TestPlayer): - name = "Tranquilizer" - player = axelrod.Tranquilizer + name = "Second by Tranquilizer" + player = axelrod.SecondByTranquilizer expected_classifier = { "memory_depth": float("inf"), "stochastic": True, @@ -175,7 +175,7 @@ class TestTranquilizer(TestPlayer): def test_init(self): - player = axelrod.Tranquilizer() + player = axelrod.SecondByTranquilizer() self.assertEqual(player.num_turns_after_good_defection, 0) self.assertEqual(player.opponent_consecutive_defections, 0) @@ -355,10 +355,10 @@ def test_strategy(self): self.versus_test(opponent, expected_actions=actions, attrs=expected_attrs) -class TestMoreGrofman(TestPlayer): +class TestGrofman(TestPlayer): - name = "MoreGrofman" - player = axelrod.MoreGrofman + name = "Second by Grofman" + player = axelrod.SecondByGrofman expected_classifier = { "memory_depth": 8, "stochastic": False, @@ -378,7 +378,7 @@ def test_strategy(self): actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D), (D, C)] self.versus_test(axelrod.Alternator(), expected_actions=actions) - # Demonstrate MoreGrofman Logic + # Demonstrate Grofman Logic # Own previous move was C, opponent defected less than 3 times in last 8 moregrofman_actions = [C] * 7 + [C] opponent_actions = [C] * 6 + [D] * 2 @@ -533,8 +533,8 @@ def test_strategy(self): class TestKluepfel(TestPlayer): - name = "Kluepfel" - player = axelrod.Kluepfel + name = "Second by Kluepfel" + player = axelrod.SecondByKluepfel expected_classifier = { "memory_depth": float("inf"), "stochastic": True, @@ -627,8 +627,8 
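The pair of expectations just added pins down the new end-of-match behaviour of `FirstByTidemanAndChieruzzi`: with a known match length it defects on the final rounds, while an unknown length suppresses this. A minimal interactive check of the same behaviour, assuming `versus_test` mirrors an ordinary `Match` of the same length and using the `match_attributes` argument that also appears in the player information tutorial later in this diff::

    >>> import axelrod as axl
    >>> players = (axl.FirstByTidemanAndChieruzzi(), axl.Cooperator())
    >>> # Known length of 4: defect on the closing rounds.
    >>> axl.Match(players, turns=4).play()
    [(C, C), (C, C), (D, C), (D, C)]
    >>> # Unknown (infinite) length: keep cooperating.
    >>> match = axl.Match(players, turns=4,
    ...                   match_attributes={"length": float("inf")})
    >>> match.play()
    [(C, C), (C, C), (C, C), (C, C)]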
diff --git a/axelrod/tests/strategies/test_axelrod_second.py b/axelrod/tests/strategies/test_axelrod_second.py
index 53e9994ba..5a7415f35 100644
--- a/axelrod/tests/strategies/test_axelrod_second.py
+++ b/axelrod/tests/strategies/test_axelrod_second.py
@@ -11,8 +11,8 @@

 class TestChampion(TestPlayer):
-    name = "Champion"
-    player = axelrod.Champion
+    name = "Second by Champion"
+    player = axelrod.SecondByChampion
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": True,
@@ -46,8 +46,8 @@ def test_strategy(self):

 class TestEatherley(TestPlayer):
-    name = "Eatherley"
-    player = axelrod.Eatherley
+    name = "Second by Eatherley"
+    player = axelrod.SecondByEatherley
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": True,
@@ -83,8 +83,8 @@ def test_strategy(self):

 class TestTester(TestPlayer):
-    name = "Tester"
-    player = axelrod.Tester
+    name = "Second by Tester"
+    player = axelrod.SecondByTester
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": False,
@@ -116,8 +116,8 @@ def test_strategy(self):

 class TestGladstein(TestPlayer):
-    name = "Gladstein"
-    player = axelrod.Gladstein
+    name = "Second by Gladstein"
+    player = axelrod.SecondByGladstein
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": False,
@@ -159,8 +159,8 @@ def test_strategy(self):

 class TestTranquilizer(TestPlayer):
-    name = "Tranquilizer"
-    player = axelrod.Tranquilizer
+    name = "Second by Tranquilizer"
+    player = axelrod.SecondByTranquilizer
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": True,
@@ -175,7 +175,7 @@ class TestTranquilizer(TestPlayer):

     def test_init(self):
-        player = axelrod.Tranquilizer()
+        player = axelrod.SecondByTranquilizer()
         self.assertEqual(player.num_turns_after_good_defection, 0)
         self.assertEqual(player.opponent_consecutive_defections, 0)
@@ -355,10 +355,10 @@ def test_strategy(self):
         self.versus_test(opponent, expected_actions=actions, attrs=expected_attrs)


-class TestMoreGrofman(TestPlayer):
+class TestGrofman(TestPlayer):

-    name = "MoreGrofman"
-    player = axelrod.MoreGrofman
+    name = "Second by Grofman"
+    player = axelrod.SecondByGrofman
     expected_classifier = {
         "memory_depth": 8,
         "stochastic": False,
@@ -378,7 +378,7 @@ def test_strategy(self):
         actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D), (D, C)]
         self.versus_test(axelrod.Alternator(), expected_actions=actions)

-        # Demonstrate MoreGrofman Logic
+        # Demonstrate Grofman Logic
         # Own previous move was C, opponent defected less than 3 times in last 8
         moregrofman_actions = [C] * 7 + [C]
         opponent_actions = [C] * 6 + [D] * 2
@@ -533,8 +533,8 @@ def test_strategy(self):

 class TestKluepfel(TestPlayer):
-    name = "Kluepfel"
-    player = axelrod.Kluepfel
+    name = "Second by Kluepfel"
+    player = axelrod.SecondByKluepfel
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": True,
@@ -627,8 +627,8 @@ def test_strategy(self):

 class TestBorufsen(TestPlayer):
-    name = "Borufsen"
-    player = axelrod.Borufsen
+    name = "Second by Borufsen"
+    player = axelrod.SecondByBorufsen
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": False,
@@ -677,8 +677,8 @@ def test_strategy(self):

 class TestCave(TestPlayer):
-    name = "Cave"
-    player = axelrod.Cave
+    name = "Second by Cave"
+    player = axelrod.SecondByCave
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": True,
@@ -795,8 +795,8 @@ def test_strategy(self):

 class TestWmAdams(TestPlayer):
-    name = "WmAdams"
-    player = axelrod.WmAdams
+    name = "Second by WmAdams"
+    player = axelrod.SecondByWmAdams
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": True,
@@ -878,8 +878,8 @@ def test_strategy(self):

 class TestGraaskampKatzen(TestPlayer):
-    name = "GraaskampKatzen"
-    player = axelrod.GraaskampKatzen
+    name = "Second by GraaskampKatzen"
+    player = axelrod.SecondByGraaskampKatzen
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": False,
@@ -915,8 +915,8 @@ def test_strategy(self):

 class TestWeiner(TestPlayer):
-    name = "Weiner"
-    player = axelrod.Weiner
+    name = "Second by Weiner"
+    player = axelrod.SecondByWeiner
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": False,
@@ -984,8 +984,8 @@ def test_strategy(self):

 class TestHarrington(TestPlayer):
-    name = "Harrington"
-    player = axelrod.Harrington
+    name = "Second by Harrington"
+    player = axelrod.SecondByHarrington
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": True,
@@ -1243,9 +1243,9 @@ def test_strategy(self):
         )


-class TestMoreTidemanAndChieruzzi(TestPlayer):
-    name = "More Tideman and Chieruzzi"
-    player = axelrod.MoreTidemanAndChieruzzi
+class TestTidemanAndChieruzzi(TestPlayer):
+    name = "Second by Tideman and Chieruzzi"
+    player = axelrod.SecondByTidemanAndChieruzzi
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": False,
@@ -1332,15 +1332,15 @@ def test_strategy(self):
         # Build an opponent who will cause us to consider a Fresh Start, but
         # will fail the binomial test.
         opponent_actions = [C] * 5 + [D] * 5
-        C5D5_Player = axelrod.MockPlayer(actions=opponent_actions)
+        C5D5_player = axelrod.MockPlayer(actions=opponent_actions)
         actions = [(C, C)] * 5 + [(C, D)] + [(D, D)] * 3
         actions += [(D, D)]  # No Defection here means no Fresh Start.
-        self.versus_test(C5D5_Player, expected_actions=actions)
+        self.versus_test(C5D5_player, expected_actions=actions)


 class TestGetzler(TestPlayer):
-    name = "Getzler"
-    player = axelrod.Getzler
+    name = "Second by Getzler"
+    player = axelrod.SecondByGetzler
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": True,
@@ -1373,8 +1373,8 @@ def test_strategy(self):

 class TestLeyvraz(TestPlayer):
-    name = "Leyvraz"
-    player = axelrod.Leyvraz
+    name = "Second by Leyvraz"
+    player = axelrod.SecondByLeyvraz
     expected_classifier = {
         "memory_depth": 3,
         "stochastic": True,
@@ -1416,8 +1416,8 @@ def test_strategy(self):

 class TestWhite(TestPlayer):
-    name = "White"
-    player = axelrod.White
+    name = "Second by White"
+    player = axelrod.SecondByWhite
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": False,
@@ -1484,8 +1484,8 @@ def test_strategy(self):

 class TestBlack(TestPlayer):
-    name = "Black"
-    player = axelrod.Black
+    name = "Second by Black"
+    player = axelrod.SecondByBlack
     expected_classifier = {
         "memory_depth": 5,
         "stochastic": True,
@@ -1532,8 +1532,8 @@ def test_strategy(self):

 class TestRichardHufford(TestPlayer):
-    name = "RichardHufford"
-    player = axelrod.RichardHufford
+    name = "Second by RichardHufford"
+    player = axelrod.SecondByRichardHufford
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": False,
@@ -1583,8 +1583,8 @@ def test_strategy(self):

 class TestYamachi(TestPlayer):
-    name = "Yamachi"
-    player = axelrod.Yamachi
+    name = "Second by Yamachi"
+    player = axelrod.SecondByYamachi
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": False,
@@ -1713,8 +1713,8 @@ def test_strategy(self):

 class TestColbert(TestPlayer):
-    name = "Colbert"
-    player = axelrod.Colbert
+    name = "Second by Colbert"
+    player = axelrod.SecondByColbert
     expected_classifier = {
         "memory_depth": 4,
         "stochastic": False,
@@ -1741,8 +1741,8 @@ def test_strategy(self):

 class TestMikkelson(TestPlayer):
-    name = "Mikkelson"
-    player = axelrod.Mikkelson
+    name = "Second by Mikkelson"
+    player = axelrod.SecondByMikkelson
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": False,
@@ -1817,8 +1817,8 @@ def test_strategy(self):
         # Still Cooperate, because Defect rate is low

 class TestRowsam(TestPlayer):
-    name = "Rowsam"
-    player = axelrod.Rowsam
+    name = "Second by Rowsam"
+    player = axelrod.SecondByRowsam
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": False,
@@ -1910,8 +1910,8 @@ def test_strategy(self):

 class TestAppold(TestPlayer):
-    name = "Appold"
-    player = axelrod.Appold
+    name = "Second by Appold"
+    player = axelrod.SecondByAppold
     expected_classifier = {
         "memory_depth": float("inf"),
         "stochastic": True,
@@ -2030,3 +2030,5 @@ def test_strategy(self):
             (C, C), (D, C)]
         self.versus_test(axelrod.Random(0.5), expected_actions=actions, seed=7)
+
+
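Every second-tournament class now carries the `Second by` prefix in its `name` attribute, so a player's string representation makes its provenance explicit. A quick check of what the renamed classes report, based on the `name` attributes asserted in the tests above::

    >>> import axelrod as axl
    >>> str(axl.SecondByChampion())
    'Second by Champion'
    >>> str(axl.SecondByRowsam())
    'Second by Rowsam'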
diff --git a/axelrod/tests/strategies/test_meta.py b/axelrod/tests/strategies/test_meta.py
index d6eac7433..833b45fbb 100644
--- a/axelrod/tests/strategies/test_meta.py
+++ b/axelrod/tests/strategies/test_meta.py
@@ -369,7 +369,7 @@ class TestMetaMajorityFiniteMemory(TestMetaPlayer):
     }

     def test_strategy(self):
-        actions = [(C, C), (C, D), (D, C), (C, D), (C, C)]
+        actions = [(C, C), (C, D), (D, C), (C, D), (D, C)]
         self.versus_test(opponent=axelrod.Alternator(), expected_actions=actions)

@@ -430,7 +430,7 @@ class TestMetaWinnerFiniteMemory(TestMetaPlayer):
     }

     def test_strategy(self):
-        actions = [(C, C), (C, D), (C, C), (D, D), (D, C)]
+        actions = [(C, C), (C, D), (C, C), (C, D), (D, C)]
         self.versus_test(opponent=axelrod.Alternator(), expected_actions=actions)

@@ -578,7 +578,7 @@ class TestNMWEStochastic(TestMetaPlayer):
     }

     def test_strategy(self):
-        actions = [(C, C), (C, D), (C, C), (D, D), (D, C)]
+        actions = [(C, C), (C, D), (C, C), (C, D), (D, C)]
         self.versus_test(opponent=axelrod.Alternator(), expected_actions=actions, seed=20)

@@ -597,7 +597,7 @@ class TestNMWEFiniteMemory(TestMetaPlayer):
     }

     def test_strategy(self):
-        actions = [(C, C), (C, D), (D, C), (D, D), (C, C)]
+        actions = [(C, C), (C, D), (D, C), (D, D), (D, C)]
         self.versus_test(opponent=axelrod.Alternator(), expected_actions=actions)

diff --git a/axelrod/tests/strategies/test_revised_downing.py b/axelrod/tests/strategies/test_revised_downing.py
new file mode 100644
index 000000000..c5637fbfb
--- /dev/null
+++ b/axelrod/tests/strategies/test_revised_downing.py
@@ -0,0 +1,42 @@
+import axelrod
+
+from .test_player import TestPlayer
+
+C, D = axelrod.Action.C, axelrod.Action.D
+
+class TestRevisedDowning(TestPlayer):
+
+    name = "Revised Downing"
+    player = axelrod.RevisedDowning
+    expected_classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        actions = [(C, C), (C, C), (C, C)]
+        self.versus_test(axelrod.Cooperator(), expected_actions=actions)
+
+        actions = [(C, D), (C, D), (D, D)]
+        self.versus_test(axelrod.Defector(), expected_actions=actions)
+
+        opponent = axelrod.MockPlayer(actions=[D, C, C])
+        actions = [(C, D), (C, C), (C, C), (C, D)]
+        self.versus_test(opponent, expected_actions=actions)
+
+        opponent = axelrod.MockPlayer(actions=[D, D, C])
+        actions = [(C, D), (C, D), (D, C), (D, D)]
+        self.versus_test(opponent, expected_actions=actions)
+
+        opponent = axelrod.MockPlayer(actions=[C, C, D, D, C, C])
+        actions = [(C, C), (C, C), (C, D), (C, D), (D, C), (D, C), (D, C)]
+        self.versus_test(opponent, expected_actions=actions)
+
+        opponent = axelrod.MockPlayer(actions=[C, C, C, C, D, D])
+        actions = [(C, C), (C, C), (C, C), (C, C), (C, D), (C, D), (C, C)]
+        self.versus_test(opponent, expected_actions=actions)
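The new test file above preserves the behaviour of the previous `RevisedDowning` implementation, while `FirstByDowning` (tested earlier in this diff) follows the [Axelrod1980]_ description and opens with two defections. A minimal comparison taken from the expectations in the two test files, assuming `versus_test` corresponds to a plain `Match` of the same length::

    >>> import axelrod as axl
    >>> # The strategy as described in Axelrod (1980): starts by defecting.
    >>> axl.Match((axl.FirstByDowning(), axl.Cooperator()), turns=3).play()
    [(D, C), (D, C), (C, C)]
    >>> # The revised variant: starts by cooperating.
    >>> axl.Match((axl.RevisedDowning(), axl.Cooperator()), turns=3).play()
    [(C, C), (C, C), (C, C)]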
"Uncertainty and the Evolution of Cooperation." The Journal of Conflict Resolution, 37(4), 709–734. .. [Beaufils1997] Beaufils, B. and Delahaye, J. (1997). Our Meeting With Gradual: A Good Strategy For The Iterated Prisoner’s Dilemma. http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.4041 .. [Berg2015] Berg, P. Van Den, & Weissing, F. J. (2015). The importance of mechanisms for the evolution of cooperation. Proceedings of the Royal Society B-Biological Sciences, 282. +.. [Downing1975] Downing, Leslie L. "The Prisoner's Dilemma game as a problem-solving phenomenon: An outcome maximization interpretation." Simulation & Games 6.4 (1975): 366-391. .. [Eckhart2015] Eckhart Arnold (2016) CoopSim v0.9.9 beta 6. https://github.com/jecki/CoopSim/ .. [Frean1994] Frean, Marcus R. "The Prisoner's Dilemma without Synchrony." Proceedings: Biological Sciences, vol. 257, no. 1348, 1994, pp. 75–79. www.jstor.org/stable/50253. .. [Harper2017] Harper, M., Knight, V., Jones, M., Koutsovoulos, G., Glynatsi, N. E., & Campbell, O. (2017) Reinforcement learning produces dominant strategies for the Iterated Prisoner’s Dilemma. PloS one. https://doi.org/10.1371/journal.pone.0188046 diff --git a/docs/reference/overview_of_strategies.rst b/docs/reference/overview_of_strategies.rst index fa676dcb1..9f2e6ed90 100644 --- a/docs/reference/overview_of_strategies.rst +++ b/docs/reference/overview_of_strategies.rst @@ -17,19 +17,19 @@ An indication is given as to whether or not this strategy is implemented in the :header: "Name", "Author", "Axelrod Library Name" "Tit For Tat", "Anatol Rapoport", ":class:`TitForTat `" - "Tideman and Chieruzzi", "T Nicolaus Tideman and Paula Chieruzz", ":class:`TidemanAndChieruzzi `" - "Nydegger", "Rudy Nydegger", ":class:`Nydegger `" - "Grofman", "Bernard Grofman", ":class:`Grofman `" - "Shubik", "Martin Shubik", ":class:`Shubik `" - "Stein and Rapoport", "Stein and Anatol Rapoport", ":class:`SteinAndRapoport `" + "Tideman and Chieruzzi", "T Nicolaus Tideman and Paula Chieruzz", ":class:`TidemanAndChieruzzi `" + "Nydegger", "Rudy Nydegger", ":class:`Nydegger `" + "Grofman", "Bernard Grofman", ":class:`Grofman `" + "Shubik", "Martin Shubik", ":class:`Shubik `" + "Stein and Rapoport", "Stein and Anatol Rapoport", ":class:`SteinAndRapoport `" "Grudger", "James W Friedman", ":class:`Grudger `" - "Davis", "Morton Davis", ":class:`Davis `" - "Graaskamp", "Jim Graaskamp", ":class:`Graaskamp `" - "Downing", "Leslie Downing", ":class:`RevisedDowning `" - "Feld", "Scott Feld", ":class:`Feld `" - "Joss", "Johann Joss", ":class:`Joss `" - "Tullock", "Gordon Tullock", ":class:`Tullock `" - "Unnamed Strategy", "Unknown", ":class:`UnnamedStrategy `" + "Davis", "Morton Davis", ":class:`Davis `" + "Graaskamp", "Jim Graaskamp", ":class:`Graaskamp `" + "FirstByDowning", "Leslie Downing", ":class:`RevisedDowning `" + "Feld", "Scott Feld", ":class:`Feld `" + "Joss", "Johann Joss", ":class:`Joss `" + "Tullock", "Gordon Tullock", ":class:`Tullock `" + "(Name withheld)", "Unknown", ":class:`UnnamedStrategy `" "Random", "Unknownd", ":class:`Random `" Axelrod's second tournament @@ -58,7 +58,7 @@ repository. "GRASR_", "Unknown", "Not Implemented" "K31R_", "Gail Grisell", ":class:`GoByMajority `" - "K32R_", "Charles Kluepfel", ":class:`Kluepfel `" + "K32R_", "Charles Kluepfel", ":class:`SecondByKluepfel `" "K33R_", "Harold Rabbie", "Not Implemented" "K34R_", "James W Friedman", ":class:`Grudger `" "K35R_", "Abraham Getzler", "Not Implemented" @@ -67,15 +67,15 @@ repository. 
"K38R_", "Nelson Weiderman", "Not Implemented" "K39R_", "Tom Almy", "Not Implemented" "K40R_", "Robert Adams", "Not Implemented" - "K41R_", "Herb Weiner", ":class:`Weiner `" - "K42R_", "Otto Borufsen", ":class:`Borufsen `" + "K41R_", "Herb Weiner", ":class:`SecondByWeiner `" + "K42R_", "Otto Borufsen", ":class:`SecondByBorufsen `" "K43R_", "R D Anderson", "Not Implemented" - "K44R_", "William Adams", ":class:`WmAdams `" + "K44R_", "William Adams", ":class:`SecondByWmAdams `" "K45R_", "Michael F McGurrin", "Not Implemented" - "K46R_", "Graham J Eatherley", ":class:`Eatherley `" - "K47R_", "Richard Hufford", ":class:`RichardHufford `" + "K46R_", "Graham J Eatherley", ":class:`SecondByEatherley `" + "K47R_", "Richard Hufford", ":class:`SecondByRichardHufford `" "K48R_", "George Hufford", "Not Implemented" - "K49R_", "Rob Cave", ":class:`Cave `" + "K49R_", "Rob Cave", ":class:`SecondByCave `" "K50R_", "Rik Smoody", "Not Implemented" "K51R_", "John Willaim Colbert", "Not Implemented" "K52R_", "David A Smith", "Not Implemented" @@ -84,40 +84,40 @@ repository. "K55R_", "Steve Newman", "Not Implemented" "K56R_", "Stanley F Quayle", "Not Implemented" "K57R_", "Rudy Nydegger", "Not Implemented" - "K58R_", "Glen Rowsam", ":class:`Rowsam `" - "K59R_", "Leslie Downing", "Not Implemented" - "K60R_", "Jim Graaskamp and Ken Katzen", ":class:`GraaskampKatzen `" - "K61R_", "Danny C Champion", ":class:`Champion `" + "K58R_", "Glen Rowsam", ":class:`SecondByRowsam `" + "K59R_", "Leslie Downing", ":class:`RevisedDowning `" + "K60R_", "Jim Graaskamp and Ken Katzen", ":class:`SecondByGraaskampKatzen `" + "K61R_", "Danny C Champion", ":class:`SecondByChampion `" "K62R_", "Howard R Hollander", "Not Implemented" "K63R_", "George Duisman", "Not Implemented" - "K64R_", "Brian Yamachi", ":class:`Yamachi `" + "K64R_", "Brian Yamachi", ":class:`SecondByYamachi `" "K65R_", "Mark F Batell", "Not Implemented" "K66R_", "Ray Mikkelson", "Not Implemented" - "K67R_", "Craig Feathers", ":class:`Tranquilizer `" - "K68R_", "Fransois Leyvraz", ":class:`Leyvraz `" + "K67R_", "Craig Feathers", ":class:`SecondByTranquilizer `" + "K68R_", "Fransois Leyvraz", ":class:`SecondByLeyvraz `" "K69R_", "Johann Joss", "Not Implemented" "K70R_", "Robert Pebly", "Not Implemented" "K71R_", "James E Hall", "Not Implemented" - "K72R_", "Edward C White Jr", ":class:`White `" + "K72R_", "Edward C White Jr", ":class:`SecondByWhite `" "K73R_", "George Zimmerman", "Not Implemented" "K74R_", "Edward Friedland", "Not Implemented" "K74RXX_", "Edward Friedland", "Not Implemented" - "K75R_", "Paul D Harrington", ":class:`Harrington `" - "K76R_", "David Gladstein", ":class:`Gladstein `" + "K75R_", "Paul D Harrington", ":class:`SecondByHarrington `" + "K76R_", "David Gladstein", ":class:`SecondByGladstein `" "K77R_", "Scott Feld", "Not Implemented" "K78R_", "Fred Mauk", "Not Implemented" "K79R_", "Dennis Ambuehl and Kevin Hickey", Not Implemented "K80R_", "Robyn M Dawes and Mark Batell", Not Implemented "K81R_", "Martyn Jones", "Not Implemented" "K82R_", "Robert A Leyland", "Not Implemented" - "K83R_", "Paul E Black", ":class:`White `" - "K84R_", "T Nicolaus Tideman and Paula Chieruzzi", ":class:`More Tideman And Chieruzzi `" + "K83R_", "Paul E Black", ":class:`SecondByWhite `" + "K84R_", "T Nicolaus Tideman and Paula Chieruzzi", ":class:`SecondByTidemanChieruzzi `" "K85R_", "Robert B Falk and James M Langsted", "Not Implemented" "K86R_", "Bernard Grofman", "Not Implemented" "K87R_", "E E H Schurmann", "Not Implemented" - "K88R_", "Scott Appold", 
":class:`Appold `" + "K88R_", "Scott Appold", ":class:`SecondByAppold `" "K89R_", "Gene Snodgrass", "Not Implemented" - "K90R_", "John Maynard Smith", ":class:`Appold `" + "K90R_", "John Maynard Smith", ":class:`TitFor2Tats `" "K91R_", "Jonathan Pinkley", "Not Implemented" "K92R_", "Anatol Rapoport", ":class:`TitForTat `" "K93R_", "Unknown", "Not Implemented" diff --git a/docs/tutorials/advanced/classification_of_strategies.rst b/docs/tutorials/advanced/classification_of_strategies.rst index 66133fbb2..9e7db52b4 100644 --- a/docs/tutorials/advanced/classification_of_strategies.rst +++ b/docs/tutorials/advanced/classification_of_strategies.rst @@ -47,7 +47,7 @@ strategies:: ... } >>> strategies = axl.filtered_strategies(filterset) >>> len(strategies) - 87 + 88 Or, to find out how many strategies only use 1 turn worth of memory to make a decision:: @@ -57,7 +57,7 @@ make a decision:: ... } >>> strategies = axl.filtered_strategies(filterset) >>> len(strategies) - 31 + 32 Multiple filters can be specified within the filterset dictionary. To specify a range of memory_depth values, we can use the 'min_memory_depth' and @@ -69,7 +69,7 @@ range of memory_depth values, we can use the 'min_memory_depth' and ... } >>> strategies = axl.filtered_strategies(filterset) >>> len(strategies) - 54 + 55 We can also identify strategies that make use of particular properties of the tournament. For example, here is the number of strategies that make use of the diff --git a/docs/tutorials/advanced/player_information.rst b/docs/tutorials/advanced/player_information.rst index ed585bc27..3ede07136 100644 --- a/docs/tutorials/advanced/player_information.rst +++ b/docs/tutorials/advanced/player_information.rst @@ -4,13 +4,12 @@ Player information ================== It is possible to determine what information players know about their matches. -By default all known information is given. -For example let us create a match with 5 turns between :code:`SteinAndRapoport` -and :code:`Alternator`. The latter of these two always defects on the last 2 -turns:: +By default all known information is given. For example let us create a match +with 5 turns between :code:`FirstBySteinAndRapoport` and :code:`Alternator`. The +latter of these two always defects on the last 2 turns:: >>> import axelrod as axl - >>> players = (axl.Alternator(), axl.SteinAndRapoport()) + >>> players = (axl.Alternator(), axl.FirstBySteinAndRapoport()) >>> axl.Match(players, turns=5).play() [(C, C), (D, C), (C, C), (D, D), (C, D)] diff --git a/docs/tutorials/contributing/strategy/classifying_the_new_strategy.rst b/docs/tutorials/contributing/strategy/classifying_the_new_strategy.rst index da02fa89f..de0fd213d 100644 --- a/docs/tutorials/contributing/strategy/classifying_the_new_strategy.rst +++ b/docs/tutorials/contributing/strategy/classifying_the_new_strategy.rst @@ -26,8 +26,8 @@ default classifier dictionary from the class. This might sometimes be modified b the initialisation depending on input parameters. 
diff --git a/docs/tutorials/advanced/classification_of_strategies.rst b/docs/tutorials/advanced/classification_of_strategies.rst
index 66133fbb2..9e7db52b4 100644
--- a/docs/tutorials/advanced/classification_of_strategies.rst
+++ b/docs/tutorials/advanced/classification_of_strategies.rst
@@ -47,7 +47,7 @@ strategies::
     ...     }
     >>> strategies = axl.filtered_strategies(filterset)
     >>> len(strategies)
-    87
+    88

 Or, to find out how many strategies only use 1 turn worth of memory to
 make a decision::
@@ -57,7 +57,7 @@ make a decision::
     ...     }
     >>> strategies = axl.filtered_strategies(filterset)
     >>> len(strategies)
-    31
+    32

 Multiple filters can be specified within the filterset dictionary. To specify a
 range of memory_depth values, we can use the 'min_memory_depth' and
@@ -69,7 +69,7 @@ range of memory_depth values, we can use the 'min_memory_depth' and
     ...     }
     >>> strategies = axl.filtered_strategies(filterset)
     >>> len(strategies)
-    54
+    55

 We can also identify strategies that make use of particular properties of the
 tournament. For example, here is the number of strategies that make use of the
diff --git a/docs/tutorials/advanced/player_information.rst b/docs/tutorials/advanced/player_information.rst
index ed585bc27..3ede07136 100644
--- a/docs/tutorials/advanced/player_information.rst
+++ b/docs/tutorials/advanced/player_information.rst
@@ -4,13 +4,12 @@ Player information
 ==================

 It is possible to determine what information players know about their matches.
-By default all known information is given.
-For example let us create a match with 5 turns between :code:`SteinAndRapoport`
-and :code:`Alternator`. The latter of these two always defects on the last 2
-turns::
+By default all known information is given. For example let us create a match
+with 5 turns between :code:`FirstBySteinAndRapoport` and :code:`Alternator`. The
+former of these two always defects on the last 2 turns::

     >>> import axelrod as axl
-    >>> players = (axl.Alternator(), axl.SteinAndRapoport())
+    >>> players = (axl.Alternator(), axl.FirstBySteinAndRapoport())
     >>> axl.Match(players, turns=5).play()
     [(C, C), (D, C), (C, C), (D, D), (C, D)]
diff --git a/docs/tutorials/contributing/strategy/classifying_the_new_strategy.rst b/docs/tutorials/contributing/strategy/classifying_the_new_strategy.rst
index da02fa89f..de0fd213d 100644
--- a/docs/tutorials/contributing/strategy/classifying_the_new_strategy.rst
+++ b/docs/tutorials/contributing/strategy/classifying_the_new_strategy.rst
@@ -26,8 +26,8 @@ default classifier dictionary from the class. This might sometimes be modified b
 the initialisation depending on input parameters. A good example of this is the
 :code:`Joss` strategy::

-    >>> joss = axelrod.Joss()
-    >>> boring_joss = axelrod.Joss(p=1)
+    >>> joss = axelrod.FirstByJoss()
+    >>> boring_joss = axelrod.FirstByJoss(p=1)
     >>> joss.classifier['stochastic'], boring_joss.classifier['stochastic']
     (True, False)
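The classifier changes in this diff (for example `FirstByDowning` now declares that it makes use of the game) also feed the filtering interface shown above. A minimal sketch, assuming the `makes_use_of` filter key accepts a list of properties as in the surrounding classification tutorial::

    >>> import axelrod as axl
    >>> filterset = {"makes_use_of": ["game"]}
    >>> strategies = axl.filtered_strategies(filterset)
    >>> axl.FirstByDowning in strategies
    True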
+""" + +import axelrod as axl +import matplotlib.pyplot as plt + +first_tournament_participants_ordered_by_reported_rank = [ + s() for s in axl.axelrod_first_strategies +] +number_of_strategies = len( + first_tournament_participants_ordered_by_reported_rank +) +axl.seed(0) +tournament = axl.Tournament( + players=first_tournament_participants_ordered_by_reported_rank, + turns=200, + repetitions=5, +) +results = tournament.play() + +plt.figure(figsize=(15, 6)) +plt.plot((0, 15), (0, 15), color="grey", linestyle="--") +for original_rank, strategy in enumerate( + first_tournament_participants_ordered_by_reported_rank +): + rank = results.ranked_names.index(str(strategy)) + if rank == original_rank: + symbol = "+" + plt.plot((rank, rank), (rank, 0), color="grey") + else: + symbol = "o" + plt.scatter([rank], [original_rank], marker=symbol, color="black", s=50) +plt.xticks(range(number_of_strategies), results.ranked_names, rotation=90) +plt.ylabel("Reported rank") +plt.xlabel("Reproduced rank") +plt.savefig("rank_comparison.svg") + +plot = axl.Plot(results) +p = plot.boxplot() +p.savefig("boxplot.svg") diff --git a/docs/tutorials/getting_started/_static/running_axelrods_first_tournament/rank_comparison.svg b/docs/tutorials/getting_started/_static/running_axelrods_first_tournament/rank_comparison.svg new file mode 100644 index 000000000..69a14348c --- /dev/null +++ b/docs/tutorials/getting_started/_static/running_axelrods_first_tournament/rank_comparison.svg @@ -0,0 +1,1740 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/tutorials/getting_started/index.rst b/docs/tutorials/getting_started/index.rst index 8186ed97a..c87628f36 100644 --- a/docs/tutorials/getting_started/index.rst +++ b/docs/tutorials/getting_started/index.rst @@ -16,3 +16,4 @@ Contents: visualising_results.rst moran.rst human_interaction.rst + running_axelrods_first_tournament.rst diff --git 
diff --git a/docs/tutorials/getting_started/running_axelrods_first_tournament.rst b/docs/tutorials/getting_started/running_axelrods_first_tournament.rst
new file mode 100644
index 000000000..56b436b30
--- /dev/null
+++ b/docs/tutorials/getting_started/running_axelrods_first_tournament.rst
@@ -0,0 +1,202 @@
+.. _running_axelrods_first_tournament:
+
+Running Axelrod's First Tournament
+==================================
+
+This tutorial will bring together topics from the previous tutorials to
+reproduce Axelrod's original tournament from [Axelrod1980]_.
+
+Selecting our players
+---------------------
+
+We will use the players from Axelrod's first tournament which are contained
+in the `axelrod.axelrod_first_strategies` list::
+
+    >>> import axelrod as axl
+    >>> first_tournament_participants_ordered_by_reported_rank = [s() for s in axl.axelrod_first_strategies]
+    >>> number_of_strategies = len(first_tournament_participants_ordered_by_reported_rank)
+    >>> for player in first_tournament_participants_ordered_by_reported_rank:
+    ...     print(player)
+    Tit For Tat
+    First by Tideman and Chieruzzi: (D, D)
+    First by Nydegger
+    First by Grofman
+    First by Shubik
+    First by Stein and Rapoport: 0.05: (D, D)
+    Grudger
+    First by Davis: 10
+    First by Graaskamp: 0.05
+    First by Downing
+    First by Feld: 1.0, 0.5, 200
+    First by Joss: 0.9
+    First by Tullock
+    First by Anonymous
+    Random: 0.5
+
+Creating the tournament
+-----------------------
+
+Now we create and run the tournament. We set a seed to ensure
+reproducibility and use 5 repetitions, as this is what was done in
+[Axelrod1980]_::
+
+    >>> axl.seed(0)
+    >>> tournament = axl.Tournament(
+    ...     players=first_tournament_participants_ordered_by_reported_rank,
+    ...     turns=200,
+    ...     repetitions=5
+    ... )
+    >>> results = tournament.play()
+
+Viewing the ranks of the participants
+-------------------------------------
+
+The results object contains the ranked names::
+
+    >>> for name in results.ranked_names:
+    ...     print(name)
+    First by Stein and Rapoport: 0.05: (D, D)
+    First by Grofman
+    First by Shubik
+    Tit For Tat
+    First by Tideman and Chieruzzi: (D, D)
+    First by Nydegger
+    First by Davis: 10
+    Grudger
+    First by Graaskamp: 0.05
+    First by Downing
+    First by Feld: 1.0, 0.5, 200
+    First by Joss: 0.9
+    First by Tullock
+    Random: 0.5
+    First by Anonymous
+
+We see that `TitForTat` does not in fact win this tournament.
+We can plot the reported rank (from [Axelrod1980]_) versus the reproduced one::
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.figure(figsize=(15, 6))  # doctest: +SKIP
+    >>> plt.plot((0, 15), (0, 15), color="grey", linestyle="--")  # doctest: +SKIP
+    >>> for original_rank, strategy in enumerate(first_tournament_participants_ordered_by_reported_rank):
+    ...     rank = results.ranked_names.index(str(strategy))
+    ...     if rank == original_rank:
+    ...         symbol = "+"
+    ...         plt.plot((rank, rank), (rank, 0), color="grey")
+    ...     else:
+    ...         symbol = "o"
+    ...     plt.scatter([rank], [original_rank], marker=symbol, color="black", s=50)  # doctest: +SKIP
+    >>> plt.xticks(
+    ...     range(number_of_strategies),
+    ...     results.ranked_names,
+    ...     rotation=90
+    ... )  # doctest: +SKIP
+    >>> plt.ylabel("Reported rank")  # doctest: +SKIP
+    >>> plt.xlabel("Reproduced rank")  # doctest: +SKIP
+    >>> plt.show()  # doctest: +SKIP
+
+.. image:: _static/running_axelrods_first_tournament/rank_comparison.svg
+   :width: 75%
+   :align: center
+
+Visualising the scores
+----------------------
+
+We see that the first 6 strategies do not match the ranks of the original
+paper, so we can take a look at the variation in the scores::
+
+    >>> plot = axl.Plot(results)
+    >>> p = plot.boxplot()
+    >>> p.show()
+
+.. image:: _static/running_axelrods_first_tournament/boxplot.svg
+   :width: 75%
+   :align: center
+
+The first 6 strategies have similar scores which could indicate that the
+original work by Axelrod was not run with sufficient repetitions. Another
+explanation is that all the strategies are implemented from the descriptions
+given in [Axelrod1980]_ and there is no source code to base this on. This leads
+to some strategies being ambiguous. These are all clearly explained in the
+strategy docstrings. For example::
+
+    >>> print(axl.FirstByAnonymous.__doc__)
+
+    Submitted to Axelrod's first tournament by a graduate student whose name was
+    withheld.
+
+    The description written in [Axelrod1980]_ is:
+
+    > "This rule has a probability of cooperating, P, which is initially 30% and
+    > is updated every 10 moves. P is adjusted if the other player seems random,
+    > very cooperative, or very uncooperative. P is also adjusted after move 130
+    > if the rule has a lower score than the other player. Unfortunately, the
+    > complex process of adjustment frequently left the probability of cooperation
+    > in the 30% to 70% range, and therefore the rule appeared random to many
+    > other players."
+
+    Given the lack of detail this strategy is implemented based on the final
+    sentence of the description which is to have a cooperation probability that
+    is uniformly random in the 30 to 70% range.
+
+    Names:
+
+    - (Name withheld): [Axelrod1980]_
+
+
+Other outcomes
+--------------
+
+If we run the tournament with other seeds, the results are different. For
+example, with `130` Tit For Tat wins::
+
+    >>> axl.seed(130)
+    >>> tournament = axl.Tournament(
+    ...     players=first_tournament_participants_ordered_by_reported_rank,
+    ...     turns=200,
+    ...     repetitions=5
+    ... )
+    >>> results = tournament.play()
+    >>> for name in results.ranked_names:
+    ...     print(name)
+    Tit For Tat
+    First by Stein and Rapoport: 0.05: (D, D)
+    First by Grofman
+    First by Shubik
+    First by Nydegger
+    First by Tideman and Chieruzzi: (D, D)
+    First by Davis: 10
+    Grudger
+    First by Graaskamp: 0.05
+    First by Downing
+    First by Feld: 1.0, 0.5, 200
+    First by Joss: 0.9
+    First by Tullock
+    Random: 0.5
+    First by Anonymous
+
+With `1238` the strategy submitted by Shubik wins::
+
+    >>> axl.seed(1238)
+    >>> tournament = axl.Tournament(
+    ...     players=first_tournament_participants_ordered_by_reported_rank,
+    ...     turns=200,
+    ...     repetitions=5
+    ... )
+    >>> results = tournament.play()
+    >>> for name in results.ranked_names:
+    ...     print(name)
+    First by Shubik
+    First by Stein and Rapoport: 0.05: (D, D)
+    First by Grofman
+    Tit For Tat
+    First by Nydegger
+    First by Tideman and Chieruzzi: (D, D)
+    Grudger
+    First by Davis: 10
+    First by Graaskamp: 0.05
+    First by Downing
+    First by Feld: 1.0, 0.5, 200
+    First by Tullock
+    First by Joss: 0.9
+    First by Anonymous
+    Random: 0.5
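Since the top of the ranking varies with the seed, a natural extension of the tutorial is to replay the tournament over several seeds and record the winner each time. A minimal sketch along those lines; the helper `winner_for_seed` is hypothetical and not part of the library::

    >>> import axelrod as axl
    >>> def winner_for_seed(seed, players, turns=200, repetitions=5):
    ...     """Return the name of the top ranked strategy for one seeded run.
    ...
    ...     Hypothetical helper, not part of axelrod itself."""
    ...     axl.seed(seed)
    ...     tournament = axl.Tournament(
    ...         players=players, turns=turns, repetitions=repetitions
    ...     )
    ...     results = tournament.play(progress_bar=False)
    ...     return results.ranked_names[0]

With the runs shown above, seed `0` would return the Stein and Rapoport entry, seed `130` Tit For Tat, and seed `1238` the Shubik entry.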