From ae71044176681fce270c172a45edd0c1307ebe83 Mon Sep 17 00:00:00 2001 From: Patrick Kofod Mogensen Date: Fri, 18 Jan 2019 10:50:25 +0100 Subject: [PATCH] Changes names and make tests work again. --- HARK/interpolation.py | 46 ++++++++++++++++++------------------- HARK/tests/test_discrete.py | 20 +++++++++++----- 2 files changed, 37 insertions(+), 29 deletions(-) diff --git a/HARK/interpolation.py b/HARK/interpolation.py index 62dbb31a87..111b3a965d 100644 --- a/HARK/interpolation.py +++ b/HARK/interpolation.py @@ -3367,13 +3367,13 @@ def _derY(self,x,y): dfdy = y_alpha*dfda + y_beta*dfdb return dfdy -def discreteLogSumProb(Vs, sigma): +def calcLogSumChoiceProbs(Vals, sigma): ''' - Returns the final optimal value and policies given the choice specific value - functions Vs. Policies are degenerate if sigma == 0.0. + Returns the final optimal value and choice probabilities given the choice + specific value functions `Vals`. Probabilities are degenerate if sigma == 0.0. Parameters ---------- - Vs : [numpy.array] + Vals : [numpy.array] A numpy.array that holds choice specific values at common grid points. sigma : float A number that controls the variance of the taste shocks @@ -3385,45 +3385,45 @@ def discreteLogSumProb(Vs, sigma): A numpy.array that holds the discrete choice probabilities ''' - return discreteLogSum(Vs, sigma), discreteProb(Vs, sigma) + return calcLogSum(Vals, sigma), calcChoiceProbs(Vals, sigma) -def discreteProb(Vs, sigma): +def calcChoiceProbs(Vals, sigma): ''' - Returns the policies given the choice specific value functions Vs. Policies - are degenerate if sigma == 0.0. + Returns the choice probabilities given the choice specific value functions + `Vals`. Probabilities are degenerate if sigma == 0.0. Parameters ---------- - Vs : [numpy.array] + Vals : [numpy.array] A numpy.array that holds choice specific values at common grid points. 
sigma : float A number that controls the variance of the taste shocks Returns ------- - P : [numpy.array] + Probs : [numpy.array] A numpy.array that holds the discrete choice probabilities ''' # Assumes that NaNs have been replaced by -numpy.inf or similar if sigma == 0.0: # We could construct a linear index here and use unravel_index. - Pflat = np.argmax(Vs, axis=0) - P = np.zeros(Vs.shape) - for i in range(Vs.shape[0]): - P[i][Pflat==i] = 1 - return P + Pflat = np.argmax(Vals, axis=0) + Probs = np.zeros(Vals.shape) + for i in range(Vals.shape[0]): + Probs[i][Pflat==i] = 1 + return Probs - P = np.divide(np.exp((Vs-Vs[0])/sigma), np.sum(np.exp((Vs-Vs[0])/sigma), axis=0)) - return P + Probs = np.divide(np.exp((Vals-Vals[0])/sigma), np.sum(np.exp((Vals-Vals[0])/sigma), axis=0)) + return Probs -def discreteLogSum(Vs, sigma): +def calcLogSum(Vals, sigma): ''' - Returns the optimal value given the choice specific value functions Vs. + Returns the optimal value given the choice specific value functions Vals. Parameters ---------- - Vs : [numpy.array] + Vals : [numpy.array] A numpy.array that holds choice specific values at common grid points. sigma : float A number that controls the variance of the taste shocks @@ -3436,14 +3436,14 @@ def discreteLogSum(Vs, sigma): # Assumes that NaNs have been replaced by -numpy.inf or similar if sigma == 0.0: # We could construct a linear index here and use unravel_index. 
- V = np.amax(Vs, axis=0) + V = np.amax(Vals, axis=0) return V # else we have a taste shock - maxV = Vs.max() + maxV = Vals.max() # calculate maxV+sigma*log(sum_i=1^J exp((V[i]-maxV))/sigma) - sumexp = np.sum(np.exp((Vs-maxV)/sigma), axis=0) + sumexp = np.sum(np.exp((Vals-maxV)/sigma), axis=0) V = np.log(sumexp) V = maxV + sigma*V return V diff --git a/HARK/tests/test_discrete.py b/HARK/tests/test_discrete.py index 1cf1630ed7..edf3e6c831 100644 --- a/HARK/tests/test_discrete.py +++ b/HARK/tests/test_discrete.py @@ -23,16 +23,24 @@ def setUp(self): # self.Vref3D = maxV + np.log(np.sum(np.exp(self.Vs3D-maxV),axis=0)) # self.Pref3D = np.log(np.sum(np.exp(self.Vs3D-maxV),axis=0)) + def test_noShock2DBothEqualValue(self): + # Test the value functions and policies of the 2D case + sigma = 0.0 + V, P = interpolation.calcLogSumChoiceProbs(self.Vs2D, sigma) + self.assertTrue((V == self.Vref2D).all()) + self.assertTrue((P == self.Pref2D).all()) + def test_noShock2DBoth(self): # Test the value functions and policies of the 2D case sigma = 0.0 - V, P = interpolation.discreteLogSumProb(self.Vs2D, sigma) - self.assertTrue((V, P), (self.Vref2D, self.Pref2D)) + V, P = interpolation.calcLogSumChoiceProbs(self.Vs2D, sigma) + self.assertTrue((V == self.Vref2D).all()) + self.assertTrue((P == self.Pref2D).all()) def test_noShock2DIndividual(self): # Test the value functions and policies of the 2D case sigma = 0.0 - V = interpolation.discreteLogSum(self.Vs2D, sigma) - P = interpolation.discreteProb(self.Vs2D, sigma) - self.assertTrue(np.testing.assert_array_equal(V, self.Vref2D)) - self.assertTrue(np.testing.assert_array_equal(P, self.Pref2D)) + V = interpolation.calcLogSum(self.Vs2D, sigma) + P = interpolation.calcChoiceProbs(self.Vs2D, sigma) + self.assertTrue((V == self.Vref2D).all()) + self.assertTrue((P == self.Pref2D).all())