Skip to content

Commit

Permalink
rewrite calcExpectation to use numpy methods, see econ-ark#625
Browse files Browse the repository at this point in the history
  • Loading branch information
sbenthall committed Dec 28, 2020
1 parent 435937c commit 4a56c02
Show file tree
Hide file tree
Showing 3 changed files with 134 additions and 103 deletions.
2 changes: 1 addition & 1 deletion Documentation/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ Release Data: TBD

* Adds a constructor for LogNormal distributions from mean and standard deviation [#891](https://github.com/econ-ark/HARK/pull/891/)
* Uses new LogNormal constructor in ConsPortfolioModel [#891](https://github.com/econ-ark/HARK/pull/891/)
* calcExpectations method for taking the expectation of a distribution over a function [#884](https://github.com/econ-ark/HARK/pull/884/)
* calcExpectations method for taking the expectation of a distribution over a function [#884](https://github.com/econ-ark/HARK/pull/884/) [#897](https://github.com/econ-ark/HARK/pull/897)


#### Minor Changes
Expand Down
132 changes: 39 additions & 93 deletions HARK/distribution.py
Original file line number Diff line number Diff line change
Expand Up @@ -1001,107 +1001,53 @@ def combineIndepDstns(*distributions, seed=0):
assert np.isclose(np.sum(P_out), 1), "Probabilities do not sum to 1!"
return DiscreteDistribution(P_out, X_out, seed=seed)


def calcExpectation(dstn, func=lambda x: x, *args):
    '''
    Calculate the expectation of a function over a discrete distribution.

    Parameters
    ----------
    dstn : DiscreteDistribution
        The N-valued distribution over which the function is to be evaluated.
    func : function
        The function to be evaluated. It should take a 1D array of size N
        (one draw of the distribution) as its first argument, and may take
        further non-stochastic arguments *args. Please see
        numpy.apply_along_axis() for guidance on the design of func.
        Defaults to the identity function.
    *args : scalar or np.array
        One or more constants or arrays of input values for func,
        representing the non-stochastic arguments. The arrays must all have
        the same shape, and the expectation is computed at
        f(dstn, args[0], args[1], ..., args[M]).

    Returns
    -------
    f_exp : np.array or scalar
        The expectation of the function at the queried values.
        Scalar if only one value.
    '''
    # Dimensionality of the distribution; needed to orient the atom array
    # below (NOTE: the diffed page dropped this binding from the new code
    # path even though it is used there — restored here).
    N = dstn.dim()

    # Stack the support points so each draw of the distribution occupies
    # one slice along axis 0, as numpy.apply_along_axis expects.
    dstn_array = np.column_stack(dstn.X)

    if N > 1:
        # numpy is weird about 1-D arrays: column_stack of a list of N
        # length-K arrays gives (K, N); transpose so axis 0 indexes the N
        # components of a single draw.
        dstn_array = dstn_array.T

    # Evaluate func once per atom of the distribution (plus any
    # non-stochastic arguments passed through unchanged).
    f_query = np.apply_along_axis(
        func, 0, dstn_array, *args
    )

    # Probability-weighted sum over the atoms gives the expectation.
    f_exp = np.dot(
        f_query,
        np.vstack(dstn.pmf)
    )

    # a hack: collapse a size-1 result to a plain scalar for convenience.
    if f_exp.size == 1:
        f_exp = f_exp.flat[0]

    return f_exp
103 changes: 94 additions & 9 deletions HARK/tests/test_distribution.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,18 @@

import HARK.distribution as distribution

from HARK.distribution import (
Bernoulli,
DiscreteDistribution,
Lognormal,
MeanOneLogNormal,
Normal,
Uniform,
Weibull,
calcExpectation,
combineIndepDstns
)


class DiscreteDistributionTests(unittest.TestCase):
"""
Expand All @@ -12,12 +24,79 @@ class DiscreteDistributionTests(unittest.TestCase):

def test_drawDiscrete(self):
self.assertEqual(
distribution.DiscreteDistribution(np.ones(1), np.zeros(1)).drawDiscrete(1)[
DiscreteDistribution(
np.ones(1),
np.zeros(1)).drawDiscrete(1)[
0
],
0,
)

def test_calcExpectation(self):
dd_0_1_20 = Normal().approx(20)
dd_1_1_40 = Normal(mu = 1).approx(40)
dd_10_10_100 = Normal(mu = 10, sigma = 10).approx(100)

ce1 = calcExpectation(dd_0_1_20)
ce2 = calcExpectation(dd_1_1_40)
ce3 = calcExpectation(dd_10_10_100)

self.assertAlmostEqual(ce1, 0.0)
self.assertAlmostEqual(ce2, 1.0)
self.assertAlmostEqual(ce3, 10.0)

ce4= calcExpectation(
dd_0_1_20,
lambda x: 2 ** x
)

self.assertAlmostEqual(ce4, 1.27153712)

ce5 = calcExpectation(
dd_1_1_40,
lambda x: 2 * x
)

self.assertAlmostEqual(ce5, 2.0)

ce6 = calcExpectation(
dd_10_10_100,
lambda x, y: 2 * x + y,
20
)

self.assertAlmostEqual(ce6, 40.0)

ce7 = calcExpectation(
dd_0_1_20,
lambda x, y: x + y,
np.hstack(np.array([0,1,2,3,4,5]))
)

self.assertAlmostEqual(ce7.flat[3], 3.0)

PermShkDstn = MeanOneLogNormal().approx(200)
TranShkDstn = MeanOneLogNormal().approx(200)
IncomeDstn = combineIndepDstns(PermShkDstn, TranShkDstn)

ce8 = calcExpectation(
IncomeDstn,
lambda X: X[0] + X[1]
)

self.assertAlmostEqual(ce8, 2.0)

ce9 = calcExpectation(
IncomeDstn,
lambda X, a, r: r / X[0] * a + X[1],
np.array([0,1,2,3,4,5]), # an aNrmNow grid?
1.05 # an interest rate?
)

self.assertAlmostEqual(
ce9[3][0],
9.518015322143837
)

class DistributionClassTests(unittest.TestCase):
"""
Expand All @@ -26,11 +105,11 @@ class DistributionClassTests(unittest.TestCase):
"""

def test_drawMeanOneLognormal(self):
self.assertEqual(distribution.MeanOneLogNormal().draw(1)[0], 3.5397367004222002)
self.assertEqual(MeanOneLogNormal().draw(1)[0], 3.5397367004222002)

def test_Lognormal(self):

dist = distribution.Lognormal()
dist = Lognormal()

self.assertEqual(dist.draw(1)[0], 5.836039190663969)

Expand All @@ -40,7 +119,7 @@ def test_Lognormal(self):
self.assertEqual(dist.draw(1)[0], 5.836039190663969)

def test_Normal(self):
dist = distribution.Normal()
dist = Normal()

self.assertEqual(dist.draw(1)[0], 1.764052345967664)

Expand All @@ -50,17 +129,23 @@ def test_Normal(self):
self.assertEqual(dist.draw(1)[0], 1.764052345967664)

def test_Weibull(self):
self.assertEqual(distribution.Weibull().draw(1)[0], 0.79587450816311)
self.assertEqual(
Weibull().draw(1)[0],
0.79587450816311)

def test_Uniform(self):
uni = distribution.Uniform()
uni = Uniform()

self.assertEqual(distribution.Uniform().draw(1)[0], 0.5488135039273248)
self.assertEqual(
Uniform().draw(1)[0],
0.5488135039273248)

self.assertEqual(
distribution.calcExpectation(uni.approx(10))[0],
calcExpectation(uni.approx(10)),
0.5
)

def test_Bernoulli(self):
self.assertEqual(distribution.Bernoulli().draw(1)[0], False)
self.assertEqual(
Bernoulli().draw(1)[0], False
)

0 comments on commit 4a56c02

Please sign in to comment.