diff --git a/HARK/ConsumptionSaving/ConsPortfolioModel.py b/HARK/ConsumptionSaving/ConsPortfolioModel.py
index 5d6111a13..a48dce8dc 100644
--- a/HARK/ConsumptionSaving/ConsPortfolioModel.py
+++ b/HARK/ConsumptionSaving/ConsPortfolioModel.py
@@ -29,9 +29,9 @@
     MargValueFuncCRRA,
     ValueFuncCRRA,
 )
+from HARK.distribution import expected
 from HARK.metric import MetricObject
 from HARK.rewards import UtilityFuncCRRA
-from HARK.distribution import expected
 
 
 # Define a class to represent the single period solution of the portfolio choice problem
@@ -176,15 +176,10 @@ def __init__(self, verbose=False, quiet=False, **kwds):
 
         # Set the solver for the portfolio model, and update various constructed attributes
         if self.IndepDstnBool:
-            if self.DiscreteShareBool:
-                solver = ConsPortfolioDiscreteSolver
-            else:
-                solver = ConsPortfolioSolver
+            self.solve_one_period = solve_one_period_ConsPortfolio
         else:
             solver = ConsPortfolioJointDistSolver
-        self.solve_one_period = make_one_period_oo_solver(solver)
-        # self.solve_one_period = solve_one_period_ConsPortfolio
-
+            self.solve_one_period = make_one_period_oo_solver(solver)
         self.update()
 
     def pre_solve(self):
@@ -591,43 +586,108 @@ def EndOfPrddvds_dist(S, a, z):
     EndOfPrd_dvds = DiscFacEff * expected(
         EndOfPrddvds_dist, RiskyDstn, args=(aNrmNow, ShareNext)
     )
+
+    # Make the end-of-period value function if the value function is requested
+    if vFuncBool:
-    # Now find the optimal (continuous) risky share on [0,1] by solving the first
-    # order condition EndOfPrd_dvds == 0.
-    FOC_s = EndOfPrd_dvds  # Relabel for convenient typing
-
-    # For each value of aNrm, find the value of Share such that FOC_s == 0
-    crossing = np.logical_and(FOC_s[:, 1:] <= 0.0, FOC_s[:, :-1] >= 0.0)
-    share_idx = np.argmax(crossing, axis=1)
-    # This represents the index of the segment of the share grid where dvds flips
-    # from positive to negative, indicating that there's a zero *on* the segment
-
-    # Calculate the fractional distance between those share gridpoints where the
-    # zero should be found, assuming a linear function; call it alpha
-    a_idx = np.arange(aNrmCount)
-    bot_s = ShareGrid[share_idx]
-    top_s = ShareGrid[share_idx + 1]
-    bot_f = FOC_s[a_idx, share_idx]
-    top_f = FOC_s[a_idx, share_idx + 1]
-    bot_c = EndOfPrd_dvdaNvrs[a_idx, share_idx]
-    top_c = EndOfPrd_dvdaNvrs[a_idx, share_idx + 1]
-    alpha = 1.0 - top_f / (top_f - bot_f)
-
-    # Calculate the continuous optimal risky share and optimal consumption
-    ShareAdj_now = (1.0 - alpha) * bot_s + alpha * top_s
-    cNrmAdj_now = (1.0 - alpha) * bot_c + alpha * top_c
-
-    # If agent wants to put more than 100% into risky asset, he is constrained.
-    # Likewise if he wants to put less than 0% into risky asset, he is constrained.
-    constrained_top = FOC_s[:, -1] > 0.0
-    constrained_bot = FOC_s[:, 0] < 0.0
-
-    # Apply those constraints to both risky share and consumption (but lower
-    # constraint should never be relevant)
-    ShareAdj_now[constrained_top] = 1.0
-    ShareAdj_now[constrained_bot] = 0.0
-    cNrmAdj_now[constrained_top] = EndOfPrd_dvdaNvrs[constrained_top, -1]
-    cNrmAdj_now[constrained_bot] = EndOfPrd_dvdaNvrs[constrained_bot, 0]
+
+        def calc_v_intermed(S, b, z):
+            """
+            Calculate "intermediate" value from next period's bank balances, the
+            income shocks S, and the risky asset share.
+            """
+            mNrm_next = calc_mNrm_next(S, b)
+
+            vAdj_next = vFuncAdj_next(mNrm_next)
+            if AdjustPrb < 1.0:
+                vFxd_next = vFuncFxd_next(mNrm_next, z)
+                # Combine by adjustment probability
+                v_next = AdjustPrb * vAdj_next + (1.0 - AdjustPrb) * vFxd_next
+            else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
+                v_next = vAdj_next
+
+            v_intermed = (S["PermShk"] * PermGroFac) ** (1.0 - CRRA) * v_next
+            return v_intermed
+
+        # Calculate intermediate value by taking expectations over income shocks
+        v_intermed = expected(calc_v_intermed, IncShkDstn, args=(bNrmNext, ShareNext))
+
+        # Construct the "intermediate value function" for this period
+        vNvrs_intermed = uFunc.inv(v_intermed)
+        vNvrsFunc_intermed = BilinearInterp(vNvrs_intermed, bNrmGrid, ShareGrid)
+        vFunc_intermed = ValueFuncCRRA(vNvrsFunc_intermed, CRRA)
+
+        def calc_EndOfPrd_v(S, a, z):
+            # Calculate future realizations of bank balances bNrm
+            Rxs = S - Rfree
+            Rport = Rfree + z * Rxs
+            bNrm_next = Rport * a
+
+            # Make an extended share_next of the same dimension as b_nrm so
+            # that the function can be vectorized
+            z_rep = z + np.zeros_like(bNrm_next)
+
+            EndOfPrd_v = vFunc_intermed(bNrm_next, z_rep)
+            return EndOfPrd_v
+
+        # Calculate end-of-period value by taking expectations
+        EndOfPrd_v = DiscFacEff * expected(
+            calc_EndOfPrd_v, RiskyDstn, args=(aNrmNow, ShareNext)
+        )
+        EndOfPrd_vNvrs = uFunc.inv(EndOfPrd_v)
+
+        # Now make an end-of-period value function over aNrm and Share
+        EndOfPrd_vNvrsFunc = BilinearInterp(EndOfPrd_vNvrs, aNrmGrid, ShareGrid)
+        EndOfPrd_vFunc = ValueFuncCRRA(EndOfPrd_vNvrsFunc, CRRA)
+        # This will be used later to make the value function for this period
+
+    # Find the optimal risky asset share either by choosing the best value among
+    # the discrete grid choices, or by satisfying the FOC with equality (continuous)
+    if DiscreteShareBool:
+        # If we're restricted to discrete choices, then portfolio share is
+        # the one with highest value for each aNrm gridpoint
+        opt_idx = np.argmax(EndOfPrd_v, axis=1)
+        ShareAdj_now = ShareGrid[opt_idx]
+
+        # Take cNrm at that index as well... and that's it!
+        cNrmAdj_now = EndOfPrd_dvdaNvrs[np.arange(aNrmCount), opt_idx]
+
+    else:
+        # Now find the optimal (continuous) risky share on [0,1] by solving the first
+        # order condition EndOfPrd_dvds == 0.
+        FOC_s = EndOfPrd_dvds  # Relabel for convenient typing
+
+        # For each value of aNrm, find the value of Share such that FOC_s == 0
+        crossing = np.logical_and(FOC_s[:, 1:] <= 0.0, FOC_s[:, :-1] >= 0.0)
+        share_idx = np.argmax(crossing, axis=1)
+        # This represents the index of the segment of the share grid where dvds flips
+        # from positive to negative, indicating that there's a zero *on* the segment
+
+        # Calculate the fractional distance between those share gridpoints where the
+        # zero should be found, assuming a linear function; call it alpha
+        a_idx = np.arange(aNrmCount)
+        bot_s = ShareGrid[share_idx]
+        top_s = ShareGrid[share_idx + 1]
+        bot_f = FOC_s[a_idx, share_idx]
+        top_f = FOC_s[a_idx, share_idx + 1]
+        bot_c = EndOfPrd_dvdaNvrs[a_idx, share_idx]
+        top_c = EndOfPrd_dvdaNvrs[a_idx, share_idx + 1]
+        alpha = 1.0 - top_f / (top_f - bot_f)
+
+        # Calculate the continuous optimal risky share and optimal consumption
+        ShareAdj_now = (1.0 - alpha) * bot_s + alpha * top_s
+        cNrmAdj_now = (1.0 - alpha) * bot_c + alpha * top_c
+
+        # If agent wants to put more than 100% into risky asset, he is constrained.
+        # Likewise if he wants to put less than 0% into risky asset, he is constrained.
+        constrained_top = FOC_s[:, -1] > 0.0
+        constrained_bot = FOC_s[:, 0] < 0.0
+
+        # Apply those constraints to both risky share and consumption (but lower
+        # constraint should never be relevant)
+        ShareAdj_now[constrained_top] = 1.0
+        ShareAdj_now[constrained_bot] = 0.0
+        cNrmAdj_now[constrained_top] = EndOfPrd_dvdaNvrs[constrained_top, -1]
+        cNrmAdj_now[constrained_bot] = EndOfPrd_dvdaNvrs[constrained_bot, 0]
 
     # When the natural borrowing constraint is *not* zero, then aNrm=0 is in the
     # grid, but there's no way to "optimize" the portfolio if a=0, and consumption
@@ -667,13 +727,28 @@ def EndOfPrddvds_dist(S, a, z):
     # Construct the marginal value of mNrm function when the agent can't adjust his share
     dvdmFuncFxd_now = MargValueFuncCRRA(cFuncFxd_now, CRRA)
 
-    # Construct the optimal risky share function when adjusting is possible
-    if BoroCnstNat_iszero:
-        Share_lower_bound = ShareLimit
+    # Construct the optimal risky share function when adjusting is possible.
+    # The interpolation method depends on whether the choice is discrete or continuous.
+    if DiscreteShareBool:
+        # If the share choice is discrete, the "interpolated" share function acts
+        # like a step function, with jumps at the midpoints of mNrm gridpoints.
+        # Because an actual step function would break our (assumed continuous) linear
+        # interpolator, there's a *tiny* region with extremely high slope.
+        mNrmAdj_mid = (mNrmAdj_now[2:] + mNrmAdj_now[1:-1]) / 2
+        mNrmAdj_plus = mNrmAdj_mid * (1.0 + 1e-12)
+        mNrmAdj_comb = (np.transpose(np.vstack((mNrmAdj_mid, mNrmAdj_plus)))).flatten()
+        mNrmAdj_comb = np.append(np.insert(mNrmAdj_comb, 0, 0.0), mNrmAdj_now[-1])
+        Share_comb = (np.transpose(np.vstack((ShareAdj_now, ShareAdj_now)))).flatten()
+        ShareFuncAdj_now = LinearInterp(mNrmAdj_comb, Share_comb)
+
     else:
-        Share_lower_bound = 1.0
-    ShareAdj_now = np.insert(ShareAdj_now, 0, Share_lower_bound)
-    ShareFuncAdj_now = LinearInterp(mNrmAdj_now, ShareAdj_now, ShareLimit, 0.0)
+        # If the share choice is continuous, just make an ordinary interpolating function
+        if BoroCnstNat_iszero:
+            Share_lower_bound = ShareLimit
+        else:
+            Share_lower_bound = 1.0
+        ShareAdj_now = np.insert(ShareAdj_now, 0, Share_lower_bound)
+        ShareFuncAdj_now = LinearInterp(mNrmAdj_now, ShareAdj_now, ShareLimit, 0.0)
 
     # This is a point at which (a,c,share) have consistent length. Take the
    # snapshot for storing the grid and values in the solution.
@@ -686,61 +761,12 @@ def EndOfPrddvds_dist(S, a, z):
         "eop_dvds_fxd": EndOfPrd_dvds,
     }
 
-    # Add the value function if requested TODO
+    # Add the value function if requested
     if vFuncBool:
         # Create the value functions for this period, defined over market resources
         # mNrm when agent can adjust his portfolio, and over market resources and
         # fixed share when agent can not adjust his portfolio.
 
-        def calc_v_intermed(S, b, z):
-            """
-            Calculate "intermediate" value from next period's bank balances, the
-            income shocks S, and the risky asset share.
-            """
-            mNrm_next = calc_mNrm_next(S, b)
-
-            vAdj_next = vFuncAdj_next(mNrm_next)
-            if AdjustPrb < 1.0:
-                vFxd_next = vFuncFxd_next(mNrm_next, z)
-                # Combine by adjustment probability
-                v_next = AdjustPrb * vAdj_next + (1.0 - AdjustPrb) * vFxd_next
-            else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
-                v_next = vAdj_next
-
-            v_intermed = (S["PermShk"] * PermGroFac) ** (1.0 - CRRA) * v_next
-            return v_intermed
-
-        # Calculate intermediate value by taking expectations over income shocks
-        v_intermed = expected(calc_v_intermed, IncShkDstn, args=(bNrmNext, ShareNext))
-
-        # Construct the "intermediate value function" for this period
-        vNvrs_intermed = uFunc.inv(v_intermed)
-        vNvrsFunc_intermed = BilinearInterp(vNvrs_intermed, bNrmGrid, ShareGrid)
-        vFunc_intermed = ValueFuncCRRA(vNvrsFunc_intermed, CRRA)
-
-        def calc_EndOfPrd_v(S, a, z):
-            # Calculate future realizations of bank balances bNrm
-            Rxs = S - Rfree
-            Rport = Rfree + z * Rxs
-            bNrm_next = Rport * a
-
-            # Make an extended share_next of the same dimension as b_nrm so
-            # that the function can be vectorized
-            z_rep = z + np.zeros_like(bNrm_next)
-
-            EndOfPrd_v = vFunc_intermed(bNrm_next, z_rep)
-            return EndOfPrd_v
-
-        # Calculate end-of-period value by taking expectations
-        EndOfPrd_v = DiscFacEff * expected(
-            calc_EndOfPrd_v, RiskyDstn, args=(aNrmNow, ShareNext)
-        )
-        EndOfPrd_vNvrs = uFunc.inv(EndOfPrd_v)
-
-        # Now make an end-of-period value function over aNrm and Share
-        EndOfPrd_vNvrsFunc = BilinearInterp(EndOfPrd_vNvrs, aNrmGrid, ShareGrid)
-        EndOfPrd_vFunc = ValueFuncCRRA(EndOfPrd_vNvrsFunc, CRRA)
-
         # Construct the value function when the agent can adjust his portfolio
         mNrm_temp = aXtraGrid  # Just use aXtraGrid as our grid of mNrm values
         cNrm_temp = cFuncAdj_now(mNrm_temp)