Deal with cases when the agent wants to short #749

Merged
merged 2 commits on Jul 2, 2020
35 changes: 17 additions & 18 deletions HARK/ConsumptionSaving/ConsPortfolioModel.py
@@ -778,31 +778,30 @@ def solveConsPortfolio(solution_next,ShockDstn,IncomeDstn,RiskyDstn,
 FOC_s = EndOfPrddvds
 Share_now = np.zeros_like(aNrmGrid) # Initialize to putting everything in safe asset
 cNrmAdj_now = np.zeros_like(aNrmGrid)
-constrained = FOC_s[:,-1] > 0. # If agent wants to put more than 100% into risky asset, he is constrained
-Share_now[constrained] = 1.0
+constrained_top = FOC_s[:,-1] > 0. # If agent wants to put more than 100% into risky asset, he is constrained
+constrained_bot = FOC_s[:,0] < 0. # Likewise if he wants to put less than 0% into risky asset
+Share_now[constrained_top] = 1.0
 if not zero_bound:
     Share_now[0] = 1. # aNrm=0, so there's no way to "optimize" the portfolio
     cNrmAdj_now[0] = EndOfPrddvdaNvrs[0,-1] # Consumption when aNrm=0 does not depend on Share
-cNrmAdj_now[constrained] = EndOfPrddvdaNvrs[constrained,-1] # Get consumption when share-constrained
-
+    constrained_top[0] = True # Mark as constrained so that there is no attempt at optimization
+cNrmAdj_now[constrained_top] = EndOfPrddvdaNvrs[constrained_top,-1] # Get consumption when share-constrained
+cNrmAdj_now[constrained_bot] = EndOfPrddvdaNvrs[constrained_bot,0]
 # For each value of aNrm, find the value of Share such that FOC-Share == 0.
 # This loop can probably be eliminated, but it's such a small step that it won't speed things up much.
 crossing = np.logical_and(FOC_s[:,1:] <= 0., FOC_s[:,:-1] >= 0.)
 for j in range(aNrm_N):
-    if Share_now[j] == 0.:
-        try:
-            idx = np.argwhere(crossing[j,:])[0][0]
-            bot_s = ShareGrid[idx]
-            top_s = ShareGrid[idx+1]
-            bot_f = FOC_s[j,idx]
-            top_f = FOC_s[j,idx+1]
-            bot_c = EndOfPrddvdaNvrs[j,idx]
-            top_c = EndOfPrddvdaNvrs[j,idx+1]
-            alpha = 1. - top_f/(top_f-bot_f)
-            Share_now[j] = (1.-alpha)*bot_s + alpha*top_s
-            cNrmAdj_now[j] = (1.-alpha)*bot_c + alpha*top_c
-        except:
-            print('No optimal controls found for a=' + str(aNrmGrid[j]))
+    if not (constrained_top[j] or constrained_bot[j]):
+        idx = np.argwhere(crossing[j,:])[0][0]
+        bot_s = ShareGrid[idx]
+        top_s = ShareGrid[idx+1]
+        bot_f = FOC_s[j,idx]
+        top_f = FOC_s[j,idx+1]
+        bot_c = EndOfPrddvdaNvrs[j,idx]
+        top_c = EndOfPrddvdaNvrs[j,idx+1]
+        alpha = 1. - top_f/(top_f-bot_f)
+        Share_now[j] = (1.-alpha)*bot_s + alpha*top_s
+        cNrmAdj_now[j] = (1.-alpha)*bot_c + alpha*top_c

 # Calculate the endogenous mNrm gridpoints when the agent adjusts his portfolio
 mNrmAdj_now = aNrmGrid + cNrmAdj_now
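The essence of the change, for readers skimming the diff: gridpoints where the first-order condition with respect to the risky share is still positive at a 100% share (or still negative at a 0% share) are flagged as corner solutions, and the interior optimum is found everywhere else by linearly interpolating the FOC's zero crossing between adjacent share gridpoints. Below is a minimal standalone sketch of that logic; the toy array names and values are illustrative assumptions, not code from HARK.

import numpy as np

# Toy inputs (illustrative only): 6 candidate risky shares and 3 asset gridpoints.
ShareGrid = np.linspace(0.0, 1.0, 6)
aNrmGrid = np.array([0.5, 1.0, 2.0])
# FOC_s[j, k]: marginal gain from raising the risky share at asset point j, share gridpoint k.
# Row 0 stays positive (wants more than 100% risky), row 2 starts negative (wants to short).
FOC_s = np.array([
    [0.9, 0.7, 0.5, 0.3, 0.2, 0.1],
    [0.6, 0.3, 0.1, -0.1, -0.3, -0.5],
    [-0.1, -0.2, -0.4, -0.6, -0.8, -1.0],
])

Share_now = np.zeros_like(aNrmGrid)
constrained_top = FOC_s[:, -1] > 0.  # constraint binds at a 100% risky share
constrained_bot = FOC_s[:, 0] < 0.   # constraint binds at a 0% risky share
Share_now[constrained_top] = 1.0     # bottom-constrained rows keep the initial 0.0

# A sign change between adjacent share gridpoints brackets the FOC's zero.
crossing = np.logical_and(FOC_s[:, 1:] <= 0., FOC_s[:, :-1] >= 0.)
for j in range(aNrmGrid.size):
    if not (constrained_top[j] or constrained_bot[j]):
        idx = np.argwhere(crossing[j, :])[0][0]
        bot_f, top_f = FOC_s[j, idx], FOC_s[j, idx + 1]
        alpha = 1. - top_f / (top_f - bot_f)  # weight that sets the interpolated FOC to zero
        Share_now[j] = (1. - alpha) * ShareGrid[idx] + alpha * ShareGrid[idx + 1]

print(Share_now)  # [1.  0.5 0. ] for the toy rows above

Compared with the old try/except around np.argwhere, the explicit constrained_top/constrained_bot flags treat the corner cases as part of the model's logic rather than an error path, so a genuinely missing crossing would now surface as an exception instead of being printed and silently skipped.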