From bbf1a845b4a47f8e28703f3bea392a205c5a11ec Mon Sep 17 00:00:00 2001 From: Mridul Seth Date: Tue, 28 Apr 2020 23:11:37 +0530 Subject: [PATCH 1/2] remove cstwMPC from HARK codebase --- HARK/cstwMPC/Figures/placeholder.txt | 1 - HARK/cstwMPC/MakeCSTWfigs.py | 542 --------------- HARK/cstwMPC/MakeCSTWfigsForSlides.py | 138 ---- HARK/cstwMPC/Results/BetaDistPYResults.txt | 26 - .../cstwMPC/Results/BetaDistPYResults_py3.txt | 26 - HARK/cstwMPC/SetupParamsCSTW.py | 303 --------- HARK/cstwMPC/__init__.py | 0 HARK/cstwMPC/cstwMPC.py | 624 ------------------ 8 files changed, 1660 deletions(-) delete mode 100644 HARK/cstwMPC/Figures/placeholder.txt delete mode 100644 HARK/cstwMPC/MakeCSTWfigs.py delete mode 100644 HARK/cstwMPC/MakeCSTWfigsForSlides.py delete mode 100644 HARK/cstwMPC/Results/BetaDistPYResults.txt delete mode 100644 HARK/cstwMPC/Results/BetaDistPYResults_py3.txt delete mode 100644 HARK/cstwMPC/SetupParamsCSTW.py delete mode 100644 HARK/cstwMPC/__init__.py delete mode 100644 HARK/cstwMPC/cstwMPC.py diff --git a/HARK/cstwMPC/Figures/placeholder.txt b/HARK/cstwMPC/Figures/placeholder.txt deleted file mode 100644 index 0994098b6..000000000 --- a/HARK/cstwMPC/Figures/placeholder.txt +++ /dev/null @@ -1 +0,0 @@ -This is a placeholder file because github doesn't like empty folders. \ No newline at end of file diff --git a/HARK/cstwMPC/MakeCSTWfigs.py b/HARK/cstwMPC/MakeCSTWfigs.py deleted file mode 100644 index 708082ecb..000000000 --- a/HARK/cstwMPC/MakeCSTWfigs.py +++ /dev/null @@ -1,542 +0,0 @@ -''' -This module makes some figures for cstwMPC. It requires that quite a few specifications -of the model have been estimated, with the results stored in ./Results. 
-''' - -from builtins import range -import matplotlib.pyplot as plt -import csv -import numpy as np -import os - -# Save the current file's directory location for writing output: -my_file_path = os.path.dirname(os.path.abspath(__file__)) - - -f = open(my_file_path + '/Results/LCbetaPointNetWorthLorenzFig.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -lorenz_percentiles = [] -scf_lorenz = [] -beta_point_lorenz = [] -for j in range(len(raw_data)): - lorenz_percentiles.append(float(raw_data[j][0])) - scf_lorenz.append(float(raw_data[j][1])) - beta_point_lorenz.append(float(raw_data[j][2])) -f.close() -lorenz_percentiles = np.array(lorenz_percentiles) -scf_lorenz = np.array(scf_lorenz) -beta_point_lorenz = np.array(beta_point_lorenz) - -f = open(my_file_path + '/Results/LCbetaDistNetWorthLorenzFig.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -beta_dist_lorenz = [] -for j in range(len(raw_data)): - beta_dist_lorenz.append(float(raw_data[j][2])) -f.close() -beta_dist_lorenz = np.array(beta_dist_lorenz) - -f = open(my_file_path + '/Results/LCbetaPointNetWorthMPCfig.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -mpc_percentiles = [] -mpc_beta_point = [] -for j in range(len(raw_data)): - mpc_percentiles.append(float(raw_data[j][0])) - mpc_beta_point.append(float(raw_data[j][1])) -f.close() -mpc_percentiles = np.asarray(mpc_percentiles) -mpc_beta_point = np.asarray(mpc_beta_point) - -f = open(my_file_path + '/Results/LCbetaDistNetWorthMPCfig.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -mpc_beta_dist = [] -for j in range(len(raw_data)): - mpc_beta_dist.append(float(raw_data[j][1])) -f.close() -mpc_beta_dist = np.asarray(mpc_beta_dist) - -f = open(my_file_path + '/Results/LCbetaDistLiquidMPCfig.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -mpc_beta_dist_liquid = [] -for j in range(len(raw_data)): - 
mpc_beta_dist_liquid.append(float(raw_data[j][1])) -f.close() -mpc_beta_dist_liquid = np.asarray(mpc_beta_dist_liquid) - -f = open(my_file_path + '/Results/LCbetaDistNetWorthKappaByAge.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -kappa_mean_age = [] -kappa_lo_beta_age = [] -kappa_hi_beta_age = [] -for j in range(len(raw_data)): - kappa_mean_age.append(float(raw_data[j][0])) - kappa_lo_beta_age.append(float(raw_data[j][1])) - kappa_hi_beta_age.append(float(raw_data[j][2])) -kappa_mean_age = np.array(kappa_mean_age) -kappa_lo_beta_age = np.array(kappa_lo_beta_age) -kappa_hi_beta_age = np.array(kappa_hi_beta_age) -age_list = np.array(list(range(len(kappa_mean_age))),dtype=float)*0.25 + 24.0 -f.close() - -f = open(my_file_path + '/Results/LC_KYbyBeta.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -KY_by_beta_lifecycle = [] -beta_list = [] -for j in range(len(raw_data)): - beta_list.append(float(raw_data[j][0])) - KY_by_beta_lifecycle.append(float(raw_data[j][1])) -beta_list = np.array(beta_list) -KY_by_beta_lifecycle = np.array(KY_by_beta_lifecycle) -f.close() - -f = open(my_file_path + '/Results/IH_KYbyBeta.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -KY_by_beta_infinite = [] -for j in range(len(raw_data)): - KY_by_beta_infinite.append(float(raw_data[j][1])) -KY_by_beta_infinite = np.array(KY_by_beta_infinite) -f.close() - - -plt.plot(100*lorenz_percentiles,beta_point_lorenz,'-.k',linewidth=1.5) -plt.plot(100*lorenz_percentiles,beta_dist_lorenz,'--k',linewidth=1.5) -plt.plot(100*lorenz_percentiles,scf_lorenz,'-k',linewidth=1.5) -plt.xlabel('Wealth percentile',fontsize=14) -plt.ylabel('Cumulative wealth ownership',fontsize=14) -plt.title('Lorenz Curve Matching, Lifecycle Model',fontsize=16) -plt.legend((r'$\beta$-point',r'$\beta$-dist','SCF data'),loc=2,fontsize=12) -plt.ylim(-0.01,1) -plt.savefig(my_file_path + '/Figures/LorenzLifecycle.pdf') -plt.show() - 
-plt.plot(mpc_beta_point,mpc_percentiles,'-.k',linewidth=1.5) -plt.plot(mpc_beta_dist,mpc_percentiles,'--k',linewidth=1.5) -plt.plot(mpc_beta_dist_liquid,mpc_percentiles,'-.k',linewidth=1.5) -plt.xlabel('Marginal propensity to consume',fontsize=14) -plt.ylabel('Cumulative probability',fontsize=14) -plt.title('CDF of the MPC, Lifecycle Model',fontsize=16) -plt.legend((r'$\beta$-point NW',r'$\beta$-dist NW',r'$\beta$-dist LA'),loc=0,fontsize=12) -plt.savefig(my_file_path + '/Figures/MPCdistLifecycle.pdf') -plt.show() - -plt.plot(age_list,kappa_mean_age,'-k',linewidth=1.5) -plt.plot(age_list,kappa_lo_beta_age,'--k',linewidth=1.5) -plt.plot(age_list,kappa_hi_beta_age,'-.k',linewidth=1.5) -plt.legend(('Population average','Most impatient','Most patient'),loc=2,fontsize=12) -plt.xlabel('Age',fontsize=14) -plt.ylabel('Average MPC',fontsize=14) -plt.title('Marginal Propensity to Consume by Age',fontsize=16) -plt.xlim(24,100) -plt.ylim(0,1) -plt.savefig(my_file_path + '/Figures/MPCbyAge.pdf') -plt.show() - -plt.plot(beta_list,KY_by_beta_infinite,'-k',linewidth=1.5) -plt.plot(beta_list,KY_by_beta_lifecycle,'--k',linewidth=1.5) -plt.plot([0.95,1.01],[10.26,10.26],'--k',linewidth=0.75) -plt.text(0.96,12,'U.S. 
K/Y ratio') -plt.legend(('Perpetual youth','Lifecycle'),loc=2,fontsize=12) -plt.xlabel(r'Discount factor $\beta$',fontsize=14) -plt.ylabel('Capital to output ratio',fontsize=14) -plt.title('K/Y Ratio by Discount Factor',fontsize=16) -plt.ylim(0,100) -plt.savefig(my_file_path + '/Figures/KYratioByBeta.pdf') -plt.show() - - -f = open(my_file_path + '/Results/IHbetaPointNetWorthLorenzFig.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -lorenz_percentiles = [] -scf_lorenz = [] -beta_point_lorenz = [] -for j in range(len(raw_data)): - lorenz_percentiles.append(float(raw_data[j][0])) - scf_lorenz.append(float(raw_data[j][1])) - beta_point_lorenz.append(float(raw_data[j][2])) -f.close() -lorenz_percentiles = np.array(lorenz_percentiles) -scf_lorenz = np.array(scf_lorenz) -beta_point_lorenz = np.array(beta_point_lorenz) - -f = open(my_file_path + '/Results/IHbetaDistNetWorthLorenzFig.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -beta_dist_lorenz = [] -for j in range(len(raw_data)): - beta_dist_lorenz.append(float(raw_data[j][2])) -f.close() -beta_dist_lorenz = np.array(beta_dist_lorenz) - - -f = open(my_file_path + '/Results/IHbetaPointLiquidLorenzFig.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -beta_point_lorenz_liquid = [] -for j in range(len(raw_data)): - beta_point_lorenz_liquid.append(float(raw_data[j][2])) -f.close() -beta_point_lorenz_liquid = np.array(beta_point_lorenz_liquid) - -f = open(my_file_path + '/Results/IHbetaDistLiquidLorenzFig.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -beta_dist_lorenz_liquid = [] -for j in range(len(raw_data)): - beta_dist_lorenz_liquid.append(float(raw_data[j][2])) -f.close() -beta_dist_lorenz_liquid = np.array(beta_dist_lorenz_liquid) - -f = open(my_file_path + '/Results/IHbetaPointNetWorthMPCfig.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -mpc_percentiles = 
[] -mpc_beta_point = [] -for j in range(len(raw_data)): - mpc_percentiles.append(float(raw_data[j][0])) - mpc_beta_point.append(float(raw_data[j][1])) -f.close() -mpc_percentiles = np.asarray(mpc_percentiles) -mpc_beta_point = np.asarray(mpc_beta_point) - -f = open(my_file_path + '/Results/IHbetaDistNetWorthMPCfig.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -mpc_beta_dist = [] -for j in range(len(raw_data)): - mpc_beta_dist.append(float(raw_data[j][1])) -f.close() -mpc_beta_dist = np.asarray(mpc_beta_dist) - -f = open(my_file_path + '/Results/IHbetaDistLiquidMPCfig.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -mpc_beta_dist_liquid = [] -for j in range(len(raw_data)): - mpc_beta_dist_liquid.append(float(raw_data[j][1])) -f.close() -mpc_beta_dist_liquid = np.asarray(mpc_beta_dist_liquid) - - -plt.plot(100*lorenz_percentiles,beta_point_lorenz,'-.k',linewidth=1.5) -plt.plot(100*lorenz_percentiles,scf_lorenz,'-k',linewidth=1.5) -plt.xlabel('Wealth percentile',fontsize=14) -plt.ylabel('Cumulative wealth ownership',fontsize=14) -plt.title('Lorenz Curve Matching, Perpetual Youth Model',fontsize=16) -plt.legend((r'$\beta$-point','SCF data'),loc=2,fontsize=12) -plt.ylim(-0.01,1) -plt.savefig(my_file_path + '/Figures/LorenzInfiniteBP.pdf') -plt.show() - -plt.plot(100*lorenz_percentiles,beta_point_lorenz,'-.k',linewidth=1.5) -plt.plot(100*lorenz_percentiles,beta_dist_lorenz,'--k',linewidth=1.5) -plt.plot(100*lorenz_percentiles,scf_lorenz,'-k',linewidth=1.5) -plt.xlabel('Wealth percentile',fontsize=14) -plt.ylabel('Cumulative wealth ownership',fontsize=14) -plt.title('Lorenz Curve Matching, Perpetual Youth Model',fontsize=16) -plt.legend((r'$\beta$-point',r'$\beta$-dist','SCF data'),loc=2,fontsize=12) -plt.ylim(-0.01,1) -plt.savefig(my_file_path + '/Figures/LorenzInfinite.pdf') -plt.show() - -plt.plot(100*lorenz_percentiles,beta_point_lorenz_liquid,'-.k',linewidth=1.5) 
-plt.plot(100*lorenz_percentiles,beta_dist_lorenz_liquid,'--k',linewidth=1.5) -plt.plot(np.array([20,40,60,80]),np.array([0.0, 0.004, 0.025,0.117]),'.r',markersize=10) -plt.xlabel('Wealth percentile',fontsize=14) -plt.ylabel('Cumulative wealth ownership',fontsize=14) -plt.title('Lorenz Curve Matching, Perpetual Youth (Liquid Assets)',fontsize=16) -plt.legend((r'$\beta$-point',r'$\beta$-dist','SCF targets'),loc=2,fontsize=12) -plt.ylim(-0.01,1) -plt.savefig(my_file_path + '/Figures/LorenzLiquid.pdf') -plt.show() - -plt.plot(mpc_beta_point,mpc_percentiles,'-.k',linewidth=1.5) -plt.plot(mpc_beta_dist,mpc_percentiles,'--k',linewidth=1.5) -plt.plot(mpc_beta_dist_liquid,mpc_percentiles,'-.k',linewidth=1.5) -plt.xlabel('Marginal propensity to consume',fontsize=14) -plt.ylabel('Cumulative probability',fontsize=14) -plt.title('CDF of the MPC, Perpetual Youth Model',fontsize=16) -plt.legend((r'$\beta$-point NW',r'$\beta$-dist NW',r'$\beta$-dist LA'),loc=0,fontsize=12) -plt.savefig(my_file_path + '/Figures/MPCdistInfinite.pdf') -plt.show() - - - - -f = open(my_file_path + '/Results/SensitivityRho.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -rho_sensitivity = np.array(raw_data) -f.close() - -f = open(my_file_path + '/Results/SensitivityXiSigma.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -xi_sigma_sensitivity = np.array(raw_data) -f.close() - -f = open(my_file_path + '/Results/SensitivityPsiSigma.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -psi_sigma_sensitivity = np.array(raw_data) -f.close() - -f = open(my_file_path + '/Results/SensitivityMu.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -mu_sensitivity = np.array(raw_data) -f.close() - -f = open(my_file_path + '/Results/SensitivityUrate.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -urate_sensitivity = np.array(raw_data) -f.close() - -f = 
open(my_file_path + '/Results/SensitivityMortality.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -mortality_sensitivity = np.array(raw_data) -f.close() - -f = open(my_file_path + '/Results/SensitivityG.txt','r') -my_reader = csv.reader(f,delimiter='\t') -raw_data = list(my_reader) -g_sensitivity = np.array(raw_data) -f.close() - - -#plt.subplots(2,3,sharey=True) -kappa = 0.242 -plt.subplot(2,3,1,xticks=[0.5,2.3,4],ylim=(0.21,0.36)) -plt.plot(rho_sensitivity[:,0],rho_sensitivity[:,1],'-k',linewidth=1) -plt.plot(1,kappa,'.k',markersize=8) -plt.yticks([0.22,0.26,0.30,0.34]) -plt.text(0.7,0.215,r'Risk aversion $\rho$',fontsize=11) -plt.ylabel('Aggregate MPC',fontsize=11) - -plt.subplot(2,3,2,xticks=[0.0,0.4,0.8],xlim=(0,0.8),ylim=(0.21,0.36)) -plt.plot(xi_sigma_sensitivity[:,0],xi_sigma_sensitivity[:,1],'-k',linewidth=1) -plt.plot(0.2,kappa,'.k',markersize=8) -plt.yticks([0.22,0.26,0.30,0.34],[]) -plt.text(0.045,0.215,r'Transitory std $\sigma_\theta$',fontsize=11) -plt.title('Sensitivity Analysis: Perpetual Youth',fontsize=14) - -plt.subplot(2,3,3,xticks=[0.04,0.06,0.08],xlim=(0.04,0.08),ylim=(0.21,0.36)) -plt.plot(psi_sigma_sensitivity[:,0],psi_sigma_sensitivity[:,1],'-k',linewidth=1) -plt.plot(0.0603,kappa,'.k',markersize=8) -plt.yticks([0.22,0.26,0.30,0.34],[]) -plt.text(0.041,0.34,r'Permanent std $\sigma_\psi$',fontsize=11) - -plt.subplot(2,3,4,xticks=[0.0,0.4,0.8],ylim=(0.21,0.36)) -plt.plot(mu_sensitivity[:,0],mu_sensitivity[:,1],'-k',linewidth=1) -plt.plot(0.15,kappa,'.k',markersize=8) -plt.yticks([0.22,0.26,0.30,0.34]) -plt.text(0.05,0.34,'Unemployment',fontsize=11) -plt.text(0.22,0.32,r'benefit $\mu$',fontsize=11) -plt.ylabel('Aggregate MPC',fontsize=11) - -plt.subplot(2,3,5,xticks=[0.02,0.07,0.12],ylim=(0.21,0.36)) -plt.plot(urate_sensitivity[:,0],urate_sensitivity[:,1],'-k',linewidth=1) -plt.plot(0.07,kappa,'.k',markersize=8) -plt.yticks([0.22,0.26,0.30,0.34],[]) -plt.text(0.03,0.34,'Unemployment',fontsize=11) 
-plt.text(0.055,0.32,r'rate $\mho$',fontsize=11) - -''' -plt.subplot(2,3,6,xticks=[0.004,0.008,0.012],xlim=(0.003,0.0125),ylim=(0.21,0.36)) -plt.plot(mortality_sensitivity[:,0],mortality_sensitivity[:,1],'-k',linewidth=1) -plt.plot(0.00625,kappa,'.k',markersize=8) -plt.yticks([0.22,0.26,0.30,0.34],[]) -plt.text(0.0037,0.34,r'Mortality rate $\mathsf{D}$',fontsize=11) -''' -plt.subplot(2,3,6,xlim=(0.0,0.04),ylim=(0.21,0.36)) -plt.plot(g_sensitivity[:,0],g_sensitivity[:,1],'-k',linewidth=1) -plt.plot(0.01,kappa,'.k',markersize=8) -plt.xticks([0.0,0.02,0.04],['0','0.02','0.04']) -plt.yticks([0.22,0.26,0.30,0.34],[]) -plt.text(0.008,0.34,r'Aggregate',fontsize=11) -plt.text(0.005,0.32,r'growth rate $g$',fontsize=11) - -plt.savefig(my_file_path + '/Figures/KappaSensitivity.pdf') -plt.show() - - - -#plt.subplots(2,3,sharey=True) -beta = 0.9877 -plt.subplot(2,3,1,xticks=[0.5,2.3,4],ylim=(0.95,1.0)) -plt.plot(rho_sensitivity[:,0],rho_sensitivity[:,2],'-k',linewidth=1) -plt.plot(1,beta,'.k',markersize=8) -plt.yticks([0.95,0.96,0.97,0.98,0.99,1.0]) -plt.text(0.7,0.955,r'Risk aversion $\rho$',fontsize=11) -plt.ylabel(r'Estimated $\grave{\beta}$',fontsize=11) - -plt.subplot(2,3,2,xticks=[0.0,0.4,0.8],xlim=(0,0.8),ylim=(0.95,1.0)) -plt.plot(xi_sigma_sensitivity[:,0],xi_sigma_sensitivity[:,2],'-k',linewidth=1) -plt.plot(0.2,beta,'.k',markersize=8) -plt.yticks([0.95,0.96,0.97,0.98,0.99,1.0],[]) -plt.text(0.045,0.955,r'Transitory std $\sigma_\theta$',fontsize=11) -plt.title('Sensitivity Analysis: Perpetual Youth',fontsize=14) - -plt.subplot(2,3,3,xticks=[0.04,0.06,0.08],xlim=(0.04,0.08),ylim=(0.97,1.0)) -plt.plot(psi_sigma_sensitivity[:,0],psi_sigma_sensitivity[:,2],'-k',linewidth=1) -plt.plot(0.0603,beta,'.k',markersize=8) -plt.yticks([0.95,0.96,0.97,0.98,0.99,1.0],[]) -plt.text(0.041,0.955,r'Permanent std $\sigma_\psi$',fontsize=11) - -plt.subplot(2,3,4,xticks=[0.0,0.4,0.8],ylim=(0.95,1.0)) -plt.plot(mu_sensitivity[:,0],mu_sensitivity[:,2],'-k',linewidth=1) 
-plt.plot(0.15,beta,'.k',markersize=8) -plt.yticks([0.95,0.96,0.97,0.98,0.99,1.0]) -plt.text(0.05,0.9625,'Unemployment',fontsize=11) -plt.text(0.22,0.955,r'benefit $\mu$',fontsize=11) -plt.ylabel(r'Estimated $\grave{\beta}$',fontsize=11) - -plt.subplot(2,3,5,xticks=[0.02,0.07,0.12],ylim=(0.95,1.0)) -plt.plot(urate_sensitivity[:,0],urate_sensitivity[:,2],'-k',linewidth=1) -plt.plot(0.07,beta,'.k',markersize=8) -plt.yticks([0.95,0.96,0.97,0.98,0.99,1.0],[]) -plt.text(0.03,0.9625,'Unemployment',fontsize=11) -plt.text(0.055,0.955,r'rate $\mho$',fontsize=11) - -''' -plt.subplot(2,3,6,xticks=[0.004,0.008,0.012],xlim=(0.003,0.0125),ylim=(0.95,1.0)) -plt.plot(mortality_sensitivity[:,0],mortality_sensitivity[:,2],'-k',linewidth=1) -plt.plot(0.00625,beta,'.k',markersize=8) -plt.yticks([0.95,0.96,0.97,0.98,0.99,1.0],[]) -plt.text(0.0037,0.955,r'Mortality rate $\mathsf{D}$',fontsize=11) -''' -plt.subplot(2,3,6,xlim=(0.0,0.04),ylim=(0.95,1.0)) -plt.plot(g_sensitivity[:,0],g_sensitivity[:,2],'-k',linewidth=1) -plt.plot(0.01,beta,'.k',markersize=8) -plt.xticks([0.0,0.02,0.04],['0','0.02','0.04']) -plt.yticks([0.95,0.96,0.97,0.98,0.99,1.0],[]) -plt.text(0.008,0.9625,r'Aggregate',fontsize=11) -plt.text(0.005,0.955,r'growth rate $g$',fontsize=11) - -plt.savefig(my_file_path + '/Figures/BetaSensitivity.pdf') -plt.show() - - - -#plt.subplots(2,3,sharey=True) -nabla = 0.00736 -plt.subplot(2,3,1,xticks=[0.5,2.3,4],ylim=(0.000,0.055)) -plt.plot(rho_sensitivity[:,0],rho_sensitivity[:,3],'-k',linewidth=1) -plt.plot(1,nabla,'.k',markersize=8) -plt.yticks([0,0.01,0.02,0.03,0.04,0.05]) -plt.text(0.63,0.0475,r'Risk aversion $\rho$',fontsize=11) -plt.ylabel(r'Estimated $\nabla$',fontsize=11) - -plt.subplot(2,3,2,xticks=[0.0,0.4,0.8],xlim=(0,0.8),ylim=(0.000,0.055)) -plt.plot(xi_sigma_sensitivity[:,0],xi_sigma_sensitivity[:,3],'-k',linewidth=1) -plt.plot(0.2,nabla,'.k',markersize=8) -plt.yticks([0,0.01,0.02,0.03,0.04,0.05],[]) -plt.text(0.045,0.0475,r'Transitory std $\sigma_\theta$',fontsize=11) 
-plt.title('Sensitivity Analysis: Perpetual Youth',fontsize=14) - -plt.subplot(2,3,3,xticks=[0.04,0.06,0.08],xlim=(0.04,0.08),ylim=(0.00,0.055)) -plt.plot(psi_sigma_sensitivity[:,0],psi_sigma_sensitivity[:,3],'-k',linewidth=1) -plt.plot(0.0603,nabla,'.k',markersize=8) -plt.yticks([0,0.01,0.02,0.03,0.04,0.05],[]) -plt.text(0.041,0.0475,r'Permanent std $\sigma_\psi$',fontsize=11) - -plt.subplot(2,3,4,xticks=[0.0,0.4,0.8],ylim=(0.000,0.055)) -plt.plot(mu_sensitivity[:,0],mu_sensitivity[:,3],'-k',linewidth=1) -plt.plot(0.15,nabla,'.k',markersize=8) -plt.yticks([0,0.01,0.02,0.03,0.04,0.05]) -plt.text(0.05,0.0475,'Unemployment',fontsize=11) -plt.text(0.22,0.040,r'benefit $\mu$',fontsize=11) -plt.ylabel(r'Estimated $\nabla$',fontsize=11) - -plt.subplot(2,3,5,xticks=[0.02,0.07,0.12],ylim=(0.000,0.055)) -plt.plot(urate_sensitivity[:,0],urate_sensitivity[:,3],'-k',linewidth=1) -plt.plot(0.07,nabla,'.k',markersize=8) -plt.yticks([0,0.01,0.02,0.03,0.04,0.05],[]) -plt.text(0.03,0.0475,'Unemployment',fontsize=11) -plt.text(0.055,0.04,r'rate $\mho$',fontsize=11) - -''' -plt.subplot(2,3,6,xticks=[0.004,0.008,0.012],xlim=(0.003,0.0125),ylim=(0.000,0.055)) -plt.plot(mortality_sensitivity[:,0],mortality_sensitivity[:,3],'-k',linewidth=1) -plt.plot(0.00625,nabla,'.k',markersize=8) -plt.yticks([0,0.01,0.02,0.03,0.04,0.05],[]) -plt.text(0.0037,0.0475,r'Mortality rate $\mathsf{D}$',fontsize=11) -''' - -plt.subplot(2,3,6,xlim=(0.0,0.04),ylim=(0.000,0.055)) -plt.plot(g_sensitivity[:,0],g_sensitivity[:,3],'-k',linewidth=1) -plt.plot(0.01,nabla,'.k',markersize=8) -plt.xticks([0.0,0.02,0.04],['0','0.02','0.04']) -plt.yticks([0,0.01,0.02,0.03,0.04,0.05],[]) -plt.text(0.008,0.0475,r'Aggregate',fontsize=11) -plt.text(0.005,0.04,r'growth rate $g$',fontsize=11) - -plt.savefig(my_file_path + '/Figures/NablaSensitivity.pdf') -plt.show() - - - -#plt.subplots(2,3,sharey=True) -fit = 4.593 -plt.subplot(2,3,1,xticks=[0.5,2.3,4],ylim=(0,10)) 
-plt.plot(rho_sensitivity[:,0],rho_sensitivity[:,4],'-k',linewidth=1) -plt.plot(1,fit,'.k',markersize=8) -plt.yticks([1,3,5,7,9]) -plt.text(0.7,0.5,r'Risk aversion $\rho$',fontsize=11) -plt.ylabel('Lorenz distance',fontsize=11) - -plt.subplot(2,3,2,xticks=[0.0,0.4,0.8],xlim=(0,0.8),ylim=(0,10)) -plt.plot(xi_sigma_sensitivity[:,0],xi_sigma_sensitivity[:,4],'-k',linewidth=1) -plt.plot(0.2,fit,'.k',markersize=8) -plt.yticks([1,3,5,7,9],[]) -plt.text(0.05,0.5,r'Transitory std $\sigma_\theta$',fontsize=11) -plt.title('Sensitivity Analysis: Perpetual Youth',fontsize=14) - -plt.subplot(2,3,3,xticks=[0.04,0.06,0.08],xlim=(0.04,0.08),ylim=(0,10)) -plt.plot(psi_sigma_sensitivity[:,0],psi_sigma_sensitivity[:,4],'-k',linewidth=1) -plt.plot(0.0603,fit,'.k',markersize=8) -plt.yticks([1,3,5,7,9],[]) -plt.text(0.041,0.5,r'Permanent std $\sigma_\psi$',fontsize=11) - -plt.subplot(2,3,4,xticks=[0.0,0.4,0.8],ylim=(0,10)) -plt.plot(mu_sensitivity[:,0],mu_sensitivity[:,4],'-k',linewidth=1) -plt.plot(0.15,fit,'.k',markersize=8) -plt.yticks([1,3,5,7,9]) -plt.text(0.05,8.5,'Unemployment',fontsize=11) -plt.text(0.22,7.25,r'benefit $\mu$',fontsize=11) -plt.ylabel('Lorenz distance',fontsize=11) - -plt.subplot(2,3,5,xticks=[0.02,0.07,0.12],ylim=(0,10)) -plt.plot(urate_sensitivity[:,0],urate_sensitivity[:,4],'-k',linewidth=1) -plt.plot(0.07,fit,'.k',markersize=8) -plt.yticks([1,3,5,7,9],[]) -plt.text(0.03,8.5,'Unemployment',fontsize=11) -plt.text(0.055,7.25,r'rate $\mho$',fontsize=11) - -''' -plt.subplot(2,3,6,xticks=[0.004,0.008,0.012],xlim=(0.003,0.0125),ylim=(0,10)) -plt.plot(mortality_sensitivity[:,0],mortality_sensitivity[:,4],'-k',linewidth=1) -plt.plot(0.00625,fit,'.k',markersize=8) -plt.yticks([1,3,5,7,9],[]) -plt.text(0.0037,0.5,r'Mortality rate $\mathsf{D}$',fontsize=11) -''' - -plt.subplot(2,3,6,xlim=(0.0,0.04),ylim=(0,10)) -plt.plot(g_sensitivity[:,0],g_sensitivity[:,4],'-k',linewidth=1) -plt.plot(0.01,fit,'.k',markersize=8) -plt.xticks([0.0,0.02,0.04],['0','0.02','0.04']) 
-plt.yticks([1,3,5,7,9],[]) -plt.text(0.008,8.5,r'Aggregate',fontsize=11) -plt.text(0.005,7.25,r'growth rate $g$',fontsize=11) - -plt.savefig(my_file_path + '/Figures/FitSensitivity.pdf') -plt.show() diff --git a/HARK/cstwMPC/MakeCSTWfigsForSlides.py b/HARK/cstwMPC/MakeCSTWfigsForSlides.py deleted file mode 100644 index 6ff1e6071..000000000 --- a/HARK/cstwMPC/MakeCSTWfigsForSlides.py +++ /dev/null @@ -1,138 +0,0 @@ -''' -This module / script makes some fairly simple figures used in a version of the slides. -All Booleans at the top of SetupParamsCSTW should be set to False, as this module -imports cstwMPC; there's no need to actually do anything but load the model. -''' -from __future__ import division, print_function -from __future__ import absolute_import - -from builtins import str -from builtins import range -from .cstwMPC import * -import matplotlib.pyplot as plt -import os - -# Save the current file's directory location for writing output: -my_file_path = os.path.dirname(os.path.abspath(__file__)) - - -plot_range = (0.0,30.0) -points = 200 -m = np.linspace(plot_range[0],plot_range[1],points) -InfiniteType(a_size=16) -InfiniteType.update() - -thorn = 1.0025*0.99325/(1.01) -mTargFunc = lambda x : (1 - thorn)*x + thorn - -mystr = lambda number : "{:.3f}".format(number) -mystrx = lambda number : "{:.0f}".format(number) - -def epaKernel(X): - K = 0.75*(1.0 - X**2.0) - K[np.abs(X) > 1] = 0 - return K - -def doCforBetaEquals(beta,m): - InfiniteType(beta=beta); - InfiniteType.solve(); - InfiniteType.unpack_cFunc() - - c = InfiniteType.cFunc[0](m) - InfiniteType.beta = Params.beta_guess - - InfiniteType.simulateCSTWc() - m_hist = InfiniteType.m_history - m_temp = np.reshape(m_hist[100:Params.sim_periods,:],((Params.sim_periods-100)*Params.sim_pop_size,1)) - n = m_temp.size - h = m[2] - m[0] - m_dist = np.zeros(m.shape) + np.nan - for j in range(m.size): - x = (m_temp - m[j])/h - m_dist[j] = np.sum(epaKernel(x))/(n*h) - - print('did beta= ' + str(beta)) - return c, 
m_dist - -c_array = np.zeros((17,points)) + np.nan -pdf_array = np.zeros((17,points)) + np.nan -for b in range(17): - beta = 0.978 + b*0.001 - c_array[b,], pdf_array[b,] = doCforBetaEquals(beta,m) - -for b in range(17): - beta = 0.978 + b*0.001 - highest = np.max(pdf_array[b,]) - scale = 1.5/highest - scale = 4.0 - plt.ylim(0,2.5) - plt.plot(m,scale*pdf_array[b,],'-c') - plt.fill_between(m,np.zeros(m.shape),scale*pdf_array[b,],facecolor='c',alpha=0.5) - plt.plot(m,mTargFunc(m),'-r') - plt.plot(m,c_array[b,],'-k',linewidth=1.5) - plt.text(10,2.2,r'$\beta=$' + str(beta),fontsize=20) - plt.xlabel(r'Cash on hand $m_t$',fontsize=14) - plt.ylabel(r'Consumption $c_t$',fontsize=14) - plt.savefig(my_file_path + '/Figures/mDistBeta0' + mystrx(1000*beta) + '.pdf') - plt.show() - - -plt.plot(m,c_array[12,],'-k',linewidth=1.5) -plt.ylim(0,1.25) -plt.xlim(0,15) -plt.xlabel(r'Cash on hand $m_t$',fontsize=14) -plt.ylabel(r'Consumption $c_t$',fontsize=14) -plt.savefig(my_file_path + '/Figures/ConFunc.pdf') -plt.plot(m,mTargFunc(m),'-r') -plt.plot(np.array([9.95,9.95]),np.array([0,1.5]),'--k') -plt.savefig(my_file_path + '/Figures/mTargBase.pdf') -plt.fill_between(m,np.zeros(m.shape),scale*2*pdf_array[12,],facecolor='c',alpha=0.5) -plt.savefig(my_file_path + '/Figures/mDistBase.pdf') -plt.show() - -InfiniteType(beta=0.99); -InfiniteType.solve(); -InfiniteType.unpack_cFunc() -m_new = np.linspace(0,15,points) -kappa_vec = InfiniteType.cFunc[0].derivative(m_new) - -plt.plot(m_new,kappa_vec,'-k',linewidth=1.5) -plt.xlim(0,15) -plt.ylim(0,1.02) -plt.xlabel(r'Cash on hand $m_t$',fontsize=14) -plt.ylabel(r'Marginal consumption $\kappa_t$',fontsize=14) -plt.savefig(my_file_path + '/Figures/kappaFuncBase.pdf') -plt.plot(np.array([9.95,9.95]),np.array([0,1.5]),'--k') -plt.fill_between(m,np.zeros(m.shape),scale*2*pdf_array[12,],facecolor='c',alpha=0.5) -plt.savefig(my_file_path + '/Figures/mDistVsKappa.pdf') -plt.show() - -plt.plot(m,mTargFunc(m),'-r') -plt.ylim(0,2.5) -plt.xlim(0,30) -for b 
in range(17): - plt.plot(m,c_array[b,],'-k',linewidth=1.5) - #idx = np.sum(c_array[b,] - mTargFunc(m) < 0) - #mTarg = m[idx] - #plt.plot(np.array([mTarg,mTarg]),np.array([0,2.5]),'--k') -plt.plot(m,mTargFunc(m),'-r') -plt.xlabel(r'Cash on hand $m_t$',fontsize=14) -plt.ylabel(r'Consumption $c_t$',fontsize=14) -plt.savefig(my_file_path + '/Figures/ManycFuncs.pdf') -plt.show() - -InfiniteType(beta=0.98); -InfiniteType.solve(); -InfiniteType.unpack_cFunc() -m_new = np.linspace(0,15,points) -kappa_vec = InfiniteType.cFunc[0].derivative(m_new) - -plt.plot(m_new,kappa_vec,'-k',linewidth=1.5) -plt.xlim(0,15) -plt.ylim(0,1.02) -plt.xlabel(r'Cash on hand $m_t$',fontsize=14) -plt.ylabel(r'Marginal consumption $\kappa_t$',fontsize=14) -plt.savefig(my_file_path + '/Figures/kappaFuncLowBeta.pdf') -plt.fill_between(m,np.zeros(m.shape),scale*0.33*pdf_array[2,],facecolor='c',alpha=0.5) -plt.savefig(my_file_path + '/Figures/mDistVsKappaLowBeta.pdf') -plt.show() diff --git a/HARK/cstwMPC/Results/BetaDistPYResults.txt b/HARK/cstwMPC/Results/BetaDistPYResults.txt deleted file mode 100644 index a1149e5b0..000000000 --- a/HARK/cstwMPC/Results/BetaDistPYResults.txt +++ /dev/null @@ -1,26 +0,0 @@ -Estimate is center=0.9891395234236353, spread=0.0 -Lorenz distance is 42.691225404510476 -Average MPC for all consumers is 0.099 -Average MPC in the top percentile of W/Y is 0.068 -Average MPC in the top decile of W/Y is 0.071 -Average MPC in the top quintile of W/Y is 0.072 -Average MPC in the second quintile of W/Y is 0.074 -Average MPC in the middle quintile of W/Y is 0.074 -Average MPC in the fourth quintile of W/Y is 0.076 -Average MPC in the bottom quintile of W/Y is 0.199 -Average MPC in the top percentile of y is 0.075 -Average MPC in the top decile of y is 0.078 -Average MPC in the top quintile of y is 0.084 -Average MPC in the second quintile of y is 0.114 -Average MPC in the middle quintile of y is 0.123 -Average MPC in the fourth quintile of y is 0.082 -Average MPC in the bottom 
quintile of y is 0.093 -Average MPC for the employed is 0.097 -Average MPC for the unemployed is 0.131 -Average MPC for the retired is nan -Of the population with the 1/3 highest MPCs... -61.057% are in the bottom wealth quintile, -28.655% are in the second wealth quintile, -7.285% are in the third wealth quintile, -2.207% are in the fourth wealth quintile, -and 0.796% are in the top wealth quintile. diff --git a/HARK/cstwMPC/Results/BetaDistPYResults_py3.txt b/HARK/cstwMPC/Results/BetaDistPYResults_py3.txt deleted file mode 100644 index a1149e5b0..000000000 --- a/HARK/cstwMPC/Results/BetaDistPYResults_py3.txt +++ /dev/null @@ -1,26 +0,0 @@ -Estimate is center=0.9891395234236353, spread=0.0 -Lorenz distance is 42.691225404510476 -Average MPC for all consumers is 0.099 -Average MPC in the top percentile of W/Y is 0.068 -Average MPC in the top decile of W/Y is 0.071 -Average MPC in the top quintile of W/Y is 0.072 -Average MPC in the second quintile of W/Y is 0.074 -Average MPC in the middle quintile of W/Y is 0.074 -Average MPC in the fourth quintile of W/Y is 0.076 -Average MPC in the bottom quintile of W/Y is 0.199 -Average MPC in the top percentile of y is 0.075 -Average MPC in the top decile of y is 0.078 -Average MPC in the top quintile of y is 0.084 -Average MPC in the second quintile of y is 0.114 -Average MPC in the middle quintile of y is 0.123 -Average MPC in the fourth quintile of y is 0.082 -Average MPC in the bottom quintile of y is 0.093 -Average MPC for the employed is 0.097 -Average MPC for the unemployed is 0.131 -Average MPC for the retired is nan -Of the population with the 1/3 highest MPCs... -61.057% are in the bottom wealth quintile, -28.655% are in the second wealth quintile, -7.285% are in the third wealth quintile, -2.207% are in the fourth wealth quintile, -and 0.796% are in the top wealth quintile. 
diff --git a/HARK/cstwMPC/SetupParamsCSTW.py b/HARK/cstwMPC/SetupParamsCSTW.py deleted file mode 100644 index c97378ebe..000000000 --- a/HARK/cstwMPC/SetupParamsCSTW.py +++ /dev/null @@ -1,303 +0,0 @@ -''' -Loads parameters used in the cstwMPC estimations. -''' - -from __future__ import division, print_function - -from builtins import range -import numpy as np -import csv -from copy import deepcopy -import os - -# Choose percentiles of the data to match and which estimation to run -spec_name = 'BetaDistPY' -param_name = 'DiscFac' # Which parameter to introduce heterogeneity in -dist_type = 'uniform' # Which type of distribution to use -do_lifecycle = False # Use lifecycle model if True, perpetual youth if False -do_param_dist = False # Do param-dist version if True, param-point if False -run_estimation = True # Runs the estimation if True -find_beta_vs_KY = False # Computes K/Y ratio for a wide range of beta; should have do_beta_dist = False -do_sensitivity = [False, False, False, False, False, False, False, False] # Choose which sensitivity analyses to run: rho, xi_sigma, psi_sigma, mu, urate, mortality, g, R -do_liquid = False # Matches liquid assets data when True, net worth data when False -do_tractable = False # Uses a "tractable consumer" rather than solving full model when True -do_agg_shocks = False # Solve the FBS aggregate shocks version of the model -SCF_data_file = 'SCFwealthDataReduced.txt' -percentiles_to_match = [0.2, 0.4, 0.6, 0.8] # Which points of the Lorenz curve to match in beta-dist (must be in (0,1)) -#percentiles_to_match = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] # Can use this line if you want to match more percentiles -if do_param_dist: - pref_type_count = 7 # Number of discrete beta types in beta-dist -else: - pref_type_count = 1 # Just one beta type in beta-point - -# Set basic parameters for the lifecycle micro model -init_age = 24 # Starting age for agents -Rfree = 1.04**(0.25) # Quarterly interest factor -working_T = 41*4 # 
Number of working periods -retired_T = 55*4 # Number of retired periods -T_cycle = working_T+retired_T # Total number of periods -CRRA = 1.0 # Coefficient of relative risk aversion -DiscFac_guess = 0.99 # Initial starting point for discount factor -UnempPrb = 0.07 # Probability of unemployment while working -UnempPrbRet = 0.0005 # Probabulity of "unemployment" while retired -IncUnemp = 0.15 # Unemployment benefit replacement rate -IncUnempRet = 0.0 # Ditto when retired -BoroCnstArt = 0.0 # Artificial borrowing constraint - -# Set grid sizes -PermShkCount = 5 # Number of points in permanent income shock grid -TranShkCount = 5 # Number of points in transitory income shock grid -aXtraMin = 0.00001 # Minimum end-of-period assets in grid -aXtraMax = 20 # Maximum end-of-period assets in grid -aXtraCount = 20 # Number of points in assets grid -aXtraNestFac = 3 # Number of times to 'exponentially nest' when constructing assets grid -CubicBool = False # Whether to use cubic spline interpolation -vFuncBool = False # Whether to calculate the value function during solution - -# Set simulation parameters -if do_param_dist: - if do_agg_shocks: - Population = 16800 - else: - Population = 14000 -else: - if do_agg_shocks: - Population = 9600 - else: - Population = 10000 # Total number of simulated agents in the population -T_sim_PY = 1200 # Number of periods to simulate (idiosyncratic shocks model, perpetual youth) -T_sim_LC = 1200 # Number of periods to simulate (idiosyncratic shocks model, lifecycle) -T_sim_agg_shocks = 1200 # Number of periods to simulate (aggregate shocks model) -ignore_periods_PY = 400 # Number of periods to throw out when looking at history (perpetual youth) -ignore_periods_LC = 400 # Number of periods to throw out when looking at history (lifecycle) -T_age = T_cycle + 1 # Don't let simulated agents survive beyond this age -pLvlInitMean_d = np.log(5) # average initial permanent income, dropouts -pLvlInitMean_h = np.log(7.5) # average initial permanent income, 
HS grads -pLvlInitMean_c = np.log(12) # average initial permanent income, college grads -pLvlInitStd = 0.4 # Standard deviation of initial permanent income -aNrmInitMean = np.log(0.5) # log initial wealth/income mean -aNrmInitStd = 0.5 # log initial wealth/income standard deviation - -# Set population macro parameters -PopGroFac = 1.01**(0.25) # Population growth rate -PermGroFacAgg = 1.015**(0.25) # TFP growth rate -d_pct = 0.11 # proportion of HS dropouts -h_pct = 0.55 # proportion of HS graduates -c_pct = 0.34 # proportion of college graduates -TypeWeight_lifecycle = [d_pct,h_pct,c_pct] - -# Set indiividual parameters for the infinite horizon model -IndL = 10.0/9.0 # Labor supply per individual (constant) -PermGroFac_i = [1.000**0.25] # Permanent income growth factor (no perm growth) -DiscFac_i = 0.97 # Default intertemporal discount factor -LivPrb_i = [1.0 - 1.0/160.0] # Survival probability -PermShkStd_i = [(0.01*4/11)**0.5] # Standard deviation of permanent shocks to income -TranShkStd_i = [(0.01*4)**0.5] # Standard deviation of transitory shocks to income - -# Define the paths of permanent and transitory shocks (from Sabelhaus and Song) -TranShkStd = (np.concatenate((np.linspace(0.1,0.12,17), 0.12*np.ones(17), np.linspace(0.12,0.075,61), np.linspace(0.074,0.007,68), np.zeros(retired_T+1)))*4)**0.5 -TranShkStd = np.ndarray.tolist(TranShkStd) -PermShkStd = np.concatenate((((0.00011342*(np.linspace(24,64.75,working_T-1)-47)**2 + 0.01)/(11.0/4.0))**0.5,np.zeros(retired_T+1))) -PermShkStd = np.ndarray.tolist(PermShkStd) - -# Set aggregate parameters for the infinite horizon model -PermShkAggCount = 3 # Number of discrete permanent aggregate shocks -TranShkAggCount = 3 # Number of discrete transitory aggregate shocks -PermShkAggStd = np.sqrt(0.00004) # Standard deviation of permanent aggregate shocks -TranShkAggStd = np.sqrt(0.00001) # Standard deviation of transitory aggregate shocks -CapShare = 0.36 # Capital's share of output -DeprFac = 0.025 # Capital 
depreciation factor -CRRAPF = 1.0 # CRRA in perfect foresight calibration -DiscFacPF = 0.99 # Discount factor in perfect foresight calibration -slope_prev = 1.0 # Initial slope of kNextFunc (aggregate shocks model) -intercept_prev = 0.0 # Initial intercept of kNextFunc (aggregate shocks model) - -# Import survival probabilities from SSA data -data_location = os.path.dirname(os.path.abspath(__file__)) -f = open(data_location + '/' + 'USactuarial.txt','r') -actuarial_reader = csv.reader(f,delimiter='\t') -raw_actuarial = list(actuarial_reader) -base_death_probs = [] -for j in range(len(raw_actuarial)): - base_death_probs += [float(raw_actuarial[j][4])] # This effectively assumes that everyone is a white woman -f.close - -# Import adjustments for education and apply them to the base mortality rates -f = open(data_location + '/' + 'EducMortAdj.txt','r') -adjustment_reader = csv.reader(f,delimiter=' ') -raw_adjustments = list(adjustment_reader) -d_death_probs = [] -h_death_probs = [] -c_death_probs = [] -for j in range(76): - d_death_probs += [base_death_probs[j + init_age]*float(raw_adjustments[j][1])] - h_death_probs += [base_death_probs[j + init_age]*float(raw_adjustments[j][2])] - c_death_probs += [base_death_probs[j + init_age]*float(raw_adjustments[j][3])] -for j in range(76,96): - d_death_probs += [base_death_probs[j + init_age]*float(raw_adjustments[75][1])] - h_death_probs += [base_death_probs[j + init_age]*float(raw_adjustments[75][2])] - c_death_probs += [base_death_probs[j + init_age]*float(raw_adjustments[75][3])] -LivPrb_d = [] -LivPrb_h = [] -LivPrb_c = [] -for j in range(len(d_death_probs)): # Convert annual mortality rates to quarterly survival rates - LivPrb_d += 4*[(1 - d_death_probs[j])**0.25] - LivPrb_h += 4*[(1 - h_death_probs[j])**0.25] - LivPrb_c += 4*[(1 - c_death_probs[j])**0.25] - -# Define permanent income growth rates for each education level (from Cagetti 2003) -PermGroFac_d_base = [5.2522391e-002, 5.0039782e-002, 4.7586132e-002, 
4.5162424e-002, 4.2769638e-002, 4.0408757e-002, 3.8080763e-002, 3.5786635e-002, 3.3527358e-002, 3.1303911e-002, 2.9117277e-002, 2.6968437e-002, 2.4858374e-002, 2.2788068e-002, 2.0758501e-002, 1.8770655e-002, 1.6825511e-002, 1.4924052e-002, 1.3067258e-002, 1.1256112e-002, 9.4915947e-003, 7.7746883e-003, 6.1063742e-003, 4.4876340e-003, 2.9194495e-003, 1.4028022e-003, -6.1326258e-005, -1.4719542e-003, -2.8280999e-003, -4.1287819e-003, -5.3730185e-003, -6.5598280e-003, -7.6882288e-003, -8.7572392e-003, -9.7658777e-003, -1.0713163e-002, -1.1598112e-002, -1.2419745e-002, -1.3177079e-002, -1.3869133e-002, -4.3985368e-001, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003, -8.5623256e-003] -PermGroFac_h_base = [4.1102173e-002, 4.1194381e-002, 4.1117402e-002, 4.0878307e-002, 4.0484168e-002, 3.9942056e-002, 3.9259042e-002, 3.8442198e-002, 3.7498596e-002, 3.6435308e-002, 3.5259403e-002, 3.3977955e-002, 3.2598035e-002, 3.1126713e-002, 2.9571062e-002, 2.7938153e-002, 2.6235058e-002, 2.4468848e-002, 2.2646594e-002, 2.0775369e-002, 1.8862243e-002, 1.6914288e-002, 1.4938576e-002, 1.2942178e-002, 1.0932165e-002, 8.9156095e-003, 6.8995825e-003, 4.8911556e-003, 2.8974003e-003, 9.2538802e-004, -1.0178097e-003, -2.9251214e-003, -4.7894755e-003, -6.6038005e-003, -8.3610250e-003, -1.0054077e-002, -1.1675886e-002, -1.3219380e-002, -1.4677487e-002, -1.6043137e-002, -5.5864350e-001, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002, 
-1.0820465e-002, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002, -1.0820465e-002] -PermGroFac_c_base = [3.9375106e-002, 3.9030288e-002, 3.8601230e-002, 3.8091011e-002, 3.7502710e-002, 3.6839406e-002, 3.6104179e-002, 3.5300107e-002, 3.4430270e-002, 3.3497746e-002, 3.2505614e-002, 3.1456953e-002, 3.0354843e-002, 2.9202363e-002, 2.8002591e-002, 2.6758606e-002, 2.5473489e-002, 2.4150316e-002, 2.2792168e-002, 2.1402124e-002, 1.9983263e-002, 1.8538663e-002, 1.7071404e-002, 1.5584565e-002, 1.4081224e-002, 1.2564462e-002, 1.1037356e-002, 9.5029859e-003, 7.9644308e-003, 6.4247695e-003, 4.8870812e-003, 3.3544449e-003, 1.8299396e-003, 3.1664424e-004, -1.1823620e-003, -2.6640003e-003, -4.1251914e-003, -5.5628564e-003, -6.9739162e-003, -8.3552918e-003, -6.8938447e-001, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004, -6.1023256e-004] -PermGroFac_d_base += 31*[PermGroFac_d_base[-1]] # Add 31 years of the same permanent income growth rate to the end of the sequence -PermGroFac_h_base += 31*[PermGroFac_h_base[-1]] -PermGroFac_c_base += 31*[PermGroFac_c_base[-1]] -PermGroFac_d_retire = PermGroFac_d_base[40] # Store the big shock to permanent income at retirement -PermGroFac_h_retire = PermGroFac_h_base[40] -PermGroFac_c_retire = PermGroFac_c_base[40] -PermGroFac_d_base[40] = PermGroFac_d_base[39] # Overwrite the "retirement drop" with the adjacent growth rate -PermGroFac_h_base[40] = PermGroFac_h_base[39] -PermGroFac_c_base[40] = PermGroFac_c_base[39] -PermGroFac_d = [] -PermGroFac_h = [] -PermGroFac_c = [] -for j in range(len(PermGroFac_d_base)): # Make sequences of quarterly permanent income 
growth factors from annual permanent income growth rates - PermGroFac_d += 4*[(1 + PermGroFac_d_base[j])**0.25] - PermGroFac_h += 4*[(1 + PermGroFac_h_base[j])**0.25] - PermGroFac_c += 4*[(1 + PermGroFac_c_base[j])**0.25] -PermGroFac_d[working_T-1] = 1 + PermGroFac_d_retire # Put the big shock at retirement back into the sequence -PermGroFac_h[working_T-1] = 1 + PermGroFac_h_retire -PermGroFac_c[working_T-1] = 1 + PermGroFac_c_retire - -# Import the SCF wealth data -f = open(data_location + '/' + SCF_data_file,'r') -SCF_reader = csv.reader(f,delimiter='\t') -SCF_raw = list(SCF_reader) -SCF_wealth = np.zeros(len(SCF_raw)) + np.nan -SCF_weights = deepcopy(SCF_wealth) -for j in range(len(SCF_raw)): - SCF_wealth[j] = float(SCF_raw[j][0]) - SCF_weights[j] = float(SCF_raw[j][1]) - - -# Make dictionaries for lifecycle consumer types -init_dropout = {"CRRA":CRRA, - "Rfree":Rfree, - "PermGroFac":PermGroFac_d, - "PermGroFacAgg":PermGroFacAgg, - "BoroCnstArt":BoroCnstArt, - "CubicBool":CubicBool, - "vFuncBool":vFuncBool, - "PermShkStd":PermShkStd, - "PermShkCount":PermShkCount, - "TranShkStd":TranShkStd, - "TranShkCount":TranShkCount, - "T_cycle":T_cycle, - "UnempPrb":UnempPrb, - "UnempPrbRet":UnempPrbRet, - "T_retire":working_T-1, - "IncUnemp":IncUnemp, - "IncUnempRet":IncUnempRet, - "aXtraMin":aXtraMin, - "aXtraMax":aXtraMax, - "aXtraCount":aXtraCount, - "aXtraExtra":[], - "aXtraNestFac":aXtraNestFac, - "LivPrb":LivPrb_d, - "DiscFac":DiscFac_guess, # dummy value, will be overwritten - 'AgentCount': 0, # this is overwritten by parameter distributor - 'T_sim':T_sim_LC, - 'T_age':T_age, - 'aNrmInitMean':aNrmInitMean, - 'aNrmInitStd':aNrmInitStd, - 'pLvlInitMean':pLvlInitMean_d, - 'pLvlInitStd':pLvlInitStd - } -adj_highschool = {"PermGroFac":PermGroFac_h,"LivPrb":LivPrb_h,'pLvlInitMean':pLvlInitMean_h} -adj_college = {"PermGroFac":PermGroFac_c,"LivPrb":LivPrb_c,'pLvlInitMean':pLvlInitMean_c} - -# Make a dictionary for the infinite horizon type -init_infinite = {"CRRA":CRRA, - 
"Rfree":1.01/LivPrb_i[0], - "PermGroFac":PermGroFac_i, - "PermGroFacAgg":1.0, - "BoroCnstArt":BoroCnstArt, - "CubicBool":CubicBool, - "vFuncBool":vFuncBool, - "PermShkStd":PermShkStd_i, - "PermShkCount":PermShkCount, - "TranShkStd":TranShkStd_i, - "TranShkCount":TranShkCount, - "UnempPrb":UnempPrb, - "IncUnemp":IncUnemp, - "UnempPrbRet":None, - "IncUnempRet":None, - "aXtraMin":aXtraMin, - "aXtraMax":aXtraMax, - "aXtraCount":aXtraCount, - "aXtraExtra":[None], - "aXtraNestFac":aXtraNestFac, - "LivPrb":LivPrb_i, - "DiscFac":DiscFac_i, # dummy value, will be overwritten - "cycles":0, - "T_cycle":1, - "T_retire":0, - 'T_sim':T_sim_PY, - 'T_age': 400, - 'IndL': IndL, - 'aNrmInitMean':np.log(0.00001), - 'aNrmInitStd':0.0, - 'pLvlInitMean':0.0, - 'pLvlInitStd':0.0, - 'AgentCount':0, # will be overwritten by parameter distributor - } - -# Make a base dictionary for the cstwMPCmarket -init_market = {'LorenzBool': False, - 'ManyStatsBool': False, - 'ignore_periods':0, # Will get overwritten - 'PopGroFac':0.0, # Will get overwritten - 'T_retire':0, # Will get overwritten - 'TypeWeights':[], # Will get overwritten - 'Population':Population, - 'act_T':0, # Will get overwritten - 'IncUnemp':IncUnemp, - 'cutoffs':[(0.99,1),(0.9,1),(0.8,1),(0.6,0.8),(0.4,0.6),(0.2,0.4),(0.0,0.2)], - 'LorenzPercentiles':percentiles_to_match, - 'AggShockBool':do_agg_shocks - } - - -# Make a dictionary for the aggregate shocks type -init_agg_shocks = deepcopy(init_infinite) -init_agg_shocks['T_sim'] = T_sim_agg_shocks -init_agg_shocks['tolerance'] = 0.0001 -init_agg_shocks['MgridBase'] = np.array([0.1,0.3,0.6,0.8,0.9,0.98,1.0,1.02,1.1,1.2,1.6,2.0,3.0]) - -# Make a dictionary for the aggrege shocks market -aggregate_params = {'PermShkAggCount': PermShkAggCount, - 'TranShkAggCount': TranShkAggCount, - 'PermShkAggStd': PermShkAggStd, - 'TranShkAggStd': TranShkAggStd, - 'DeprFac': DeprFac, - 'CapShare': CapShare, - 'AggregateL':(1.0-UnempPrb)*IndL, - 'CRRA': CRRAPF, - 'DiscFac': DiscFacPF, - 'LivPrb': 
LivPrb_i[0], - 'slope_prev': slope_prev, - 'intercept_prev': intercept_prev, - 'act_T':T_sim_agg_shocks, - 'ignore_periods':200, - 'tolerance':0.0001 - } - -def main(): - print("Sorry, SetupParamsCSTWnew doesn't actually do anything on its own.") - print("This module is imported by cstwMPCnew, providing data and calibrated") - print("parameters for the various estimations. Please see that module if") - print("you want more interesting output.") - -if __name__ == '__main__': - main() - diff --git a/HARK/cstwMPC/__init__.py b/HARK/cstwMPC/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/HARK/cstwMPC/cstwMPC.py b/HARK/cstwMPC/cstwMPC.py deleted file mode 100644 index d975ae9c3..000000000 --- a/HARK/cstwMPC/cstwMPC.py +++ /dev/null @@ -1,624 +0,0 @@ -''' -A second stab / complete do-over of cstwMPC. Steals some bits from old version. -''' -from __future__ import division, print_function -from __future__ import absolute_import - -from builtins import str -from builtins import range - -import os - -import numpy as np -from copy import copy, deepcopy -from time import time -from HARK.utilities import approxMeanOneLognormal, combineIndepDstns, approxUniform, \ - getPercentiles, getLorenzShares, calcSubpopAvg, approxLognormal -from HARK.simulation import drawDiscrete -from HARK import Market -import HARK.cstwMPC.SetupParamsCSTW as Params -import HARK.ConsumptionSaving.ConsIndShockModel as Model -from HARK.ConsumptionSaving.ConsAggShockModel import CobbDouglasEconomy, AggShockConsumerType -from scipy.optimize import golden, brentq -import matplotlib.pyplot as plt - -mystr = lambda number : "{:.3f}".format(number) - -if Params.do_agg_shocks: - EstimationAgentClass = AggShockConsumerType - EstimationMarketClass = CobbDouglasEconomy -else: - EstimationAgentClass = Model.IndShockConsumerType - EstimationMarketClass = Market - -class cstwMPCagent(EstimationAgentClass): - ''' - A slight extension of the idiosyncratic consumer type for the cstwMPC model. 
- ''' - def reset(self): - self.initializeSim() - self.t_age = drawDiscrete(self.AgentCount,P=self.AgeDstn,X=np.arange(self.AgeDstn.size),seed=self.RNG.randint(0,2**31-1)).astype(int) - self.t_cycle = copy(self.t_age) - if hasattr(self,'kGrid'): - self.aLvlNow = self.kInit*np.ones(self.AgentCount) # Start simulation near SS - self.aNrmNow = self.aLvlNow/self.pLvlNow - - def marketAction(self): - if hasattr(self,'kGrid'): - self.pLvl = self.pLvlNow/np.mean(self.pLvlNow) - self.simulate(1) - - def updateIncomeProcess(self): - ''' - An alternative method for constructing the income process in the infinite horizon model. - - Parameters - ---------- - none - - Returns - ------- - none - ''' - if self.cycles == 0: - tax_rate = (self.IncUnemp*self.UnempPrb)/((1.0-self.UnempPrb)*self.IndL) - TranShkDstn = deepcopy(approxMeanOneLognormal(self.TranShkCount,sigma=self.TranShkStd[0],tail_N=0)) - TranShkDstn[0] = np.insert(TranShkDstn[0]*(1.0-self.UnempPrb),0,self.UnempPrb) - TranShkDstn[1] = np.insert(TranShkDstn[1]*(1.0-tax_rate)*self.IndL,0,self.IncUnemp) - PermShkDstn = approxMeanOneLognormal(self.PermShkCount,sigma=self.PermShkStd[0],tail_N=0) - self.IncomeDstn = [combineIndepDstns(PermShkDstn,TranShkDstn)] - self.TranShkDstn = TranShkDstn - self.PermShkDstn = PermShkDstn - self.addToTimeVary('IncomeDstn') - else: # Do the usual method if this is the lifecycle model - EstimationAgentClass.updateIncomeProcess(self) - -class cstwMPCmarket(EstimationMarketClass): - ''' - A class for representing the economy in the cstwMPC model. 
- ''' - reap_vars = ['aLvlNow','pLvlNow','MPCnow','TranShkNow','EmpNow','t_age'] - sow_vars = [] # Nothing needs to be sent back to agents in the idiosyncratic shocks version - const_vars = ['LorenzBool','ManyStatsBool'] - track_vars = ['MaggNow','AaggNow','KtoYnow','Lorenz','LorenzLong','MPCall','MPCretired','MPCemployed','MPCunemployed','MPCbyIncome','MPCbyWealthRatio','HandToMouthPct'] - dyn_vars = [] # No dynamics in the idiosyncratic shocks version - - def __init__(self,**kwds): - ''' - Make a new instance of cstwMPCmarket. - ''' - super().__init__(sow_vars=self.sow_vars, reap_vars=self.reap_vars, - const_vars=self.const_vars, track_vars=self.track_vars, - dyn_vars=self.dyn_vars) - self.assignParameters(**kwds) - if self.AggShockBool: - self.sow_vars=['MaggNow','AaggNow','RfreeNow','wRteNow','PermShkAggNow','TranShkAggNow','KtoLnow'] - self.dyn_vars=['AFunc'] - self.max_loops = 20 - - # Save the current file's directory location for writing output: - self.my_file_path = os.path.dirname(os.path.abspath(__file__)) - - - def solve(self): - ''' - Solves the cstwMPCmarket. - ''' - if self.AggShockBool: - for agent in self.agents: - agent.getEconomyData(self) - Market.solve(self) - else: - self.solveAgents() - self.makeHistory() - - def millRule(self,aLvlNow,pLvlNow,MPCnow,TranShkNow,EmpNow,t_age,LorenzBool,ManyStatsBool): - ''' - The millRule for this class simply calls the method calcStats. - ''' - self.calcStats(aLvlNow,pLvlNow,MPCnow,TranShkNow,EmpNow,t_age,LorenzBool,ManyStatsBool) - if self.AggShockBool: - return self.calcRandW(aLvlNow,pLvlNow) - else: # These variables are tracked but not created in no-agg-shocks specifications - self.MaggNow = 0.0 - self.AaggNow = 0.0 - - def calcStats(self,aLvlNow,pLvlNow,MPCnow,TranShkNow,EmpNow,t_age,LorenzBool,ManyStatsBool): - ''' - Calculate various statistics about the current population in the economy. 
- - Parameters - ---------- - aLvlNow : [np.array] - Arrays with end-of-period assets, listed by each ConsumerType in self.agents. - pLvlNow : [np.array] - Arrays with permanent income levels, listed by each ConsumerType in self.agents. - MPCnow : [np.array] - Arrays with marginal propensity to consume, listed by each ConsumerType in self.agents. - TranShkNow : [np.array] - Arrays with transitory income shocks, listed by each ConsumerType in self.agents. - EmpNow : [np.array] - Arrays with employment states: True if employed, False otherwise. - t_age : [np.array] - Arrays with periods elapsed since model entry, listed by each ConsumerType in self.agents. - LorenzBool: bool - Indicator for whether the Lorenz target points should be calculated. Usually False, - only True when DiscFac has been identified for a particular nabla. - ManyStatsBool: bool - Indicator for whether a lot of statistics for tables should be calculated. Usually False, - only True when parameters have been estimated and we want values for tables. 
- - Returns - ------- - None - ''' - # Combine inputs into single arrays - aLvl = np.hstack(aLvlNow) - pLvl = np.hstack(pLvlNow) - age = np.hstack(t_age) - TranShk = np.hstack(TranShkNow) - Emp = np.hstack(EmpNow) - - # Calculate the capital to income ratio in the economy - CohortWeight = self.PopGroFac**(-age) - CapAgg = np.sum(aLvl*CohortWeight) - IncAgg = np.sum(pLvl*TranShk*CohortWeight) - KtoYnow = CapAgg/IncAgg - self.KtoYnow = KtoYnow - - # Store Lorenz data if requested - self.LorenzLong = np.nan - if LorenzBool: - order = np.argsort(aLvl) - aLvl = aLvl[order] - CohortWeight = CohortWeight[order] - wealth_shares = getLorenzShares(aLvl,weights=CohortWeight,percentiles=self.LorenzPercentiles,presorted=True) - self.Lorenz = wealth_shares - if ManyStatsBool: - self.LorenzLong = getLorenzShares(aLvl,weights=CohortWeight,percentiles=np.arange(0.01,1.0,0.01),presorted=True) - else: - self.Lorenz = np.nan # Store nothing if we don't want Lorenz data - - # Calculate a whole bunch of statistics if requested - if ManyStatsBool: - # Reshape other inputs - MPC = np.hstack(MPCnow) - - # Sort other data items if aLvl and CohortWeight were sorted - if LorenzBool: - pLvl = pLvl[order] - MPC = MPC[order] - TranShk = TranShk[order] - age = age[order] - Emp = Emp[order] - aNrm = aLvl/pLvl # Normalized assets (wealth ratio) - IncLvl = TranShk*pLvl # Labor income this period - - # Calculate overall population MPC and by subpopulations - MPCannual = 1.0 - (1.0 - MPC)**4 - self.MPCall = np.sum(MPCannual*CohortWeight)/np.sum(CohortWeight) - employed = Emp - unemployed = np.logical_not(employed) - if self.T_retire > 0: # Adjust for the lifecycle model, where agents might be retired instead - unemployed = np.logical_and(unemployed,age < self.T_retire) - employed = np.logical_and(employed,age < self.T_retire) - retired = age >= self.T_retire - else: - retired = np.zeros_like(unemployed,dtype=bool) - self.MPCunemployed = 
np.sum(MPCannual[unemployed]*CohortWeight[unemployed])/np.sum(CohortWeight[unemployed]) - self.MPCemployed = np.sum(MPCannual[employed]*CohortWeight[employed])/np.sum(CohortWeight[employed]) - self.MPCretired = np.sum(MPCannual[retired]*CohortWeight[retired])/np.sum(CohortWeight[retired]) - self.MPCbyWealthRatio = calcSubpopAvg(MPCannual,aNrm,self.cutoffs,CohortWeight) - self.MPCbyIncome = calcSubpopAvg(MPCannual,IncLvl,self.cutoffs,CohortWeight) - - # Calculate the wealth quintile distribution of "hand to mouth" consumers - quintile_cuts = getPercentiles(aLvl,weights=CohortWeight,percentiles=[0.2, 0.4, 0.6, 0.8]) - wealth_quintiles = np.ones(aLvl.size,dtype=int) - wealth_quintiles[aLvl > quintile_cuts[0]] = 2 - wealth_quintiles[aLvl > quintile_cuts[1]] = 3 - wealth_quintiles[aLvl > quintile_cuts[2]] = 4 - wealth_quintiles[aLvl > quintile_cuts[3]] = 5 - MPC_cutoff = getPercentiles(MPCannual,weights=CohortWeight,percentiles=[2.0/3.0]) # Looking at consumers with MPCs in the top 1/3 - these = MPCannual > MPC_cutoff - in_top_third_MPC = wealth_quintiles[these] - temp_weights = CohortWeight[these] - hand_to_mouth_total = np.sum(temp_weights) - hand_to_mouth_pct = [] - for q in range(1,6): - hand_to_mouth_pct.append(np.sum(temp_weights[in_top_third_MPC == q])/hand_to_mouth_total) - self.HandToMouthPct = np.array(hand_to_mouth_pct) - - else: # If we don't want these stats, just put empty values in history - self.MPCall = np.nan - self.MPCunemployed = np.nan - self.MPCemployed = np.nan - self.MPCretired = np.nan - self.MPCbyWealthRatio = np.nan - self.MPCbyIncome = np.nan - self.HandToMouthPct = np.nan - - def distributeParams(self,param_name,param_count,center,spread,dist_type): - ''' - Distributes heterogeneous values of one parameter to the AgentTypes in self.agents. - - Parameters - ---------- - param_name : string - Name of the parameter to be assigned. - param_count : int - Number of different values the parameter will take on. 
- center : float - A measure of centrality for the distribution of the parameter. - spread : float - A measure of spread or diffusion for the distribution of the parameter. - dist_type : string - The type of distribution to be used. Can be "lognormal" or "uniform" (can expand). - - Returns - ------- - None - ''' - # Get a list of discrete values for the parameter - if dist_type == 'uniform': - # If uniform, center is middle of distribution, spread is distance to either edge - param_dist = approxUniform(N=param_count,bot=center-spread,top=center+spread) - elif dist_type == 'lognormal': - # If lognormal, center is the mean and spread is the standard deviation (in log) - tail_N = 3 - param_dist = approxLognormal(N=param_count-tail_N,mu=np.log(center)-0.5*spread**2,sigma=spread,tail_N=tail_N,tail_bound=[0.0,0.9], tail_order=np.e) - - # Distribute the parameters to the various types, assigning consecutive types the same - # value if there are more types than values - replication_factor = len(self.agents) // param_count - # Note: the double division is intenger division in Python 3 and 2.7, this makes it explicit - j = 0 - b = 0 - while j < len(self.agents): - for n in range(replication_factor): - self.agents[j](AgentCount = int(self.Population*param_dist[0][b]*self.TypeWeight[n])) - exec('self.agents[j](' + param_name + '= param_dist[1][b])') - j += 1 - b += 1 - - def calcKYratioDifference(self): - ''' - Returns the difference between the simulated capital to income ratio and the target ratio. - Can only be run after solving all AgentTypes and running makeHistory. - - Parameters - ---------- - None - - Returns - ------- - diff : float - Difference between simulated and target capital to income ratio. 
- ''' - # Ignore the first X periods to allow economy to stabilize from initial conditions - KYratioSim = np.mean(np.array(self.KtoYnow_hist)[self.ignore_periods:]) - diff = KYratioSim - self.KYratioTarget - return diff - - def calcLorenzDistance(self): - ''' - Returns the sum of squared differences between simulated and target Lorenz points. - - Parameters - ---------- - None - - Returns - ------- - dist : float - Sum of squared distances between simulated and target Lorenz points (sqrt) - ''' - LorenzSim = np.mean(np.array(self.Lorenz_hist)[self.ignore_periods:,:],axis=0) - dist = np.sqrt(np.sum((100*(LorenzSim - self.LorenzTarget))**2)) - self.LorenzDistance = dist - return dist - - def showManyStats(self,spec_name=None): - ''' - Calculates the "many statistics" by averaging histories across simulated periods. Displays - the results as text and saves them to files if spec_name is not None. - - Parameters - ---------- - spec_name : string - A name or label for the current specification. 
- - Returns - ------- - None - ''' - # Calculate MPC overall and by subpopulations - MPCall = np.mean(self.MPCall_hist[self.ignore_periods:]) - MPCemployed = np.mean(self.MPCemployed_hist[self.ignore_periods:]) - MPCunemployed = np.mean(self.MPCunemployed_hist[self.ignore_periods:]) - MPCretired = np.mean(self.MPCretired_hist[self.ignore_periods:]) - MPCbyIncome = np.mean(np.array(self.MPCbyIncome_hist)[self.ignore_periods:,:],axis=0) - MPCbyWealthRatio = np.mean(np.array(self.MPCbyWealthRatio_hist)[self.ignore_periods:,:],axis=0) - HandToMouthPct = np.mean(np.array(self.HandToMouthPct_hist)[self.ignore_periods:,:],axis=0) - - LorenzSim = np.hstack((np.array(0.0),np.mean(np.array(self.LorenzLong_hist)[self.ignore_periods:,:],axis=0),np.array(1.0))) - LorenzAxis = np.arange(101,dtype=float) - plt.plot(LorenzAxis,self.LorenzData,'-k',linewidth=1.5) - plt.plot(LorenzAxis,LorenzSim,'--k',linewidth=1.5) - plt.xlabel('Income percentile',fontsize=12) - plt.ylabel('Cumulative wealth share',fontsize=12) - plt.ylim([-0.02,1.0]) - plt.show() - - # Make a string of results to display - results_string = 'Estimate is center=' + str(self.center_estimate) + ', spread=' + str(self.spread_estimate) + '\n' - results_string += 'Lorenz distance is ' + str(self.LorenzDistance) + '\n' - results_string += 'Average MPC for all consumers is ' + mystr(MPCall) + '\n' - results_string += 'Average MPC in the top percentile of W/Y is ' + mystr(MPCbyWealthRatio[0]) + '\n' - results_string += 'Average MPC in the top decile of W/Y is ' + mystr(MPCbyWealthRatio[1]) + '\n' - results_string += 'Average MPC in the top quintile of W/Y is ' + mystr(MPCbyWealthRatio[2]) + '\n' - results_string += 'Average MPC in the second quintile of W/Y is ' + mystr(MPCbyWealthRatio[3]) + '\n' - results_string += 'Average MPC in the middle quintile of W/Y is ' + mystr(MPCbyWealthRatio[4]) + '\n' - results_string += 'Average MPC in the fourth quintile of W/Y is ' + mystr(MPCbyWealthRatio[5]) + '\n' - results_string += 
'Average MPC in the bottom quintile of W/Y is ' + mystr(MPCbyWealthRatio[6]) + '\n' - results_string += 'Average MPC in the top percentile of y is ' + mystr(MPCbyIncome[0]) + '\n' - results_string += 'Average MPC in the top decile of y is ' + mystr(MPCbyIncome[1]) + '\n' - results_string += 'Average MPC in the top quintile of y is ' + mystr(MPCbyIncome[2]) + '\n' - results_string += 'Average MPC in the second quintile of y is ' + mystr(MPCbyIncome[3]) + '\n' - results_string += 'Average MPC in the middle quintile of y is ' + mystr(MPCbyIncome[4]) + '\n' - results_string += 'Average MPC in the fourth quintile of y is ' + mystr(MPCbyIncome[5]) + '\n' - results_string += 'Average MPC in the bottom quintile of y is ' + mystr(MPCbyIncome[6]) + '\n' - results_string += 'Average MPC for the employed is ' + mystr(MPCemployed) + '\n' - results_string += 'Average MPC for the unemployed is ' + mystr(MPCunemployed) + '\n' - results_string += 'Average MPC for the retired is ' + mystr(MPCretired) + '\n' - results_string += 'Of the population with the 1/3 highest MPCs...' + '\n' - results_string += mystr(HandToMouthPct[0]*100) + '% are in the bottom wealth quintile,' + '\n' - results_string += mystr(HandToMouthPct[1]*100) + '% are in the second wealth quintile,' + '\n' - results_string += mystr(HandToMouthPct[2]*100) + '% are in the third wealth quintile,' + '\n' - results_string += mystr(HandToMouthPct[3]*100) + '% are in the fourth wealth quintile,' + '\n' - results_string += 'and ' + mystr(HandToMouthPct[4]*100) + '% are in the top wealth quintile.' 
def getKYratioDifference(Economy, param_name, param_count, center, spread, dist_type):
    '''
    Finds the difference between simulated and target capital to income ratio in an economy when
    a given parameter has heterogeneity according to some distribution.

    Parameters
    ----------
    Economy : cstwMPCmarket
        An object representing the entire economy, containing the various AgentTypes as an attribute.
    param_name : string
        The name of the parameter of interest that varies across the population.
    param_count : int
        The number of different values the parameter of interest will take on.
    center : float
        A measure of centrality for the distribution of the parameter of interest.
    spread : float
        A measure of spread or diffusion for the distribution of the parameter of interest.
    dist_type : string
        The type of distribution to be used.  Can be "lognormal" or "uniform" (can expand).

    Returns
    -------
    diff : float
        Difference between simulated and target capital to income ratio for this economy.
    '''
    Economy(LorenzBool=False, ManyStatsBool=False)  # Make sure we're not wasting time calculating stuff
    Economy.distributeParams(param_name, param_count, center, spread, dist_type)  # Distribute parameters
    Economy.solve()
    diff = Economy.calcKYratioDifference()
    print('getKYratioDifference tried center = ' + str(center) + ' and got ' + str(diff))
    return diff


def findLorenzDistanceAtTargetKY(Economy, param_name, param_count, center_range, spread, dist_type):
    '''
    Finds the sum of squared distances between simulated and target Lorenz points in an economy when
    a given parameter has heterogeneity according to some distribution.  The class of distribution
    and a measure of spread are given as inputs, but the measure of centrality such that the capital
    to income ratio matches the target ratio must be found.

    Parameters
    ----------
    Economy : cstwMPCmarket
        An object representing the entire economy, containing the various AgentTypes as an attribute.
    param_name : string
        The name of the parameter of interest that varies across the population.
    param_count : int
        The number of different values the parameter of interest will take on.
    center_range : [float,float]
        Bounding values for a measure of centrality for the distribution of the parameter of interest.
    spread : float
        A measure of spread or diffusion for the distribution of the parameter of interest.
    dist_type : string
        The type of distribution to be used.  Can be "lognormal" or "uniform" (can expand).

    Returns
    -------
    dist : float
        Sum of squared distances between simulated and target Lorenz points for this economy (sqrt).
    '''
    # Define the function to search for the correct value of center, then find its zero
    intermediateObjective = lambda center: getKYratioDifference(Economy=Economy,
                                                                param_name=param_name,
                                                                param_count=param_count,
                                                                center=center,
                                                                spread=spread,
                                                                dist_type=dist_type)
    optimal_center = brentq(intermediateObjective, center_range[0], center_range[1], xtol=10**(-6))
    Economy.center_save = optimal_center

    # Get the sum of squared Lorenz distances given the correct distribution of the parameter
    Economy(LorenzBool=True)  # Make sure we actually calculate simulated Lorenz points
    Economy.distributeParams(param_name, param_count, optimal_center, spread, dist_type)  # Distribute parameters
    Economy.solveAgents()
    Economy.makeHistory()
    dist = Economy.calcLorenzDistance()
    Economy(LorenzBool=False)
    print('findLorenzDistanceAtTargetKY tried spread = ' + str(spread) + ' and got ' + str(dist))
    return dist


def calcStationaryAgeDstn(LivPrb, terminal_period):
    '''
    Calculates the steady state proportions of each age given survival probability sequence LivPrb.
    Assumes that agents who die are replaced by a newborn agent with t_age=0.

    Parameters
    ----------
    LivPrb : [float]
        Sequence of survival probabilities in ordinary chronological order.  Has length T_cycle.
    terminal_period : bool
        Indicator for whether a terminal period follows the last period in the cycle (with LivPrb=0).

    Returns
    -------
    AgeDstn : np.array
        Stationary distribution of age.  Stochastic vector with frequencies of each age.
    '''
    T = len(LivPrb)
    if terminal_period:
        # One extra state for the terminal period, which is always exited (death)
        MrkvArray = np.zeros((T+1, T+1))
        top = T
    else:
        MrkvArray = np.zeros((T, T))
        top = T-1

    # Each age either dies (returns to age 0) or survives to the next age
    for t in range(top):
        MrkvArray[t, 0] = 1.0 - LivPrb[t]
        MrkvArray[t, t+1] = LivPrb[t]
    MrkvArray[t+1, 0] = 1.0  # Oldest age always dies and is replaced by a newborn

    # Stationary distribution is the eigenvector of the transposed transition
    # matrix associated with the unit eigenvalue, normalized to sum to one
    w, v = np.linalg.eig(np.transpose(MrkvArray))
    idx = (np.abs(w-1.0)).argmin()
    x = v[:, idx].astype(float)
    AgeDstn = (x/np.sum(x))
    return AgeDstn

####################################################################################################


def main():

    # Set targets for K/Y and the Lorenz curve based on the data
    if Params.do_liquid:
        lorenz_target = np.array([0.0, 0.004, 0.025, 0.117])
        KY_target = 6.60
    else:  # This is hacky until I can find the liquid wealth data and import it
        lorenz_target = getLorenzShares(Params.SCF_wealth, weights=Params.SCF_weights, percentiles=Params.percentiles_to_match)
        lorenz_long_data = np.hstack((np.array(0.0),
                                      getLorenzShares(Params.SCF_wealth, weights=Params.SCF_weights, percentiles=np.arange(0.01, 1.0, 0.01).tolist()),
                                      np.array(1.0)))
        #lorenz_target = np.array([-0.002, 0.01, 0.053,0.171])
        KY_target = 10.26

    # Make AgentTypes for estimation
    if Params.do_lifecycle:
        DropoutType = cstwMPCagent(**Params.init_dropout)
        DropoutType.AgeDstn = calcStationaryAgeDstn(DropoutType.LivPrb, True)
        HighschoolType = deepcopy(DropoutType)
        HighschoolType(**Params.adj_highschool)
        HighschoolType.AgeDstn = calcStationaryAgeDstn(HighschoolType.LivPrb, True)
        CollegeType = deepcopy(DropoutType)
        CollegeType(**Params.adj_college)
        CollegeType.AgeDstn = calcStationaryAgeDstn(CollegeType.LivPrb, True)
        DropoutType.update()
        HighschoolType.update()
        CollegeType.update()
        EstimationAgentList = []
        for n in range(Params.pref_type_count):
            EstimationAgentList.append(deepcopy(DropoutType))
            EstimationAgentList.append(deepcopy(HighschoolType))
            EstimationAgentList.append(deepcopy(CollegeType))
    else:
        if Params.do_agg_shocks:
            PerpetualYouthType = cstwMPCagent(**Params.init_agg_shocks)
        else:
            PerpetualYouthType = cstwMPCagent(**Params.init_infinite)
        PerpetualYouthType.AgeDstn = np.array(1.0)
        EstimationAgentList = []
        for n in range(Params.pref_type_count):
            EstimationAgentList.append(deepcopy(PerpetualYouthType))

    # Give all the AgentTypes different seeds
    for j in range(len(EstimationAgentList)):
        EstimationAgentList[j].seed = j

    # Make an economy for the consumers to live in
    EstimationEconomy = cstwMPCmarket(**Params.init_market)
    EstimationEconomy.agents = EstimationAgentList
    EstimationEconomy.KYratioTarget = KY_target
    EstimationEconomy.LorenzTarget = lorenz_target
    EstimationEconomy.LorenzData = lorenz_long_data
    if Params.do_lifecycle:
        EstimationEconomy.PopGroFac = Params.PopGroFac
        EstimationEconomy.TypeWeight = Params.TypeWeight_lifecycle
        EstimationEconomy.T_retire = Params.working_T - 1
        EstimationEconomy.act_T = Params.T_sim_LC
        EstimationEconomy.ignore_periods = Params.ignore_periods_LC
    else:
        EstimationEconomy.PopGroFac = 1.0
        EstimationEconomy.TypeWeight = [1.0]
        EstimationEconomy.act_T = Params.T_sim_PY
        EstimationEconomy.ignore_periods = Params.ignore_periods_PY
    if Params.do_agg_shocks:
        EstimationEconomy(**Params.aggregate_params)
        EstimationEconomy.update()
        EstimationEconomy.makeAggShkHist()

    # Estimate the model as requested
    if Params.run_estimation:
        # Choose the bounding region for the parameter search
        if Params.param_name == 'CRRA':
            param_range = [0.2, 70.0]
            spread_range = [0.00001, 1.0]
        elif Params.param_name == 'DiscFac':
            param_range = [0.95, 0.995]
            spread_range = [0.006, 0.008]
        else:
            print('Parameter range for ' + Params.param_name + ' has not been defined!')

        if Params.do_param_dist:
            # Run the param-dist estimation
            paramDistObjective = lambda spread: findLorenzDistanceAtTargetKY(
                Economy=EstimationEconomy,
                param_name=Params.param_name,
                param_count=Params.pref_type_count,
                center_range=param_range,
                spread=spread,
                dist_type=Params.dist_type)
            t_start = time()
            spread_estimate = golden(paramDistObjective, brack=spread_range, tol=1e-4)
            center_estimate = EstimationEconomy.center_save
            t_end = time()
        else:
            # Run the param-point estimation only
            paramPointObjective = lambda center: getKYratioDifference(Economy=EstimationEconomy,
                                                                      param_name=Params.param_name,
                                                                      param_count=Params.pref_type_count,
                                                                      center=center,
                                                                      spread=0.0,
                                                                      dist_type=Params.dist_type)
            t_start = time()
            center_estimate = brentq(paramPointObjective, param_range[0], param_range[1], xtol=1e-6)
            spread_estimate = 0.0
            t_end = time()

        # Display statistics about the estimated model
        #center_estimate = 0.986609223266
        #spread_estimate = 0.00853886395698
        EstimationEconomy.LorenzBool = True
        EstimationEconomy.ManyStatsBool = True
        EstimationEconomy.distributeParams(Params.param_name, Params.pref_type_count, center_estimate, spread_estimate, Params.dist_type)
        EstimationEconomy.solve()
        EstimationEconomy.calcLorenzDistance()
        print('Estimate is center=' + str(center_estimate) + ', spread=' + str(spread_estimate) + ', took ' + str(t_end - t_start) + ' seconds.')
        EstimationEconomy.center_estimate = center_estimate
        EstimationEconomy.spread_estimate = spread_estimate
        EstimationEconomy.showManyStats(Params.spec_name)


if __name__ == '__main__':
    main()
2001 From: Mridul Seth Date: Tue, 28 Apr 2020 23:20:17 +0530 Subject: [PATCH 2/2] points towards the REMARK for TractableBufferStockModel cstwMPCagent --- HARK/ConsumptionSaving/TractableBufferStockModel.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/HARK/ConsumptionSaving/TractableBufferStockModel.py b/HARK/ConsumptionSaving/TractableBufferStockModel.py index 5ff678218..b446aaa55 100644 --- a/HARK/ConsumptionSaving/TractableBufferStockModel.py +++ b/HARK/ConsumptionSaving/TractableBufferStockModel.py @@ -36,9 +36,8 @@ __all__ = ['TractableConsumerSolution', 'TractableConsumerType'] -# If you want to run the "tractable" version of cstwMPC, uncomment the line below -# and have TractableConsumerType inherit from cstwMPCagent rather than AgentType -#from HARK.cstwMPC.cstwMPC import cstwMPCagent +# If you want to run the "tractable" version of cstwMPC, use cstwMPCagent from +# cstwMPC REMARK and have TractableConsumerType inherit from cstwMPCagent rather than AgentType # Define utility function and its derivatives (plus inverses) utility = CRRAutility