
Turned SolvingMicroDSOPs into the ReplicateMake example (#176)
* Changed file structure and added code to make MicroDSOPs into the ReplicateMake example.

Added copy_local.py to each major module as part of the ReplicateMake effort.

* Moved the copy_module_to_local functionality to HARK.core
npalmer-professional authored and llorracc committed Aug 5, 2018
1 parent d00b1e4 commit d265565
Showing 15 changed files with 445 additions and 19 deletions.
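The commit message above moves copy_module_to_local into HARK.core, but that code is not among the hunks shown below. The following is only a minimal sketch of what such a helper might look like; its name comes from the commit message, while the signature and behavior are assumptions, not the committed implementation:

import os
import shutil

def copy_module_to_local(module_file, target_dir):
    # Copy the directory containing a module (e.g. SolvingMicroDSOPs) into a
    # local working directory, so the replication can be edited and re-run
    # outside the installed HARK package.
    source_dir = os.path.dirname(os.path.abspath(module_file))
    destination = os.path.join(target_dir, os.path.basename(source_dir))
    if os.path.exists(destination):
        raise IOError("Target directory already exists: " + destination)
    shutil.copytree(source_dir, destination)  # recursive copy of the module tree
    return destination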
HARK/SolvingMicroDSOPs/Calibration/EstimationParameters.py
@@ -170,4 +170,4 @@
print("Sorry, EstimationParameters doesn't actually do anything on its own.")
print("This module is imported by StructEstimation, providing calibrated ")
print("parameters for the example estimation. Please see that module if you ")
print("want more interesting output.")
print("want more interesting output.")
File renamed without changes.
HARK/SolvingMicroDSOPs/Calibration/SetupSCFdata.py
@@ -8,15 +8,37 @@
from builtins import str
from builtins import range

import os
import os, sys

# Find pathname to this file:
my_file_path = os.path.dirname(os.path.abspath(__file__))

# Pathnames to the other files:
calibration_dir = os.path.join(my_file_path, "../Calibration/") # Relative directory for primitive parameter files
tables_dir = os.path.join(my_file_path, "../Tables/") # Relative directory for output tables
figures_dir = os.path.join(my_file_path, "../Figures/") # Relative directory for output figures
code_dir = os.path.join(my_file_path, "../Code/") # Relative directory for the estimation code


# Import modules from local repository. If local repository is part of HARK,
# this will import from HARK. Otherwise manual pathname specification is in
# order.
try:
# Import from core HARK code first:
from HARK.SolvingMicroDSOPs.Calibration.EstimationParameters import initial_age, empirical_cohort_age_groups

except ImportError:
# Need to rely on the manual insertion of pathnames to all files in do_all.py
# NOTE sys.path.insert(0, os.path.abspath(tables_dir)), etc. may need to be
# copied from do_all.py to here

# Import files first:
from EstimationParameters import initial_age, empirical_cohort_age_groups


# The following libraries are part of the standard python distribution
import numpy as np # Numerical Python
import csv # Comma-separated variable reader
from .EstimationParameters import initial_age, empirical_cohort_age_groups

# Libraries below are part of HARK's module system and must be in this directory
from HARK.utilities import warnings

# Set the path to the empirical data:
scf_data_path = data_location = os.path.dirname(os.path.abspath(__file__))
Empty file.
HARK/SolvingMicroDSOPs/Code/StructEstimation.py
@@ -13,19 +13,50 @@

from builtins import str
from builtins import range
from . import EstimationParameters as Params # Parameters for the consumer type and the estimation

import os
import sys
import csv
import numpy as np # Numeric Python
import pylab # Python reproductions of some Matlab functions
from time import time, clock # Timing utility

# Import modules from core HARK libraries:
import HARK.ConsumptionSaving.ConsIndShockModel as Model # The consumption-saving micro model
from . import SetupSCFdata as Data # SCF 2004 data on household wealth
from HARK.simulation import drawDiscrete # Method for sampling from a discrete distribution
from HARK.estimation import minimizeNelderMead, bootstrapSampleFromData # Estimation methods
import numpy as np # Numeric Python
import pylab # Python reproductions of some Matlab functions
from time import time # Timing utility

# Find pathname to this file:
my_file_path = os.path.dirname(os.path.abspath(__file__))

# Pathnames to the other files:
calibration_dir = os.path.join(my_file_path, "../Calibration/") # Relative directory for primitive parameter files
tables_dir = os.path.join(my_file_path, "../Tables/") # Relative directory for output tables
figures_dir = os.path.join(my_file_path, "../Figures/") # Relative directory for output figures
code_dir = os.path.join(my_file_path, "../Code/") # Relative directory for the estimation code


# Import modules from local repository. If local repository is part of HARK,
# this will import from HARK. Otherwise manual pathname specification is in
# order.
try:
# Import from core HARK code first:
from HARK.SolvingMicroDSOPs.Calibration import EstimationParameters as Params # Parameters for the consumer type and the estimation
from HARK.SolvingMicroDSOPs.Calibration import SetupSCFdata as Data # SCF 2004 data on household wealth

except ImportError:
# Need to rely on the manual insertion of pathnames to all files in do_all.py
# NOTE sys.path.insert(0, os.path.abspath(tables_dir)), etc. may need to be
# copied from do_all.py to here
import EstimationParameters as Params # Parameters for the consumer type and the estimation
import SetupSCFdata as Data # SCF 2004 data on household wealth



# Set booleans to determine which tasks should be done
estimate_model = True # Whether to estimate the model
compute_standard_errors = False # Whether to get standard errors via bootstrap
make_contour_plot = False # Whether to make a contour map of the objective function
local_estimate_model = True # Whether to estimate the model
local_compute_standard_errors = False # Whether to get standard errors via bootstrap
local_make_contour_plot = True # Whether to make a contour map of the objective function

#=====================================================
# Define objects and functions used for the estimation
@@ -253,21 +284,84 @@ def calculateStandardErrorsByBootstrap(initial_estimate,N,seed=0,verbose=False):
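# NOTE: the definitions of smmObjectiveFxn, smmObjectiveFxnReduced, and
# calculateStandardErrorsByBootstrap are collapsed in this hunk. As a rough,
# assumption-laden sketch of the SMM objective being minimized (names below
# are hypothetical, not the committed code): simulate the model at a candidate
# (DiscFacAdj, CRRA), compute median wealth-to-income ratios by age group,
# and return the squared distance to the corresponding SCF moments:
#
#     def smm_objective(DiscFacAdj, CRRA):
#         simulated = simulate_median_wealth_by_age(DiscFacAdj, CRRA)  # hypothetical helper
#         return np.sum((simulated - empirical_moments)**2)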
# Done defining objects and functions. Now run them (if desired).
#=================================================================

def main():
def main(estimate_model=local_estimate_model, compute_standard_errors=local_compute_standard_errors, make_contour_plot=local_make_contour_plot):
"""
Run the main estimation procedure for SolvingMicroDSOPs.
Parameters
----------
estimate_model : bool
Whether to estimate the model using Nelder-Mead. When True, this is a low-time, low-memory operation.
compute_standard_errors : bool
Whether to compute standard errors on the estimation of the model.
make_contour_plot : bool
Whether to make the contour plot associated with the estimate.
Returns
-------
None
"""


# Estimate the model using Nelder-Mead
if estimate_model:
initial_guess = [Params.DiscFacAdj_start,Params.CRRA_start]
print('--------------------------------------------------------------------------------')
print('Now estimating the model using Nelder-Mead from an initial guess of ' + str(initial_guess) + '...')
print('--------------------------------------------------------------------------------')
t_start_estimate = clock()
model_estimate = minimizeNelderMead(smmObjectiveFxnReduced,initial_guess,verbose=True)
t_end_estimate = clock()
time_to_estimate = t_end_estimate-t_start_estimate
print('Time to execute all:', round(time_to_estimate/60.,2), 'min,', time_to_estimate, 'sec')
print('Estimated values: DiscFacAdj=' + str(model_estimate[0]) + ', CRRA=' + str(model_estimate[1]))

# Create the simple estimate table
estimate_results_file = os.path.join(tables_dir, 'estimate_results.csv')
with open(estimate_results_file, 'wt') as f:
writer = csv.writer(f)
writer.writerow(['DiscFacAdj', 'CRRA'])
writer.writerow([model_estimate[0], model_estimate[1]])


if compute_standard_errors and not estimate_model:
print("To run the bootstrap you must first estimate the model by setting estimate_model = True.")

# Compute standard errors by bootstrap
if compute_standard_errors:
if compute_standard_errors and estimate_model:


# Bootstrap the standard errors:
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print("Computing standard errors using",Params.bootstrap_size,"bootstrap replications.")
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
try:
t_bootstrap_guess = time_to_estimate * Params.bootstrap_size
print("This will take approximately", round(t_bootstrap_guess/60.,2), "min, ", t_bootstrap_guess, "sec")
except:
pass
t_start_bootstrap = clock()
std_errors = calculateStandardErrorsByBootstrap(model_estimate,N=Params.bootstrap_size,seed=Params.seed,verbose=True)
t_end_bootstrap = clock()
time_to_bootstrap = t_end_bootstrap-t_start_bootstrap
print('Time to execute all:', round(time_to_bootstrap/60.,2), 'min,', time_to_bootstrap, 'sec')
print('Standard errors: DiscFacAdj--> ' + str(std_errors[0]) + ', CRRA--> ' + str(std_errors[1]))

# Create the simple bootstrap table
bootstrap_results_file = os.path.join(tables_dir, 'bootstrap_results.csv')
with open(bootstrap_results_file, 'wt') as f:
writer = csv.writer(f)
writer.writerow(['DiscFacAdj', 'DiscFacAdj_standard_error', 'CRRA', 'CRRA_standard_error'])
writer.writerow([model_estimate[0], std_errors[0], model_estimate[1], std_errors[1]])

# Make a contour plot of the objective function
if make_contour_plot:
print('````````````````````````````````````````````````````````````````````````````````')
print("Creating the contour plot.")
print('````````````````````````````````````````````````````````````````````````````````')
t_start_contour = clock()
grid_density = 20 # Number of parameter values in each dimension
level_count = 100 # Number of contour levels to plot
DiscFacAdj_list = np.linspace(0.85,1.05,grid_density)
@@ -280,14 +374,19 @@ def main():
CRRA = CRRA_list[k]
smm_obj_levels[j,k] = smmObjectiveFxn(DiscFacAdj,CRRA)
smm_contour = pylab.contourf(CRRA_mesh,DiscFacAdj_mesh,smm_obj_levels,level_count)
t_end_contour = clock()
time_to_contour = t_end_contour-t_start_contour
print('Time to execute all:', round(time_to_contour/60.,2), 'min,', time_to_contour, 'sec')
pylab.colorbar(smm_contour)
pylab.plot(model_estimate[1],model_estimate[0],'*r',ms=15)
pylab.xlabel(r'coefficient of relative risk aversion $\rho$',fontsize=14)
pylab.ylabel(r'discount factor adjustment $\beth$',fontsize=14)
pylab.savefig('SMMcontour.pdf')
pylab.savefig('SMMcontour.png')
pylab.savefig(os.path.join(figures_dir, 'SMMcontour.pdf'))
pylab.savefig(os.path.join(figures_dir, 'SMMcontour.png'))
pylab.show()



if __name__ == '__main__':
main()
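The body of calculateStandardErrorsByBootstrap is collapsed in the hunk above. As a rough sketch of the technique it names -- not the committed implementation; the function and argument names below are assumptions -- a bootstrap standard error re-estimates the model on datasets resampled with replacement and takes the standard deviation of the estimates across replications:

import numpy as np

def bootstrap_standard_errors(data, estimate_fn, N, seed=0):
    # data: NumPy array of observations; estimate_fn: maps a dataset to a
    # parameter vector; N: number of bootstrap replications.
    rng = np.random.RandomState(seed)
    estimates = []
    for b in range(N):
        idx = rng.randint(0, len(data), size=len(data))  # resample rows with replacement
        estimates.append(estimate_fn(data[idx]))
    # The standard error is the std. dev. of the estimates across replications
    return np.std(np.array(estimates), axis=0)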

Empty file.
Binary file added HARK/SolvingMicroDSOPs/Figures/SMMcontour.pdf
Binary file not shown.
Binary file added HARK/SolvingMicroDSOPs/Figures/SMMcontour.png
Binary file not shown.
Empty file.
Binary file removed HARK/SolvingMicroDSOPs/SMMcontour.pdf
Binary file not shown.
Binary file removed HARK/SolvingMicroDSOPs/SMMcontour.png
Binary file not shown.
Empty file.
2 changes: 2 additions & 0 deletions HARK/SolvingMicroDSOPs/Tables/estimate_results.csv
@@ -0,0 +1,2 @@
DiscFacAdj,CRRA
1.004880962644363,4.8605098948425285
148 changes: 148 additions & 0 deletions HARK/SolvingMicroDSOPs/do_all.py
@@ -0,0 +1,148 @@
'''
Run all of the plots and tables in SolvingMicroDSOPs.
To execute, do the following on the Python command line:
from HARK.[YOUR-MODULE-NAME-HERE].do_all import run_replication
run_replication()
You will be presented with an interactive prompt that asks what level of
replication you would like to run.
More Details
------------
This example script allows the user to create all of the Figures and Tables
for SolvingMicroDSOPs.StructEstimation.
This example is kept as simple and minimal as possible to illustrate the
format of a "replication archive."
The file structure is as follows:
./SolvingMicroDSOPs/
Calibration/ # Directory that contains the necessary code and data to parameterize the model
Code/ # The main estimation code, in this case StructEstimation.py
Figures/ # Any Figures created by the main code
Tables/ # Any tables created by the main code
Because computational modeling can be very memory- and time-intensive, this file
also allows the user to choose whether to run files based on there resouce
requirements. Files are categorized as one of the following three:
- low_resource: low RAM needed and runs quickly, say 1-5 minutes
- medium_resource: moderate RAM needed and runs moderately quickly, say 5-10+ minutes
- high_resource: high RAM needed (and potentially parallel computing required), and a long time to run, perhaps even hours, days, or longer.
The designation is purposefully vague and left up to the researcher to specify
more clearly below. Using time taken on an example machine is entirely reasonable
here.
Finally, this code may serve as example code for efforts that fall outside
the HARK package structure for one reason or another. Therefore this script will
attempt to import the necessary MicroDSOP sub-modules as though they are part of
the HARK package; if that fails, this script reverts to manually updating the
Python PATH with the locations of the MicroDSOP directory structure so it can
still run.
'''

from __future__ import division, print_function
from builtins import str, range, input  # input from future's builtins for Python 2 compatibility

import os, sys

# Find pathname to this file:
my_file_path = os.path.dirname(os.path.abspath(__file__))

# Pathnames to the other files:
calibration_dir = os.path.join(my_file_path, "Calibration") # Relative directory for primitive parameter files
tables_dir = os.path.join(my_file_path, "Tables") # Relative directory for output tables
figures_dir = os.path.join(my_file_path, "Figures") # Relative directory for output figures
code_dir = os.path.join(my_file_path, "Code") # Relative directory for the estimation code

# Import modules from local repository. If local repository is part of HARK,
# this will import from HARK. Otherwise manual pathname specification is in
# order.
try:
# Import from core HARK code first:
from HARK.SolvingMicroDSOPs.Code import StructEstimation as struct
except ImportError:
print("**************** Manually specifying pathnames for modules *******************")
# It appears that the current module is not part of HARK, therefore we will
# manually add the pathnames to the various files directly to the beginning
# of the Python path. This will be needed for all files that will run in
# lower directories.
sys.path.insert(0, calibration_dir)
sys.path.insert(0, tables_dir)
sys.path.insert(0, figures_dir)
sys.path.insert(0, code_dir)
sys.path.insert(0, my_file_path)

# Manual import needed, should draw from first instance at start of Python
# PATH added above:
import StructEstimation as struct


# Define settings for "main()" function in StructEstimation.py based on
# resource requirements:

low_resource = {'estimate_model':True, 'make_contour_plot':False, 'compute_standard_errors':False}
# Author note:
# This takes approximately 90 seconds on a laptop with the following specs:
# Linux, Ubuntu 14.04.1 LTS, 8G of RAM, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz

medium_resource = {'estimate_model':True, 'make_contour_plot':True, 'compute_standard_errors':False}
# Author note:
# This takes approximately 7 minutes on a laptop with the following specs:
# Linux, Ubuntu 14.04.1 LTS, 8G of RAM, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz

high_resource = {'estimate_model':True, 'make_contour_plot':False, 'compute_standard_errors':True}
# Author note:
# This takes approximately 30 minutes on a laptop with the following specs:
# Linux, Ubuntu 14.04.1 LTS, 8G of RAM, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz

all_replications = {'estimate_model':True, 'make_contour_plot':True, 'compute_standard_errors':True}
# Author note:
# This takes approximately 40 minutes on a laptop with the following specs:
# Linux, Ubuntu 14.04.1 LTS, 8G of RAM, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz


# Ask the user which replication to run, and run it:
def run_replication():
which_replication = input("""Which replication would you like to run? (See documentation in do_all.py for details.) Please enter the option number to run that option; default is in brackets:
[1] low-resource: ~90 sec; output ./Tables/estimate_results.csv
2 medium-resource: ~7 min; output ./Figures/SMMcontour.pdf
./Figures/SMMcontour.png
3 high-resource: ~30 min; output ./Tables/bootstrap_results.csv
4 all: ~40 min; output: all above.
q quit: exit without executing.\n\n""")


if which_replication == 'q':
return

elif which_replication == '1' or which_replication == '':
print("Running low-resource replication...")
struct.main(**low_resource)

elif which_replication == '2':
print("Running medium-resource replication...")
struct.main(**medium_resource)

elif which_replication == '3':
print("Running high-resource replication...")
struct.main(**high_resource)

elif which_replication == '4':
print("Running all replications...")
struct.main(**all_replications)

else:
return

if __name__ == '__main__':
run_replication()
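For a scripted, non-interactive run, the prompt can be bypassed by calling the estimation's main() directly with one of the settings dictionaries defined above -- a minimal usage sketch, assuming the package-style import succeeds:

from HARK.SolvingMicroDSOPs.Code import StructEstimation as struct

# Equivalent to choosing option 1 (low-resource) at the prompt, ~90 seconds:
struct.main(estimate_model=True, make_contour_plot=False, compute_standard_errors=False)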
