diff --git a/quantecon/arma.py b/quantecon/arma.py
index 56ba60e52..c18477105 100644
--- a/quantecon/arma.py
+++ b/quantecon/arma.py
@@ -24,8 +24,8 @@ class ARMA(object):

        X_t = \phi X_{t-1} + \epsilon_t + \theta \epsilon_{t-1}

-    where :math:`epsilon_t` is a white noise process with standard
-    deviation :math:`sigma`. If phi and theta are arrays or sequences,
+    where :math:`\epsilon_t` is a white noise process with standard
+    deviation :math:`\sigma`. If phi and theta are arrays or sequences,
     then the interpretation is the ARMA(p, q) model

    .. math::
@@ -182,7 +182,7 @@ def spectral_density(self, two_pi=True, res=1200):

        .. math::

-            f(w) = \sum_k \gamma(k) exp(-ikw)
+            f(w) = \sum_k \gamma(k) \exp(-ikw)

        where gamma is the autocovariance function and the sum is over
        the set of all integers.
@@ -190,8 +190,8 @@ def spectral_density(self, two_pi=True, res=1200):
        Parameters
        ----------
        two_pi : Boolean, optional
-            Compute the spectral density function over [0, pi] if
-            two_pi is False and [0, 2 pi] otherwise. Default value is
+            Compute the spectral density function over :math:`[0, \pi]` if
+            two_pi is False and :math:`[0, 2 \pi]` otherwise. Default value is
            True
        res : scalar or array_like(int), optional(default=1200)
            If res is a scalar then the spectral density is computed at
diff --git a/quantecon/estspec.py b/quantecon/estspec.py
index 708529fbc..8cae433ef 100644
--- a/quantecon/estspec.py
+++ b/quantecon/estspec.py
@@ -74,17 +74,18 @@ def smooth(x, window_len=7, window='hanning'):


 def periodogram(x, window=None, window_len=7):
-    """
+    r"""
     Computes the periodogram

    .. math::

-        I(w) = (1 / n) | sum_{t=0}^{n-1} x_t e^{itw} |^2
+        I(w) = \frac{1}{n} \Big| \sum_{t=0}^{n-1} x_t e^{itw} \Big|^2

-    at the Fourier frequences w_j := 2 pi j / n, j = 0, ..., n - 1,
-    using the fast Fourier transform. Only the frequences w_j in [0,
-    pi] and corresponding values I(w_j) are returned. If a window type
-    is given then smoothing is performed.
+    at the Fourier frequencies :math:`w_j := \frac{2 \pi j}{n}`,
+    :math:`j = 0, \dots, n - 1`, using the fast Fourier transform. Only the
+    frequencies :math:`w_j` in :math:`[0, \pi]` and corresponding values
+    :math:`I(w_j)` are returned. If a window type is given then smoothing
+    is performed.

     Parameters
     ----------
@@ -139,19 +140,18 @@ def ar_periodogram(x, window='hanning', window_len=7):
     """
     # === run regression === #
-    x_lag = x[:-1] # lagged x
-    X = np.array([np.ones(len(x_lag)), x_lag]).T # add constant
-
-    y = np.array(x[1:]) # current x
-
-    beta_hat = np.linalg.solve(X.T @ X, X.T @ y) # solve for beta hat
-    e_hat = y - X @ beta_hat # compute residuals
-    phi = beta_hat[1] # pull out phi parameter
+    x_lag = x[:-1]  # lagged x
+    X = np.array([np.ones(len(x_lag)), x_lag]).T  # add constant
+
+    y = np.array(x[1:])  # current x
+
+    beta_hat = np.linalg.solve(X.T @ X, X.T @ y)  # solve for beta hat
+    e_hat = y - X @ beta_hat  # compute residuals
+    phi = beta_hat[1]  # pull out phi parameter

     # === compute periodogram on residuals === #
     w, I_w = periodogram(e_hat, window=window, window_len=window_len)

-    # === compute periodogram on residuals === #
     w, I_w = periodogram(e_hat, window=window, window_len=window_len)

diff --git a/quantecon/ivp.py b/quantecon/ivp.py
index 9b236d0cf..9cff2b328 100644
--- a/quantecon/ivp.py
+++ b/quantecon/ivp.py
@@ -24,28 +24,30 @@
 class IVP(integrate.ode):

+
+    r"""
+    Creates an instance of the IVP class.
+
+    Parameters
+    ----------
+    f : callable ``f(t, y, *f_args)``
+        Right hand side of the system of equations defining the ODE.
+        The independent variable, ``t``, is a ``scalar``; ``y`` is
+        an ``ndarray`` of dependent variables with ``y.shape ==
+        (n,)``. The function `f` should return a ``scalar``,
+        ``ndarray`` or ``list`` (but not a ``tuple``).
+    jac : callable ``jac(t, y, *jac_args)``, optional(default=None)
+        Jacobian of the right hand side of the system of equations
+        defining the ODE.

-        Parameters
-        ----------
-        f : callable ``f(t, y, *f_args)``
-            Right hand side of the system of equations defining the ODE.
-            The independent variable, ``t``, is a ``scalar``; ``y`` is
-            an ``ndarray`` of dependent variables with ``y.shape ==
-            (n,)``. The function `f` should return a ``scalar``,
-            ``ndarray`` or ``list`` (but not a ``tuple``).
-        jac : callable ``jac(t, y, *jac_args)``, optional(default=None)
-            Jacobian of the right hand side of the system of equations
-            defining the ODE.

+    .. :math:

-        .. :math:

+        \mathcal{J}_{i,j} = \bigg[\frac{\partial f_i}{\partial y_j}\bigg]

-            \mathcal{J}_{i,j} = \bigg[\frac{\partial f_i}{\partial y_j}\bigg]

+    """
+
+    def __init__(self, f, jac=None):

-        """
         super(IVP, self).__init__(f, jac)

     def _integrate_fixed_trajectory(self, h, T, step, relax):
diff --git a/quantecon/kalman.py b/quantecon/kalman.py
index dd3e64396..565ef716c 100644
--- a/quantecon/kalman.py
+++ b/quantecon/kalman.py
@@ -1,6 +1,6 @@
 """
 Filename: kalman.py
-Reference: http://quant-econ.net/py/kalman.html
+Reference: https://lectures.quantecon.org/py/kalman.html

 Implements the Kalman filter for a linear Gaussian state space model.

@@ -17,11 +17,16 @@ class Kalman(object):
     r"""
     Implements the Kalman filter for the Gaussian state space model

+    .. math::
+
         x_{t+1} = A x_t + C w_{t+1}
-        y_t = G x_t + H v_t.
+        y_t = G x_t + H v_t
+
+    Here :math:`x_t` is the hidden state and :math:`y_t` is the measurement.
+    The shocks :math:`w_t` and :math:`v_t` are iid standard normals. Below
+    we use the notation

-    Here x_t is the hidden state and y_t is the measurement. The shocks
-    w_t and v_t are iid standard normals. Below we use the notation
+    .. math::

        Q := CC'
        R := HH'
@@ -52,7 +57,7 @@ class Kalman(object):

    References
    ----------

-    http://quant-econ.net/py/kalman.html
+    https://lectures.quantecon.org/py/kalman.html

    """

@@ -91,28 +96,46 @@ def whitener_lss(self):
        that system to the time-invariant whitener represenation
        given by

+        .. math::
+
            \tilde{x}_{t+1}^* = \tilde{A} \tilde{x} + \tilde{C} v
            a = \tilde{G} \tilde{x}

        where

+        .. math::
+
            \tilde{x}_t = [x+{t}, \hat{x}_{t}, v_{t}]

        and

-        \tilde{A} = [A 0 0
-                     KG A-KG KH
-                     0 0 0]
+        .. math::

-        \tilde{C} = [C 0
-                     0 0
-                     0 I]
+            \tilde{A} =
+            \begin{bmatrix}
+            A & 0 & 0 \\
+            KG & A-KG & KH \\
+            0 & 0 & 0 \\
+            \end{bmatrix}

-        \tilde{G} = [G -G H]
+        .. math::

-        with A, C, G, H coming from the linear state space system
-        that defines the Kalman instance
+            \tilde{C} =
+            \begin{bmatrix}
+            C & 0 \\
+            0 & 0 \\
+            0 & I \\
+            \end{bmatrix}

+        .. math::
+
+            \tilde{G} =
+            \begin{bmatrix}
+            G & -G & H \\
+            \end{bmatrix}
+
+        with :math:`A, C, G, H` coming from the linear state space system
+        that defines the Kalman instance

        Returns
        -------
@@ -147,18 +170,19 @@ def whitener_lss(self):

        return whitened_lss

-
    def prior_to_filtered(self, y):
        r"""
        Updates the moments (x_hat, Sigma) of the time t prior to the
-        time t filtering distribution, using current measurement y_t.
+        time t filtering distribution, using current measurement :math:`y_t`.
        The updates are according to

-            x_{hat}^F = x_{hat} + Sigma G' (G Sigma G' + R)^{-1}
-                (y - G x_{hat})
-            Sigma^F = Sigma - Sigma G' (G Sigma G' + R)^{-1} G
-                Sigma
+        .. math::
+
+            \hat{x}^F = \hat{x} + \Sigma G' (G \Sigma G' + R)^{-1}
+                (y - G \hat{x})
+            \Sigma^F = \Sigma - \Sigma G' (G \Sigma G' + R)^{-1} G
+                \Sigma

        Parameters
        ----------
@@ -210,15 +234,15 @@ def update(self, y):

    def stationary_values(self):
        """
-        Computes the limit of Sigma_t as t goes to infinity by
-        solving the associated Riccati equation. Computation is via the
+        Computes the limit of :math:`\Sigma_t` as t goes to infinity by
+        solving the associated Riccati equation. Computation is via the
        doubling algorithm (see the documentation in
        `matrix_eqn.solve_discrete_riccati`).

        Returns
        -------
        Sigma_infinity : array_like or scalar(float)
-            The infinite limit of Sigma_t
+            The infinite limit of :math:`\Sigma_t`
        K_infinity : array_like or scalar(float)
            The stationary Kalman gain.

diff --git a/quantecon/lae.py b/quantecon/lae.py
index c8d9204f0..5c512af1e 100644
--- a/quantecon/lae.py
+++ b/quantecon/lae.py
@@ -1,4 +1,4 @@
-"""
+r"""
 Filename: lae.py

 Authors: Thomas J. Sargent, John Stachurski,

@@ -11,12 +11,12 @@

    \frac{1}{n} \sum_{i=0}^n p(X_{t-1}^i, y)

-This is a density in y.
+This is a density in :math:`y`.

 References
 ----------

-http://quant-econ.net/py/stationary_densities.html
+https://lectures.quantecon.org/py/stationary_densities.html

 """

 from textwrap import dedent
diff --git a/quantecon/lqcontrol.py b/quantecon/lqcontrol.py
index 58a688215..13583f633 100644
--- a/quantecon/lqcontrol.py
+++ b/quantecon/lqcontrol.py
@@ -19,36 +19,50 @@ class LQ(object):
     This class is for analyzing linear quadratic optimal control
     problems of either the infinite horizon form

-    . min E sum_{t=0}^{\infty} beta^t r(x_t, u_t)
+    .. math::
+
+        \min \mathbb{E}
+        \Big[ \sum_{t=0}^{\infty} \beta^t r(x_t, u_t) \Big]

     with

+    .. math::
+
        r(x_t, u_t) := x_t' R x_t + u_t' Q u_t + 2 u_t' N x_t

     or the finite horizon form

-    min E sum_{t=0}^{T-1} beta^t r(x_t, u_t) + beta^T x_T' R_f x_T
+    .. math::
+
+        \min \mathbb{E}
+        \Big[
+        \sum_{t=0}^{T-1} \beta^t r(x_t, u_t) + \beta^T x_T' R_f x_T
+        \Big]

     Both are minimized subject to the law of motion

+    .. math::
+
        x_{t+1} = A x_t + B u_t + C w_{t+1}

-    Here x is n x 1, u is k x 1, w is j x 1 and the matrices are
-    conformable for these dimensions. The sequence {w_t} is assumed to
-    be white noise, with zero mean and E w_t w_t = I, the j x j
-    identity.
+    Here :math:`x` is n x 1, :math:`u` is k x 1, :math:`w` is j x 1 and the
+    matrices are conformable for these dimensions. The sequence :math:`{w_t}`
+    is assumed to be white noise, with zero mean and
+    :math:`\mathbb{E} [ w_t w_t' ] = I`, the j x j identity.

-    If C is not supplied as a parameter, the model is assumed to be
-    deterministic (and C is set to a zero matrix of appropriate
+    If :math:`C` is not supplied as a parameter, the model is assumed to be
+    deterministic (and :math:`C` is set to a zero matrix of appropriate
     dimension).

-    For this model, the time t value (i.e., cost-to-go) function V_t
+    For this model, the time t value (i.e., cost-to-go) function :math:`V_t`
     takes the form

+    .. math::
+
        x' P_T x + d_T

-    and the optimal policy is of the form u_T = -F_T x_T. In
-    the infinite horizon case, V, P, d and F are all stationary.
+    and the optimal policy is of the form :math:`u_T = -F_T x_T`. In the
+    infinite horizon case, :math:`V, P, d` and :math:`F` are all stationary.
     Parameters
     ----------

@@ -87,9 +101,11 @@ class LQ(object):
     ----------
     Q, R, N, A, B, C, beta, T, Rf : see Parameters
     P : array_like(float)
-        P is part of the value function representation of V(x) = x'Px + d
+        P is part of the value function representation of
+        :math:`V(x) = x'Px + d`
     d : array_like(float)
-        d is part of the value function representation of V(x) = x'Px + d
+        d is part of the value function representation of
+        :math:`V(x) = x'Px + d`
     F : array_like(float)
        F is the policy rule that determines the choice of control in
        each period.
@@ -139,8 +155,8 @@ def __repr__(self):
    def __str__(self):
        m = """\
 Linear Quadratic control system
-  - beta (discount parameter) : {b}
-  - T (time horizon) : {t}
+  - beta (discount parameter)      : {b}
+  - T (time horizon)               : {t}
   - n (number of state variables) : {n}
   - k (number of control variables) : {k}
   - j (number of shocks) : {j}
@@ -154,12 +170,14 @@ def update_values(self):
        This method is for updating in the finite horizon case. It
        shifts the current value function

+        .. math::
+
            V_t(x) = x' P_t x + d_t

-        and the optimal policy F_t one step *back* in time,
-        replacing the pair P_t and d_t with
-        P_{t-1} and d_{t-1}, and F_t with
-        F_{t-1}
+        and the optimal policy :math:`F_t` one step *back* in time,
+        replacing the pair :math:`P_t` and :math:`d_t` with
+        :math:`P_{t-1}` and :math:`d_{t-1}`, and :math:`F_t` with
+        :math:`F_{t-1}`

        """
        # === Simplify notation === #
@@ -180,25 +198,27 @@ def update_values(self):

    def stationary_values(self):
        """
-        Computes the matrix P and scalar d that represent the value
-        function
+        Computes the matrix :math:`P` and scalar :math:`d` that represent
+        the value function
+
+        .. math::

            V(x) = x' P x + d

        in the infinite horizon case. Also computes the control matrix
-        F from u = - Fx
+        :math:`F` from :math:`u = - Fx`

        Returns
        -------
        P : array_like(float)
            P is part of the value function representation of
-            V(x) = xPx + d
+            :math:`V(x) = x'Px + d`
        F : array_like(float)
            F is the policy rule that determines the choice of control in
            each period.
        d : array_like(float)
            d is part of the value function representation of
-            V(x) = xPx + d
+            :math:`V(x) = x'Px + d`

        """
        # === simplify notation === #
@@ -224,8 +244,8 @@ def stationary_values(self):
    def compute_sequence(self, x0, ts_length=None):
        """
        Compute and return the optimal state and control sequences
-        x_0, ..., x_T and u_0,..., u_T under the
-        assumption that {w_t} is iid and N(0, 1).
+        :math:`x_0, ..., x_T` and :math:`u_0,..., u_T` under the
+        assumption that :math:`{w_t}` is iid and :math:`N(0, 1)`.

        Parameters
        ===========
@@ -238,13 +258,13 @@ def compute_sequence(self, x0, ts_length=None):

        Returns
        ========
        x_path : array_like(float)
-            An n x T+1 matrix, where the t-th column represents x_t
+            An n x T+1 matrix, where the t-th column represents :math:`x_t`

        u_path : array_like(float)
-            A k x T matrix, where the t-th column represents u_t
+            A k x T matrix, where the t-th column represents :math:`u_t`

        w_path : array_like(float)
-            A j x T+1 matrix, where the t-th column represent w_t
+            A j x T+1 matrix, where the t-th column represents :math:`w_t`

        """
diff --git a/quantecon/lss.py b/quantecon/lss.py
index 3cc2b91a4..90f6d93d8 100644
--- a/quantecon/lss.py
+++ b/quantecon/lss.py
@@ -1,6 +1,6 @@
 """
 Filename: lss.py
-Reference: http://quant-econ.net/py/linear_models.html
+Reference: https://lectures.quantecon.org/py/linear_models.html

 Computes quantities associated with the Gaussian linear state space model.
""" @@ -11,20 +11,25 @@ from scipy.linalg import solve from numba import jit + @jit def simulate_linear_model(A, x0, v, ts_length): - """ - This is a separate function for simulating a vector linear system of + r""" + This is a separate function for simulating a vector linear system of the form - - x_{t+1} = A x_t + v_t given x_0 = x0 - - Here x_t and v_t are both n x 1 and A is n x n. - - The purpose of separating this functionality out is to target it for + + .. math:: + + x_{t+1} = A x_t + v_t + + given :math:`x_0` = x0 + + Here :math:`x_t` and :math:`v_t` are both n x 1 and :math:`A` is n x n. + + The purpose of separating this functionality out is to target it for optimization by Numba. For the same reason, matrix multiplication is broken down into for loops. - + Parameters ---------- A : array_like or scalar(float) @@ -33,14 +38,14 @@ def simulate_linear_model(A, x0, v, ts_length): Should be n x 1. Initial condition v : np.ndarray Should be n x ts_length-1. Its t-th column is used as the time t - shock v_t + shock :math:`v_t` ts_length : int The length of the time series Returns -------- x : np.ndarray - Time series with ts_length columns, the t-th column being x_t + Time series with ts_length columns, the t-th column being :math:`x_t` """ A = np.asarray(A) n = A.shape[0] @@ -49,24 +54,27 @@ def simulate_linear_model(A, x0, v, ts_length): for t in range(ts_length-1): # x[:, t+1] = A.dot(x[:, t]) + v[:, t] for i in range(n): - x[i, t+1] = v[i, t] #Shock + x[i, t+1] = v[i, t] # Shock for j in range(n): - x[i, t+1] += A[i, j] * x[j, t] #Dot Product + x[i, t+1] += A[i, j] * x[j, t] # Dot Product return x class LinearStateSpace(object): - """ + r""" A class that describes a Gaussian linear state space model of the form: + .. math:: + x_{t+1} = A x_t + C w_{t+1} y_t = G x_t + H v_t - where {w_t} and {v_t} are independent and standard normal with dimensions - k and l respectively. The initial conditions are mu_0 and Sigma_0 for x_0 - ~ N(mu_0, Sigma_0). When Sigma_0=0, the draw of x_0 is exactly mu_0. + where :math:`{w_t}` and :math:`{v_t}` are independent and standard normal + with dimensions k and l respectively. The initial conditions are + :math:`\mu_0` and :math:`\Sigma_0` for :math:`x_0 \sim N(\mu_0, \Sigma_0)`. + When :math:`\Sigma_0=0`, the draw of :math:`x_0` is exactly :math:`\mu_0`. Parameters ---------- @@ -94,8 +102,8 @@ class LinearStateSpace(object): def __init__(self, A, C, G, H=None, mu_0=None, Sigma_0=None): self.A, self.G, self.C = list(map(self.convert, (A, G, C))) - #-Check Input Shapes-# - ni,nj = self.A.shape + # = Check Input Shapes = # + ni, nj = self.A.shape if ni != nj: raise ValueError("Matrix A (shape: %s) needs to be square" % (self.A.shape)) if ni != self.C.shape[0]: @@ -114,7 +122,7 @@ def __init__(self, A, C, G, H=None, mu_0=None, Sigma_0=None): self.mu_0 = np.zeros((self.n, 1)) else: self.mu_0 = self.convert(mu_0) - self.mu_0.shape = self.n, 1 + self.mu_0.shape = self.n, 1 if Sigma_0 is None: self.Sigma_0 = np.zeros((self.n, self.n)) else: @@ -141,10 +149,12 @@ def convert(self, x): return np.atleast_2d(np.asarray(x, dtype='float')) def simulate(self, ts_length=100): - """ + r""" Simulate a time series of length ts_length, first drawing - x_0 ~ N(mu_0, Sigma_0) + .. 
math:: + + x_0 \sim N(\mu_0, \Sigma_0) Parameters ---------- @@ -155,17 +165,17 @@ def simulate(self, ts_length=100): Returns ------- x : array_like(float) - An n x ts_length array, where the t-th column is x_t + An n x ts_length array, where the t-th column is :math:`x_t` y : array_like(float) - A k x ts_length array, where the t-th column is y_t + A k x ts_length array, where the t-th column is :math:`y_t` """ x0 = multivariate_normal(self.mu_0.flatten(), self.Sigma_0) w = np.random.randn(self.m, ts_length-1) - v = self.C.dot(w) # Multiply each w_t by C to get v_t = C w_t + v = self.C.dot(w) # Multiply each w_t by C to get v_t = C w_t # == simulate time series == # x = simulate_linear_model(self.A, x0, v, ts_length) - + if self.H is not None: v = np.random.randn(self.l, ts_length) y = self.G.dot(x) + self.H.dot(v) @@ -175,9 +185,9 @@ def simulate(self, ts_length=100): return x, y def replicate(self, T=10, num_reps=100): - """ - Simulate num_reps observations of x_T and y_T given - x_0 ~ N(mu_0, Sigma_0). + r""" + Simulate num_reps observations of :math:`x_T` and :math:`y_T` given + :math:`x_0 \sim N(\mu_0, \Sigma_0)`. Parameters ---------- @@ -190,11 +200,11 @@ def replicate(self, T=10, num_reps=100): ------- x : array_like(float) An n x num_reps array, where the j-th column is the j_th - observation of x_T + observation of :math:`x_T` y : array_like(float) A k x num_reps array, where the j-th column is the j_th - observation of y_T + observation of :math:`y_T` """ x = np.empty((self.n, num_reps)) @@ -210,12 +220,12 @@ def replicate(self, T=10, num_reps=100): return x, y def moment_sequence(self): - """ + r""" Create a generator to calculate the population mean and - variance-convariance matrix for both x_t and y_t, starting at - the initial condition (self.mu_0, self.Sigma_0). Each iteration - produces a 4-tuple of items (mu_x, mu_y, Sigma_x, Sigma_y) for - the next period. + variance-convariance matrix for both :math:`x_t` and :math:`y_t` + starting at the initial condition (self.mu_0, self.Sigma_0). + Each iteration produces a 4-tuple of items (mu_x, mu_y, Sigma_x, + Sigma_y) for the next period. Yields ------ @@ -250,10 +260,10 @@ def moment_sequence(self): Sigma_x = A.dot(Sigma_x).dot(A.T) + C.dot(C.T) def stationary_distributions(self, max_iter=200, tol=1e-5): - """ - Compute the moments of the stationary distributions of x_t and - y_t if possible. Computation is by iteration, starting from the - initial conditions self.mu_0 and self.Sigma_0 + r""" + Compute the moments of the stationary distributions of :math:`x_t` and + :math:`y_t` if possible. 
+        the initial conditions self.mu_0 and self.Sigma_0

        Parameters
        ----------
@@ -265,15 +275,15 @@ def stationary_distributions(self, max_iter=200, tol=1e-5):
        Returns
        -------
        mu_x_star : array_like(float)
-            An n x 1 array representing the stationary mean of x_t
+            An n x 1 array representing the stationary mean of :math:`x_t`
        mu_y_star : array_like(float)
-            An k x 1 array representing the stationary mean of y_t
+            An k x 1 array representing the stationary mean of :math:`y_t`
        Sigma_x_star : array_like(float)
            An n x n array representing the stationary var-cov matrix
-            of x_t
+            of :math:`x_t`
        Sigma_y_star : array_like(float)
            An k x k array representing the stationary var-cov matrix
-            of y_t
+            of :math:`y_t`

        """
        # == Initialize iteration == #
@@ -304,12 +314,14 @@ def stationary_distributions(self, max_iter=200, tol=1e-5):

        return mu_x_star, mu_y_star, Sigma_x_star, Sigma_y_star

    def geometric_sums(self, beta, x_t):
-        """
+        r"""
        Forecast the geometric sums

-            S_x := E [sum_{j=0}^{\infty} beta^j x_{t+j} | x_t ]
+        .. math::
+
+            S_x := E \Big[ \sum_{j=0}^{\infty} \beta^j x_{t+j} | x_t \Big]

-            S_y := E [sum_{j=0}^{\infty} beta^j y_{t+j} | x_t ]
+            S_y := E \Big[ \sum_{j=0}^{\infty} \beta^j y_{t+j} | x_t \Big]

        Parameters
        ----------
@@ -328,6 +340,7 @@ def geometric_sums(self, beta, x_t):
            Geometric sum as defined above

        """
+
        I = np.identity(self.n)
        S_x = solve(I - beta * self.A, x_t)
        S_y = self.G.dot(S_x)
@@ -335,15 +348,15 @@ def geometric_sums(self, beta, x_t):

        return S_x, S_y

    def impulse_response(self, j=5):
-        """
+        r"""
        Pulls off the imuplse response coefficients to a shock
-        in w_{t} for x and y
+        in :math:`w_{t}` for :math:`x` and :math:`y`

        Important to note: We are uninterested in the shocks to
        v for this method

-        * x coefficients are C, AC, A^2 C...
-        * y coefficients are GC, GAC, GA^2C...
+        * :math:`x` coefficients are :math:`C, AC, A^2 C...`
+        * :math:`y` coefficients are :math:`GC, GAC, GA^2C...`

        Parameters
        ----------
@@ -371,4 +384,3 @@ def impulse_response(self, j=5):
            Apower = np.dot(Apower, A)

        return xcoef, ycoef
-
diff --git a/quantecon/markov/approximation.py b/quantecon/markov/approximation.py
index cf842ab87..53f0e0e30 100644
--- a/quantecon/markov/approximation.py
+++ b/quantecon/markov/approximation.py
@@ -17,22 +17,47 @@

 def rouwenhorst(n, ybar, sigma, rho):
-    """
-    Takes as inputs n, p, q, psi. It will then construct a markov chain
+    r"""
+    Takes as inputs n, p, q, psi. It will then construct a markov chain
     that estimates an AR(1) process of:

-    y_t = \bar{y} + \rho y_{t-1} + \varepsilon_t
-    where \varepsilon_t is i.i.d. normal of mean 0, std dev of sigma
+    :math:`y_t = \bar{y} + \rho y_{t-1} + \varepsilon_t`
+    where :math:`\varepsilon_t` is i.i.d. normal of mean 0, std dev of sigma

     The Rouwenhorst approximation uses the following recursive defintion
     for approximating a distribution:

-    theta_2 = [p    , 1 - p]
-              [1 - q, q    ]
+    .. math::
+
+        \theta_2 =
+        \begin{bmatrix}
+        p & 1 - p \\
+        1 - q & q \\
+        \end{bmatrix}
+
+    .. math::
+
+        \theta_{n+1} =
+        p
+        \begin{bmatrix}
+        \theta_n & 0 \\
+        0 & 0 \\
+        \end{bmatrix}
+        + (1 - p)
+        \begin{bmatrix}
+        0 & \theta_n \\
+        0 & 0 \\
+        \end{bmatrix}
+        + q
+        \begin{bmatrix}
+        0 & 0 \\
+        \theta_n & 0 \\
+        \end{bmatrix}
+        + (1 - q)
+        \begin{bmatrix}
+        0 & 0 \\
+        0 & \theta_n \\
+        \end{bmatrix}

-    theta_{n+1} = p [theta_n, 0] + (1 - p) [0, theta_n]
-                    [0      , 0]           [0,       0]
-                  + q [0      , 0] + (1 - q) [0,        ]
-                      [theta_n, 0]           [0, theta_n]

     Parameters
     ----------
     n : int
        The number of points to approximate the distribution

     ybar : float
-        The value \bar{y} in the process. Note that the mean of this
-        AR(1) process, y, is simply ybar/(1 - rho)
+        The value :math:`\bar{y}` in the process. Note that the mean of this
+        AR(1) process, :math:`y`, is simply :math:`\bar{y}/(1 - \rho)`

     sigma : float
-        The value of the standard deviation of the \varepsilon process
+        The value of the standard deviation of the :math:`\varepsilon` process

     rho : float
        By default this will be 0, but if you are approximating an AR(1)
@@ -52,11 +77,11 @@ def rouwenhorst(n, ybar, sigma, rho):

     Returns
     -------
-
+
     mc : MarkovChain
-        An instance of the MarkovChain class that stores the transition
+        An instance of the MarkovChain class that stores the transition
        matrix and state values returned by the discretization method
-
+
     """

     # Get the standard deviation of y
@@ -79,6 +104,7 @@ def row_build_mat(n, p, q):
        """
        This method uses the values of p and q to build the transition
        matrix for the rouwenhorst method
+
        """

        if n == 2:
@@ -114,14 +140,16 @@ def row_build_mat(n, p, q):


 def tauchen(rho, sigma_u, m=3, n=7):
-    """
+    r"""
     Computes a Markov chain associated with a discretized version of
     the linear Gaussian AR(1) process

-        y_{t+1} = rho * y_t + u_{t+1}
+    .. math::
+
+        y_{t+1} = \rho y_t + u_{t+1}

-    using Tauchen's method. Here {u_t} is an iid Gaussian process with zero
-    mean.
+    using Tauchen's method. Here :math:`{u_t}` is an i.i.d. Gaussian process
+    with zero mean.

     Parameters
     ----------
@@ -138,7 +166,7 @@ def tauchen(rho, sigma_u, m=3, n=7):
     -------

     mc : MarkovChain
-        An instance of the MarkovChain class that stores the transition
+        An instance of the MarkovChain class that stores the transition
        matrix and state values returned by the discretization method

     """
diff --git a/quantecon/matrix_eqn.py b/quantecon/matrix_eqn.py
index 322a5571d..b546ab820 100644
--- a/quantecon/matrix_eqn.py
+++ b/quantecon/matrix_eqn.py
@@ -26,14 +26,16 @@ def solve_discrete_lyapunov(A, B, max_it=50, method="doubling"):

        AXA' - X + B = 0

-    X is computed by using a doubling algorithm. In particular, we
-    iterate to convergence on X_j with the following recursions for j =
-    1, 2,... starting from X_0 = B, a_0 = A:
+    :math:`X` is computed by using a doubling algorithm. In particular, we
+    iterate to convergence on :math:`X_j` with the following recursions for
+    :math:`j = 1, 2, \dots` starting from :math:`X_0 = B`, :math:`a_0 = A`:

     .. math::

        a_j = a_{j-1} a_{j-1}

+    .. math::
+
        X_j = X_{j-1} + a_{j-1} X_{j-1} a_{j-1}'

     Parameters
     ----------
@@ -57,7 +59,7 @@ def solve_discrete_lyapunov(A, B, max_it=50, method="doubling"):
     Returns
     ========
     gamma1: array_like(float, ndim=2)
-        Represents the value V
+        Represents the value :math:`V`

     """
     if method == "doubling":
@@ -97,14 +99,17 @@ def solve_discrete_riccati(A, B, Q, R, N=None, tolerance=1e-10, max_iter=500):
     """
     Solves the discrete-time algebraic Riccati equation

+    .. math::
+
        X = A'XA - (N + B'XA)'(B'XB + R)^{-1}(N + B'XA) + Q

-    via a modified structured doubling algorithm. An explanation of the
+    via a modified structured doubling algorithm. An explanation of the
     algorithm can be found in the reference below.

-    Note that SciPy also has a discrete riccati equation solver. However it
-    cannot handle the case where R is not invertible, or when N is nonzero.
-    Both of these cases can be handled in the algorithm implemented below.
+    Note that SciPy also has a discrete riccati equation solver. However it
+    cannot handle the case where :math:`R` is not invertible, or when :math:`N`
+    is nonzero. Both of these cases can be handled in the algorithm implemented
+    below.

     Parameters
     ----------
diff --git a/quantecon/quadsums.py b/quantecon/quadsums.py
index cf23cd008..4130806db 100644
--- a/quantecon/quadsums.py
+++ b/quantecon/quadsums.py
@@ -21,10 +21,10 @@ def var_quadratic_sum(A, C, H, beta, x0):

     .. math::

-        q(x_0) = E \sum_{t=0}^{\infty} \beta^t x_t' H x_t
+        q(x_0) = \mathbb{E} \Big[ \sum_{t=0}^{\infty} \beta^t x_t' H x_t \Big]

-    Here {x_t} is the VAR process x_{t+1} = A x_t + C w_t with {w_t}
-    standard normal and x_0 the initial condition.
+    Here :math:`{x_t}` is the VAR process :math:`x_{t+1} = A x_t + C w_t`
+    with :math:`{w_t}` standard normal and :math:`x_0` the initial condition.

     Parameters
     ----------
@@ -43,13 +43,14 @@ def var_quadratic_sum(A, C, H, beta, x0):

     Returns
     -------
     q0: scalar(float)
-        Represents the value q(x_0)
+        Represents the value :math:`q(x_0)`

-    Remarks: The formula for computing q(x_0) is q(x_0) = x_0' Q x_0 + v
+    Remarks: The formula for computing :math:`q(x_0)` is
+    :math:`q(x_0) = x_0' Q x_0 + v`
     where

-    Q is the solution to Q = H + beta A' Q A and
-    v = \trace(C' Q C) \beta / (1 - \beta)
+    * :math:`Q` is the solution to :math:`Q = H + \beta A' Q A`, and
+    * :math:`v = \frac{trace(C' Q C) \beta}{(1 - \beta)}`

     """
     # == Make sure that A, C, H and x0 are array_like == #
@@ -81,11 +82,11 @@ def m_quadratic_sum(A, B, max_it=50):
     ----------
     A : array_like(float, ndim=2)
        An n x n matrix as described above. We assume in order for
-        convergence that the eigenvalues of A have moduli bounded by
+        convergence that the eigenvalues of :math:`A` have moduli bounded by
        unity
     B : array_like(float, ndim=2)
        An n x n matrix as described above. We assume in order for
-        convergence that the eigenvalues of A have moduli bounded by
+        convergence that the eigenvalues of :math:`A` have moduli bounded by
        unity
     max_it : scalar(int), optional(default=50)
        The maximum number of iterations
@@ -93,7 +94,7 @@ def m_quadratic_sum(A, B, max_it=50):
     Returns
     ========
     gamma1: array_like(float, ndim=2)
-        Represents the value V
+        Represents the value :math:`V`

     """
diff --git a/quantecon/robustlq.py b/quantecon/robustlq.py
index e5e24b426..f40bcdcbe 100644
--- a/quantecon/robustlq.py
+++ b/quantecon/robustlq.py
@@ -23,7 +23,7 @@ class RBLQ(object):

     .. math::

-        min_{u_t} sum_t beta^t {x_t' R x_t + u_t' Q u_t }
+        \min_{u_t} \sum_t \beta^t {x_t' R x_t + u_t' Q u_t }

     subject to

@@ -96,7 +96,7 @@ def d_operator(self, P):

        .. math::

-            D(P) := P + PC(theta I - C'PC)^{-1} C'P.
+            D(P) := P + PC(\theta I - C'PC)^{-1} C'P.

        Parameters
        ----------
@@ -124,13 +124,13 @@ def b_operator(self, P):

        .. math::

-            B(P) := R - beta^2 A'PB(Q + beta B'PB)^{-1}B'PA + beta A'PA
+            B(P) := R - \beta^2 A'PB(Q + \beta B'PB)^{-1}B'PA + \beta A'PA

        and also returning

        .. math::

-            F := (Q + beta B'PB)^{-1} beta B'PA
+            F := (Q + \beta B'PB)^{-1} \beta B'PA

        Parameters
        ----------
@@ -165,7 +165,7 @@ def robust_rule(self):

            u_t = - F x_t

-        And the value function is -x'Px
+        And the value function is :math:`-x'Px`

        Returns
        -------
@@ -299,10 +299,20 @@ def K_to_F(self, K):

        return F, P

    def compute_deterministic_entropy(self, F, K, x0):
-        """
+        r"""
        Given K and F, compute the value of deterministic entropy, which
-        is sum_t beta^t x_t' K'K x_t with x_{t+1} = (A - BF + CK) x_t.
+        is
+
+        .. math::
+
+            \sum_t \beta^t x_t' K'K x_t
+
+        with
+
+        .. math::
+
+            x_{t+1} = (A - BF + CK) x_t

        Parameters
        ----------
@@ -328,9 +338,9 @@ def compute_deterministic_entropy(self, F, K, x0):

    def evaluate_F(self, F):
        """
-        Given a fixed policy F, with the interpretation u = -F x, this
-        function computes the matrix P_F and constant d_F associated
-        with discounted cost J_F(x) = x' P_F x + d_F.
+        Given a fixed policy F, with the interpretation :math:`u = -F x`, this
+        function computes the matrix :math:`P_F` and constant :math:`d_F`
+        associated with discounted cost :math:`J_F(x) = x' P_F x + d_F`.

        Parameters
        ----------
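
Reviewer notes (not part of the patch). A quick sanity check of the `periodogram` interface documented in the estspec.py hunks above: the signature and the (w, I_w) return pair are taken from the diff; the white-noise example itself is an illustrative assumption.

# Sketch: periodogram of a white noise series; frequencies should lie in [0, pi].
import numpy as np
from quantecon.estspec import periodogram

x = np.random.randn(400)                      # white noise sample
w, I_w = periodogram(x, window='hanning', window_len=7)
assert w.min() >= 0 and w.max() <= np.pi + 1e-12   # documented frequency range
print(w.shape, I_w.shape)                     # one value I(w_j) per frequency w_j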
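The lss.py hunks document the `LinearStateSpace(A, C, G, H=None, mu_0=None, Sigma_0=None)` constructor and its `simulate` and `stationary_distributions` methods; a minimal usage sketch under those signatures (the scalar AR(1) example is illustrative, not from the patch) might look like:

# Sketch: scalar AR(1) x_{t+1} = 0.9 x_t + w_{t+1} written as a linear state space model.
import numpy as np
from quantecon.lss import LinearStateSpace

A, C, G = [[0.9]], [[1.0]], [[1.0]]
ss = LinearStateSpace(A, C, G)                # H, mu_0, Sigma_0 left at their defaults
x, y = ss.simulate(ts_length=200)             # x is n x 200, y is k x 200
mu_x, mu_y, Sigma_x, Sigma_y = ss.stationary_distributions()
print(Sigma_x)                                # should be close to 1 / (1 - 0.9**2)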
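Similarly for the discretization routines in markov/approximation.py: the `tauchen(rho, sigma_u, m=3, n=7)` signature comes from the diff, while the attribute names `P` and `state_values` on the returned MarkovChain are assumptions (the docstring only says the transition matrix and state values are stored).

# Sketch: discretize y_{t+1} = 0.9 y_t + u_{t+1} on a 7-point grid.
import numpy as np
from quantecon.markov.approximation import tauchen

mc = tauchen(rho=0.9, sigma_u=1.0, m=3, n=7)
print(mc.state_values)                        # assumed attribute: the 7 grid points
assert np.allclose(mc.P.sum(axis=1), 1.0)     # each row of the transition matrix sums to 1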
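Finally, the matrix_eqn.py docstring states that `solve_discrete_lyapunov(A, B)` returns the X solving AXA' - X + B = 0, which is easy to verify numerically (illustrative check only):

# Sketch: verify the discrete Lyapunov solution against its defining equation.
import numpy as np
from quantecon.matrix_eqn import solve_discrete_lyapunov

A = np.array([[0.5, 0.1],
              [0.0, 0.4]])
B = np.eye(2)
X = solve_discrete_lyapunov(A, B)
assert np.allclose(A @ X @ A.T - X + B, 0.0)  # residual of the Lyapunov equation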