Undergraduate Computational Macro
We have seen deterministic processes in previous lectures, e.g. the linear map
\[ X_{t+1} = a X_t + b \]
But many processes in the real world involve randomness
For a discrete-valued random variable taking values \(x_1, \ldots, x_N\) with probabilities \(p_1, \ldots, p_N\)
\[ \mathbb{E}[f(X)] = \sum_{i=1}^N f(x_i) p_i \]
For a continuous-valued random variable with density \(p(x)\) \[ \mathbb{E}[f(X)] = \int_{-\infty}^\infty f(x) p(x) dx \]
Let \(X_1, X_2, \ldots\) be independent and identically distributed (iid) random variables with mean \(\mu \equiv \mathbb{E}(X) < \infty\), then let \[ \bar{X}_n \equiv \frac{1}{n} \sum_{i=1}^n X_i \]
One version is Kolmogorov’s Strong Law of Large Numbers \[ \mathbb{P} \left( \lim_{n \rightarrow \infty} \bar{X}_n = \mu \right) = 1 \]
function ksl(distribution, n = 100)
    title = nameof(typeof(distribution))
    observations = rand(distribution, n)
    sample_means = cumsum(observations) ./ (1:n)
    mu = mean(distribution)
    # vertical grey lines from 0 to each observation
    plot(repeat((1:n)', 2), [zeros(1, n); observations']; title, xlabel = "n",
         label = "", color = :grey, alpha = 0.5)
    plot!(1:n, observations; color = :grey, markershape = :circle,
          alpha = 0.5, label = "", linewidth = 0)
    if !isnan(mu) # e.g. mean(Cauchy()) is NaN
        hline!([mu], color = :black, linewidth = 1.5, linestyle = :dash,
               grid = false, label = L"\mathbb{E}[X]")
    end
    return plot!(1:n, sample_means; linewidth = 3, alpha = 0.6,
                 color = :green, label = L"\bar{X}_n")
end
ksl (generic function with 2 methods)
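For example, we can call it on distributions with finite means (a usage sketch; Plots, Distributions, and LaTeXStrings are assumed to be loaded):

ksl(Normal())
ksl(Exponential(2))

In contrast, the Cauchy distribution has no expectation, so the sample means never settle down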
Random.seed!(0); # reproducible results
n = 1000 # number of draws (value assumed)
dist = Cauchy() # Doesn't have an expectation!
sample_mean = cumsum(rand(dist, n)) ./ (1:n)
plot(1:n, sample_mean, color = :red, alpha = 0.6, label = L"\bar{X}_n",
     xlabel = "n", linewidth = 3)
hline!([0], color = :black, linestyle = :dash, label = "", grid = false)
One application of the LLN is the numerical calculation of expectations, often called Monte Carlo integration
Let \(X\) be a random variable with density \(p(x)\), and hence \(\mathbb{E}[f(X)] = \int_{-\infty}^\infty f(x) p(x) dx\) (or \(\sum_{i=1}^N f(x_i) p_i\) if discrete)
These integrals are often difficult to calculate analytically, but if we can generate iid draws \(x_1, \ldots, x_n \sim p\), then we can approximate the expectation by
\[ \mathbb{E}[f(X)] \approx \frac{1}{n} \sum_{i=1}^n f(x_i) \]
Then by the LLN this converges to the true expectation as \(n \rightarrow \infty\)
N = 500
dist = Binomial(10, 0.5) # an example discrete distribution
vals = support(dist) # 0:10
p = pdf.(dist, vals) # PMF over the support
# expectation with the PMF, then Monte Carlo, for f(x) = log(x + 1)
f_expec = dot(log.(vals .+ 1), p)
x_draws = rand(dist, N)
f_x_draws = log.(x_draws .+ 1)
f_expec_mc = sum(f_x_draws) / N
@show f_expec, f_expec_mc
# Cumulative sums divided by the running number of draws
f_means = cumsum(f_x_draws) ./ (1:N)
plot(1:length(f_means), f_means;
     label = L"\frac{1}{n}\sum_{i=1}^n f(x_i)",
     xlabel = "n", ylabel = L"\bar{f}(X)",
     size = (600, 400))
hline!([f_expec]; linestyle = :dash,
       label = L"\mathbb{E}[f(X)]")
(f_expec, f_expec_mc) = (1.7526393207741702, 1.7552834928857293)
Now add an iid shock to the linear map
\[ X_{t+1} = a X_t + b + c W_{t+1} \]
If \(X_t \sim \mathcal{N}(\mu_t, v_t)\), then \(X_{t+1} \sim \mathcal{N}(a \mu_t + b, a^2 v_t + c^2)\), so the mean and variance themselves follow a deterministic linear map. Stacking \(x_t \equiv \begin{bmatrix} \mu_t \\ v_t \end{bmatrix}\), we can reuse the iterate_map function to work with vectors\[ x_{t+1} = \underbrace{\begin{bmatrix} a & 0 \\ 0 & a^2 \end{bmatrix}}_{\equiv A} x_t + \underbrace{\begin{bmatrix} b \\ c^2 \end{bmatrix}}_{\equiv B} \]
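The code below relies on the iterate_map helper from earlier lectures; a minimal sketch of a version that works with vectors, assuming the iterates \(x_0, \ldots, x_T\) are stored as columns of a matrix (plotting distribution objects, as below, also assumes StatsPlots is loaded):

function iterate_map(f, x0, T)
    x = zeros(length(x0), T + 1)
    x[:, 1] = x0
    for t in 2:(T + 1)
        x[:, t] = f(x[:, t - 1])
    end
    return x
end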
a, b, c = 0.9, 0.1, 0.05 # assumed values with |a| < 1, so a stationary distribution exists
A = [a 0; 0 a^2]
B = [b; c^2]
x_0 = [-1.0, 0.1] # initial mean and (tight) variance
T = 10
f(x) = A * x + B
x = iterate_map(f, x_0, T)
x_star = fixedpoint(f, x_0).zero # fixed point of the map, via NLsolve.jl
plt = plot(Normal(x_star[1], sqrt(x_star[2]));
           label = L"\psi^*",
           style = :dash,
           size = (600, 400))
for t in 1:T
    dist = Normal(x[1, t], sqrt(x[2, t]))
    plot!(plt, dist, label = L"\psi_{%$(t-1)}",
          linealpha = 0.7)
end
plt
Take \(X_{t+1} = a X_t + b + c W_{t+1}\) with \(|a| < 1\) and \(W_{t+1} \sim \mathcal{N}(0, 1)\)
Recall: if \(X_t \sim \mathcal{N}(\mu_t, v_t) \equiv \psi_t\), then using properties of Normals, \(X_{t+1} \sim \mathcal{N}(a \mu_t + b,\, a^2 v_t + c^2)\)
The candidate stationary distribution is \(\psi^* = \mathcal{N}\left(\frac{b}{1-a}, \frac{c^2}{1-a^2}\right)\). Applying the evolution equation to \(\psi^*\), we can verify that it is a fixed point:
\[ \mathcal{N}\left(a \frac{b}{1-a} + b,\; a^2 \frac{c^2}{1-a^2} + c^2\right) = \mathcal{N}\left(\frac{b}{1-a}, \frac{c^2}{1-a^2}\right) \]
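A quick numerical check of this fixed-point algebra (reusing the parameter values assumed above):

a, b, c = 0.9, 0.1, 0.05
mu_star = b / (1 - a)
v_star = c^2 / (1 - a^2)
@show a * mu_star + b ≈ mu_star
@show a^2 * v_star + c^2 ≈ v_star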
a, b, c = 1.1, 0.2, 0.25 # |a| > 1: explosive, no stationary distribution
A = [a 0; 0 a^2]
B = [b; c^2]
f(x) = A * x + B
T = 15
x = iterate_map(f, [0.0, 0.1], T)
plt = plot(Normal(x[1, end], sqrt(x[2, end]));
           label = L"\psi_{%$T}",
           size = (600, 400))
for t in 1:5
    dist = Normal(x[1, t], sqrt(x[2, t]))
    plot!(plt, dist, label = L"\psi_{%$(t-1)}",
          linealpha = 0.7)
end
plt
a, b, c = 1.0, 0.0, 0.25 # a = 1: unit root, the variance grows without bound
A = [a 0; 0 a^2]
B = [b; c^2]
f(x) = A * x + B
T = 15
x = iterate_map(f, [0.0, 0.1], T)
plt = plot(Normal(x[1, end], sqrt(x[2, end]));
           label = L"\psi_{%$T}",
           size = (600, 400))
for t in 1:5
    dist = Normal(x[1, t], sqrt(x[2, t]))
    plot!(plt, dist, label = L"\psi_{%$(t-1)}",
          linealpha = 0.7)
end
plt
function iterate_map_iid(f, dist, x0, T)
    x = zeros(T + 1)
    x[1] = x0
    for t in 2:(T + 1)
        x[t] = f(x[t - 1], rand(dist)) # apply the map with a fresh iid draw
    end
    return x
end
a, b, c = 0.9, 0.1, 0.05
x_0 = 0.5
T = 5
h(x, W) = a * x + b + c * W # iterate given random shock
x = iterate_map_iid(h, Normal(), x_0, T)
6-element Vector{Float64}:
0.5
0.5252717486805177
0.5306225876900339
0.46819901566492783
0.532032538532688
0.583020976850554
Random.seed!(20)
x_0 = rand(Normal(b / (1 - a), sqrt(c^2 / (1 - a^2)))) # draw X_0 from the stationary distribution
x = iterate_map_iid(h, Normal(), x_0, T)
x_means = cumsum(x) ./ (1:(T + 1))
plot(0:T, x_means;
     label = L"\frac{1}{t+1}\sum_{s=0}^{t} X_s",
     xlabel = "t", size = (600, 400))
hline!([b / (1 - a)], color = :black,
       linestyle = :dash,
       label = L"\mathbb{E}[X]")
Random.seed!(20)
a, b, c = 0.99, 0.01, 0.05 # persistence close to a unit root, so convergence is slow
h(x, W) = a * x + b + c * W
T = 2000
x_0 = 0.5
x = iterate_map_iid(h, Normal(), x_0, T)
x_means = cumsum(x) ./ (1:(T + 1))
plot(0:T, x_means;
     label = L"\frac{1}{t+1}\sum_{s=0}^{t} X_s",
     xlabel = "t", size = (600, 400))
hline!([b / (1 - a)], color = :black,
       linestyle = :dash,
       label = L"\mathbb{E}[X]")
The distribution of \(X_t\) then depends on the distribution of \(X_0\) and on a weighted sum of the \(t\) iid shocks \(W_1, \ldots, W_t\)
If \(X_0\) and the \(W_t\) are normal, then \(X_t\) is normal, since it is a linear combination of normals \[ X_t = a^t X_0 + b \frac{1-a^t}{1-a} + c \sum_{j=0}^{t-1} a^j W_{t-j} \]
X_0 = 0.5 # degenerate prior
a, b, c = 0.9, 0.1, 0.05
A = [a 0; 0 a^2]
B = [b; c^2]
T = 20
num_samples = 10000
Xs = iterate_map(x -> A * x + B, [X_0, 0], T)
X_T = Normal(Xs[1, end], sqrt(Xs[2, end]))
W = randn(num_samples, T)
# Comprehensions and generators example; looks like the math
X_T_samples = [a^T * X_0 + b * (1 - a^T) / (1 - a) +
               c * sum(a^j * W[i, T - j] for j in 0:(T - 1))
               for i in 1:num_samples]
histogram(X_T_samples; xlabel = "x", normalize = true,
          label = L"Samples of $X_{%$T}$ using MA($\infty$)")
plot!(X_T; label = L"Analytic $X_{%$T}$", lw = 3)
A useful class involves nonlinear functions for the drift and the volatility
\[ X_{t+1} = \mu(X_t) + \sigma(X_t) W_{t+1} \]
This nests our AR(1) process, with \(\mu(x) = a x + b\) and \(\sigma(x) = c\)
For example, we may find that time-series data has time-varying volatility, with the volatility depending on one lag \[ X_{t+1} = a X_t + \sigma_t W_{t+1} \]
Setting \(\sigma_t^2 = \beta + \gamma X_t^2\), the process becomes an ARCH(1)
\[ X_{t+1} = a X_t + \left(\beta + \gamma X_t^2\right)^{1/2} W_{t+1} \]
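This also fits the iterate_map_iid interface, so we can simulate it directly; a minimal sketch, where the parameter values are illustrative assumptions:

a, beta, gamma = 0.5, 0.1, 0.4 # hypothetical ARCH(1) parameters
h_arch(x, W) = a * x + sqrt(beta + gamma * x^2) * W
x_arch = iterate_map_iid(h_arch, Normal(), 0.0, 200)
plot(0:200, x_arch; label = L"X_t", xlabel = "t")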
Nonlinearity in economics often comes in various forms of barriers, e.g. borrowing constraints
Consider our AR(1) except that the process can never go below \(0\)
\[ X_{t+1} = \max\{a X_t + b + c W_{t+1},\, 0\} \]
We could treat \(0\) as an absorbing state and stop the process there, but instead we let it keep evolving
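Simulating this censored process is a one-line change to the iteration function; a minimal sketch, with assumed parameter values chosen so the barrier binds:

a, b, c = 0.8, -0.1, 0.25 # hypothetical values with a negative drift
h_bar(x, W) = max(a * x + b + c * W, 0.0)
x_bar = iterate_map_iid(h_bar, Normal(), 0.5, 200)
plot(0:200, x_bar; label = L"X_t", xlabel = "t")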
Turning off population growth, with production \(f(k) = k^\alpha\) and constant savings rate \(s\) and depreciation rate \(\delta\) \[ k_{t+1} = (1-\delta) k_t + s Z_t f(k_t),\quad \text{given } k_0 \]
Let log productivity, \(z_t\equiv \log Z_t\), follow an AR(1) process (why logs?)
\[ \log Z_{t+1} = a \log Z_t + b + c W_{t+1} \]
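The simulation below uses iterate_map_iid_vec, a vector analogue of iterate_map_iid; a minimal sketch, assuming the iterates are stored as columns:

function iterate_map_iid_vec(f, dist, x0, T)
    x = zeros(length(x0), T + 1)
    x[:, 1] = x0
    for t in 2:(T + 1)
        x[:, t] = f(x[:, t - 1], rand(dist))
    end
    return x
end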
alpha, delta, s = 0.3, 0.1, 0.2
a, b, c = 0.9, 0.1, 0.1
function h(x, W)
    k = x[1]
    z = x[2]
    return [(1 - delta) * k + s * exp(z) * k^alpha, # capital accumulation
            a * z + b + c * W]                      # log productivity AR(1)
end
x_0 = [7.0, log(2.0)] # k_0, z_0
T = 150
x = iterate_map_iid_vec(h, Normal(), x_0, T)
plot(x[1, :]; label = L"k_t", xlabel = "t", size = (600, 400), legend=:topright)
plot!(exp.(x[2, :]), label = L"Z_t")
dist = LogNormal(b / (1 - a), sqrt(c^2 / (1 - a^2))) # stationary distribution of Z_t
hline!([mean(dist)]; linestyle = :dash, label = L"\mathbb{E}[Z_t]")
hline!([quantile(dist, 0.05)]; lw = 0, fillrange = [quantile(dist, 0.95)],
       fillalpha = 0.2, label = "5/95")
# Compare to the deterministic steady state with Z fixed at its stationary mean
k_ss_det = (s * mean(dist) / delta)^(1 / (1 - alpha))
T = 200000
x = iterate_map_iid_vec(h, Normal(), x_0, T)
histogram(x[1, :]; label = L"k_t",
normalize = true, xlabel = "k",
alpha=0.5, size = (600, 400))
vline!([k_ss_det]; linestyle = :dash, lw=3,
label = L"k_{ss}(c = 0)")
The simplest Kesten process is a process of the form
\[ X_{t+1} = a_{t+1} X_t + y_{t+1} \]
where \(a_{t+1}\) and \(y_{t+1}\) are iid random variables
Example: if the population is \(N_t\) and the growth rate between \(t\) and \(t+1\) is \(g_{t+1}\), then \(N_{t+1} = (1 + g_{t+1}) N_t\), i.e. \(a_{t+1} = 1 + g_{t+1}\) and \(y_{t+1} = 0\)
Key questions will be about whether stationary distributions exist, how they depend on parameters, and how fast they are approached
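A minimal simulation sketch of a Kesten process using iterate_map_iid, with illustrative distributional assumptions (lognormal \(a_t\) and, for simplicity, a constant \(y_t\)):

h_kesten(x, W) = exp(-0.02 + 0.1 * W) * x + 0.05 # a_{t+1} lognormal, y_{t+1} = 0.05
x_k = iterate_map_iid(h_kesten, Normal(), 1.0, 500)
plot(0:500, x_k; label = L"X_t", xlabel = "t")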
Let \(R_t\) be the gross return on an asset, and \(W_t\) the value of a position in it \[ W_{t+1} = R_{t+1} W_t \]
Let \(\log R_t \sim \mathcal{N}(\mu, \sigma^2)\), i.e. \(R_t\) is lognormally distributed
mu = -0.01
sigma = 0.1
R_dist = LogNormal(mu, sigma)
T = 100
W_0 = 1.0
@show mean(R_dist)
@show exp(mu + sigma^2 / 2) # the lognormal mean formula matches
plot(iterate_map_iid((W, R) -> W * R, R_dist, W_0, T);
     ylabel = L"W_t", xlabel = "t",
     size = (600, 400), legend = nothing,
     title = "Simulations of Value")
plot!(iterate_map_iid((W, R) -> W * R, R_dist, W_0, T))
plot!(iterate_map_iid((W, R) -> W * R, R_dist, W_0, T))
mean(R_dist) = 0.9950124791926823
exp(mu + sigma ^ 2 / 2) = 0.9950124791926823
function iterate_map_iid_ensemble(f, dist, x0, T, num_samples)
    x = zeros(num_samples, T + 1)
    x[:, 1] .= x0
    for t in 2:(T + 1)
        # broadcast the map over samples (or could do a loop over samples)
        x[:, t] .= f.(x[:, t - 1], rand(dist, num_samples))
    end
    return x
end
num_samples = 200
W = iterate_map_iid_ensemble((W, R) -> W * R, R_dist, W_0, T, num_samples)
plot(W'; ylabel = L"W_t", xlabel = "t", legend = nothing, alpha = 0.1,
     color = :blue, lw = 1)
num_samples = 1000
T = 200
W = iterate_map_iid_ensemble((W, R) -> W * R, R_dist, W_0, T, num_samples)
q_50 = [quantile(W[:,i], 0.5) for i in 1:T+1]
q_05 = [quantile(W[:,i], 0.05) for i in 1:T+1]
q_95 = [quantile(W[:,i], 0.95) for i in 1:T+1]
mean_W = mean(W, dims=1)'
plot(mean_W; label = "Mean Value")
plot!(q_50; label = "Median Value", style = :dash, color = :lightblue)
plot!(q_05; label = "5/95 Percentile", lw=0, fillrange = q_95, fillalpha=0.4, color = :lightblue)
mu = -0.001 # closer to zero: now E[R] = exp(mu + sigma^2/2) > 1, though the median return exp(mu) < 1
sigma = 0.1
R_dist = LogNormal(mu, sigma)
T = 100
W_0 = 1.0
@show mean(R_dist)
num_samples = 1000
W = iterate_map_iid_ensemble((W, R) -> W * R, R_dist, W_0, T, num_samples)
q_50 = [quantile(W[:,i], 0.5) for i in 1:T+1]
q_05 = [quantile(W[:,i], 0.05) for i in 1:T+1]
q_95 = [quantile(W[:,i], 0.95) for i in 1:T+1]
mean_W = mean(W, dims=1)'
plot(mean_W; label = "Mean Value")
plot!(q_50; label = "Median Value", style = :dash, color = :lightblue)
plot!(q_05; label = "5/95 Percentile", lw=0, fillrange = q_95, fillalpha=0.4, color = :lightblue)
mean(R_dist) = 1.004008010677342