####################################################
# time series regression with autocorrelated error #
####################################################

# DGP: y_t = beta*x_t + u_t, u_t ~ AR(1) with parameter phi

set.seed(1234)
T = 100   # sample size (note: this masks R's shortcut T for TRUE, so TRUE is spelled out below)
tr = 1:T
phi = 0.6
beta = 2
x = rnorm(T)
e = rnorm(T)
u = rep(0, T)
for (t in 2:T) u[t] = phi*u[t-1]+e[t]
y = x*beta + u

# OLS: the slope estimate is consistent here, but the usual standard errors
# are invalid because the errors are serially correlated
m1 = lm(y~x)
summary(m1)
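
# A quick residual diagnostic before correcting anything; a minimal sketch
# using lmtest's Durbin-Watson test (acf() is base R):
library("lmtest")
dwtest(m1)        # tests H0: no first-order autocorrelation in the errors
acf(resid(m1))    # sample autocorrelations of the OLS residuals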

# OLS with a HAC (Newey-West) covariance estimator
library("sandwich")
library("lmtest")
coeftest(m1, df = Inf, vcov = NeweyWest)
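
# The NeweyWest() lag length above is chosen automatically; it can also be
# fixed by hand. A sketch with the common floor(4*(T/100)^(2/9)) = 4 rule
# (this particular lag choice is an assumption, not the package default):
coeftest(m1, df = Inf, vcov = NeweyWest(m1, lag = 4, prewhite = FALSE))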

# three-step FGLS (Cochrane-Orcutt) procedure
uhat = residuals(m1)                 # step 1: OLS residuals
uhat.lag1 = c(NA, uhat[1:(T-1)])
summary(lm(uhat~uhat.lag1))          # step 2: regress uhat on its lag to estimate phi

phihat = coef(lm(uhat~uhat.lag1))[2] # slope from step 2 (about 0.5965 with this seed)
y.lag1 = c(NA, y[1:(T-1)])
x.lag1 = c(NA, x[1:(T-1)])
ystar = y - phihat*y.lag1            # step 3: quasi-difference both variables
xstar = x - phihat*x.lag1
summary(lm(ystar~xstar))             # and re-run OLS on the transformed data
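
# Iterating steps 2 and 3 until phihat settles gives the classic
# Cochrane-Orcutt estimator; a minimal sketch (the tolerance 1e-6 and the
# 50-iteration cap are arbitrary choices):
b = coef(m1)
ph.old = 0
for (i in 1:50) {
  uh = y - b[1] - b[2]*x                        # residuals in levels
  ph = coef(lm(uh[2:T] ~ uh[1:(T-1)] - 1))[1]   # update phi (no intercept)
  cf = coef(lm(I(y - ph*y.lag1) ~ I(x - ph*x.lag1)))
  b = c(cf[1]/(1 - ph), cf[2])                  # undo the intercept transform
  if (abs(ph - ph.old) < 1e-6) break
  ph.old = ph
}
c(beta = unname(b[2]), phi = unname(ph))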

# joint nonlinear least squares estimates of beta and phi
rss.nls = function(co) {
  beta = co[1]
  phi = co[2]
  res = (y - phi*y.lag1) - beta*(x - phi*x.lag1)  # quasi-differenced residuals
  rss = sum(res^2, na.rm = TRUE)                  # drop the NA created by the lag
  return(rss)
}
optim(c(0.1,0.1), rss.nls)
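
# optim() above returns only point estimates; a rough sketch of standard
# errors from the numerical Hessian of the RSS, using Var(theta) ~
# 2*sigma2*H^{-1} with sigma2 = RSS/(n - k) and one observation lost to the lag:
opt = optim(c(0.1, 0.1), rss.nls, hessian = TRUE)
sigma2 = opt$value/(T - 1 - 2)
sqrt(diag(2*sigma2*solve(opt$hessian)))   # se(beta), se(phi)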

# canned command: nls() fits the same quasi-differenced model directly
summary(nls(y ~ p*y.lag1 + b*x - p*b*x.lag1, start = list(p = 0.1, b = 0.1)))
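
# A maximum-likelihood cross-check: the same model is a regression with AR(1)
# errors, which base R's arima() fits directly (include.mean = FALSE because
# the DGP has no intercept):
arima(y, order = c(1, 0, 0), xreg = x, include.mean = FALSE)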

# AR(1) with autocorrelated error
# DGP: y_t = beta*y_{t-1} + u_t, u_t ~ AR(1) with parameter phi

set.seed(1234)
T = 100
tr = 1:T
phi = -0.6
beta = 0.4
e = rnorm(T)
u = rep(0, T)
y = rep(0, T)

for (t in 2:T) u[t] = phi*u[t-1]+e[t]
y[1]=u[1]
for (t in 2:T) y[t] = beta*y[t-1]+u[t]
y.lag1 = c(NA, y[1:(T-1)])
y.lag2 = c(NA, NA, y[1:(T-2)])


summary(lm(y~y.lag1))  # OLS is inconsistent: y[t-1] is correlated with u[t]
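
# The inconsistency can be quantified: the OLS slope converges to the first
# autocorrelation of y, (beta + phi)/(1 + beta*phi) = -0.2/0.76, about -0.26,
# not to beta = 0.4. A small Monte Carlo sketch (500 replications is an
# arbitrary choice):
mc = replicate(500, {
  ee = rnorm(T); uu = rep(0, T); yy = rep(0, T)
  for (t in 2:T) uu[t] = phi*uu[t-1] + ee[t]
  yy[1] = uu[1]
  for (t in 2:T) yy[t] = beta*yy[t-1] + uu[t]
  coef(lm(yy[2:T] ~ yy[1:(T-1)]))[2]
})
mean(mc)   # close to -0.26, far from the true beta = 0.4
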
rss.nls = function(co) {
  beta = co[1]
  phi = co[2]
  res = (y - phi*y.lag1) - beta*(y.lag1 - phi*y.lag2)  # quasi-differenced residuals
  rss = sum(res^2, na.rm = TRUE)                       # drop the NAs created by the lags
  return(rss)
}
optim(c(0.2,0.1), rss.nls)
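
# A maximum-likelihood cross-check: (1 - beta*L)(1 - phi*L)y_t = e_t is an
# AR(2) with coefficients a1 = beta + phi and a2 = -beta*phi, so beta and phi
# are recovered as the roots of z^2 - a1*z - a2 = 0:
m2 = arima(y, order = c(2, 0, 0), include.mean = FALSE)
a = coef(m2)
Re(polyroot(c(-a[2], -a[1], 1)))   # should be close to 0.4 and -0.6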