# Fetch the innovations method (a sketch of what such a function might
# look like is given at the end of this script):
source("inno_ma.R")

# Since we can do more than 20 trials in R, I suggest going for M=1000
# instead. If you want 20 as in the exercise text, set M=20.
M=1000

# Make the simulation set of time series:
x=array(NA,c(M,200))
theta=0.6

# Allocate vectors for the moment estimates of theta (based on the MA(1)
# autocorrelation in example 3.1.2), the innovations estimates and the
# ML estimates:
theta.hat.mom=array(NA,M)
theta.hat.inno=array(NA,M)
theta.hat.ml=array(NA,M)

for(i in 1:M)
{
  # Draw 201 N(0,1) variables:
  z=rnorm(201,0,1)
  # x_t = z_t + 0.6*z_{t-1}:
  x[i,]=z[2:201]+theta*z[1:200]

  # a):
  # Fetch the lag=1 autocorrelation:
  rho1=acf(x[i,], lag.max=21, type="correlation", plot=FALSE)$acf[2]
  # Moment estimate, see the equation and comments right after eq. 5.1.18:
  if(rho1 < 0.5 && rho1 > -0.5)
    theta.hat.mom[i]=(1-sqrt(1-4*rho1^2))/(2*rho1)
  else
    theta.hat.mom[i]=sign(rho1)

  # b):
  estimated.gamma=acf(x[i,], lag.max=21, type="covariance", plot=FALSE)$acf
  # Use the algorithm supplied by Eivind Damsleth:
  theta.hat.inno[i]=inno.ma(6,estimated.gamma,20)[1]

  # Maximum likelihood fit:
  x.ts=ts(x[i,])
  ma.ml.fit=arima(x.ts,order=c(0,0,1))
  theta.hat.ml[i]=ma.ml.fit$coef[1]
}

# Make a histogram of the moment estimates:
hist(theta.hat.mom, breaks=30)
# Looks almost normally distributed, except for some estimates forced to
# theta=1 (the cases where |rho1| >= 0.5). Quite a bit of spread in the
# sample distribution of this estimator, it seems.

# Make a histogram of the innovations estimates:
hist(theta.hat.inno, breaks=30)
# The estimates larger than 1 in absolute value make the plot very wide.
# There are reasons for looking only at the invertible results, as we can
# assume that non-invertible results would not be considered real estimates.
invertible=theta.hat.inno>-1 & theta.hat.inno<1 & !is.na(theta.hat.inno)
hist(theta.hat.inno[invertible], breaks=30)
# This histogram looks pretty concentrated around theta=0.6.

# Make a histogram of the ML estimates:
hist(theta.hat.ml, breaks=30)
# Seems more compact than the distributions of both the moment estimates
# and the innovations estimates.

# c):
# Compare mean and variance.
# Mean:
c(mean(theta.hat.mom), mean(theta.hat.inno[invertible]), mean(theta.hat.ml))
#[1] 0.6097875 0.5940619 0.6019275
# The ML estimate is closest to the truth, though the innovations and
# moment estimates are not far behind in mean.

# Variance:
c(var(theta.hat.mom), var(theta.hat.inno[invertible]), var(theta.hat.ml))
#[1] 0.028639786 0.013114774 0.003814476
# Again, ML is best. The moment estimator loses if the non-invertible
# estimates from the innovations method are removed. (But the innovations
# method will be worst if they are not. Check this.)

# Root mean square error:
c(sqrt(mean((theta.hat.mom-0.6)^2)),
  sqrt(mean((theta.hat.inno[invertible]-0.6)^2)),
  sqrt(mean((theta.hat.ml-0.6)^2)))
#[1] 0.16943123 0.11461565 0.06176064
# ML has the smallest RMSE, while the moment method has the largest.

# d):
# Asymptotic variances compared to the sample variances of the estimates.
# See page 146.
n=200

# Moment:
(1+theta^2+4*theta^4+theta^6+theta^8)/(1-theta^2)^2/n
# 0.0237
# Not that far from the sample variance found above (about 0.029), but is
# the difference significant? Could removing the estimates forced to
# theta=1 (which would otherwise give complex roots) change the sample
# variance of the estimates?

# Innovations:
1/n
# 0.005
# The sample variance (about 0.013) was clearly larger than this. This
# might indicate that the algorithm used here has not converged or still
# has some errors. Either that, or the sample distribution of the estimates
# approaches the asymptotic limit very slowly, and has not yet reached it
# for n=200.
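
# One possible way to probe the question above (a hypothetical check, not
# carried out here): repeat the innovations estimation for longer series
# (n=1000) and compare the sample variance to 1/n. This assumes inno.ma can
# be called exactly as above, just on a longer series; the number of
# replications (200) is arbitrary.
n.big=1000
theta.hat.inno.big=replicate(200,
{
  z.big=rnorm(n.big+1,0,1)
  x.big=z.big[2:(n.big+1)]+theta*z.big[1:n.big]
  gamma.big=acf(x.big, lag.max=21, type="covariance", plot=FALSE)$acf
  inno.ma(6,gamma.big,20)[1]
})
# Compare against the asymptotic value 1/n for n=1000, using only the
# invertible estimates as before:
c(var(theta.hat.inno.big[abs(theta.hat.inno.big)<1]), 1/n.big)
# If the sample variance now comes close to 1/n, slow convergence towards
# the asymptotics is the likely explanation; if not, the algorithm itself
# should be inspected.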

# ML:
(1-theta^2)/n
#[1] 0.0032
# Quite close to the sample variance found above (about 0.0038)!

# e):
# ML is clearly the winner, while either the innovations method or the
# moment method comes out worst, depending on whether one accepts throwing
# away the non-invertible innovations estimates.
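
# The file inno_ma.R is not shown in this script. For reference, here is a
# minimal sketch of the innovations algorithm for MA(q) estimation, based
# on the standard innovations recursions. The function name, argument order
# and return value are assumptions and need not match the interface of
# Eivind Damsleth's inno.ma.
inno.ma.sketch=function(gamma, m, q)
{
  # gamma: sample autocovariances gamma(0), ..., gamma(m) (at least m+1 values)
  # m:     depth of the innovations recursion (should be larger than q)
  # q:     MA order; returns the estimates (theta_{m,1}, ..., theta_{m,q})
  v=numeric(m+1)
  Theta=matrix(0, m, m)
  v[1]=gamma[1]                      # v_0 = gamma(0)
  for(n in 1:m)
  {
    for(k in 0:(n-1))
    {
      # theta_{n,n-k} = (gamma(n-k) - sum_{j<k} theta_{k,k-j}*theta_{n,n-j}*v_j)/v_k
      s=if(k > 0) sum(Theta[k, k:1]*Theta[n, n:(n-k+1)]*v[1:k]) else 0
      Theta[n, n-k]=(gamma[n-k+1]-s)/v[k+1]
    }
    # v_n = gamma(0) - sum_{j<n} theta_{n,n-j}^2 * v_j
    v[n+1]=gamma[1]-sum(Theta[n, n:1]^2*v[1:n])
  }
  Theta[m, 1:q]
}
# With these conventions, inno.ma.sketch(estimated.gamma, 20, 1) should give
# an MA(1) estimate comparable to inno.ma(6, estimated.gamma, 20)[1] above,
# but that equivalence is itself an assumption, not something verified here.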