
# file "Ropt" -- optimising a high-dimensional RWM algorithm

dim = 5

# specify the target covariance and mean
tmpmat = matrix(
c( 0.458731768, -1.138985820,  0.784868032, -1.017293716, -0.723827196,
 0.564371814, -0.155166457,  0.902601504,  0.186805644,  0.952120668,
 1.486382516,  1.458366992,  0.709626067, -1.138458393,  0.897889709,
 0.431802958, -0.721054075, -0.656762999,  1.940817630, -1.015356510,
 0.014969681,  0.422744911,  0.073882617, -0.007217515,  0.664031927),
								nrow=dim )
# PREV: tmpmat = matrix( rnorm(dim^2), nrow=dim )
Sigma = t(tmpmat) %*% tmpmat

meanvect = c( 0.2133594,  0.5641370, -0.8031936, -1.1818973, -0.7307054 )
# PREV: meanvect = rnorm(dim)

identity = diag(dim)

# specify the proposal increment covariance (choose one)
propSigma = identity
# propSigma = 0.1 * identity
# propSigma = Sigma
# propSigma = 1.4 * Sigma

# SOME MULTIVARIATE NORMAL R FUNCTIONS:
# Draw one sample from the multivariate normal N(mean, cov).
#   mean: numeric vector (defaults to the zero vector matching cov).
#   cov:  real, symmetric, positive-definite covariance matrix.
# Returns a plain numeric vector of length(mean).
rmvn <- function(mean = rep(0, nrow(cov)), cov = diag(length(mean))) {
  thedim <- length(mean)
  # FIX: is.numeric() replaces the defunct is.real(); any() collapses the
  # elementwise matrix comparison to the single logical that if() needs.
  if (!is.numeric(cov) || any(t(cov) != cov))
    stop("Error: covariance matrix in rmvn must be real and symmetric.")
  # FIX: the original tested nrow twice; check both dimensions.
  if (nrow(cov) != thedim || ncol(cov) != thedim)
    stop("Error: covariance matrix in rmvn is not ", thedim, " x ", thedim)
  A <- t(chol(cov))   # lower-triangular factor: A %*% t(A) == cov
  # mean + A z with z ~ N(0, I); t(...)[1, ] flattens the column matrix.
  return(t(mean + A %*% rnorm(thedim))[1, ])
}
# Density of the multivariate normal N(mean, cov) evaluated at x.
# BUG FIX: the original body centred on the global 'meanvect' instead of
# the 'mean' argument, so the argument was silently ignored.
dmvn <- function(x, mean = rep(0, nrow(cov)), cov = diag(length(mean))) {
  thedim <- length(mean)
  # is.numeric() replaces the defunct is.real(); any() collapses the
  # elementwise matrix comparison to a single logical for if().
  if (!is.numeric(cov) || any(t(cov) != cov))
    stop("Error: covariance matrix in dmvn must be real and symmetric.")
  if (nrow(cov) != thedim || ncol(cov) != thedim)
    stop("Error: covariance matrix in dmvn is not ", thedim, " x ", thedim)
  # base::pi is namespaced explicitly because this script rebinds 'pi'
  # below (replaces the hard-coded 3.1415926 approximation).
  return((2 * base::pi)^(-thedim / 2) / sqrt(abs(det(cov))) *
           exp(-0.5 * t(x - mean) %*% solve(cov) %*% (x - mean)))
}

# Target density: N(meanvect, Sigma).  'mean = meanvect' is passed
# explicitly now that dmvn honours its mean argument, keeping the target
# consistent with the "true mean of h" formula reported after the run.
pi <- function(X) { dmvn(X, mean = meanvect, cov = Sigma) }

h = function(X) { return(X[1]^2) }

M <- 4100  # total number of Metropolis iterations
B <- 100   # initial iterations discarded as burn-in

X <- runif(dim, 0, 2)  # overdispersed starting distribution

# Pre-allocated traces of the first two coordinates and of h(X).
x1list <- numeric(M)
x2list <- numeric(M)
hlist <- numeric(M)
numaccept <- 0

for (i in seq_len(M)) {
  # Random-walk proposal, then an independent uniform for accept/reject
  # (drawn in this order to keep the RNG stream identical to the original).
  Y <- X + rmvn(cov = propSigma)
  U <- runif(1)
  if (U < pi(Y) / pi(X)) {
    # Accept the proposed move.
    X <- Y
    numaccept <- numaccept + 1
  }
  # Record the (possibly unchanged) current state.
  x1list[i] <- X[1]
  x2list[i] <- X[2]
  hlist[i] <- h(X)
}

# Summarise the run: acceptance rate and the post-burn-in estimate of E[h].
cat("ran Metropolis algorithm for", M, "iterations, with burn-in", B, "\n")
cat("acceptance rate =", numaccept / M, "\n")

# Estimate E[h] from the post-burn-in portion of the chain only.
estmean <- mean(hlist[(B + 1):M])
cat("estimated mean of h is ", estmean, "\n")
# E[X1^2] = (E X1)^2 + Var(X1) for the target N(meanvect, Sigma).
cat("true mean of h is ", meanvect[1]^2 + Sigma[1, 1], "\n")

# Naive standard error, valid only if the samples were independent.
se1 <- sd(hlist[(B + 1):M]) / sqrt(M - B)
cat("iid standard error would be about", se1, "\n")

varfact <- function(xxx) { 2 * sum(acf(xxx, plot=FALSE)$acf) - 1 }
# Inflate the iid standard error by the chain's autocorrelation factor.
# FIX: estimate varfact from the same post-burn-in segment used for
# estmean and se1 -- the original passed the full hlist, letting the
# overdispersed burn-in inflate the autocorrelation estimate.
se2 <- se1 * sqrt(varfact(hlist[(B + 1):M]))
cat("true standard error is about", se2, "\n")

# Normal-approximation 95% confidence interval for E[h].
cat("95% confidence interval is about (",
    estmean - 1.96 * se2, ",", estmean + 1.96 * se2, ")\n")

# Trace plot of the first coordinate of the chain.
plot(x1list, type = "l")

