################################################################################
# Bayesian model selection and statistical modeling
# Chapman & Hall/CRC, Taylor and Francis Group
#
# Chapter 6: Section 6.3.1
#
# The marginal likelihood evaluations for the seemingly unrelated regression
# model
#
# Author : Tomohiro Ando
################################################################################

# Generates n samples from the p-variate normal distribution N(mu, A)
prnorm <- function(mu, A, n, p) {
  U <- svd(A)$u
  V <- svd(A)$v
  D <- diag(sqrt(svd(A)$d))
  B <- U %*% D %*% t(V)                # square root of the matrix A
  w <- matrix(0, n, p)
  for (i in 1:n) { w[i,] <- mu + B %*% rnorm(p) }
  return(w)
}

# Generates a single sample from the p-variate normal distribution N(mu, A)
prnorm2 <- function(mu, A, p) {
  U <- svd(A)$u
  V <- svd(A)$v
  D <- diag(sqrt(svd(A)$d))
  B <- U %*% D %*% t(V)                # square root of the matrix A
  w <- mu + B %*% rnorm(p)
  return(w)
}

# Evaluate the log-density of the inverted Wishart distribution
# logG: log multivariate gamma function, term by term via Stirling's
# approximation log Gamma(x) ~ 0.5*log(2*pi) + (x-0.5)*log(x) - x + 1/(12*x)
logG <- function(m, a) {
  b1 <- m*(m-1)/4*log(pi)
  x  <- abs( a-0.5*((m-1):0) )
  b2 <- 0.5*log(2*pi) + (x-0.5)*log(x) - x + 1/(12*x)
  b3 <- b1 + sum(b2)
  b3
}

logIW <- function(nu, A, S) {
  p  <- ncol(A)
  b1 <- -0.5*nu*p*log(2) - logG(p,nu/2) - 0.5*(nu+p+1)*log(det(S)) +
        0.5*nu*log(det(A))
  b2 <- -0.5*sum(diag(A%*%solve(S)))
  b3 <- as.numeric(b1+b2)
  b3
}

# Evaluate the log-density of the multivariate normal distribution N(M, S)
logN <- function(X, M, S) {
  p <- ncol(S)
  b <- -0.5*p*log(2*pi) - 0.5*log(det(S)) -
       0.5*sum( t(X-M)%*%solve(S)%*%(X-M) )
  b
}
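# A minimal cross-check (not part of the original script): logG() above uses
# Stirling's approximation, but the log multivariate gamma function can also
# be computed exactly with lgamma(). The helper name logG.exact is introduced
# here only for illustration.
logG.exact <- function(m, a) {
  # log Gamma_m(a) = m(m-1)/4 * log(pi) + sum_{j=0}^{m-1} log Gamma(a - j/2)
  m*(m-1)/4*log(pi) + sum(lgamma(a - 0.5*(0:(m-1))))
}
# e.g. logG(2, 2.5) and logG.exact(2, 2.5) should agree to a few decimals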
# Library
library(bayesm)

# Data generation
n  <- 100
p  <- 3
X1 <- matrix(runif(n*p,-2,2), ncol=p)
X2 <- matrix(runif(n*p,-2,2), ncol=p)
B1 <- c(-2,0,1); B2 <- c(0,3,1)
SS <- matrix(c(0.35,-0.15,-0.15,0.43), 2, 2)
er <- prnorm(rep(0,len=2), SS, n, 2)
y1 <- X1%*%B1 + er[,1]
y2 <- X2%*%B2 + er[,2]
AX1 <- X1; AX2 <- X2

# Model settings
X1 <- AX1[,c(1,3)]; X2 <- AX2[,2:3]    # Model 1
#X1 <- AX1[,c(1,3)]; X2 <- AX2[,2:3]   # Model 2
#X1 <- AX1[,1:3];    X2 <- AX2[,1:3]   # Model 3
p <- ncol(X1)

# Find the generalized least squares estimate
X <- matrix(0, ncol=p*2, nrow=n*2)
X[1:n, 1:p] <- X1
X[(n+1):(2*n), (p+1):(2*p)] <- X2
y <- c(y1,y2)
B <- solve(t(X)%*%X)%*%t(X)%*%y        # OLS estimate as the starting value
Beta1 <- B[1:p]
Beta2 <- B[-(1:p)]
R <- matrix(0,2,2)
R[1,1] <- sum( (y1-X1%*%Beta1)^2 )
R[1,2] <- R[2,1] <- sum( t(y1-X1%*%Beta1)%*%(y2-X2%*%Beta2) )
R[2,2] <- sum( (y2-X2%*%Beta2)^2 )
S <- R/n
O <- kronecker(solve(S), diag(1,n))

# Iterate the feasible GLS updates of B and S
for (i in 1:100) {
  Beta1 <- B[1:p]
  Beta2 <- B[-(1:p)]
  B <- solve(t(X)%*%O%*%X)%*%t(X)%*%O%*%y
  R <- matrix(0,2,2)
  R[1,1] <- sum( (y1-X1%*%Beta1)^2 )
  R[1,2] <- R[2,1] <- sum( t(y1-X1%*%Beta1)%*%(y2-X2%*%Beta2) )
  R[2,2] <- sum( (y2-X2%*%Beta2)^2 )
  S <- R/n
  O <- kronecker(solve(S), diag(1,n))
}

# Prior settings
A   <- 10^-5*diag(1,ncol(X))           # prior precision of the coefficients
L0  <- diag(1,2)                       # inverse Wishart scale matrix
nu0 <- 5                               # inverse Wishart degrees of freedom

# Gibbs sampling
MHn <- 2000                            # number of iterations
AB  <- matrix(0, MHn, p*2)
AS  <- matrix(0, MHn, 4)
for (j in 1:MHn) {
  Bold <- B
  Sold <- S
  Rold <- R
  R[1,1] <- sum( (y1-X1%*%Beta1)^2 ); R[2,2] <- sum( (y2-X2%*%Beta2)^2 )
  R[1,2] <- R[2,1] <- sum( t(y1-X1%*%Beta1)%*%(y2-X2%*%Beta2) )
  O <- kronecker(solve(S), diag(1,n))
  # Draw the coefficients from their normal full conditional
  B <- solve(t(X)%*%O%*%X+A)%*%t(X)%*%O%*%y
  Bnew <- prnorm2(B, solve(t(X)%*%O%*%X+A), length(B))
  Beta1 <- Bnew[1:p]; Beta2 <- Bnew[-(1:p)]
  AB[j,] <- Bnew
  # Draw the error covariance from its inverted Wishart full conditional
  R[1,1] <- sum( (y1-X1%*%Beta1)^2 ); R[2,2] <- sum( (y2-X2%*%Beta2)^2 )
  R[1,2] <- R[2,1] <- sum( t(y1-X1%*%Beta1)%*%(y2-X2%*%Beta2) )
  Snew <- S <- rwishart(n+nu0, solve(R+L0))$IW
  AS[j,] <- as.vector(Snew)
}

# Burn-in period
AB <- AB[1001:MHn,]; AS <- AS[1001:MHn,]

# Posterior means
Bmean <- rep(0, len=ncol(AB))
for (i in 1:(ncol(AB))) { Bmean[i] <- mean(AB[,i]) }
Smean <- rep(0, len=ncol(AS))
for (i in 1:(ncol(AS))) { Smean[i] <- mean(AS[,i]) }
Smean <- matrix(Smean, nrow=2, ncol=2)

# The Laplace-Metropolis estimator
B0 <- rep(0, len=p*2)
LoglikepriorB <- logN(Bmean, B0, solve(A))
LoglikepriorS <- logIW(nu0, L0, Smean)
Loglikeprior  <- LoglikepriorB + LoglikepriorS
Beta1 <- Bmean[1:p]; Beta2 <- Bmean[-(1:p)]
R[1,1] <- sum( (y1-X1%*%Beta1)^2 ); R[2,2] <- sum( (y2-X2%*%Beta2)^2 )
R[1,2] <- R[2,1] <- sum( t(y1-X1%*%Beta1)%*%(y2-X2%*%Beta2) )
LogLike <- -0.5*n*2*log(2*pi) - 0.5*n*log(det(Smean)) -
           0.5*sum( diag( R%*%solve(Smean) ) )
B <- cbind(AB, AS[,-3])                # drop the duplicated off-diagonal of S
M <- rep(1, len=nrow(AB))%*%t(c(Bmean, Smean[-3]))
V <- var(B-M)                          # posterior covariance of the draws
LogMarlikelihood <- LogLike + Loglikeprior + 0.5*ncol(B)*log(2*pi) +
                    0.5*log(det(V))
print(LogMarlikelihood)

# Harmonic mean estimator
P <- rep(0, len=nrow(AB))
for (j in 1:nrow(AB)) {
  Beta1 <- AB[j,1:p]; Beta2 <- AB[j,-(1:p)]
  S <- matrix(AS[j,], 2, 2)
  R[1,1] <- sum( (y1-X1%*%Beta1)^2 ); R[2,2] <- sum( (y2-X2%*%Beta2)^2 )
  R[1,2] <- R[2,1] <- sum( t(y1-X1%*%Beta1)%*%(y2-X2%*%Beta2) )
  LogLike <- -0.5*n*2*log(2*pi) - 0.5*n*log(det(S)) -
             0.5*sum( diag( R%*%solve(S) ) )
  P[j] <- LogLike
}
LogMarlikelihood <- log( 1 / (sum( 1/exp(P) )/nrow(AB) ) )
print(LogMarlikelihood)

# Chib's marginal likelihood
Beta1 <- Bmean[1:p]; Beta2 <- Bmean[-(1:p)]
R[1,1] <- sum( (y1-X1%*%Beta1)^2 ); R[2,2] <- sum( (y2-X2%*%Beta2)^2 )
R[1,2] <- R[2,1] <- sum( t(y1-X1%*%Beta1)%*%(y2-X2%*%Beta2) )
LogLike <- -0.5*n*2*log(2*pi) - 0.5*n*log(det(Smean)) -
           0.5*sum( diag( R%*%solve(Smean) ) )
O <- kronecker(solve(Smean), diag(1,n))
B <- solve(t(X)%*%O%*%X+A)%*%t(X)%*%O%*%y
# logN expects a covariance matrix, so pass the inverse of the full
# conditional precision t(X)%*%O%*%X+A
LoglikepostB <- logN(Bmean, B, solve(t(X)%*%O%*%X+A))
LikepostS <- 0
for (j in 1:nrow(AB)) {
  Beta1 <- AB[j,1:p]; Beta2 <- AB[j,-(1:p)]
  R[1,1] <- sum( (y1-X1%*%Beta1)^2 ); R[2,2] <- sum( (y2-X2%*%Beta2)^2 )
  R[1,2] <- R[2,1] <- sum( t(y1-X1%*%Beta1)%*%(y2-X2%*%Beta2) )
  LikepostS <- LikepostS + exp(logIW(n+nu0, L0+R, Smean))
}
LoglikepostS <- log(LikepostS/nrow(AB))
LogMarlikelihood <- LogLike + Loglikeprior - LoglikepostB - LoglikepostS
print(LogMarlikelihood)
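# A numerically stabler variant of the harmonic mean estimator above (a
# sketch, not part of the original script): forming 1/exp(P) can underflow
# or overflow for large |P|, so work on the log scale with the log-sum-exp
# trick. The names logsumexp and LogMarlikelihood2 are introduced here only
# for illustration; the result should match the harmonic mean estimate.
logsumexp <- function(x) {
  m <- max(x)
  m + log(sum(exp(x - m)))
}
# log[ N / sum_j exp(-P_j) ] = log(N) - logsumexp(-P)
LogMarlikelihood2 <- log(nrow(AB)) - logsumexp(-P)
print(LogMarlikelihood2)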