\name{llmnp}
\alias{llmnp}
\concept{multinomial probit}
\concept{GHK method}
\concept{likelihood}

\title{Evaluate Log Likelihood for Multinomial Probit Model}

\description{
\code{llmnp} evaluates the log-likelihood for the multinomial probit model.
}

\usage{llmnp(beta, Sigma, X, y, r)}

\arguments{
  \item{beta }{ k x 1 vector of coefficients }
  \item{Sigma}{ (p-1) x (p-1) covariance matrix of errors }
  \item{X }{ n*(p-1) x k array where X is from differenced system }
  \item{y }{ vector of n indicators of multinomial response (1, \ldots, p) }
  \item{r }{ number of draws used in GHK }
}

\details{
  \eqn{X} is an \eqn{(p-1)*n \times k}{(p-1)*n x k} matrix. Use \code{\link{createX}} with \code{DIFF=TRUE} to create \eqn{X}. \cr

  Model for each observation: \eqn{w = X\beta + e}{w = Xbeta + e} with \eqn{e \sim N(0, \Sigma)}{e ~ N(0, Sigma)}.

  Censoring mechanism: \cr
  if \eqn{y = j \; (j < p)}{y = j (j < p)}, then \eqn{w_j > \max(w_{-j})}{w_j > max(w_{-j})} and \eqn{w_j > 0} \cr
  if \eqn{y = p}, then \eqn{w < 0} \cr

  To use the GHK method, we must transform so that these are rectangular regions,
  e.g. if \eqn{y = 1}, then \eqn{w_1 > 0} and \eqn{w_1 - w_{-1} > 0}.

  Define \eqn{A_j} such that, for \eqn{j = 1, \ldots, p-1}, \eqn{A_j w = A_j \mu + A_j e > 0}{A_j w = A_j mu + A_j e > 0} is equivalent to \eqn{y = j}. Thus, if \eqn{y = j}, we have \eqn{A_j e > -A_j \mu}{A_j e > -A_j mu}. The lower truncation point is \eqn{-A_j \mu}{-A_j mu} and the covariance is \eqn{A_j \Sigma A_j'}{A_j Sigma t(A_j)}. For \eqn{j = p}, \eqn{e < -\mu}{e < -mu}.
}

\value{Value of log-likelihood (sum of log prob of observed multinomial outcomes)}

\section{Warning}{
This routine is a utility routine that does \strong{not} check the input arguments for proper dimensions and type.
}

\author{ Peter Rossi, Anderson School, UCLA, \email{perossichi@gmail.com}.}

\references{ For further discussion, see Chapters 2 and 4, \emph{Bayesian Statistics and Marketing} by Rossi, Allenby, and McCulloch. 
\cr \url{http://www.perossi.org/home/bsm-1}}

\seealso{ \code{\link{createX}}, \code{\link{rmnpGibbs}} }

\examples{
\dontrun{ll=llmnp(beta,Sigma,X,y,r)}
}

\keyword{models}