[r-cran-bayesm] 31/44: Import Upstream version 2.2-1

Andreas Tille tille at debian.org
Thu Sep 7 11:16:22 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-bayesm.

commit 79649ce645059bbe9a61eb743ce9d6b966911232
Author: Andreas Tille <tille at debian.org>
Date:   Thu Sep 7 13:09:52 2017 +0200

    Import Upstream version 2.2-1
---
 DESCRIPTION                |   8 +++---
 R/rivDP.R                  |   8 +++---
 inst/doc/bayesm-manual.pdf | Bin 464797 -> 490058 bytes
 man/customerSat.Rd         |   1 -
 man/fsh.Rd                 |   1 -
 man/rDPGibbs.Rd            |  64 ++++++++++++++++++++++-----------------------
 man/rivDP.Rd               |  16 +++++++++---
 man/rmvpGibbs.Rd           |   1 -
 8 files changed, 52 insertions(+), 47 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
index a8dc57f..917bf85 100755
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,6 +1,6 @@
 Package: bayesm
-Version: 2.2-0
-Date: 2008-03-28
+Version: 2.2-1
+Date: 2008-03-10
 Title:Bayesian Inference for Marketing/Micro-econometrics 
 Author: Peter Rossi <peter.rossi at ChicagoGsb.edu>, 
         Rob McCulloch <robert.mcculloch at ChicagoGsb.edu>.
@@ -16,7 +16,7 @@ Description: bayesm covers many important models used
   Multivariate Probit,
   Negative Binomial (Poisson) Regression,
   Multivariate Mixtures of Normals (including clustering),
-  Dirichlet Process Prior Density Estimation with normal base
+  Dirichlet Process Prior Density Estimation with normal base,
   Hierarchical Linear Models with normal prior and covariates,
   Hierarchical Linear Models with a mixture of normals prior and covariates,
   Hierarchical Multinomial Logits with a mixture of normals prior
@@ -31,4 +31,4 @@ Description: bayesm covers many important models used
   Marketing by Rossi, Allenby and McCulloch. 
 License: GPL (version 2 or later)
 URL: http://faculty.chicagogsb.edu/peter.rossi/research/bsm.html
-Packaged: Fri Mar  7 11:04:01 2008; per
+Packaged: Mon Mar 10 13:57:35 2008; per
diff --git a/R/rivDP.R b/R/rivDP.R
index 0229b74..5eae594 100755
--- a/R/rivDP.R
+++ b/R/rivDP.R
@@ -667,12 +667,12 @@ if(SCALE){
 ctime = proc.time()[3]
 cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
 
-densitymix=list(matrix(c(rep(1,length(thetaNp1draw))),ncol=1),list("gunk"),thetaNp1draw)
+nmix=list(probdraw=matrix(c(rep(1,length(thetaNp1draw))),ncol=1),zdraw=NULL,compdraw=thetaNp1draw)
 #
 # densitymix is in the format to be used with the generic mixture of normals plotting
 # methods (plot.bayesm.nmix)
 #
-attributes(densitymix)$class=c("bayesm.nmix")
+attributes(nmix)$class=c("bayesm.nmix")
 
 attributes(deltadraw)$class=c("bayesm.mat","mcmc")
 attributes(deltadraw)$mcpar=c(1,R,keep)
@@ -688,10 +688,10 @@ if(isgamma){
 
 if(isgamma) 
    { return(list(deltadraw=deltadraw,betadraw=betadraw,alphadraw=alphadraw,Istardraw=Istardraw,
-                 gammadraw=gammadraw,densitymix=densitymix))}
+                 gammadraw=gammadraw,nmix=nmix))}
    else
    { return(list(deltadraw=deltadraw,betadraw=betadraw,alphadraw=alphadraw,Istardraw=Istardraw,
-                 densitymix=densitymix))}
+                 nmix=nmix))}
 }
 
 
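The R/rivDP.R hunk above renames the returned predictive-density object from densitymix to nmix and attaches the "bayesm.nmix" class so the generic plotting method can dispatch on it. A minimal sketch of what the renamed component looks like to a caller, assuming out is a fit returned by rivDP with Data/Prior/Mcmc set up as in the man page example (the object names below are illustrative, not part of this commit):

    out <- rivDP(Data=Data, Prior=Prior, Mcmc=Mcmc)  # hypothetical inputs as in the rivDP.Rd example
    str(out$nmix, max.level=1)   # list with $probdraw (column of 1s), $zdraw (NULL), $compdraw (list of draws)
    class(out$nmix)              # "bayesm.nmix", so plot() dispatches to plot.bayesm.nmix
    plot(out$nmix)               # the same call that previously used out$densitymix
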
diff --git a/inst/doc/bayesm-manual.pdf b/inst/doc/bayesm-manual.pdf
index 502dbc0..44b2f19 100755
Binary files a/inst/doc/bayesm-manual.pdf and b/inst/doc/bayesm-manual.pdf differ
diff --git a/man/customerSat.Rd b/man/customerSat.Rd
index d680195..ae7d1f1 100755
--- a/man/customerSat.Rd
+++ b/man/customerSat.Rd
@@ -23,7 +23,6 @@
     \item{\code{q10}}{Distribution to Right Geographic Areas}
   }
 }
-}
 \source{
   Rossi et al (2001), "Overcoming Scale Usage Heterogeneity,"
   \emph{JASA} 96, 20-31.
diff --git a/man/fsh.Rd b/man/fsh.Rd
index 87be325..e7b43eb 100755
--- a/man/fsh.Rd
+++ b/man/fsh.Rd
@@ -9,7 +9,6 @@
 \usage{
 fsh()
 }
-}
 \value{
   No value is returned.
 }
diff --git a/man/rDPGibbs.Rd b/man/rDPGibbs.Rd
index 0b4e33c..40f431e 100755
--- a/man/rDPGibbs.Rd
+++ b/man/rDPGibbs.Rd
@@ -8,9 +8,9 @@
 \title{ Density Estimation with Dirichlet Process Prior and Normal Base }
 \description{
     \code{rDPGibbs} implements a Gibbs Sampler to draw from the posterior for a normal mixture problem
-    with a Dirichlet Process prior.  The base distribution is a multivariate normal distribution. A natural 
-    conjugate base prior is used with priors on the hyper parameters of this distribution. One interpretation
-    of this model is as a normal mixture with a random number of components. 
+    with a Dirichlet Process prior.  A natural conjugate base prior is used along with priors on the hyper 
+    parameters of this distribution. One interpretation
+    of this model is as a normal mixture with a random number of components that can grow with the sample size. 
 }
 
 \usage{
@@ -31,62 +31,61 @@ Model: \cr
 Priors:\cr
         \eqn{theta_i=(mu_i,Sigma_i)} \eqn{\sim}{~} \eqn{DP(G_0(lambda),alpha)}\cr
         \eqn{G_0(lambda):}\cr
-        \eqn{mu_i given Sigma_i} \eqn{\sim}{~} \eqn{N(0,Sigma_i (x) a^{-1})}\cr
+        \eqn{mu_i | Sigma_i} \eqn{\sim}{~} \eqn{N(0,Sigma_i (x) a^{-1})}\cr
         \eqn{Sigma_i} \eqn{\sim}{~} \eqn{IW(nu,nu*v*I)}
         
         \eqn{lambda(a,nu,v):}\cr
-        \eqn{a} \eqn{sim}{~} \eqn{uniform on grid[alim[1],alimb[2]]}\cr
-        \eqn{nu} \eqn{sim}{~} \eqn{uniform on grid[dim(data)-1 + exp(nulim[1]),dim(data)-1 +exp(nulim[2])]}\cr
-        \eqn{v} \eqn{sim}{~} \eqn{uniform on grid[vlim[1],vlim[2]]}
+        \eqn{a} \eqn{\sim}{~} uniform on grid[alim[1],alimb[2]]\cr
+        \eqn{nu} \eqn{\sim}{~} uniform on grid[dim(data)-1 + exp(nulim[1]),dim(data)-1 +exp(nulim[2])]\cr
+        \eqn{v} \eqn{\sim}{~} uniform on grid[vlim[1],vlim[2]]
        
-        \eqn{alpha} \eqn{sim}{~} \eqn{(1-(alpha-alphamin)/(alphamax-alphamin))^power} \cr
-        alpha= alphamin then expected number of compoents = Istarmin \cr
-        alpha= alphamax then expected number of compoents = Istarmax \cr
+        \eqn{alpha} \eqn{\sim}{~} \eqn{(1-(alpha-alphamin)/(alphamax-alphamin))^power} \cr
+        alpha= alphamin then expected number of components = Istarmin \cr
+        alpha= alphamax then expected number of components = Istarmax \cr
 
 list arguments
 
 Data:\cr
   \itemize{
-    \item{y}{N x k matrix of observations on k dimensional data}
+    \item{\code{y}}{N x k matrix of observations on k dimensional data}
   }
 
 Prioralpha:\cr
  \itemize{
-  \item{Istarmin}{expected number of components at lower bound of support of alpha}
-  \item{Istarmax}{expected number of components at upper bound of support of alpha}
-  \item{power}{power parameter for alpha prior}
+  \item{\code{Istarmin}}{expected number of components at lower bound of support of alpha}
+  \item{\code{Istarmax}}{expected number of components at upper bound of support of alpha}
+  \item{\code{power}}{power parameter for alpha prior}
   }
  
 lambda\_hyper:\cr
   \itemize{
-   \item{alim}{defines support of a distribution,def:c(.01,10) }
-   \item{nulim}{defines support of nu distribution, def:c(.01,3)} 
-   \item{vlim}{defines support of v distribution, def:c(.1,4)} 
+   \item{\code{alim}}{defines support of a distribution,def:c(.01,10) }
+   \item{\code{nulim}}{defines support of nu distribution, def:c(.01,3)} 
+   \item{\code{vlim}}{defines support of v distribution, def:c(.1,4)} 
   }
 Mcmc:\cr
  \itemize{
-   \item{R}{number of mcmc draws}
-   \item{keep}{thinning parm, keep every keepth draw}
-   \item{maxuniq}{storage constraint on the number of unique components}
-   \item{SCALE}{should data be scaled by mean,std deviation before posterior draws, def: TRUE}
-   \item{gridsize}{number of discrete points for hyperparameter priors,def: 20}
+   \item{\code{R}}{number of mcmc draws}
+   \item{\code{keep}}{thinning parm, keep every keepth draw}
+   \item{\code{maxuniq}}{storage constraint on the number of unique components}
+   \item{\code{SCALE}}{should data be scaled by mean,std deviation before posterior draws, def: TRUE}
+   \item{\code{gridsize}}{number of discrete points for hyperparameter priors,def: 20}
   }
 
 output:\cr
-
 the basic output are draws from the predictive distribution of the data in the object, \code{nmix}. 
 The average of these draws is the Bayesian analogue of a density estimate.
 
 nmix:\cr
   \itemize{
-   \item{probdraw}{R/keep x 1 matrix of 1s}
-   \item{zdraw}{R/keep x N matrix of draws of indicators of which component each obs is assigned to}
-   \item{compdraw}{R/keep list of draws of normals}
+   \item{\code{probdraw}}{R/keep x 1 matrix of 1s}
+   \item{\code{zdraw}}{R/keep x N matrix of draws of indicators of which component each obs is assigned to}
+   \item{\code{compdraw}}{R/keep list of draws of normals}
   }
   Output of the components is in the form of a list of lists. \cr
   compdraw[[i]] is ith draw -- list of lists. \cr
-  compdraw[[i]][[1]] is list of parms for normal component. \cr
-  compdraw[[i]][1]][[1]] is the mean vector.
+  compdraw[[i]][[1]] is list of parms for a draw from predictive. \cr
+  compdraw[[i]][1]][[1]] is the mean vector. compdraw[[i]][[1]][[2]] is the inverse of Cholesky root.
   \eqn{Sigma} = t(R)\%*\%R, \eqn{R^{-1}} = compdraw[[i]][[1]][[2]].
 }
 
@@ -131,10 +130,10 @@ nmix:\cr
           \code{\link{eMixMargDen}}, \code{\link{momMix}}, \code{\link{mixDen}}, \code{\link{mixDenBi}}}
 
 \examples{
-## simulate univariate data from Chi-Sq
-
 if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=2000} else {R=10}
 
+## simulate univariate data from Chi-Sq
+
 set.seed(66)
 N=200
 chisqdf=8; y1=as.matrix(rchisq(N,df=chisqdf))
@@ -143,7 +142,7 @@ chisqdf=8; y1=as.matrix(rchisq(N,df=chisqdf))
 
 Data1=list(y=y1)
 Prioralpha=list(Istarmin=1,Istarmax=10,power=.8)
-Prior1=list(alpha=1,Prioralpha=Prioralpha)
+Prior1=list(Prioralpha=Prioralpha)
 
 Mcmc=list(R=R,keep=1,maxuniq=200)
 
@@ -181,8 +180,7 @@ y2=banana(A=A,B=B,C1=C1,C2=C2,1000)
 
 Data2=list(y=y2)
 Prioralpha=list(Istarmin=1,Istarmax=10,power=.8)
-Prior2=list(alpha=1,Prioralpha=Prioralpha)
-R=2000
+Prior2=list(Prioralpha=Prioralpha)
 Mcmc=list(R=R,keep=1,maxuniq=200)
 
 out2=rDPGibbs(Prior=Prior2,Data=Data2,Mcmc)
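
The compdraw layout documented in the rDPGibbs.Rd hunk above stores, for each kept draw, a mean vector and the inverse of the Cholesky root of Sigma. A minimal sketch of recovering (mu, Sigma) from one draw, assuming out1 is the fit from the univariate example (as in the full man page; the variable names here are illustrative):

    comp  <- out1$nmix$compdraw[[length(out1$nmix$compdraw)]][[1]]  # last kept draw
    mu    <- comp[[1]]                 # mean vector
    rooti <- comp[[2]]                 # inverse of the Cholesky root R, where Sigma = t(R) %*% R
    Sigma <- crossprod(solve(rooti))   # R = solve(rooti), so crossprod(R) = t(R) %*% R = Sigma
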
diff --git a/man/rivDP.Rd b/man/rivDP.Rd
index 775323d..a598951 100755
--- a/man/rivDP.Rd
+++ b/man/rivDP.Rd
@@ -43,7 +43,7 @@ rivDP(Data, Prior, Mcmc)
   These parameters are collected together in the list \code{lambda}.  It is highly
        recommended that you use the default settings for these hyper-parameters.\cr
 
-  \eqn{alpha} \eqn{\sim}{~} \eqn{(1-(alpha-alpha_{min})/(alpha_{max}-alpha{min}))^{omega}} \cr
+  \eqn{alpha} \eqn{\sim}{~} \eqn{(1-(alpha-alpha_{min})/(alpha_{max}-alpha{min}))^{power}} \cr
    where \eqn{alpha_{min}} and \eqn{alpha_{max}} are set using the arguments in the reference
    below.  It is highly recommended that you use the default values for the hyperparameters
    of the prior on alpha
@@ -65,6 +65,16 @@ rivDP(Data, Prior, Mcmc)
     \item{\code{SCALE}}{ scale data, def: TRUE}
     \item{\code{gridsize}}{ gridsize parm for alpha draws (def: 20)} 
   }
+
+  output includes object \code{nmix} of class "bayesm.nmix" which contains draws of predictive distribution of 
+  errors (a Bayesian analogue of a density estimate for the error terms).\cr
+  nmix:\cr
+  \itemize{
+    \item{\code{probdraw}}{ not used}
+    \item{\code{zdraw}}{ not used}
+    \item{\code{compdraw}}{ list R/keep of draws from bivariate predictive for the errors}
+  }
+  note: in compdraw list, there is only one component per draw
 }
 \value{
   a list containing:
@@ -73,7 +83,7 @@ rivDP(Data, Prior, Mcmc)
   \item{gammadraw}{R/keep x dim(gamma) array of gamma draws }
   \item{Istardraw}{R/keep x 1 array of drawsi of the number of unique normal components}
   \item{alphadraw}{R/keep x 1 array of draws of Dirichlet Process tightness parameter}
-  \item{densitymix}{R/keep x list of draws for predictive distribution of errors}
+  \item{nmix}{R/keep x list of draws for predictive distribution of errors}
 }
 \references{ For further discussion, see "A Semi-Parametric Bayesian Approach to the Instrumental
   Variable Problem," by Conley, Hansen, McCulloch and Rossi, Journal of Econometrics (2008).\cr
@@ -140,7 +150,7 @@ summary(out$betadraw,tvalues=tbeta)
 if(0){
 ## plotting examples
 plot(out$betadraw,tvalues=tbeta)
-plot(out$densitymix)  ## plot "fitted" density of the errors
+plot(out$nmix)  ## plot "fitted" density of the errors
 ##
 
 }
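
Per the updated rivDP.Rd above, rivDP now returns the error-density draws as nmix, with a single bivariate normal component per kept draw. A minimal sketch of pulling the predictive mean and covariance of the errors out of one draw, assuming out is the fit from the example above (names below are illustrative):

    errcomp <- out$nmix$compdraw[[1]][[1]]     # first kept draw; only one component per draw
    errmu   <- errcomp[[1]]                    # predictive mean of the bivariate errors
    errSig  <- crossprod(solve(errcomp[[2]]))  # covariance, from the stored inverse Cholesky root
    plot(out$nmix)                             # "fitted" density of the errors, as in the example
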
diff --git a/man/rmvpGibbs.Rd b/man/rmvpGibbs.Rd
index 4f59406..ae8c0c3 100755
--- a/man/rmvpGibbs.Rd
+++ b/man/rmvpGibbs.Rd
@@ -11,7 +11,6 @@
   \code{rmvpGibbs} implements the Edwards/Allenby Gibbs Sampler for the multivariate probit model.
 }
   
-}
 \usage{
 rmvpGibbs(Data, Prior, Mcmc)
 }

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-bayesm.git


