[r-cran-bayesm] 40/44: Import Upstream version 3.0-2

Andreas Tille tille at debian.org
Thu Sep 7 11:16:23 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-bayesm.

commit 7886232cab2f20e5052d601ec9c0a8c6b6b2f3e3
Author: Andreas Tille <tille at debian.org>
Date:   Thu Sep 7 13:10:22 2017 +0200

    Import Upstream version 3.0-2
---
 DESCRIPTION                                        |  64 +-
 MD5                                                | 257 ++++---
 NAMESPACE                                          |  23 +-
 R/BayesmConstants.R                                |  32 +
 R/BayesmFunctions.R                                |   1 +
 R/breg.R                                           |  25 -
 R/cgetC.R                                          |  14 -
 R/clusterMix.R                                     | 131 ----
 R/clusterMix_rcpp.R                                |  60 ++
 R/createX.R                                        | 143 ++--
 R/ghkvec.R                                         |  12 -
 R/llmnl.R                                          |  26 -
 R/llnhlogit.R                                      |   9 +-
 R/lndIChisq.R                                      |  11 -
 R/lndIWishart.R                                    |  31 -
 R/lndMvn.R                                         |  16 -
 R/lndMvst.R                                        |  19 -
 R/mnpProb.R                                        |   9 +-
 R/plot.bayesm.hcoef.R                              |  89 +--
 R/plot.bayesm.mat.R                                | 117 +--
 R/rDPGibbs.R                                       | 569 ---------------
 R/rbayesBLP_rcpp.R                                 | 289 ++++++++
 R/rbiNormGibbs.R                                   | 235 +++---
 R/{rbprobitGibbs.R => rbprobitgibbs_rcpp.r}        | 275 +++----
 R/rcppexports.r                                    | 151 ++++
 R/rdirichlet.R                                     |  12 -
 R/rdpgibbs_rcpp.r                                  | 172 +++++
 R/rhierBinLogit.R                                  | 455 ++++++------
 R/rhierLinearMixture.R                             | 341 ---------
 R/rhierLinearMixture_rcpp.r                        | 214 ++++++
 R/{rhierLinearModel.R => rhierLinearModel_rcpp.R}  | 135 +---
 R/rhierMnlDP.R                                     | 807 ---------------------
 R/rhierMnlDP_rcpp.r                                | 296 ++++++++
 ...hierMnlRwMixture.R => rhierMnlRwMixture_rcpp.r} | 207 ++----
 R/{rhierNegbinRw.R => rhiernegbinrw_rcpp.r}        | 587 +++++++--------
 R/rivDP.R                                          | 699 ------------------
 R/rivDP_rcpp.R                                     | 279 +++++++
 R/{rivGibbs.R => rivGibbs_rcpp.R}                  | 388 ++++------
 R/rmixGibbs.R                                      |  87 ---
 R/rmixture.R                                       |  36 -
 R/{rmnlIndepMetrop.R => rmnlIndepMetrop_rcpp.R}    | 301 ++++----
 R/{rmnpGibbs.R => rmnpgibbs_rcpp.r}                | 335 ++++-----
 R/rmultireg.R                                      |  56 --
 R/rmvpGibbs.R                                      | 202 ------
 R/rmvpgibbs_rcpp.r                                 | 123 ++++
 R/rmvst.R                                          |   8 -
 R/{rnegbinRw.R => rnegbinrw_rcpp.r}                | 376 ++++------
 R/{rnmixGibbs.R => rnmixgibbs_rcpp.r}              | 372 +++++-----
 R/{rordprobitGibbs.R => rordprobitgibbs_rcpp.r}    | 472 +++++-------
 R/rscaleUsage.R                                    | 446 ------------
 R/rscaleusage_rcpp.r                               | 254 +++++++
 R/{rsurGibbs.R => rsurgibbs_rcpp.r}                | 349 +++++----
 R/rtrun.R                                          |  11 -
 R/{runireg.R => runireg_rcpp.r}                    | 268 +++----
 R/{runiregGibbs.R => runireggibbs_rcpp.r}          | 266 +++----
 R/rwishart.R                                       |  30 -
 R/simnhlogit.R                                     |  14 +-
 R/summary.bayesm.var.R                             |   2 +-
 data/Scotch.rda                                    | Bin 5884 -> 6101 bytes
 data/bank.rda                                      | Bin 19548 -> 19940 bytes
 data/cheese.rda                                    | Bin 63660 -> 63800 bytes
 data/customerSat.rda                               | Bin 8992 -> 8760 bytes
 data/datalist                                      |   8 -
 data/detailing.rda                                 | Bin 35584 -> 33268 bytes
 data/margarine.rda                                 | Bin 23576 -> 23348 bytes
 data/orangeJuice.rda                               | Bin 595416 -> 591712 bytes
 data/tuna.rda                                      | Bin 40076 -> 40040 bytes
 inst/doc/Some_Useful_R_Pointers.pdf                | Bin 598836 -> 0 bytes
 inst/doc/Tips_On_Using_bayesm.pdf                  | Bin 54746 -> 0 bytes
 inst/doc/bayesm-manual.pdf                         | Bin 359770 -> 0 bytes
 inst/include/bayesm.h                              | 151 ++++
 man/bank.Rd                                        | 254 +++----
 man/breg.Rd                                        |   2 +-
 man/cgetC.Rd                                       |   2 +-
 man/cheese.Rd                                      | 166 ++---
 man/clusterMix.Rd                                  | 176 ++---
 man/condMom.Rd                                     |   2 +-
 man/customerSat.Rd                                 |  76 +-
 man/detailing.Rd                                   |  24 +-
 man/eMixMargDen.Rd                                 |   4 +-
 man/ghkvec.Rd                                      | 115 +--
 man/llmnl.Rd                                       |   2 +-
 man/llnhlogit.Rd                                   |  27 +-
 man/lndIChisq.Rd                                   |   8 +-
 man/lndMvn.Rd                                      |   2 +-
 man/lndMvst.Rd                                     |   4 +-
 man/logMargDenNR.Rd                                |  70 +-
 man/margarine.Rd                                   |  52 +-
 man/mixDen.Rd                                      |   4 +-
 man/mixDenBi.Rd                                    |   4 +-
 man/mnlHess.Rd                                     |  88 +--
 man/momMix.Rd                                      |   2 +-
 man/orangeJuice.Rd                                 |  68 +-
 man/plot.bayesm.hcoef.Rd                           |   2 +-
 man/plot.bayesm.mat.Rd                             |  10 +-
 man/plot.bayesm.nmix.Rd                            |  12 +-
 man/rDPGibbs.Rd                                    |  79 +-
 man/rbayesBLP.Rd                                   | 236 ++++++
 man/rbiNormGibbs.Rd                                |   2 +-
 man/rbprobitGibbs.Rd                               |   3 +-
 man/rhierBinLogit.Rd                               |   6 +-
 man/rhierLinearMixture.Rd                          | 308 ++++----
 man/rhierLinearModel.Rd                            | 201 ++---
 man/rhierMnlDP.Rd                                  | 454 ++++++------
 man/rhierMnlRwMixture.Rd                           | 366 +++++-----
 man/rhierNegbinRw.Rd                               | 284 ++++----
 man/rivDP.Rd                                       |  64 +-
 man/rivGibbs.Rd                                    | 197 ++---
 man/rmixGibbs.Rd                                   |   3 +-
 man/rmixture.Rd                                    |   2 +-
 man/rmnlIndepMetrop.Rd                             | 187 ++---
 man/rmnpGibbs.Rd                                   | 239 +++---
 man/rmultireg.Rd                                   |   8 +-
 man/rmvpGibbs.Rd                                   |   9 +-
 man/rnegbinRw.Rd                                   |  13 +-
 man/rnmixGibbs.Rd                                  |  13 +-
 man/rordprobitGibbs.Rd                             | 231 +++---
 man/rscaleUsage.Rd                                 |   4 +-
 man/rsurGibbs.Rd                                   |   9 +-
 man/runireg.Rd                                     |  11 +-
 man/runiregGibbs.Rd                                |   7 +-
 man/rwishart.Rd                                    |   2 +-
 man/simnhlogit.Rd                                  | 101 +--
 man/summary.bayesm.mat.Rd                          |   9 +-
 man/summary.bayesm.nmix.Rd                         |   2 +-
 man/summary.bayesm.var.Rd                          |   4 +-
 man/tuna.Rd                                        | 222 +++---
 src/Makevars                                       |   2 +
 src/Makevars.win                                   |   2 +
 src/bayesBLP_rcpp_loop.cpp                         | 508 +++++++++++++
 src/bayesmc.c                                      | 359 ---------
 src/bayesmcpp.cpp                                  |  81 ---
 src/breg_rcpp.cpp                                  |  23 +
 src/cgetC_rcpp.cpp                                 |  47 ++
 src/clusterMix_rcpp_loop.cpp                       | 145 ++++
 src/functionTiming.cpp                             |  30 +
 src/ghkvec_rcpp.cpp                                | 196 +++++
 src/llmnl_rcpp.cpp                                 |  23 +
 src/lndIChisq_rcpp.cpp                             |  12 +
 src/lndIWishart_rcpp.cpp                           |  36 +
 src/lndMvn_rcpp.cpp                                |  18 +
 src/lndMvst_rcpp.cpp                               |  25 +
 src/rDPGibbs_rcpp_loop.cpp                         | 168 +++++
 src/rbprobitGibbs_rcpp_loop.cpp                    |  55 ++
 src/rcppexports.cpp                                | 758 +++++++++++++++++++
 src/rdirichlet_rcpp.cpp                            |  19 +
 src/rhierLinearMixture_rcpp_loop.cpp               | 120 +++
 src/rhierLinearModel_rcpp_loop.cpp                 | 137 ++++
 src/rhierMnlDP_rcpp_loop.cpp                       | 343 +++++++++
 src/rhierMnlRwMixture_rcpp_loop.cpp                | 119 +++
 src/rhierNegbinRw_rcpp_loop.cpp                    | 169 +++++
 src/rivDP_rcpp_loop.cpp                            | 426 +++++++++++
 src/rivgibbs_rcpp_loop.cpp                         | 135 ++++
 src/rmixGibbs_rcpp.cpp                             | 178 +++++
 src/rmixture_rcpp.cpp                              |  65 ++
 src/rmnlIndepMetrop_rcpp_loop.cpp                  |  63 ++
 src/rmnpGibbs_rcpp_loop.cpp                        | 143 ++++
 src/rmultireg_rcpp.cpp                             |  70 ++
 src/rmvpGibbs_rcpp_loop.cpp                        | 135 ++++
 src/rmvst_rcpp.cpp                                 |  15 +
 src/rnegbinRw_rcpp_loop.cpp                        |  98 +++
 src/rnmixGibbs_rcpp_loop.cpp                       |  46 ++
 src/rordprobitGibbs_rcpp_loop.cpp                  | 211 ++++++
 src/rscaleUsage_rcpp_loop.cpp                      | 442 +++++++++++
 src/rsurGibbs_rcpp_loop.cpp                        | 129 ++++
 src/rtrun_rcpp.cpp                                 |  17 +
 src/runiregGibbs_rcpp_loop.cpp                     |  74 ++
 src/runireg_rcpp_loop.cpp                          |  74 ++
 src/rwishart_rcpp.cpp                              |  43 ++
 src/thetadraw.c                                    | 151 ----
 src/utilityFunctions.cpp                           | 789 ++++++++++++++++++++
 171 files changed, 13010 insertions(+), 9508 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
old mode 100755
new mode 100644
index 2f265fe..ebfed7a
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,32 +1,42 @@
 Package: bayesm
-Version: 2.2-5
-Date: 2012-05-15
-Title: Bayesian Inference for Marketing/Micro-econometrics
-Author: Peter Rossi <perossichi at gmail.com>.
-Maintainer: Peter Rossi <perossichi at gmail.com>
+Version: 3.0-2
+Type: Package
+Title: Bayesian Inference for Marketing/Micro-Econometrics
 Depends: R (>= 2.10)
-Description: bayesm covers many important models used in marketing and
-        micro-econometrics applications.  The package includes: Bayes
-        Regression (univariate or multivariate dep var), Bayes
-        Seemingly Unrelated Regression (SUR), Binary and Ordinal
-        Probit, Multinomial Logit (MNL) and Multinomial Probit (MNP),
-        Multivariate Probit, Negative Binomial (Poisson) Regression,
-        Multivariate Mixtures of Normals (including clustering),
-        Dirichlet Process Prior Density Estimation with normal base,
-        Hierarchical Linear Models with normal prior and covariates,
-        Hierarchical Linear Models with a mixture of normals prior and
-        covariates, Hierarchical Multinomial Logits with a mixture of
-        normals prior and covariates, Hierarchical Multinomial Logits
-        with a Dirichlet Process prior and covariates, Hierarchical
-        Negative Binomial Regression Models, Bayesian analysis of
-        choice-based conjoint data, Bayesian treatment of linear
-        instrumental variables models, and Analysis of Multivariate
-        Ordinal survey data with scale usage heterogeneity (as in Rossi
-        et al, JASA (01)).  For further reference, consult our book,
-        Bayesian Statistics and Marketing by Rossi, Allenby and
-        McCulloch.
+Date: 2015-06-15
+Author: Peter Rossi <perossichi at gmail.com>
+Maintainer: Peter Rossi <perossichi at gmail.com>
 License: GPL (>= 2)
+Imports: Rcpp (>= 0.11.3)
+LinkingTo: Rcpp, RcppArmadillo
 URL: http://www.perossi.org/home/bsm-1
-Packaged: 2012-05-15 22:08:17 UTC; perossichi
+Description: Covers many important models used
+  in marketing and micro-econometrics applications. 
+  The package includes:
+  Bayes Regression (univariate or multivariate dep var),
+  Bayes Seemingly Unrelated Regression (SUR),
+  Binary and Ordinal Probit,
+  Multinomial Logit (MNL) and Multinomial Probit (MNP),
+  Multivariate Probit,
+  Negative Binomial (Poisson) Regression,
+  Multivariate Mixtures of Normals (including clustering),
+  Dirichlet Process Prior Density Estimation with normal base,
+  Hierarchical Linear Models with normal prior and covariates,
+  Hierarchical Linear Models with a mixture of normals prior and covariates,
+  Hierarchical Multinomial Logits with a mixture of normals prior
+     and covariates,
+  Hierarchical Multinomial Logits with a Dirichlet Process prior and covariates,
+  Hierarchical Negative Binomial Regression Models,
+  Bayesian analysis of choice-based conjoint data,
+  Bayesian treatment of linear instrumental variables models,
+  Analysis of Multivariate Ordinal survey data with scale
+     usage heterogeneity (as in Rossi et al, JASA (01)),
+  Bayesian Analysis of Aggregate Random Coefficient Logit Models as in BLP (see
+  Jiang, Manchanda, Rossi 2009)
+  For further reference, consult our book, Bayesian Statistics and
+  Marketing by Rossi, Allenby and McCulloch (Wiley 2005) and Bayesian Non- and Semi-Parametric
+  Methods and Applications (Princeton U Press 2014).
+NeedsCompilation: yes
+Packaged: 2015-06-20 06:26:44 UTC; ripley
 Repository: CRAN
-Date/Publication: 2012-05-16 07:49:34
+Date/Publication: 2015-06-20 08:33:45
diff --git a/MD5 b/MD5
index 4028f75..7076cf9 100644
--- a/MD5
+++ b/MD5
@@ -1,141 +1,166 @@
-25ab0816dcfccb2b5c1148c87e1f715a *DESCRIPTION
-104c849c818b9997da00644d0a85a400 *NAMESPACE
-401e4f6588cb831c2f8b4f35ba2bc1af *R/breg.R
-33cab325b0b50726929a8392e9c909e0 *R/cgetC.R
-027f3ae5fd398edd39b46aa12531609f *R/clusterMix.R
+5e6b8ff59c55ec888fc23256f6a5edc5 *DESCRIPTION
+ea2da2fb3b1d8a3583ca145c403bfcbf *NAMESPACE
+ea81ddf03297bdab18de68ef0fce3434 *R/BayesmConstants.R
+750237445e7fb47a0c72db9038bad9b3 *R/BayesmFunctions.R
+5f045fe686e9700b1e69e17eb2cd1a4b *R/clusterMix_rcpp.R
 0207b70adcd95129ba38b6177b235d51 *R/condMom.R
-5791169a4fdf78dc93bfaebfa1bcfd46 *R/createX.R
+7e3f01b37b79b1cb694dcc89bef6801b *R/createX.R
 f0ab1f89e4c7b93a3abf2969c94b4b19 *R/eMixMargDen.R
 a6774f78661955079a5fae59bb3be712 *R/fsh.R
-0a73563b2dd400bf166539d86416eeec *R/ghkvec.R
-dc005ba67612e70f9786831cd706c71c *R/llmnl.R
 e37549412e0e9dbac1d400e99c28f82a *R/llmnp.R
-17028c8350c0ce277541b05f9b5cf492 *R/llnhlogit.R
-8b896bc1e1c112477addd5383ba74434 *R/lndIChisq.R
-12a19ef6193035d0ebf589a2caeba448 *R/lndIWishart.R
-d1e675e3b8c4e0906bb8f7b5224f52ac *R/lndMvn.R
-d8e8c68d2b6b453d40d13c09a99e3296 *R/lndMvst.R
+4af4b88d141c001e1a076bacb1059ade *R/llnhlogit.R
 a6170041c44b4b577f7d54a606d47b66 *R/logMargDenNR.R
 527cad4b4579e5b54a81980efd32d5b9 *R/mixDen.R
 0d7e8a1405b2ea906f5e253f419e285a *R/mixDenBi.R
 b707216d4a165cafcfd77ff74ca32d4e *R/mnlHess.R
-24101c4b685854e3969f56bacfb6542b *R/mnpProb.R
+94567c1ff99a7aec5b4fb8a884001520 *R/mnpProb.R
 e8c68b7db1d51d1ce270e09648db3112 *R/momMix.R
 dd4a04551d37b77645a4cb38b3feb114 *R/nmat.R
 72dbf320a442a6ee65a3de8bc4e6f238 *R/numEff.R
-883e66377d96e886e35ec88cfe5862e6 *R/plot.bayesm.hcoef.R
-101314f685d135205cf41fc7843dba0e *R/plot.bayesm.mat.R
+951afd2ed67014aa202714d0a408affa *R/plot.bayesm.hcoef.R
+c1a860d16faa2f40364ba2a535bf1eaa *R/plot.bayesm.mat.R
 d14eae20edc467e1557775af62c45eb0 *R/plot.bayesm.nmix.R
-8f1b246d9e942b936c1464bf5cc7b3e4 *R/rDPGibbs.R
-21b7c833644b73a172b135e212503c4a *R/rbiNormGibbs.R
-082e9caa253807a6b80ffcd6f79cae49 *R/rbprobitGibbs.R
-493df9bf9d99695645b005e20440609a *R/rdirichlet.R
-6b0c22ba7a9f46c474549a251a18d029 *R/rhierBinLogit.R
-26e849dced2247c6f5406c9534236b06 *R/rhierLinearMixture.R
-b4933e7df564ab1c246e7eb80b7e586b *R/rhierLinearModel.R
-abc7ef17f7f5ae2892dcbcaeef013e98 *R/rhierMnlDP.R
-475c9f82dff4c4e6405e1e5fcaa330c4 *R/rhierMnlRwMixture.R
-c0aa928d08b44fb6f5d027e1b46e0a7e *R/rhierNegbinRw.R
-fa9ea7d874426ebdb28ba2a318e6c147 *R/rivDP.R
-3b085ca153175acb96adb1cfb8ae7807 *R/rivGibbs.R
-c6307313b5535c2af034e1ce36482fd6 *R/rmixGibbs.R
-189f7b8890f155199e12d1557d563fb9 *R/rmixture.R
-06a026131b0906cb0e9765cd64c7b882 *R/rmnlIndepMetrop.R
-d0909f46470a76783ddf64c9b588a9a3 *R/rmnpGibbs.R
-179c70281b377291e0242ea44448abc4 *R/rmultireg.R
-0c3a511cd31ead4bbdb4a9b709ec528e *R/rmvpGibbs.R
-bbb32ab3df56506fa65b3b0577610310 *R/rmvst.R
-9fcb24261a1209b72fde7279c113c166 *R/rnegbinRw.R
-2b182fc7f36ae5802a4cdcef71b68e19 *R/rnmixGibbs.R
-670579428617e58ecd80b4b64b32545f *R/rordprobitGibbs.R
-a5935a0e04f10658a8a343161bb745a7 *R/rscaleUsage.R
-a537b2d1d2c8b70a1413f003953f82ef *R/rsurGibbs.R
-9ef931fbb05babb186e1c33616414071 *R/rtrun.R
-e247e2a2f2e07a38a191ddb3e16d4cc7 *R/runireg.R
-4bfb71f2356dc55d821074022181596f *R/runiregGibbs.R
-70abbbde4483632234b2972dc93814a4 *R/rwishart.R
-3b56c8bdb18a20a539f27572d68da109 *R/simnhlogit.R
+266b2acb8c78149259e0064b02ee6efb *R/rbayesBLP_rcpp.R
+394e1e4dec4ee095a7bcdcf2946cab20 *R/rbiNormGibbs.R
+ed2da9e8111f456b3dda159bd4174732 *R/rbprobitgibbs_rcpp.r
+39143486c417d6bdf4257bd6abe2faa0 *R/rcppexports.r
+03b396cb0761679c10b794a85cb16550 *R/rdpgibbs_rcpp.r
+3b00639bc89b7c0a9b9d01949fef0a73 *R/rhierBinLogit.R
+93075ca09545be579dbd7637bf883148 *R/rhierLinearMixture_rcpp.r
+0c8bbdb63fb81692bb053067f76f1f76 *R/rhierLinearModel_rcpp.R
+c91f43c2872a4b6ccf71e8e130fc0ba5 *R/rhierMnlDP_rcpp.r
+042c4daec89e340a04af32874ad76e12 *R/rhierMnlRwMixture_rcpp.r
+1cac088d907cc90eab8696cb2d964ee4 *R/rhiernegbinrw_rcpp.r
+5458944255970eabf8b231c5d637b580 *R/rivDP_rcpp.R
+28f955518fbdde816474031b5be44dab *R/rivGibbs_rcpp.R
+4b5f2862c7e5f3f4248b22c34eafb9e9 *R/rmnlIndepMetrop_rcpp.R
+cc1d6003dd818c72a29378a31a3494ea *R/rmnpgibbs_rcpp.r
+f2e92559c7ed6e5d303b25de9a6ceffb *R/rmvpgibbs_rcpp.r
+21264a02c76366e12f58da87ef5292c4 *R/rnegbinrw_rcpp.r
+5a26eb077e559bdabb181249f588bfef *R/rnmixgibbs_rcpp.r
+47ee5e7477db90d0474b26dbf9beed15 *R/rordprobitgibbs_rcpp.r
+2d530b64070f91394856b7389b2cf4ed *R/rscaleusage_rcpp.r
+81e7642e9b3c45ec3869b4f4928e777e *R/rsurgibbs_rcpp.r
+95f72485c41a622425a6ed05c0d1f11e *R/runireg_rcpp.r
+0fab16ddc9a6d4572c2e1bbf7a902226 *R/runireggibbs_rcpp.r
+88c70d97e33d665f93f08e5c5a7e31e8 *R/simnhlogit.R
 f6bbbc1dd104aa996c7ba62f17c1460b *R/summary.bayesm.mat.R
 14b1a1bf0cd8784439a7c16d2408ddde *R/summary.bayesm.nmix.R
-b32525a1cb91d95c80dc004797961065 *R/summary.bayesm.var.R
-b8aa164aa098ca3a09cf1caec14213fc *data/Scotch.rda
-aa29da9571ca351139d229453e99a3f4 *data/bank.rda
-ac9a5b4c9362e863a57a20dbe903c654 *data/cheese.rda
-cf5a6146480288dbdc12667884707c0b *data/customerSat.rda
-878b1056a05daf4bf6c5b6517a8dbad2 *data/datalist
-c7f757303437e1793e1ea493bbe71b44 *data/detailing.rda
-514fe7d568a94ae1bdfe060ecbbb893a *data/margarine.rda
-dfb9e017e59a1a4a05132e9268d12ddd *data/orangeJuice.rda
-c276ad0aace069b133741379be312f53 *data/tuna.rda
-545ea1221834d4224e011f6ea0c5ef72 *inst/doc/Some_Useful_R_Pointers.pdf
-e86b96859b1ae776c3f375fea5c9e5b5 *inst/doc/Tips_On_Using_bayesm.pdf
-11bc880c72a246a1b2d7b637b7a33666 *inst/doc/bayesm-manual.pdf
+8fa4d18ae9076fb6a474d7483386f432 *R/summary.bayesm.var.R
+fa58bfb68b79925644d958bdd659b14c *data/Scotch.rda
+8233d19a217afa65d03bbd5e1f95cec8 *data/bank.rda
+970c1954f36021804256b1cb160e2f9b *data/cheese.rda
+12629326a348c7901248e33e76987858 *data/customerSat.rda
+b64a79aed4d1f4e6f64593aba5aba322 *data/detailing.rda
+0bb56ee15246d15c8b7931f98e332693 *data/margarine.rda
+506ede0b22d91973c03085ca17ba9d8e *data/orangeJuice.rda
+dbce99be695403a0df5c8874b8f6ddcd *data/tuna.rda
+8027acad609eb9337a714e12604353a0 *inst/include/bayesm.h
 a3bb8beb9f2ab9a2a1da59db8ba50596 *man/Scotch.Rd
-d68d3fa7019a5b1cfad55a258408a180 *man/bank.Rd
-1fe66a704a13b451b99229f35ba1f1a8 *man/breg.Rd
-f07a713b4091c4f2ed98263371d37b2c *man/cgetC.Rd
-b8cab7715f713e1f7d1a14b939d963f9 *man/cheese.Rd
-1198306509a1cf7ef3dd798053349c31 *man/clusterMix.Rd
-4d1652da474399178b1df1e1bfb8887e *man/condMom.Rd
+234d462adaa4a59ffa377ff866e2b901 *man/bank.Rd
+297ef5281bbe744df143dbf743f6a057 *man/breg.Rd
+a2870a25cbd9d49779cb33826a395c40 *man/cgetC.Rd
+2f920789df84c6dd9b7e5c9a665f2de5 *man/cheese.Rd
+5677d4be5c7095c0b2ca490229b667db *man/clusterMix.Rd
+369a9b81f7589dca66a55bf4210603d3 *man/condMom.Rd
 14e31bbafc5799a43fa859a8f55acd1a *man/createX.Rd
-62681ec3b2bf9d998ac53196b4df5529 *man/customerSat.Rd
-17c6e1a9547ca791481353f3568baf02 *man/detailing.Rd
-5c61f699c189babdf28fdef19eae8ce0 *man/eMixMargDen.Rd
+4fcbde608314bf61225065afd7ab7aa3 *man/customerSat.Rd
+d18ab9a284aef00552152560735ebb41 *man/detailing.Rd
+8fefb2956c9d2acf97324399fe9fe210 *man/eMixMargDen.Rd
 407c7daf8cd13173b31845dab494a747 *man/fsh.Rd
-b362664b1788e3b90bd18b718104e24e *man/ghkvec.Rd
-38e2c94c6ab0693b2322095fd0a2e7f2 *man/llmnl.Rd
+eb2d9b080d8d1c2527f8dcca91805837 *man/ghkvec.Rd
+be19d4382d9162cb9cb0de105d40f238 *man/llmnl.Rd
 a54ed9dafaf397dc86ad9070a4b24aab *man/llmnp.Rd
-88d14f235d05d54a93c04522be66421b *man/llnhlogit.Rd
-c9e1ff340b37cf06ca4e8ec727c4a55a *man/lndIChisq.Rd
+645fe7830a74cbf56f7bc9906a7b1e7d *man/llnhlogit.Rd
+a0bf64f043fa44ddfd64d4b6199444d4 *man/lndIChisq.Rd
 faab40a3b3840e9a1322afa53a9de958 *man/lndIWishart.Rd
-4d7412ecb025dc439875696490c2434c *man/lndMvn.Rd
-c3b2c13159fb068e92a4071438a29a5d *man/lndMvst.Rd
-05ee3818fbde059250c9ee32ae945034 *man/logMargDenNR.Rd
-242b4f28697e70c21abf043b2b91b72b *man/margarine.Rd
-affbc038f38d8a1e1d2fd6bd0ab4f5e0 *man/mixDen.Rd
-9cd5aa24ea62d764ab005706efe0e5b9 *man/mixDenBi.Rd
-957a5b9d6f0f2eca48e5046ebe6451a1 *man/mnlHess.Rd
+46d31ea5da944e77fcbf1ea5c6807966 *man/lndMvn.Rd
+99ef9b9659834dcef31e174daf497a2f *man/lndMvst.Rd
+346496a8b9423bdb332a5b69b71e8bef *man/logMargDenNR.Rd
+61267c2169ffbb11503d4a381247e5ca *man/margarine.Rd
+b3d8c9b59d585079c547358989fc82a3 *man/mixDen.Rd
+23d2cfd58e1c67666369b15b9a169b29 *man/mixDenBi.Rd
+82e348b0be3379dbce07567a3b8316bf *man/mnlHess.Rd
 7dc6205db53ce866c0eb8aa6d8d884c7 *man/mnpProb.Rd
-c12fd2f67a955f37461cd7a66bf39b2f *man/momMix.Rd
+65e78ae6adc8f941ae14f9e8e32322ca *man/momMix.Rd
 b5efa10853ed50a4da769df0065a0e44 *man/nmat.Rd
 bdcdc98e07fd54e282f9c1aab1053a5a *man/numEff.Rd
-34829b72b32a6e25aee9409af90b50f8 *man/orangeJuice.Rd
-987d158c20b58a3edd18dfde6fe57d9b *man/plot.bayesm.hcoef.Rd
-eb4fe541950cdaaa6eb8aa6267024127 *man/plot.bayesm.mat.Rd
-6cbfec0e72b35c340894665538d27047 *man/plot.bayesm.nmix.Rd
-56464607c7f96a36b7430eae79acbe85 *man/rDPGibbs.Rd
-4ea31f59406ff8b724813e3c8601257e *man/rbiNormGibbs.Rd
-18723eb9100eca82315d5b97b0853bb0 *man/rbprobitGibbs.Rd
+0562919d6e47ba7614f37bea4eaaf7a6 *man/orangeJuice.Rd
+0f809e54f79df741951ea58d3a5ddbe7 *man/plot.bayesm.hcoef.Rd
+2afdb50ca28db3f5fb8b18afbce84165 *man/plot.bayesm.mat.Rd
+4283d26b77e4f57f82243c74696e79fb *man/plot.bayesm.nmix.Rd
+9ab0cf5b31e86ed12430016bb3bada71 *man/rDPGibbs.Rd
+d24ca816bef1099cefbb4c50b8ff900b *man/rbayesBLP.Rd
+bd9a9a496327cbd92e4718c867043b10 *man/rbiNormGibbs.Rd
+c77e63a7ef5aac8c3cfecd1941a0ea79 *man/rbprobitGibbs.Rd
 80e8d4e72e35691a571406a0d7294985 *man/rdirichlet.Rd
-95aa69a6614375b5f1bd17a708f7d02b *man/rhierBinLogit.Rd
-062430837353cb9f0c3a22c50f7f6d10 *man/rhierLinearMixture.Rd
-edf286793a792a8294250ac37b6020fa *man/rhierLinearModel.Rd
-e4bc42345f8801febeb9364d4e63c43e *man/rhierMnlDP.Rd
-bba3de792daa1d00f4d8f4d8e88fa8b9 *man/rhierMnlRwMixture.Rd
-7426ccb6ab1afefb658b6cc6dc2d46fa *man/rhierNegbinRw.Rd
-9fa7552e6c1f56ad9254d522f3333c51 *man/rivDP.Rd
-cc7ba89ad2a557b079599f8c27c7f383 *man/rivGibbs.Rd
-d350269f33b2d2ec71f1efac19142591 *man/rmixGibbs.Rd
-15771bcb72a8ade5d4450dcfc45efb32 *man/rmixture.Rd
-ce4f682014cbdf284d7a4c2caa5dfd6b *man/rmnlIndepMetrop.Rd
-94c1da07f825f873629a216e3d35f4f5 *man/rmnpGibbs.Rd
-0e8d71b14062ffb6ea8302dda67cca61 *man/rmultireg.Rd
-62c0b370cbb80ab572a84b89df4e0ea6 *man/rmvpGibbs.Rd
+b966143404112a4a03203a3a785b6600 *man/rhierBinLogit.Rd
+f3e4fa56b5075d2dc4969161baaa5427 *man/rhierLinearMixture.Rd
+adf9ef79557043156e3c144f8de2e825 *man/rhierLinearModel.Rd
+c3c5d0105bf6eebdcd1cb866ca4f3d12 *man/rhierMnlDP.Rd
+fe6a8c618357e5b17641ffd0dfe5145d *man/rhierMnlRwMixture.Rd
+9154d5bc301d22f0dab96b16b0be070c *man/rhierNegbinRw.Rd
+ab1ce054f1f26aa07c715ad534c4f6d0 *man/rivDP.Rd
+e00c6b3fe359ba432d257b8a516c7949 *man/rivGibbs.Rd
+64e3f05cf839fccd2d2cbe2d2f3e06d8 *man/rmixGibbs.Rd
+d8b03ae6d5954370c213476b46a95542 *man/rmixture.Rd
+ee26dd073fb86316dcdfcfb5f4de53fb *man/rmnlIndepMetrop.Rd
+256e9839df13b7a5ecd71dc75c2a59fe *man/rmnpGibbs.Rd
+267eb138e86cb65ee89ddb88fb551845 *man/rmultireg.Rd
+dff676b6053796baa8e11694c3b07f49 *man/rmvpGibbs.Rd
 73189b4e83339b6f2c08c0d42193ed1c *man/rmvst.Rd
-aa0fbf64304df8faafc0f3c3b56491cd *man/rnegbinRw.Rd
-89e693c58aba4f33ca7bc10081355532 *man/rnmixGibbs.Rd
-627cc5ce56e3a0447472d278e9fda82a *man/rordprobitGibbs.Rd
-6b0e0e5bb98ca051cafd77283645db1c *man/rscaleUsage.Rd
-2786573e1f0b349e9f0ab98515635f9c *man/rsurGibbs.Rd
+a98c5ca5289dcd531fa37fa7f3b3fa46 *man/rnegbinRw.Rd
+f9858ea967859b73164f0e764f9bc867 *man/rnmixGibbs.Rd
+722c4e0df21a259028b4c2ab86dbec2a *man/rordprobitGibbs.Rd
+2f0e91ff9408bf8db217a9e38562c60a *man/rscaleUsage.Rd
+604620b11476c6200054db62ff8dcc96 *man/rsurGibbs.Rd
 239d4e36330f6192286cfa5357551cfb *man/rtrun.Rd
-68c3de2eeb01d352010bb25ec17805cf *man/runireg.Rd
-ccebdab5cd015f22f3dc6826f6774cf0 *man/runiregGibbs.Rd
-d75ecd347f67dce2c096c7f87f0be264 *man/rwishart.Rd
-da878a797b7a6f8266734fa9b6b6703f *man/simnhlogit.Rd
-c2ec8e3bb180f526cd4cf804717ad201 *man/summary.bayesm.mat.Rd
-30de937fd9d54e6baf0d3fbe9c0cd629 *man/summary.bayesm.nmix.Rd
-33da73c40f373ab99ca9a4b3ea462961 *man/summary.bayesm.var.Rd
-2a34a7a3f3b2737c22529384cc4cff81 *man/tuna.Rd
-3bca185c577e354d7af15e8779148d57 *src/bayesmc.c
-f1200fa8922a2fc8e400d76d51c53474 *src/bayesmcpp.cpp
-8769203cb0949b0efb56d26dad9a829f *src/thetadraw.c
+20b835b70f710aa198767d815f6cf62a *man/runireg.Rd
+48e9e031dfa97a687f98f3e2013da38c *man/runiregGibbs.Rd
+26eb2ba51550236c38b3436feacc00c6 *man/rwishart.Rd
+8e22a280ada00eac02ab34b269b3320d *man/simnhlogit.Rd
+dce44654b54b044dde8f3d1a6349a9a4 *man/summary.bayesm.mat.Rd
+d0cf5f1a2f978bc3cce09131463620d8 *man/summary.bayesm.nmix.Rd
+6804dad23a5959729ab10d419d507eb9 *man/summary.bayesm.var.Rd
+3deea074beb0a39d41d480dea815a786 *man/tuna.Rd
+7cc14aaf4e9b43167d156ac973e01c68 *src/Makevars
+206b1cc7f7c3d1cac15a878b9af8402f *src/Makevars.win
+b984d7eda3c8025d074b775487f1359b *src/bayesBLP_rcpp_loop.cpp
+0fa53738f4a680174a18f4bf4c4c9181 *src/breg_rcpp.cpp
+b025ae497f1779906440138d78c0c969 *src/cgetC_rcpp.cpp
+1ae014e74b5f4c73354a40218e2a612d *src/clusterMix_rcpp_loop.cpp
+c32622825b8839daad7ffe3a7c202a03 *src/functionTiming.cpp
+379eb42a07c86700af57a4437cd66d27 *src/ghkvec_rcpp.cpp
+ae68b66eaf3c0b468904456d044925a4 *src/llmnl_rcpp.cpp
+607d592ac59d827b9132d7ca42abadc0 *src/lndIChisq_rcpp.cpp
+af19b27155010f25f4bbbba68b8f3d5c *src/lndIWishart_rcpp.cpp
+9760670d64f12c4739ad92caecd1f452 *src/lndMvn_rcpp.cpp
+7cafc7daedb50ab544144792a17fdaef *src/lndMvst_rcpp.cpp
+a735b22374ad8921c1a75f5ed4268389 *src/rDPGibbs_rcpp_loop.cpp
+87da270b61e6675765e139c9d4c2f132 *src/rbprobitGibbs_rcpp_loop.cpp
+7e97c25a24da9ba262e5f49978e6a4ae *src/rcppexports.cpp
+629e27e539669185adde253bb9fd29ad *src/rdirichlet_rcpp.cpp
+25f702b04845af381debda04662996ca *src/rhierLinearMixture_rcpp_loop.cpp
+5bd2691fe8d987ea35c41e5bd51b8750 *src/rhierLinearModel_rcpp_loop.cpp
+eecdf06ebfa47c8d03d642fc971612ed *src/rhierMnlDP_rcpp_loop.cpp
+27e2d917d5870e4d00ad73fa651385ee *src/rhierMnlRwMixture_rcpp_loop.cpp
+f4b8bcd25db40504a7c41db12fd18bf4 *src/rhierNegbinRw_rcpp_loop.cpp
+3634947ff8002a78ed96400f52ef345e *src/rivDP_rcpp_loop.cpp
+997068a7121f48ee3d9422e9062523db *src/rivgibbs_rcpp_loop.cpp
+cfd7582d1a30e8636e34450da1f7a0d2 *src/rmixGibbs_rcpp.cpp
+e5ca228c396cb5f46a8e1f1f96002dc1 *src/rmixture_rcpp.cpp
+c1c673a3620f1589ab0c659b20944d1a *src/rmnlIndepMetrop_rcpp_loop.cpp
+57a93ca0f630871d9e452319c924980c *src/rmnpGibbs_rcpp_loop.cpp
+f44ce2e6bf67bf0e28fc9ebe4a634209 *src/rmultireg_rcpp.cpp
+4433976965fea5eefbb32f668ec7366d *src/rmvpGibbs_rcpp_loop.cpp
+2dd06d455565e503d1c50f8fcea0b30a *src/rmvst_rcpp.cpp
+53a7f3f125740e3ef7fc76e125c965ed *src/rnegbinRw_rcpp_loop.cpp
+0ac7a9acade4ba0bfa6516e058555f72 *src/rnmixGibbs_rcpp_loop.cpp
+65554b346373018519da4beb78784bd8 *src/rordprobitGibbs_rcpp_loop.cpp
+21e4f4effb0614f635aeb9fa12d6348e *src/rscaleUsage_rcpp_loop.cpp
+fea764d71691a8c15b45005d568c365b *src/rsurGibbs_rcpp_loop.cpp
+efcf97fd8da23a7f47f60d8cc346c079 *src/rtrun_rcpp.cpp
+734a4038b98efc1439c2c080591a2bdb *src/runiregGibbs_rcpp_loop.cpp
+ba9cccc7a14e1640f4d7fcd61d9cf98b *src/runireg_rcpp_loop.cpp
+fa49ba36cfec5fe6cb7c93217346d36f *src/rwishart_rcpp.cpp
+d596ac72c0612ce2c887543ae0566c7b *src/utilityFunctions.cpp
diff --git a/NAMESPACE b/NAMESPACE
old mode 100755
new mode 100644
index f019aa1..5926a01
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -1,15 +1,19 @@
 useDynLib(bayesm)
+importFrom(Rcpp, evalCpp)
+## exportPattern("^[[:alpha:]]+") this line is automatically created when using package.skeleton but should be removed to prevent the _loop functions from exporting. Instead use the export() function (as is done here)
 
-export(breg,cgetC,createX,eMixMargDen,mixDen,fsh,llmnl,llmnp,llnhlogit,
-lndIChisq,lndIWishart,lndMvn,lndMvst,mnlHess,momMix,nmat,numEff,rdirichlet,
-rmixture,rmultireg,rwishart,rmvst,rtrun,rbprobitGibbs,runireg,
+export(breg,createX,eMixMargDen,mixDen,fsh,llmnl,llmnp,llnhlogit,
+lndIChisq,lndIWishart,lndMvn,lndMvst,mnlHess,momMix,nmat,numEff,rdirichlet, rmixture,rmultireg,
+rwishart,rmvst,rtrun,rbprobitGibbs,runireg,
 runiregGibbs,simnhlogit,rmnpGibbs,rmixGibbs,rnmixGibbs,
 rmvpGibbs,rhierLinearModel,rhierMnlRwMixture,rivGibbs,
 rmnlIndepMetrop,rscaleUsage,ghkvec,condMom,logMargDenNR,
 rhierBinLogit,rnegbinRw,rhierNegbinRw,rbiNormGibbs,clusterMix,rsurGibbs,
-mixDenBi,mnpProb,rhierLinearMixture,summary.bayesm.mat,plot.bayesm.mat,
-plot.bayesm.hcoef,plot.bayesm.nmix,rordprobitGibbs,rivGibbs,rivDP,rDPGibbs,
-rhierMnlDP)
+mixDenBi,mnpProb,rhierLinearMixture,
+summary.bayesm.mat,summary.bayesm.nmix,summary.bayesm.var,
+plot.bayesm.mat,plot.bayesm.hcoef,plot.bayesm.nmix,
+rordprobitGibbs,rivGibbs,rivDP,rDPGibbs,
+rhierMnlDP,cgetC,rbayesBLP)
 
 
 ## register S3 methods
@@ -19,10 +23,3 @@ S3method(plot, bayesm.hcoef)
 S3method(summary, bayesm.mat)
 S3method(summary, bayesm.var)
 S3method(summary, bayesm.nmix)
-
-
-
-
-
-
-
diff --git a/R/BayesmConstants.R b/R/BayesmConstants.R
new file mode 100644
index 0000000..a80db89
--- /dev/null
+++ b/R/BayesmConstants.R
@@ -0,0 +1,32 @@
+#MCMC
+BayesmConstant.keep = 1             #keep every keepth draw for MCMC routines
+BayesmConstant.nprint = 100         #print the remaining time on every nprint'th draw
+BayesmConstant.RRScaling = 2.38     #Roberts and Rosenthal optimal scaling constant
+BayesmConstant.w = .1               #fractional likelihood weighting parameter
+
+#Priors
+BayesmConstant.A = .01              #scaling factor for the prior precision matrix
+BayesmConstant.nuInc = 3            #Increment for nu
+BayesmConstant.a = 5                #Dirichlet parameter for mixture models
+BayesmConstant.nu.e = 3             #degrees of freedom parameter for regression error variance prior
+BayesmConstant.nu = 3               #degrees of freedom parameter for Inverted Wishart prior
+BayesmConstant.agammaprior = .5     #Gamma prior parameter
+BayesmConstant.bgammaprior = .1     #Gamma prior parameter
+
+#DP
+BayesmConstant.DPalimdef=c(.01,10)  #defines support of 'a' distribution
+BayesmConstant.DPnulimdef=c(.01,3)  #defines support of nu distribution
+BayesmConstant.DPvlimdef=c(.1,4)    #defines support of v distribution
+BayesmConstant.DPIstarmin = 1       #expected number of components at lower bound of support of alpha
+BayesmConstant.DPpower = .8         #power parameter for alpha prior
+BayesmConstant.DPalpha = 1.0        #intitalized value for alpha draws
+BayesmConstant.DPmaxuniq = 200      #storage constraint on the number of unique components
+BayesmConstant.DPSCALE = TRUE       #should data be scaled by mean,std deviation before posterior draws
+BayesmConstant.DPgridsize = 20      #number of discrete points for hyperparameter priors
+
+#Mathematical Constants
+BayesmConstant.gamma = .5772156649015328606
+
+#BayesBLP
+BayesmConstant.BLPVOmega = matrix(c(1,0.5,0.5,1),2,2)  #IW prior parameter of correlated shocks in IV bayesBLP
+BayesmConstant.BLPtol = 1e-6
\ No newline at end of file
diff --git a/R/BayesmFunctions.R b/R/BayesmFunctions.R
new file mode 100644
index 0000000..323a08c
--- /dev/null
+++ b/R/BayesmFunctions.R
@@ -0,0 +1 @@
+pandterm=function(message) { stop(message,call.=FALSE) }
\ No newline at end of file
diff --git a/R/breg.R b/R/breg.R
deleted file mode 100755
index 8ccb857..0000000
--- a/R/breg.R
+++ /dev/null
@@ -1,25 +0,0 @@
-breg=
-function(y,X,betabar,A) 
-{
-#
-# P.Rossi 12/04
-#  revision history:
-#    P. Rossi 3/27/05 -- changed to augment strategy
-#
-# Purpose: draw from posterior for linear regression, sigmasq=1.0
-#
-# Output:  draw from posterior
-# 
-# Model: y = Xbeta + e  e ~ N(0,I)
-#
-# Prior:  beta ~ N(betabar,A^-1)
-#
-k=length(betabar)
-RA=chol(A)
-W=rbind(X,RA)
-z=c(y,as.vector(RA%*%betabar))
-IR=backsolve(chol(crossprod(W)),diag(k))
-#      W'W=R'R ;  (W'W)^-1 = IR IR'  -- this is UL decomp
-return(crossprod(t(IR))%*%crossprod(W,z)+IR%*%rnorm(k))
-
-}
diff --git a/R/cgetC.R b/R/cgetC.R
deleted file mode 100755
index fdd5f39..0000000
--- a/R/cgetC.R
+++ /dev/null
@@ -1,14 +0,0 @@
-cgetC = function(e,k) 
-{
-# purpose: get a list of cutoffs for use with scale usage problems
-#
-# arguments:
-#   e: the "e" parameter from the paper
-#   k: the point scale, eg. items are rated from 1,2,...k
-# output:
-#   vector of grid points
-temp = (1:(k-1))+.5
-m1 = sum(temp)
-m2 = sum(temp^2)
-return(.C('getC',as.double(e),as.integer(k),as.double(m1),as.double(m2),cc=double(k+1))$cc)
-}
diff --git a/R/clusterMix.R b/R/clusterMix.R
deleted file mode 100755
index 20d60e6..0000000
--- a/R/clusterMix.R
+++ /dev/null
@@ -1,131 +0,0 @@
-clusterMix=
-function(zdraw,cutoff=.9,SILENT=FALSE){
-#
-#
-# revision history:
-#   written by p. rossi 9/05
-#
-# purpose: cluster observations based on draws of indicators of 
-#   normal mixture components
-#
-# arguments:
-#   zdraw is a R x nobs matrix of draws of indicators (typically output from rnmixGibbs)
-#   the rth row of zdraw contains rth draw of indicators for each observations
-#   each element of zdraw takes on up to p values for up to p groups. The maximum
-#   number of groups is nobs.  Typically, however, the number of groups will be small
-#   and equal to the number of components used in the normal mixture fit.
-#
-#   cutoff is a cutoff used in determining one clustering scheme it must be 
-#   a number between .5 and 1.
-#
-# output:
-#   two clustering schemes each with a vector of length nobs which gives the assignment
-#   of each observation to a cluster
-#
-#   clustera (finds zdraw with similarity matrix closest to posterior mean of similarity)
-#   clusterb (finds clustering scheme by assigning ones if posterior mean of similarity matrix
-#             > cutoff and computing associated z )
-#
-# define needed functions
-#
-# ------------------------------------------------------------------------------------------   
-
-ztoSim=function(z){
-#
-# function to convert indicator vector to Similarity matrix
-# Sim is n x n matrix, Sim[i,j]=1 if pair(i,j) are in same group
-# z is n x 1 vector of indicators (1,...,p)
-#
-# p.rossi 9/05
-#
-n=length(z)
-zvec=c(rep(z,n))
-zcomp=z%x%c(rep(1,n))
-Sim=as.numeric((zvec==zcomp))
-dim(Sim)=c(n,n)
-return(Sim)
-}
-Simtoz=function(Sim){
-#
-# function to convert Similarity matrix to indicator vector
-#  Sim is n x n matrix, Sim[i,j]=1 if pair(i,j) are in same group
-#  z is vector of indicators from (1,...,p) of group memberships (dim n)
-#
-#
-# p.rossi 9/05
-n=ncol(Sim)
-z=double(n)
-i=1
-groupn=1
-while (i <= n){
-  validind=z==0
-  if(sum(Sim[validind,i]==1)>=1) {
-     z[validind]=as.numeric(Sim[validind,i]==1)*groupn
-     groupn=groupn+1
-  }
-  i=i+1
-}
-return(z)
-} 
-# ----------------------------------------------------------------------------------------
-#
-# check arguments
-#
-pandterm=function(message) { stop(message,call.=FALSE) }
-if(missing(zdraw)) {pandterm("Requires zdraw argument -- R x n matrix of indicator draws")}
-#
-# check validity of zdraw rows -- must be integers in the range 1:nobs
-#
-nobs=ncol(zdraw)
-R=nrow(zdraw)
-if(sum(zdraw %in% (1:nobs)) < ncol(zdraw)*nrow(zdraw))
-   {pandterm("Bad zdraw argument -- all elements must be integers in 1:nobs")}
-cat("Table of zdraw values pooled over all rows",fill=TRUE)
-print(table(zdraw))
-#
-# check validity of cuttoff
-if(cutoff > 1 || cutoff < .5) {pandterm(paste("cutoff invalid, = ",cutoff))}
-
-#
-# compute posterior mean of Similarity matrix
-#
-#
-if(!SILENT){
-   cat("Computing Posterior Expectation of Similarity Matrix",fill=TRUE)
-   cat("processing draws ...",fill=TRUE); fsh()
-}
-Pmean=matrix(0,nrow=nobs,ncol=nobs)
-R=nrow(zdraw)
-for (r in 1:R) {
-   Pmean=Pmean+ztoSim(zdraw[r,])
-   if(!SILENT) {if(r%%100 == 0) {cat("  ",r,fill=TRUE); fsh()}}
-}
-Pmean=Pmean/R
-
-#
-# now find index for draw which minimizes discrepancy between
-# post exp of similarity and sim implied by that z
-if(!SILENT){
-  cat(" ",fill=TRUE)
-  cat("Look for zdraw which minimizes loss",fill=TRUE)
-  cat("processing draws ...",fill=TRUE); fsh()
-}
-loss=double(R)
-for (r in 1:R){
-  loss[r]=sum(abs(Pmean-ztoSim(zdraw[r,]))) 
-  if(!SILENT) {if(r%%100 == 0) {cat("  ",r,fill=TRUE);fsh()}}
-}
-index=which(loss==min(loss))
-clustera=zdraw[index[1],]
-#
-# now due clustering by assigning Similarity to any (i,j) pair for which
-# Pmean > cutoff
-Sim=matrix(as.numeric(Pmean >= cutoff),ncol=nobs)
-clusterb=Simtoz(Sim)
-return(list(clustera=clustera,clusterb=clusterb))
-}
-   
-      
-
-
-
diff --git a/R/clusterMix_rcpp.R b/R/clusterMix_rcpp.R
new file mode 100644
index 0000000..7d2bf37
--- /dev/null
+++ b/R/clusterMix_rcpp.R
@@ -0,0 +1,60 @@
+clusterMix=function(zdraw,cutoff=.9,SILENT=FALSE,nprint=BayesmConstant.nprint){
+#
+#
+# revision history:
+#   written by p. rossi 9/05
+#
+# purpose: cluster observations based on draws of indicators of 
+#   normal mixture components
+#
+# arguments:
+#   zdraw is a R x nobs matrix of draws of indicators (typically output from rnmixGibbs)
+#   the rth row of zdraw contains rth draw of indicators for each observations
+#   each element of zdraw takes on up to p values for up to p groups. The maximum
+#   number of groups is nobs.  Typically, however, the number of groups will be small
+#   and equal to the number of components used in the normal mixture fit.
+#
+#   cutoff is a cutoff used in determining one clustering scheme it must be 
+#   a number between .5 and 1.
+# 
+#   nprint - print every nprint'th draw
+#
+# output:
+#   two clustering schemes each with a vector of length nobs which gives the assignment
+#   of each observation to a cluster
+#
+#   clustera (finds zdraw with similarity matrix closest to posterior mean of similarity)
+#   clusterb (finds clustering scheme by assigning ones if posterior mean of similarity matrix
+#             > cutoff and computing associated z )
+#
+# define needed functions
+#
+# ------------------------------------------------------------------------------------------   
+
+#
+# check arguments
+#
+if(missing(zdraw)) {pandterm("Requires zdraw argument -- R x n matrix of indicator draws")}
+if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
+#
+# check validity of zdraw rows -- must be integers in the range 1:nobs
+#
+nobs=ncol(zdraw)
+R=nrow(zdraw)
+if(sum(zdraw %in% (1:nobs)) < ncol(zdraw)*nrow(zdraw))
+   {pandterm("Bad zdraw argument -- all elements must be integers in 1:nobs")}
+cat("Table of zdraw values pooled over all rows",fill=TRUE)
+print(table(zdraw))
+#
+# check validity of cuttoff
+if(cutoff > 1 || cutoff < .5) {pandterm(paste("cutoff invalid, = ",cutoff))}
+
+###################################################################
+# Keunwoo Kim
+# 10/06/2014
+###################################################################
+out=clusterMix_rcpp_loop(zdraw, cutoff, SILENT, nprint)
+###################################################################
+
+return(list(clustera=as.vector(out$clustera),clusterb=as.vector(out$clusterb)))
+}
\ No newline at end of file
diff --git a/R/createX.R b/R/createX.R
index 4d22773..ad377a3 100755
--- a/R/createX.R
+++ b/R/createX.R
@@ -1,72 +1,71 @@
-createX=
-function(p,na,nd,Xa,Xd,INT=TRUE,DIFF=FALSE,base=p)
-{
-#
-# Revision History:
-#   P. Rossi 3/05
-#
-# purpose:
-# function to create X array in format needed MNL and MNP routines
-#
-# Arguments:
-#  p is number of choices
-#  na is number of choice attribute variables (choice-specific characteristics)
-#  nd is number of "demo" variables or characteristics of choosers
-#  Xa is a n x (nx*p) matrix of choice attributes.  First p cols are 
-#     values of attribute #1 for each of p chocies, second p for attribute
-#     # 2 ...
-#  Xd is an n x nd matrix of values of "demo" variables
-#  INT is a logical flag for intercepts 
-#  DIFF is a logical flag for differencing wrt to base alternative
-#     (required for MNP)
-#  base is base alternative (default is p)
-#
-#  note: if either you don't have any attributes or "demos", set 
-#        corresponding na, XA or nd,XD to NULL
-#        YOU must specify p,na,nd,XA,XD for the function to work
-#
-# Output:
-#  modified X matrix with n*p rows and INT*(p-1)+nd*(p-1) + na cols
-#
-#
-# check arguments
-#
-pandterm=function(message) {stop(message,call.=FALSE)}
-if(missing(p)) pandterm("requires p (# choice alternatives)")
-if(missing(na)) pandterm("requires na arg (use na=NULL if none)")
-if(missing(nd)) pandterm("requires nd arg (use nd=NULL if none)")
-if(missing(Xa)) pandterm("requires Xa arg (use Xa=NULL if none)")
-if(missing(Xd)) pandterm("requires Xd arg (use Xd=NULL if none)")
-if(is.null(Xa) && is.null(Xd)) pandterm("both Xa and Xd NULL -- requires one non-null")
-if(!is.null(na)  && !is.null(Xa)) 
-   {if(ncol(Xa) != p*na) pandterm(paste("bad Xa dim, dim=",dim(Xa)))}
-if(!is.null(nd) && !is.null(Xd))
-   {if(ncol(Xd) != nd) pandterm(paste("ncol(Xd) ne nd, ncol(Xd)=",ncol(Xd)))}
-if(!is.null(Xa) && !is.null(Xd)) 
-   {if(nrow(Xa) != nrow(Xd)) 
-       {pandterm(paste("nrow(Xa) ne nrow(Xd),nrow(Xa)= ",nrow(Xa)," nrow(Xd)= ",nrow(Xd)))}} 
-if(is.null(Xa)) {n=nrow(Xd)} else {n=nrow(Xa)}
-
-if(INT)  {Xd=cbind(c(rep(1,n)),Xd)}
-if(DIFF) {Imod=diag(p-1)} else {Imod=matrix(0,p,p-1); Imod[-base,]=diag(p-1)}
-if(!is.null(Xd)) Xone=Xd %x%Imod else Xone=NULL
-
-Xtwo=NULL
-if(!is.null(Xa))
-   {if(DIFF) 
-      {tXa=matrix(t(Xa),nrow=p)
-       Idiff=diag(p); Idiff[,base]=c(rep(-1,p));Idiff=Idiff[-base,] 
-       tXa=Idiff%*%tXa
-       Xa=matrix(as.vector(tXa),ncol=(p-1)*na,byrow=TRUE)
-       for (i in 1:na) 
-           {Xext=Xa[,((i-1)*(p-1)+1):((i-1)*(p-1)+p-1)] 
-            Xtwo=cbind(Xtwo,as.vector(t(Xext)))}
-       }
-    else
-      { for (i in 1:na) 
-            { Xext=Xa[,((i-1)*p+1):((i-1)*p+p)] 
-              Xtwo=cbind(Xtwo,as.vector(t(Xext)))}
-      }
-    }
-return(cbind(Xone,Xtwo))
-}
+createX=
+function(p,na,nd,Xa,Xd,INT=TRUE,DIFF=FALSE,base=p)
+{
+#
+# Revision History:
+#   P. Rossi 3/05
+#
+# purpose:
+# function to create X array in format needed MNL and MNP routines
+#
+# Arguments:
+#  p is number of choices
+#  na is number of choice attribute variables (choice-specific characteristics)
+#  nd is number of "demo" variables or characteristics of choosers
+#  Xa is a n x (nx*p) matrix of choice attributes.  First p cols are 
+#     values of attribute #1 for each of p chocies, second p for attribute
+#     # 2 ...
+#  Xd is an n x nd matrix of values of "demo" variables
+#  INT is a logical flag for intercepts 
+#  DIFF is a logical flag for differencing wrt to base alternative
+#     (required for MNP)
+#  base is base alternative (default is p)
+#
+#  note: if either you don't have any attributes or "demos", set 
+#        corresponding na, XA or nd,XD to NULL
+#        YOU must specify p,na,nd,XA,XD for the function to work
+#
+# Output:
+#  modified X matrix with n*p rows and INT*(p-1)+nd*(p-1) + na cols
+#
+#
+# check arguments
+#
+if(missing(p)) pandterm("requires p (# choice alternatives)")
+if(missing(na)) pandterm("requires na arg (use na=NULL if none)")
+if(missing(nd)) pandterm("requires nd arg (use nd=NULL if none)")
+if(missing(Xa)) pandterm("requires Xa arg (use Xa=NULL if none)")
+if(missing(Xd)) pandterm("requires Xd arg (use Xd=NULL if none)")
+if(is.null(Xa) && is.null(Xd)) pandterm("both Xa and Xd NULL -- requires one non-null")
+if(!is.null(na)  && !is.null(Xa)) 
+   {if(ncol(Xa) != p*na) pandterm(paste("bad Xa dim, dim=",dim(Xa)))}
+if(!is.null(nd) && !is.null(Xd))
+   {if(ncol(Xd) != nd) pandterm(paste("ncol(Xd) ne nd, ncol(Xd)=",ncol(Xd)))}
+if(!is.null(Xa) && !is.null(Xd)) 
+   {if(nrow(Xa) != nrow(Xd)) 
+       {pandterm(paste("nrow(Xa) ne nrow(Xd),nrow(Xa)= ",nrow(Xa)," nrow(Xd)= ",nrow(Xd)))}} 
+if(is.null(Xa)) {n=nrow(Xd)} else {n=nrow(Xa)}
+
+if(INT)  {Xd=cbind(c(rep(1,n)),Xd)}
+if(DIFF) {Imod=diag(p-1)} else {Imod=matrix(0,p,p-1); Imod[-base,]=diag(p-1)}
+if(!is.null(Xd)) Xone=Xd %x%Imod else Xone=NULL
+
+Xtwo=NULL
+if(!is.null(Xa))
+   {if(DIFF) 
+      {tXa=matrix(t(Xa),nrow=p)
+       Idiff=diag(p); Idiff[,base]=c(rep(-1,p));Idiff=Idiff[-base,] 
+       tXa=Idiff%*%tXa
+       Xa=matrix(as.vector(tXa),ncol=(p-1)*na,byrow=TRUE)
+       for (i in 1:na) 
+           {Xext=Xa[,((i-1)*(p-1)+1):((i-1)*(p-1)+p-1)] 
+            Xtwo=cbind(Xtwo,as.vector(t(Xext)))}
+       }
+    else
+      { for (i in 1:na) 
+            { Xext=Xa[,((i-1)*p+1):((i-1)*p+p)] 
+              Xtwo=cbind(Xtwo,as.vector(t(Xext)))}
+      }
+    }
+return(cbind(Xone,Xtwo))
+}
diff --git a/R/ghkvec.R b/R/ghkvec.R
deleted file mode 100755
index 6ffe94f..0000000
--- a/R/ghkvec.R
+++ /dev/null
@@ -1,12 +0,0 @@
-ghkvec = 
-function(L,trunpt,above,r){
-#
-# R interface to GHK code -- allows for a vector of truncation points
-# revision history-
-#    P. Rossi 4/05
-#
-   dim=length(above)
-   n=length(trunpt)/dim
-return(.C('ghk_vec',as.integer(n),as.double(L),as.double(trunpt),
-   as.integer(above),as.integer(dim), as.integer(r),res=double(n))$res)
-}
diff --git a/R/llmnl.R b/R/llmnl.R
deleted file mode 100755
index 7016a13..0000000
--- a/R/llmnl.R
+++ /dev/null
@@ -1,26 +0,0 @@
-llmnl= 
-function(beta,y,X) 
-{
-#    p. rossi 2004
-#    changed order of arguments to put beta first 9/05
-#
-# Purpose:evaluate log-like for MNL
-#
-# Arguments:
-#   y is n vector with element = 1,...,j indicating which alt chosen
-#   X is nj x k matrix of xvalues for each of j alt on each of n occasions
-#   beta is k vector of coefs
-# 
-# Output: value of loglike
-#
-n=length(y)
-j=nrow(X)/n
-Xbeta=X%*%beta
-Xbeta=matrix(Xbeta,byrow=T,ncol=j)
-ind=cbind(c(1:n),y)
-xby=Xbeta[ind]
-Xbeta=exp(Xbeta)
-iota=c(rep(1,j))
-denom=log(Xbeta%*%iota)
-return(sum(xby-denom))
-}
diff --git a/R/llnhlogit.R b/R/llnhlogit.R
index cdf2f08..4f23a06 100755
--- a/R/llnhlogit.R
+++ b/R/llnhlogit.R
@@ -13,11 +13,6 @@ llnhlogit=function(theta,choice,lnprices,Xexpend)
 #           gamma  (k x 1)   expenditure function coefficients
 #           tau   scaling of v
 #	    
-root=function(c1,c2,tol,iterlim) {
-   u=double(length(c1))
-   .C("callroot",as.integer(length(c1)),as.double(c1),as.double(c2),as.double(tol),
-       as.integer(iterlim),r=as.double(u))$r}
-
    m=ncol(lnprices)
    n=length(choice)
    d=ncol(Xexpend)
@@ -27,8 +22,8 @@ root=function(c1,c2,tol,iterlim) {
    tau=theta[length(theta)]
    iotam=c(rep(1,m))
    c1=as.vector(Xexpend%*%gamma)%x%iotam-as.vector(t(lnprices))+alpha
-   c2=c(rep(exp(k),n))   
-   u=root(c1,c2,.0000001,20)
+   c2=c(rep(exp(k),n))
+   u=callroot(c1,c2,.0000001,20)
    v=alpha - u*exp(k)-as.vector(t(lnprices))
    vmat=matrix(v,ncol=m,byrow=TRUE)
    vmat=tau*vmat
diff --git a/R/lndIChisq.R b/R/lndIChisq.R
deleted file mode 100755
index 51bf2c6..0000000
--- a/R/lndIChisq.R
+++ /dev/null
@@ -1,11 +0,0 @@
-lndIChisq=
-function(nu,ssq,x)
-{
-#
-# P. Rossi 12/04
-#
-# Purpose: evaluate log-density of scaled Inverse Chi-sq
-#  density of r.var. Z=nu*ssq/chisq(nu)
-#
-return(-lgamma(nu/2)+(nu/2)*log((nu*ssq)/2)-((nu/2)+1)*log(x)-(nu*ssq)/(2*x))
-}
diff --git a/R/lndIWishart.R b/R/lndIWishart.R
deleted file mode 100755
index 7e64919..0000000
--- a/R/lndIWishart.R
+++ /dev/null
@@ -1,31 +0,0 @@
-lndIWishart=
-function(nu,V,IW)
-{
-# 
-# P. Rossi 12/04
-#
-# purpose: evaluate log-density of inverted Wishart
-#    includes normalizing constant
-#
-# arguments:
-#   nu is d. f. parm
-#   V is location matrix
-#   IW is the value at which the density should be evaluated
-#
-# output:
-#   value of log density
-#
-# note: in this parameterization, E[IW]=V/(nu-k-1)
-#
-k=ncol(V)
-Uiw=chol(IW)
-lndetVd2=sum(log(diag(chol(V))))
-lndetIWd2=sum(log(diag(Uiw)))
-#
-# first evaluate constant
-#
-const=((nu*k)/2)*log(2)+((k*(k-1))/4)*log(pi)
-arg=(nu+1-c(1:k))/2
-const=const+sum(lgamma(arg))
-return(-const+nu*lndetVd2-(nu+k+1)*lndetIWd2-.5*sum(diag(V%*%chol2inv(Uiw))))
-}
diff --git a/R/lndMvn.R b/R/lndMvn.R
deleted file mode 100755
index d99ab93..0000000
--- a/R/lndMvn.R
+++ /dev/null
@@ -1,16 +0,0 @@
-lndMvn=
-function(x,mu,rooti)
-{
-#
-# changed 12/05 by Rossi to include normalizing constant
-#
-# function to evaluate log of MV NOrmal density with  mean mu, var Sigma
-# Sigma=t(root)%*%root   (root is upper tri cholesky root)
-# Sigma^-1=rooti%*%t(rooti)   
-# rooti is in the inverse of upper triangular chol root of sigma
-#          note: this is the UL decomp of sigmai not LU!
-#                Sigma=root'root   root=inv(rooti)
-#
-z=as.vector(t(rooti)%*%(x-mu))
-return(  -(length(x)/2)*log(2*pi) -.5*(z%*%z) + sum(log(diag(rooti))))
-}
diff --git a/R/lndMvst.R b/R/lndMvst.R
deleted file mode 100755
index 439e7aa..0000000
--- a/R/lndMvst.R
+++ /dev/null
@@ -1,19 +0,0 @@
-lndMvst=
-function(x,nu,mu,rooti,NORMC=FALSE)
-{
-#
-# modified by Rossi 12/2005 to include normalizing constant
-#
-# function to evaluate log of MVstudent t density with nu df, mean mu,
-# and with sigmai=rooti%*%t(rooti)   note: this is the UL decomp of sigmai not LU!
-# rooti is in the inverse of upper triangular chol root of sigma
-# or Sigma=root'root   root=inv(rooti)
-#
-dim=length(x)
-if(NORMC) 
-   {constant=(nu/2)*log(nu)+lgamma((nu+dim)/2)-(dim/2)*log(pi)-lgamma(nu/2)}
-  else
-   {constant=0}
-z=as.vector(t(rooti)%*%(x-mu))
-return(constant -((dim+nu)/2)*log(nu+z%*%z)+sum(log(diag(rooti))))
-}
diff --git a/R/mnpProb.R b/R/mnpProb.R
old mode 100755
new mode 100644
index 3ceda5b..49c4f2a
--- a/R/mnpProb.R
+++ b/R/mnpProb.R
@@ -4,6 +4,7 @@ function(beta,Sigma,X,r=100)
 #
 # revision history:
 #  written by Rossi 9/05
+#  W. Taylor 4/15 - replaced ghkvec call with rcpp version
 #
 # purpose:
 #   function to MNP probabilities for a given X matrix (corresponding
@@ -32,14 +33,6 @@ function(beta,Sigma,X,r=100)
 # for p, e < - mu
 #
 #
-# define functions needed
-#
-ghkvec = function(L,trunpt,above,r){
-   dim=length(above)
-   n=length(trunpt)/dim
-   .C('ghk_vec',as.integer(n),as.double(L),as.double(trunpt),as.integer(above),as.integer(dim),
-   as.integer(r),res=double(n))$res}
-#   
 pm1=ncol(Sigma)
 k=length(beta)
 mu=matrix(X%*%beta,nrow=pm1)
diff --git a/R/plot.bayesm.hcoef.R b/R/plot.bayesm.hcoef.R
index cd92bec..865a1bc 100755
--- a/R/plot.bayesm.hcoef.R
+++ b/R/plot.bayesm.hcoef.R
@@ -1,44 +1,45 @@
-plot.bayesm.hcoef=function(x,names,burnin=trunc(.1*R),...){
-#
-# S3 method to plot arrays of draws of coefs in hier models
-#   3 dimensional arrays:  unit x var x draw
-#   P. Rossi 2/07
-#
-  X=x
-  if(mode(X) == "list") stop("list entered \n Possible Fixup: extract from list \n")
-  if(mode(X) !="numeric") stop("Requires numeric argument \n")
-  d=dim(X)
-  if(length(d) !=3) stop("Requires 3-dim array \n") 
-  op=par(no.readonly=TRUE)
-  on.exit(par(op))
-  nunits=d[1]
-  nvar=d[2]
-  R=d[3]
-  if(R < 100) {cat("fewer than 100 draws submitted \n"); return(invisible())}
-  #
-  #  plot posterior distributions of nvar coef for 30 rand units
-  #
-  if(missing(names)) {names=as.character(1:nvar)}
-  rsam=sort(sample(c(1:nunits),30))  # randomly sample 30 cross-sectional units
-  par(mfrow=c(1,1))
-  par(las=3)  # horizontal labeling
-  for(var in 1:nvar){
-       ext=X[rsam,var,(burnin+1):R]; ext=data.frame(t(ext))
-       colnames(ext)=as.character(rsam)
-       out=boxplot(ext,plot=FALSE,...)
-       out$stats=apply(ext,2,quantile,probs=c(0,.05,.95,1))
-       bxp(out,xlab="Cross-sectional Unit",main=paste("Coefficients on Var ",names[var],sep=""),boxfill="magenta",...)
-       if(var==1) par(ask=dev.interactive())
-  }
-  #
-  # plot posterior means for each var 
-  #
-  par(las=1)
-  pmeans=matrix(0,nrow=nunits,ncol=nvar)
-  for(i in 1:nunits) pmeans[i,]=apply(X[i,,(burnin+1):R],1,mean)
-  names=as.character(1:nvar)
-  attributes(pmeans)$class="bayesm.mat"
-  for(i in 1:nvar) names[i]=paste("Posterior Means of Coef ",names[var],sep="")
-  plot(pmeans,names,TRACEPLOT=FALSE,INT=FALSE,DEN=FALSE,CHECK_NDRAWS=FALSE,...)
-invisible()
-}
+plot.bayesm.hcoef=function(x,names,burnin=trunc(.1*R),...){
+#
+# S3 method to plot arrays of draws of coefs in hier models
+#   3 dimensional arrays:  unit x var x draw
+#   P. Rossi 2/07
+#
+  X=x
+  if(mode(X) == "list") stop("list entered \n Possible Fixup: extract from list \n")
+  if(mode(X) !="numeric") stop("Requires numeric argument \n")
+  d=dim(X)
+  if(length(d) !=3) stop("Requires 3-dim array \n") 
+  op=par(no.readonly=TRUE)
+  on.exit(par(op))
+  on.exit(devAskNewPage(FALSE),add=TRUE)
+  nunits=d[1]
+  nvar=d[2]
+  R=d[3]
+  if(missing(names)) {names=as.character(1:nvar)}
+  if(R < 100) {cat("fewer than 100 draws submitted \n"); return(invisible())}
+  #
+  #  plot posterior distributions of nvar coef for 30 rand units
+  #
+ 
+  rsam=sort(sample(c(1:nunits),30))  # randomly sample 30 cross-sectional units
+  par(mfrow=c(1,1))
+  par(las=3)  # horizontal labeling
+  devAskNewPage(TRUE)
+  for(var in 1:nvar){
+       ext=X[rsam,var,(burnin+1):R]; ext=data.frame(t(ext))
+       colnames(ext)=as.character(rsam)
+       out=boxplot(ext,plot=FALSE,...)
+       out$stats=apply(ext,2,quantile,probs=c(0,.05,.95,1))
+       bxp(out,xlab="Cross-sectional Unit",main=paste("Coefficients on Var ",names[var],sep=""),boxfill="magenta",...)
+  }
+  #
+  # plot posterior means for each var 
+  #
+  par(las=1)
+  pmeans=matrix(0,nrow=nunits,ncol=nvar)
+  for(i in 1:nunits) pmeans[i,]=apply(X[i,,(burnin+1):R],1,mean)
+  attributes(pmeans)$class="bayesm.mat"
+  for(var in 1:nvar) names[var]=paste("Posterior Means of Coef ",names[var],sep="")
+  plot(pmeans,names,TRACEPLOT=FALSE,INT=FALSE,DEN=FALSE,CHECK_NDRAWS=FALSE,...)
+invisible()
+}
diff --git a/R/plot.bayesm.mat.R b/R/plot.bayesm.mat.R
index f0f180c..91ef4de 100755
--- a/R/plot.bayesm.mat.R
+++ b/R/plot.bayesm.mat.R
@@ -1,58 +1,59 @@
-plot.bayesm.mat=function(x,names,burnin=trunc(.1*nrow(X)),tvalues,TRACEPLOT=TRUE,DEN=TRUE,INT=TRUE,
-      CHECK_NDRAWS=TRUE,...){
-#
-#  S3 method to print matrices of draws the object X is of class "bayesm.mat"
-#
-#     P. Rossi 2/07
-#
-  X=x
-  if(mode(X) == "list") stop("list entered \n Possible Fixup: extract from list \n")
-  if(mode(X) !="numeric") stop("Requires numeric argument \n")
-  op=par(no.readonly=TRUE)
-  on.exit(par(op))
-  if(is.null(attributes(X)$dim)) X=as.matrix(X)
-  nx=ncol(X)
-  if(nrow(X) < 100 & CHECK_NDRAWS) {cat("fewer than 100 draws submitted \n"); return(invisible())}
-  if(!missing(tvalues)){ 
-        if(mode(tvalues) !="numeric") {stop("tvalues must be a numeric vector \n")} 
-      else 
-        {if(length(tvalues)!=nx) stop("tvalues are wrong length \n")}
-  }
-  if(nx==1) par(mfrow=c(1,1)) 
-  if(nx==2) par(mfrow=c(2,1))
-  if(nx==3) par(mfrow=c(3,1))
-  if(nx==4) par(mfrow=c(2,2))
-  if(nx>=5) par(mfrow=c(3,2))
-
-  if(missing(names)) {names=as.character(1:nx)}
-  if (DEN) ylabtxt="density" else ylabtxt="freq"
-  for(index in 1:nx){
-     hist(X[(burnin+1):nrow(X),index],xlab="",ylab=ylabtxt,main=names[index],freq=!DEN,col="magenta",...)
-     if(!missing(tvalues)) abline(v=tvalues[index],lwd=2,col="blue")
-     if(INT){
-     quants=quantile(X[(burnin+1):nrow(X),index],prob=c(.025,.975))
-     mean=mean(X[(burnin+1):nrow(X),index])
-     semean=numEff(X[(burnin+1):nrow(X),index])$stderr
-     text(quants[1],0,"|",cex=3.0,col="green")
-     text(quants[2],0,"|",cex=3.0,col="green")
-     text(mean,0,"|",cex=3.0,col="red")
-     text(mean-2*semean,0,"|",cex=2,col="yellow")
-     text(mean+2*semean,0,"|",cex=2,col="yellow")
-     }
-     par(ask=dev.interactive())
-  }
-  if(TRACEPLOT){
-     if(nx==1) par(mfrow=c(1,2)) 
-     if(nx==2) par(mfrow=c(2,2))
-     if(nx>=3) par(mfrow=c(3,2))
-     for(index in 1:nx){
-        plot(as.vector(X[,index]),xlab="",ylab="",main=names[index],type="l",col="red")
-        if(!missing(tvalues)) abline(h=tvalues[index],lwd=2,col="blue")
-        if(var(X[,index])>1.0e-20) {acf(as.vector(X[,index]),xlab="",ylab="",main="")}
-        else 
-            {plot.default(X[,index],xlab="",ylab="",type="n",main="No ACF Produced")}
-     }
-   }
-  invisible()
-}
-
+plot.bayesm.mat=function(x,names,burnin=trunc(.1*nrow(X)),tvalues,TRACEPLOT=TRUE,DEN=TRUE,INT=TRUE,
+      CHECK_NDRAWS=TRUE,...){
+#
+#  S3 method to print matrices of draws the object X is of class "bayesm.mat"
+#
+#     P. Rossi 2/07
+#
+  X=x
+  if(mode(X) == "list") stop("list entered \n Possible Fixup: extract from list \n")
+  if(mode(X) !="numeric") stop("Requires numeric argument \n")
+  op=par(no.readonly=TRUE)
+  on.exit(par(op))
+  on.exit(devAskNewPage(FALSE),add=TRUE)
+  if(is.null(attributes(X)$dim)) X=as.matrix(X)
+  nx=ncol(X)
+  if(nrow(X) < 100 & CHECK_NDRAWS) {cat("fewer than 100 draws submitted \n"); return(invisible())}
+  if(!missing(tvalues)){ 
+        if(mode(tvalues) !="numeric") {stop("tvalues must be a numeric vector \n")} 
+      else 
+        {if(length(tvalues)!=nx) stop("tvalues are wrong length \n")}
+  }
+  if(nx==1) par(mfrow=c(1,1)) 
+  if(nx==2) par(mfrow=c(2,1))
+  if(nx==3) par(mfrow=c(3,1))
+  if(nx==4) par(mfrow=c(2,2))
+  if(nx>=5) par(mfrow=c(3,2))
+
+  if(missing(names)) {names=as.character(1:nx)}
+  if (DEN) ylabtxt="density" else ylabtxt="freq"
+  devAskNewPage(TRUE)
+  for(index in 1:nx){
+     hist(X[(burnin+1):nrow(X),index],xlab="",ylab=ylabtxt,main=names[index],freq=!DEN,col="magenta",...)
+     if(!missing(tvalues)) abline(v=tvalues[index],lwd=2,col="blue")
+     if(INT){
+     quants=quantile(X[(burnin+1):nrow(X),index],prob=c(.025,.975))
+     mean=mean(X[(burnin+1):nrow(X),index])
+     semean=numEff(X[(burnin+1):nrow(X),index])$stderr
+     text(quants[1],0,"|",cex=3.0,col="green")
+     text(quants[2],0,"|",cex=3.0,col="green")
+     text(mean,0,"|",cex=3.0,col="red")
+     text(mean-2*semean,0,"|",cex=2,col="yellow")
+     text(mean+2*semean,0,"|",cex=2,col="yellow")
+     }
+  }
+  if(TRACEPLOT){
+     if(nx==1) par(mfrow=c(1,2)) 
+     if(nx==2) par(mfrow=c(2,2))
+     if(nx>=3) par(mfrow=c(3,2))
+     for(index in 1:nx){
+        plot(as.vector(X[,index]),xlab="",ylab="",main=names[index],type="l",col="red")
+        if(!missing(tvalues)) abline(h=tvalues[index],lwd=2,col="blue")
+        if(var(X[,index])>1.0e-20) {acf(as.vector(X[,index]),xlab="",ylab="",main="")}
+        else 
+            {plot.default(X[,index],xlab="",ylab="",type="n",main="No ACF Produced")}
+     }
+   }
+  invisible()
+}
+
diff --git a/R/rDPGibbs.R b/R/rDPGibbs.R
deleted file mode 100755
index 9367537..0000000
--- a/R/rDPGibbs.R
+++ /dev/null
@@ -1,569 +0,0 @@
-rDPGibbs= 
-function(Prior,Data,Mcmc)
-{
-#
-# Revision History: 
-#   5/06 add rthetaDP
-#   7/06 include rthetaDP in main body to avoid copy overhead
-#   1/08 add scaling
-#   2/08 add draw of lambda
-#   3/08 changed nu prior support to dim(y) + exp(unif gird on nulim[1],nulim[2])
-#
-# purpose: do Gibbs sampling for density estimation using Dirichlet process model
-#
-# arguments:
-#     Data is a list of y which is an n x k matrix of data
-#     Prior is a list of (alpha,lambda,Prioralpha)
-#       alpha: starting value
-#       lambda_hyper: hyperparms of prior on lambda
-#       Prioralpha: hyperparms of alpha prior; a list of (Istarmin,Istarmax,power)
-#       if elements of the prior don't exist, defaults are assumed
-#     Mcmc is a list of (R,keep,maxuniq)
-#       R: number of draws
-#       keep: thinning parameter
-#       maxuniq: the maximum number of unique thetaStar values
-#
-# Output:
-#     list with elements
-#     alphadraw: vector of length R/keep, [i] is ith draw of alpha
-#     Istardraw: vector of length R/keep, [i] is the number of unique theta's drawn from ith iteration
-#     adraw
-#     nudraw
-#     vdraw
-#     thetaNp1draws: list, [[i]] is ith draw of theta_{n+1}
-#     inddraw: R x n matrix, [,i] is indicators of identity for each obs in ith iteration
-#
-# Model:
-#        y_i ~ f(y|thetai)
-#        thetai|G ~ G
-#        G|lambda,alpha ~ DP(G|G0(lambda),alpha)
-#
-# Priors:
-#        alpha: starting value
-#
-#        lambda:
-#           G0 ~ N(mubar,Sigma (x) Amu^-1)
-#           mubar=vec(mubar)
-#           Sigma ~ IW(nu,nu*v*I)  note: mode(Sigma)=nu/(nu+2)*v*I
-#           mubar=0
-#           amu is uniform on grid specified by alim
-#           nu is log uniform, nu=d-1+exp(Z) z is uniform on seq defined bvy nulim
-#           v is uniform on sequence specificd by vlim
-#
-#        Prioralpha:
-#           alpha ~ (1-(alpha-alphamin)/(alphamax-alphamin))^power
-#           alphamin=exp(digamma(Istarmin)-log(gamma+log(N)))
-#           alphamax=exp(digamma(Istarmax)-log(gamma+log(N)))
-#           gamma= .5772156649015328606
-#
-#
-#
-# define needed functions
-#
-# -----------------------------------------------------------------------------------------
-#
-q0=function(y,lambda,eta){
-#
-# function to compute a vector of int f(y[i]|theta) p(theta|lambda)dlambda
-#     here p(theta|lambda) is G0 the base prior
-#
-# implemented for a multivariate normal data density and standard conjugate
-# prior:
-#    theta=list(mu,Sigma)
-#    f(y|theta,eta) is N(mu,Sigma)
-#    lambda=list(mubar,Amu,nu,V)
-#       mu|Sigma ~ N(mubar,Sigma (x) Amu^-1)
-#       Sigma ~ IW(nu,V)
-#
-# arguments:
-#    Y is n x k matrix of observations
-#    lambda=list(mubar,Amu,nu,V)
-#    eta is not used
-# 
-# output:
-#    vector of q0 values for each obs (row of Y)
-#
-# p. rossi 12/05
-#
-# here y is matrix of observations (each row is an obs)
-
-mubar=lambda$mubar; nu=lambda$nu ; Amu=lambda$Amu; V=lambda$V
-k=ncol(y)
-R=chol(V)
-logdetR=sum(log(diag(R)))
-if (k > 1) 
-  {lnk1k2=(k/2)*log(2)+log((nu-k)/2)+lgamma((nu-k)/2)-lgamma(nu/2)+sum(log(nu/2-(1:(k-1))/2))}
-else
-  {lnk1k2=(k/2)*log(2)+log((nu-k)/2)+lgamma((nu-k)/2)-lgamma(nu/2)}
-constant=-(k/2)*log(2*pi)+(k/2)*log(Amu/(1+Amu)) + lnk1k2 + nu*logdetR
-#
-# note: here we are using the fact that |V + S_i | = |R|^2 (1 + v_i'v_i)
-#       where v_i = sqrt(Amu/(1+Amu))*t(R^-1)*(y_i-mubar), R is chol(V)
-#
-#       and S_i = Amu/(1+Amu) * (y_i-mubar)(y_i-mubar)'
-#
-mat=sqrt(Amu/(1+Amu))*t(backsolve(R,diag(ncol(y))))%*%(t(y)-mubar)
-vivi=colSums(mat^2)
-
-lnq0v=constant-((nu+1)/2)*(2*logdetR+log(1+vivi))
-
-return(exp(lnq0v))
-}
-# ----------------------------------------------------------------------------------------------
-   rmultinomF=
-      function(p) {
-       return(sum(runif(1) > cumsum(p))+1)
-   }
-# -----------------------------------------------------------------------------------------------
-   alphaD=function(Prioralpha,Istar,gridsize){
-#
-#  function to draw alpha using prior, p(alpha)= (1-(alpha-alphamin)/(alphamax-alphamin))**power
-#
-   power=Prioralpha$power
-   alphamin=Prioralpha$alphamin
-   alphamax=Prioralpha$alphamax
-   n=Prioralpha$n
-   alpha=seq(from=alphamin,to=(alphamax-0.000001),len=gridsize)
-   lnprob=Istar*log(alpha) + lgamma(alpha) - lgamma(n+alpha) + 
-          power*log(1-(alpha-alphamin)/(alphamax-alphamin))
-   lnprob=lnprob-median(lnprob)
-   probs=exp(lnprob)
-   probs=probs/sum(probs)
-   return(alpha[rmultinomF(probs)])
-}  
-
-
-#
-# ------------------------------------------------------------------------------------------
-#
-yden=function(thetaStar,y,eta){
-#
-# function to compute f(y | theta) 
-# computes f for all values of theta in theta list of lists
-#
-# arguments:
-#   thetaStar is a list of lists.  thetaStar[[i]] is a list with components, mu, rooti
-#   y |theta[[i]] ~ N(mu,(rooti %*% t(rooti))^-1)  rooti is inverse of Chol root of Sigma
-#   eta is not used
-#
-# output:
-#   length(thetaStar) x n array of values of f(y[j,]|thetaStar[[i]]
-# 
-
-nunique=length(thetaStar)
-n=nrow(y)
-ydenmat=matrix(double(n*nunique),ncol=n)
-k=ncol(y)
-for(i in 1:nunique){
-
-   # now compute vectorized version of lndMvn 
-   # compute y_i'RIRI'y_i for all i
-   #
-   mu=thetaStar[[i]]$mu; rooti=thetaStar[[i]]$rooti
-   quads=colSums((crossprod(rooti,(t(y)-mu)))^2)
-   ydenmat[i,]=exp(-(k/2)*log(2*pi) + sum(log(diag(rooti))) - .5*quads)
-   
-}
-return(ydenmat)
-}
-
-#
-# -----------------------------------------------------------------------------------------
-#
-GD=function(lambda){
-#
-# function to draw from prior for Multivariate Normal Model
-#
-# mu|Sigma ~ N(mubar,Sigma x Amu^-1)
-# Sigma ~ IW(nu,V)
-#
-# note: we must insure that mu is a vector to use most efficient
-#       lndMvn routine
-#
-nu=lambda$nu
-V=lambda$V
-mubar=lambda$mubar
-Amu=lambda$Amu
-k=length(mubar)
-Sigma=rwishart(nu,chol2inv(chol(lambda$V)))$IW
-root=chol(Sigma)
-mu=mubar+(1/sqrt(Amu))*t(root)%*%matrix(rnorm(k),ncol=1)
-return(list(mu=as.vector(mu),rooti=backsolve(root,diag(k))))
-}
-
-#
-# -------------------------------------------------------------------------------------------
-#
-thetaD=function(y,lambda,eta){
-#
-# function to draw from posterior of theta given data y and base prior G0(lambda)
-#
-# here y ~ N(mu,Sigma)
-# theta = list(mu=mu,rooti=chol(Sigma)^-1)
-# mu|Sigma ~ N(mubar,Sigma (x) Amu-1)
-# Sigma ~ IW(nu,V)
-#
-# arguments: 
-#   y is n x k matrix of obs
-#   lambda is list(mubar,Amu,nu,V)
-#   eta is not used
-# output:
-#   one draw of theta, list(mu,rooti)
-#        Sigma=inv(rooti)%*%t(inv(rooti))
-#
-# note: we assume that y is a matrix. if there is only one obs, y is a 1 x k matrix
-#
-rout=rmultireg(y,matrix(c(rep(1,nrow(y))),ncol=1),matrix(lambda$mubar,nrow=1),matrix(lambda$Amu,ncol=1),
-       lambda$nu,lambda$V)
-return(list(mu=as.vector(rout$B),rooti=backsolve(chol(rout$Sigma),diag(ncol(y)))))
-}
-
-#
-# --------------------------------------------------------------------------------------------
-# load a faster version of lndMvn
-# note: version of lndMvn below assumes x,mu is a vector!
-lndMvn=function (x, mu, rooti) 
-{
-    return(-(length(x)/2) * log(2 * pi) - 0.5 * sum(((x-mu)%*%rooti)**2) + sum(log(diag(rooti))))
-}
-# -----------------------------------------------------------------------------------------
-   lambdaD=function(lambda,thetastar,alim=c(.01,2),nulim=c(.01,2),vlim=c(.1,5),gridsize=20){
-#
-# revision history
-#  p. rossi 7/06
-#  vectorized 1/07
-#  changed 2/08 to paramaterize V matrix of IW prior to nu*v*I; then mode of Sigma=nu/(nu+2)vI
-#      this means that we have a reparameterization to v* = nu*v
-#
-#  function to draw (nu, v, a) using uniform priors
-#
-#  theta_j=(mu_j,Sigma_j)  mu_j~N(0,Sigma_j/a)  Sigma_j~IW(nu,vI)
-#           recall E[Sigma]= vI/(nu-dim-1)
-#
-# define functions needed
-# ----------------------------------------------------------------------------------------------
-   rmultinomF=
-      function(p) {
-       return(sum(runif(1) > cumsum(p))+1)
-   }
-echo=function(lst){return(t(lst[[2]]))}
-rootiz=function(lst){crossprod(lst[[2]],lst[[1]])}
-#
-# ------------------------------------------------------------------------------------------
-
-   d=length(thetastar[[1]]$mu)
-   Istar=length(thetastar)
-   aseq=seq(from=alim[1],to=alim[2],len=gridsize)
-   nuseq=d-1+exp(seq(from=nulim[1],to=nulim[2],len=gridsize)) # log uniform grid
-   vseq=seq(from=vlim[1],to=vlim[2],len=gridsize)
-#
-#    "brute" force approach would simply loop over the 
-#         "observations" (theta_j) and use log of the appropriate densities.  To vectorize, we
-#         notice that the "data" comes via various statistics:
-#         1. sum of log(diag(rooti_j)
-#         2. sum of tr(V%*%rooti_j%*%t(rooti_j)) where V=vI_d
-#         3. quadratic form t(mu_j-0)%*%rooti%*%t(rooti)%*%(mu_j-0)
-#     thus, we will compute these first.
-#     for documentation purposes, we leave brute force code in comment fields
-#
-# extract needed info from thetastar list
-#
-   out=double(Istar*d*d)
-   out=sapply(thetastar,echo)
-   dim(out)=c(d,Istar*d) # out has the rootis in form: [t(rooti_1), t(rooti_2), ...,t(rooti_Istar)]
-   sumdiagriri=sum(colSums(out^2)) #  sum_j tr(rooti_j%*%t(rooti_j))
-#   now get diagonals of rooti
-   ind=cbind(c(1:(d*Istar)),rep((1:d),Istar))
-   out=t(out)
-   sumlogdiag=sum(log(out[ind]))
-   rimu=sapply(thetastar,rootiz) # columns of rimu contain t(rooti_j)%*%mu_j
-   dim(rimu)=c(d,Istar)
-   sumquads=sum(colSums(rimu^2)) 
-#  
-#  draw a  (conditionally indep of nu,v given theta_j)
-   lnprob=double(length(aseq))
-       #for(i in seq(along=aseq)){
-       #for(j in seq(along=thetastar)){
-       #lnprob[i]=lnprob[i]+lndMvn(thetastar[[j]]$mu,c(rep(0,d)),thetastar[[j]]$rooti*sqrt(aseq[i]))}
-    lnprob=Istar*(-(d/2)*log(2*pi))-.5*aseq*sumquads+Istar*d*log(sqrt(aseq))+sumlogdiag
-    lnprob=lnprob-max(lnprob)+200
-    probs=exp(lnprob)
-    probs=probs/sum(probs)
-    adraw=aseq[rmultinomF(probs)]
-#
-#   draw nu given v
-#
-    V=lambda$V
-    lnprob=double(length(nuseq))
-       #for(i in seq(along=nuseq)){
-       #for(j in seq(along=thetastar)){
-       #Sigma_j=crossprod(backsolve(thetastar[[j]]$rooti,diag(d)))
-       #lnprob[i]=lnprob[i]+lndIWishart(nuseq[i],V,Sigma_j)}
-    arg=rep(c(1:d),gridsize)
-    dim(arg)=c(d,gridsize)
-    arg=t(arg)
-    arg=(nuseq+1-arg)/2
-    lnprob=-Istar*log(2)*d/2*nuseq - Istar*rowSums(lgamma(arg)) + 
-            Istar*d*log(sqrt(V[1,1]))*nuseq + sumlogdiag*nuseq
-    lnprob=lnprob-max(lnprob)+200
-    probs=exp(lnprob)
-    probs=probs/sum(probs)
-    nudraw=nuseq[rmultinomF(probs)]
-#
-#   draw v given nu 
-#
-    lnprob=double(length(vseq))
-       #for(i in seq(along=vseq)){
-       #V=vseq[i]*diag(d)
-       #for(j in seq(along=thetastar)){
-       #Sigma_j=crossprod(backsolve(thetastar[[j]]$rooti,diag(d)))
-       #lnprob[i]=lnprob[i]+lndIWishart(nudraw,V,Sigma_j)}
-#    lnprob=Istar*nudraw*d*log(sqrt(vseq))-.5*sumdiagriri*vseq
-    lnprob=Istar*nudraw*d*log(sqrt(vseq*nudraw))-.5*sumdiagriri*vseq*nudraw
-    lnprob=lnprob-max(lnprob)+200
-    probs=exp(lnprob)
-    probs=probs/sum(probs)
-    vdraw=vseq[rmultinomF(probs)]
-#
-#   put back into lambda
-#
-    return(list(mubar=c(rep(0,d)),Amu=adraw,nu=nudraw,V=nudraw*vdraw*diag(d)))
-}
-# -----------------------------------------------------------------------------------------
-
-#  check arguments
-#
-pandterm=function(message) {stop(message,call.=FALSE)}
-if(missing(Data)) {pandterm("Requires Data argument -- list of y")}
-    if(is.null(Data$y)) {pandterm("Requires Data element y")}
-    y=Data$y
-#
-# check data for validity
-#
-if(!is.matrix(y)) {pandterm("y must be a matrix")}
-nobs=nrow(y)
-dimy=ncol(y)
-#
-# check for Prior
-#
-alimdef=c(.01,10)
-nulimdef=c(.01,3)
-vlimdef=c(.1,4)
-if(missing(Prior)) {pandterm("requires Prior argument ")}
-else
-   {
-    if(is.null(Prior$lambda_hyper)) {lambda_hyper=list(alim=alimdef,nulim=nulimdef,vlim=vlimdef)}
-    else {lambda_hyper=Prior$lambda_hyper;
-       if(is.null(lambda_hyper$alim)) {lambda_hyper$alim=alimdef}
-       if(is.null(lambda_hyper$nulim)) {lambda_hyper$nulim=nulimdef} 
-       if(is.null(lambda_hyper$vlim)) {lambda_hyper$vlim=vlimdef}
-       }
-    if(is.null(Prior$Prioralpha)) {Prioralpha=list(Istarmin=1,Istarmax=min(50,0.1*nobs),power=0.8)}
-    else {Prioralpha=Prior$Prioralpha;
-       if(is.null(Prioralpha$Istarmin)) {Prioralpha$Istarmin=1} else {Prioralpha$Istarmin=Prioralpha$Istarmin}
-       if(is.null(Prioralpha$Istarmax)) 
-             {Prioralpha$Istarmax=min(50,0.1*nobs)} else {Prioralpha$Istarmax=Prioralpha$Istarmax}
-       if(is.null(Prioralpha$power)) {Prioralpha$power=0.8}
-       }
-   }
-gamma= .5772156649015328606
-Prioralpha$alphamin=exp(digamma(Prioralpha$Istarmin)-log(gamma+log(nobs)))
-Prioralpha$alphamax=exp(digamma(Prioralpha$Istarmax)-log(gamma+log(nobs)))
-Prioralpha$n=nobs
-#
-# check Prior arguments for valdity
-#
-if(lambda_hyper$alim[1]<0) {pandterm("alim[1] must be >0")}
-if(lambda_hyper$nulim[1]<0) {pandterm("nulim[1] must be >0")}
-if(lambda_hyper$vlim[1]<0) {pandterm("vlim[1] must be >0")}
-if(Prioralpha$Istarmin <1){pandterm("Prioralpha$Istarmin must be >= 1")}
-if(Prioralpha$Istarmax <= Prioralpha$Istarmin){pandterm("Prioralpha$Istarmin must be > Prioralpha$Istarmax")}
-#
-# check MCMC argument
-#
-if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
-else
-   {
-    if(is.null(Mcmc$R)) 
-       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
-    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
-    if(is.null(Mcmc$maxuniq)) {maxuniq=200} else {maxuniq=Mcmc$maxuniq}
-    if(is.null(Mcmc$SCALE)) {SCALE=TRUE} else {SCALE=Mcmc$SCALE}
-    if(is.null(Mcmc$gridsize)) {gridsize=20} else {gridsize=Mcmc$gridsize}
-   }
-
-#
-# print out the problem
-#
-cat(" Starting Gibbs Sampler for Density Estimation Using Dirichlet Process Model",fill=TRUE)
-cat(" ",nobs," observations on ",dimy," dimensional data",fill=TRUE)
-cat(" ",fill=TRUE)
-cat(" SCALE=",SCALE,fill=TRUE)
-cat(" ",fill=TRUE)
-cat(" Prior Parms: ",fill=TRUE)
-cat("  G0 ~ N(mubar,Sigma (x) Amu^-1)",fill=TRUE)
-cat("   mubar = ",0,fill=TRUE)
-cat("   Sigma ~ IW(nu,nu*v*I)",fill=TRUE)
-cat("   Amu ~ uniform[",lambda_hyper$alim[1],",",lambda_hyper$alim[2],"]",fill=TRUE)
-cat("   nu ~ uniform on log grid on [",dimy-1+exp(lambda_hyper$nulim[1]),
-             ",",dimy-1+exp(lambda_hyper$nulim[2]),"]",fill=TRUE)
-cat("   v ~ uniform[",lambda_hyper$vlim[1],",",lambda_hyper$vlim[2],"]",fill=TRUE)
-cat(" ",fill=TRUE)
-cat("  alpha ~ (1-(alpha-alphamin)/(alphamax-alphamin))^power",fill=TRUE)
-cat("   Istarmin = ",Prioralpha$Istarmin,fill=TRUE)
-cat("   Istarmax = ",Prioralpha$Istarmax,fill=TRUE)
-cat("   alphamin = ",Prioralpha$alphamin,fill=TRUE)
-cat("   alphamax = ",Prioralpha$alphamax,fill=TRUE)
-cat("   power = ",Prioralpha$power,fill=TRUE)
-cat(" ",fill=TRUE)
-cat(" Mcmc Parms: R= ",R," keep= ",keep," maxuniq= ",maxuniq," gridsize for lambda hyperparms= ",gridsize,
-        fill=TRUE)
-cat(" ",fill=TRUE)
-
-# initialize theta, thetastar, indic
-
-theta=vector("list",nobs)
-for(i in 1:nobs) {theta[[i]]=list(mu=rep(0,dimy),rooti=diag(dimy))}
-indic=double(nobs)
-thetaStar=unique(theta)
-nunique=length(thetaStar)
-for(j in 1:nunique){
-    indic[which(sapply(theta,identical,thetaStar[[j]]))]=j
-}
-#
-# initialize lambda
-#
-lambda=list(mubar=rep(0,dimy),Amu=.1,nu=dimy+1,V=(dimy+1)*diag(dimy))
-
-#
-# initialize alpha
-#
-alpha=1
-
-alphadraw=double(floor(R/keep))
-Istardraw=double(floor(R/keep))
-adraw=double(floor(R/keep))
-nudraw=double(floor(R/keep))
-vdraw=double(floor(R/keep))
-thetaNp1draw=vector("list",floor(R/keep))
-inddraw=matrix(double((floor(R/keep))*nobs),ncol=nobs)
-
-#
-# do scaling
-#
-if(SCALE){
-  dvec=sqrt(apply(y,2,var))
-  ybar=apply(y,2,mean)
-  y=scale(y,center=ybar,scale=dvec)
-  dvec=1/dvec  # R function scale divides by scale
-} 
-#
-# note on scaling
-#
-#   we model scaled y, z_i=D(y_i-ybar)   D=diag(1/sigma1, ..., 1/sigma_dimy)
-#
-#   if p_z= 1/R sum(phi(z|mu,Sigma))
-#      p_y=1/R sum(phi(y|D^-1mu+ybar,D^-1SigmaD^-1)
-#      rooti_y=Drooti_z
-#
-#   you might want to use quantiles instead, like median and (10,90)
-#
-
-#
-# start main iteration loop
-#
-itime=proc.time()[3]
-cat("MCMC Iteration (est time to end -min) ",fill=TRUE)
-fsh()
-
-for(rep in 1:R)
-{
-   n = length(theta)
-
-   eta=NULL    # note eta is not used
-   thetaNp1=NULL
-   q0v = q0(y,lambda,eta)   # now that we draw lambda we need to recompute q0v each time
-
-   p=c(rep(1/(alpha+(n-1)),n-1),alpha/(alpha+(n-1)))
-
-   nunique=length(thetaStar)
-  
-   if(nunique > maxuniq ) { pandterm("maximum number of unique thetas exceeded")} 
-   ydenmat=matrix(double(maxuniq*n),ncol=n) 
-   ydenmat[1:nunique,]=yden(thetaStar,y,eta)
-   #  ydenmat is a length(thetaStar) x n array of density values given f(y[j,] | thetaStar[[i]]
-   #  note: due to remix step (below) we must recompute ydenmat each time!
-
-   # use .Call to draw theta list
-   out= .Call("thetadraw",y,ydenmat,indic,q0v,p,theta,lambda,eta=eta,
-                  thetaD=thetaD,yden=yden,maxuniq,nunique,new.env()) 
-
-   # theta has been modified by thetadraw so we need to recreate thetaStar
-   thetaStar=unique(theta)
-   nunique=length(thetaStar)
-
-   #thetaNp1 and remix
-   probs=double(nunique+1)
-   for(j in 1:nunique) {
-       ind = which(sapply(theta,identical,thetaStar[[j]]))
-       probs[j]=length(ind)/(alpha+n) 
-       new_utheta=thetaD(y[ind,,drop=FALSE],lambda,eta) 
-       for(i in seq(along=ind)) {theta[[ind[i]]]=new_utheta}
-       indic[ind]=j
-       thetaStar[[j]]=new_utheta
-   }
-   probs[nunique+1]=alpha/(alpha+n)
-   ind=rmultinomF(probs)
-   if(ind==length(probs)) {
-      thetaNp1=GD(lambda)
-   } else {
-      thetaNp1=thetaStar[[ind]]
-   }
-
-   # draw alpha
-   alpha=alphaD(Prioralpha,nunique,gridsize=gridsize)
-   
-   # draw lambda
-   lambda=lambdaD(lambda,thetaStar,alim=lambda_hyper$alim,nulim=lambda_hyper$nulim,
-             vlim=lambda_hyper$vlim,gridsize=gridsize)
-
-   if(rep%%100==0)
-     {
-      ctime=proc.time()[3]
-      timetoend=((ctime-itime)/rep)*(R-rep)
-      cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
-      fsh()
-      }
-   if(rep%%keep ==0)
-     {
-      mkeep=rep/keep
-      alphadraw[mkeep]=alpha
-      Istardraw[mkeep]=nunique
-      adraw[mkeep]=lambda$Amu
-      nudraw[mkeep]=lambda$nu
-      vdraw[mkeep]=lambda$V[1,1]/lambda$nu
-      if(SCALE){
-        thetaNp1[[1]]=thetaNp1[[1]]/dvec+ybar
-        if(ncol(y)>1) 
-          {thetaNp1[[2]]=diag(dvec)%*%thetaNp1[[2]]}
-        else
-          {thetaNp1[[2]]=dvec*thetaNp1[[2]]}
-      }
-      thetaNp1draw[[mkeep]]=list(list(mu=thetaNp1[[1]],rooti=thetaNp1[[2]]))
-                            #  here we put the draws into the list of lists of list format useful for
-                            #  finite mixture of normals utilities
-      inddraw[mkeep,]=indic
-      }
-}
-ctime=proc.time()[3]
-cat("Total Time Elapsed= ",round((ctime-itime)/60,2),fill=TRUE)
-nmix=list(probdraw=matrix(c(rep(1,nrow(inddraw))),ncol=1),zdraw=inddraw,compdraw=thetaNp1draw)
-attributes(nmix)$class="bayesm.nmix"
-attributes(alphadraw)$class=c("bayesm.mat","mcmc")
-attributes(Istardraw)$class=c("bayesm.mat","mcmc")
-attributes(adraw)$class=c("bayesm.mat","mcmc")
-attributes(nudraw)$class=c("bayesm.mat","mcmc")
-attributes(vdraw)$class=c("bayesm.mat","mcmc")
-return(list(alphadraw=alphadraw,Istardraw=Istardraw,adraw=adraw,nudraw=nudraw,
-         vdraw=vdraw,nmix=nmix))
-}
diff --git a/R/rbayesBLP_rcpp.R b/R/rbayesBLP_rcpp.R
new file mode 100644
index 0000000..5bc7918
--- /dev/null
+++ b/R/rbayesBLP_rcpp.R
@@ -0,0 +1,289 @@
+rbayesBLP=function(Data, Prior, Mcmc){
+#
+# Keunwoo Kim 02/06/2014
+#
+# Purpose: 
+#      draw theta_bar and Sigma via hybrid Gibbs sampler (Jiang, Manchanda, and Rossi, 2009)
+#
+# Arguments:
+#    Data
+#      X: J*T by K (if IV is used, the last column is the endogenous variable.)
+#      share: vector of length J*T
+#      J: number of alternatives (excluding outside option)
+#      Z: instrumental variables (optional)
+#
+#    Prior
+#      sigmasqR
+#      theta_hat
+#      A
+#      deltabar
+#      Ad
+#      nu0
+#      s0_sq
+#      VOmega
+#
+#    Mcmc
+#      R: number of MCMC draws
+#      H: number of draws for Monte-Carlo integration
+#
+#      s: scaling parameter of MH increment
+#      cand_cov: var-cov matrix of MH increment
+#      (minaccep: lower bound of target range of acceptance rate)
+#      (maxaccep: upper bound of target range of acceptance rate)
+#
+#      theta_bar_initial
+#      r_initial
+#      tau_sq_initial
+#      Omega_initial
+#      delta_initial
+#
+#      tol: convergence tolerance for the contraction mapping
+#
+# Output:
+#      a List of tau_sq (or Omega and delta), 
+#      theta_bar, r (equivalent to Sigma) draws, Sigma draws,
+#      relative numerical efficiency of r draws, tuned parameters for MH, and
+#      acceptance rate
+#
+  
+pandterm=function(message) {stop(message,call.=FALSE)}  
+#
+# check for data
+#
+if(missing(Data)) {pandterm("Requires Data argument -- list of X and share")}
+if(is.null(Data$X)) {pandterm("Requires Data element X")} else {X=Data$X}
+if(is.null(Data$share)) {pandterm("Requires Data element share")} else {share=Data$share}
+if(is.null(Data$J)) {pandterm("Requires Data element J")} else {J=Data$J}
+if(is.null(Data$Z)) {IV=FALSE; Z=matrix(0); I=1} else {IV=TRUE; I=ncol(Z)}
+K=ncol(X)
+  
+if (length(share) != nrow(X)) {pandterm("Mismatch in the number of observations in X and share")}
+T=length(share)/J
+
+#
+# check for prior 
+#
+if(missing(Prior)) {  
+  c=50
+  sigmasqRoff=1
+  sigmasqRdiag=log((1+sqrt(1-4*(2*(c(1:K)-1)*sigmasqRoff-c)))/2)/4
+  sigmasqR=c(sigmasqRdiag, rep(1, K*(K-1)/2))
+  A=BayesmConstant.A*diag(K)
+  theta_hat=rep(0,K)
+  nu0=K+1
+  s0_sq=1
+  deltabar=rep(0,I)
+  Ad=BayesmConstant.A*diag(I)
+  VOmega=BayesmConstant.BLPVOmega
+}
+else {
+  if(is.null(Prior$sigmasqR)) {
+    c=50
+    sigmasqRoff=1
+    sigmasqRdiag=log((1+sqrt(1-4*(2*(c(1:K)-1)*sigmasqRoff-c)))/2)/4
+    sigmasqR=c(sigmasqRdiag, rep(1, K*(K-1)/2))
+  } else {
+    sigmasqR=Prior$sigmasqR
+  }
+  if(is.null(Prior$A)) {A=BayesmConstant.A*diag(K)} else {A=Prior$A}
+  if(is.null(Prior$theta_hat)) {theta_hat=rep(0,K)} else {theta_hat=Prior$theta_hat}
+  if(is.null(Prior$nu0)) {nu0=K+1} else {nu0=Prior$nu0}
+  if(is.null(Prior$s0_sq)) {s0_sq=1} else {s0_sq=Prior$s0_sq}
+  if(is.null(Prior$deltabar)) {deltabar=rep(0,I)} else {deltabar=Prior$deltabar}
+  if(is.null(Prior$Ad)) {Ad=BayesmConstant.A*diag(I)} else {Ad=Prior$Ad}
+  if(is.null(Prior$VOmega)) {VOmega=BayesmConstant.BLPVOmega} else {VOmega=Prior$VOmega}
+}
+  
+if(length(sigmasqR) != K*(K+1)/2) pandterm("sigmasqR is of incorrect dimension")
+if(sum(dim(A)==c(K,K)) != 2) pandterm("A is of incorrect dimension")
+if(length(theta_hat) != K) pandterm("theta_hat is of incorrect dimension")
+if((length(nu0) != 1) | (nu0 <=0)) pandterm("nu0 should be a positive number")
+if((length(s0_sq) != 1) | (s0_sq <=0)) pandterm("s0_sq should be a positive number")
+if(length(deltabar) != I) pandterm("deltabar is of incorrect dimension")
+if(sum(dim(Ad)==c(I,I)) != 2) pandterm("Ad is of incorrect dimension")
+if(sum(dim(VOmega)==c(2,2)) != 2) pandterm("VOmega is of incorrect dimension")
+
+#
+# check for Mcmc 
+#
+if(missing(Mcmc)) pandterm("Requires Mcmc argument -- at least R and H")
+if(is.null(Mcmc$R)) {pandterm("Requires element R of Mcmc")} else {R=Mcmc$R}
+if(is.null(Mcmc$H)) {pandterm("Requires element H of Mcmc")} else {H=Mcmc$H}
+if(is.null(Mcmc$initial_theta_bar)) {initial_theta_bar=rep(0,K)} else {initial_theta_bar=Mcmc$initial_theta_bar}
+if(is.null(Mcmc$initial_r)) {initial_r=rep(0,K*(K+1)/2)} else {initial_r=Mcmc$initial_r}
+if(is.null(Mcmc$initial_tau_sq)) {initial_tau_sq=0.1} else {initial_tau_sq=Mcmc$initial_tau_sq}
+if(is.null(Mcmc$initial_Omega)) {initial_Omega=diag(2)} else {initial_Omega=Mcmc$initial_Omega}
+if(is.null(Mcmc$initial_delta)) {initial_delta=rep(0,I)} else {initial_delta=Mcmc$initial_delta}  # fixed: read initial_delta, not initial_tau_sq
+if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
+if(is.null(Mcmc$nprint)) {nprint=BayesmConstant.nprint} else {nprint=Mcmc$nprint}
+
+if(is.null(Mcmc$s)+is.null(Mcmc$cand_cov)==0){
+  s=Mcmc$s
+  cand_cov=Mcmc$cand_cov
+  tuning_auto=FALSE
+}
+if(is.null(Mcmc$s)+is.null(Mcmc$cand_cov)==1) pandterm("If you want to control tuning parameters, write both parameters.")
+if(is.null(Mcmc$s)+is.null(Mcmc$cand_cov)==2){
+  s=BayesmConstant.RRScaling/sqrt(K*(K+1)/2)
+  cand_cov=diag(c(rep(0.1,K),rep(1,K*(K-1)/2)))
+  tuning_auto=TRUE
+}
+
+if(is.null(Mcmc$tol)) {tol=BayesmConstant.BLPtol} else {tol=Mcmc$tol}
+minaccep=0.3
+maxaccep=0.5
+
+if(length(initial_theta_bar)!=K) pandterm("initial_theta_bar is of incorrect dimension")
+if(length(initial_r)!=(K*(K+1)/2)) pandterm("initial_r is of incorrect dimension")
+if(initial_tau_sq<0) pandterm("initial_tau_sq should be positive")
+if(sum(dim(initial_Omega)==c(2,2))!=2) pandterm("initial_Omega is of incorrect dimension")
+if(length(initial_delta)!=I) pandterm("initial_delta is of incorrect dimension")
+if(nprint<0) { pandterm('nprint must be >=0') }
+  
+#
+# print out problem
+#
+cat(" ",fill=TRUE)
+cat("Data Dimensions:",fill=TRUE)
+cat(" ",T," market(time); ",J+1," alternatives (including outside option); ",fill=TRUE)
+cat(" ",fill=TRUE)
+if (IV==TRUE){
+  cat(" ",I," instrumental variable(s) ",fill=TRUE)
+  cat(" ",fill=TRUE)
+}
+cat("Prior Parameters:",fill=TRUE)
+cat("  thetahat",fill=TRUE)
+print(theta_hat)
+cat("  A",fill=TRUE)
+print(A)
+cat("  sigmasqR",fill=TRUE)
+print(sigmasqR)
+cat("  nu0",fill=TRUE)
+print(nu0)
+if (IV==TRUE){
+  cat("  VOmega",fill=TRUE)
+  print(VOmega)
+  cat("  deltabar",fill=TRUE)
+  print(deltabar)
+  cat("  Ad",fill=TRUE)
+  print(Ad)
+}
+if (IV==FALSE){
+  cat("  s0_sq",fill=TRUE)
+  print(s0_sq)
+}
+cat(" ",fill=TRUE)
+cat("MCMC Parameters: ",fill=TRUE)  # fixed user-visible typo "Parmameters"
+cat(" ",R," reps; keeping every ",keep,"th draw; printing every ",nprint,"th draw",fill=TRUE)
+cat(" ",H," draws for Monte-Carlo integration",fill=TRUE)
+cat(" ",fill=TRUE)
+cat("Contraction Mapping Tolerance: ",fill=TRUE)
+cat(" until max(abs((mu1-mu0)/mu0)) <",tol,fill=TRUE)
+cat(" ",fill=TRUE)
+
+if (tuning_auto){
+  cat("  automatically tuning parameters of RW M-H increment",fill=TRUE)
+  cat(" ",fill=TRUE)
+  cat("  target acceptance rate is between ",minaccep*100,"% and ",maxaccep*100,"%",fill=TRUE)
+  cat(" ",fill=TRUE)
+} else{
+  cat("  scaling parameter of RW M-H increment is given as",fill=TRUE)
+  print(s)
+  cat(" ",fill=TRUE)
+  cat("  var-cov matrix of RW M-H increment is given as",fill=TRUE)
+  print(cand_cov)
+  cat(" ",fill=TRUE)
+}
+
+# draw for MC integration
+draw <- matrix(rnorm(K*H), K, H)
+
+#
+# tuning RW Metropolis-Hastings  
+#
+
+# if auto-tuning
+complete1 <- 0
+
+initial_theta_bar2 <- initial_theta_bar
+initial_r2 <- initial_r
+initial_tau_sq2 <- initial_tau_sq
+initial_Omega2 <- initial_Omega
+initial_delta2 <- initial_delta
+rdraws <- NULL
+if (tuning_auto){  
+  cat("Tuning RW Metropolis-Hastings Increment...",fill=TRUE)
+  cat(" ",fill=TRUE)   
+  cat("-If acceptance rate < ",minaccep*100,"% => s/5",fill=TRUE)
+  cat("-If acceptance rate > ",maxaccep*100,"% => s*3",fill=TRUE)
+  cat("-If acceptance rate is ",minaccep*100,"~",maxaccep*100,"% => complete tuning",fill=TRUE)
+  cat(" ",fill=TRUE)
+  while (complete1==0){
+    
+    cat("  try s=",s,fill=TRUE)    
+    out1 <- bayesBLP_rcpp_loop(IV, X, Z, share, J, T, draw, 500, sigmasqR, A, theta_hat, deltabar, Ad, nu0, s0_sq, VOmega,
+                               s^2, cand_cov, initial_theta_bar2, initial_r2, initial_tau_sq2, initial_Omega2, initial_delta2, tol, 1, 0)
+    initial_theta_bar2 <- as.vector(out1$thetabardraw[,500])
+    initial_r2 <- as.vector(out1$rdraw[,500])
+    initial_tau_sq2 <- out1$tausqdraw[500]
+    if (IV==TRUE) {initial_Omega2 <- matrix(out1$Omegadraw[,500],2,2)}
+    if (IV==TRUE) {initial_delta2 <- as.vector(out1$deltadraw[,500])}
+    cat("    acceptance rate is ",out1$acceptrate,fill=TRUE)
+    
+    if (out1$acceptrate>0.20 & out1$acceptrate<0.80){
+      rdraws <- cbind(rdraws, out1$rdraw)   
+      cat("    (r draws stored)",fill=TRUE)
+    }    
+    if (out1$acceptrate<minaccep){      
+      s <- s/5
+    }else if (out1$acceptrate>maxaccep){      
+      s <- s*3
+    }else{      
+      complete1 <- 1      
+      cat(" ",fill=TRUE)
+      cat("    (tuning completed.)",fill=TRUE)        
+    }
+  }  
+  
+  # scaling tuned var-cov matrix from r draws
+  scale_opt <- s*sqrt(diag(cand_cov))
+  
+  Omega <- cov(t(rdraws))
+  scale_Omega <- sqrt(diag(Omega))
+  corr_opt <- Omega / (scale_Omega%*%t(scale_Omega))
+  
+  s <- 1
+  cand_cov <- corr_opt * (scale_opt%*%t(scale_opt))  
+  
+  cat(" ",fill=TRUE)
+  cat("Tuning Completed:",fill=TRUE)
+  cat("  s=",s,fill=TRUE)    
+  cat("  var-cov=",fill=TRUE)
+  print(cand_cov)  
+  cat(" ",fill=TRUE)
+}
+
+#
+# main run  
+#
+cat("Starting Random Walk Metropolis-Hastings Sampler for BLP",fill=TRUE)
+out <- bayesBLP_rcpp_loop(IV, X, Z, share, J, T, draw, R, sigmasqR, A, theta_hat, deltabar, Ad, nu0, s0_sq, VOmega,
+                          s^2, cand_cov, initial_theta_bar, initial_r, initial_tau_sq, initial_Omega, initial_delta, tol, keep, nprint) 
+out$s <- s
+out$cand_cov <- cand_cov
+
+attributes(out$tausqdraw)$class=c("bayesm.mat","mcmc")
+attributes(out$tausqdraw)$mcpar=c(1,R,keep)
+attributes(out$thetabardraw)$class=c("bayesm.mat","mcmc")
+attributes(out$thetabardraw)$mcpar=c(1,R,keep)
+attributes(out$rdraw)$class=c("bayesm.mat","mcmc")
+attributes(out$rdraw)$mcpar=c(1,R,keep)
+attributes(out$Sigmadraw)$class=c("bayesm.mat","mcmc")
+attributes(out$Sigmadraw)$mcpar=c(1,R,keep)
+attributes(out$Omegadraw)$class=c("bayesm.mat","mcmc")
+attributes(out$Omegadraw)$mcpar=c(1,R,keep)
+attributes(out$deltadraw)$class=c("bayesm.mat","mcmc")
+attributes(out$deltadraw)$mcpar=c(1,R,keep)
+
+return(out)
+}
diff --git a/R/rbiNormGibbs.R b/R/rbiNormGibbs.R
index 695246f..a15ac61 100755
--- a/R/rbiNormGibbs.R
+++ b/R/rbiNormGibbs.R
@@ -1,118 +1,117 @@
-rbiNormGibbs=function(initx=2,inity=-2,rho,burnin=100,R=500)
-{
-#
-# revision history:
-#     P. Rossi 1/05
-#
-# purpose:
-#    illustrate the function of bivariate normal gibbs sampler
-#
-# arguments:
-#   initx,inity  initial values for draw sequence
-#   rho  correlation
-#   burnin draws to be discarded in final paint
-#   R -- number of draws
-#
-# output:
-#   opens graph window and paints all moves and normal contours
-#   list containing draw matrix
-#
-# model:
-#  theta is bivariate normal with zero means, unit variances and correlation rho
-#
-# define needed functions
-#
-kernel=
-function(x,mu,rooti){
-# function to evaluate -.5*log of MV NOrmal density kernel with  mean mu, var Sigma
-# and with sigma^-1=rooti%*%t(rooti)   
-# rooti is in the inverse of upper triangular chol root of sigma
-#          note: this is the UL decomp of sigmai not LU!
-#                Sigma=root'root   root=inv(rooti)
-z=as.vector(t(rooti)%*%(x-mu))
-(z%*%z)
-}
-#
-pandterm=function(message) {stop(message,call.=FALSE)}
-#
-# check input arguments
-#
-if(missing(rho)) {pandterm("Requires rho argument ")}
-#
-# print out settings
-#
-cat("Bivariate Normal Gibbs Sampler",fill=TRUE)
-cat("rho= ",rho,fill=TRUE)
-cat("initial x,y coordinates= (",initx,",",inity,")",fill=TRUE)
-cat("burn-in= ",burnin," R= ",R,fill=TRUE)
-cat(" ",fill=TRUE)
-cat(" ",fill=TRUE)
-
-sd=(1-rho**2)**(.5)
-sigma=matrix(c(1,rho,rho,1),ncol=2)
-rooti=backsolve(chol(sigma),diag(2))
-mu=c(0,0)
-
-x=seq(-3.5,3.5,length=100)
-y=x
-z=matrix(double(100*100),ncol=100)
-for (i in 1:length(x)) 
-{
-   for(j in 1:length(y))
-   {
-   z[i,j]=kernel(c(x[i],y[j]),mu,rooti)
-   }
-}
-prob=c(.1,.3,.5,.7,.9,.99)
-lev=qchisq(prob,2)
-
-
-par(mfrow=c(1,1))
-contour(x,y,z,levels=lev,labels=prob,
-   xlab="theta1",ylab="theta2",drawlabels=TRUE,col="green",labcex=1.3,lwd=2.0)
-title(paste("Gibbs Sampler with Intermediate Moves: Rho =",rho))
-
-points(initx,inity,pch="B",cex=1.5)
-
-oldx=initx
-oldy=inity
-continue="y"
-r=0
-draws=matrix(double(R*2),ncol=2)
-draws[1,]=c(initx,inity)
-cat(" ")
-cat("Starting Gibbs Sampler ....",fill=TRUE)
-cat("(hit enter or y to display moves one-at-a-time)",fill=TRUE)
-cat("('go' to paint all moves without stopping to prompt)",fill=TRUE)
-cat(" ",fill=TRUE)
-while(continue != "n"&& r < R)
-{
-  if(continue != "go") continue=readline("cont?")
-  newy=sd*rnorm(1) + rho*oldx
-  lines(c(oldx,oldx),c(oldy,newy),col="magenta",lwd=1.5)
-  newx=sd*rnorm(1)+rho*newy
-  lines(c(oldx,newx),c(newy,newy),col="magenta",lwd=1.5)	
-  oldy=newy
-  oldx=newx
-  r=r+1
-  draws[r,]=c(newx,newy)
-}
-continue=readline("Show Comparison to iid Sampler?")
-if(continue != "n" & continue != "No" & continue != "no"){
-   par(mfrow=c(1,2))
-   contour(x,y,z,levels=lev,
-      xlab="theta1",ylab="theta2",drawlabels=TRUE,labels=prob,labcex=1.1,col="green",lwd=2.0)
-   title(paste("Gibbs Draws: Rho =",rho))
-   points(draws[(burnin+1):R,],pch=20,col="magenta",cex=.7)
-
-   idraws=t(chol(sigma))%*%matrix(rnorm(2*(R-burnin)),nrow=2)
-   idraws=t(idraws)
-   contour(x,y,z,levels=lev,
-      xlab="theta1",ylab="theta2",drawlabels=TRUE,labels=prob,labcex=1.1,col="green",lwd=2.0)
-   title(paste("IID draws: Rho =",rho))
-   points(idraws,pch=20,col="magenta",cex=.7)
-}
-attributes(draws)$class=c("bayesm.mat","mcmc")
-attributes(draws)$mcpar=c(1,R,1)
-return(draws)
-}
+rbiNormGibbs=function(initx=2,inity=-2,rho,burnin=100,R=500)
+{
+#
+# revision history:
+#     P. Rossi 1/05
+#
+# purpose:
+#    illustrate the function of bivariate normal gibbs sampler
+#
+# arguments:
+#   initx,inity  initial values for draw sequence
+#   rho  correlation
+#   burnin draws to be discarded in final paint
+#   R -- number of draws
+#
+# output:
+#   opens graph window and paints all moves and normal contours
+#   list containing draw matrix
+#
+# model:
+#  theta is bivariate normal with zero means, unit variances and correlation rho
+#
+# define needed functions
+#
+kernel=
+function(x,mu,rooti){
+# function to evaluate -.5*log of MV Normal density kernel with mean mu, var Sigma
+# and with sigma^-1=rooti%*%t(rooti)   
+# rooti is the inverse of the upper triangular chol root of sigma
+#          note: this is the UL decomp of sigmai not LU!
+#                Sigma=root'root   root=inv(rooti)
+z=as.vector(t(rooti)%*%(x-mu))
+(z%*%z)
+}
+
+#
+# check input arguments
+#
+if(missing(rho)) {pandterm("Requires rho argument ")}
+#
+# print out settings
+#
+cat("Bivariate Normal Gibbs Sampler",fill=TRUE)
+cat("rho= ",rho,fill=TRUE)
+cat("initial x,y coordinates= (",initx,",",inity,")",fill=TRUE)
+cat("burn-in= ",burnin," R= ",R,fill=TRUE)
+cat(" ",fill=TRUE)
+cat(" ",fill=TRUE)
+
+sd=(1-rho**2)**(.5)
+sigma=matrix(c(1,rho,rho,1),ncol=2)
+rooti=backsolve(chol(sigma),diag(2))
+mu=c(0,0)
+
+x=seq(-3.5,3.5,length=100)
+y=x
+z=matrix(double(100*100),ncol=100)
+for (i in 1:length(x)) 
+{
+   for(j in 1:length(y))
+   {
+   z[i,j]=kernel(c(x[i],y[j]),mu,rooti)
+   }
+}
+prob=c(.1,.3,.5,.7,.9,.99)
+lev=qchisq(prob,2)
+
+
+par(mfrow=c(1,1))
+contour(x,y,z,levels=lev,labels=prob,
+   xlab="theta1",ylab="theta2",drawlabels=TRUE,col="green",labcex=1.3,lwd=2.0)
+title(paste("Gibbs Sampler with Intermediate Moves: Rho =",rho))
+
+points(initx,inity,pch="B",cex=1.5)
+
+oldx=initx
+oldy=inity
+continue="y"
+r=0
+draws=matrix(double(R*2),ncol=2)
+draws[1,]=c(initx,inity)
+cat(" ")
+cat("Starting Gibbs Sampler ....",fill=TRUE)
+cat("(hit enter or y to display moves one-at-a-time)",fill=TRUE)
+cat("('go' to paint all moves without stopping to prompt)",fill=TRUE)
+cat(" ",fill=TRUE)
+while(continue != "n"&& r < R)
+{
+  if(continue != "go") continue=readline("cont?")
+  newy=sd*rnorm(1) + rho*oldx
+  lines(c(oldx,oldx),c(oldy,newy),col="magenta",lwd=1.5)
+  newx=sd*rnorm(1)+rho*newy
+  lines(c(oldx,newx),c(newy,newy),col="magenta",lwd=1.5)	
+  oldy=newy
+  oldx=newx
+  r=r+1
+  draws[r,]=c(newx,newy)
+}
+continue=readline("Show Comparison to iid Sampler?")
+if(continue != "n" & continue != "No" & continue != "no"){
+   par(mfrow=c(1,2))
+   contour(x,y,z,levels=lev,
+      xlab="theta1",ylab="theta2",drawlabels=TRUE,labels=prob,labcex=1.1,col="green",lwd=2.0)
+   title(paste("Gibbs Draws: Rho =",rho))
+   points(draws[(burnin+1):R,],pch=20,col="magenta",cex=.7)
+
+   idraws=t(chol(sigma))%*%matrix(rnorm(2*(R-burnin)),nrow=2)
+   idraws=t(idraws)
+   contour(x,y,z,levels=lev,
+      xlab="theta1",ylab="theta2",drawlabels=TRUE,labels=prob,labcex=1.1,col="green",lwd=2.0)
+   title(paste("IID draws: Rho =",rho))
+   points(idraws,pch=20,col="magenta",cex=.7)
+}
+attributes(draws)$class=c("bayesm.mat","mcmc")
+attributes(draws)$mcpar=c(1,R,1)
+return(draws)
+}
diff --git a/R/rbprobitGibbs.R b/R/rbprobitgibbs_rcpp.r
old mode 100755
new mode 100644
similarity index 56%
rename from R/rbprobitGibbs.R
rename to R/rbprobitgibbs_rcpp.r
index 6a4010d..c9685a6
--- a/R/rbprobitGibbs.R
+++ b/R/rbprobitgibbs_rcpp.r
@@ -1,160 +1,115 @@
-rbprobitGibbs=
-function(Data,Prior,Mcmc)
-{
-#
-# revision history:
-#   p. rossi 1/05
-#   3/07 added validity check of values of y and classes
-#   3/07 fixed error with betabar supplied
-#
-# purpose: 
-#   draw from posterior for binary probit using Gibbs Sampler
-#
-# Arguments:
-#   Data - list of X,y  
-#     X is nobs x nvar, y is nobs vector of 0,1
-#   Prior - list of A, betabar
-#     A is nvar x nvar prior preci matrix
-#     betabar is nvar x 1 prior mean
-#   Mcmc
-#     R is number of draws
-#     keep is thinning parameter
-#
-# Output:
-#   list of betadraws
-#
-# Model:   y = 1 if  w=Xbeta + e   > 0  e ~N(0,1)
-#
-# Prior:   beta ~ N(betabar,A^-1)
-#
-#
-# ----------------------------------------------------------------------
-# define functions needed
-#
-breg1=
-function(root,X,y,Abetabar) 
-{
-#
-#     p.rossi 12/04
-#
-# Purpose: draw from posterior for linear regression, sigmasq=1.0
-# 
-# Arguments:
-#  root is chol((X'X+A)^-1)
-#  Abetabar = A*betabar
-#
-# Output:  draw from posterior
-# 
-# Model: y = Xbeta + e  e ~ N(0,I)
-#
-# Prior:  beta ~ N(betabar,A^-1)
-#
-cov=crossprod(root,root)
-betatilde=cov%*%(crossprod(X,y)+Abetabar)
-betatilde+t(root)%*%rnorm(length(betatilde))
-}
-
-pandterm=function(message) {stop(message,call.=FALSE)}
-#
-# ----------------------------------------------------------------------
-#
-# check arguments
-#
-if(missing(Data)) {pandterm("Requires Data argument -- list of y and X")}
-    if(is.null(Data$X)) {pandterm("Requires Data element X")}
-    X=Data$X
-    if(is.null(Data$y)) {pandterm("Requires Data element y")}
-    y=Data$y
-nvar=ncol(X)
-nobs=length(y)
-#
-# check data for validity
-#
-if(length(y) != nrow(X) ) {pandterm("y and X not of same row dim")}
-if(sum(unique(y) %in% c(0:1)) < length(unique(y))) {pandterm("Invalid y, must be 0,1")}
-#
-# check for Prior
-#
-if(missing(Prior))
-   { betabar=c(rep(0,nvar)); A=.01*diag(nvar)}
-else
-   {
-    if(is.null(Prior$betabar)) {betabar=c(rep(0,nvar))} 
-       else {betabar=Prior$betabar}
-    if(is.null(Prior$A)) {A=.01*diag(nvar)} 
-       else {A=Prior$A}
-   }
-#
-# check dimensions of Priors
-#
-if(ncol(A) != nrow(A) || ncol(A) != nvar || nrow(A) != nvar) 
-   {pandterm(paste("bad dimensions for A",dim(A)))}
-if(length(betabar) != nvar)
-   {pandterm(paste("betabar wrong length, length= ",length(betabar)))}
-#
-# check MCMC argument
-#
-if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
-else
-   {
-    if(is.null(Mcmc$R)) 
-       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
-    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
-    }
-#
-# print out problem
-#
-cat(" ", fill=TRUE)
-cat("Starting Gibbs Sampler for Binary Probit Model",fill=TRUE)
-cat("   with ",length(y)," observations",fill=TRUE)
-cat("Table of y Values",fill=TRUE)
-print(table(y))
-cat(" ", fill=TRUE)
-cat("Prior Parms: ",fill=TRUE)
-cat("betabar",fill=TRUE)
-print(betabar)
-cat("A",fill=TRUE)
-print(A)
-cat(" ", fill=TRUE)
-cat("MCMC parms: ",fill=TRUE)
-cat("R= ",R," keep= ",keep,fill=TRUE)
-cat(" ",fill=TRUE)
-
-betadraw=matrix(double(floor(R/keep)*nvar),ncol=nvar)
-beta=c(rep(0,nvar))
-sigma=c(rep(1,nrow(X)))
-root=chol(chol2inv(chol((crossprod(X,X)+A))))
-Abetabar=crossprod(A,betabar)
-        a=ifelse(y == 0,-100, 0)
-        b=ifelse(y == 0, 0, 100)
-#
-#	start main iteration loop
-#
-itime=proc.time()[3]
-cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
-fsh()
-
-for (rep in 1:R) 
-{
-  # draw z given beta(i-1)
-  mu=X%*%beta
-  z=rtrun(mu,sigma,a,b)
-  beta=breg1(root,X,z,Abetabar)
-#
-#       print time to completion and draw # every 100th draw
-#
-  if(rep%%100 == 0)
-    {ctime=proc.time()[3]
-    timetoend=((ctime-itime)/rep)*(R-rep)
-    cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
-    fsh()}
-
-  if(rep%%keep == 0) 
-    {mkeep=rep/keep; betadraw[mkeep,]=beta}
-}
-ctime = proc.time()[3]
-cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
-attributes(betadraw)$class=c("bayesm.mat","mcmc")
-attributes(betadraw)$mcpar=c(1,R,keep)
-return(list(betadraw=betadraw))
-}
+rbprobitGibbs=
+function(Data,Prior,Mcmc)
+{
+#
+# revision history:
+#   p. rossi 1/05
+#   3/07 added validity check of values of y and classes
+#   3/07 fixed error with betabar supplied
+#   W. Taylor 4/15 - added nprint option to MCMC argument
+#
+# purpose: 
+#   draw from posterior for binary probit using Gibbs Sampler
+#
+# Arguments:
+#   Data - list of X,y  
+#     X is nobs x nvar, y is nobs vector of 0,1
+#   Prior - list of A, betabar
+#     A is nvar x nvar prior preci matrix
+#     betabar is nvar x 1 prior mean
+#   Mcmc
+#     R is number of draws
+#     keep is thinning parameter
+#     nprint - print estimated time remaining on every nprint'th draw
+#
+# Output:
+#   list of betadraws
+#
+# Model:   y = 1 if  w=Xbeta + e   > 0  e ~N(0,1)
+#
+# Prior:   beta ~ N(betabar,A^-1)
+#
+#
+#
+# check arguments
+#
+if(missing(Data)) {pandterm("Requires Data argument -- list of y and X")}
+    if(is.null(Data$X)) {pandterm("Requires Data element X")}
+    X=Data$X
+    if(is.null(Data$y)) {pandterm("Requires Data element y")}
+    y=Data$y
+nvar=ncol(X)
+nobs=length(y)
+#
+# check data for validity
+#
+if(length(y) != nrow(X) ) {pandterm("y and X not of same row dim")}
+if(sum(unique(y) %in% c(0:1)) < length(unique(y))) {pandterm("Invalid y, must be 0,1")}
+#
+# check for Prior
+#
+if(missing(Prior))
+   { betabar=c(rep(0,nvar)); A=BayesmConstant.A*diag(nvar)}
+else
+   {
+    if(is.null(Prior$betabar)) {betabar=c(rep(0,nvar))} 
+       else {betabar=Prior$betabar}
+    if(is.null(Prior$A)) {A=BayesmConstant.A*diag(nvar)} 
+       else {A=Prior$A}
+   }
+#
+# check dimensions of Priors
+#
+if(ncol(A) != nrow(A) || ncol(A) != nvar || nrow(A) != nvar) 
+   {pandterm(paste("bad dimensions for A",dim(A)))}
+if(length(betabar) != nvar)
+   {pandterm(paste("betabar wrong length, length= ",length(betabar)))}
+#
+# check MCMC argument
+#
+if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
+else
+   {
+    if(is.null(Mcmc$R)) 
+       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
+    if(is.null(Mcmc$keep)) {keep=BayesmConstant.keep} else {keep=Mcmc$keep}
+    if(is.null(Mcmc$nprint)) {nprint=BayesmConstant.nprint} else {nprint=Mcmc$nprint}
+      if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
+    }
+#
+# print out problem
+#
+cat(" ", fill=TRUE)
+cat("Starting Gibbs Sampler for Binary Probit Model",fill=TRUE)
+cat("   with ",length(y)," observations",fill=TRUE)
+cat("Table of y Values",fill=TRUE)
+print(table(y))
+cat(" ", fill=TRUE)
+cat("Prior Parms: ",fill=TRUE)
+cat("betabar",fill=TRUE)
+print(betabar)
+cat("A",fill=TRUE)
+print(A)
+cat(" ", fill=TRUE)
+cat("MCMC parms: ",fill=TRUE)
+cat("R= ",R," keep= ",keep," nprint= ",nprint,fill=TRUE)
+cat(" ",fill=TRUE)
+
+beta=c(rep(0,nvar))
+sigma=c(rep(1,nrow(X)))
+root=chol(chol2inv(chol((crossprod(X,X)+A))))
+Abetabar=crossprod(A,betabar)
+        a=ifelse(y == 0,-100, 0)
+        b=ifelse(y == 0, 0, 100)
+
+###################################################################
+# Keunwoo Kim
+# 08/05/2014
+###################################################################
+draws=rbprobitGibbs_rcpp_loop(y,X,Abetabar,root,beta,sigma,a,b,R,keep,nprint)
+###################################################################
+
+attributes(draws$betadraw)$class=c("bayesm.mat","mcmc")
+attributes(draws$betadraw)$mcpar=c(1,R,keep)
+return(draws)
+}
diff --git a/R/rcppexports.r b/R/rcppexports.r
new file mode 100644
index 0000000..c34f495
--- /dev/null
+++ b/R/rcppexports.r
@@ -0,0 +1,151 @@
+# This file was generated by Rcpp::compileAttributes
+# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
+
+bayesBLP_rcpp_loop <- function(IV, X, Z, share, J, T, v, R, sigmasqR, A, theta_hat, deltabar, Ad, nu0, s0_sq, VOmega, ssq, cand_cov, theta_bar_initial, r_initial, tau_sq_initial, Omega_initial, delta_initial, tol, keep, nprint) {
+    .Call('bayesm_bayesBLP_rcpp_loop', PACKAGE = 'bayesm', IV, X, Z, share, J, T, v, R, sigmasqR, A, theta_hat, deltabar, Ad, nu0, s0_sq, VOmega, ssq, cand_cov, theta_bar_initial, r_initial, tau_sq_initial, Omega_initial, delta_initial, tol, keep, nprint)
+}
+
+breg <- function(y, X, betabar, A) {
+    .Call('bayesm_breg', PACKAGE = 'bayesm', y, X, betabar, A)
+}
+
+cgetC <- function(e, k) {
+    .Call('bayesm_cgetC', PACKAGE = 'bayesm', e, k)
+}
+
+clusterMix_rcpp_loop <- function(zdraw, cutoff, SILENT, nprint) {
+    .Call('bayesm_clusterMix_rcpp_loop', PACKAGE = 'bayesm', zdraw, cutoff, SILENT, nprint)
+}
+
+ghkvec <- function(L, trunpt, above, r, HALTON = TRUE, pn = as.integer( c(0))) {
+    .Call('bayesm_ghkvec', PACKAGE = 'bayesm', L, trunpt, above, r, HALTON, pn)
+}
+
+llmnl <- function(beta, y, X) {
+    .Call('bayesm_llmnl', PACKAGE = 'bayesm', beta, y, X)
+}
+
+lndIChisq <- function(nu, ssq, X) {
+    .Call('bayesm_lndIChisq', PACKAGE = 'bayesm', nu, ssq, X)
+}
+
+lndIWishart <- function(nu, V, IW) {
+    .Call('bayesm_lndIWishart', PACKAGE = 'bayesm', nu, V, IW)
+}
+
+lndMvn <- function(x, mu, rooti) {
+    .Call('bayesm_lndMvn', PACKAGE = 'bayesm', x, mu, rooti)
+}
+
+lndMvst <- function(x, nu, mu, rooti, NORMC = FALSE) {
+    .Call('bayesm_lndMvst', PACKAGE = 'bayesm', x, nu, mu, rooti, NORMC)
+}
+
+rbprobitGibbs_rcpp_loop <- function(y, X, Abetabar, root, beta, sigma, a, b, R, keep, nprint) {
+    .Call('bayesm_rbprobitGibbs_rcpp_loop', PACKAGE = 'bayesm', y, X, Abetabar, root, beta, sigma, a, b, R, keep, nprint)
+}
+
+rdirichlet <- function(alpha) {
+    .Call('bayesm_rdirichlet', PACKAGE = 'bayesm', alpha)
+}
+
+rDPGibbs_rcpp_loop <- function(R, keep, nprint, y, lambda_hyper, SCALE, maxuniq, PrioralphaList, gridsize, BayesmConstantA, BayesmConstantnuInc, BayesmConstantDPalpha) {
+    .Call('bayesm_rDPGibbs_rcpp_loop', PACKAGE = 'bayesm', R, keep, nprint, y, lambda_hyper, SCALE, maxuniq, PrioralphaList, gridsize, BayesmConstantA, BayesmConstantnuInc, BayesmConstantDPalpha)
+}
+
+rhierLinearMixture_rcpp_loop <- function(regdata, Z, deltabar, Ad, mubar, Amu, nu, V, nu_e, ssq, R, keep, nprint, drawdelta, olddelta, a, oldprob, ind, tau) {
+    .Call('bayesm_rhierLinearMixture_rcpp_loop', PACKAGE = 'bayesm', regdata, Z, deltabar, Ad, mubar, Amu, nu, V, nu_e, ssq, R, keep, nprint, drawdelta, olddelta, a, oldprob, ind, tau)
+}
+
+rhierLinearModel_rcpp_loop <- function(regdata, Z, Deltabar, A, nu, V, nu_e, ssq, tau, Delta, Vbeta, R, keep, nprint) {
+    .Call('bayesm_rhierLinearModel_rcpp_loop', PACKAGE = 'bayesm', regdata, Z, Deltabar, A, nu, V, nu_e, ssq, tau, Delta, Vbeta, R, keep, nprint)
+}
+
+rhierMnlDP_rcpp_loop <- function(R, keep, nprint, lgtdata, Z, deltabar, Ad, PrioralphaList, lambda_hyper, drawdelta, nvar, oldbetas, s, maxuniq, gridsize, BayesmConstantA, BayesmConstantnuInc, BayesmConstantDPalpha) {
+    .Call('bayesm_rhierMnlDP_rcpp_loop', PACKAGE = 'bayesm', R, keep, nprint, lgtdata, Z, deltabar, Ad, PrioralphaList, lambda_hyper, drawdelta, nvar, oldbetas, s, maxuniq, gridsize, BayesmConstantA, BayesmConstantnuInc, BayesmConstantDPalpha)
+}
+
+rhierMnlRwMixture_rcpp_loop <- function(lgtdata, Z, deltabar, Ad, mubar, Amu, nu, V, s, R, keep, nprint, drawdelta, olddelta, a, oldprob, oldbetas, ind) {
+    .Call('bayesm_rhierMnlRwMixture_rcpp_loop', PACKAGE = 'bayesm', lgtdata, Z, deltabar, Ad, mubar, Amu, nu, V, s, R, keep, nprint, drawdelta, olddelta, a, oldprob, oldbetas, ind)
+}
+
+rhierNegbinRw_rcpp_loop <- function(regdata, hessdata, Z, Beta, Delta, Deltabar, Adelta, nu, V, a, b, R, keep, sbeta, alphacroot, nprint, rootA, alpha, fixalpha) {
+    .Call('bayesm_rhierNegbinRw_rcpp_loop', PACKAGE = 'bayesm', regdata, hessdata, Z, Beta, Delta, Deltabar, Adelta, nu, V, a, b, R, keep, sbeta, alphacroot, nprint, rootA, alpha, fixalpha)
+}
+
+rivDP_rcpp_loop <- function(R, keep, nprint, dimd, mbg, Abg, md, Ad, y, isgamma, z, x, w, delta, PrioralphaList, gridsize, SCALE, maxuniq, scalex, scaley, lambda_hyper, BayesmConstantA, BayesmConstantnu) {
+    .Call('bayesm_rivDP_rcpp_loop', PACKAGE = 'bayesm', R, keep, nprint, dimd, mbg, Abg, md, Ad, y, isgamma, z, x, w, delta, PrioralphaList, gridsize, SCALE, maxuniq, scalex, scaley, lambda_hyper, BayesmConstantA, BayesmConstantnu)
+}
+
+rivGibbs_rcpp_loop <- function(y, x, z, w, mbg, Abg, md, Ad, V, nu, R, keep, nprint) {
+    .Call('bayesm_rivGibbs_rcpp_loop', PACKAGE = 'bayesm', y, x, z, w, mbg, Abg, md, Ad, V, nu, R, keep, nprint)
+}
+
+rmixGibbs <- function(y, Bbar, A, nu, V, a, p, z) {
+    .Call('bayesm_rmixGibbs', PACKAGE = 'bayesm', y, Bbar, A, nu, V, a, p, z)
+}
+
+rmixture <- function(n, pvec, comps) {
+    .Call('bayesm_rmixture', PACKAGE = 'bayesm', n, pvec, comps)
+}
+
+rmnlIndepMetrop_rcpp_loop <- function(R, keep, nu, betastar, root, y, X, betabar, rootpi, rooti, oldlimp, oldlpost, nprint) {
+    .Call('bayesm_rmnlIndepMetrop_rcpp_loop', PACKAGE = 'bayesm', R, keep, nu, betastar, root, y, X, betabar, rootpi, rooti, oldlimp, oldlpost, nprint)
+}
+
+rmnpGibbs_rcpp_loop <- function(R, keep, nprint, pm1, y, X, beta0, sigma0, V, nu, betabar, A) {
+    .Call('bayesm_rmnpGibbs_rcpp_loop', PACKAGE = 'bayesm', R, keep, nprint, pm1, y, X, beta0, sigma0, V, nu, betabar, A)
+}
+
+rmultireg <- function(Y, X, Bbar, A, nu, V) {
+    .Call('bayesm_rmultireg', PACKAGE = 'bayesm', Y, X, Bbar, A, nu, V)
+}
+
+rmvpGibbs_rcpp_loop <- function(R, keep, nprint, p, y, X, beta0, sigma0, V, nu, betabar, A) {
+    .Call('bayesm_rmvpGibbs_rcpp_loop', PACKAGE = 'bayesm', R, keep, nprint, p, y, X, beta0, sigma0, V, nu, betabar, A)
+}
+
+rmvst <- function(nu, mu, root) {
+    .Call('bayesm_rmvst', PACKAGE = 'bayesm', nu, mu, root)
+}
+
+rnegbinRw_rcpp_loop <- function(y, X, betabar, rootA, a, b, beta, alpha, fixalpha, betaroot, alphacroot, R, keep, nprint) {
+    .Call('bayesm_rnegbinRw_rcpp_loop', PACKAGE = 'bayesm', y, X, betabar, rootA, a, b, beta, alpha, fixalpha, betaroot, alphacroot, R, keep, nprint)
+}
+
+rnmixGibbs_rcpp_loop <- function(y, Mubar, A, nu, V, a, p, z, R, keep, nprint) {
+    .Call('bayesm_rnmixGibbs_rcpp_loop', PACKAGE = 'bayesm', y, Mubar, A, nu, V, a, p, z, R, keep, nprint)
+}
+
+rordprobitGibbs_rcpp_loop <- function(y, X, k, A, betabar, Ad, s, inc_root, dstarbar, betahat, R, keep, nprint) {
+    .Call('bayesm_rordprobitGibbs_rcpp_loop', PACKAGE = 'bayesm', y, X, k, A, betabar, Ad, s, inc_root, dstarbar, betahat, R, keep, nprint)
+}
+
+rscaleUsage_rcpp_loop <- function(k, x, p, n, R, keep, ndghk, nprint, y, mu, Sigma, tau, sigma, Lambda, e, domu, doSigma, dosigma, dotau, doLambda, doe, nu, V, mubar, Am, gsigma, gl11, gl22, gl12, nuL, VL, ge) {
+    .Call('bayesm_rscaleUsage_rcpp_loop', PACKAGE = 'bayesm', k, x, p, n, R, keep, ndghk, nprint, y, mu, Sigma, tau, sigma, Lambda, e, domu, doSigma, dosigma, dotau, doLambda, doe, nu, V, mubar, Am, gsigma, gl11, gl22, gl12, nuL, VL, ge)
+}
+
+rsurGibbs_rcpp_loop <- function(regdata, indreg, cumnk, nk, XspXs, Sigmainv, A, Abetabar, nu, V, nvar, E, Y, R, keep, nprint) {
+    .Call('bayesm_rsurGibbs_rcpp_loop', PACKAGE = 'bayesm', regdata, indreg, cumnk, nk, XspXs, Sigmainv, A, Abetabar, nu, V, nvar, E, Y, R, keep, nprint)
+}
+
+rtrun <- function(mu, sigma, a, b) {
+    .Call('bayesm_rtrun', PACKAGE = 'bayesm', mu, sigma, a, b)
+}
+
+runireg_rcpp_loop <- function(y, X, betabar, A, nu, ssq, R, keep, nprint) {
+    .Call('bayesm_runireg_rcpp_loop', PACKAGE = 'bayesm', y, X, betabar, A, nu, ssq, R, keep, nprint)
+}
+
+runiregGibbs_rcpp_loop <- function(y, X, betabar, A, nu, ssq, sigmasq, R, keep, nprint) {
+    .Call('bayesm_runiregGibbs_rcpp_loop', PACKAGE = 'bayesm', y, X, betabar, A, nu, ssq, sigmasq, R, keep, nprint)
+}
+
+rwishart <- function(nu, V) {
+    .Call('bayesm_rwishart', PACKAGE = 'bayesm', nu, V)
+}
+
+callroot <- function(c1, c2, tol, iterlim) {
+    .Call('bayesm_callroot', PACKAGE = 'bayesm', c1, c2, tol, iterlim)
+}
+
diff --git a/R/rdirichlet.R b/R/rdirichlet.R
deleted file mode 100755
index 5262611..0000000
--- a/R/rdirichlet.R
+++ /dev/null
@@ -1,12 +0,0 @@
-rdirichlet = 
-function(alpha)
-{
-#
-# Purpose:
-# draw from Dirichlet(alpha)
-#
-dim = length(alpha)
-y=rep(0,dim)
-for(i in 1:dim) y[i] = rgamma(1,alpha[i])
-return(y/sum(y))
-}
diff --git a/R/rdpgibbs_rcpp.r b/R/rdpgibbs_rcpp.r
new file mode 100644
index 0000000..183df79
--- /dev/null
+++ b/R/rdpgibbs_rcpp.r
@@ -0,0 +1,172 @@
+rDPGibbs=function(Prior,Data,Mcmc){
+#
+# Revision History: 
+#   5/06 add rthetaDP
+#   7/06 include rthetaDP in main body to avoid copy overhead
+#   1/08 add scaling
+#   2/08 add draw of lambda
+#   3/08 changed nu prior support to dim(y) + exp(unif grid on nulim[1],nulim[2])
+#   W. Taylor 4/15 - added nprint option to MCMC argument
+#
+# purpose: do Gibbs sampling for density estimation using Dirichlet process model
+#
+# arguments:
+#     Data is a list of y which is an n x k matrix of data
+#     Prior is a list of (alpha,lambda,Prioralpha)
+#       alpha: starting value
+#       lambda_hyper: hyperparms of prior on lambda
+#       Prioralpha: hyperparms of alpha prior; a list of (Istarmin,Istarmax,power)
+#       if elements of the prior don't exist, defaults are assumed
+#     Mcmc is a list of (R,keep,maxuniq)
+#       R: number of draws
+#       keep: thinning parameter
+#       maxuniq: the maximum number of unique thetaStar values
+#       nprint - print estimated time remaining on every nprint'th draw
+#
+# Output:
+#     list with elements
+#     alphadraw: vector of length R/keep, [i] is ith draw of alpha
+#     Istardraw: vector of length R/keep, [i] is the number of unique theta's drawn from ith iteration
+#     adraw
+#     nudraw
+#     vdraw
+#     thetaNp1draws: list, [[i]] is ith draw of theta_{n+1}
+#     inddraw: R x n matrix, [,i] is indicators of identity for each obs in ith iteration
+#
+# Model:
+#        y_i ~ f(y|thetai)
+#        thetai|G ~ G
+#        G|lambda,alpha ~ DP(G|G0(lambda),alpha)
+#
+# Priors:
+#        alpha: starting value
+#
+#        lambda:
+#           G0 ~ N(mubar,Sigma (x) Amu^-1)
+#           mubar=vec(mubar)
+#           Sigma ~ IW(nu,nu*v*I)  note: mode(Sigma)=nu/(nu+2)*v*I
+#           mubar=0
+#           amu is uniform on grid specified by alim
+#           nu is log uniform, nu=d-1+exp(Z) z is uniform on seq defined by nulim
+#           v is uniform on sequence specified by vlim
+#
+#        Prioralpha:
+#           alpha ~ (1-(alpha-alphamin)/(alphamax-alphamin))^power
+#           alphamin=exp(digamma(Istarmin)-log(gamma+log(N)))
+#           alphamax=exp(digamma(Istarmax)-log(gamma+log(N)))
+#           gamma= .5772156649015328606
+#
+#
+#
+
+#  check arguments
+#
+if(missing(Data)) {pandterm("Requires Data argument -- list of y")}
+    if(is.null(Data$y)) {pandterm("Requires Data element y")}
+    y=Data$y
+#
+# check data for validity
+#
+if(!is.matrix(y)) {pandterm("y must be a matrix")}
+nobs=nrow(y)
+dimy=ncol(y)
+#
+# check for Prior
+#
+alimdef=BayesmConstant.DPalimdef
+nulimdef=BayesmConstant.DPnulimdef
+vlimdef=BayesmConstant.DPvlimdef
+
+
+if(missing(Prior)) {pandterm("requires Prior argument ")}
+else
+   {
+    if(is.null(Prior$lambda_hyper)) {lambda_hyper=list(alim=alimdef,nulim=nulimdef,vlim=vlimdef)}
+    else {lambda_hyper=Prior$lambda_hyper;
+       if(is.null(lambda_hyper$alim)) {lambda_hyper$alim=alimdef}
+       if(is.null(lambda_hyper$nulim)) {lambda_hyper$nulim=nulimdef} 
+       if(is.null(lambda_hyper$vlim)) {lambda_hyper$vlim=vlimdef}
+       }
+    if(is.null(Prior$Prioralpha)) {Prioralpha=list(Istarmin=BayesmConstant.DPIstarmin,Istarmax=min(50,0.1*nobs),power=BayesmConstant.DPpower)}
+    else {Prioralpha=Prior$Prioralpha;
+       if(is.null(Prioralpha$Istarmin)) {Prioralpha$Istarmin=BayesmConstant.DPIstarmin} else {Prioralpha$Istarmin=Prioralpha$Istarmin}
+       if(is.null(Prioralpha$Istarmax)) 
+             {Prioralpha$Istarmax=min(50,0.1*nobs)} else {Prioralpha$Istarmax=Prioralpha$Istarmax}
+       if(is.null(Prioralpha$power)) {Prioralpha$power=BayesmConstant.DPpower}
+       }
+   }
+gamma= BayesmConstant.gamma
+Prioralpha$alphamin=exp(digamma(Prioralpha$Istarmin)-log(gamma+log(nobs)))
+Prioralpha$alphamax=exp(digamma(Prioralpha$Istarmax)-log(gamma+log(nobs)))
+Prioralpha$n=nobs
+#
+# check Prior arguments for validity
+#
+if(lambda_hyper$alim[1]<0) {pandterm("alim[1] must be >0")}
+if(lambda_hyper$nulim[1]<0) {pandterm("nulim[1] must be >0")}
+if(lambda_hyper$vlim[1]<0) {pandterm("vlim[1] must be >0")}
+if(Prioralpha$Istarmin <1){pandterm("Prioralpha$Istarmin must be >= 1")}
+if(Prioralpha$Istarmax <= Prioralpha$Istarmin){pandterm("Prioralpha$Istarmin must be > Prioralpha$Istarmax")}
+#
+# check MCMC argument
+#
+if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
+else
+   {
+    if(is.null(Mcmc$R)) 
+       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
+    if(is.null(Mcmc$keep)) {keep=BayesmConstant.keep} else {keep=Mcmc$keep}
+    if(is.null(Mcmc$nprint)) {nprint=BayesmConstant.nprint} else {nprint=Mcmc$nprint}
+      if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
+    if(is.null(Mcmc$maxuniq)) {maxuniq=BayesmConstant.DPmaxuniq} else {maxuniq=Mcmc$maxuniq}
+    if(is.null(Mcmc$SCALE)) {SCALE=BayesmConstant.DPSCALE} else {SCALE=Mcmc$SCALE}
+    if(is.null(Mcmc$gridsize)) {gridsize=BayesmConstant.DPgridsize} else {gridsize=Mcmc$gridsize}
+   }
+
+#
+# print out the problem
+#
+cat(" Starting Gibbs Sampler for Density Estimation Using Dirichlet Process Model",fill=TRUE)
+cat(" ",nobs," observations on ",dimy," dimensional data",fill=TRUE)
+cat(" ",fill=TRUE)
+cat(" SCALE=",SCALE,fill=TRUE)
+cat(" ",fill=TRUE)
+cat(" Prior Parms: ",fill=TRUE)
+cat("  G0 ~ N(mubar,Sigma (x) Amu^-1)",fill=TRUE)
+cat("   mubar = ",0,fill=TRUE)
+cat("   Sigma ~ IW(nu,nu*v*I)",fill=TRUE)
+cat("   Amu ~ uniform[",lambda_hyper$alim[1],",",lambda_hyper$alim[2],"]",fill=TRUE)
+cat("   nu ~ uniform on log grid on [",dimy-1+exp(lambda_hyper$nulim[1]),
+             ",",dimy-1+exp(lambda_hyper$nulim[2]),"]",fill=TRUE)
+cat("   v ~ uniform[",lambda_hyper$vlim[1],",",lambda_hyper$vlim[2],"]",fill=TRUE)
+cat(" ",fill=TRUE)
+cat("  alpha ~ (1-(alpha-alphamin)/(alphamax-alphamin))^power",fill=TRUE)
+cat("   Istarmin = ",Prioralpha$Istarmin,fill=TRUE)
+cat("   Istarmax = ",Prioralpha$Istarmax,fill=TRUE)
+cat("   alphamin = ",Prioralpha$alphamin,fill=TRUE)
+cat("   alphamax = ",Prioralpha$alphamax,fill=TRUE)
+cat("   power = ",Prioralpha$power,fill=TRUE)
+cat(" ",fill=TRUE)
+cat(" Mcmc Parms: R= ",R," keep= ",keep," nprint= ",nprint," maxuniq= ",maxuniq," gridsize for lambda hyperparms= ",gridsize,
+        fill=TRUE)
+cat(" ",fill=TRUE)
+
+###################################################################
+# Wayne Taylor
+# 1/29/2015
+###################################################################
+out = rDPGibbs_rcpp_loop(R,keep,nprint,
+                         y, lambda_hyper, SCALE, maxuniq, Prioralpha, gridsize,
+                         BayesmConstant.A,BayesmConstant.nuInc,BayesmConstant.DPalpha)
+###################################################################
+
+nmix=list(probdraw=matrix(c(rep(1,nrow(out$inddraw))),ncol=1),zdraw=out$inddraw,compdraw=out$thetaNp1draw)
+attributes(nmix)$class="bayesm.nmix"
+attributes(out$alphadraw)$class=c("bayesm.mat","mcmc")
+attributes(out$Istardraw)$class=c("bayesm.mat","mcmc")
+attributes(out$adraw)$class=c("bayesm.mat","mcmc")
+attributes(out$nudraw)$class=c("bayesm.mat","mcmc")
+attributes(out$vdraw)$class=c("bayesm.mat","mcmc")
+return(list(alphadraw=out$alphadraw,Istardraw=out$Istardraw,adraw=out$adraw,nudraw=out$nudraw,
+            vdraw=out$vdraw,nmix=nmix))
+}
diff --git a/R/rhierBinLogit.R b/R/rhierBinLogit.R
index b501dfb..829bfa0 100755
--- a/R/rhierBinLogit.R
+++ b/R/rhierBinLogit.R
@@ -1,228 +1,227 @@
-#
-# -----------------------------------------------------------------------------
-#
-rhierBinLogit=
-function(Data,Prior,Mcmc){
-#
-# revision history: 
-#	changed 5/12/05 by Rossi to add error checking
-#       1/07 removed init.rmultiregfp
-#       3/07 added classes
-#
-# purpose: run binary heterogeneous logit model 
-#
-# Arguments:
-#   Data contains a list of (lgtdata[[i]],Z)
-#      lgtdata[[i]]=list(y,X)
-#         y is index of brand chosen, y=1 is exp[X'beta]/(1+exp[X'beta])
-#         X is a matrix that is n_i x by nvar
-#      Z is a matrix of demographic variables nlgt*nz that have been 
-#	  mean centered so that the intercept is interpretable
-#   Prior contains a list of (nu,V,Deltabar,ADelta)
-#      beta_i ~ N(Z%*%Delta,Vbeta)
-#      vec(Delta) ~ N(vec(Deltabar),Vbeta (x) ADelta^-1)
-#      Vbeta ~ IW(nu,V)
-#   Mcmc is a list of (sbeta,R,keep)
-#      sbeta is scale factor for RW increment for beta_is
-#      R is number of draws
-#      keep every keepth draw
-#
-# Output:
-#      a list of Deltadraw (R/keep x nvar x nz), Vbetadraw (R/keep x nvar**2), 
-#         llike (R/keep), betadraw is a nlgt x nvar x nz x R/keep array of draws of betas
-#         nunits=length(lgtdata)
-#
-#  define functions needed
-#
-# ------------------------------------------------------------------------
-#
-loglike=
-function(y,X,beta) {
-# function computer log likelihood of data for binomial logit model
-# Pr(y=1) = 1 - Pr(y=0) = exp[X'beta]/(1+exp[X'beta])
-prob = exp(X%*%beta)/(1+exp(X%*%beta))
-prob = prob*y + (1-prob)*(1-y)
-sum(log(prob))
-}
-#
-#
-#  check arguments
-#
-pandterm=function(message) { stop(message,call.=FALSE) }
-if(missing(Data)) {pandterm("Requires Data argument -- list of m,lgtdata, and (possibly) Z")}
-  if(is.null(Data$lgtdata)) {pandterm("Requires Data element lgtdata (list of data for each unit)")}
-  lgtdata=Data$lgtdata
-  nlgt=length(lgtdata)
-if(is.null(Data$Z)) { cat("Z not specified -- putting in iota",fill=TRUE); fsh() ; Z=matrix(rep(1,nlgt),ncol=1)}
-  else {if (nrow(Data$Z) != nlgt) {pandterm(paste("Nrow(Z) ",nrow(Z),"ne number logits ",nlgt))}
-      else {Z=Data$Z}}
-  nz=ncol(Z)
-#
-# check lgtdata for validity
-#
-m=2  # set two choice alternatives for Greg's code
-ypooled=NULL
-Xpooled=NULL
-if(!is.null(lgtdata[[1]]$X)) {oldncol=ncol(lgtdata[[1]]$X)}
-for (i in 1:nlgt) 
-{
-    if(is.null(lgtdata[[i]]$y)) {pandterm(paste("Requires element y of lgtdata[[",i,"]]"))}
-    if(is.null(lgtdata[[i]]$X)) {pandterm(paste("Requires element X of lgtdata[[",i,"]]"))}
-    ypooled=c(ypooled,lgtdata[[i]]$y)
-    nrowX=nrow(lgtdata[[i]]$X)
-    if((nrowX) !=length(lgtdata[[i]]$y)) {pandterm(paste("nrow(X) ne length(yi); exception at unit",i))}
-    newncol=ncol(lgtdata[[i]]$X)
-    if(newncol != oldncol) {pandterm(paste("All X elements must have same # of cols; exception at unit",i))}
-    Xpooled=rbind(Xpooled,lgtdata[[i]]$X)
-    oldncol=newncol
-}
-nvar=ncol(Xpooled)
-levely=as.numeric(levels(as.factor(ypooled)))
-if(length(levely) != m) {pandterm(paste("y takes on ",length(levely)," values -- must be = m"))}
-bady=FALSE
-for (i in 0:1 )
-{
-    if(levely[i+1] != i) bady=TRUE
-}
-cat("Table of Y values pooled over all units",fill=TRUE)
-print(table(ypooled))
-if (bady) 
-  {pandterm("Invalid Y")}
-#
-# check on prior
-#
-if(missing(Prior)){
-    nu=nvar+3
-    V=nu*diag(nvar)
-    Deltabar=matrix(rep(0,nz*nvar),ncol=nvar)
-    ADelta=.01*diag(nz) }
-else {
-    if(is.null(Prior$nu)) {nu=nvar+3}  else {nu=Prior$nu}
-        if(nu < 1) {pandterm("invalid nu value")}
-    if(is.null(Prior$V)) {V=nu*diag(rep(1,nvar))} else {V=Prior$V}
-    if(sum(dim(V)==c(nvar,nvar)) !=2) pandterm("Invalid V in prior")
-    if(is.null(Prior$ADelta) ) {ADelta=.01*diag(nz)} else {ADelta=Prior$ADelta}
-    if(ncol(ADelta) != nz | nrow(ADelta) != nz) {pandterm("ADelta must be nz x nz")}
-    if(is.null(Prior$Deltabar) ) {Deltabar=matrix(rep(0,nz*nvar),ncol=nvar)} else {Deltabar=Prior$Deltabar}
-}
-#
-# check on Mcmc
-#
-if(missing(Mcmc)) 
-  {pandterm("Requires Mcmc list argument")}
-else 
-   { 
-    if(is.null(Mcmc$sbeta)) {sbeta=.2} else {sbeta=Mcmc$sbeta}
-    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
-    if(is.null(Mcmc$R)) {pandterm("Requires R argument in Mcmc list")} else {R=Mcmc$R}
-    }
-#
-# print out problem
-#
-cat(" ",fill=TRUE)
-cat("Attempting MCMC Inference for Hierarchical Binary Logit:",fill=TRUE)
-cat(paste("  ",nvar," variables in X"),fill=TRUE)
-cat(paste("  ",nz," variables in Z"),fill=TRUE)
-cat(paste("   for ",nlgt," cross-sectional units"),fill=TRUE)
-cat(" ",fill=TRUE)
-cat("Prior Parms: ",fill=TRUE)
-cat("nu =",nu,fill=TRUE)
-cat("V ",fill=TRUE)
-print(V)
-cat("Deltabar",fill=TRUE)
-print(Deltabar)
-cat("ADelta",fill=TRUE)
-print(ADelta)
-cat(" ",fill=TRUE)
-cat("MCMC Parms: ",fill=TRUE)
-cat(paste("sbeta=",round(sbeta,3)," R= ",R," keep= ",keep),fill=TRUE)
-cat("",fill=TRUE)
-
-nlgt=length(lgtdata)
-nvar=ncol(lgtdata[[1]]$X)
-nz=ncol(Z)
-
-
-
-#
-# initialize storage for draws
-#
-Vbetadraw=matrix(double(floor(R/keep)*nvar*nvar),ncol=nvar*nvar)
-betadraw=array(double(floor(R/keep)*nlgt*nvar),dim=c(nlgt,nvar,floor(R/keep)))
-Deltadraw=matrix(double(floor(R/keep)*nvar*nz),ncol=nvar*nz)
-oldbetas=matrix(double(nlgt*nvar),ncol=nvar)
-oldVbeta=diag(nvar)
-oldVbetai=diag(nvar)
-oldDelta=matrix(double(nvar*nz),ncol=nvar)
-
-betad = array(0,dim=c(nvar))
-betan = array(0,dim=c(nvar))
-reject = array(0,dim=c(R/keep))
-llike=array(0,dim=c(R/keep))
-
-itime=proc.time()[3]
-cat("MCMC Iteration (est time to end - min)",fill=TRUE)
-fsh()
-for (j in 1:R) {
-	rej = 0
-	logl = 0
-	sV = sbeta*oldVbeta
-	root=t(chol(sV))
-
-#	Draw B-h|B-bar, V
-
-	for (i in 1:nlgt) {
-
-		betad = oldbetas[i,]
-		betan = betad + root%*%rnorm(nvar)
-# data		
-		lognew = loglike(lgtdata[[i]]$y,lgtdata[[i]]$X,betan)
-		logold = loglike(lgtdata[[i]]$y,lgtdata[[i]]$X,betad) 
-# heterogeneity
-logknew = -.5*(t(betan)-Z[i,]%*%oldDelta) %*% oldVbetai %*% (betan-t(Z[i,]%*%oldDelta))
-logkold = -.5*(t(betad)-Z[i,]%*%oldDelta) %*% oldVbetai %*% (betad-t(Z[i,]%*%oldDelta))
-# MH step
-		alpha = exp(lognew + logknew - logold - logkold)
-		if(alpha=="NaN") alpha=-1
-		u = runif(n=1,min=0, max=1)
-		if(u < alpha) { 
-			oldbetas[i,] = betan
-			logl = logl + lognew } else {
-		 	logl = logl + logold
-			rej = rej+1  }
-		}
-#	Draw B-bar and V as a multivariate regression
-	out=rmultireg(oldbetas,Z,Deltabar,ADelta,nu,V)
-	oldDelta=out$B
-	oldVbeta=out$Sigma
-	oldVbetai=chol2inv(chol(oldVbeta))
-
-	if((j%%100)==0) 
-          {
-           ctime=proc.time()[3]
-           timetoend=((ctime-itime)/j)*(R-j)
-           cat(" ",j," (",round(timetoend/60,1),")",fill=TRUE)
-           fsh() }
-	mkeep=j/keep
-	if(mkeep*keep == (floor(mkeep)*keep))
-          {Deltadraw[mkeep,]=as.vector(oldDelta)
-           Vbetadraw[mkeep,]=as.vector(oldVbeta)
-           betadraw[,,mkeep]=oldbetas
-           llike[mkeep]=logl
-           reject[mkeep]=rej/nlgt
-          }
-}
-ctime=proc.time()[3]
-cat(" Total Time Elapsed: ",round((ctime-itime)/60,2),fill=TRUE)
-
-
-attributes(betadraw)$class=c("bayesm.hcoef")
-attributes(Deltadraw)$class=c("bayesm.mat","mcmc")
-attributes(Deltadraw)$mcpar=c(1,R,keep)
-attributes(Vbetadraw)$class=c("bayesm.var","bayesm.mat","mcmc")
-attributes(Vbetadraw)$mcpar=c(1,R,keep)
-
-return(list(betadraw=betadraw,Vbetadraw=Vbetadraw,Deltadraw=Deltadraw,llike=llike,reject=reject))
-}
-
-
+#
+# -----------------------------------------------------------------------------
+#
+rhierBinLogit=
+function(Data,Prior,Mcmc){
+#
+# revision history: 
+#	changed 5/12/05 by Rossi to add error checking
+#       1/07 removed init.rmultiregfp
+#       3/07 added classes
+#
+# purpose: run binary heterogeneous logit model 
+#
+# Arguments:
+#   Data contains a list of (lgtdata[[i]],Z)
+#      lgtdata[[i]]=list(y,X)
+#         y is index of brand chosen, y=1 is exp[X'beta]/(1+exp[X'beta])
+#         X is a matrix that is n_i x by nvar
+#      Z is a matrix of demographic variables nlgt*nz that have been 
+#	  mean centered so that the intercept is interpretable
+#   Prior contains a list of (nu,V,Deltabar,ADelta)
+#      beta_i ~ N(Z%*%Delta,Vbeta)
+#      vec(Delta) ~ N(vec(Deltabar),Vbeta (x) ADelta^-1)
+#      Vbeta ~ IW(nu,V)
+#   Mcmc is a list of (sbeta,R,keep)
+#      sbeta is scale factor for RW increment for beta_is
+#      R is number of draws
+#      keep every keepth draw
+#
+# Output:
+#      a list of Deltadraw (R/keep x nvar x nz), Vbetadraw (R/keep x nvar**2), 
+#         llike (R/keep), betadraw is a nlgt x nvar x nz x R/keep array of draws of betas
+#         nunits=length(lgtdata)
+#
+#  define functions needed
+#
+# ------------------------------------------------------------------------
+#
+loglike=
+function(y,X,beta) {
+# function computer log likelihood of data for binomial logit model
+# Pr(y=1) = 1 - Pr(y=0) = exp[X'beta]/(1+exp[X'beta])
+prob = exp(X%*%beta)/(1+exp(X%*%beta))
+prob = prob*y + (1-prob)*(1-y)
+sum(log(prob))
+}
+#
+#
+#  check arguments
+#
+if(missing(Data)) {pandterm("Requires Data argument -- list of m,lgtdata, and (possibly) Z")}
+  if(is.null(Data$lgtdata)) {pandterm("Requires Data element lgtdata (list of data for each unit)")}
+  lgtdata=Data$lgtdata
+  nlgt=length(lgtdata)
+if(is.null(Data$Z)) { cat("Z not specified -- putting in iota",fill=TRUE); fsh() ; Z=matrix(rep(1,nlgt),ncol=1)}
+  else {if (nrow(Data$Z) != nlgt) {pandterm(paste("Nrow(Z) ",nrow(Z),"ne number logits ",nlgt))}
+      else {Z=Data$Z}}
+  nz=ncol(Z)
+#
+# check lgtdata for validity
+#
+m=2  # set two choice alternatives for Greg's code
+ypooled=NULL
+Xpooled=NULL
+if(!is.null(lgtdata[[1]]$X)) {oldncol=ncol(lgtdata[[1]]$X)}
+for (i in 1:nlgt) 
+{
+    if(is.null(lgtdata[[i]]$y)) {pandterm(paste("Requires element y of lgtdata[[",i,"]]"))}
+    if(is.null(lgtdata[[i]]$X)) {pandterm(paste("Requires element X of lgtdata[[",i,"]]"))}
+    ypooled=c(ypooled,lgtdata[[i]]$y)
+    nrowX=nrow(lgtdata[[i]]$X)
+    if((nrowX) !=length(lgtdata[[i]]$y)) {pandterm(paste("nrow(X) ne length(yi); exception at unit",i))}
+    newncol=ncol(lgtdata[[i]]$X)
+    if(newncol != oldncol) {pandterm(paste("All X elements must have same # of cols; exception at unit",i))}
+    Xpooled=rbind(Xpooled,lgtdata[[i]]$X)
+    oldncol=newncol
+}
+nvar=ncol(Xpooled)
+levely=as.numeric(levels(as.factor(ypooled)))
+if(length(levely) != m) {pandterm(paste("y takes on ",length(levely)," values -- must be = m"))}
+bady=FALSE
+for (i in 0:1 )
+{
+    if(levely[i+1] != i) bady=TRUE
+}
+cat("Table of Y values pooled over all units",fill=TRUE)
+print(table(ypooled))
+if (bady) 
+  {pandterm("Invalid Y")}
+#
+# check on prior
+#
+if(missing(Prior)){
+    nu=nvar+3
+    V=nu*diag(nvar)
+    Deltabar=matrix(rep(0,nz*nvar),ncol=nvar)
+    ADelta=.01*diag(nz) }
+else {
+    if(is.null(Prior$nu)) {nu=nvar+3}  else {nu=Prior$nu}
+        if(nu < 1) {pandterm("invalid nu value")}
+    if(is.null(Prior$V)) {V=nu*diag(rep(1,nvar))} else {V=Prior$V}
+    if(sum(dim(V)==c(nvar,nvar)) !=2) pandterm("Invalid V in prior")
+    if(is.null(Prior$ADelta) ) {ADelta=.01*diag(nz)} else {ADelta=Prior$ADelta}
+    if(ncol(ADelta) != nz | nrow(ADelta) != nz) {pandterm("ADelta must be nz x nz")}
+    if(is.null(Prior$Deltabar) ) {Deltabar=matrix(rep(0,nz*nvar),ncol=nvar)} else {Deltabar=Prior$Deltabar}
+}
+#
+# check on Mcmc
+#
+if(missing(Mcmc)) 
+  {pandterm("Requires Mcmc list argument")}
+else 
+   { 
+    if(is.null(Mcmc$sbeta)) {sbeta=.2} else {sbeta=Mcmc$sbeta}
+    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
+    if(is.null(Mcmc$R)) {pandterm("Requires R argument in Mcmc list")} else {R=Mcmc$R}
+    }
+#
+# print out problem
+#
+cat(" ",fill=TRUE)
+cat("Attempting MCMC Inference for Hierarchical Binary Logit:",fill=TRUE)
+cat(paste("  ",nvar," variables in X"),fill=TRUE)
+cat(paste("  ",nz," variables in Z"),fill=TRUE)
+cat(paste("   for ",nlgt," cross-sectional units"),fill=TRUE)
+cat(" ",fill=TRUE)
+cat("Prior Parms: ",fill=TRUE)
+cat("nu =",nu,fill=TRUE)
+cat("V ",fill=TRUE)
+print(V)
+cat("Deltabar",fill=TRUE)
+print(Deltabar)
+cat("ADelta",fill=TRUE)
+print(ADelta)
+cat(" ",fill=TRUE)
+cat("MCMC Parms: ",fill=TRUE)
+cat(paste("sbeta=",round(sbeta,3)," R= ",R," keep= ",keep),fill=TRUE)
+cat("",fill=TRUE)
+
+nlgt=length(lgtdata)
+nvar=ncol(lgtdata[[1]]$X)
+nz=ncol(Z)
+
+
+
+#
+# initialize storage for draws
+#
+Vbetadraw=matrix(double(floor(R/keep)*nvar*nvar),ncol=nvar*nvar)
+betadraw=array(double(floor(R/keep)*nlgt*nvar),dim=c(nlgt,nvar,floor(R/keep)))
+Deltadraw=matrix(double(floor(R/keep)*nvar*nz),ncol=nvar*nz)
+oldbetas=matrix(double(nlgt*nvar),ncol=nvar)
+oldVbeta=diag(nvar)
+oldVbetai=diag(nvar)
+oldDelta=matrix(double(nvar*nz),ncol=nvar)
+
+betad = array(0,dim=c(nvar))
+betan = array(0,dim=c(nvar))
+reject = array(0,dim=c(R/keep))
+llike=array(0,dim=c(R/keep))
+
+itime=proc.time()[3]
+cat("MCMC Iteration (est time to end - min)",fill=TRUE)
+fsh()
+for (j in 1:R) {
+	rej = 0
+	logl = 0
+	sV = sbeta*oldVbeta
+	root=t(chol(sV))
+
+#	Draw B-h|B-bar, V
+
+	for (i in 1:nlgt) {
+
+		betad = oldbetas[i,]
+		betan = betad + root%*%rnorm(nvar)
+# data		
+		lognew = loglike(lgtdata[[i]]$y,lgtdata[[i]]$X,betan)
+		logold = loglike(lgtdata[[i]]$y,lgtdata[[i]]$X,betad) 
+# heterogeneity
+logknew = -.5*(t(betan)-Z[i,]%*%oldDelta) %*% oldVbetai %*% (betan-t(Z[i,]%*%oldDelta))
+logkold = -.5*(t(betad)-Z[i,]%*%oldDelta) %*% oldVbetai %*% (betad-t(Z[i,]%*%oldDelta))
+# MH step
+		alpha = exp(lognew + logknew - logold - logkold)
+		if(alpha=="NaN") alpha=-1
+		u = runif(n=1,min=0, max=1)
+		if(u < alpha) { 
+			oldbetas[i,] = betan
+			logl = logl + lognew } else {
+		 	logl = logl + logold
+			rej = rej+1  }
+		}
+#	Draw B-bar and V as a multivariate regression
+	out=rmultireg(oldbetas,Z,Deltabar,ADelta,nu,V)
+	oldDelta=out$B
+	oldVbeta=out$Sigma
+	oldVbetai=chol2inv(chol(oldVbeta))
+
+	if((j%%100)==0) 
+          {
+           ctime=proc.time()[3]
+           timetoend=((ctime-itime)/j)*(R-j)
+           cat(" ",j," (",round(timetoend/60,1),")",fill=TRUE)
+           fsh() }
+	mkeep=j/keep
+	if(mkeep*keep == (floor(mkeep)*keep))
+          {Deltadraw[mkeep,]=as.vector(oldDelta)
+           Vbetadraw[mkeep,]=as.vector(oldVbeta)
+           betadraw[,,mkeep]=oldbetas
+           llike[mkeep]=logl
+           reject[mkeep]=rej/nlgt
+          }
+}
+ctime=proc.time()[3]
+cat(" Total Time Elapsed: ",round((ctime-itime)/60,2),fill=TRUE)
+
+
+attributes(betadraw)$class=c("bayesm.hcoef")
+attributes(Deltadraw)$class=c("bayesm.mat","mcmc")
+attributes(Deltadraw)$mcpar=c(1,R,keep)
+attributes(Vbetadraw)$class=c("bayesm.var","bayesm.mat","mcmc")
+attributes(Vbetadraw)$mcpar=c(1,R,keep)
+
+return(list(betadraw=betadraw,Vbetadraw=Vbetadraw,Deltadraw=Deltadraw,llike=llike,reject=reject))
+}
+
+
diff --git a/R/rhierLinearMixture.R b/R/rhierLinearMixture.R
deleted file mode 100755
index 3c5f91e..0000000
--- a/R/rhierLinearMixture.R
+++ /dev/null
@@ -1,341 +0,0 @@
-rhierLinearMixture=
-function(Data,Prior,Mcmc)
-{
-#
-# revision history:
-#   changed 12/17/04 by rossi to fix bug in drawdelta when there is zero/one unit
-#   in a mixture component
-#   adapted to linear model by Vicky Chen 6/06
-#   put in classes 3/07
-#   changed a check 9/08
-#
-# purpose: run hierarchical linear model with mixture of normals 
-#
-# Arguments:
-#   Data contains a list of (regdata, and possibly Z)
-#      regdata is a list of lists (one list per unit)
-#          regdata[[i]]=list(y,X)
-#             y is a vector of observations
-#             X is a length(y) x nvar matrix of values of
-#               X vars including intercepts
-#             Z is an nreg x nz matrix of values of variables
-#               note: Z should NOT contain an intercept
-#   Prior contains a list of (nu.e,ssq,deltabar,Ad,mubar,Amu,nu,V,ncomp,a) 
-#      ncomp is the number of components in normal mixture
-#           if elements of Prior (other than ncomp) do not exist, defaults are used
-#   Mcmc contains a list of (s,c,R,keep)
-#
-# Output:  as list containing
-#   taodraw is R/keep x nreg  array of error variances for each regression
-#   Deltadraw R/keep  x nz*nvar matrix of draws of Delta, first row is initial value
-#   betadraw is nreg x nvar x R/keep array of draws of betas
-#   probdraw is R/keep x ncomp matrix of draws of probs of mixture components
-#   compdraw is a list of list of lists (length R/keep)
-#      compdraw[[rep]] is the repth draw of components for mixtures
-#
-# Priors:
-#    tau_i ~ nu.e*ssq_i/chisq(nu.e)  tau_i is the variance of epsilon_i
-#    beta_i = delta %*% z[i,] + u_i
-#       u_i ~ N(mu_ind[i],Sigma_ind[i])
-#       ind[i] ~multinomial(p)
-#       p ~ dirichlet (a)
-#           a: Dirichlet parameters for prior on p
-#       delta is a k x nz array
-#          delta= vec(D) ~ N(deltabar,A_d^-1)
-#    mu_j ~ N(mubar,A_mu^-1(x)Sigma_j)
-#    Sigma_j ~ IW(nu,V^-1)
-#    ncomp is number of components
-#
-# MCMC parameters
-#   R is number of draws
-#   keep is thinning parameter, keep every keepth draw
-#
-#  check arguments
-#
-#--------------------------------------------------------------------------------------------------
-#
-#  create functions needed
-#
-append=function(l) { l=c(l,list(XpX=crossprod(l$X),Xpy=crossprod(l$X,l$y)))}
-#
-getvar=function(l) { 
-     v=var(l$y)
-     if(is.na(v)) return(1)
-     if(v>0) return (v) else return (1)}
-#
-runiregG=
-function(y,X,XpX,Xpy,sigmasq,rooti,betabar,nu,ssq){
-# 
-# Purpose:
-#   perform one Gibbs iteration for Univ Regression Model
-#   only does one iteration so can be used in both rhierLinearMixture & rhierLinearModel
-#
-# Model:
-#   y = Xbeta + e  e ~N(0,sigmasq)
-#          y is n x 1
-#          X is n x k
-#          beta is k x 1 vector of coefficients
-#
-# Priors:  beta ~ N(betabar,A^-1)
-#          sigmasq ~ (nu*ssq)/chisq_nu
-# 
-n=length(y)
-k=ncol(XpX)
-sigmasq=as.vector(sigmasq)
-A=crossprod(rooti)
-#
-#     first draw beta | sigmasq
-#
-  IR=backsolve(chol(XpX/sigmasq+A),diag(k))
-  btilde=crossprod(t(IR))%*%(Xpy/sigmasq+A%*%betabar)
-  beta = btilde + IR%*%rnorm(k)
-#
-#    now draw sigmasq | beta
-#
-  res=y-X%*%beta
-  s=t(res)%*%res
-  sigmasq=(nu*ssq + s)/rchisq(1,nu+n)
-
-list(betadraw=beta,sigmasqdraw=sigmasq)
-}
-#
-drawDelta=
-function(x,y,z,comps,deltabar,Ad){
-# Z,oldbetas,ind,oldcomp,deltabar,Ad
-# delta = vec(D)
-#  given z and comps (z[i] gives component indicator for the ith observation, 
-#   comps is a list of mu and rooti)
-# y is betas: nreg x nvar
-# x is Z: nreg x nz
-# y = xD' + U , rows of U are indep with covs Sigma_i given by z and comps
-nvar=ncol(y) #p
-nz=ncol(x)   #k
-xtx = matrix(0.0,nz*nvar,nz*nvar)
-xty = matrix(0.0,nvar,nz) #this is the unvecced version, have to vec after sum
-for(i in 1:length(comps)) {
-   nobs=sum(z==i)
-   if(nobs > 0) {
-      if(nobs == 1) 
-        { yi = matrix(y[z==i,],ncol=nvar); xi = matrix(x[z==i,],ncol=nz)}
-      else
-        { yi = y[z==i,]; xi = x[z==i,]}
-          
-      yi = t(t(yi)-comps[[i]][[1]])
-      sigi = crossprod(t(comps[[i]][[2]]))
-      xtx = xtx + crossprod(xi) %x% sigi
-      xty = xty + (sigi %*% crossprod(yi,xi))
-      }
-}
-xty = matrix(xty,ncol=1)
-
-# then vec(t(D)) ~ N(V^{-1}(xty + Ad*deltabar),V^{-1}) V = (xtx+Ad)
-cov=chol2inv(chol(xtx+Ad))
-return(cov%*%(xty+Ad%*%deltabar) + t(chol(cov))%*%rnorm(length(deltabar)))
-}
-#-------------------------------------------------------------------------------------------------------
-pandterm=function(message) { stop(message,call.=FALSE) }
-if(missing(Data)) {pandterm("Requires Data argument -- list of regdata, and (possibly) Z")}
-  if(is.null(Data$regdata)) {pandterm("Requires Data element regdata (list of data for each unit)")}
-  regdata=Data$regdata
-  nreg=length(regdata)
-  drawdelta=TRUE
-if(is.null(Data$Z)) { cat("Z not specified",fill=TRUE); fsh() ; drawdelta=FALSE}
-  else {if (nrow(Data$Z) != nreg) {pandterm(paste("Nrow(Z) ",nrow(Z),"ne number regressions ",nreg))}
-      else {Z=Data$Z}}
-  if(drawdelta) {
-     nz=ncol(Z)
-     colmeans=apply(Z,2,mean)
-     if(sum(colmeans) > .00001) 
-       {pandterm(paste("Z does not appear to be de-meaned: colmeans= ",colmeans))}
-  }
-#
-# check regdata for validity
-#
-dimfun=function(l) {c(length(l$y),dim(l$X))}
-dims=sapply(regdata,dimfun)
-dims=t(dims)
-nvar=quantile(dims[,3],prob=.5)
-
-for (i in 1:nreg) 
-{
-   if(dims[i,1] != dims[i,2]  || dims[i,3] !=nvar) 
-      {pandterm(paste("Bad Data dimensions for unit ",i," dims(y,X) =",dims[i,]))}
-}
-#
-# check on prior
-#
-if(missing(Prior)) 
-{pandterm("Requires Prior list argument (at least ncomp)")} 
-if(is.null(Prior$nu.e)) {nu.e=3} 
-   else {nu.e=Prior$nu.e}
-if(is.null(Prior$ssq)) {ssq=sapply(regdata,getvar)} 
-   else {ssq=Prior$ssq}
-if(is.null(Prior$ncomp)) {pandterm("Requires Prior element ncomp (num of mixture components)")} else {ncomp=Prior$ncomp}
-if(is.null(Prior$mubar)) {mubar=matrix(rep(0,nvar),nrow=1)} else { mubar=matrix(Prior$mubar,nrow=1)}
-  if(ncol(mubar) != nvar) {pandterm(paste("mubar must have ncomp cols, ncol(mubar)= ",ncol(mubar)))}
-if(is.null(Prior$Amu)) {Amu=matrix(.01,ncol=1)} else {Amu=matrix(Prior$Amu,ncol=1)}
-  if(ncol(Amu) != 1 | nrow(Amu) != 1) {pandterm("Am must be a 1 x 1 array")}
-if(is.null(Prior$nu)) {nu=nvar+3}  else {nu=Prior$nu}
-  if(nu < 1) {pandterm("invalid nu value")}
-if(is.null(Prior$V)) {V=nu*diag(nvar)} else {V=Prior$V}
-  if(sum(dim(V)==c(nvar,nvar)) !=2) pandterm("Invalid V in prior")
-if(is.null(Prior$Ad) & drawdelta) {Ad=.01*diag(nvar*nz)} else {Ad=Prior$Ad}
-if(drawdelta) {if(ncol(Ad) != nvar*nz | nrow(Ad) != nvar*nz) {pandterm("Ad must be nvar*nz x nvar*nz")}}
-if(is.null(Prior$deltabar)& drawdelta) {deltabar=rep(0,nz*nvar)} else {deltabar=Prior$deltabar}
-  if(drawdelta) {if(length(deltabar) != nz*nvar) {pandterm("deltabar must be of length nvar*nz")}}
-if(is.null(Prior$a)) { a=rep(5,ncomp)} else {a=Prior$a}
-if(length(a) != ncomp) {pandterm("Requires dim(a)= ncomp (no of components)")}
-bada=FALSE
-   for(i in 1:ncomp) { if(a[i] < 0) bada=TRUE}
-  if(bada) pandterm("invalid values in a vector")
-#
-# check on Mcmc
-#
-if(missing(Mcmc)) 
-  {pandterm("Requires Mcmc list argument")}
-else 
-   { 
-    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
-    if(is.null(Mcmc$R)) {pandterm("Requires R argument in Mcmc list")} else {R=Mcmc$R}
-    }
-#
-# print out problem
-#
-cat(" ",fill=TRUE)
-cat("Starting MCMC Inference for Hierarchical Linear Model:",fill=TRUE)
-cat("   Normal Mixture with",ncomp,"components for first stage prior",fill=TRUE)
-cat(paste("   for ",nreg," cross-sectional units"),fill=TRUE)
-cat(" ",fill=TRUE)
-cat("Prior Parms: ",fill=TRUE)
-cat("nu.e =",nu.e,fill=TRUE)
-cat("nu =",nu,fill=TRUE)
-cat("V ",fill=TRUE)
-print(V)
-cat("mubar ",fill=TRUE)
-print(mubar)
-cat("Amu ", fill=TRUE)
-print(Amu)
-cat("a ",fill=TRUE)
-print(a)
-if(drawdelta) 
-{
-   cat("deltabar",fill=TRUE)
-   print(deltabar)
-   cat("Ad",fill=TRUE)
-   print(Ad)
-}
-cat(" ",fill=TRUE)
-cat("MCMC Parms: ",fill=TRUE)
-cat(paste(" R= ",R," keep= ",keep),fill=TRUE)
-cat("",fill=TRUE)
-#
-# allocate space for draws
-#
-taudraw=matrix(double(floor(R/keep)*nreg),ncol=nreg)
-if(drawdelta) Deltadraw=matrix(double((floor(R/keep))*nz*nvar),ncol=nz*nvar)
-betadraw=array(double((floor(R/keep))*nreg*nvar),dim=c(nreg,nvar,floor(R/keep)))
-probdraw=matrix(double((floor(R/keep))*ncomp),ncol=ncomp)
-oldbetas=matrix(double(nreg*nvar),ncol=nvar)
-oldcomp=NULL
-compdraw=NULL
-#
-#  initialize values
-#
-#  Create XpX elements of regdata and initialize tau
-#
-regdata=lapply(regdata,append)
-tau=sapply(regdata,getvar)
-#
-# set initial values for the indicators
-#     ind is of length(nreg) and indicates which mixture component this obs
-#     belongs to.
-#
-ind=NULL
-ninc=floor(nreg/ncomp)
-for (i in 1:(ncomp-1)) {ind=c(ind,rep(i,ninc))}
-if(ncomp != 1) {ind = c(ind,rep(ncomp,nreg-length(ind)))} else {ind=rep(1,nreg)}
-#
-# initialize delta
-#
-if (drawdelta) olddelta=rep(0,nz*nvar)
-#
-# initialize probs
-#
-oldprob=rep(1/ncomp,ncomp)
-#
-# initialize comps
-#
-tcomp=list(list(mu=rep(0,nvar),rooti=diag(nvar)))
-oldcomp=rep(tcomp,ncomp)
-#
-#	start main iteration loop
-#
-itime=proc.time()[3]
-cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
-fsh()
-for(rep in 1:R)
-{
-   # first draw comps,ind,p | {beta_i}, delta
-   #        ind,p need initialization comps is drawn first in sub-Gibbs
-   if(drawdelta) 
-      {mgout=rmixGibbs(oldbetas-Z%*%t(matrix(olddelta,ncol=nz)),
-      mubar,Amu,nu,V,a,oldprob,ind,oldcomp)}
-   else
-      {mgout=rmixGibbs(oldbetas,
-      mubar,Amu,nu,V,a,oldprob,ind,oldcomp)}
-   oldprob=mgout[[1]]
-   oldcomp=mgout[[3]]
-   ind=mgout[[2]]
-   # now draw delta | {beta_i}, ind, comps
-   if(drawdelta) {olddelta=drawDelta(Z,oldbetas,ind,oldcomp,deltabar,Ad)}
-   #
-   #  loop over all regression equations drawing beta_i | ind[i],z[i,],mu[ind[i]],rooti[ind[i]]
-   #
-      for (reg in 1:nreg) 
-      {
-         rootpi=oldcomp[[ind[reg]]]$rooti
-         #  note: beta_i = Delta*z_i + u_i  Delta is nvar x nz
-         if(drawdelta) {
-            betabar=oldcomp[[ind[reg]]]$mu+matrix(olddelta,ncol=nz)%*%as.vector(Z[reg,])}
-         else {
-            betabar=oldcomp[[ind[reg]]]$mu }
-      regout=runiregG(regdata[[reg]]$y,regdata[[reg]]$X,regdata[[reg]]$XpX,
-                regdata[[reg]]$Xpy,tau[reg],rootpi,betabar,nu.e,ssq[reg])
-      oldbetas[reg,]=regout$betadraw
-      tau[reg]=regout$sigmasqdraw
-      }
-   #
-   #       print time to completion and draw # every 100th draw
-   #
-   if(((rep/100)*100) ==(floor(rep/100)*100))
-     {ctime=proc.time()[3]
-      timetoend=((ctime-itime)/rep)*(R+1-rep)
-      cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
-      fsh()}
-   #
-   #       save every keepth draw
-   #
-   mkeep=rep/keep
-   if((mkeep*keep) == (floor(mkeep)*keep))
-      { taudraw[mkeep,]=tau
-        betadraw[,,mkeep]=oldbetas 
-        probdraw[mkeep,]=oldprob
-        if(drawdelta) Deltadraw[mkeep,]=olddelta
-        compdraw[[mkeep]]=oldcomp }
-        
-}
-ctime=proc.time()[3]
-cat(" Total Time Elapsed: ",round((ctime-itime)/60,2),fill=TRUE)
-attributes(taudraw)$class=c("bayesm.mat","mcmc")
-attributes(taudraw)$mcpar=c(1,R,keep)
-if(drawdelta){
-   attributes(Deltadraw)$class=c("bayesm.mat","mcmc")
-   attributes(Deltadraw)$mcpar=c(1,R,keep)}
-attributes(betadraw)$class=c("bayesm.hcoef")
-nmix=list(probdraw=probdraw,zdraw=NULL,compdraw=compdraw)
-attributes(nmix)$class="bayesm.nmix"
-if(drawdelta) 
-   {return(list(taudraw=taudraw,Deltadraw=Deltadraw,betadraw=betadraw,nmix=nmix))} 
-else 
-   {return(list(taudraw=taudraw,betadraw=betadraw,nmix=nmix))}
-}
diff --git a/R/rhierLinearMixture_rcpp.r b/R/rhierLinearMixture_rcpp.r
new file mode 100644
index 0000000..9789649
--- /dev/null
+++ b/R/rhierLinearMixture_rcpp.r
@@ -0,0 +1,214 @@
rhierLinearMixture=function(Data,Prior,Mcmc){
#
# revision history:
#   changed 12/17/04 by rossi to fix bug in drawdelta when there is zero/one unit
#   in a mixture component
#   adapted to linear model by Vicky Chen 6/06
#   put in classes 3/07
#   changed a check 9/08
#   W. Taylor 4/15 - added nprint option to MCMC argument
#
# purpose: run hierarchical linear model with mixture of normals 
#          (R front-end: validates inputs, sets defaults, then calls the
#           compiled rhierLinearMixture_rcpp_loop sampler)
#
# Arguments:
#   Data contains a list of (regdata, and possibly Z)
#      regdata is a list of lists (one list per unit)
#          regdata[[i]]=list(y,X)
#             y is a vector of observations
#             X is a length(y) x nvar matrix of values of
#               X vars including intercepts
#             Z is an nreg x nz matrix of values of variables
#               note: Z should NOT contain an intercept
#   Prior contains a list of (nu.e,ssq,deltabar,Ad,mubar,Amu,nu,V,ncomp,a) 
#      ncomp is the number of components in normal mixture
#           if elements of Prior (other than ncomp) do not exist, defaults are used
#   Mcmc contains a list of (s,c,R,keep,nprint)
#
# Output:  as list containing
#   taodraw is R/keep x nreg  array of error variances for each regression
#   Deltadraw R/keep  x nz*nvar matrix of draws of Delta, first row is initial value
#   betadraw is nreg x nvar x R/keep array of draws of betas
#   probdraw is R/keep x ncomp matrix of draws of probs of mixture components
#   compdraw is a list of list of lists (length R/keep)
#      compdraw[[rep]] is the repth draw of components for mixtures
#
# Priors:
#    tau_i ~ nu.e*ssq_i/chisq(nu.e)  tau_i is the variance of epsilon_i
#    beta_i = delta %*% z[i,] + u_i
#       u_i ~ N(mu_ind[i],Sigma_ind[i])
#       ind[i] ~multinomial(p)
#       p ~ dirichlet (a)
#           a: Dirichlet parameters for prior on p
#       delta is a k x nz array
#          delta= vec(D) ~ N(deltabar,A_d^-1)
#    mu_j ~ N(mubar,A_mu^-1(x)Sigma_j)
#    Sigma_j ~ IW(nu,V^-1)
#    ncomp is number of components
#
# MCMC parameters
#   R is number of draws
#   keep is thinning parameter, keep every keepth draw
#   nprint - print estimated time remaining on every nprint'th draw
#
#  check arguments
#
#--------------------------------------------------------------------------------------------------
#
#  create functions needed
#
# augment one unit's regdata with precomputed cross-products for the sampler
append=function(l) { l=c(l,list(XpX=crossprod(l$X),Xpy=crossprod(l$X,l$y)))}
#
# per-unit variance of y, with fallback to 1 for degenerate/missing cases
getvar=function(l) { 
  v=var(l$y)
  if(is.na(v)) return(1)
  if(v>0) return (v) else return (1)}
#
if(missing(Data)) {pandterm("Requires Data argument -- list of regdata, and (possibly) Z")}
  if(is.null(Data$regdata)) {pandterm("Requires Data element regdata (list of data for each unit)")}
  regdata=Data$regdata
  nreg=length(regdata)
  drawdelta=TRUE
if(is.null(Data$Z)) { cat("Z not specified",fill=TRUE); fsh() ; drawdelta=FALSE}
  # note: use Data$Z in the message -- Z itself is not yet assigned in this branch
  else {if (nrow(Data$Z) != nreg) {pandterm(paste("Nrow(Z) ",nrow(Data$Z),"ne number regressions ",nreg))}
      else {Z=Data$Z}}
  if(drawdelta) {
     nz=ncol(Z)
     colmeans=apply(Z,2,mean)
     # NOTE(review): signed column means can cancel; sum(abs(colmeans)) would be a
     # stricter de-mean check -- kept as-is to preserve upstream behavior
     if(sum(colmeans) > .00001) 
       {pandterm(paste("Z does not appear to be de-meaned: colmeans= ",colmeans))}
  }
#
# check regdata for validity
#
dimfun=function(l) {c(length(l$y),dim(l$X))}
dims=sapply(regdata,dimfun)
dims=t(dims)
# nvar = median number of X columns across units; any unit deviating is flagged below
nvar=quantile(dims[,3],prob=.5)

for (i in 1:nreg) 
{
   if(dims[i,1] != dims[i,2]  || dims[i,3] !=nvar) 
      {pandterm(paste("Bad Data dimensions for unit ",i," dims(y,X) =",dims[i,]))}
}
#
# check on prior
#
if(missing(Prior)) 
{pandterm("Requires Prior list argument (at least ncomp)")} 
if(is.null(Prior$nu.e)) {nu.e=BayesmConstant.nu.e} 
   else {nu.e=Prior$nu.e}
if(is.null(Prior$ssq)) {ssq=sapply(regdata,getvar)} 
   else {ssq=Prior$ssq}
if(is.null(Prior$ncomp)) {pandterm("Requires Prior element ncomp (num of mixture components)")} else {ncomp=Prior$ncomp}
if(is.null(Prior$mubar)) {mubar=matrix(rep(0,nvar),nrow=1)} else { mubar=matrix(Prior$mubar,nrow=1)}
  if(ncol(mubar) != nvar) {pandterm(paste("mubar must have ncomp cols, ncol(mubar)= ",ncol(mubar)))}
if(is.null(Prior$Amu)) {Amu=matrix(BayesmConstant.A,ncol=1)} else {Amu=matrix(Prior$Amu,ncol=1)}
  # scalar comparisons: use || rather than vectorized |; message names Amu (was "Am")
  if(ncol(Amu) != 1 || nrow(Amu) != 1) {pandterm("Amu must be a 1 x 1 array")}
if(is.null(Prior$nu)) {nu=nvar+BayesmConstant.nuInc}  else {nu=Prior$nu}
  if(nu < 1) {pandterm("invalid nu value")}
if(is.null(Prior$V)) {V=nu*diag(nvar)} else {V=Prior$V}
  if(sum(dim(V)==c(nvar,nvar)) !=2) pandterm("Invalid V in prior")
if(is.null(Prior$Ad) && drawdelta) {Ad=BayesmConstant.A*diag(nvar*nz)} else {Ad=Prior$Ad}
if(drawdelta) {if(ncol(Ad) != nvar*nz || nrow(Ad) != nvar*nz) {pandterm("Ad must be nvar*nz x nvar*nz")}}
if(is.null(Prior$deltabar) && drawdelta) {deltabar=rep(0,nz*nvar)} else {deltabar=Prior$deltabar}
  if(drawdelta) {if(length(deltabar) != nz*nvar) {pandterm("deltabar must be of length nvar*nz")}}
if(is.null(Prior$a)) { a=rep(BayesmConstant.a,ncomp)} else {a=Prior$a}
if(length(a) != ncomp) {pandterm("Requires dim(a)= ncomp (no of components)")}
bada=FALSE
   for(i in 1:ncomp) { if(a[i] < 0) bada=TRUE}
  if(bada) pandterm("invalid values in a vector")
#
# check on Mcmc
#
if(missing(Mcmc)) 
  {pandterm("Requires Mcmc list argument")}
else 
   { 
    if(is.null(Mcmc$keep)) {keep=BayesmConstant.keep} else {keep=Mcmc$keep}
    if(is.null(Mcmc$R)) {pandterm("Requires R argument in Mcmc list")} else {R=Mcmc$R}
    if(is.null(Mcmc$nprint)) {nprint=BayesmConstant.nprint} else {nprint=Mcmc$nprint}
      if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
    }
#
# print out problem
#
cat(" ",fill=TRUE)
cat("Starting MCMC Inference for Hierarchical Linear Model:",fill=TRUE)
cat("   Normal Mixture with",ncomp,"components for first stage prior",fill=TRUE)
cat(paste("   for ",nreg," cross-sectional units"),fill=TRUE)
cat(" ",fill=TRUE)
cat("Prior Parms: ",fill=TRUE)
cat("nu.e =",nu.e,fill=TRUE)
cat("nu =",nu,fill=TRUE)
cat("V ",fill=TRUE)
print(V)
cat("mubar ",fill=TRUE)
print(mubar)
cat("Amu ", fill=TRUE)
print(Amu)
cat("a ",fill=TRUE)
print(a)
if(drawdelta) 
{
   cat("deltabar",fill=TRUE)
   print(deltabar)
   cat("Ad",fill=TRUE)
   print(Ad)
}
cat(" ",fill=TRUE)
cat("MCMC Parms: ",fill=TRUE)
cat("R= ",R," keep= ",keep," nprint= ",nprint,fill=TRUE)
cat("",fill=TRUE)

#  initialize values
#
#  Create XpX elements of regdata and initialize tau
#
regdata=lapply(regdata,append)
tau=sapply(regdata,getvar)
#
# set initial values for the indicators
#     ind is of length(nreg) and indicates which mixture component this obs
#     belongs to.
#
ind=NULL
ninc=floor(nreg/ncomp)
for (i in 1:(ncomp-1)) {ind=c(ind,rep(i,ninc))}
if(ncomp != 1) {ind = c(ind,rep(ncomp,nreg-length(ind)))} else {ind=rep(1,nreg)}
#
#initialize delta
#
if (drawdelta){
  olddelta = rep(0,nz*nvar)
} else { #send placeholders to the _loop function if there is no Z matrix
  olddelta = 0
  Z = matrix(0)
  deltabar = 0
  Ad = matrix(0)
}
#
# initialize probs
#
oldprob=rep(1/ncomp,ncomp)

###################################################################
# Wayne Taylor
# 09/19/2014
###################################################################
draws =  rhierLinearMixture_rcpp_loop(regdata, Z,
                                      deltabar, Ad, mubar, Amu,
                                      nu, V, nu.e, ssq,
                                      R, keep, nprint, drawdelta,
                                      as.matrix(olddelta),  a, oldprob, ind, tau)
####################################################################

attributes(draws$taudraw)$class=c("bayesm.mat","mcmc")
attributes(draws$taudraw)$mcpar=c(1,R,keep)
if(drawdelta){
  attributes(draws$Deltadraw)$class=c("bayesm.mat","mcmc")
  attributes(draws$Deltadraw)$mcpar=c(1,R,keep)}
attributes(draws$betadraw)$class=c("bayesm.hcoef")
attributes(draws$nmix)$class="bayesm.nmix"

return(draws)
}
\ No newline at end of file
diff --git a/R/rhierLinearModel.R b/R/rhierLinearModel_rcpp.R
old mode 100755
new mode 100644
similarity index 59%
rename from R/rhierLinearModel.R
rename to R/rhierLinearModel_rcpp.R
index fb75af2..ddccf1d
--- a/R/rhierLinearModel.R
+++ b/R/rhierLinearModel_rcpp.R
@@ -6,6 +6,7 @@ function(Data,Prior,Mcmc)
 #     1/17/05  P. Rossi
 #     10/05  fixed error in setting prior if Prior argument is missing 
 #     3/07 added classes
+#     W. Taylor 4/15 - added nprint option to MCMC argument
 #
 # Purpose:
 #   run hiearchical regression model
@@ -23,6 +24,7 @@ function(Data,Prior,Mcmc)
 #     list of Mcmc parameters
 #     R is number of draws
 #     keep is thining parameter -- keep every keepth draw
+#     nprint - print estimated time remaining on every nprint'th draw
 #
 # Output: 
 #   list of 
@@ -61,48 +63,12 @@ getvar=function(l) {
      if(is.na(v)) return(1)
      if(v>0) return (v) else return (1)}
 #
-runiregG=
-function(y,X,XpX,Xpy,sigmasq,A,betabar,nu,ssq){
-# 
-# Purpose:
-#   perform one Gibbs iteration for Univ Regression Model
-#   only does one iteration so can be used in rhierLinearModel
-#
-# Model:
-#   y = Xbeta + e  e ~N(0,sigmasq)
-#          y is n x 1
-#          X is n x k
-#          beta is k x 1 vector of coefficients
-#
-# Priors:  beta ~ N(betabar,A^-1)
-#          sigmasq ~ (nu*ssq)/chisq_nu
-# 
-n=length(y)
-k=ncol(XpX)
-sigmasq=as.vector(sigmasq)
-#
-#     first draw beta | sigmasq
-#
-  IR=backsolve(chol(XpX/sigmasq+A),diag(k))
-  btilde=crossprod(t(IR))%*%(Xpy/sigmasq+A%*%betabar)
-  beta = btilde + IR%*%rnorm(k)
-#
-#    now draw sigmasq | beta
-#
-  res=y-X%*%beta
-  s=t(res)%*%res
-  sigmasq=(nu*ssq + s)/rchisq(1,nu+n)
-
-list(betadraw=beta,sigmasqdraw=sigmasq)
-}
-
 #------------------------------------------------------------------------------
 #
 
 #
 # check arguments
 #
-pandterm=function(message) {stop(message,call.=FALSE)}
 if(missing(Data)) {pandterm("Requires Data argument -- list of regdata and Z")}
     if(is.null(Data$regdata)) {pandterm("Requires Data element regdata")}
     regdata=Data$regdata
@@ -128,19 +94,19 @@ for (i in 1:nreg)
 # check for Prior
 #
 if(missing(Prior))
-   { Deltabar=matrix(rep(0,nz*nvar),ncol=nvar); A=0.01*diag(nz);
-     nu.e=3; ssq=sapply(regdata,getvar) ; nu=nvar+3 ; V= nu*diag(nvar)}
+   { Deltabar=matrix(rep(0,nz*nvar),ncol=nvar); A=BayesmConstant.A*diag(nz);
+     nu.e=BayesmConstant.nu.e; ssq=sapply(regdata,getvar) ; nu=nvar+BayesmConstant.nuInc ; V= nu*diag(nvar)}
 else
    {
     if(is.null(Prior$Deltabar)) {Deltabar=matrix(rep(0,nz*nvar),ncol=nvar)} 
        else {Deltabar=Prior$Deltabar}
-    if(is.null(Prior$A)) {A=.01*diag(nz)} 
+    if(is.null(Prior$A)) {A=BayesmConstant.A*diag(nz)} 
        else {A=Prior$A}
-    if(is.null(Prior$nu.e)) {nu.e=3} 
+    if(is.null(Prior$nu.e)) {nu.e=BayesmConstant.nu.e} 
        else {nu.e=Prior$nu.e}
     if(is.null(Prior$ssq)) {ssq=sapply(regdata,getvar)} 
        else {ssq=Prior$ssq}
-    if(is.null(Prior$nu)) {nu=nvar+3} 
+    if(is.null(Prior$nu)) {nu=nvar+BayesmConstant.nuInc} 
        else {nu=Prior$nu}
     if(is.null(Prior$V)) {V=nu*diag(nvar)} 
        else {V=Prior$V}
@@ -162,7 +128,9 @@ else
    {
     if(is.null(Mcmc$R)) 
        {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
-    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
+    if(is.null(Mcmc$keep)) {keep=BayesmConstant.keep} else {keep=Mcmc$keep}
+    if(is.null(Mcmc$nprint)) {nprint=BayesmConstant.nprint} else {nprint=Mcmc$nprint}
+      if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
     }
 #
 # print out problem
@@ -184,20 +152,14 @@ cat("V ",fill=TRUE)
 print(V)
 cat(" ", fill=TRUE)
 cat("MCMC parms: ",fill=TRUE)
-cat("R= ",R," keep= ",keep,fill=TRUE)
+cat("R= ",R," keep= ",keep," nprint= ",nprint,fill=TRUE)
 cat(" ",fill=TRUE)
 #
 #  allocate space for the draws and set initial values of Vbeta and Delta
 #
-Vbetadraw=matrix(double(floor(R/keep)*nvar*nvar),ncol=nvar*nvar)
-Deltadraw=matrix(double(floor(R/keep)*nz*nvar),ncol=nz*nvar)
-taudraw=matrix(double(floor(R/keep)*nreg),ncol=nreg)
-betadraw=array(double(floor(R/keep)*nreg*nvar),dim=c(nreg,nvar,floor(R/keep)))
-
 tau=double(nreg)
-Delta=c(rep(0,nz*nvar))
-Vbeta=as.vector(diag(nvar))
-betas=matrix(double(nreg*nvar),ncol=nvar)
+Delta=matrix(0,nz,nvar)
+Vbeta=diag(nvar)
 #
 #  set up fixed parms for the draw of Vbeta,Delta
 #
@@ -211,63 +173,22 @@ betas=matrix(double(nreg*nvar),ncol=nvar)
 #       Create XpX elements of regdata and initialize tau
 #
 regdata=lapply(regdata,append)
-
 tau=sapply(regdata,getvar)
-#
-#	start main iteration loop
-#
-itime=proc.time()[3]
-cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
-fsh()
 
-for(rep in 1:R)
-{
-   Abeta=chol2inv(chol(matrix(Vbeta,ncol=nvar)))
-   betabar=Z%*%matrix(Delta,ncol=nvar)
-#
-#       loop over all regressions
-#
-   for (reg in 1:nreg) 
-   {
-      regout=runiregG(regdata[[reg]]$y,regdata[[reg]]$X,regdata[[reg]]$XpX,
-                regdata[[reg]]$Xpy,tau[reg],Abeta,betabar[reg,],nu.e,ssq[reg])
-      betas[reg,]=regout$betadraw
-      tau[reg]=regout$sigmasqdraw
-   }
-#
-#          draw Vbeta, Delta | {beta_i}
-#
-   rmregout=rmultireg(betas,Z,Deltabar,A,nu,V)
-   Vbeta=as.vector(rmregout$Sigma)
-   Delta=as.vector(rmregout$B)
-#
-#       print time to completion and draw # every 100th draw
-#
-  if(rep%%100 == 0)
-    {ctime=proc.time()[3]
-    timetoend=((ctime-itime)/rep)*(R-rep)
-    cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
-    fsh()}
+###################################################################
+# Keunwoo Kim
+# 08/20/2014
+###################################################################
+draws=rhierLinearModel_rcpp_loop(regdata,Z,Deltabar,A,nu,V,nu.e,ssq,tau,Delta,Vbeta,R,keep,nprint)
+###################################################################
 
+attributes(draws$taudraw)$class=c("bayesm.mat","mcmc")
+attributes(draws$taudraw)$mcpar=c(1,R,keep)
+attributes(draws$Deltadraw)$class=c("bayesm.mat","mcmc")
+attributes(draws$Deltadraw)$mcpar=c(1,R,keep)
+attributes(draws$Vbetadraw)$class=c("bayesm.var","bayesm.mat","mcmc")
+attributes(draws$Vbetadraw)$mcpar=c(1,R,keep)
+attributes(draws$betadraw)$class=c("bayesm.hcoef")
 
-  if(rep%%keep == 0) 
-    {mkeep=rep/keep
-     Vbetadraw[mkeep,]=Vbeta
-     Deltadraw[mkeep,]=Delta
-     taudraw[mkeep,]=tau
-     betadraw[,,mkeep]=betas}
-
-}
-ctime = proc.time()[3]
-cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
-
-attributes(taudraw)$class=c("bayesm.mat","mcmc")
-attributes(taudraw)$mcpar=c(1,R,keep)
-attributes(Deltadraw)$class=c("bayesm.mat","mcmc")
-attributes(Deltadraw)$mcpar=c(1,R,keep)
-attributes(Vbetadraw)$class=c("bayesm.var","bayesm.mat","mcmc")
-attributes(Vbetadraw)$mcpar=c(1,R,keep)
-attributes(betadraw)$class=c("bayesm.hcoef")
-
-return(list(Vbetadraw=Vbetadraw,Deltadraw=Deltadraw,betadraw=betadraw,taudraw=taudraw))
-}
+return(draws)
+}
\ No newline at end of file
diff --git a/R/rhierMnlDP.R b/R/rhierMnlDP.R
deleted file mode 100644
index 306926d..0000000
--- a/R/rhierMnlDP.R
+++ /dev/null
@@ -1,807 +0,0 @@
-rhierMnlDP=
-function(Data,Prior,Mcmc)
-{
-#
-#  created 3/08 by Rossi from rhierMnlRwMixture adding DP draw for to replace finite mixture of normals
-#
-# revision history:
-#   changed 12/17/04 by rossi to fix bug in drawdelta when there is zero/one unit
-#   in a mixture component
-#   added loglike output, changed to reflect new argument order in llmnl, mnlHess 9/05
-#   changed weighting scheme to (1-w)logl_i + w*Lbar (normalized) 12/05
-#   3/07 added classes
-#
-# purpose: run hierarchical mnl logit model with mixture of normals 
-#   using RW and cov(RW inc) = (hess_i + Vbeta^-1)^-1
-#   uses normal approximation to pooled likelihood
-#
-# Arguments:
-#   Data contains a list of (p,lgtdata, and possibly Z)
-#      p is number of choice alternatives
-#      lgtdata is a list of lists (one list per unit)
-#          lgtdata[[i]]=list(y,X)
-#             y is a vector indicating alternative chosen
-#               integers 1:p indicate alternative
-#             X is a length(y)*p x nvar matrix of values of
-#               X vars including intercepts
-#             Z is an length(lgtdata) x nz matrix of values of variables
-#               note: Z should NOT contain an intercept
-#   Prior contains a list of (deltabar,Ad,lambda_hyper,Prioralpha)
-#       alpha: starting value
-#       lambda_hyper: hyperparms of prior on lambda
-#       Prioralpha: hyperparms of alpha prior; a list of (Istarmin,Istarmax,power)
-#       if elements of the prior don't exist, defaults are assumed
-#   Mcmc contains a list of (s,c,R,keep)
-#
-# Output:  as list containing
-#   Deltadraw R/keep  x nz*nvar matrix of draws of Delta, first row is initial value
-#   betadraw is nlgt x nvar x R/keep array of draws of betas
-#   probdraw is R/keep x 1 matrix of draws of probs of mixture components
-#   compdraw is a list of list of lists (length R/keep)
-#      compdraw[[rep]] is the repth draw of components for mixtures
-#   loglike  log-likelikelhood at each kept draw
-#
-# Priors:
-#    beta_i = D %*% z[i,] + u_i
-#       vec(D)~N(deltabar)
-#       u_i ~ N(theta_i)
-#       theta_i~G
-#       G|lambda,alpha ~ DP(G|G0(lambda),alpha)
-#
-#        lambda:
-#           G0 ~ N(mubar,Sigma (x) Amu^-1)
-#           mubar=vec(mubar)
-#           Sigma ~ IW(nu,nu*v*I)  note: mode(Sigma)=nu/(nu+2)*v*I
-#           mubar=0
-#           amu is uniform on grid specified by alim
-#           nu is log uniform, nu=d-1+exp(Z) z is uniform on seq defined bvy nulim
-#           v is uniform on sequence specificd by vlim
-#
-#        Prioralpha:
-#           alpha ~ (1-(alpha-alphamin)/(alphamax-alphamin))^power
-#           alphamin=exp(digamma(Istarmin)-log(gamma+log(N)))
-#           alphamax=exp(digamma(Istarmax)-log(gamma+log(N)))
-#           gamma= .5772156649015328606
-#
-# MCMC parameters
-#   s is the scaling parameter for the RW inc covariance matrix; s^2 Var is inc cov
-#      matrix
-#   w is parameter for weighting function in fractional likelihood
-#      w is the weight on the normalized pooled likelihood 
-#   R is number of draws
-#   keep is thinning parameter, keep every keepth draw
-#--------------------------------------------------------------------------------------------------
-#
-#  create functions needed
-#
-rDPGibbs1= 
-function(y,theta,thetaStar,indic,lambda,alpha,Prioralpha,lambda_hyper,maxuniq,gridsize){
-#
-#  revision history:
-#   created from rDPGibbs by Rossi 3/08
-#
-#  do one draw of DP Gibbs sampler with norma base
-#
-# Model:
-#        y_i ~ N(y|thetai)
-#        thetai|G ~ G
-#        G|lambda,alpha ~ DP(G|G0(lambda),alpha)
-#
-# Priors:
-#        alpha: starting value
-#
-#        lambda:
-#           G0 ~ N(mubar,Sigma (x) Amu^-1)
-#           mubar=vec(mubar)
-#           Sigma ~ IW(nu,nu*V) V=v*I  note: mode(Sigma)=nu/(nu+2)*v*I
-#           mubar=0
-#           amu is uniform on grid specified by alim
-#           nu is log uniform, nu=d-1+exp(Z) z is uniform on seq defined bvy nulim
-#           v is uniform on sequence specificd by vlim
-#
-#        Prioralpha:
-#           alpha ~ (1-(alpha-alphamin)/(alphamax-alphamin))^power
-#           alphamin=exp(digamma(Istarmin)-log(gamma+log(N)))
-#           alphamax=exp(digamma(Istarmax)-log(gamma+log(N)))
-#           gamma= .5772156649015328606
-#
-#
-# output:
-#   theta - list of thetas for each "obs"
-#   ind - vector of indicators for which observations are associated with which comp in thetaStar
-#   thetaStar - list of unique normal component parms
-#   lambda  - list of of (a,nu,V)
-#   alpha 
-#   thetaNp1 - one draw from predictive given thetaStar, lambda,alphama
-#
-# define needed functions
-#
-# -----------------------------------------------------------------------------------------
-#
-q0=
-   function(y,lambda,eta){
-#
-# function to compute a vector of int f(y[i]|theta) p(theta|lambda)dlambda
-#     here p(theta|lambda) is G0 the base prior
-#
-# implemented for a multivariate normal data density and standard conjugate
-# prior:
-#    theta=list(mu,Sigma)
-#    f(y|theta,eta) is N(mu,Sigma)
-#    lambda=list(mubar,Amu,nu,V)
-#       mu|Sigma ~ N(mubar,Sigma (x) Amu^-1)
-#       Sigma ~ IW(nu,V)
-#
-# arguments:
-#    Y is n x k matrix of observations
-#    lambda=list(mubar,Amu,nu,V)
-#    eta is not used
-# 
-# output:
-#    vector of q0 values for each obs (row of Y)
-#
-# p. rossi 12/05
-#
-# here y is matrix of observations (each row is an obs)
-
-mubar=lambda$mubar; nu=lambda$nu ; Amu=lambda$Amu; V=lambda$V
-k=ncol(y)
-R=chol(V)
-logdetR=sum(log(diag(R)))
-if (k > 1) 
-  {lnk1k2=(k/2)*log(2)+log((nu-k)/2)+lgamma((nu-k)/2)-lgamma(nu/2)+sum(log(nu/2-(1:(k-1))/2))}
-else
-  {lnk1k2=(k/2)*log(2)+log((nu-k)/2)+lgamma((nu-k)/2)-lgamma(nu/2)}
-constant=-(k/2)*log(2*pi)+(k/2)*log(Amu/(1+Amu)) + lnk1k2 + nu*logdetR
-#
-# note: here we are using the fact that |V + S_i | = |R|^2 (1 + v_i'v_i)
-#       where v_i = sqrt(Amu/(1+Amu))*t(R^-1)*(y_i-mubar), R is chol(V)
-#
-#       and S_i = Amu/(1+Amu) * (y_i-mubar)(y_i-mubar)'
-#
-mat=sqrt(Amu/(1+Amu))*t(backsolve(R,diag(ncol(y))))%*%(t(y)-mubar)
-vivi=colSums(mat^2)
-
-lnq0v=constant-((nu+1)/2)*(2*logdetR+log(1+vivi))
-
-return(exp(lnq0v))
-}
-# ----------------------------------------------------------------------------------------------
-   rmultinomF=function(p) {
-       return(sum(runif(1) > cumsum(p))+1)
-   }
-# -----------------------------------------------------------------------------------------------
-alphaD=
-   function(Prioralpha,Istar,gridsize){
-#
-#  function to draw alpha using prior, p(alpha)= (1-(alpha-alphamin)/(alphamax-alphamin))**power
-#
-   power=Prioralpha$power
-   alphamin=Prioralpha$alphamin
-   alphamax=Prioralpha$alphamax
-   n=Prioralpha$n
-   alpha=seq(from=alphamin,to=(alphamax-0.000001),len=gridsize)
-   lnprob=Istar*log(alpha) + lgamma(alpha) - lgamma(n+alpha) + 
-          power*log(1-(alpha-alphamin)/(alphamax-alphamin))
-   lnprob=lnprob-median(lnprob)
-   probs=exp(lnprob)
-   probs=probs/sum(probs)
-   return(alpha[rmultinomF(probs)])
-}  
-
-
-#
-# ------------------------------------------------------------------------------------------
-#
-yden=
-   function(thetaStar,y,eta){
-#
-# function to compute f(y | theta) 
-# computes f for all values of theta in theta list of lists
-#
-# arguments:
-#   thetaStar is a list of lists.  thetaStar[[i]] is a list with components, mu, rooti
-#   y |theta[[i]] ~ N(mu,(rooti %*% t(rooti))^-1)  rooti is inverse of Chol root of Sigma
-#   eta is not used
-#
-# output:
-#   length(thetaStar) x n array of values of f(y[j,]|thetaStar[[i]]
-# 
-
-nunique=length(thetaStar)
-n=nrow(y)
-ydenmat=matrix(double(n*nunique),ncol=n)
-k=ncol(y)
-for(i in 1:nunique){
-
-   # now compute vectorized version of lndMvn 
-   # compute y_i'RIRI'y_i for all i
-   #
-   mu=thetaStar[[i]]$mu; rooti=thetaStar[[i]]$rooti
-   quads=colSums((crossprod(rooti,(t(y)-mu)))^2)
-   ydenmat[i,]=exp(-(k/2)*log(2*pi) + sum(log(diag(rooti))) - .5*quads)
-   
-}
-return(ydenmat)
-}
-
-#
-# -----------------------------------------------------------------------------------------
-#
-GD=
-   function(lambda){
-#
-# function to draw from prior for Multivariate Normal Model
-#
-# mu|Sigma ~ N(mubar,Sigma x Amu^-1)
-# Sigma ~ IW(nu,V)
-#
-# note: we must insure that mu is a vector to use most efficient
-#       lndMvn routine
-#
-nu=lambda$nu
-V=lambda$V
-mubar=lambda$mubar
-Amu=lambda$Amu
-k=length(mubar)
-Sigma=rwishart(nu,chol2inv(chol(lambda$V)))$IW
-root=chol(Sigma)
-mu=mubar+(1/sqrt(Amu))*t(root)%*%matrix(rnorm(k),ncol=1)
-return(list(mu=as.vector(mu),rooti=backsolve(root,diag(k))))
-}
-
-#
-# -------------------------------------------------------------------------------------------
-#
-thetaD=
-   function(y,lambda,eta){
-#
-# function to draw from posterior of theta given data y and base prior G0(lambda)
-#
-# here y ~ N(mu,Sigma)
-# theta = list(mu=mu,rooti=chol(Sigma)^-1)
-# mu|Sigma ~ N(mubar,Sigma (x) Amu-1)
-# Sigma ~ IW(nu,V)
-#
-# arguments: 
-#   y is n x k matrix of obs
-#   lambda is list(mubar,Amu,nu,V)
-#   eta is not used
-# output:
-#   one draw of theta, list(mu,rooti)
-#        Sigma=inv(rooti)%*%t(inv(rooti))
-#
-# note: we assume that y is a matrix. if there is only one obs, y is a 1 x k matrix
-#
-rout=rmultireg(y,matrix(c(rep(1,nrow(y))),ncol=1),matrix(lambda$mubar,nrow=1),matrix(lambda$Amu,ncol=1),
-       lambda$nu,lambda$V)
-return(list(mu=as.vector(rout$B),rooti=backsolve(chol(rout$Sigma),diag(ncol(y)))))
-}
-
-#
-# --------------------------------------------------------------------------------------------
-# load a faster version of lndMvn
-# note: version of lndMvn below assumes x,mu is a vector!
-lndMvn=function (x, mu, rooti){
-    return(-(length(x)/2) * log(2 * pi) - 0.5 * sum(((x-mu)%*%rooti)**2) + sum(log(diag(rooti))))
-}
-# -----------------------------------------------------------------------------------------
-   lambdaD=function(lambda,thetaStar,alim=c(.01,2),nulim=c(.01,2),vlim=c(.1,5),gridsize=20){
-#
-# revision history
-#  p. rossi 7/06
-#  vectorized 1/07
-#  changed 2/08 to paramaterize V matrix of IW prior to nu*v*I; then mode of Sigma=nu/(nu+2)vI
-#      this means that we have a reparameterization to v* = nu*v
-#
-#  function to draw (nu, v, a) using uniform priors
-#
-#  theta_j=(mu_j,Sigma_j)  mu_j~N(0,Sigma_j/a)  Sigma_j~IW(nu,vI)
-#           recall E[Sigma]= vI/(nu-dim-1)
-#
-# define functions needed
-# ----------------------------------------------------------------------------------------------
-   rmultinomF=function(p) {
-       return(sum(runif(1) > cumsum(p))+1)
-   }
-echo=function(lst){return(t(lst[[2]]))}
-rootiz=function(lst){crossprod(lst[[2]],lst[[1]])}
-#
-# ------------------------------------------------------------------------------------------
-
-   d=length(thetaStar[[1]]$mu)
-   Istar=length(thetaStar)
-   aseq=seq(from=alim[1],to=alim[2],len=gridsize)
-   nuseq=d-1+exp(seq(from=nulim[1],to=nulim[2],len=gridsize)) # log uniform grid
-   vseq=seq(from=vlim[1],to=vlim[2],len=gridsize)
-#
-# extract needed info from thetaStar list
-#
-   out=double(Istar*d*d)
-   out=sapply(thetaStar,echo)
-   dim(out)=c(d,Istar*d) # out has the rootis in form: [t(rooti_1), t(rooti_2), ...,t(rooti_Istar)]
-   sumdiagriri=sum(colSums(out^2)) #  sum_j tr(rooti_j%*%t(rooti_j))
-#   now get diagonals of rooti
-   ind=cbind(c(1:(d*Istar)),rep((1:d),Istar))
-   out=t(out)
-   sumlogdiag=sum(log(out[ind]))
-   rimu=sapply(thetaStar,rootiz) # columns of rimu contain t(rooti_j)%*%mu_j
-   dim(rimu)=c(d,Istar)
-   sumquads=sum(colSums(rimu^2)) 
-#  
-#  draw a  (conditionally indep of nu,v given theta_j)
-    lnprob=double(length(aseq))
-    lnprob=Istar*(-(d/2)*log(2*pi))-.5*aseq*sumquads+Istar*d*log(sqrt(aseq))+sumlogdiag
-    lnprob=lnprob-max(lnprob)+200
-    probs=exp(lnprob)
-    probs=probs/sum(probs)
-    adraw=aseq[rmultinomF(probs)]
-#
-#   draw nu given v
-#
-    V=lambda$V
-    lnprob=double(length(nuseq))
-    arg=rep(c(1:d),gridsize)
-    dim(arg)=c(d,gridsize)
-    arg=t(arg)
-    arg=(nuseq+1-arg)/2
-    lnprob=-Istar*log(2)*d/2*nuseq - Istar*rowSums(lgamma(arg)) + 
-            Istar*d*log(sqrt(V[1,1]))*nuseq + sumlogdiag*nuseq
-    lnprob=lnprob-max(lnprob)+200
-    probs=exp(lnprob)
-    probs=probs/sum(probs)
-    nudraw=nuseq[rmultinomF(probs)]
-#
-#   draw v given nu 
-#
-    lnprob=double(length(vseq))
-    lnprob=Istar*nudraw*d*log(sqrt(vseq*nudraw))-.5*sumdiagriri*vseq*nudraw
-    lnprob=lnprob-max(lnprob)+200
-    probs=exp(lnprob)
-    probs=probs/sum(probs)
-    vdraw=vseq[rmultinomF(probs)]
-#
-#   put back into lambda
-#
-    return(list(mubar=c(rep(0,d)),Amu=adraw,nu=nudraw,V=nudraw*vdraw*diag(d)))
-}
-pandterm=function(message) { stop(message,call.=FALSE) }
-# -----------------------------------------------------------------------------------------
-
-for(rep in 1:1)		#note: we only do one loop!
-{
-   n = length(theta)
-
-   eta=NULL    # note eta is not used
-   thetaNp1=NULL
-   q0v = q0(y,lambda,eta)   # now that we draw lambda we need to recompute q0v each time
-
-   p=c(rep(1/(alpha+(n-1)),n-1),alpha/(alpha+(n-1)))
-
-   nunique=length(thetaStar)
-  
-   if(nunique > maxuniq ) { pandterm("maximum number of unique thetas exceeded")} 
-   ydenmat=matrix(double(maxuniq*n),ncol=n) 
-   ydenmat[1:nunique,]=yden(thetaStar,y,eta)
-   #  ydenmat is a length(thetaStar) x n array of density values given f(y[j,] | thetaStar[[i]]
-   #  note: due to remix step (below) we must recompute ydenmat each time!
-
-   # use .Call to draw theta list
-   out= .Call("thetadraw",y,ydenmat,indic,q0v,p,theta,lambda,eta=eta,
-                  thetaD=thetaD,yden=yden,maxuniq,nunique,new.env()) 
-
-   # theta has been modified by thetadraw so we need to recreate thetaStar
-   thetaStar=unique(theta)
-   nunique=length(thetaStar)
-
-   #thetaNp1 and remix
-   probs=double(nunique+1)
-   for(j in 1:nunique) {
-       ind = which(sapply(theta,identical,thetaStar[[j]]))
-       probs[j]=length(ind)/(alpha+n) 
-       new_utheta=thetaD(y[ind,,drop=FALSE],lambda,eta) 
-       for(i in seq(along=ind)) {theta[[ind[i]]]=new_utheta}
-       indic[ind]=j
-       thetaStar[[j]]=new_utheta
-   }
-   probs[nunique+1]=alpha/(alpha+n)
-   ind=rmultinomF(probs)
-   if(ind==length(probs)) {
-      thetaNp1=GD(lambda)
-   } else {
-      thetaNp1=thetaStar[[ind]]
-   }
-
-   # draw alpha
-   alpha=alphaD(Prioralpha,nunique,gridsize=gridsize)
-   
-   # draw lambda
-   lambda=lambdaD(lambda,thetaStar,alim=lambda_hyper$alim,nulim=lambda_hyper$nulim,
-             vlim=lambda_hyper$vlim,gridsize=gridsize)
-
-}
-#   note indic is the vector of indicators for each obs correspond to which thetaStar
-return(list(theta=theta,thetaStar=thetaStar,thetaNp1=thetaNp1,alpha=alpha,lambda=lambda,ind=indic))
-}
-#--------------------------------------------------------------------------------------------------
-
-llmnlFract=
-function(beta,y,X,betapooled,rootH,w,wgt){
-z=as.vector(rootH%*%(beta-betapooled))
-return((1-w)*llmnl(beta,y,X)+w*wgt*(-.5*(z%*%z)))
-}
-
-mnlRwMetropOnce=
-function(y,X,oldbeta,oldll,s,inc.root,betabar,rootpi){ 
-#
-# function to execute rw metropolis for the MNL
-# y is n vector with element = 1,...,j indicating which alt chosen
-# X is nj x k matrix of xvalues for each of j alt on each of n occasions
-# RW increments are N(0,s^2*t(inc.root)%*%inc.root)
-# prior on beta is N(betabar,Sigma)  Sigma^-1=rootpi*t(rootpi)
-#	inc.root, rootpi are upper triangular
-#	this means that we are using the UL decomp of Sigma^-1 for prior 
-# oldbeta is the current
-     stay=0
-     betac=oldbeta + s*t(inc.root)%*%(matrix(rnorm(ncol(X)),ncol=1))
-     cll=llmnl(betac,y,X)
-     clpost=cll+lndMvn(betac,betabar,rootpi)
-     ldiff=clpost-oldll-lndMvn(oldbeta,betabar,rootpi)
-     alpha=min(1,exp(ldiff))
-     if(alpha < 1) {unif=runif(1)} else {unif=0}
-     if (unif <= alpha)
-             {betadraw=betac; oldll=cll}
-           else
-             {betadraw=oldbeta; stay=1}
-return(list(betadraw=betadraw,stay=stay,oldll=oldll))
-}
-drawDelta=
-function(x,y,z,comps,deltabar,Ad){
-# delta = vec(D)
-#  given z and comps (z[i] gives component indicator for the ith observation, 
-#   comps is a list of mu and rooti)
-#y is n x p
-#x is n x k
-#y = xD' + U , rows of U are indep with covs Sigma_i given by z and comps
-p=ncol(y)
-k=ncol(x)
-xtx = matrix(0.0,k*p,k*p)
-xty = matrix(0.0,p,k) #this is the unvecced version, have to vec after sum
-for(i in 1:length(comps)) {
-   nobs=sum(z==i)
-   if(nobs > 0) {
-      if(nobs == 1) 
-        { yi = matrix(y[z==i,],ncol=p); xi = matrix(x[z==i,],ncol=k)}
-      else
-        { yi = y[z==i,]; xi = x[z==i,]}
-          
-      yi = t(t(yi)-comps[[i]][[1]])
-      sigi = crossprod(t(comps[[i]][[2]]))
-      xtx = xtx + crossprod(xi) %x% sigi
-      xty = xty + (sigi %*% crossprod(yi,xi))
-      }
-}
-xty = matrix(xty,ncol=1)
-
-# then vec(t(D)) ~ N(V^{-1}(xty + Ad*deltabar),V^{-1}) V = (xtx+Ad)
-cov=chol2inv(chol(xtx+Ad))
-return(cov%*%(xty+Ad%*%deltabar) + t(chol(cov))%*%rnorm(length(deltabar)))
-}
-#-------------------------------------------------------------------------------------------------------
-#
-#  check arguments
-#
-pandterm=function(message) { stop(message,call.=FALSE) }
-if(missing(Data)) {pandterm("Requires Data argument -- list of p,lgtdata, and (possibly) Z")}
-  if(is.null(Data$p)) {pandterm("Requires Data element p (# chce alternatives)") }
-  p=Data$p
-  if(is.null(Data$lgtdata)) {pandterm("Requires Data element lgtdata (list of data for each unit)")}
-  lgtdata=Data$lgtdata
-  nlgt=length(lgtdata)
-  drawdelta=TRUE
-if(is.null(Data$Z)) { cat("Z not specified",fill=TRUE); fsh() ; drawdelta=FALSE}
-  else {if (nrow(Data$Z) != nlgt) {pandterm(paste("Nrow(Z) ",nrow(Z),"ne number logits ",nlgt))}
-      else {Z=Data$Z}}
-  if(drawdelta) {
-     nz=ncol(Z)
-     colmeans=apply(Z,2,mean)
-     if(sum(colmeans) > .00001) 
-       {pandterm(paste("Z does not appear to be de-meaned: colmeans= ",colmeans))}
-  }
-  
-#
-# check lgtdata for validity
-#
-ypooled=NULL
-Xpooled=NULL
-if(!is.null(lgtdata[[1]]$X)) {oldncol=ncol(lgtdata[[1]]$X)}
-for (i in 1:nlgt) 
-{
-    if(is.null(lgtdata[[i]]$y)) {pandterm(paste("Requires element y of lgtdata[[",i,"]]"))}
-    if(is.null(lgtdata[[i]]$X)) {pandterm(paste("Requires element X of lgtdata[[",i,"]]"))}
-    ypooled=c(ypooled,lgtdata[[i]]$y)
-    nrowX=nrow(lgtdata[[i]]$X)
-    if((nrowX/p) !=length(lgtdata[[i]]$y)) {pandterm(paste("nrow(X) ne p*length(yi); exception at unit",i))}
-    newncol=ncol(lgtdata[[i]]$X)
-    if(newncol != oldncol) {pandterm(paste("All X elements must have same # of cols; exception at unit",i))}
-    Xpooled=rbind(Xpooled,lgtdata[[i]]$X)
-    oldncol=newncol
-}
-nvar=ncol(Xpooled)
-levely=as.numeric(levels(as.factor(ypooled)))
-if(length(levely) != p) {pandterm(paste("y takes on ",length(levely)," values -- must be = p"))}
-bady=FALSE
-for (i in 1:p )
-{
-    if(levely[i] != i) bady=TRUE
-}
-cat("Table of Y values pooled over all units",fill=TRUE)
-print(table(ypooled))
-if (bady) 
-  {pandterm("Invalid Y")}
-#
-# check on prior
-#
-alimdef=c(.01,2)
-nulimdef=c(.01,3)
-vlimdef=c(.1,4)
-if(missing(Prior)) {Prior=NULL}
-
-if(is.null(Prior$lambda_hyper)) {lambda_hyper=list(alim=alimdef,nulim=nulimdef,vlim=vlimdef)}
-   else {lambda_hyper=Prior$lambda_hyper;
-       if(is.null(lambda_hyper$alim)) {lambda_hyper$alim=alimdef}
-       if(is.null(lambda_hyper$nulim)) {lambda_hyper$nulim=nulimdef} 
-       if(is.null(lambda_hyper$vlim)) {lambda_hyper$vlim=vlimdef}
-       }
-if(is.null(Prior$Prioralpha)) {Prioralpha=list(Istarmin=1,Istarmax=min(50,0.1*nlgt),power=0.8)}
-   else {Prioralpha=Prior$Prioralpha;
-       if(is.null(Prioralpha$Istarmin)) {Prioralpha$Istarmin=1} else {Prioralpha$Istarmin=Prioralpha$Istarmin}
-       if(is.null(Prioralpha$Istarmax)) 
-       {Prioralpha$Istarmax=min(50,0.1*nlgt)} else {Prioralpha$Istarmax=Prioralpha$Istarmax}
-      if(is.null(Prioralpha$power)) {Prioralpha$power=0.8}
-   }	
-gamma= .5772156649015328606
-Prioralpha$alphamin=exp(digamma(Prioralpha$Istarmin)-log(gamma+log(nlgt)))
-Prioralpha$alphamax=exp(digamma(Prioralpha$Istarmax)-log(gamma+log(nlgt)))
-Prioralpha$n=nlgt
-#
-# check Prior arguments for valdity
-#
-if(lambda_hyper$alim[1]<0) {pandterm("alim[1] must be >0")}
-if(lambda_hyper$nulim[1]<0) {pandterm("nulim[1] must be >0")}
-if(lambda_hyper$vlim[1]<0) {pandterm("vlim[1] must be >0")}
-if(Prioralpha$Istarmin <1){pandterm("Prioralpha$Istarmin must be >= 1")}
-if(Prioralpha$Istarmax <= Prioralpha$Istarmin){pandterm("Prioralpha$Istarmin must be < Prioralpha$Istarmax")}	
-
-if(is.null(Prior$Ad) & drawdelta) {Ad=.01*diag(nvar*nz)} else {Ad=Prior$Ad}
-if(drawdelta) {if(ncol(Ad) != nvar*nz | nrow(Ad) != nvar*nz) {pandterm("Ad must be nvar*nz x nvar*nz")}}
-if(is.null(Prior$deltabar)& drawdelta) {deltabar=rep(0,nz*nvar)} else {deltabar=Prior$deltabar}
-  if(drawdelta) {if(length(deltabar) != nz*nvar) {pandterm("deltabar must be of length nvar*nz")}}
-#
-# check on Mcmc
-#
-if(missing(Mcmc)) 
-  {pandterm("Requires Mcmc list argument")}
-else 
-   { 
-    if(is.null(Mcmc$s)) {s=2.93/sqrt(nvar)} else {s=Mcmc$s}
-    if(is.null(Mcmc$w)) {w=.1}  else {w=Mcmc$w}
-    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
-    if(is.null(Mcmc$maxuniq)) {maxuniq=200} else {keep=Mcmc$maxuniq}
-    if(is.null(Mcmc$gridsize)) {gridsize=20} else {gridsize=Mcmc$gridsize}
-    if(is.null(Mcmc$R)) {pandterm("Requires R argument in Mcmc list")} else {R=Mcmc$R}
-    }
-#
-# print out problem
-#
-cat(" ",fill=TRUE)
-cat("Starting MCMC Inference for Hierarchical Logit:",fill=TRUE)
-cat("   Dirichlet Process Prior",fill=TRUE)
-cat(paste("  ",p," alternatives; ",nvar," variables in X"),fill=TRUE)
-cat(paste("   for ",nlgt," cross-sectional units"),fill=TRUE)
-cat(" ",fill=TRUE)
-cat(" Prior Parms: ",fill=TRUE)
-cat("  G0 ~ N(mubar,Sigma (x) Amu^-1)",fill=TRUE)
-cat("   mubar = ",0,fill=TRUE)
-cat("   Sigma ~ IW(nu,nu*v*I)",fill=TRUE)
-cat("   Amu ~ uniform[",lambda_hyper$alim[1],",",lambda_hyper$alim[2],"]",fill=TRUE)
-cat("   nu ~ uniform on log grid  [",nvar-1+exp(lambda_hyper$nulim[1]),
-             ",",nvar-1+exp(lambda_hyper$nulim[2]),"]",fill=TRUE)
-cat("   v ~ uniform[",lambda_hyper$vlim[1],",",lambda_hyper$vlim[2],"]",fill=TRUE)
-cat(" ",fill=TRUE)
-cat("  alpha ~ (1-(alpha-alphamin)/(alphamax-alphamin))^power",fill=TRUE)
-cat("   Istarmin = ",Prioralpha$Istarmin,fill=TRUE)
-cat("   Istarmax = ",Prioralpha$Istarmax,fill=TRUE)
-cat("   alphamin = ",Prioralpha$alphamin,fill=TRUE)
-cat("   alphamax = ",Prioralpha$alphamax,fill=TRUE)
-cat("   power = ",Prioralpha$power,fill=TRUE)
-cat(" ",fill=TRUE)
-if(drawdelta) 
-{
-   cat("deltabar",fill=TRUE)
-   print(deltabar)
-   cat("Ad",fill=TRUE)
-   print(Ad)
-}
-cat(" ",fill=TRUE)
-cat("MCMC Parms: ",fill=TRUE)
-cat(paste("s=",round(s,3)," w= ",w," R= ",R," keep= ",keep," maxuniq= ",maxuniq,
-          " gridsize for lambda hyperparms= ",gridsize),fill=TRUE)
-cat("",fill=TRUE)
-#
-# allocate space for draws
-#
-if(drawdelta) Deltadraw=matrix(double((floor(R/keep))*nz*nvar),ncol=nz*nvar)
-betadraw=array(double((floor(R/keep))*nlgt*nvar),dim=c(nlgt,nvar,floor(R/keep)))
-probdraw=matrix(double(floor(R/keep)),ncol=1)
-oldbetas=matrix(double(nlgt*nvar),ncol=nvar)
-oldll=double(nlgt)
-loglike=double(floor(R/keep))
-thetaStar=NULL
-compdraw=NULL
-Istardraw=matrix(double(floor(R/keep)),ncol=1)
-alphadraw=matrix(double(floor(R/keep)),ncol=1)
-nudraw=matrix(double(floor(R/keep)),ncol=1)
-vdraw=matrix(double(floor(R/keep)),ncol=1)
-adraw=matrix(double(floor(R/keep)),ncol=1)
-
-#
-# intialize compute quantities for Metropolis
-#
-cat("initializing Metropolis candidate densities for ",nlgt," units ...",fill=TRUE)
-fsh()
-#
-#  now go thru and computed fraction likelihood estimates and hessians
-#
-#       Lbar=log(pooled likelihood^(n_i/N))
-#
-#       fraction loglike = (1-w)*loglike_i + w*Lbar
-#
-betainit=c(rep(0,nvar))
-#
-#  compute pooled optimum
-#
-out=optim(betainit,llmnl,method="BFGS",control=list( fnscale=-1,trace=0,reltol=1e-6), 
-     X=Xpooled,y=ypooled)
-betapooled=out$par
-H=mnlHess(betapooled,ypooled,Xpooled)
-rootH=chol(H)
-#
-# initialize betas for all units
-#
-for (i in 1:nlgt) 
-{
-   wgt=length(lgtdata[[i]]$y)/length(ypooled)
-   out=optim(betapooled,llmnlFract,method="BFGS",control=list( fnscale=-1,trace=0,reltol=1e-4), 
-   X=lgtdata[[i]]$X,y=lgtdata[[i]]$y,betapooled=betapooled,rootH=rootH,w=w,wgt=wgt)
-   if(out$convergence == 0) 
-     { hess=mnlHess(out$par,lgtdata[[i]]$y,lgtdata[[i]]$X)
-       lgtdata[[i]]=c(lgtdata[[i]],list(converge=1,betafmle=out$par,hess=hess)) }
-   else
-     { lgtdata[[i]]=c(lgtdata[[i]],list(converge=0,betafmle=c(rep(0,nvar)),
-        hess=diag(nvar))) }
-   oldbetas[i,]=lgtdata[[i]]$betafmle
-   if(i%%50 ==0) cat("  completed unit #",i,fill=TRUE)
-   fsh()
-}
-
-#
-# initialize delta
-#
-if (drawdelta) olddelta=rep(0,nz*nvar)
-
-#
-# initialize theta,thetaStar,ind
-#
-theta=vector("list",nlgt)
-for(i in 1:nlgt) {theta[[i]]=list(mu=rep(0,nvar),rooti=diag(nvar))}
-ind=double(nlgt)
-thetaStar=unique(theta)
-nunique=length(thetaStar)
-for(j in 1:nunique){
-    ind[which(sapply(theta,identical,thetaStar[[j]]))]=j
-}
-#
-# initialize alpha,lambda
-#
-alpha=1
-lambda=list(mubar=rep(0,nvar),Amu=1,nu=nvar+1,V=(nvar+1)*diag(nvar))
-#
-# fix oldprob (only one comp)
-#
-oldprob=1
-
-#
-#	start main iteration loop
-#
-itime=proc.time()[3]
-cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
-fsh()
-for(rep in 1:R)
-{
-   # first draw comps,ind,p | {beta_i}, delta
-   #        ind,p need initialization comps is drawn first in sub-Gibbs
-   if(drawdelta) 
-      {mgout=rDPGibbs1(oldbetas-Z%*%t(matrix(olddelta,ncol=nz)),theta,thetaStar,ind,
-	  lambda,alpha,Prioralpha,lambda_hyper,maxuniq,gridsize)}
-   else
-      {mgout=rDPGibbs1(oldbetas,theta,thetaStar,ind,
-	  lambda,alpha,Prioralpha,lambda_hyper,maxuniq,gridsize)}
-
-   ind=mgout$ind
-   lambda=mgout$lambda
-   alpha=mgout$alpha
-   theta=mgout$theta
-   thetaStar=mgout$thetaStar
-   Istar=length(thetaStar)
-   
-   
-   # now draw delta | {beta_i}, ind, comps
-   if(drawdelta) {olddelta=drawDelta(Z,oldbetas,ind,thetaStar,deltabar,Ad)}
-   #
-   #  loop over all lgt equations drawing beta_i | ind[i],z[i,],mu[ind[i]],rooti[ind[i]]
-   #
-      for (lgt in 1:nlgt) 
-      {
-         rootpi=thetaStar[[ind[lgt]]]$rooti
-         #  note: beta_i = Delta*z_i + u_i  Delta is nvar x nz
-         if(drawdelta) {
-            betabar=thetaStar[[ind[lgt]]]$mu+matrix(olddelta,ncol=nz)%*%as.vector(Z[lgt,])}
-         else {
-            betabar=thetaStar[[ind[lgt]]]$mu }
-         if (rep == 1) 
-            { oldll[lgt]=llmnl(oldbetas[lgt,],lgtdata[[lgt]]$y,lgtdata[[lgt]]$X)}  
-         #   compute inc.root
-         inc.root=chol(chol2inv(chol(lgtdata[[lgt]]$hess+rootpi%*%t(rootpi))))
-         metropout=mnlRwMetropOnce(lgtdata[[lgt]]$y,lgtdata[[lgt]]$X,oldbetas[lgt,],
-                                   oldll[lgt],s,inc.root,betabar,rootpi)      
-         oldbetas[lgt,]=metropout$betadraw
-         oldll[lgt]=metropout$oldll
-      }
-   #
-   #
-   #       print time to completion and draw # every 100th draw
-   #
-   if(((rep/100)*100) ==(floor(rep/100)*100))
-     {ctime=proc.time()[3]
-      timetoend=((ctime-itime)/rep)*(R+1-rep)
-      cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
-      fsh()}
-   #
-   #       save every keepth draw
-   #
-   mkeep=rep/keep
-   if((mkeep*keep) == (floor(mkeep)*keep))
-      { betadraw[,,mkeep]=oldbetas 
-        probdraw[mkeep,]=oldprob
- 		  alphadraw[mkeep,]=alpha
-        Istardraw[mkeep,]=Istar
-        adraw[mkeep,]=lambda$Amu
-        nudraw[mkeep,]=lambda$nu
-        vdraw[mkeep,]=lambda$V[1,1]/lambda$nu
-        loglike[mkeep]=sum(oldll)
-        if(drawdelta) Deltadraw[mkeep,]=olddelta
-        compdraw[[mkeep]]=list(list(mu=mgout$thetaNp1[[1]],rooti=mgout$thetaNp1[[2]]))
-      }
-        
-}
-ctime=proc.time()[3]
-cat(" Total Time Elapsed: ",round((ctime-itime)/60,2),fill=TRUE)
-if(drawdelta){
-   attributes(Deltadraw)$class=c("bayesm.mat","mcmc")
-   attributes(Deltadraw)$mcpar=c(1,R,keep)}
-attributes(betadraw)$class=c("bayesm.hcoef")
-nmix=list(probdraw=probdraw,zdraw=NULL,compdraw=compdraw)
-attributes(nmix)$class="bayesm.nmix"
-attributes(adraw)$class=c("bayesm.mat","mcmc")
-attributes(nudraw)$class=c("bayesm.mat","mcmc")
-attributes(vdraw)$class=c("bayesm.mat","mcmc")
-attributes(Istardraw)$class=c("bayesm.mat","mcmc")
-attributes(alphadraw)$class=c("bayesm.mat","mcmc")
-if(drawdelta) 
-   {return(list(Deltadraw=Deltadraw,betadraw=betadraw,nmix=nmix,alphadraw=alphadraw,Istardraw=Istardraw,
-	            adraw=adraw,nudraw=nudraw,vdraw=vdraw,loglike=loglike))} 
-else
-   {return(list(betadraw=betadraw,nmix=nmix,alphadraw=alphadraw,Istardraw=Istardraw,
-	            adraw=adraw,nudraw=nudraw,vdraw=vdraw,loglike=loglike))} 
-}
diff --git a/R/rhierMnlDP_rcpp.r b/R/rhierMnlDP_rcpp.r
new file mode 100644
index 0000000..269555f
--- /dev/null
+++ b/R/rhierMnlDP_rcpp.r
@@ -0,0 +1,296 @@
+rhierMnlDP=function(Data,Prior,Mcmc){
+#
+#  created 3/08 by Rossi from rhierMnlRwMixture adding DP draw for to replace finite mixture of normals
+#
+# revision history:
+#   changed 12/17/04 by rossi to fix bug in drawdelta when there is zero/one unit
+#   in a mixture component
+#   added loglike output, changed to reflect new argument order in llmnl, mnlHess 9/05
+#   changed weighting scheme to (1-w)logl_i + w*Lbar (normalized) 12/05
+#   3/07 added classes
+#   W. Taylor 4/15 - added nprint option to MCMC argument
+#
+# purpose: run hierarchical mnl logit model with mixture of normals 
+#   using RW and cov(RW inc) = (hess_i + Vbeta^-1)^-1
+#   uses normal approximation to pooled likelihood
+#
+# Arguments:
+#   Data contains a list of (p,lgtdata, and possibly Z)
+#      p is number of choice alternatives
+#      lgtdata is a list of lists (one list per unit)
+#          lgtdata[[i]]=list(y,X)
+#             y is a vector indicating alternative chosen
+#               integers 1:p indicate alternative
+#             X is a length(y)*p x nvar matrix of values of
+#               X vars including intercepts
+#             Z is an length(lgtdata) x nz matrix of values of variables
+#               note: Z should NOT contain an intercept
+#   Prior contains a list of (deltabar,Ad,lambda_hyper,Prioralpha)
+#       alpha: starting value
+#       lambda_hyper: hyperparms of prior on lambda
+#       Prioralpha: hyperparms of alpha prior; a list of (Istarmin,Istarmax,power)
+#       if elements of the prior don't exist, defaults are assumed
+#   Mcmc contains a list of (s,c,R,keep,nprint)
+#
+# Output:  as list containing
+#   Deltadraw R/keep  x nz*nvar matrix of draws of Delta, first row is initial value
+#   betadraw is nlgt x nvar x R/keep array of draws of betas
+#   probdraw is R/keep x 1 matrix of draws of probs of mixture components
+#   compdraw is a list of list of lists (length R/keep)
+#      compdraw[[rep]] is the repth draw of components for mixtures
+#   loglike  log-likelihood at each kept draw
+#
+# Priors:
+#    beta_i = D %*% z[i,] + u_i
+#       vec(D)~N(deltabar)
+#       u_i ~ N(theta_i)
+#       theta_i~G
+#       G|lambda,alpha ~ DP(G|G0(lambda),alpha)
+#
+#        lambda:
+#           G0 ~ N(mubar,Sigma (x) Amu^-1)
+#           mubar=vec(mubar)
+#           Sigma ~ IW(nu,nu*v*I)  note: mode(Sigma)=nu/(nu+2)*v*I
+#           mubar=0
+#           amu is uniform on grid specified by alim
+#           nu is log uniform, nu=d-1+exp(Z) z is uniform on seq defined by nulim
+#           v is uniform on sequence specified by vlim
+#
+#        Prioralpha:
+#           alpha ~ (1-(alpha-alphamin)/(alphamax-alphamin))^power
+#           alphamin=exp(digamma(Istarmin)-log(gamma+log(N)))
+#           alphamax=exp(digamma(Istarmax)-log(gamma+log(N)))
+#           gamma= .5772156649015328606
+#
+# MCMC parameters
+#   s is the scaling parameter for the RW inc covariance matrix; s^2 Var is inc cov
+#      matrix
+#   w is parameter for weighting function in fractional likelihood
+#      w is the weight on the normalized pooled likelihood 
+#   R is number of draws
+#   keep is thinning parameter, keep every keepth draw
+#   nprint - print estimated time remaining on every nprint'th draw
+#--------------------------------------------------------------------------------------------------
+
+# fractional likelihood: (1-w)*unit loglike + w*wgt*(quadratic approx to pooled loglike at betapooled)
+llmnlFract=
+function(beta,y,X,betapooled,rootH,w,wgt){
+z=as.vector(rootH%*%(beta-betapooled))
+return((1-w)*llmnl(beta,y,X)+w*wgt*(-.5*(z%*%z)))
+}
+
+#
+#  check arguments
+#
+if(missing(Data)) {pandterm("Requires Data argument -- list of p,lgtdata, and (possibly) Z")}
+  if(is.null(Data$p)) {pandterm("Requires Data element p (# choice alternatives)") }
+  p=Data$p
+  if(is.null(Data$lgtdata)) {pandterm("Requires Data element lgtdata (list of data for each unit)")}
+  lgtdata=Data$lgtdata
+  nlgt=length(lgtdata)
+  drawdelta=TRUE
+if(is.null(Data$Z)) { cat("Z not specified",fill=TRUE); fsh() ; drawdelta=FALSE}
+  else {if (nrow(Data$Z) != nlgt) {pandterm(paste("Nrow(Z) ",nrow(Data$Z),"ne number logits ",nlgt))}
+      else {Z=Data$Z}}
+  if(drawdelta) {
+     nz=ncol(Z)
+     colmeans=apply(Z,2,mean)
+     if(sum(abs(colmeans)) > .00001) # abs: positive and negative column means must not cancel
+       {pandterm(paste("Z does not appear to be de-meaned: colmeans= ",colmeans))}
+  }
+#
+# check lgtdata for validity
+#
+ypooled=NULL
+Xpooled=NULL
+if(!is.null(lgtdata[[1]]$X)) {oldncol=ncol(lgtdata[[1]]$X)}
+for (i in 1:nlgt) 
+{
+    if(is.null(lgtdata[[i]]$y)) {pandterm(paste("Requires element y of lgtdata[[",i,"]]"))}
+    if(is.null(lgtdata[[i]]$X)) {pandterm(paste("Requires element X of lgtdata[[",i,"]]"))}
+    ypooled=c(ypooled,lgtdata[[i]]$y)
+    nrowX=nrow(lgtdata[[i]]$X)
+    if((nrowX/p) !=length(lgtdata[[i]]$y)) {pandterm(paste("nrow(X) ne p*length(yi); exception at unit",i))}
+    newncol=ncol(lgtdata[[i]]$X)
+    if(newncol != oldncol) {pandterm(paste("All X elements must have same # of cols; exception at unit",i))}
+    Xpooled=rbind(Xpooled,lgtdata[[i]]$X)
+    oldncol=newncol
+}
+nvar=ncol(Xpooled)
+levely=as.numeric(levels(as.factor(ypooled)))
+if(length(levely) != p) {pandterm(paste("y takes on ",length(levely)," values -- must be = p"))}
+bady=FALSE
+for (i in 1:p )
+{
+    if(levely[i] != i) bady=TRUE
+}
+cat("Table of Y values pooled over all units",fill=TRUE)
+print(table(ypooled))
+if (bady) 
+  {pandterm("Invalid Y")}
+#
+# check on prior
+#
+alimdef=BayesmConstant.DPalimdef
+nulimdef=BayesmConstant.DPnulimdef
+vlimdef=BayesmConstant.DPvlimdef
+
+if(missing(Prior)) {Prior=NULL}
+
+if(is.null(Prior$lambda_hyper)) {lambda_hyper=list(alim=alimdef,nulim=nulimdef,vlim=vlimdef)}
+   else {lambda_hyper=Prior$lambda_hyper;
+       if(is.null(lambda_hyper$alim)) {lambda_hyper$alim=alimdef}
+       if(is.null(lambda_hyper$nulim)) {lambda_hyper$nulim=nulimdef} 
+       if(is.null(lambda_hyper$vlim)) {lambda_hyper$vlim=vlimdef}
+       }
+if(is.null(Prior$Prioralpha)) {Prioralpha=list(Istarmin=BayesmConstant.DPIstarmin,Istarmax=min(50,0.1*nlgt),power=BayesmConstant.DPpower)}
+   else {Prioralpha=Prior$Prioralpha;
+       if(is.null(Prioralpha$Istarmin)) {Prioralpha$Istarmin=BayesmConstant.DPIstarmin} else {Prioralpha$Istarmin=Prioralpha$Istarmin}
+       if(is.null(Prioralpha$Istarmax)) 
+       {Prioralpha$Istarmax=min(50,0.1*nlgt)} else {Prioralpha$Istarmax=Prioralpha$Istarmax}
+      if(is.null(Prioralpha$power)) {Prioralpha$power=BayesmConstant.DPpower}
+   }	
+gamma= BayesmConstant.gamma
+Prioralpha$alphamin=exp(digamma(Prioralpha$Istarmin)-log(gamma+log(nlgt)))
+Prioralpha$alphamax=exp(digamma(Prioralpha$Istarmax)-log(gamma+log(nlgt)))
+Prioralpha$n=nlgt
+#
+# check Prior arguments for validity
+#
+if(lambda_hyper$alim[1]<0) {pandterm("alim[1] must be >0")}
+if(lambda_hyper$nulim[1]<0) {pandterm("nulim[1] must be >0")}
+if(lambda_hyper$vlim[1]<0) {pandterm("vlim[1] must be >0")}
+if(Prioralpha$Istarmin <1){pandterm("Prioralpha$Istarmin must be >= 1")}
+if(Prioralpha$Istarmax <= Prioralpha$Istarmin){pandterm("Prioralpha$Istarmin must be < Prioralpha$Istarmax")}	
+
+if(is.null(Prior$Ad) & drawdelta) {Ad=BayesmConstant.A*diag(nvar*nz)} else {Ad=Prior$Ad}
+if(drawdelta) {if(ncol(Ad) != nvar*nz | nrow(Ad) != nvar*nz) {pandterm("Ad must be nvar*nz x nvar*nz")}}
+if(is.null(Prior$deltabar)& drawdelta) {deltabar=rep(0,nz*nvar)} else {deltabar=Prior$deltabar}
+  if(drawdelta) {if(length(deltabar) != nz*nvar) {pandterm("deltabar must be of length nvar*nz")}}
+#
+# check on Mcmc
+#
+if(missing(Mcmc)) 
+  {pandterm("Requires Mcmc list argument")}
+else 
+   { 
+    if(is.null(Mcmc$s)) {s=BayesmConstant.RRScaling/sqrt(nvar)} else {s=Mcmc$s}
+    if(is.null(Mcmc$w)) {w=BayesmConstant.w}  else {w=Mcmc$w}
+    if(is.null(Mcmc$keep)) {keep=BayesmConstant.keep} else {keep=Mcmc$keep}
+    if(is.null(Mcmc$nprint)) {nprint=BayesmConstant.nprint} else {nprint=Mcmc$nprint}
+      if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')} 
+    if(is.null(Mcmc$maxuniq)) {maxuniq=BayesmConstant.DPmaxuniq} else {maxuniq=Mcmc$maxuniq} # fixed: was 'keep=Mcmc$maxuniq', which clobbered the thinning parm and ignored user maxuniq
+    if(is.null(Mcmc$gridsize)) {gridsize=BayesmConstant.DPgridsize} else {gridsize=Mcmc$gridsize}
+    if(is.null(Mcmc$R)) {pandterm("Requires R argument in Mcmc list")} else {R=Mcmc$R}
+    }
+#
+# print out problem
+#
+cat(" ",fill=TRUE)
+cat("Starting MCMC Inference for Hierarchical Logit:",fill=TRUE)
+cat("   Dirichlet Process Prior",fill=TRUE)
+cat(paste("  ",p," alternatives; ",nvar," variables in X"),fill=TRUE)
+cat(paste("   for ",nlgt," cross-sectional units"),fill=TRUE)
+cat(" ",fill=TRUE)
+cat(" Prior Parms: ",fill=TRUE)
+cat("  G0 ~ N(mubar,Sigma (x) Amu^-1)",fill=TRUE)
+cat("   mubar = ",0,fill=TRUE)
+cat("   Sigma ~ IW(nu,nu*v*I)",fill=TRUE)
+cat("   Amu ~ uniform[",lambda_hyper$alim[1],",",lambda_hyper$alim[2],"]",fill=TRUE)
+cat("   nu ~ uniform on log grid  [",nvar-1+exp(lambda_hyper$nulim[1]),
+             ",",nvar-1+exp(lambda_hyper$nulim[2]),"]",fill=TRUE)
+cat("   v ~ uniform[",lambda_hyper$vlim[1],",",lambda_hyper$vlim[2],"]",fill=TRUE)
+cat(" ",fill=TRUE)
+cat("  alpha ~ (1-(alpha-alphamin)/(alphamax-alphamin))^power",fill=TRUE)
+cat("   Istarmin = ",Prioralpha$Istarmin,fill=TRUE)
+cat("   Istarmax = ",Prioralpha$Istarmax,fill=TRUE)
+cat("   alphamin = ",Prioralpha$alphamin,fill=TRUE)
+cat("   alphamax = ",Prioralpha$alphamax,fill=TRUE)
+cat("   power = ",Prioralpha$power,fill=TRUE)
+cat(" ",fill=TRUE)
+if(drawdelta) 
+{
+   cat("deltabar",fill=TRUE)
+   print(deltabar)
+   cat("Ad",fill=TRUE)
+   print(Ad)
+}
+cat(" ",fill=TRUE)
+cat("MCMC Parms: ",fill=TRUE)
+cat(paste("s=",round(s,3)," w= ",w," R= ",R," keep= ",keep," nprint= ",nprint," maxuniq= ",maxuniq,
+          " gridsize for lambda hyperparms= ",gridsize),fill=TRUE)
+cat("",fill=TRUE)
+#
+# allocate space for draws
+#
+oldbetas=matrix(double(nlgt*nvar),ncol=nvar)
+
+#
+# initialize compute quantities for Metropolis
+#
+cat("initializing Metropolis candidate densities for ",nlgt," units ...",fill=TRUE)
+fsh()
+#
+#  now go thru and computed fraction likelihood estimates and hessians
+#
+#       Lbar=log(pooled likelihood^(n_i/N))
+#
+#       fraction loglike = (1-w)*loglike_i + w*Lbar
+#
+betainit=c(rep(0,nvar))
+#
+#  compute pooled optimum
+#
+out=optim(betainit,llmnl,method="BFGS",control=list( fnscale=-1,trace=0,reltol=1e-6), 
+     X=Xpooled,y=ypooled)
+betapooled=out$par
+H=mnlHess(betapooled,ypooled,Xpooled)
+rootH=chol(H)
+#
+# initialize betas for all units
+#
+for (i in 1:nlgt) 
+{
+   wgt=length(lgtdata[[i]]$y)/length(ypooled)
+   out=optim(betapooled,llmnlFract,method="BFGS",control=list( fnscale=-1,trace=0,reltol=1e-4), 
+   X=lgtdata[[i]]$X,y=lgtdata[[i]]$y,betapooled=betapooled,rootH=rootH,w=w,wgt=wgt)
+   if(out$convergence == 0) 
+     { hess=mnlHess(out$par,lgtdata[[i]]$y,lgtdata[[i]]$X)
+       lgtdata[[i]]=c(lgtdata[[i]],list(converge=1,betafmle=out$par,hess=hess)) }
+   else
+     { lgtdata[[i]]=c(lgtdata[[i]],list(converge=0,betafmle=c(rep(0,nvar)),
+        hess=diag(nvar))) }
+   oldbetas[i,]=lgtdata[[i]]$betafmle
+   if(i%%50 ==0) cat("  completed unit #",i,fill=TRUE)
+   fsh()
+}
+
+#Initialize placeholders when drawdelta == FALSE
+if (drawdelta==FALSE){
+  Z = matrix(0)
+  deltabar = 0
+  Ad = matrix(0)
+}
+
+###################################################################
+# Wayne Taylor
+# 2/21/2015
+###################################################################
+out = rhierMnlDP_rcpp_loop(R,keep,nprint,
+                           lgtdata,Z,deltabar,Ad,Prioralpha,lambda_hyper,
+                           drawdelta,nvar,oldbetas,s,maxuniq,gridsize,
+                           BayesmConstant.A,BayesmConstant.nuInc,BayesmConstant.DPalpha)
+###################################################################
+
+if(drawdelta){
+  attributes(out$Deltadraw)$class=c("bayesm.mat","mcmc")
+  attributes(out$Deltadraw)$mcpar=c(1,R,keep)}
+attributes(out$betadraw)$class=c("bayesm.hcoef")
+attributes(out$nmix)$class="bayesm.nmix"
+attributes(out$adraw)$class=c("bayesm.mat","mcmc")
+attributes(out$nudraw)$class=c("bayesm.mat","mcmc")
+attributes(out$vdraw)$class=c("bayesm.mat","mcmc")
+attributes(out$Istardraw)$class=c("bayesm.mat","mcmc")
+attributes(out$alphadraw)$class=c("bayesm.mat","mcmc")
+
+return(out)
+}
\ No newline at end of file
diff --git a/R/rhierMnlRwMixture.R b/R/rhierMnlRwMixture_rcpp.r
old mode 100755
new mode 100644
similarity index 57%
rename from R/rhierMnlRwMixture.R
rename to R/rhierMnlRwMixture_rcpp.r
index 025eb25..ace822f
--- a/R/rhierMnlRwMixture.R
+++ b/R/rhierMnlRwMixture_rcpp.r
@@ -1,6 +1,4 @@
-rhierMnlRwMixture=
-function(Data,Prior,Mcmc)
-{
+rhierMnlRwMixture=function(Data,Prior,Mcmc){
 #
 # revision history:
 #   changed 12/17/04 by rossi to fix bug in drawdelta when there is zero/one unit
@@ -9,6 +7,7 @@ function(Data,Prior,Mcmc)
 #   changed weighting scheme to (1-w)logl_i + w*Lbar (normalized) 12/05
 #   3/07 added classes
 #   9/08 changed Dirichlet a check
+#   W. Taylor 4/15 - added nprint option to MCMC argument  
 #
 # purpose: run hierarchical mnl logit model with mixture of normals 
 #   using RW and cov(RW inc) = (hess_i + Vbeta^-1)^-1
@@ -28,7 +27,7 @@ function(Data,Prior,Mcmc)
 #   Prior contains a list of (deltabar,Ad,mubar,Amu,nu,V,ncomp) 
 #      ncomp is the number of components in normal mixture
 #           if elements of Prior (other than ncomp) do not exist, defaults are used
-#   Mcmc contains a list of (s,c,R,keep)
+#   Mcmc contains a list of (s,c,R,keep,nprint)
 #
 # Output:  as list containing
 #   Deltadraw R/keep  x nz*nvar matrix of draws of Delta, first row is initial value
@@ -56,10 +55,10 @@ function(Data,Prior,Mcmc)
 #      w is the weight on the normalized pooled likelihood 
 #   R is number of draws
 #   keep is thinning parameter, keep every keepth draw
+#   nprint - print estimated time remaining on every nprint'th draw
 #
 #  check arguments
 #
-pandterm=function(message) { stop(message,call.=FALSE) }
 if(missing(Data)) {pandterm("Requires Data argument -- list of p,lgtdata, and (possibly) Z")}
   if(is.null(Data$p)) {pandterm("Requires Data element p (# chce alternatives)") }
   p=Data$p
@@ -76,7 +75,6 @@ if(is.null(Data$Z)) { cat("Z not specified",fill=TRUE); fsh() ; drawdelta=FALSE}
      if(sum(colmeans) > .00001) 
        {pandterm(paste("Z does not appear to be de-meaned: colmeans= ",colmeans))}
   }
-  
 #
 # check lgtdata for validity
 #
@@ -115,17 +113,17 @@ if(missing(Prior))
 if(is.null(Prior$ncomp)) {pandterm("Requires Prior element ncomp (num of mixture components)")} else {ncomp=Prior$ncomp}
 if(is.null(Prior$mubar)) {mubar=matrix(rep(0,nvar),nrow=1)} else { mubar=matrix(Prior$mubar,nrow=1)}
   if(ncol(mubar) != nvar) {pandterm(paste("mubar must have ncomp cols, ncol(mubar)= ",ncol(mubar)))}
-if(is.null(Prior$Amu)) {Amu=matrix(.01,ncol=1)} else {Amu=matrix(Prior$Amu,ncol=1)}
+if(is.null(Prior$Amu)) {Amu=matrix(BayesmConstant.A,ncol=1)} else {Amu=matrix(Prior$Amu,ncol=1)}
   if(ncol(Amu) != 1 | nrow(Amu) != 1) {pandterm("Am must be a 1 x 1 array")}
-if(is.null(Prior$nu)) {nu=nvar+3}  else {nu=Prior$nu}
+if(is.null(Prior$nu)) {nu=nvar+BayesmConstant.nuInc}  else {nu=Prior$nu}
   if(nu < 1) {pandterm("invalid nu value")}
 if(is.null(Prior$V)) {V=nu*diag(nvar)} else {V=Prior$V}
   if(sum(dim(V)==c(nvar,nvar)) !=2) pandterm("Invalid V in prior")
-if(is.null(Prior$Ad) & drawdelta) {Ad=.01*diag(nvar*nz)} else {Ad=Prior$Ad}
+if(is.null(Prior$Ad) & drawdelta) {Ad=BayesmConstant.A*diag(nvar*nz)} else {Ad=Prior$Ad}
 if(drawdelta) {if(ncol(Ad) != nvar*nz | nrow(Ad) != nvar*nz) {pandterm("Ad must be nvar*nz x nvar*nz")}}
 if(is.null(Prior$deltabar)& drawdelta) {deltabar=rep(0,nz*nvar)} else {deltabar=Prior$deltabar}
   if(drawdelta) {if(length(deltabar) != nz*nvar) {pandterm("deltabar must be of length nvar*nz")}}
-if(is.null(Prior$a)) { a=rep(5,ncomp)} else {a=Prior$a}
+if(is.null(Prior$a)) { a=rep(BayesmConstant.a,ncomp)} else {a=Prior$a}
 if(length(a) != ncomp) {pandterm("Requires dim(a)= ncomp (no of components)")}
 bada=FALSE
    for(i in 1:ncomp) { if(a[i] < 0) bada=TRUE}
@@ -137,10 +135,12 @@ if(missing(Mcmc))
   {pandterm("Requires Mcmc list argument")}
 else 
    { 
-    if(is.null(Mcmc$s)) {s=2.93/sqrt(nvar)} else {s=Mcmc$s}
-    if(is.null(Mcmc$w)) {w=.1}  else {w=Mcmc$w}
-    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
+    if(is.null(Mcmc$s)) {s=BayesmConstant.RRScaling/sqrt(nvar)} else {s=Mcmc$s}
+    if(is.null(Mcmc$w)) {w=BayesmConstant.w}  else {w=Mcmc$w}
+    if(is.null(Mcmc$keep)) {keep=BayesmConstant.keep} else {keep=Mcmc$keep}
     if(is.null(Mcmc$R)) {pandterm("Requires R argument in Mcmc list")} else {R=Mcmc$R}
+    if(is.null(Mcmc$nprint)) {nprint=BayesmConstant.nprint} else {nprint=Mcmc$nprint}
+      if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
     }
 #
 # print out problem
@@ -170,19 +170,11 @@ if(drawdelta)
 }
 cat(" ",fill=TRUE)
 cat("MCMC Parms: ",fill=TRUE)
-cat(paste("s=",round(s,3)," w= ",w," R= ",R," keep= ",keep),fill=TRUE)
+cat(paste("s=",round(s,3)," w= ",w," R= ",R," keep= ",keep," nprint= ",nprint),fill=TRUE)
 cat("",fill=TRUE)
-#
-# allocate space for draws
-#
-if(drawdelta) Deltadraw=matrix(double((floor(R/keep))*nz*nvar),ncol=nz*nvar)
-betadraw=array(double((floor(R/keep))*nlgt*nvar),dim=c(nlgt,nvar,floor(R/keep)))
-probdraw=matrix(double((floor(R/keep))*ncomp),ncol=ncomp)
-oldbetas=matrix(double(nlgt*nvar),ncol=nvar)
-oldll=double(nlgt)
-loglike=double(floor(R/keep))
-oldcomp=NULL
-compdraw=NULL
+
+oldbetas = matrix(double(nlgt * nvar), ncol = nvar)
+
 #--------------------------------------------------------------------------------------------------
 #
 #  create functions needed
@@ -192,63 +184,6 @@ function(beta,y,X,betapooled,rootH,w,wgt){
 z=as.vector(rootH%*%(beta-betapooled))
 return((1-w)*llmnl(beta,y,X)+w*wgt*(-.5*(z%*%z)))
 }
-
-mnlRwMetropOnce=
-function(y,X,oldbeta,oldll,s,inc.root,betabar,rootpi){ 
-#
-# function to execute rw metropolis for the MNL
-# y is n vector with element = 1,...,j indicating which alt chosen
-# X is nj x k matrix of xvalues for each of j alt on each of n occasions
-# RW increments are N(0,s^2*t(inc.root)%*%inc.root)
-# prior on beta is N(betabar,Sigma)  Sigma^-1=rootpi*t(rootpi)
-#	inc.root, rootpi are upper triangular
-#	this means that we are using the UL decomp of Sigma^-1 for prior 
-# oldbeta is the current
-     stay=0
-     betac=oldbeta + s*t(inc.root)%*%(matrix(rnorm(ncol(X)),ncol=1))
-     cll=llmnl(betac,y,X)
-     clpost=cll+lndMvn(betac,betabar,rootpi)
-     ldiff=clpost-oldll-lndMvn(oldbeta,betabar,rootpi)
-     alpha=min(1,exp(ldiff))
-     if(alpha < 1) {unif=runif(1)} else {unif=0}
-     if (unif <= alpha)
-             {betadraw=betac; oldll=cll}
-           else
-             {betadraw=oldbeta; stay=1}
-return(list(betadraw=betadraw,stay=stay,oldll=oldll))
-}
-drawDelta=
-function(x,y,z,comps,deltabar,Ad){
-# delta = vec(D)
-#  given z and comps (z[i] gives component indicator for the ith observation, 
-#   comps is a list of mu and rooti)
-#y is n x p
-#x is n x k
-#y = xD' + U , rows of U are indep with covs Sigma_i given by z and comps
-p=ncol(y)
-k=ncol(x)
-xtx = matrix(0.0,k*p,k*p)
-xty = matrix(0.0,p,k) #this is the unvecced version, have to vec after sum
-for(i in 1:length(comps)) {
-   nobs=sum(z==i)
-   if(nobs > 0) {
-      if(nobs == 1) 
-        { yi = matrix(y[z==i,],ncol=p); xi = matrix(x[z==i,],ncol=k)}
-      else
-        { yi = y[z==i,]; xi = x[z==i,]}
-          
-      yi = t(t(yi)-comps[[i]][[1]])
-      sigi = crossprod(t(comps[[i]][[2]]))
-      xtx = xtx + crossprod(xi) %x% sigi
-      xty = xty + (sigi %*% crossprod(yi,xi))
-      }
-}
-xty = matrix(xty,ncol=1)
-
-# then vec(t(D)) ~ N(V^{-1}(xty + Ad*deltabar),V^{-1}) V = (xtx+Ad)
-cov=chol2inv(chol(xtx+Ad))
-return(cov%*%(xty+Ad%*%deltabar) + t(chol(cov))%*%rnorm(length(deltabar)))
-}
 #-------------------------------------------------------------------------------------------------------
 #
 # intialize compute quantities for Metropolis
@@ -298,91 +233,37 @@ ninc=floor(nlgt/ncomp)
 for (i in 1:(ncomp-1)) {ind=c(ind,rep(i,ninc))}
 if(ncomp != 1) {ind = c(ind,rep(ncomp,nlgt-length(ind)))} else {ind=rep(1,nlgt)}
 #
-# initialize delta
-#
-if (drawdelta) olddelta=rep(0,nz*nvar)
-#
 # initialize probs
 #
 oldprob=rep(1/ncomp,ncomp)
 #
-# initialize comps
+#initialize delta
 #
-tcomp=list(list(mu=rep(0,nvar),rooti=diag(nvar)))
-oldcomp=rep(tcomp,ncomp)
-#
-#
-#	start main iteration loop
-#
-itime=proc.time()[3]
-cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
-fsh()
-for(rep in 1:R)
-{
-   # first draw comps,ind,p | {beta_i}, delta
-   #        ind,p need initialization comps is drawn first in sub-Gibbs
-   if(drawdelta) 
-      {mgout=rmixGibbs(oldbetas-Z%*%t(matrix(olddelta,ncol=nz)),
-      mubar,Amu,nu,V,a,oldprob,ind,oldcomp)}
-   else
-      {mgout=rmixGibbs(oldbetas,
-      mubar,Amu,nu,V,a,oldprob,ind,oldcomp)}
-   oldprob=mgout[[1]]
-   oldcomp=mgout[[3]]
-   ind=mgout[[2]]
-   # now draw delta | {beta_i}, ind, comps
-   if(drawdelta) {olddelta=drawDelta(Z,oldbetas,ind,oldcomp,deltabar,Ad)}
-   #
-   #  loop over all lgt equations drawing beta_i | ind[i],z[i,],mu[ind[i]],rooti[ind[i]]
-   #
-      for (lgt in 1:nlgt) 
-      {
-         rootpi=oldcomp[[ind[lgt]]]$rooti
-         #  note: beta_i = Delta*z_i + u_i  Delta is nvar x nz
-         if(drawdelta) {
-            betabar=oldcomp[[ind[lgt]]]$mu+matrix(olddelta,ncol=nz)%*%as.vector(Z[lgt,])}
-         else {
-            betabar=oldcomp[[ind[lgt]]]$mu }
-         if (rep == 1) 
-            { oldll[lgt]=llmnl(oldbetas[lgt,],lgtdata[[lgt]]$y,lgtdata[[lgt]]$X)}  
-         #   compute inc.root
-         inc.root=chol(chol2inv(chol(lgtdata[[lgt]]$hess+rootpi%*%t(rootpi))))
-         metropout=mnlRwMetropOnce(lgtdata[[lgt]]$y,lgtdata[[lgt]]$X,oldbetas[lgt,],
-                                   oldll[lgt],s,inc.root,betabar,rootpi)      
-         oldbetas[lgt,]=metropout$betadraw
-         oldll[lgt]=metropout$oldll
-      }
-   #
-   #
-   #       print time to completion and draw # every 100th draw
-   #
-   if(((rep/100)*100) ==(floor(rep/100)*100))
-     {ctime=proc.time()[3]
-      timetoend=((ctime-itime)/rep)*(R+1-rep)
-      cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
-      fsh()}
-   #
-   #       save every keepth draw
-   #
-   mkeep=rep/keep
-   if((mkeep*keep) == (floor(mkeep)*keep))
-      { betadraw[,,mkeep]=oldbetas 
-        probdraw[mkeep,]=oldprob
-        loglike[mkeep]=sum(oldll)
-        if(drawdelta) Deltadraw[mkeep,]=olddelta
-        compdraw[[mkeep]]=oldcomp }
-        
+if (drawdelta){
+  olddelta = rep(0,nz*nvar)
+} else { #send placeholders to the _loop function if there is no Z matrix
+  olddelta = 0
+  Z = matrix(0)
+  deltabar = 0
+  Ad = matrix(0)
 }
-ctime=proc.time()[3]
-cat(" Total Time Elapsed: ",round((ctime-itime)/60,2),fill=TRUE)
+
+###################################################################
+# Wayne Taylor
+# 09/22/2014
+###################################################################
+draws =  rhierMnlRwMixture_rcpp_loop(lgtdata, Z,
+                                     deltabar, Ad, mubar, Amu,
+                                     nu, V, s,
+                                     R, keep, nprint, drawdelta,
+                                     as.matrix(olddelta), a, oldprob, oldbetas, ind)
+####################################################################
+
 if(drawdelta){
-   attributes(Deltadraw)$class=c("bayesm.mat","mcmc")
-   attributes(Deltadraw)$mcpar=c(1,R,keep)}
-attributes(betadraw)$class=c("bayesm.hcoef")
-nmix=list(probdraw=probdraw,zdraw=NULL,compdraw=compdraw)
-attributes(nmix)$class="bayesm.nmix"
-if(drawdelta) 
-   {return(list(Deltadraw=Deltadraw,betadraw=betadraw,nmix=nmix,loglike=loglike))} 
-else 
-   {return(list(betadraw=betadraw,nmix=nmix,loglike=loglike))}
-}
+  attributes(draws$Deltadraw)$class=c("bayesm.mat","mcmc")
+  attributes(draws$Deltadraw)$mcpar=c(1,R,keep)}
+attributes(draws$betadraw)$class=c("bayesm.hcoef")
+attributes(draws$nmix)$class="bayesm.nmix"
+
+return(draws)
+}
\ No newline at end of file
diff --git a/R/rhierNegbinRw.R b/R/rhiernegbinrw_rcpp.r
old mode 100755
new mode 100644
similarity index 55%
rename from R/rhierNegbinRw.R
rename to R/rhiernegbinrw_rcpp.r
index c58aafc..6b422bf
--- a/R/rhierNegbinRw.R
+++ b/R/rhiernegbinrw_rcpp.r
@@ -1,335 +1,252 @@
-rhierNegbinRw = 
-function(Data, Prior, Mcmc) {
-
-#   Revision History
-#	  Sridhar Narayanan - 05/2005
-#         P. Rossi 6/05
-#         fixed error with nobs not specified and changed llnegbinFract 9/05
-#         3/07 added classes
-#         3/08 fixed fractional likelihood
-#
-#   Model
-#       (y_i|lambda_i,alpha) ~ Negative Binomial(Mean = lambda_i, Overdispersion par = alpha)
-#
-#       ln(lambda_i) =  X_i * beta_i
-#
-#       beta_i = Delta'*z_i + nu_i
-#               nu_i~N(0,Vbeta)
-#
-#   Priors
-#       vec(Delta|Vbeta) ~ N(vec(Deltabar), Vbeta (x) (Adelta^-1))
-#       Vbeta ~ Inv Wishart(nu, V)
-#       alpha ~ Gamma(a,b) where mean = a/b and variance = a/(b^2)
-#
-#   Arguments
-#       Data = list of regdata,Z 
-#           regdata is a list of lists each list with members y, X
-#              e.g. regdata[[i]]=list(y=y,X=X)
-#              X has nvar columns including a first column of ones
-#              Z is nreg=length(regdata) x nz with a first column of ones
-#
-#       Prior - list containing the prior parameters
-#           Deltabar, Adelta - mean of Delta prior, inverse of variance covariance of Delta prior
-#           nu, V - parameters of Vbeta prior
-#           a, b - parameters of alpha prior
-#
-#       Mcmc - list containing
-#           R is number of draws
-#           keep is thinning parameter (def = 1)
-#           s_beta - scaling parameter for beta RW (def = 2.93/sqrt(nvar))
-#           s_alpha - scaling parameter for alpha RW (def = 2.93)
-#           w - fractional weighting parameter (def = .1)
-#           Vbeta0, Delta0 - initial guesses for parameters, if not supplied default values are used
-#
-
-
-#
-# Definitions of functions used within rhierNegbinRw
-#
-
-llnegbin = 
-function(par,X,y, nvar) {
-# Computes the log-likelihood
-    beta = par[1:nvar]
-    alpha = exp(par[nvar+1])+1.0e-50
-    mean=exp(X%*%beta)
-    prob=alpha/(alpha+mean)
-    prob=ifelse(prob<1.0e-100,1.0e-100,prob)
-     out=dnbinom(y,size=alpha,prob=prob,log=TRUE)
-     return(sum(out))
-}
-
-llnegbinFract = 
-function(par,X,y,Xpooled, ypooled, w,wgt, nvar,lnalpha)  {
-# Computes the fractional log-likelihood at the unit level
-    theta = c(par,lnalpha)
-    (1-w)*llnegbin(theta,X,y,nvar) + w*wgt*llnegbin(theta,Xpooled,ypooled, nvar) 
-}
-
-lpostbetai = 
-function(beta, alpha, X, y, Delta, Z, Vbetainv) {
-# Computes the unnormalized log posterior for beta at the unit level
-    lambda = exp(X %*% as.vector(beta))
-    p = alpha/(alpha + lambda)
-    residual = as.vector(beta - as.vector(Z%*%Delta))
-    sum(alpha * log(p) + y * log(1-p)) - 0.5*( t(residual)%*%Vbetainv%*%residual)
-}
-
-
-lpostalpha = 
-function(alpha, beta, regdata, ypooled, a, b, nreg) {
-# Computes the unnormalized log posterior for alpha
-    Xbeta=NULL
-    for (i in 1:nreg) {Xbeta = rbind(Xbeta,regdata[[i]]$X%*%beta[i,]) }
-    sum(log(dnbinom(ypooled,size=alpha,mu=exp(Xbeta)))) + (a-1)*log(alpha) - b* alpha
-}
-
-
-#
-# Error Checking
-#
-pandterm=function(message) {stop(message,call.=FALSE)}
-if(missing(Data)) {pandterm("Requires Data argument -- list of regdata and (possibly) Z")}
-
-if(is.null(Data$regdata)) {
-    pandterm("Requires Data element regdata -- list of data for each unit : y and X")
-}
-regdata=Data$regdata
-nreg = length(regdata)
-
-if (is.null(Data$Z)) {
-    cat("Z not specified - using a column of ones instead", fill = TRUE)
-    Z = matrix(rep(1,nreg),ncol=1)
-}
-else {
-    if (nrow(Data$Z) != nreg) {
-        pandterm(paste("Nrow(Z) ", nrow(Z), "ne number units ",nreg))
-    }
-    else {
-        Z = Data$Z
-    }
-}
-nz = ncol(Z)
-
-dimfun = function(l) {
-    c(length(l$y),dim(l$X))
-}
-dims=sapply(regdata,dimfun)
-dims = t(dims)
-nvar = quantile(dims[,3],prob=0.5)
-for (i in 1:nreg) {
-        if (dims[i, 1] != dims[i, 2] || dims[i, 3] != nvar) {
-            pandterm(paste("Bad Data dimensions for unit ", i, 
-                " dims(y,X) =", dims[i, ]))
-        }
-}
-
-ypooled = NULL
-Xpooled = NULL
-for (i in 1:nreg) {
-    ypooled = c(ypooled,regdata[[i]]$y)
-    Xpooled = rbind(Xpooled,regdata[[i]]$X)
-}
-nobs= length(ypooled)
-
-nvar=ncol(Xpooled)
-#
-# check for prior elements
-#
-if(missing(Prior)) {
-    Deltabar=matrix(rep(0,nvar*nz),nrow=nz) ; Adelta=0.01*diag(nz) ; nu=nvar+3; V=nu*diag(nvar); a=0.5; b=0.1;
-}
-else {
-    if(is.null(Prior$Deltabar)) {Deltabar=matrix(rep(0,nvar*nz),nrow=nz)} else {Deltabar=Prior$Deltabar}
-    if(is.null(Prior$Adelta)) {Adelta=0.01*diag(nz)} else {Adelta=Prior$Adelta}
-    if(is.null(Prior$nu)) {nu=nvar+3} else {nu=Prior$nu}
-    if(is.null(Prior$V)) {V=nu*diag(nvar)} else {V=Prior$V}
-    if(is.null(Prior$a)) {a=0.5} else {a=Prior$a}
-    if(is.null(Prior$b)) {b=0.1} else {b=Prior$b}
-}
-
-if(sum(dim(Deltabar) == c(nz,nvar)) != 2) pandterm("Deltabar is of incorrect dimension")
-if(sum(dim(Adelta)==c(nz,nz)) != 2) pandterm("Adelta is of incorrect dimension")
-if(nu < nvar) pandterm("invalid nu value")
-if(sum(dim(V)==c(nvar,nvar)) != 2) pandterm("V is of incorrect dimension")
-if((length(a) != 1) | (a <=0)) pandterm("a should be a positive number")
-if((length(b) != 1) | (b <=0)) pandterm("b should be a positive number")
-
-#
-# check for Mcmc 
-#
-if(missing(Mcmc)) pandterm("Requires Mcmc argument -- at least R")
-if(is.null(Mcmc$R)) {pandterm("Requires element R of Mcmc")} else {R=Mcmc$R}
-if(is.null(Mcmc$Vbeta0)) {Vbeta0=diag(nvar)} else {Vbeta0=Mcmc$Vbeta0}
-if(sum(dim(Vbeta0) == c(nvar,nvar)) !=2) pandterm("Vbeta0 is not of dimension nvar")
-if(is.null(Mcmc$Delta0)) {Delta0=matrix(rep(0,nz*nvar),nrow=nz)} else {Delta0=Mcmc$Delta0}
-if(sum(dim(Delta0) == c(nz,nvar)) !=2) pandterm("Delta0 is not of dimension nvar by nz")
-if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
-if(is.null(Mcmc$s_alpha)) { s_alpha=2.93} 
-    else {s_alpha= Mcmc$s_alpha }
-if(is.null(Mcmc$s_beta)) { s_beta=2.93/sqrt(nvar)} 
-    else {s_beta=Mcmc$s_beta }
-if(is.null(Mcmc$w)) { w=.1} 
-    else {w = Mcmc$w}
-
-#out = rhierNegbinRw(Data, Prior, Mcmc)
-# print out problem
-#
-cat(" ",fill=TRUE)
-cat("Starting Random Walk Metropolis Sampler for Hierarchical Negative Binomial Regression",fill=TRUE)
-cat("  ",nobs," obs; ",nvar," covariates (including the intercept); ",fill=TRUE)
-cat("  ",nz," individual characteristics (including the intercept) ",fill=TRUE)
-cat(" ",fill=TRUE)
-cat("Prior Parameters:",fill=TRUE)
-cat("Deltabar",fill=TRUE)
-print(Deltabar)
-cat("Adelta",fill=TRUE)
-print(Adelta)
-cat("nu",fill=TRUE)
-print(nu)
-cat("V",fill=TRUE)
-print(V)
-cat("a",fill=TRUE)
-print(a)
-cat("b",fill=TRUE)
-print(b)
-cat(" ",fill=TRUE)
-cat("MCMC Parameters:",fill=TRUE)
-cat(R," reps; keeping every ",keep,"th draw",fill=TRUE)
-cat("s_alpha = ",s_alpha,fill=TRUE)
-cat("s_beta = ",s_beta,fill=TRUE)
-cat("Fractional Likelihood Weight Parameter = ",w,fill=TRUE)
-cat(" ",fill=TRUE)
-
-par = rep(0,(nvar+1))
-cat("initializing Metropolis candidate densities for ",nreg,"units ...",fill=TRUE)
-fsh()
-mle = optim(par,llnegbin, X=Xpooled, y=ypooled, nvar=nvar, 
-      method="L-BFGS-B", upper=c(Inf,Inf,Inf,log(100000000)), hessian=TRUE, control=list(fnscale=-1))
-fsh()
-beta_mle=mle$par[1:nvar]
-alpha_mle = exp(mle$par[nvar+1])
-varcovinv = -mle$hessian
-Delta = Delta0
-Beta = t(matrix(rep(beta_mle,nreg),ncol=nreg))
-Vbetainv = solve(Vbeta0)
-Vbeta = Vbeta0
-alpha = alpha_mle
-alphacvar = s_alpha/varcovinv[nvar+1,nvar+1]
-alphacroot = sqrt(alphacvar)
-#cat("beta_mle = ",beta_mle,fill=TRUE)
-#cat("alpha_mle = ",alpha_mle, fill = TRUE)
-#fsh()
-
-hess_i=NULL
-if(nobs > 1000){
-  sind=sample(c(1:nobs),size=1000)
-  ypooleds=ypooled[sind]
-  Xpooleds=Xpooled[sind,]
-  }
-# Find the individual candidate hessian
-for (i in 1:nreg) {
-    wgt = length(regdata[[i]]$y)/length(ypooleds)
-    mle2 = optim(mle$par[1:nvar],llnegbinFract, X=regdata[[i]]$X, y=regdata[[i]]$y, Xpooled=Xpooleds, 
-           ypooled=ypooleds, w=w,wgt=wgt, nvar=nvar, lnalpha=mle$par[nvar+1], 
-           method="BFGS", hessian=TRUE, control=list(fnscale=-1, trace=0))
-    if (mle2$convergence==0)
-        hess_i[[i]] = list(hess=-mle2$hessian)
-    else
-        hess_i[[i]] = diag(rep(1,nvar))
-   if(i%%50 ==0) cat("  completed unit #",i,fill=TRUE)	
-   fsh()
-}
-
-oldlpostbeta = rep(0,nreg)
-nacceptbeta = 0
-nacceptalpha = 0
-clpostbeta = rep(0,nreg)
-
-Betadraw = array(double((floor(R/keep)) * nreg * nvar), dim = c(nreg, 
-        nvar, floor(R/keep)))
-
-alphadraw = rep(0,floor(R/keep))
-llike = rep(0,floor(R/keep))
-Vbetadraw=matrix(double(floor(R/keep)*(nvar*nvar)),ncol=(nvar*nvar))
-Deltadraw=matrix(double(floor(R/keep)*(nvar*nz)),ncol=(nvar*nz))
-
-#
-#	start main iteration loop
-#
-itime=proc.time()[3]
-cat(" ",fill=TRUE)
-cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
-fsh()
-
-for (r in 1:R) 
-{
-#   Draw betai
-    for (i in 1:nreg) {
-        betacvar = s_beta*solve(hess_i[[i]]$hess + Vbetainv)
-        betaroot = t(chol(betacvar))
-        betac = as.vector(Beta[i,]) + betaroot%*%rnorm(nvar)
-
-        oldlpostbeta[i] = lpostbetai(as.vector(Beta[i,]), alpha, regdata[[i]]$X, regdata[[i]]$y, Delta, Z[i,],Vbetainv)
-        clpostbeta[i] = lpostbetai(betac, alpha, regdata[[i]]$X, regdata[[i]]$y, Delta, Z[i,],Vbetainv)
-        
-        ldiff=clpostbeta[i]-oldlpostbeta[i]
-        acc=min(1,exp(ldiff))
-        if(acc < 1) {unif=runif(1)} else {unif=0}
-
-        if (unif <= acc) {
-            Beta[i,]=betac
-            nacceptbeta=nacceptbeta+1
-        }
-    }
-
-#   Draw alpha
-    logalphac = rnorm(1,mean=log(alpha), sd=alphacroot)
-    oldlpostalpha = lpostalpha(alpha, Beta, regdata, ypooled,  a, b, nreg)
-    clpostalpha = lpostalpha(exp(logalphac), Beta, regdata, ypooled, a, b, nreg)
-    ldiff=clpostalpha-oldlpostalpha
-    acc=min(1,exp(ldiff))
-    if(acc < 1) {unif=runif(1)} else {unif=0}
-    if (unif <= acc) {
-        alpha=exp(logalphac)
-        nacceptalpha=nacceptalpha+1
-    }
-
-#   Draw Vbeta and Delta using rmultireg (bayesm function)
-    temp = rmultireg(Beta,Z,Deltabar,Adelta,nu,V)
-    Vbeta = matrix(temp$Sigma,nrow=nvar)
-    Vbetainv = solve(Vbeta)
-    Delta = temp$B
-
-
-  if(r%%100 == 0)
-    {ctime=proc.time()[3]
-    timetoend=((ctime-itime)/r)*(R-r)
-    cat(" ",r," (",round(timetoend/60,1),")",fill=TRUE)
-    fsh()}
-
-  if(r%%keep == 0) {
-    mkeep=r/keep
-    Betadraw[, ,mkeep]=Beta
-    alphadraw[mkeep] = alpha
-    Vbetadraw[mkeep,] = as.vector(Vbeta)
-    Deltadraw[mkeep,] = as.vector(Delta)
-    ll=0.0
-    for (i in 1:nreg) {ll=ll+llnegbin(c(Beta[i,],alpha),regdata[[i]]$X,regdata[[i]]$y,nvar)}
-    llike[r]=ll
-  }
-}
-ctime = proc.time()[3]
-
-attributes(alphadraw)$class=c("bayesm.mat","mcmc")
-attributes(alphadraw)$mcpar=c(1,R,keep)
-attributes(Deltadraw)$class=c("bayesm.mat","mcmc")
-attributes(Deltadraw)$mcpar=c(1,R,keep)
-attributes(Vbetadraw)$class=c("bayesm.var","bayesm.mat","mcmc")
-attributes(Vbetadraw)$mcpar=c(1,R,keep)
-attributes(Betadraw)$class=c("bayesm.hcoef")
-    
-cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
-return(list(llike=llike,Betadraw=Betadraw,alphadraw=alphadraw, Vbetadraw=Vbetadraw, Deltadraw=Deltadraw,
-     acceptrbeta=nacceptbeta/(R*nreg)*100,acceptralpha=nacceptalpha/R*100))
-}
+rhierNegbinRw= function(Data, Prior, Mcmc) {
+#   Revision History
+#	  Sridhar Narayanan - 05/2005
+#         P. Rossi 6/05
+#         fixed error with nobs not specified and changed llnegbinFract 9/05
+#         3/07 added classes
+#         3/08 fixed fractional likelihood
+#   W. Taylor 4/15 - added nprint option to MCMC argument
+#
+#   Model
+#       (y_i|lambda_i,alpha) ~ Negative Binomial(Mean = lambda_i, Overdispersion par = alpha)
+#
+#       ln(lambda_i) =  X_i * beta_i
+#
+#       beta_i = Delta'*z_i + nu_i
+#               nu_i~N(0,Vbeta)
+#
+#   Priors
+#       vec(Delta|Vbeta) ~ N(vec(Deltabar), Vbeta (x) (Adelta^-1))
+#       Vbeta ~ Inv Wishart(nu, V)
+#       alpha ~ Gamma(a,b) where mean = a/b and variance = a/(b^2)
+#
+#   Arguments
+#       Data = list of regdata,Z 
+#           regdata is a list of lists each list with members y, X
+#              e.g. regdata[[i]]=list(y=y,X=X)
+#              X has nvar columns including a first column of ones
+#              Z is nreg=length(regdata) x nz with a first column of ones
+#
+#       Prior - list containing the prior parameters
+#           Deltabar, Adelta - mean of Delta prior, inverse of variance covariance of Delta prior
+#           nu, V - parameters of Vbeta prior
+#           a, b - parameters of alpha prior
+#
+#       Mcmc - list containing
+#           R is number of draws
+#           keep is thinning parameter (def = 1)
+#           nprint - print estimated time remaining on every nprint'th draw (def = 100)
+#           s_beta - scaling parameter for beta RW (def = 2.93/sqrt(nvar))
+#           s_alpha - scaling parameter for alpha RW (def = 2.93)
+#           w - fractional weighting parameter (def = .1)
+#           Vbeta0, Delta0 - initial guesses for parameters, if not supplied default values are used
+#
+
+
+#
+# Definitions of functions used within rhierNegbinRw (but outside of Rcpp loop)
+#
+llnegbinR = function(par,X,y, nvar) {
+# Computes the log-likelihood
+    beta = par[1:nvar]
+    alpha = exp(par[nvar+1])+1.0e-50
+    mean=exp(X%*%beta)
+    prob=alpha/(alpha+mean)
+    prob=ifelse(prob<1.0e-100,1.0e-100,prob)
+     out=dnbinom(y,size=alpha,prob=prob,log=TRUE)
+     return(sum(out))
+}
+
+llnegbinFract = 
+function(par,X,y,Xpooled, ypooled, w,wgt, nvar,lnalpha)  {
+# Computes the fractional log-likelihood at the unit level
+    theta = c(par,lnalpha)
+    (1-w)*llnegbinR(theta,X,y,nvar) + w*wgt*llnegbinR(theta,Xpooled,ypooled, nvar) 
+}
+
+#
+# Error Checking
+#
+if(missing(Data)) {pandterm("Requires Data argument -- list of regdata and (possibly) Z")}
+
+if(is.null(Data$regdata)) {
+    pandterm("Requires Data element regdata -- list of data for each unit : y and X")
+}
+regdata=Data$regdata
+nreg = length(regdata)
+
+if (is.null(Data$Z)) {
+    cat("Z not specified - using a column of ones instead", fill = TRUE)
+    Z = matrix(rep(1,nreg),ncol=1)
+}
+else {
+    if (nrow(Data$Z) != nreg) {
+        pandterm(paste("Nrow(Z) ", nrow(Z), "ne number units ",nreg))
+    }
+    else {
+        Z = Data$Z
+    }
+}
+nz = ncol(Z)
+
+dimfun = function(l) {
+    c(length(l$y),dim(l$X))
+}
+dims=sapply(regdata,dimfun)
+dims = t(dims)
+nvar = quantile(dims[,3],prob=0.5)
+for (i in 1:nreg) {
+        if (dims[i, 1] != dims[i, 2] || dims[i, 3] != nvar) {
+            pandterm(paste("Bad Data dimensions for unit ", i, 
+                " dims(y,X) =", dims[i, ]))
+        }
+}
+
+ypooled = NULL
+Xpooled = NULL
+for (i in 1:nreg) {
+    ypooled = c(ypooled,regdata[[i]]$y)
+    Xpooled = rbind(Xpooled,regdata[[i]]$X)
+}
+nobs= length(ypooled)
+
+nvar=ncol(Xpooled)
+#
+# check for prior elements
+#
+if(missing(Prior)) {
+    Deltabar=matrix(rep(0,nvar*nz),nrow=nz) ; Adelta=BayesmConstant.A*diag(nz) ; nu=nvar+BayesmConstant.nuInc; V=nu*diag(nvar); a=0.5; b=0.1;
+}
+else {
+    if(is.null(Prior$Deltabar)) {Deltabar=matrix(rep(0,nvar*nz),nrow=nz)} else {Deltabar=Prior$Deltabar}
+    if(is.null(Prior$Adelta)) {Adelta=BayesmConstant.A*diag(nz)} else {Adelta=Prior$Adelta}
+    if(is.null(Prior$nu)) {nu=nvar+BayesmConstant.nuInc} else {nu=Prior$nu}
+    if(is.null(Prior$V)) {V=nu*diag(nvar)} else {V=Prior$V}
+    if(is.null(Prior$a)) {a=BayesmConstant.agammaprior} else {a=Prior$a}
+    if(is.null(Prior$b)) {b=BayesmConstant.bgammaprior} else {b=Prior$b}
+}
+
+if(sum(dim(Deltabar) == c(nz,nvar)) != 2) pandterm("Deltabar is of incorrect dimension")
+if(sum(dim(Adelta)==c(nz,nz)) != 2) pandterm("Adelta is of incorrect dimension")
+if(nu < nvar) pandterm("invalid nu value")
+if(sum(dim(V)==c(nvar,nvar)) != 2) pandterm("V is of incorrect dimension")
+if((length(a) != 1) | (a <=0)) pandterm("a should be a positive number")
+if((length(b) != 1) | (b <=0)) pandterm("b should be a positive number")
+
+#
+# check for Mcmc 
+#
+if(missing(Mcmc)) pandterm("Requires Mcmc argument -- at least R")
+if(is.null(Mcmc$R)) {pandterm("Requires element R of Mcmc")} else {R=Mcmc$R}
+if(is.null(Mcmc$Vbeta0)) {Vbeta0=diag(nvar)} else {Vbeta0=Mcmc$Vbeta0}
+if(sum(dim(Vbeta0) == c(nvar,nvar)) !=2) pandterm("Vbeta0 is not of dimension nvar")
+if(is.null(Mcmc$Delta0)) {Delta0=matrix(rep(0,nz*nvar),nrow=nz)} else {Delta0=Mcmc$Delta0}
+if(sum(dim(Delta0) == c(nz,nvar)) !=2) pandterm("Delta0 is not of dimension nvar by nz")
+if(is.null(Mcmc$keep)) {keep=BayesmConstant.keep} else {keep=Mcmc$keep}
+if(is.null(Mcmc$nprint)) {nprint=BayesmConstant.nprint} else {nprint=Mcmc$nprint}
+  if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
+if(is.null(Mcmc$s_alpha)) { s_alpha=BayesmConstant.RRScaling} 
+    else {s_alpha= Mcmc$s_alpha }
+if(is.null(Mcmc$s_beta)) { s_beta=BayesmConstant.RRScaling/sqrt(nvar)} 
+    else {s_beta=Mcmc$s_beta }
+if(is.null(Mcmc$w)) { w=BayesmConstant.w} 
+    else {w = Mcmc$w}
+# Wayne Taylor 12/2014 #############################################
+if(is.null(Mcmc$alpha)) {fixalpha=FALSE} else {fixalpha=TRUE; alpha=Mcmc$alpha}
+if(fixalpha & alpha<=0) pandterm("alpha is not positive")
+###################################################################
+
+# print out problem
+#
+cat(" ",fill=TRUE)
+cat("Starting Random Walk Metropolis Sampler for Hierarchical Negative Binomial Regression",fill=TRUE)
+cat("  ",nobs," obs; ",nvar," covariates (including the intercept); ",fill=TRUE)
+cat("  ",nz," individual characteristics (including the intercept) ",fill=TRUE)
+cat(" ",fill=TRUE)
+cat("Prior Parameters:",fill=TRUE)
+cat("Deltabar",fill=TRUE)
+print(Deltabar)
+cat("Adelta",fill=TRUE)
+print(Adelta)
+cat("nu",fill=TRUE)
+print(nu)
+cat("V",fill=TRUE)
+print(V)
+cat("a",fill=TRUE)
+print(a)
+cat("b",fill=TRUE)
+print(b)
+cat(" ",fill=TRUE)
+cat("MCMC Parameters:",fill=TRUE)
+cat("R= ",R," keep= ",keep," nprint= ",nprint,fill=TRUE)
+cat("s_alpha = ",s_alpha,fill=TRUE)
+cat("s_beta = ",s_beta,fill=TRUE)
+cat("Fractional Likelihood Weight Parameter = ",w,fill=TRUE)
+cat(" ",fill=TRUE)
+
+par = rep(0,(nvar+1))
+cat("initializing Metropolis candidate densities for ",nreg,"units ...",fill=TRUE)
+fsh()
+mle = optim(par,llnegbinR, X=Xpooled, y=ypooled, nvar=nvar, 
+      method="L-BFGS-B", upper=c(Inf,Inf,Inf,log(100000000)), hessian=TRUE, control=list(fnscale=-1))
+fsh()
+beta_mle=mle$par[1:nvar]
+alpha_mle = exp(mle$par[nvar+1])
+varcovinv = -mle$hessian
+Delta = Delta0
+Beta = t(matrix(rep(beta_mle,nreg),ncol=nreg))
+Vbetainv = chol2inv(chol(Vbeta0)) #Wayne: replaced "solve" function
+Vbeta = Vbeta0
+alpha = alpha_mle
+alphacvar = s_alpha/varcovinv[nvar+1,nvar+1]
+alphacroot = sqrt(alphacvar)
+cat("beta_mle = ",beta_mle,fill=TRUE)
+cat("alpha_mle = ",alpha_mle, fill = TRUE)
+fsh()
+
+hess_i=NULL
+if(nobs > 1000){
+  sind=sample(c(1:nobs),size=1000)
+  ypooleds=ypooled[sind]
+  Xpooleds=Xpooled[sind,]
+  }
+else{
+	ypooleds=ypooled
+	Xpooleds=Xpooled
+}
+# Find the individual candidate hessian
+for (i in 1:nreg) {
+    wgt = length(regdata[[i]]$y)/length(ypooleds)
+    mle2 = optim(mle$par[1:nvar],llnegbinFract, X=regdata[[i]]$X, y=regdata[[i]]$y, Xpooled=Xpooleds, 
+           ypooled=ypooleds, w=w,wgt=wgt, nvar=nvar, lnalpha=mle$par[nvar+1], 
+           method="BFGS", hessian=TRUE, control=list(fnscale=-1, trace=0,reltol=1e-6))
+    if (mle2$convergence==0)
+        hess_i[[i]] = list(hess=-mle2$hessian)
+    else
+        hess_i[[i]] = diag(rep(1,nvar))
+   if(i%%50 ==0) cat("  completed unit #",i,fill=TRUE)	
+   fsh()
+}
+
+###################################################################
+# Wayne Taylor
+# 12/01/2014
+###################################################################
+if (fixalpha) {alpha=Mcmc$alpha}
+rootA = chol(Vbetainv)
+draws=rhierNegbinRw_rcpp_loop(regdata, hess_i, Z, Beta, Delta,
+                              Deltabar, Adelta, nu, V, a, b,
+                              R, keep, s_beta, alphacroot, 1000, rootA,
+                              alpha, fixalpha)
+###################################################################
+
+attributes(draws$alphadraw)$class=c("bayesm.mat","mcmc")
+attributes(draws$alphadraw)$mcpar=c(1,R,keep)
+attributes(draws$Deltadraw)$class=c("bayesm.mat","mcmc")
+attributes(draws$Deltadraw)$mcpar=c(1,R,keep)
+attributes(draws$Vbetadraw)$class=c("bayesm.var","bayesm.mat","mcmc")
+attributes(draws$Vbetadraw)$mcpar=c(1,R,keep)
+attributes(draws$Betadraw)$class=c("bayesm.hcoef")
+
+return(draws)
+}
\ No newline at end of file
diff --git a/R/rivDP.R b/R/rivDP.R
deleted file mode 100755
index 96c0dbe..0000000
--- a/R/rivDP.R
+++ /dev/null
@@ -1,699 +0,0 @@
-rivDP = 
-function(Data,Prior,Mcmc) 
-{
-#
-# revision history:
-#   P. Rossi 1/06
-#   added draw of alpha 2/06
-#   added automatic scaling 2/06
-#   removed reqfun  7/07 -- now functions are in rthetaDP
-#   fixed initialization of theta 3/09
-#   fixed error in assigning user defined prior parms
-#
-# purpose: 
-#   draw from posterior for linear I.V. model with DP process for errors
-#
-# Arguments:
-#   Data -- list of z,w,x,y
-#        y is vector of obs on lhs var in structural equation
-#        x is "endogenous" var in structural eqn
-#        w is matrix of obs on "exogenous" vars in the structural eqn
-#        z is matrix of obs on instruments
-#   Prior -- list of md,Ad,mbg,Abg,mubar,Amu,nuV
-#        md is prior mean of delta
-#        Ad is prior prec
-#        mbg is prior mean vector for beta,gamma
-#        Abg is prior prec of same
-#        lamda is a list of prior parms for DP draw
-#              mubar is prior mean of means for "errors"
-#              Amu is scale precision parm for means
-#              nu,V parms for IW on Sigma (idential priors for each normal comp
-#        alpha prior parm for DP process (weight on base measure)
-#           or starting value if there is a prior on alpha (requires element Prioralpha)
-#        Prioralpha list of hyperparms for draw of alpha (alphamin,alphamax,power,n)
-#
-#   Mcmc -- list of R,keep,starting values for delta,beta,gamma,theta
-#        maxuniq is maximum number of unique theta values
-#        R is number of draws
-#        keep is thinning parameter
-#        SCALE if scale data, def: TRUE
-#        gridsize is the gridsize parm for alpha draws
-#
-#   Output: 
-#      list of draws of delta,beta,gamma and thetaNp1 which is used for
-#      predictive distribution of errors (density estimation)
-# 
-#   Model:
-#
-#    x=z'delta + e1
-#    y=beta*x + w'gamma + e2
-#        e1,e2 ~ N(theta_i)
-#
-#   Priors
-#   delta ~ N(md,Ad^-1)
-#   vec(beta,gamma) ~ N(mbg,Abg^-1)
-#   theta ~ DPP(alpha|lambda)
-#
-#
-#   extract data and check dimensios
-#
-pandterm=function(message) {stop(message,call.=FALSE)}
-if(missing(Data)) {pandterm("Requires Data argument -- list of z,w,x,y")}
-    if(is.null(Data$w)) isgamma=FALSE else isgamma=TRUE
-    if(isgamma) w = Data$w #matrix
-    if(is.null(Data$z)) {pandterm("Requires Data element z")}
-    z=Data$z
-    if(is.null(Data$x)) {pandterm("Requires Data element x")}
-    x=as.vector(Data$x)
-    if(is.null(Data$y)) {pandterm("Requires Data element y")}
-    y=as.vector(Data$y)
-
-#
-# check data for validity
-#
-n=length(y)
-if(isgamma)
-   {if(!is.matrix(w)) {pandterm("w is not a matrix")}
-   dimg=ncol(w)
-   if(n != nrow(w) ) {pandterm("length(y) ne nrow(w)")}}
-
-if(!is.matrix(z)) {pandterm("z is not a matrix")}
-dimd=ncol(z)
-if(n != length(x) ) {pandterm("length(y) ne length(x)")}
-if(n != nrow(z) ) {pandterm("length(y) ne nrow(z)")}
-
-
-#
-# extract elements corresponding to the prior
-#
-if(missing(Prior))
-   {
-    md=c(rep(0,dimd)) 
-    Ad=diag(0.01,dimd) 
-    if(isgamma) dimbg=1+dimg else dimbg=1
-    mbg=c(rep(0,dimbg)) 
-    Abg=diag(0.01,dimbg) 
- 
-
-    gamma= .5772156649015328606  
-    Istarmin=1
-    alphamin=exp(digamma(Istarmin)-log(gamma+log(n)))
-    Istarmax=floor(.1*n)
-    alphamax=exp(digamma(Istarmax)-log(gamma+log(n)))
-    power=.8
-    Prioralpha=list(n=n,alphamin=alphamin,alphamax=alphamax,power=power)
-
-    lambda=list(mubar=c(0,0),Amu=.2,nu=3.4,V=1.7*diag(2))
-   }
-
-else  
-   { 
-    if(is.null(Prior$md)) md=c(rep(0,dimd)) else md=Prior$md
-    if(is.null(Prior$Ad)) Ad=diag(0.01,dimd) else Ad=Prior$Ad
-    if(isgamma) dimbg=1+dimg else dimbg=1
-    if(is.null(Prior$mbg)) mbg=c(rep(0,dimbg)) else md=Prior$mbg
-    if(is.null(Prior$Abg)) Abg=diag(0.01,dimbg) else md=Prior$Abg
-
-
-    if(!is.null(Prior$Prioralpha))
-       {Prioralpha=Prior$Prioralpha}
-    else
-       {gamma= .5772156649015328606  
-        Istarmin=1
-        alphamin=exp(digamma(Istarmin)-log(gamma+log(n)))
-        Istarmax=floor(.1*n)
-        alphamax=exp(digamma(Istarmax)-log(gamma+log(n)))
-        power=.8
-        Prioralpha=list(n=n,alphamin=alphamin,alphamax=alphamax,power=power)}
-
-     if(!is.null(Prior$lambda))
-       {lambda=Prior$lambda}
-     else
-       {lambda=list(mubar=c(0,0),Amu=.2,nu=3.4,V=1.7*diag(2))}
-    }
-
-#
-# obtain starting values for MCMC
-#
-# we draw need inital values of delta, theta and indic
-#
-
-if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
-theta=NULL
-if(!is.null(Mcmc$delta)) 
-   {delta = Mcmc$delta}
-else
-   {lmxz = lm(x~z,data.frame(x=x,z=z))
-    delta = lmxz$coef[2:(ncol(z)+1)]}
-if(!is.null(Mcmc$theta))
-  {theta=Mcmc$theta }
-else
-  {onecomp=list(mu=c(0,0),rooti=diag(2))
-   theta=vector("list",length(y))
-   for(i in 1:n) {theta[[i]]=onecomp}
-   }
-dimd = length(delta)
-if(is.null(Mcmc$maxuniq))
-   {maxuniq=200}
-else
-   {maxuniq=Mcmc$maxuniq}
-if(is.null(Mcmc$R)) {pandterm("requres Mcmc argument, R")}
-R = Mcmc$R
-if(is.null(Mcmc$keep))
-   {keep=1}
-else
-   {keep=Mcmc$keep}
-if(is.null(Mcmc$gridsize))
-   {gridsize=20}
-else
-   {gridsize=Mcmc$gridsize}
-if(is.null(Mcmc$SCALE))
-  {SCALE=TRUE}
-else
-  {SCALE=Mcmc$SCALE}
-
-
-#
-# scale and center
-#
-if(SCALE){
-  scaley=sqrt(var(y))
-  scalex=sqrt(var(x))
-  meany=mean(y)
-  meanx=mean(x)
-  meanz=apply(z,2,mean)
-  y=(y-meany)/scaley; x=(x-meanx)/scalex
-  z=scale(z,center=TRUE,scale=FALSE)
-  if(isgamma) {meanw=apply(w,2,mean);  w=scale(w,center=TRUE,scale=FALSE)}
-}
-
-#
-# print out model
-#
-cat(" ",fill=TRUE)
-cat("Starting Gibbs Sampler for Linear IV Model With DP Process Errors",fill=TRUE)
-cat(" ",fill=TRUE)
-cat(" nobs= ",n,"; ",ncol(z)," instruments",fill=TRUE)
-cat(" ",fill=TRUE)
-cat("Prior Parms: ",fill=TRUE)
-cat("mean of delta ",fill=TRUE)
-print(md)
-cat(" ",fill=TRUE)
-cat("Adelta",fill=TRUE)
-print(Ad)
-cat(" ",fill=TRUE)
-cat("mean of beta/gamma",fill=TRUE)
-print(mbg)
-cat(" ",fill=TRUE)
-cat("Abeta/gamma",fill=TRUE)
-print(Abg)
-cat(" ",fill=TRUE)
-cat("lambda contains: ", fill=TRUE)
-cat("mu Prior Parms:",fill=TRUE)
-cat("mubar= ",lambda$mubar,fill=TRUE)
-cat("Amu= ",lambda$Amu,fill=TRUE)
-cat(" ",fill=TRUE)
-cat("Sigma Prior Parms:",fill=TRUE)
-cat("nu= ",lambda$nu," V=",fill=TRUE)
-print(lambda$V)
-cat("  ",fill=TRUE)
-cat("Parameters of Prior on Dirichlet Process parm (alpha)",fill=TRUE)
-cat("alphamin= ",Prioralpha$alphamin," alphamax= ",Prioralpha$alphamax," power=",
-        Prioralpha$power,fill=TRUE)
-cat("alpha values correspond to Istarmin = ",Istarmin," Istarmax = ",Istarmax,fill=TRUE)
-cat(" ",fill=TRUE)
-cat("MCMC parms: R= ",R," keep= ",keep,fill=TRUE)
-cat("  maximum number of unique thetas= ",maxuniq,fill=TRUE)
-cat("  gridsize for alpha draws= ",gridsize,fill=TRUE)
-cat("  SCALE data= ",SCALE,fill=TRUE)
-cat(" ",fill=TRUE)
-
-
-#
-# define needed functions
-#
-#
-#
-# --------------------------------------------------------------------------------------------
-#
-#
-get_ytxt=function(y,z,delta,x,w,ncomp,indic,comps){
-yt=NULL; xt=NULL;
-if(missing(w)) isw=FALSE else isw=TRUE
-if(isw) ncolw=ncol(w)
-for (k in 1:ncomp)
-{ 
-  nobs=sum(indic==k)
-  if(nobs > 0) 
-     {
-     if(isw) wk=matrix(w[indic==k,],ncol=ncolw)
-     zk=matrix(z[indic==k,],ncol=length(delta))
-     yk=y[indic==k]
-     xk=matrix(x[indic==k],ncol=1)
-     Sigma=backsolve(comps[[k]][[2]],diag(2))
-     Sigma=crossprod(Sigma)
-     mu=comps[[k]][[1]]
-     e1 = as.vector(xk-zk%*%delta)
-     ee2 = mu[2] +(Sigma[1,2]/Sigma[1,1])*(e1-mu[1])
-     sig = sqrt(Sigma[2,2]-(Sigma[1,2]^2/Sigma[1,1]))
-     yt = c(yt,(yk-ee2)/sig)
-     if(isw) 
-        {xt = rbind(xt,(cbind(xk,wk)/sig))}
-     else
-        {xt=rbind(xt,xk/sig)}
-     }
-}
-return(list(xt=xt,yt=yt))
-}
-#
-#
-# --------------------------------------------------------------------------------------------
-#
-#
-get_ytxtd=function(y,z,beta,gamma,x,w,ncomp,indic,comps,dimd){
-yt=NULL; xtd=NULL;
-if(missing(w)) isw=FALSE else isw=TRUE
-if(isw) ncolw=ncol(w)
-C = matrix(c(1,beta,0,1),nrow=2)
-for (k in 1:ncomp)
-   {
-    nobs=sum(indic==k)
-    if(nobs > 0) 
-     {
-      xtdk=matrix(nrow=2*nobs,ncol=dimd)
-      ind=seq(1,(2*nobs-1),by=2)
-      if(isw) wk=matrix(w[indic==k,],ncol=ncolw)
-      zk=matrix(z[indic==k,],ncol=dimd)
-      zveck=as.vector(t(zk))
-      yk=y[indic==k]
-      xk=x[indic==k]
-      Sigma=backsolve(comps[[k]][[2]],diag(2))
-      Sigma=crossprod(Sigma)
-      mu=comps[[k]][[1]]
-      B = C%*%Sigma%*%t(C)
-      L = t(chol(B))
-      Li=backsolve(L,diag(2),upper.tri=FALSE)
-      if(isw) {u=as.vector((yk-wk%*%gamma-mu[2]-beta*mu[1]))}
-      else {u=as.vector((yk-mu[2]-beta*mu[1]))}
-      ytk = as.vector(Li %*% rbind((xk-mu[1]),u))
-
-      z2=rbind(zveck,beta*zveck)
-      z2=Li%*%z2
-      zt1=z2[1,]
-      zt2=z2[2,]
-
-      dim(zt1)=c(dimd,nobs)
-      zt1=t(zt1)
-      dim(zt2)=c(dimd,nobs)
-      zt2=t(zt2)
-
-      xtdk[ind,]=zt1
-      xtdk[-ind,]=zt2
-
-      yt=c(yt,ytk)
-      xtd=rbind(xtd,xtdk)
-    }
-   }
-return(list(yt=yt,xtd=xtd))
-}
-#
-#
-# --------------------------------------------------------------------------------------------
-#
-#
-rthetaDP= function(maxuniq,alpha,lambda,Prioralpha,theta,thetaStar,indic,q0v,y,gridsize){
-# 
-#  function to make one draw from DP process 
-#
-#  P. Rossi 1/06
-#  added draw of alpha 2/06
-#  removed lambdaD,etaD and function arguments 5/06
-#  removed thetaStar argument to .Call and creation of newthetaStar 7/06
-#  removed q0 computations as eta is not drawn  7/06
-#  changed for new version of thetadraw and removed calculation of thetaStar before
-#    .Call  7/07
-#
-#      y(i) ~ f(y|theta[[i]],eta)
-#      theta ~ DP(alpha,G(lambda))
-#              note: eta is not used
-#output:
-#   list with components:
-#      thetaDraws: list, [[i]] is a list of the ith draw of the n theta's
-#                  where n is the length of the input theta and nrow(y)
-#      thetaNp1Draws: list, [[i]] is ith draw of theta_{n+1}
-#args:
-#   maxuniq: the maximum number of unique thetaStar values -- an error will be raised
-#            if this is exceeded
-#   alpha,lambda: starting values (or fixed DP prior values if not drawn).
-#   Prioralpha: list of hyperparms of alpha prior
-#   theta: list of starting value for theta's
-#   thetaStar: list of unique values of theta, thetaStar[[i]]
-#   indic:  n vector of indicator for which unique theta (in thetaStar)
-#   y: is a matrix nxk
-#         thetaStar: list of unique values of theta, thetaStar[[i]]
-#   q0v:a double vector with the same number of rows as y, giving \Int f(y(i)|theta,eta) dG_{lambda}(theta).
-#
-#  define needed functions for rthetaDP
-# -----------------------------------------------------------------------------------------------
-   pandterm = function(message) {
-        stop(message, call. = FALSE) }
-# ----------------------------------------------------------------------------------------------
-   rmultinomF=
-      function(p) {
-       return(sum(runif(1) > cumsum(p))+1)
-   }
-# -----------------------------------------------------------------------------------------------
-   alphaD=function(Prioralpha,Istar,gridsize){
-#
-#  function to draw alpha using prior, p(alpha)= (1-(alpha-alphamin)/(alphamax-alphamin))**power
-#
-   power=Prioralpha$power
-   alphamin=Prioralpha$alphamin
-   alphamax=Prioralpha$alphamax
-   n=Prioralpha$n
-   alpha=seq(from=alphamin,to=(alphamax-0.000001),len=gridsize)
-   lnprob=Istar*log(alpha) + lgamma(alpha) - lgamma(n+alpha) + 
-          power*log(1-(alpha-alphamin)/(alphamax-alphamin))
-   lnprob=lnprob-median(lnprob)
-   probs=exp(lnprob)
-   probs=probs/sum(probs)
-   return(alpha[rmultinomF(probs)])
-}  
-# -----------------------------------------------------------------------------------------------
-#
-yden=function(thetaStar,y,eta){
-#
-# function to compute f(y | theta) 
-# computes f for all values of theta in theta list of lists
-#
-# arguments:
-#   thetaStar is a list of lists.  thetaStar[[i]] is a list with components, mu, rooti
-#   y |theta[[i]] ~ N(mu,(rooti %*% t(rooti))^-1)  rooti is inverse of Chol root of Sigma
-#   eta is not used
-#
-# output:
-#   length(thetaStar) x n array of values of f(y[j,]|thetaStar[[i]]
-# 
-
-nunique=length(thetaStar)
-n=nrow(y)
-ydenmat=matrix(double(n*nunique),ncol=n)
-k=ncol(y)
-for(i in 1:nunique){
-
-   # now compute vectorized version of lndMvn 
-   # compute y_i'RIRI'y_i for all i
-   #
-   mu=thetaStar[[i]]$mu; rooti=thetaStar[[i]]$rooti
-   quads=colSums((crossprod(rooti,(t(y)-mu)))^2)
-   ydenmat[i,]=exp(-(k/2)*log(2*pi) + sum(log(diag(rooti))) - .5*quads)
-   
-}
-return(ydenmat)
-}
-#
-#
-# -----------------------------------------------------------------------------------------
-#
-#
-GD=function(lambda){
-#
-# function to draw from prior for Multivariate Normal Model
-#
-# mu|Sigma ~ N(mubar,Sigma x Amu^-1)
-# Sigma ~ IW(nu,V)
-#
-#
-nu=lambda$nu
-V=lambda$V
-mubar=lambda$mubar
-Amu=lambda$Amu
-k=length(mubar)
-Sigma=rwishart(nu,chol2inv(chol(lambda$V)))$IW
-root=chol(Sigma)
-mu=mubar+(1/sqrt(Amu))*t(root)%*%matrix(rnorm(k),ncol=1)
-return(list(mu=as.vector(mu),rooti=backsolve(root,diag(k))))
-}
-#
-#
-# -------------------------------------------------------------------------------------------
-#
-#
-thetaD=function(y,lambda,eta){
-#
-# function to draw from posterior of theta given data y and base prior G0(lambda)
-#
-# here y ~ N(mu,Sigma)
-# theta = list(mu=mu,rooti=chol(Sigma)^-1)
-# mu|Sigma ~ N(mubar,Sigma (x) Amu-1)
-# Sigma ~ IW(nu,V)
-#
-# arguments: 
-#   y is n x k matrix of obs
-#   lambda is list(mubar,Amu,nu,V)
-#   eta is not used
-# output:
-#   one draw of theta, list(mu,rooti)
-#        Sigma=inv(rooti)%*%t(inv(rooti))
-#
-# note: we assume that y is a matrix. if there is only one obs, y is a 1 x k matrix
-#
-rout=rmultireg(y,matrix(c(rep(1,nrow(y))),ncol=1),matrix(lambda$mubar,nrow=1),matrix(lambda$Amu,ncol=1),
-       lambda$nu,lambda$V)
-return(list(mu=as.vector(rout$B),rooti=backsolve(chol(rout$Sigma),diag(ncol(y)))))
-}
-#
-#  END OF REQUIRED FUNCTIONS AREA
-# --------------------------------------------------------------------------------------------
-#
-
-   n = length(theta)
-
-   eta=NULL    # note eta is not used
-   thetaNp1=NULL
-
-   p=c(rep(1/(alpha+(n-1)),n-1),alpha/(alpha+(n-1)))
-
-   nunique=length(thetaStar)
-  
-   if(nunique > maxuniq ) { pandterm("maximum number of unique thetas exceeded")} 
-   ydenmat=matrix(double(maxuniq*n),ncol=n) 
-   ydenmat[1:nunique,]=yden(thetaStar,y,eta)
-   #  ydenmat is a length(thetaStar) x n array of density values given f(y[j,] | thetaStar[[i]]
-   #  note: due to remix step (below) we must recompute ydenmat each time!
-
-   # use .Call to draw theta list
-   out= .Call("thetadraw",y,ydenmat,indic,q0v,p,theta,lambda,eta=eta,
-                  thetaD=thetaD,yden=yden,maxuniq,nunique,new.env()) 
-
-   # theta has been modified by thetadraw so we need to recreate thetaStar
-   thetaStar=unique(theta)
-   nunique=length(thetaStar)
-
-   #thetaNp1 and remix
-   probs=double(nunique+1)
-   for(j in 1:nunique) {
-       ind = which(sapply(theta,identical,thetaStar[[j]]))
-       probs[j]=length(ind)/(alpha+n) 
-       new_utheta=thetaD(y[ind,,drop=FALSE],lambda,eta) 
-       for(i in seq(along=ind)) {theta[[ind[i]]]=new_utheta}
-       indic[ind]=j
-       thetaStar[[j]]=new_utheta
-   }
-   probs[nunique+1]=alpha/(alpha+n)
-   ind=rmultinomF(probs)
-   if(ind==length(probs)) {
-      thetaNp1=GD(lambda)
-   } else {
-      thetaNp1=thetaStar[[ind]]
-   }
-
-   #alpha
-   alpha=alphaD(Prioralpha,nunique,gridsize)
-   
-   return(list(theta=theta,indic=indic,thetaStar=thetaStar,
-               thetaNp1=thetaNp1,alpha=alpha,Istar=nunique))
-}
-#
-#
-# -----------------------------------------------------------------------------------------
-#
-#
-q0=function(y,lambda,eta){
-#
-# function to compute a vector of int f(y[i]|theta) p(theta|lambda)dlambda
-#     here p(theta|lambda) is G0 the base prior
-#
-# implemented for a multivariate normal data density and standard conjugate
-# prior:
-#    theta=list(mu,Sigma)
-#    f(y|theta) is N(mu,Sigma)
-#    lambda=list(mubar,Amu,nu,V)
-#       mu|Sigma ~ N(mubar,Sigma (x) Amu^-1)
-#       Sigma ~ IW(nu,V)
-#
-# arguments:
-#    Y is n x k matrix of observations
-#    eta is not used
-#    lambda=list(mubar,Amu,nu,V)
-# 
-# output:
-#    vector of q0 values for each obs (row of Y)
-#
-# p. rossi 12/05
-#
-# here y is matrix of observations (each row is an obs)
-
-mubar=lambda$mubar; nu=lambda$nu ; Amu=lambda$Amu; V=lambda$V
-k=ncol(y)
-R=chol(V)
-logdetR=sum(log(diag(R)))
-if (k > 1) 
-  {lnk1k2=(k/2)*log(2)+log((nu-k)/2)+lgamma((nu-k)/2)-lgamma(nu/2)+sum(log(nu/2-(1:(k-1))/2))}
-else
-  {lnk1k2=(k/2)*log(2)+log((nu-k)/2)+lgamma((nu-k)/2)-lgamma(nu/2)}
-constant=-(k/2)*log(2*pi)+(k/2)*log(Amu/(1+Amu)) + lnk1k2 + nu*logdetR
-#
-# note: here we are using the fact that |V + S_i | = |R|^2 (1 + v_i'v_i)
-#       where v_i = sqrt(Amu/(1+Amu))*t(R^-1)*(y_i-mubar), R is chol(V)
-#
-#       and S_i = Amu/(1+Amu) * (y_i-mubar)(y_i-mubar)'
-#
-mat=sqrt(Amu/(1+Amu))*t(backsolve(R,diag(ncol(y))))%*%(t(y)-mubar)
-vivi=colSums(mat^2)
-
-lnq0v=constant-((nu+1)/2)*(2*logdetR+log(1+vivi))
-
-return(exp(lnq0v))
-}
-#
-#
-# --------------------------------------------------------------------------------------------
-#
-#
-#    END OF REQUIRED FUNCTIONS AREA
-#
-#
-#initialize comps,indic,ncomp
-comps=unique(theta)
-ncomp=length(comps)
-indic=double(n)
-for(j in 1:ncomp){
-      indic[which(sapply(theta,identical,comps[[j]]))]=j
-   }
-# initialize eta
-eta=NULL
-#
-# initialize alpha
-alpha=1
-
-# reserve space for draws
-#
-deltadraw = matrix(double(floor(R/keep)*dimd),ncol=dimd)
-betadraw = rep(0.0,floor(R/keep))
-alphadraw=double(floor(R/keep))
-Istardraw=double(floor(R/keep))
-if(isgamma) gammadraw = matrix(double(floor(R/keep)*dimg),ncol=dimg)
-thetaNp1draw=vector("list",R)
-
-#
-# start main iteration loop
-#
-itime=proc.time()[3]
-cat("MCMC Iteration (est time to end -min) ",fill=TRUE)
-fsh()
-
-for(rep in 1:R) {
-
-   # draw beta and gamma
-      if(isgamma) 
-         {out=get_ytxt(y=y,z=z,delta=delta,x=x,w=w,
-          ncomp=ncomp,indic=indic,comps=comps)}
-      else
-         {out=get_ytxt(y=y,z=z,delta=delta,x=x,
-          ncomp=ncomp,indic=indic,comps=comps)}
-         
-      bg = breg(out$yt,out$xt,mbg,Abg)  
-      beta = bg[1]
-      if(isgamma) gamma = bg[2:length(bg)]
-
-   # draw delta
-      if(isgamma)
-         {out=get_ytxtd(y=y,z=z,beta=beta,gamma=gamma,
-          x=x,w=w,ncomp=ncomp,indic=indic,comps=comps,dimd=dimd)}
-      else
-         {out=get_ytxtd(y=y,z=z,beta=beta,
-          x=x,ncomp=ncomp,indic=indic,comps=comps,dimd=dimd)}
-	
-      delta = breg(out$yt,out$xtd,md,Ad)
-
-    # DP process stuff- theta | lambda
-      if(isgamma) {Err = cbind(x-z%*%delta,y-beta*x-w%*%gamma)}
-      else {Err = cbind(x-z%*%delta,y-beta*x)}
-      q0v = q0(Err,lambda,eta)
-      DPout=rthetaDP(maxuniq=maxuniq,alpha=alpha,lambda=lambda,Prioralpha=Prioralpha,theta=theta,
-                     thetaStar=comps,indic=indic,q0v=q0v,y=Err,gridsize=gridsize)
-      indic=DPout$indic
-      theta=DPout$theta
-      comps=DPout$thetaStar
-      alpha=DPout$alpha
-      Istar=DPout$Istar
-      ncomp=length(comps)
-   
-   if(rep%%100==0)
-     {
-      ctime=proc.time()[3]
-      timetoend=((ctime-itime)/rep)*(R-rep)
-      cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
-      fsh()
-      }
-   if(rep%%keep ==0)
-     {
-      mkeep=rep/keep
-      deltadraw[mkeep,]=delta
-      betadraw[mkeep]=beta
-      alphadraw[mkeep]=alpha
-      Istardraw[mkeep]=Istar
-      if(isgamma) gammadraw[mkeep,]=gamma
-      thetaNp1draw[[mkeep]]=list(DPout$thetaNp1)
-      }
-}
-#
-# rescale
-#
-if(SCALE){
-   deltadraw=deltadraw*scalex
-   betadraw=betadraw*scaley/scalex
-   if(isgamma) {gammadraw=gammadraw*scaley}
-}
-ctime = proc.time()[3]
-cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
-
-nmix=list(probdraw=matrix(c(rep(1,length(thetaNp1draw))),ncol=1),zdraw=NULL,compdraw=thetaNp1draw)
-#
-# densitymix is in the format to be used with the generic mixture of normals plotting
-# methods (plot.bayesm.nmix)
-#
-attributes(nmix)$class=c("bayesm.nmix")
-
-attributes(deltadraw)$class=c("bayesm.mat","mcmc")
-attributes(deltadraw)$mcpar=c(1,R,keep)
-attributes(betadraw)$class=c("bayesm.mat","mcmc")
-attributes(betadraw)$mcpar=c(1,R,keep)
-attributes(alphadraw)$class=c("bayesm.mat","mcmc")
-attributes(alphadraw)$mcpar=c(1,R,keep)
-attributes(Istardraw)$class=c("bayesm.mat","mcmc")
-attributes(Istardraw)$mcpar=c(1,R,keep)
-if(isgamma){
-   attributes(gammadraw)$class=c("bayesm.mat","mcmc")
-   attributes(gammadraw)$mcpar=c(1,R,keep)}
-
-if(isgamma) 
-   { return(list(deltadraw=deltadraw,betadraw=betadraw,alphadraw=alphadraw,Istardraw=Istardraw,
-                 gammadraw=gammadraw,nmix=nmix))}
-   else
-   { return(list(deltadraw=deltadraw,betadraw=betadraw,alphadraw=alphadraw,Istardraw=Istardraw,
-                 nmix=nmix))}
-}
-
-
diff --git a/R/rivDP_rcpp.R b/R/rivDP_rcpp.R
new file mode 100644
index 0000000..951b86d
--- /dev/null
+++ b/R/rivDP_rcpp.R
@@ -0,0 +1,279 @@
+rivDP = function(Data,Prior,Mcmc) {
+#
+# revision history:
+#   P. Rossi 1/06
+#   added draw of alpha 2/06
+#   added automatic scaling 2/06
+#   removed reqfun  7/07 -- now functions are in rthetaDP
+#   fixed initialization of theta 3/09
+#   fixed error in assigning user defined prior parms
+#   W. Taylor 4/15 - added nprint option to MCMC argument
+#
+# purpose: 
+#   draw from posterior for linear I.V. model with DP process for errors
+#
+# Arguments:
+#   Data -- list of z,w,x,y
+#        y is vector of obs on lhs var in structural equation
+#        x is "endogenous" var in structural eqn
+#        w is matrix of obs on "exogenous" vars in the structural eqn
+#        z is matrix of obs on instruments
+#   Prior -- list of md,Ad,mbg,Abg,mubar,Amu,nuV
+#        md is prior mean of delta
+#        Ad is prior prec
+#        mbg is prior mean vector for beta,gamma
+#        Abg is prior prec of same
+#        lamda is a list of prior parms for DP draw
+#              mubar is prior mean of means for "errors"
+#              Amu is scale precision parm for means
+#        nu,V parms for IW on Sigma (identical priors for each normal comp)
+#        alpha prior parm for DP process (weight on base measure)
+#           or starting value if there is a prior on alpha (requires element Prioralpha)
+#        Prioralpha list of hyperparms for draw of alpha (alphamin,alphamax,power,n)
+#
+#   Mcmc -- list of R,keep,starting values for delta,beta,gamma,theta
+#        maxuniq is maximum number of unique theta values
+#        R is number of draws
+#        keep is thinning parameter
+#        nprint - print estimated time remaining on every nprint'th draw
+#        SCALE if scale data, def: TRUE
+#        gridsize is the gridsize parm for alpha draws
+#
+#   Output: 
+#      list of draws of delta,beta,gamma and thetaNp1 which is used for
+#      predictive distribution of errors (density estimation)
+# 
+#   Model:
+#
+#    x=z'delta + e1
+#    y=beta*x + w'gamma + e2
+#        e1,e2 ~ N(theta_i)
+#
+#   Priors
+#   delta ~ N(md,Ad^-1)
+#   vec(beta,gamma) ~ N(mbg,Abg^-1)
+#   theta ~ DPP(alpha|lambda)
+#
+#
+#   extract data and check dimensions
+#
+if(missing(Data)) {pandterm("Requires Data argument -- list of z,w,x,y")}
+    if(is.null(Data$w)) isgamma=FALSE else isgamma=TRUE
+    if(isgamma) w = Data$w #matrix
+    if(is.null(Data$z)) {pandterm("Requires Data element z")}
+    z=Data$z
+    if(is.null(Data$x)) {pandterm("Requires Data element x")}
+    x=as.vector(Data$x)
+    if(is.null(Data$y)) {pandterm("Requires Data element y")}
+    y=as.vector(Data$y)
+#
+# check data for validity
+#
+n=length(y)
+if(isgamma)
+   {if(!is.matrix(w)) {pandterm("w is not a matrix")}
+   dimg=ncol(w)
+   if(n != nrow(w) ) {pandterm("length(y) ne nrow(w)")}}
+
+if(!is.matrix(z)) {pandterm("z is not a matrix")}
+dimd=ncol(z)
+if(n != length(x) ) {pandterm("length(y) ne length(x)")}
+if(n != nrow(z) ) {pandterm("length(y) ne nrow(z)")}
+
+
+#
+# extract elements corresponding to the prior
+#
+alimdef=BayesmConstant.DPalimdef
+nulimdef=BayesmConstant.DPnulimdef
+vlimdef=BayesmConstant.DPvlimdef
+
+if(missing(Prior))
+   {
+    md=c(rep(0,dimd)) 
+    Ad=diag(BayesmConstant.A,dimd) 
+    if(isgamma) dimbg=1+dimg else dimbg=1
+    mbg=c(rep(0,dimbg)) 
+    Abg=diag(BayesmConstant.A,dimbg) 
+ 
+
+    gamma= BayesmConstant.gamma  
+    Istarmin=BayesmConstant.DPIstarmin
+    alphamin=exp(digamma(Istarmin)-log(gamma+log(n)))
+    Istarmax=floor(.1*n)
+    alphamax=exp(digamma(Istarmax)-log(gamma+log(n)))
+    power=BayesmConstant.DPpower
+    Prioralpha=list(n=n,alphamin=alphamin,alphamax=alphamax,power=power)
+
+    lambda_hyper=list(alim=alimdef,nulim=nulimdef,vlim=vlimdef)
+   }
+
+else  
+   { 
+    if(is.null(Prior$md)) md=c(rep(0,dimd)) else md=Prior$md
+    if(is.null(Prior$Ad)) Ad=diag(BayesmConstant.A,dimd) else Ad=Prior$Ad
+    if(isgamma) dimbg=1+dimg else dimbg=1
+    if(is.null(Prior$mbg)) mbg=c(rep(0,dimbg)) else mbg=Prior$mbg
+    if(is.null(Prior$Abg)) Abg=diag(BayesmConstant.A,dimbg) else Abg=Prior$Abg
+    
+    if(!is.null(Prior$Prioralpha))
+       {Prioralpha=Prior$Prioralpha}
+    else
+       {gamma= BayesmConstant.gamma 
+        Istarmin=BayesmConstant.DPIstarmin
+        alphamin=exp(digamma(Istarmin)-log(gamma+log(n)))
+        Istarmax=floor(.1*n)
+        alphamax=exp(digamma(Istarmax)-log(gamma+log(n)))
+        power=BayesmConstant.DPpower
+        Prioralpha=list(n=n,alphamin=alphamin,alphamax=alphamax,power=power)}
+    
+    if(is.null(Prior$lambda_hyper)) {lambda_hyper=Prior$lambda_hyper}
+    else
+       {lambda_hyper=Prior$lambda_hyper;
+          if(is.null(lambda_hyper$alim)) {lambda_hyper$alim=alimdef}
+          if(is.null(lambda_hyper$nulim)) {lambda_hyper$nulim=nulimdef} 
+          if(is.null(lambda_hyper$vlim)) {lambda_hyper$vlim=vlimdef}
+       } 
+   }
+#
+# check Prior arguments for validity
+#
+if(lambda_hyper$alim[1]<0) {pandterm("alim[1] must be >0")}
+if(lambda_hyper$nulim[1]<0) {pandterm("nulim[1] must be >0")}
+if(lambda_hyper$vlim[1]<0) {pandterm("vlim[1] must be >0")}
+
+#
+# obtain starting values for MCMC
+#
+# we need initial values of delta, theta and indic
+#
+
+if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
+theta=NULL
+if(!is.null(Mcmc$delta)) 
+   {delta = Mcmc$delta}
+else
+   {lmxz = lm(x~z,data.frame(x=x,z=z))
+    delta = lmxz$coef[2:(ncol(z)+1)]}
+if(!is.null(Mcmc$theta))
+  {theta=Mcmc$theta }
+else
+  {onecomp=list(mu=c(0,0),rooti=diag(2))
+   theta=vector("list",length(y))
+   for(i in 1:n) {theta[[i]]=onecomp}
+   }
+dimd = length(delta)
+if(is.null(Mcmc$maxuniq))
+   {maxuniq=BayesmConstant.DPmaxuniq}
+else
+   {maxuniq=Mcmc$maxuniq}
+if(is.null(Mcmc$R)) {pandterm("requres Mcmc argument, R")}
+R = Mcmc$R
+if(is.null(Mcmc$keep))
+   {keep=BayesmConstant.keep}
+else
+   {keep=Mcmc$keep}
+if(is.null(Mcmc$nprint))
+{nprint=BayesmConstant.nprint}
+else
+{nprint=Mcmc$nprint}
+if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
+if(is.null(Mcmc$gridsize))
+   {gridsize=BayesmConstant.DPgridsize}
+else
+   {gridsize=Mcmc$gridsize}
+if(is.null(Mcmc$SCALE))
+  {SCALE=BayesmConstant.DPSCALE}
+else
+  {SCALE=Mcmc$SCALE}
+
+#
+# scale and center
+#
+if(SCALE){
+  scaley=sqrt(var(y))
+  scalex=sqrt(var(x))
+  meany=mean(y)
+  meanx=mean(x)
+  meanz=apply(z,2,mean)
+  y=(y-meany)/scaley; x=(x-meanx)/scalex
+  z=scale(z,center=TRUE,scale=FALSE)
+  if(isgamma) {meanw=apply(w,2,mean);  w=scale(w,center=TRUE,scale=FALSE)}
+}
+
+#
+# print out model
+#
+cat(" ",fill=TRUE)
+cat("Starting Gibbs Sampler for Linear IV Model With DP Process Errors",fill=TRUE)
+cat(" ",fill=TRUE)
+cat(" nobs= ",n,"; ",ncol(z)," instruments",fill=TRUE)
+cat(" ",fill=TRUE)
+cat("Prior Parms: ",fill=TRUE)
+cat("mean of delta ",fill=TRUE)
+print(md)
+cat(" ",fill=TRUE)
+cat("Adelta",fill=TRUE)
+print(Ad)
+cat(" ",fill=TRUE)
+cat("mean of beta/gamma",fill=TRUE)
+print(mbg)
+cat(" ",fill=TRUE)
+cat("Abeta/gamma",fill=TRUE)
+print(Abg)
+cat(" ",fill=TRUE)
+cat("G0 ~ N(mubar,Sigma (x) Amu^-1)",fill=TRUE)
+cat(" mubar = ",0,fill=TRUE)
+cat(" Sigma ~ IW(nu,nu*v*I)",fill=TRUE)
+cat(" Amu ~ uniform[",lambda_hyper$alim[1],",",lambda_hyper$alim[2],"]",fill=TRUE)
+cat(" nu ~ uniform on log grid  [",2-1+exp(lambda_hyper$nulim[1]),
+    ",",2-1+exp(lambda_hyper$nulim[2]),"]",fill=TRUE)
+cat(" v ~ uniform[",lambda_hyper$vlim[1],",",lambda_hyper$vlim[2],"]",fill=TRUE)
+cat("  ",fill=TRUE)
+cat("Parameters of Prior on Dirichlet Process parm (alpha)",fill=TRUE)
+cat("alphamin= ",Prioralpha$alphamin," alphamax= ",Prioralpha$alphamax," power=",
+        Prioralpha$power,fill=TRUE)
+cat("alpha values correspond to Istarmin = ",Istarmin," Istarmax = ",Istarmax,fill=TRUE)
+cat(" ",fill=TRUE)
+cat("MCMC parms: R= ",R," keep= ",keep," nprint= ",nprint,fill=TRUE)
+cat("  maximum number of unique thetas= ",maxuniq,fill=TRUE)
+cat("  gridsize for alpha draws= ",gridsize,fill=TRUE)
+cat("  SCALE data= ",SCALE,fill=TRUE)
+cat(" ",fill=TRUE)
+
+###################################################################
+# Wayne Taylor
+# 3/14/2015
+###################################################################
+if(isgamma == FALSE) w=matrix()
+out = rivDP_rcpp_loop(R,keep,nprint,dimd,mbg,Abg,md,Ad,y,isgamma,z,x,w, 
+                      delta,PrioralphaList=Prioralpha,gridsize,SCALE,maxuniq,scalex,scaley,lambda_hyper,
+                      BayesmConstant.A,BayesmConstant.nu)
+###################################################################
+
+nmix=list(probdraw=matrix(c(rep(1,length(out$thetaNp1draw))),ncol=1),zdraw=NULL,compdraw=out$thetaNp1draw)
+#
+# densitymix is in the format to be used with the generic mixture of normals plotting
+# methods (plot.bayesm.nmix)
+#
+attributes(nmix)$class=c("bayesm.nmix")
+
+attributes(out$deltadraw)$class=c("bayesm.mat","mcmc")
+attributes(out$deltadraw)$mcpar=c(1,R,keep)
+attributes(out$betadraw)$class=c("bayesm.mat","mcmc")
+attributes(out$betadraw)$mcpar=c(1,R,keep)
+attributes(out$alphadraw)$class=c("bayesm.mat","mcmc")
+attributes(out$alphadraw)$mcpar=c(1,R,keep)
+attributes(out$Istardraw)$class=c("bayesm.mat","mcmc")
+attributes(out$Istardraw)$mcpar=c(1,R,keep)
+if(isgamma){
+  attributes(out$gammadraw)$class=c("bayesm.mat","mcmc")
+  attributes(out$gammadraw)$mcpar=c(1,R,keep)}
+
+if(isgamma) 
+{ return(list(deltadraw=out$deltadraw,betadraw=out$betadraw,alphadraw=out$alphadraw,Istardraw=out$Istardraw,
+              gammadraw=out$gammadraw,nmix=nmix))}
+else
+{ return(list(deltadraw=out$deltadraw,betadraw=out$betadraw,alphadraw=out$alphadraw,Istardraw=out$Istardraw,
+              nmix=nmix))}
+}
\ No newline at end of file
diff --git a/R/rivGibbs.R b/R/rivGibbs_rcpp.R
similarity index 55%
rename from R/rivGibbs.R
rename to R/rivGibbs_rcpp.R
index a2c4f68..b668d5f 100755
--- a/R/rivGibbs.R
+++ b/R/rivGibbs_rcpp.R
@@ -1,228 +1,160 @@
-rivGibbs=
-function(Data,Prior,Mcmc) 
-{
-#
-# revision history:
-#    R. McCulloch original version 2/05 
-#    p. rossi 3/05 
-#    p. rossi 1/06 -- fixed error in nins
-#    p. rossi 1/06 -- fixed def Prior settings for nu,V
-#    3/07 added classes
-#
-# purpose: 
-#   draw from posterior for linear I.V. model
-#
-# Arguments:
-#   Data -- list of z,w,x,y
-#        y is vector of obs on lhs var in structural equation
-#        x is "endogenous" var in structural eqn
-#        w is matrix of obs on "exogenous" vars in the structural eqn
-#        z is matrix of obs on instruments
-#   Prior -- list of md,Ad,mbg,Abg,nu,V
-#        md is prior mean of delta
-#        Ad is prior prec
-#        mbg is prior mean vector for beta,gamma
-#        Abg is prior prec of same
-#        nu,V parms for IW on Sigma
-#
-#   Mcmc -- list of R,keep 
-#        R is number of draws
-#        keep is thinning parameter
-#
-#   Output: 
-#      list of draws of delta,beta,gamma and Sigma
-# 
-#   Model:
-#
-#    x=z'delta + e1
-#    y=beta*x + w'gamma + e2
-#        e1,e2 ~ N(0,Sigma)
-#
-#   Priors
-#   delta ~ N(md,Ad^-1)
-#   vec(beta,gamma) ~ N(mbg,Abg^-1)
-#   Sigma ~ IW(nu,V)
-#
-#   check arguments
-#
-pandterm=function(message) {stop(message,call.=FALSE)}
-if(missing(Data)) {pandterm("Requires Data argument -- list of z,w,x,y")}
-    if(is.null(Data$z)) {pandterm("Requires Data element z")}
-    z=Data$z
-    if(is.null(Data$w)) {pandterm("Requires Data element w")}
-    w=Data$w
-    if(is.null(Data$x)) {pandterm("Requires Data element x")}
-    x=Data$x
-    if(is.null(Data$y)) {pandterm("Requires Data element y")}
-    y=Data$y
-
-#
-# check data for validity
-#
-if(!is.vector(x)) {pandterm("x must be a vector")}
-if(!is.vector(y)) {pandterm("y must be a vector")}
-n=length(y)
-if(!is.matrix(w)) {pandterm("w is not a matrix")}
-if(!is.matrix(z)) {pandterm("z is not a matrix")}
-dimd=ncol(z)
-dimg=ncol(w)
-if(n != length(x) ) {pandterm("length(y) ne length(x)")}
-if(n != nrow(w) ) {pandterm("length(y) ne nrow(w)")}
-if(n != nrow(z) ) {pandterm("length(y) ne nrow(z)")}
-#
-# check for Prior
-#
-if(missing(Prior))
-   { md=c(rep(0,dimd));Ad=.01*diag(dimd); 
-     mbg=c(rep(0,(1+dimg))); Abg=.01*diag((1+dimg));
-     nu=3; V=diag(2)}
-else
-   {
-    if(is.null(Prior$md)) {md=c(rep(0,dimd))} 
-       else {md=Prior$md}
-    if(is.null(Prior$Ad)) {Ad=.01*diag(dimd)} 
-       else {Ad=Prior$Ad}
-    if(is.null(Prior$mbg)) {mbg=c(rep(0,(1+dimg)))} 
-       else {mbg=Prior$mbg}
-    if(is.null(Prior$Abg)) {Abg=.01*diag((1+dimg))} 
-       else {Abg=Prior$Abg}
-    if(is.null(Prior$nu)) {nu=3}
-       else {nu=Prior$nu}
-    if(is.null(Prior$V)) {V=nu*diag(2)}
-       else {V=Prior$V}
-   }
-#
-# check dimensions of Priors
-#
-if(ncol(Ad) != nrow(Ad) || ncol(Ad) != dimd || nrow(Ad) != dimd) 
-   {pandterm(paste("bad dimensions for Ad",dim(Ad)))}
-if(length(md) != dimd)
-   {pandterm(paste("md wrong length, length= ",length(md)))}
-if(ncol(Abg) != nrow(Abg) || ncol(Abg) != (1+dimg) || nrow(Abg) != (1+dimg)) 
-   {pandterm(paste("bad dimensions for Abg",dim(Abg)))}
-if(length(mbg) != (1+dimg))
-   {pandterm(paste("mbg wrong length, length= ",length(mbg)))}
-#
-# check MCMC argument
-#
-if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
-else
-   {
-    if(is.null(Mcmc$R)) 
-       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
-    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
-   }
-
-#
-# print out model
-#
-cat(" ",fill=TRUE)
-cat("Starting Gibbs Sampler for Linear IV Model",fill=TRUE)
-cat(" ",fill=TRUE)
-cat(" nobs= ",n,"; ",ncol(z)," instruments; ",ncol(w)," included exog vars",fill=TRUE)
-cat("     Note: the numbers above include intercepts if in z or w",fill=TRUE)
-cat(" ",fill=TRUE)
-cat("Prior Parms: ",fill=TRUE)
-cat("mean of delta ",fill=TRUE)
-print(md)
-cat("Adelta",fill=TRUE)
-print(Ad)
-cat("mean of beta/gamma",fill=TRUE)
-print(mbg)
-cat("Abeta/gamma",fill=TRUE)
-print(Abg)
-cat("Sigma Prior Parms",fill=TRUE)
-cat("nu= ",nu," V=",fill=TRUE)
-print(V)
-cat(" ",fill=TRUE)
-cat("MCMC parms: R= ",R," keep= ",keep,fill=TRUE)
-cat(" ",fill=TRUE)
-
-deltadraw = matrix(double(floor(R/keep)*dimd),ncol=dimd)
-betadraw = rep(0.0,floor(R/keep))
-gammadraw = matrix(double(floor(R/keep)*dimg),ncol=dimg)
-Sigmadraw = matrix(double(floor(R/keep)*4),ncol=4)
-
-#set initial values
-Sigma=diag(2)
-delta=c(rep(.1,dimd))
-
-#
-# start main iteration loop
-#
-itime=proc.time()[3]
-cat("MCMC Iteration (est time to end -min) ",fill=TRUE)
-fsh()
-xtd=matrix(nrow=2*n,ncol=dimd)
-ind=seq(1,(2*n-1),by=2)
-zvec=as.vector(t(z))
-
-for(rep in 1:R) {
-
-    # draw beta,gamma
-      e1 = as.vector(x-z%*%delta)
-      ee2 = (Sigma[1,2]/Sigma[1,1])*e1
-      sig = sqrt(Sigma[2,2]-(Sigma[1,2]^2/Sigma[1,1]))
-      yt = (y-ee2)/sig
-      xt = cbind(x,w)/sig
-      bg = breg(yt,xt,mbg,Abg)  
-      beta = bg[1]
-      gamma = bg[2:length(bg)]
-
-    # draw delta
-      C = matrix(c(1,beta,0,1),nrow=2)
-      B = C%*%Sigma%*%t(C)
-      L = t(chol(B))
-      Li=backsolve(L,diag(2),upper.tri=FALSE)
-      u = as.vector((y-w%*%gamma))
-      yt = as.vector(Li %*% rbind(x,u))
-
-      z2=rbind(zvec,beta*zvec)
-      z2=Li%*%z2
-      zt1=z2[1,]
-      zt2=z2[2,]
-      dim(zt1)=c(dimd,n)
-      zt1=t(zt1)
-      dim(zt2)=c(dimd,n)
-      zt2=t(zt2)
-      xtd[ind,]=zt1
-      xtd[-ind,]=zt2
-      delta = breg(yt,xtd,md,Ad)
-
-    # draw Sigma
-      Res = cbind(x-z%*%delta,y-beta*x-w%*%gamma)
-      S = crossprod(Res)
-      Sigma = rwishart(nu+n,chol2inv(chol(V+S)))$IW
-  
-   if(rep%%100==0)
-     {
-      ctime=proc.time()[3]
-      timetoend=((ctime-itime)/rep)*(R-rep)
-      cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
-      fsh()
-      }
-   if(rep%%keep ==0)
-     {
-      mkeep=rep/keep
-      deltadraw[mkeep,]=delta
-      betadraw[mkeep]=beta
-      gammadraw[mkeep,]=gamma
-      Sigmadraw[mkeep,]=Sigma
-      }
-}
-
-ctime = proc.time()[3]
-cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
-
-attributes(deltadraw)$class=c("bayesm.mat","mcmc")
-attributes(deltadraw)$mcpar=c(1,R,keep)
-attributes(betadraw)$class=c("bayesm.mat","mcmc")
-attributes(betadraw)$mcpar=c(1,R,keep)
-attributes(gammadraw)$class=c("bayesm.mat","mcmc")
-attributes(gammadraw)$mcpar=c(1,R,keep)
-attributes(Sigmadraw)$class=c("bayesm.var","bayesm.mat","mcmc")
-attributes(Sigmadraw)$mcpar=c(1,R,keep)
-
-
-return(list(deltadraw=deltadraw,betadraw=betadraw,gammadraw=gammadraw,Sigmadraw=Sigmadraw))
-}
+rivGibbs=function(Data,Prior,Mcmc) {
+#
+# revision history:
+#    R. McCulloch original version 2/05 
+#    p. rossi 3/05 
+#    p. rossi 1/06 -- fixed error in nins
+#    p. rossi 1/06 -- fixed def Prior settings for nu,V
+#    3/07 added classes
+#    W. Taylor 4/15 - added nprint option to MCMC argument
+#
+#
+# purpose: 
+#   draw from posterior for linear I.V. model
+#
+# Arguments:
+#   Data -- list of z,w,x,y
+#        y is vector of obs on lhs var in structural equation
+#        x is "endogenous" var in structural eqn
+#        w is matrix of obs on "exogenous" vars in the structural eqn
+#        z is matrix of obs on instruments
+#   Prior -- list of md,Ad,mbg,Abg,nu,V
+#        md is prior mean of delta
+#        Ad is prior prec
+#        mbg is prior mean vector for beta,gamma
+#        Abg is prior prec of same
+#        nu,V parms for IW on Sigma
+#
+#   Mcmc -- list of R,keep 
+#        R is number of draws
+#        keep is thinning parameter
+#        nprint - print estimated time remaining on every nprint'th draw
+#
+#   Output: 
+#      list of draws of delta,beta,gamma and Sigma
+# 
+#   Model:
+#
+#    x=z'delta + e1
+#    y=beta*x + w'gamma + e2
+#        e1,e2 ~ N(0,Sigma)
+#
+#   Priors
+#   delta ~ N(md,Ad^-1)
+#   vec(beta,gamma) ~ N(mbg,Abg^-1)
+#   Sigma ~ IW(nu,V)
+#
+#   check arguments
+#
+if(missing(Data)) {pandterm("Requires Data argument -- list of z,w,x,y")}
+    if(is.null(Data$z)) {pandterm("Requires Data element z")}
+    z=Data$z
+    if(is.null(Data$w)) {pandterm("Requires Data element w")}
+    w=Data$w
+    if(is.null(Data$x)) {pandterm("Requires Data element x")}
+    x=Data$x
+    if(is.null(Data$y)) {pandterm("Requires Data element y")}
+    y=Data$y
+#
+# check data for validity
+#
+if(!is.vector(x)) {pandterm("x must be a vector")}
+if(!is.vector(y)) {pandterm("y must be a vector")}
+n=length(y)
+if(!is.matrix(w)) {pandterm("w is not a matrix")}
+if(!is.matrix(z)) {pandterm("z is not a matrix")}
+dimd=ncol(z)
+dimg=ncol(w)
+if(n != length(x) ) {pandterm("length(y) ne length(x)")}
+if(n != nrow(w) ) {pandterm("length(y) ne nrow(w)")}
+if(n != nrow(z) ) {pandterm("length(y) ne nrow(z)")}
+#
+# check for Prior
+#
+if(missing(Prior))
+   { md=c(rep(0,dimd));Ad=BayesmConstant.A*diag(dimd); 
+     mbg=c(rep(0,(1+dimg))); Abg=BayesmConstant.A*diag((1+dimg));
+     nu=3; V=diag(2)}
+else
+   {
+    if(is.null(Prior$md)) {md=c(rep(0,dimd))} 
+       else {md=Prior$md}
+    if(is.null(Prior$Ad)) {Ad=BayesmConstant.A*diag(dimd)} 
+       else {Ad=Prior$Ad}
+    if(is.null(Prior$mbg)) {mbg=c(rep(0,(1+dimg)))} 
+       else {mbg=Prior$mbg}
+    if(is.null(Prior$Abg)) {Abg=BayesmConstant.A*diag((1+dimg))} 
+       else {Abg=Prior$Abg}
+    if(is.null(Prior$nu)) {nu=3}
+       else {nu=Prior$nu}
+    if(is.null(Prior$V)) {V=nu*diag(2)}
+       else {V=Prior$V}
+   }
+#
+# check dimensions of Priors
+#
+if(ncol(Ad) != nrow(Ad) || ncol(Ad) != dimd || nrow(Ad) != dimd) 
+   {pandterm(paste("bad dimensions for Ad",dim(Ad)))}
+if(length(md) != dimd)
+   {pandterm(paste("md wrong length, length= ",length(md)))}
+if(ncol(Abg) != nrow(Abg) || ncol(Abg) != (1+dimg) || nrow(Abg) != (1+dimg)) 
+   {pandterm(paste("bad dimensions for Abg",dim(Abg)))}
+if(length(mbg) != (1+dimg))
+   {pandterm(paste("mbg wrong length, length= ",length(mbg)))}
+#
+# check MCMC argument
+#
+if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
+else
+   {
+    if(is.null(Mcmc$R)) 
+       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
+    if(is.null(Mcmc$keep)) {keep=BayesmConstant.keep} else {keep=Mcmc$keep}
+    if(is.null(Mcmc$nprint)) {nprint=BayesmConstant.nprint} else {nprint=Mcmc$nprint}
+      if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
+   }
+
+#
+# print out model
+#
+cat(" ",fill=TRUE)
+cat("Starting Gibbs Sampler for Linear IV Model",fill=TRUE)
+cat(" ",fill=TRUE)
+cat(" nobs= ",n,"; ",ncol(z)," instruments; ",ncol(w)," included exog vars",fill=TRUE)
+cat("     Note: the numbers above include intercepts if in z or w",fill=TRUE)
+cat(" ",fill=TRUE)
+cat("Prior Parms: ",fill=TRUE)
+cat("mean of delta ",fill=TRUE)
+print(md)
+cat("Adelta",fill=TRUE)
+print(Ad)
+cat("mean of beta/gamma",fill=TRUE)
+print(mbg)
+cat("Abeta/gamma",fill=TRUE)
+print(Abg)
+cat("Sigma Prior Parms",fill=TRUE)
+cat("nu= ",nu," V=",fill=TRUE)
+print(V)
+cat(" ",fill=TRUE)
+cat("MCMC parms: ",fill=TRUE)
+cat("R= ",R," keep= ",keep," nprint= ",nprint,fill=TRUE)
+cat(" ",fill=TRUE)
+
+###################################################################
+# Keunwoo Kim
+# 09/03/2014
+###################################################################
+draws=rivGibbs_rcpp_loop(y, x, z, w, mbg, Abg, md, Ad, V, nu, R, keep, nprint)
+###################################################################
+
+attributes(draws$deltadraw)$class=c("bayesm.mat","mcmc")
+attributes(draws$deltadraw)$mcpar=c(1,R,keep)
+attributes(draws$betadraw)$class=c("bayesm.mat","mcmc")
+attributes(draws$betadraw)$mcpar=c(1,R,keep)
+attributes(draws$gammadraw)$class=c("bayesm.mat","mcmc")
+attributes(draws$gammadraw)$mcpar=c(1,R,keep)
+attributes(draws$Sigmadraw)$class=c("bayesm.var","bayesm.mat","mcmc")
+attributes(draws$Sigmadraw)$mcpar=c(1,R,keep)
+
+return(draws)
+}
diff --git a/R/rmixGibbs.R b/R/rmixGibbs.R
deleted file mode 100755
index cdb753e..0000000
--- a/R/rmixGibbs.R
+++ /dev/null
@@ -1,87 +0,0 @@
-rmixGibbs=
-function(y,Bbar,A,nu,V,a,p,z,comps)
-{
-#
-# Revision History: 
-#   R. McCulloch 11/04
-#   P. Rossi 3/05 put in backsolve and improved documentation
-#
-# purpose: do gibbs sampling inference for a mixture of multivariate normals
-#
-# arguments:
-#     y: data, rows are observations, assumed to be iid draws from normal mixture
-#     Bbar,A,nu,V: common prior for mean and variance of each normal component
-#
-#     note: Bbar should be a matrix. usually with only one row
-#
-#        beta ~ N(betabar,Sigma (x) A^-1)
-#                   betabar=vec(Bbar)
-#          Sigma ~ IW(nu,V) or Sigma^-1 ~ W(nu, V^-1)
-#              note: if you want Sigma ~ A, use nu big and rwishart(nu,nu(A)^{-1})$IW
-#     a: Dirichlet parameters for prior on p
-#     p: prior probabilities of normal components
-#     z: components indentities for each observation 
-#        (vector of intergers each in {1,2,...number of components})
-#     comps: list, each member is a list comp with ith normal component
-#                        ~N(comp[[1]],Sigma), Sigma = t(R)%*%R, R^{-1} = comp[[2]]
-# Output:
-#  list with elements [[1]=$p, [[2]]=$z, and [[3]]=$comps, with the updated values
-#
-#------------------------------------------------------------------------------------
-#  define functions needed
-#
-rcompsC = function(x,p,comps) {
-# purpose:
-#  draws class membership of rows of x, given x rows are iid draws from 
-#  mixture of multivariate normals
-# arguments:
-#     x: observations (number of observations x dimension)
-#     p: prior probabilities of mixture components
-#     comps: list, each member is a list with mean and R^{-1}, Sigma = t(R)%*%R 
-dim = ncol(x)
-nob = nrow(x)
-nc = length(comps)
-mumat = matrix(0.0,dim,nc)
-rivmat = matrix(0.0,dim*(dim+1)/2,nc)
-for(i in 1:nc) {
-   mumat[,i] = comps[[i]][[1]]
-   rivmat[,i] = uttovC(comps[[i]][[2]])
-}
-xx=t(x)
-.C('crcomps',as.double(xx),as.double(mumat),as.double(rivmat),as.double(p),
-    as.integer(dim),as.integer(nc),as.integer(nob),res=integer(nob))$res
-}
-
-uttovC = function(rooti) {
-# returns vector of square upper triangular matrix rooti, goes down columns dropping the zeros
-dim = nrow(rooti)
-n = dim*(dim+1)/2
-.C('cuttov',as.double(rooti),res = double(n),as.integer(dim))$res
-}
-#-----------------------------------------------------------------------------------------
-nmix = length(a)
-#draw comps
-for(i in 1:nmix) {
-   nobincomp = sum(z==i)         # get number of observations "in" component i
-   if(nobincomp>0) {             # if more than one obs in this component, draw from posterior
-      yi=y[z==i,]
-      dim(yi)=c(nobincomp,ncol(y))
-          #  worry about case where y has only one col (univ mixtures) or only one row
-          #  then yi gets converted to a vector
-      temp = rmultireg(yi,matrix(rep(1,nobincomp),ncol=1),Bbar,A,nu,V)
-      comps[[i]] = list(mu = as.vector(temp$B),
-                        rooti=backsolve(chol(temp$Sigma),diag(rep(1,nrow(temp$Sigma)))))
-   } 
-   else { # else draw from the prior
-      rw=rwishart(nu,chol2inv(chol(V)))
-      comps[[i]] = list(mu = as.vector(t(Bbar) + (rw$CI %*% rnorm(length(Bbar)))/sqrt(A[1,1])),
-                        rooti=backsolve(chol(rw$IW),diag(rep(1,nrow(V)))))
-   }
-}
-#draw z
-z=rcompsC(y,p,comps)
-#draw p
-for(i in 1:length(a)) a[i] = a[i] + sum(z==i)
-p = rdirichlet(a)
-return(list(p=p,z=z,comps=comps))
-}
diff --git a/R/rmixture.R b/R/rmixture.R
deleted file mode 100755
index bc20d48..0000000
--- a/R/rmixture.R
+++ /dev/null
@@ -1,36 +0,0 @@
-rmixture=
-function(n,pvec,comps)
-{
-#
-# R. McCulloch 12/04
-# revision history:
-#   commented by rossi 3/05
-#
-# purpose: iid draws from mixture of multivariate normals
-# arguments:
-#     n: number of draws
-#     pvec: prior probabilities of normal components
-#     comps: list, each member is a list comp with ith normal component
-#                     ~N(comp[[1]],Sigma), Sigma = t(R)%*%R, R^{-1} = comp[[2]]
-# output:
-#  list of x (n by length(comp[[1]]) matrix of draws) and z latent indicators of
-#  component
-#
-#----------------------------------------------------------------------------------
-# define function needed
-#
-rcomp=function(comp) {
-# purpose: draw multivariate normal with mean and variance given by comp 
-# arguments:
-#     comp is a list of length 2,
-#     comp[[1]] is the mean and comp[[2]] is R^{-1} = comp[[2]], Sigma = t(R)%*%R
-invUT = function(ut) {
-backsolve(ut,diag(rep(1,nrow(ut))))
-}
-as.vector(comp[[1]] + t(invUT(comp[[2]]))%*%rnorm(length(comp[[1]])))
-}
-#----------------------------------------------------------------------------------
-#
-z = sample(1:length(pvec), n, replace = TRUE, prob = pvec)
-return(list(x = t(sapply(comps[z],rcomp)),z=z))
-}
diff --git a/R/rmnlIndepMetrop.R b/R/rmnlIndepMetrop_rcpp.R
old mode 100755
new mode 100644
similarity index 64%
rename from R/rmnlIndepMetrop.R
rename to R/rmnlIndepMetrop_rcpp.R
index a917124..813ddc9
--- a/R/rmnlIndepMetrop.R
+++ b/R/rmnlIndepMetrop_rcpp.R
@@ -1,165 +1,136 @@
-rmnlIndepMetrop=
-function(Data,Prior,Mcmc)
-{
-#
-# revision history:
-#   p. rossi 1/05
-#   2/9/05 fixed error in Metrop eval
-#   changed to reflect new argument order in llmnl,mnlHess 9/05
-#   added return for log-like  11/05
-#
-# purpose: 
-#   draw from posterior for MNL using Independence Metropolis
-#
-# Arguments:
-#   Data - list of p,y,X  
-#     p is number of alternatives
-#     X is nobs*p x nvar matrix
-#     y is nobs vector of values from 1 to p
-#   Prior - list of A, betabar
-#     A is nvar x nvar prior preci matrix
-#     betabar is nvar x 1 prior mean
-#   Mcmc
-#     R is number of draws
-#     keep is thinning parameter
-#     nu degrees of freedom parameter for independence 
-#        sampling density
-#
-# Output:
-#   list of betadraws
-#
-# Model:   Pr(y=j) = exp(x_j'beta)/sum(exp(x_k'beta)
-#
-# Prior:   beta ~ N(betabar,A^-1)
-#
-# check arguments
-#
-pandterm=function(message) {stop(message,call.=FALSE)}
-if(missing(Data)) {pandterm("Requires Data argument -- list of p, y, X")}
-    if(is.null(Data$X)) {pandterm("Requires Data element X")}
-    X=Data$X
-    if(is.null(Data$y)) {pandterm("Requires Data element y")}
-    y=Data$y
-    if(is.null(Data$p)) {pandterm("Requires Data element p")}
-    p=Data$p
-nvar=ncol(X)
-nobs=length(y)
-#
-# check data for validity
-#
-if(length(y) != (nrow(X)/p) ) {pandterm("length(y) ne nrow(X)/p")}
-if(sum(y %in% (1:p)) < nobs) {pandterm("invalid values in y vector -- must be integers in 1:p")}
-cat(" table of y values",fill=TRUE)
-print(table(y))
-#
-# check for Prior
-#
-if(missing(Prior))
-   { betabar=c(rep(0,nvar)); A=.01*diag(nvar)}
-else
-   {
-    if(is.null(Prior$betabar)) {betabar=c(rep(0,nvar))} 
-       else {betabar=Prior$betabar}
-    if(is.null(Prior$A)) {A=.01*diag(nvar)} 
-       else {A=Prior$A}
-   }
-#
-# check dimensions of Priors
-#
-if(ncol(A) != nrow(A) || ncol(A) != nvar || nrow(A) != nvar) 
-   {pandterm(paste("bad dimensions for A",dim(A)))}
-if(length(betabar) != nvar)
-   {pandterm(paste("betabar wrong length, length= ",length(betabar)))}
-#
-# check MCMC argument
-#
-if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
-else
-   {
-    if(is.null(Mcmc$R)) 
-       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
-    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
-    if(is.null(Mcmc$nu)) {nu=6} else {nu=Mcmc$nu}
-   }
-#
-# print out problem
-#
-cat(" ", fill=TRUE)
-cat("Starting Independence Metropolis Sampler for Multinomial Logit Model",fill=TRUE)
-cat("  ",length(y)," obs with ",p," alternatives",fill=TRUE)
-cat(" ", fill=TRUE)
-cat("Table of y Values",fill=TRUE)
-print(table(y))
-cat("Prior Parms: ",fill=TRUE)
-cat("betabar",fill=TRUE)
-print(betabar)
-cat("A",fill=TRUE)
-print(A)
-cat(" ", fill=TRUE)
-cat("MCMC parms: ",fill=TRUE)
-cat("R= ",R," keep= ",keep," nu (df for st candidates) = ",nu,fill=TRUE)
-cat(" ",fill=TRUE)
-
-betadraw=matrix(double(floor(R/keep)*nvar),ncol=nvar)
-loglike=double(floor(R/keep))
-#
-# compute required quantities for indep candidates
-#
-beta=c(rep(0,nvar))
-mle=optim(beta,llmnl,X=X,y=y,method="BFGS",hessian=TRUE,control=list(fnscale=-1))
-beta=mle$par
-betastar=mle$par
-mhess=mnlHess(beta,y,X)
-candcov=chol2inv(chol(mhess))
-root=chol(candcov)
-rooti=backsolve(root,diag(nvar))
-priorcov=chol2inv(chol(A))
-rootp=chol(priorcov)
-rootpi=backsolve(rootp,diag(nvar))
-
-#
-#	start main iteration loop
-#
-itime=proc.time()[3]
-cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
-fsh()
-oldloglike=llmnl(beta,y,X)
-oldlpost=oldloglike+lndMvn(beta,betabar,rootpi)
-oldlimp=lndMvst(beta,nu,betastar,rooti)
-#       note: we don't need the determinants as they cancel in
-#       computation of acceptance prob
-naccept=0
-
-for (rep in 1:R) 
-{
-   betac=rmvst(nu,betastar,root)
-   cloglike=llmnl(betac,y,X)
-   clpost=cloglike+lndMvn(betac,betabar,rootpi)
-   climp=lndMvst(betac,nu,betastar,rooti)
-   ldiff=clpost+oldlimp-oldlpost-climp
-   alpha=min(1,exp(ldiff))
-   if(alpha < 1) {unif=runif(1)} else {unif=0}
-   if (unif <= alpha)
-      { beta=betac
-        oldloglike=cloglike
-        oldlpost=clpost
-        oldlimp=climp
-        naccept=naccept+1}
-#
-#       print time to completion and draw # every 100th draw
-#
-  if(rep%%100 == 0)
-    {ctime=proc.time()[3]
-    timetoend=((ctime-itime)/rep)*(R-rep)
-    cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
-    fsh()}
-
-  if(rep%%keep == 0) 
-    {mkeep=rep/keep; betadraw[mkeep,]=beta; loglike[mkeep]=oldloglike}
-}
-ctime = proc.time()[3]
-cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
-attributes(betadraw)$class=c("bayesm.mat","mcmc")
-attributes(betadraw)$mcpar=c(1,R,keep)
-return(list(betadraw=betadraw,loglike=loglike,acceptr=naccept/R))
-}
+rmnlIndepMetrop=function(Data,Prior,Mcmc){
+#
+# revision history:
+#   p. rossi 1/05
+#   2/9/05 fixed error in Metrop eval
+#   changed to reflect new argument order in llmnl,mnlHess 9/05
+#   added return for log-like  11/05
+#   W. Taylor 4/15 - added nprint option to MCMC argument
+#
+# purpose: 
+#   draw from posterior for MNL using Independence Metropolis
+#
+# Arguments:
+#   Data - list of p,y,X  
+#     p is number of alternatives
+#     X is nobs*p x nvar matrix
+#     y is nobs vector of values from 1 to p
+#   Prior - list of A, betabar
+#     A is nvar x nvar prior preci matrix
+#     betabar is nvar x 1 prior mean
+#   Mcmc
+#     R is number of draws
+#     keep is thinning parameter
+#     nprint - print estimated time remaining on every nprint'th draw
+#     nu degrees of freedom parameter for independence 
+#        sampling density
+#
+# Output:
+#   list of betadraws
+#
+# Model:   Pr(y=j) = exp(x_j'beta)/sum(exp(x_k'beta)
+#
+# Prior:   beta ~ N(betabar,A^-1)
+#
+# check arguments
+#
+if(missing(Data)) {pandterm("Requires Data argument -- list of p, y, X")}
+    if(is.null(Data$X)) {pandterm("Requires Data element X")}
+    X=Data$X
+    if(is.null(Data$y)) {pandterm("Requires Data element y")}
+    y=Data$y
+    if(is.null(Data$p)) {pandterm("Requires Data element p")}
+    p=Data$p
+nvar=ncol(X)
+nobs=length(y)
+#
+# check data for validity
+#
+if(length(y) != (nrow(X)/p) ) {pandterm("length(y) ne nrow(X)/p")}
+if(sum(y %in% (1:p)) < nobs) {pandterm("invalid values in y vector -- must be integers in 1:p")}
+cat(" table of y values",fill=TRUE)
+print(table(y))
+#
+# check for Prior
+#
+if(missing(Prior))
+   { betabar=c(rep(0,nvar)); A=BayesmConstant.A*diag(nvar)}
+else
+   {
+    if(is.null(Prior$betabar)) {betabar=c(rep(0,nvar))} 
+       else {betabar=Prior$betabar}
+    if(is.null(Prior$A)) {A=BayesmConstant.A*diag(nvar)} 
+       else {A=Prior$A}
+   }
+#
+# check dimensions of Priors
+#
+if(ncol(A) != nrow(A) || ncol(A) != nvar || nrow(A) != nvar) 
+   {pandterm(paste("bad dimensions for A",dim(A)))}
+if(length(betabar) != nvar)
+   {pandterm(paste("betabar wrong length, length= ",length(betabar)))}
+#
+# check MCMC argument
+#
+if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
+else
+   {
+    if(is.null(Mcmc$R)) 
+       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
+    if(is.null(Mcmc$keep)) {keep=BayesmConstant.keep} else {keep=Mcmc$keep}
+    if(is.null(Mcmc$nprint)) {nprint=BayesmConstant.nprint} else {nprint=Mcmc$nprint}
+      if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
+    if(is.null(Mcmc$nu)) {nu=6} else {nu=Mcmc$nu}
+   }
+#
+# print out problem
+#
+cat(" ", fill=TRUE)
+cat("Starting Independence Metropolis Sampler for Multinomial Logit Model",fill=TRUE)
+cat("  ",length(y)," obs with ",p," alternatives",fill=TRUE)
+cat(" ", fill=TRUE)
+cat("Table of y Values",fill=TRUE)
+print(table(y))
+cat("Prior Parms: ",fill=TRUE)
+cat("betabar",fill=TRUE)
+print(betabar)
+cat("A",fill=TRUE)
+print(A)
+cat(" ", fill=TRUE)
+cat("MCMC parms: ",fill=TRUE)
+cat("R= ",R," keep= ",keep," nprint= ",nprint," nu (df for st candidates) = ",nu,fill=TRUE)
+cat(" ",fill=TRUE)
+
+#
+# compute required quantities for indep candidates
+#
+beta=c(rep(0,nvar))
+mle=optim(beta,llmnl,X=X,y=y,method="BFGS",hessian=TRUE,control=list(fnscale=-1))
+beta=mle$par
+betastar=mle$par
+mhess=mnlHess(beta,y,X)
+candcov=chol2inv(chol(mhess))
+root=chol(candcov)
+rooti=backsolve(root,diag(nvar))
+priorcov=chol2inv(chol(A))
+rootp=chol(priorcov)
+rootpi=backsolve(rootp,diag(nvar))
+
+oldloglike=llmnl(beta,y,X)
+oldlpost=oldloglike+lndMvn(beta,betabar,rootpi)
+oldlimp=lndMvst(beta,nu,betastar,rooti)
+#       note: we don't need the determinants as they cancel in
+#       computation of acceptance prob
+
+###################################################################
+# Wayne Taylor
+# 08/21/2014
+###################################################################
+loopout = rmnlIndepMetrop_rcpp_loop(R,keep,nu,betastar,root,y,X,betabar,rootpi,rooti,oldlimp,oldlpost,nprint);
+###################################################################
+
+attributes(loopout$betadraw)$class=c("bayesm.mat","mcmc")
+attributes(loopout$betadraw)$mcpar=c(1,R,keep)
+
+return(list(betadraw=loopout$betadraw,loglike=loopout$loglike,acceptr=loopout$naccept/R))
+}
diff --git a/R/rmnpGibbs.R b/R/rmnpgibbs_rcpp.r
old mode 100755
new mode 100644
similarity index 53%
rename from R/rmnpGibbs.R
rename to R/rmnpgibbs_rcpp.r
index ab90870..8ba90fc
--- a/R/rmnpGibbs.R
+++ b/R/rmnpgibbs_rcpp.r
@@ -1,206 +1,129 @@
-rmnpGibbs=
-function(Data,Prior,Mcmc) 
-{
-#
-# Revision History:
-#   modified by rossi 12/18/04 to include error checking
-#   3/07 added classes
-#
-# purpose:  Gibbs MNP model with full covariance matrix
-#
-# Arguments:
-#   Data contains 
-#      p the number of choice alternatives
-#      y -- a vector of length n with choices (takes on values from 1, .., p)
-#      X -- n(p-1) x k matrix of covariates (including intercepts)
-#           note: X is the differenced matrix unlike MNL X=stack(X_1,..,X_n) 
-#                 each X_i is (p-1) x nvar
-#
-#   Prior contains a list of (betabar, A, nu, V)
-#      if elements of prior do not exist, defaults are used
-#
-#   Mcmc is a list of (beta0,sigma0,R,keep)  
-#     beta0,sigma0 are intial values, if not supplied defaults are used
-#     R is number of draws
-#     keep is thinning parm, keep every keepth draw
-#
-# Output: a list of every keepth betadraw and sigmsdraw
-#
-#  model: 
-#    w_i = X_ibeta + e    e~N(0,Sigma)     note w_i,e are (p-1) x 1
-#    y_i = j  if w_ij > w_i-j  j=1,...,p-1
-#    y_i = p  if all w_i < 0
-#  
-#  priors:
-#    beta ~ N(betabar,A^-1)
-#    Sigma ~ IW(nu,V)
-#
-#  Check arguments
-#
-pandterm=function(message) {stop(message,call.=FALSE)}
-if(missing(Data)) {pandterm("Requires Data argument -- list of p, y, X")}
-  if(is.null(Data$p)) {pandterm("Requires Data element p -- number of alternatives")}
-  p=Data$p
-  if(is.null(Data$y)) {pandterm("Requires Data element y -- number of alternatives")}
-  y=Data$y
-  if(is.null(Data$X)) {pandterm("Requires Data element X -- matrix of covariates")}
-  X=Data$X
-#
-# check data for validity
-#
-levely=as.numeric(levels(as.factor(y)))
-if(length(levely) != p) {pandterm(paste("y takes on ",length(levely),
-  " values -- must be ",p))}
-  bady=FALSE
-  for (i in 1:p) 
-  {
-      if(levely[i] != i) bady=TRUE
-  }
-cat("Table of y values",fill=TRUE)
-print(table(y))
-if (bady) {pandterm("Invalid y")}
-n=length(y)
-k=ncol(X)
-pm1=p-1
-if(nrow(X)/n != pm1) {pandterm(paste("X has ",nrow(X)," rows; must be = (p-1)n"))}
-#
-# check for prior elements
-#
-if(missing(Prior)) 
-  { betabar=rep(0,k) ; A=.01*diag(k) ; nu=pm1+3; V=nu*diag(pm1)}
-else 
-  {if(is.null(Prior$betabar)) {betabar=rep(0,k)} else {betabar=Prior$betabar}
-   if(is.null(Prior$A)) {A=.01*diag(k)} else {A=Prior$A}
-   if(is.null(Prior$nu)) {nu=pm1+3} else {nu=Prior$nu}
-   if(is.null(Prior$V)) {V=nu*diag(pm1)} else {V=Prior$V}}
-if(length(betabar) != k) pandterm("length betabar ne k")
-if(sum(dim(A)==c(k,k)) != 2) pandterm("A is of incorrect dimension")
-if(nu < 1) pandterm("invalid nu value")
-if(sum(dim(V)==c(pm1,pm1)) != 2) pandterm("V is of incorrect dimension")
-#
-# check for Mcmc 
-#
-if(missing(Mcmc)) pandterm("Requires Mcmc argument -- at least R must be included")
-if(is.null(Mcmc$R)) {pandterm("Requires element R of Mcmc")} else {R=Mcmc$R}
-if(is.null(Mcmc$beta0)) {beta0=rep(0,k)} else {beta0=Mcmc$beta0}
-if(is.null(Mcmc$sigma0)) {sigma0=diag(pm1)} else {sigma0=Mcmc$sigma0}
-if(length(beta0) != k) pandterm("beta0 is not of length k")
-if(sum(dim(sigma0) == c(pm1,pm1)) != 2) pandterm("sigma0 is of incorrect dimension")
-if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
-#
-# print out problem
-#
-cat(" ",fill=TRUE)
-cat("Starting Gibbs Sampler for MNP",fill=TRUE)
-cat("  ",n," obs; ",p," choice alternatives; ",k," indep vars (including intercepts)",fill=TRUE)
-cat("  ",R," reps; keeping every ",keep,"th draw",fill=TRUE)
-cat(" ",fill=TRUE)
-cat("Table of y values",fill=TRUE)
-print(table(y))
-cat("Prior Parms:",fill=TRUE)
-cat("betabar",fill=TRUE)
-print(betabar)
-cat("A",fill=TRUE)
-print(A)
-cat("nu",fill=TRUE)
-print(nu)
-cat("V",fill=TRUE)
-print(V)
-cat(" ",fill=TRUE)
-cat("MCMC Parms:",fill=TRUE)
-cat("R= ",R,fill=TRUE)
-cat("initial beta= ",beta0,fill=TRUE)
-cat("initial sigma= ",sigma0,fill=TRUE)
-cat(" ",fill=TRUE)
-#
-# allocate space for draws
-#
-sigmadraw=matrix(double(floor(R/keep)*pm1*pm1),ncol=pm1*pm1)
-betadraw=matrix(double(floor(R/keep)*k),ncol=k)
-wnew=double(nrow(X))
-betanew=double(k)
-
-#
-#  set initial values of w,beta, sigma (or root of inv)
-#
-wold=c(rep(0,nrow(X)))
-betaold=beta0
-C=chol(solve(sigma0))
-#
-#  C is upper triangular root of sigma^-1 (G) = C'C
-#
-#  create functions needed
-#
-drawwc=function(w,mu,y,sigi) {
-      .C("draww",w=as.double(w),as.double(mu),as.double(sigi),
-        as.integer(length(y)),as.integer(ncol(sigi)),as.integer(y))$w}
-
-draww=
-function(w,X,y,beta,sigmai){
-#
-#   draw latent vector
-#
-#  	w is n x (p-1) vector
-#       X ix n(p-1) x k  matrix
-#       y is multinomial 1,..., p
-#       beta is k x 1 vector
-#       sigmai is (p-1) x (p-1) 
-#
-
-Xbeta=as.vector(X%*%beta)
-drawwc(w,Xbeta,y,sigmai)
-}
-
-itime=proc.time()[3]
-cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
-for (rep in 1:R) 
-   {
-   #
-   # draw w given beta(rep-1),sigma(rep-1)
-   #
-   sigmai=crossprod(C)
-   wnew=draww(wold,X,y,betaold,sigmai)
-   #
-   # draw beta given w(rep) and sigma(rep-1)
-   #
-   #  note:  if Sigma^-1 (G) = C'C then Var(Ce)=CSigmaC' = I
-   #  first, transform w_i = X_ibeta + e_i by premultiply by C
-   #
-   zmat=matrix(cbind(wnew,X),nrow=pm1)
-   zmat=C%*%zmat
-   zmat=matrix(zmat,nrow=nrow(X))
-   betanew=breg(zmat[,1],zmat[,2:(k+1)],betabar,A)
-   #
-   # draw sigmai given w and beta
-   #
-   epsilon=matrix((wnew-X%*%betanew),nrow=pm1)
-   S=crossprod(t(epsilon))
-   W=rwishart(nu+n,chol2inv(chol(V+S)))
-   C=W$C
-   #
-   #       print time to completion and draw # every 100th draw
-   #
-   if(rep%%100 == 0)
-     {ctime=proc.time()[3]
-      timetoend=((ctime-itime)/rep)*(R+1-rep)
-      cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
-      fsh()}
-   #
-   #       save every keepth draw
-   #
-   if(rep%%keep ==0)
-      {mkeep=rep/keep
-      betadraw[mkeep,]=betanew
-      sigmadraw[mkeep,]=as.vector(W$IW)}
-   wold=wnew
-   betaold=betanew
-   }
-ctime = proc.time()[3]
-cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
-
-attributes(betadraw)$class=c("bayesm.mat","mcmc")
-attributes(betadraw)$mcpar=c(1,R,keep)
-attributes(sigmadraw)$class=c("bayesm.var","bayesm.mat","mcmc")
-attributes(sigmadraw)$mcpar=c(1,R,keep)
-list(betadraw=betadraw,sigmadraw=sigmadraw)
-}
+rmnpGibbs=function(Data,Prior,Mcmc) {
+#
+# Revision History:
+#   modified by rossi 12/18/04 to include error checking
+#   3/07 added classes
+#   W. Taylor 4/15 - added nprint option to MCMC argument
+#
+# purpose:  Gibbs MNP model with full covariance matrix
+#
+# Arguments:
+#   Data contains 
+#      p the number of choice alternatives
+#      y -- a vector of length n with choices (takes on values from 1, .., p)
+#      X -- n(p-1) x k matrix of covariates (including intercepts)
+#           note: X is the differenced matrix unlike MNL X=stack(X_1,..,X_n) 
+#                 each X_i is (p-1) x nvar
+#
+#   Prior contains a list of (betabar, A, nu, V)
+#      if elements of prior do not exist, defaults are used
+#
+#   Mcmc is a list of (beta0,sigma0,R,keep)  
+#     beta0,sigma0 are intial values, if not supplied defaults are used
+#     R is number of draws
+#     keep is thinning parm, keep every keepth draw
+#     nprint - print estimated time remaining on every nprint'th draw
+#
+# Output: a list of every keepth betadraw and sigmsdraw
+#
+#  model: 
+#    w_i = X_ibeta + e    e~N(0,Sigma)     note w_i,e are (p-1) x 1
+#    y_i = j  if w_ij > w_i-j  j=1,...,p-1
+#    y_i = p  if all w_i < 0
+#  
+#  priors:
+#    beta ~ N(betabar,A^-1)
+#    Sigma ~ IW(nu,V)
+#
+#  Check arguments
+#
+if(missing(Data)) {pandterm("Requires Data argument -- list of p, y, X")}
+  if(is.null(Data$p)) {pandterm("Requires Data element p -- number of alternatives")}
+  p=Data$p
+  if(is.null(Data$y)) {pandterm("Requires Data element y -- number of alternatives")}
+  y=Data$y
+  if(is.null(Data$X)) {pandterm("Requires Data element X -- matrix of covariates")}
+  X=Data$X
+#
+# check data for validity
+#
+levely=as.numeric(levels(as.factor(y)))
+if(length(levely) != p) {pandterm(paste("y takes on ",length(levely),
+  " values -- must be ",p))}
+  bady=FALSE
+  for (i in 1:p) 
+  {
+      if(levely[i] != i) bady=TRUE
+  }
+cat("Table of y values",fill=TRUE)
+print(table(y))
+if (bady) {pandterm("Invalid y")}
+n=length(y)
+k=ncol(X)
+pm1=p-1
+if(nrow(X)/n != pm1) {pandterm(paste("X has ",nrow(X)," rows; must be = (p-1)n"))}
+#
+# check for prior elements
+#
+if(missing(Prior)) 
+  { betabar=rep(0,k) ; A=BayesmConstant.A*diag(k) ; nu=pm1+3; V=nu*diag(pm1)}
+else 
+  {if(is.null(Prior$betabar)) {betabar=rep(0,k)} else {betabar=Prior$betabar}
+   if(is.null(Prior$A)) {A=BayesmConstant.A*diag(k)} else {A=Prior$A}
+   if(is.null(Prior$nu)) {nu=pm1+BayesmConstant.nuInc} else {nu=Prior$nu}
+   if(is.null(Prior$V)) {V=nu*diag(pm1)} else {V=Prior$V}}
+if(length(betabar) != k) pandterm("length betabar ne k")
+if(sum(dim(A)==c(k,k)) != 2) pandterm("A is of incorrect dimension")
+if(nu < 1) pandterm("invalid nu value")
+if(sum(dim(V)==c(pm1,pm1)) != 2) pandterm("V is of incorrect dimension")
+#
+# check for Mcmc 
+#
+if(missing(Mcmc)) pandterm("Requires Mcmc argument -- at least R must be included")
+if(is.null(Mcmc$R)) {pandterm("Requires element R of Mcmc")} else {R=Mcmc$R}
+if(is.null(Mcmc$beta0)) {beta0=rep(0,k)} else {beta0=Mcmc$beta0}
+if(is.null(Mcmc$sigma0)) {sigma0=diag(pm1)} else {sigma0=Mcmc$sigma0}
+if(length(beta0) != k) pandterm("beta0 is not of length k")
+if(sum(dim(sigma0) == c(pm1,pm1)) != 2) pandterm("sigma0 is of incorrect dimension")
+if(is.null(Mcmc$keep)) {keep=BayesmConstant.keep} else {keep=Mcmc$keep}
+if(is.null(Mcmc$nprint)) {nprint=BayesmConstant.nprint} else {nprint=Mcmc$nprint}
+  if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
+#
+# print out problem
+#
+cat(" ",fill=TRUE)
+cat("Starting Gibbs Sampler for MNP",fill=TRUE)
+cat("  ",n," obs; ",p," choice alternatives; ",k," indep vars (including intercepts)",fill=TRUE)
+cat(" ",fill=TRUE)
+cat("Table of y values",fill=TRUE)
+print(table(y))
+cat("Prior Parms:",fill=TRUE)
+cat("betabar",fill=TRUE)
+print(betabar)
+cat("A",fill=TRUE)
+print(A)
+cat("nu",fill=TRUE)
+print(nu)
+cat("V",fill=TRUE)
+print(V)
+cat(" ",fill=TRUE)
+cat("MCMC Parms:",fill=TRUE)
+cat("  ",R," reps; keeping every ",keep,"th draw"," nprint= ",nprint,fill=TRUE)
+cat("initial beta= ",beta0,fill=TRUE)
+cat("initial sigma= ",sigma0,fill=TRUE)
+cat(" ",fill=TRUE)
+
+###################################################################
+# Wayne Taylor
+# 09/03/2014
+###################################################################
+loopout = rmnpGibbs_rcpp_loop(R,keep,nprint,pm1,y,X,beta0,sigma0,V,nu,betabar,A);
+###################################################################
+
+attributes(loopout$betadraw)$class=c("bayesm.mat","mcmc")
+attributes(loopout$betadraw)$mcpar=c(1,R,keep)
+attributes(loopout$sigmadraw)$class=c("bayesm.var","bayesm.mat","mcmc")
+attributes(loopout$sigmadraw)$mcpar=c(1,R,keep)
+
+return(loopout)
+}
diff --git a/R/rmultireg.R b/R/rmultireg.R
deleted file mode 100755
index 2064b47..0000000
--- a/R/rmultireg.R
+++ /dev/null
@@ -1,56 +0,0 @@
-rmultireg=
-function(Y,X,Bbar,A,nu,V)
-{
-#
-# revision history:
-#    changed 1/11/05 by P. Rossi to fix sum of squares error
-#
-# purpose:
-#    draw from posterior for Multivariate Regression Model with
-#    natural conjugate prior
-# arguments:
-#    Y is n x m matrix
-#    X is n x k
-#    Bbar is the prior mean of regression coefficients  (k x m)
-#    A is prior precision matrix
-#    nu, V are parameters for prior on Sigma
-# output:
-#    list of B, Sigma draws of matrix of coefficients and Sigma matrix
-# model:
-#    Y=XB+U  cov(u_i) = Sigma
-#    B is k x m matrix of coefficients
-# priors:  beta|Sigma  ~ N(betabar,Sigma (x) A^-1)
-#                   betabar=vec(Bbar)
-#                   beta = vec(B) 
-#          Sigma ~ IW(nu,V) or Sigma^-1 ~ W(nu, V^-1)
-n=nrow(Y)
-m=ncol(Y)
-k=ncol(X)
-#
-# first draw Sigma
-#
-RA=chol(A)
-W=rbind(X,RA)
-Z=rbind(Y,RA%*%Bbar)
-#   note:  Y,X,A,Bbar must be matrices!
-IR=backsolve(chol(crossprod(W)),diag(k))
-#                      W'W = R'R  &  (W'W)^-1 = IRIR'  -- this is the UL decomp!
-Btilde=crossprod(t(IR))%*%crossprod(W,Z)   
-#                      IRIR'(W'Z) = (X'X+A)^-1(X'Y + ABbar)
-S=crossprod(Z-W%*%Btilde)
-#                      E'E
-rwout=rwishart(nu+n,chol2inv(chol(V+S)))
-#
-# now draw B given Sigma
-#   note beta ~ N(vec(Btilde),Sigma (x) Covxxa)
-#       Cov=(X'X + A)^-1  = IR t(IR)  
-#       Sigma=CICI'    
-#       therefore, cov(beta)= Omega = CICI' (x) IR IR' = (CI (x) IR) (CI (x) IR)'
-#	so to draw beta we do beta= vec(Btilde) +(CI (x) IR)vec(Z_mk)  
-#			Z_mk is m x k matrix of N(0,1)
-#	since vec(ABC) = (C' (x) A)vec(B), we have 
-#		B = Btilde + IR Z_mk CI'
-#
-B = Btilde + IR%*%matrix(rnorm(m*k),ncol=m)%*%t(rwout$CI)
-return(list(B=B,Sigma=rwout$IW))
-}
diff --git a/R/rmvpGibbs.R b/R/rmvpGibbs.R
deleted file mode 100755
index dd27f36..0000000
--- a/R/rmvpGibbs.R
+++ /dev/null
@@ -1,202 +0,0 @@
-rmvpGibbs=
-function(Data,Prior,Mcmc) 
-{
-#
-# Revision History:
-#   modified by rossi 12/18/04 to include error checking
-#   3/07 added classes
-#
-# purpose:  Gibbs MVP model with full covariance matrix
-#
-# Arguments:
-#   Data contains 
-#      p the number of alternatives (could be time or could be from pick j of p survey)
-#      y -- a vector of length n*p of indicators (1 if "chosen" if not)
-#      X -- np x k matrix of covariates (including intercepts)
-#                 each X_i is p x nvar
-#
-#   Prior contains a list of (betabar, A, nu, V)
-#      if elements of prior do not exist, defaults are used
-#
-#   Mcmc is a list of (beta0,sigma0,R,keep)  
-#     beta0,sigma0 are intial values, if not supplied defaults are used
-#     R is number of draws
-#     keep is thinning parm, keep every keepth draw
-#
-# Output: a list of every keepth betadraw and sigmsdraw
-#
-#  model: 
-#    w_i = X_ibeta + e    e~N(0,Sigma)     note w_i,e are p x 1
-#    y_ij = 1 if w_ij > 0 else y_ij = 0  
-#  
-#  priors:
-#    beta ~ N(betabar,A^-1) in prior
-#    Sigma ~ IW(nu,V)
-#
-#  Check arguments
-#
-pandterm=function(message) {stop(message,call.=FALSE)}
-if(missing(Data)) {pandterm("Requires Data argument -- list of p, y, X")}
-  if(is.null(Data$p)) {pandterm("Requires Data element p -- number of binary indicators")}
-  p=Data$p
-  if(is.null(Data$y)) {pandterm("Requires Data element y -- values of binary indicators")}
-  y=Data$y
-  if(is.null(Data$X)) {pandterm("Requires Data element X -- matrix of covariates")}
-  X=Data$X
-#
-# check data for validity
-#
-levely=as.numeric(levels(as.factor(y)))
-  bady=FALSE
-  for (i in 0:1) 
-  { if(levely[i+1] != i) {bady=TRUE} }
-cat("Table of y values",fill=TRUE)
-print(table(y))
-if (bady) {pandterm("Invalid y")}
-if (length(y)%%p !=0) {pandterm("length of y is not a multiple of p")}
-n=length(y)/p
-k=ncol(X)
-if(nrow(X) != (n*p)) {pandterm(paste("X has ",nrow(X)," rows; must be = p*n"))}
-#
-# check for prior elements
-#
-if(missing(Prior)) 
-  { betabar=rep(0,k) ; A=.01*diag(k) ; nu=p+3; V=nu*diag(p)}
-else 
-  {if(is.null(Prior$betabar)) {betabar=rep(0,k)} else {betabar=Prior$betabar}
-   if(is.null(Prior$A)) {A=.01*diag(k)} else {A=Prior$A}
-   if(is.null(Prior$nu)) {nu=p+3} else {nu=Prior$nu}
-   if(is.null(Prior$V)) {V=nu*diag(p)} else {V=Prior$V}}
-if(length(betabar) != k) pandterm("length betabar ne k")
-if(sum(dim(A)==c(k,k)) != 2) pandterm("A is of incorrect dimension")
-if(nu < 1) pandterm("invalid nu value")
-if(sum(dim(V)==c(p,p)) != 2) pandterm("V is of incorrect dimension")
-#
-# check for Mcmc 
-#
-if(missing(Mcmc)) pandterm("Requires Mcmc argument -- at least R must be included")
-if(is.null(Mcmc$R)) {pandterm("Requires element R of Mcmc")} else {R=Mcmc$R}
-if(is.null(Mcmc$beta0)) {beta0=rep(0,k)} else {beta0=Mcmc$beta0}
-if(is.null(Mcmc$sigma0)) {sigma0=diag(p)} else {sigma0=Mcmc$sigma0}
-if(length(beta0) != k) pandterm("beta0 is not of length k")
-if(sum(dim(sigma0) == c(p,p)) != 2) pandterm("sigma0 is of incorrect dimension")
-if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
-#
-# print out problem
-#
-cat(" ",fill=TRUE)
-cat("Starting Gibbs Sampler for MVP",fill=TRUE)
-cat("  ",n," obs of ",p," binary indicators; ",k," indep vars (including intercepts)",fill=TRUE)
-cat("  ",R," reps; keeping every ",keep,"th draw",fill=TRUE)
-cat(" ",fill=TRUE)
-cat("Prior Parms:",fill=TRUE)
-cat("betabar",fill=TRUE)
-print(betabar)
-cat("A",fill=TRUE)
-print(A)
-cat("nu",fill=TRUE)
-print(nu)
-cat("V",fill=TRUE)
-print(V)
-cat(" ",fill=TRUE)
-cat("MCMC Parms:",fill=TRUE)
-cat("R= ",R,fill=TRUE)
-cat("initial beta= ",beta0,fill=TRUE)
-cat("initial sigma= ",fill=TRUE)
-print(sigma0)
-cat(" ",fill=TRUE)
-
-#
-# allocate space for draws
-#
-sigmadraw=matrix(double(floor(R/keep)*p*p),ncol=p*p)
-betadraw=matrix(double(floor(R/keep)*k),ncol=k)
-wnew=double(nrow(X))
-betanew=double(k)
-
-#
-#  set initial values of w,beta, sigma (or root of inv)
-#
-wold=c(rep(0,nrow(X)))
-betaold=beta0
-C=chol(solve(sigma0))
-#
-#  C is upper triangular root of sigma^-1 (G) = C'C
-#
-#  create functions needed
-#
-drawwMvpC=function(w,mu,y,sigi) {
-	p=ncol(sigi)
-      .C("draww_mvp",w=as.double(w),as.double(mu),as.double(sigi),
-        as.integer(length(w)/p),as.integer(p),as.integer(y))$w}
-
-drawwMvp=
-function(w,X,y,beta,sigmai){
-#
-#   draw latent vector
-#
-#  	w is n x (p-1) vector
-#       X ix n(p-1) x k  matrix
-#       y is n x (p-1) vector of binary (0,1) outcomes 
-#       beta is k x 1 vector
-#       sigmai is (p-1) x (p-1) 
-#
-
-Xbeta=as.vector(X%*%beta)
-drawwMvpC(w,Xbeta,y,sigmai)
-}
-
-itime=proc.time()[3]
-cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
-for (rep in 1:R) 
-   {
-   #
-   # draw w given beta(rep-1),sigma(rep-1)
-   #
-   sigmai=crossprod(C)
-   wnew=drawwMvp(wold,X,y,betaold,sigmai)
-   #
-   # draw beta given w(rep) and sigma(rep-1)
-   #
-   #  note:  if Sigma^-1 (G) = C'C then Var(Ce)=CSigmaC' = I
-   #  first, transform w_i = X_ibeta + e_i by premultiply by C
-   #
-   zmat=matrix(cbind(wnew,X),nrow=p)
-   zmat=C%*%zmat
-   zmat=matrix(zmat,nrow=nrow(X))
-   betanew=breg(zmat[,1],zmat[,2:(k+1)],betabar,A)
-   #
-   # draw sigmai given w and beta
-   #
-   epsilon=matrix((wnew-X%*%betanew),nrow=p)
-   S=crossprod(t(epsilon))
-   W=rwishart(nu+n,chol2inv(chol(V+S)))
-   C=W$C
-   #
-   #       print time to completion and draw # every 100th draw
-   #
-   if(rep%%100 == 0)
-     {ctime=proc.time()[3]
-      timetoend=((ctime-itime)/rep)*(R+1-rep)
-      cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
-      fsh()}
-   #
-   #       save every keepth draw
-   #
-   if(rep%%keep ==0)
-      {mkeep=rep/keep
-      betadraw[mkeep,]=betanew
-      sigmadraw[mkeep,]=as.vector(W$IW)}
-   wold=wnew
-   betaold=betanew
-   }
-ctime = proc.time()[3]
-cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
-
-attributes(betadraw)$class=c("bayesm.mat","mcmc")
-attributes(betadraw)$mcpar=c(1,R,keep)
-attributes(sigmadraw)$class=c("bayesm.var","bayesm.mat","mcmc")
-attributes(sigmadraw)$mcpar=c(1,R,keep)
-
-return(list(betadraw=betadraw,sigmadraw=sigmadraw))
-}
diff --git a/R/rmvpgibbs_rcpp.r b/R/rmvpgibbs_rcpp.r
new file mode 100644
index 0000000..29ffd14
--- /dev/null
+++ b/R/rmvpgibbs_rcpp.r
@@ -0,0 +1,123 @@
+rmvpGibbs=function(Data,Prior,Mcmc){
+#
+# Revision History:
+#   modified by rossi 12/18/04 to include error checking
+#   3/07 added classes
+#   W. Taylor 4/15 - added nprint option to MCMC argument
+#
+#
+# purpose:  Gibbs MVP model with full covariance matrix
+#
+# Arguments:
+#   Data contains 
+#      p the number of alternatives (could be time or could be from pick j of p survey)
+#      y -- a vector of length n*p of indicators (1 if "chosen" if not)
+#      X -- np x k matrix of covariates (including intercepts)
+#                 each X_i is p x nvar
+#
+#   Prior contains a list of (betabar, A, nu, V)
+#      if elements of prior do not exist, defaults are used
+#
+#   Mcmc is a list of (beta0,sigma0,R,keep)  
+#     beta0,sigma0 are intial values, if not supplied defaults are used
+#     R is number of draws
+#     keep is thinning parm, keep every keepth draw
+#     nprint - print estimated time remaining on every nprint'th draw
+#
+# Output: a list of every keepth betadraw and sigmsdraw
+#
+#  model: 
+#    w_i = X_ibeta + e    e~N(0,Sigma)     note w_i,e are p x 1
+#    y_ij = 1 if w_ij > 0 else y_ij = 0  
+#  
+#  priors:
+#    beta ~ N(betabar,A^-1) in prior
+#    Sigma ~ IW(nu,V)
+#
+#  Check arguments
+#
+if(missing(Data)) {pandterm("Requires Data argument -- list of p, y, X")}
+if(is.null(Data$p)) {pandterm("Requires Data element p -- number of binary indicators")}
+p=Data$p
+if(is.null(Data$y)) {pandterm("Requires Data element y -- values of binary indicators")}
+y=Data$y
+if(is.null(Data$X)) {pandterm("Requires Data element X -- matrix of covariates")}
+X=Data$X
+#
+# check data for validity
+#
+levely=as.numeric(levels(as.factor(y)))
+bady=FALSE
+for (i in 0:1) 
+{ if(levely[i+1] != i) {bady=TRUE} }
+cat("Table of y values",fill=TRUE)
+print(table(y))
+if (bady) {pandterm("Invalid y")}
+if (length(y)%%p !=0) {pandterm("length of y is not a multiple of p")}
+n=length(y)/p
+k=ncol(X)
+if(nrow(X) != (n*p)) {pandterm(paste("X has ",nrow(X)," rows; must be = p*n"))}
+#
+# check for prior elements
+#
+if(missing(Prior)) 
+{ betabar=rep(0,k) ; A=BayesmConstant.A*diag(k) ; nu=p+BayesmConstant.nuInc; V=nu*diag(p)}
+else 
+{if(is.null(Prior$betabar)) {betabar=rep(0,k)} else {betabar=Prior$betabar}
+ if(is.null(Prior$A)) {A=BayesmConstant.A*diag(k)} else {A=Prior$A}
+ if(is.null(Prior$nu)) {nu=p+BayesmConstant.nuInc} else {nu=Prior$nu}
+ if(is.null(Prior$V)) {V=nu*diag(p)} else {V=Prior$V}}
+if(length(betabar) != k) pandterm("length betabar ne k")
+if(sum(dim(A)==c(k,k)) != 2) pandterm("A is of incorrect dimension")
+if(nu < 1) pandterm("invalid nu value")
+if(sum(dim(V)==c(p,p)) != 2) pandterm("V is of incorrect dimension")
+#
+# check for Mcmc 
+#
+if(missing(Mcmc)) pandterm("Requires Mcmc argument -- at least R must be included")
+if(is.null(Mcmc$R)) {pandterm("Requires element R of Mcmc")} else {R=Mcmc$R}
+if(is.null(Mcmc$beta0)) {beta0=rep(0,k)} else {beta0=Mcmc$beta0}
+if(is.null(Mcmc$sigma0)) {sigma0=diag(p)} else {sigma0=Mcmc$sigma0}
+if(length(beta0) != k) pandterm("beta0 is not of length k")
+if(sum(dim(sigma0) == c(p,p)) != 2) pandterm("sigma0 is of incorrect dimension")
+if(is.null(Mcmc$keep)) {keep=BayesmConstant.keep} else {keep=Mcmc$keep}
+if(is.null(Mcmc$nprint)) {nprint=BayesmConstant.nprint} else {nprint=Mcmc$nprint}
+  if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
+#
+# print out problem
+#
+cat(" ",fill=TRUE)
+cat("Starting Gibbs Sampler for MVP",fill=TRUE)
+cat("  ",n," obs of ",p," binary indicators; ",k," indep vars (including intercepts)",fill=TRUE)
+cat(" ",fill=TRUE)
+cat("Prior Parms:",fill=TRUE)
+cat("betabar",fill=TRUE)
+print(betabar)
+cat("A",fill=TRUE)
+print(A)
+cat("nu",fill=TRUE)
+print(nu)
+cat("V",fill=TRUE)
+print(V)
+cat(" ",fill=TRUE)
+cat("MCMC Parms:",fill=TRUE)
+cat("  ",R," reps; keeping every ",keep,"th draw"," nprint= ",nprint,fill=TRUE)
+cat("initial beta= ",beta0,fill=TRUE)
+cat("initial sigma= ",fill=TRUE)
+print(sigma0)
+cat(" ",fill=TRUE)
+
+###################################################################
+# Wayne Taylor
+# 09/03/2014
+###################################################################
+loopout = rmvpGibbs_rcpp_loop(R,keep,nprint,p,y,X,beta0,sigma0,V,nu,betabar,A);
+###################################################################
+
+attributes(loopout$betadraw)$class=c("bayesm.mat","mcmc")
+attributes(loopout$betadraw)$mcpar=c(1,R,keep)
+attributes(loopout$sigmadraw)$class=c("bayesm.var","bayesm.mat","mcmc")
+attributes(loopout$sigmadraw)$mcpar=c(1,R,keep)
+
+return(loopout)
+}
\ No newline at end of file
diff --git a/R/rmvst.R b/R/rmvst.R
deleted file mode 100755
index 674cab4..0000000
--- a/R/rmvst.R
+++ /dev/null
@@ -1,8 +0,0 @@
-rmvst=
-function(nu,mu,root){
-#
-# function to draw from MV s-t  with nu df, mean mu, Sigma=t(root)%*%root
-#      root is upper triangular cholesky root
-nvec=t(root)%*%rnorm(length(mu))
-return(nvec/sqrt(rchisq(1,nu)/nu) + mu)
-}
diff --git a/R/rnegbinRw.R b/R/rnegbinrw_rcpp.r
similarity index 55%
rename from R/rnegbinRw.R
rename to R/rnegbinrw_rcpp.r
index 340b0b8..d177d7b 100755
--- a/R/rnegbinRw.R
+++ b/R/rnegbinrw_rcpp.r
@@ -1,220 +1,156 @@
-rnegbinRw = 
-function(Data, Prior, Mcmc) {
-
-#   Revision History
-#	  Sridhar Narayanan - 05/2005
-#         P. Rossi 6/05
-#         3/07 added classes
-#
-#   Model
-#       (y|lambda,alpha) ~ Negative Binomial(Mean = lambda, Overdispersion par = alpha)
-#
-#       ln(lambda) =  X * beta
-#               
-#   Priors
-#       beta ~ N(betabar, A^-1)
-#       alpha ~ Gamma(a,b) where mean = a/b and variance = a/(b^2)
-#
-#   Arguments
-#       Data = list of y, X
-#              e.g. regdata[[i]]=list(y=y,X=X)
-#              X has nvar columns including a first column of ones
-#
-#       Prior - list containing the prior parameters
-#           betabar, A - mean of beta prior, inverse of variance covariance of beta prior
-#           a, b - parameters of alpha prior
-#
-#       Mcmc - list containing
-#           R is number of draws
-#           keep is thinning parameter (def = 1)
-#           s_beta - scaling parameter for beta RW (def = 2.93/sqrt(nvar))
-#           s_alpha - scaling parameter for alpha RW (def = 2.93)
-#           beta0 - initial guesses for parameters, if not supplied default values are used
-#
-
-
-#
-# Definitions of functions used within rhierNegbinRw
-#
-llnegbin = 
-function(par,X,y, nvar) {
-# Computes the log-likelihood
-    beta = par[1:nvar]
-    alpha = exp(par[nvar+1])+1.0e-50
-    mean=exp(X%*%beta)
-    prob=alpha/(alpha+mean)
-    prob=ifelse(prob<1.0e-100,1.0e-100,prob)
-     out=dnbinom(y,size=alpha,prob=prob,log=TRUE)
-     return(sum(out))
-}
-
-lpostbetai = 
-function(beta, alpha, X, y, betabar, A) {
-# Computes the unnormalized log posterior for beta
-    lambda = exp(X %*% beta)
-    p = alpha/(alpha + lambda)
-    residual = as.vector(beta - betabar)
-    sum(alpha * log(p) + y * log(1-p)) - 0.5*( t(residual)%*%A%*%residual)
-}
-
-
-lpostalpha = 
-function(alpha, beta, X,y, a, b) {
-# Computes the unnormalized log posterior for alpha
-    sum(log(dnbinom(y,size=alpha,mu=exp(X%*%beta)))) + (a-1)*log(alpha) - b* alpha
-}
-
-
-#
-# Error Checking
-#
-pandterm=function(message) {stop(message,call.=FALSE)}
-if(missing(Data)) {pandterm("Requires Data argument -- list of X and y")}
-if(is.null(Data$X)) {pandterm("Requires Data element X")} else {X=Data$X}
-if(is.null(Data$y)) {pandterm("Requires Data element y")} else {y=Data$y}
-nvar = ncol(X)
-
-if (length(y) != nrow(X)) {pandterm("Mismatch in the number of observations in X and y")}
-nobs=length(y)
-
-#
-# check for prior elements
-#
-if(missing(Prior)) {
-    betabar=rep(0,nvar); A=0.01*diag(nvar) ;  a=0.5; b=0.1;
-}
-else {
-    if(is.null(Prior$betabar)) {betabar=rep(0,nvar)} else {betabar=Prior$betabar}
-    if(is.null(Prior$A)) {A=0.01*diag(nvar)} else {A=Prior$A}
-    if(is.null(Prior$a)) {a=0.5} else {a=Prior$a}
-    if(is.null(Prior$b)) {b=0.1} else {b=Prior$b}
-}
-
-if(length(betabar) != nvar) pandterm("betabar is of incorrect dimension")
-if(sum(dim(A)==c(nvar,nvar)) != 2) pandterm("A is of incorrect dimension")
-if((length(a) != 1) | (a <=0)) pandterm("a should be a positive number")
-if((length(b) != 1) | (b <=0)) pandterm("b should be a positive number")
-
-#
-# check for Mcmc 
-#
-if(missing(Mcmc)) pandterm("Requires Mcmc argument -- at least R")
-if(is.null(Mcmc$R)) {pandterm("Requires element R of Mcmc")} else {R=Mcmc$R}
-if(is.null(Mcmc$beta0)) {beta0=rep(0,nvar)} else {beta0=Mcmc$beta0}
-if(length(beta0) !=nvar) pandterm("beta0 is not of dimension nvar")
-if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
-if(is.null(Mcmc$s_alpha)) {cat("Using default s_alpha = 2.93",fill=TRUE); s_alpha=2.93} 
-    else {s_alpha = Mcmc$s_alpha} 
-if(is.null(Mcmc$s_beta)) {cat("Using default s_beta = 2.93/sqrt(nvar)",fill=TRUE); s_beta=2.93/sqrt(nvar)} 
-    else {s_beta = Mcmc$s_beta}
-
-#
-# print out problem
-#
-cat(" ",fill=TRUE)
-cat("Starting Random Walk Metropolis Sampler for Negative Binomial Regression",fill=TRUE)
-cat("  ",nobs," obs; ",nvar," covariates (including intercept); ",fill=TRUE)
-cat("Prior Parameters:",fill=TRUE)
-cat("betabar",fill=TRUE)
-print(betabar)
-cat("A",fill=TRUE)
-print(A)
-cat("a",fill=TRUE)
-print(a)
-cat("b",fill=TRUE)
-print(b)
-cat(" ",fill=TRUE)
-cat("MCMC Parms: ",fill=TRUE)
-cat("  ",R," reps; keeping every ",keep,"th draw",fill=TRUE)
-cat("s_alpha = ",s_alpha,fill=TRUE)
-cat("s_beta = ",s_beta,fill=TRUE)
-cat(" ",fill=TRUE)
-
-par = rep(0,(nvar+1))
-cat(" Initializing RW Increment Covariance Matrix...",fill=TRUE)
-fsh()
-mle = optim(par,llnegbin, X=X, y=y, nvar=nvar, method="L-BFGS-B", upper=c(Inf,Inf,Inf,log(100000000)), hessian=TRUE, control=list(fnscale=-1))
-fsh()
-beta_mle=mle$par[1:nvar]
-alpha_mle = exp(mle$par[nvar+1])
-varcovinv = -mle$hessian
-beta = beta0
-betacvar = s_beta*solve(varcovinv[1:nvar,1:nvar])
-betaroot = t(chol(betacvar))
-alpha = alpha_mle
-alphacvar = s_alpha/varcovinv[nvar+1,nvar+1]
-alphacroot = sqrt(alphacvar)
-cat("beta_mle = ",beta_mle,fill=TRUE)
-cat("alpha_mle = ",alpha_mle, fill = TRUE)
-fsh()
-
-oldlpostbeta = 0
-nacceptbeta = 0
-nacceptalpha = 0
-clpostbeta = 0
-
-alphadraw = rep(0,floor(R/keep))
-betadraw=matrix(double(floor(R/keep)*(nvar)),ncol=nvar)
-llike=rep(0,floor(R/keep))
-
-#
-#	start main iteration loop
-#
-itime=proc.time()[3]
-cat(" ",fill=TRUE)
-cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
-fsh()
-
-for (r in 1:R) 
-{
-#   Draw beta
-    betac = beta + betaroot%*%rnorm(nvar)
-    oldlpostbeta = lpostbetai(beta, alpha, X, y, betabar,A)
-    clpostbeta = lpostbetai(betac, alpha, X, y, betabar,A)
-        
-    ldiff=clpostbeta-oldlpostbeta
-    acc=min(1,exp(ldiff))
-    if(acc < 1) {unif=runif(1)} else {unif=0}
-    if (unif <= acc) {
-        beta=betac
-        nacceptbeta=nacceptbeta+1
-    }
- 
-
-#   Draw alpha
-    logalphac = rnorm(1,mean=log(alpha), sd=alphacroot)
-    oldlpostalpha = lpostalpha(alpha, beta, X, y,  a, b)
-    clpostalpha = lpostalpha(exp(logalphac), beta, X, y, a, b)
-    ldiff=clpostalpha-oldlpostalpha
-    acc=min(1,exp(ldiff))
-    if(acc < 1) {unif=runif(1)} else {unif=0}
-    if (unif <= acc) {
-        alpha=exp(logalphac)
-        nacceptalpha=nacceptalpha+1
-    }
-
-if(r%%100 == 0)
-    {ctime=proc.time()[3]
-    timetoend=((ctime-itime)/r)*(R-r)
-    cat(" ",r," (",round(timetoend/60,1),")",fill=TRUE)
-    fsh()}
-
-if(r%%keep == 0) {
-    mkeep=r/keep
-    betadraw[mkeep,]=beta
-    alphadraw[mkeep] = alpha
-    llike[mkeep]=llnegbin(c(beta,alpha),X,y,nvar)
-    }
-}
-
-ctime = proc.time()[3]
-cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
-
-
-attributes(betadraw)$class=c("bayesm.mat","mcmc")
-attributes(betadraw)$mcpar=c(1,R,keep)
-attributes(alphadraw)$class=c("bayesm.mat","mcmc")
-attributes(alphadraw)$mcpar=c(1,R,keep)
-return(list(llike=llike,betadraw=betadraw,alphadraw=alphadraw,
-     acceptrbeta=nacceptbeta/R*100,acceptralpha=nacceptalpha/R*100))
-}
+rnegbinRw=function(Data, Prior, Mcmc){
+#   Revision History
+#	  Sridhar Narayanan - 05/2005
+#         P. Rossi 6/05
+#         3/07 added classes
+#   Keunwoo Kim 11/2014
+#         1. added "alphafix" argument 
+#         2. rewrote the code more clearly (in C++)
+#   W. Taylor 4/15 - added nprint option to MCMC argument
+#
+#   Model
+#       (y|lambda,alpha) ~ Negative Binomial(Mean = lambda, Overdispersion par = alpha)
+#
+#       ln(lambda) =  X * beta
+#               
+#   Priors
+#       beta ~ N(betabar, A^-1)
+#       alpha ~ Gamma(a,b) where mean = a/b and variance = a/(b^2)
+#
+#   Arguments
+#       Data = list of y, X
+#              e.g. regdata[[i]]=list(y=y,X=X)
+#              X has nvar columns including a first column of ones
+#
+#       Prior - list containing the prior parameters
+#           betabar, A - mean of beta prior, inverse of variance covariance of beta prior
+#           a, b - parameters of alpha prior
+#
+#       Mcmc - list containing
+#           R is number of draws
+#           keep is thinning parameter (def = 1)
+#           nprint - print estimated time remaining on every nprint'th draw (def = 100)
+#           s_beta - scaling parameter for beta RW (def = 2.93/sqrt(nvar))
+#           s_alpha - scaling parameter for alpha RW (def = 2.93)
+#           beta0 - initial guesses for parameters, if not supplied default values are used
+#           alpha - value of alpha fixed. If it is given, draw only beta
+#
+
+#
+# Definitions of functions used within rnegbinRw
+#
+llnegbin = 
+function(par,X,y, nvar) {
+# Computes the log-likelihood
+    beta = par[1:nvar]
+    alpha = exp(par[nvar+1])+1.0e-50
+    mean=exp(X%*%beta)
+    prob=alpha/(alpha+mean)
+    prob=ifelse(prob<1.0e-100,1.0e-100,prob)
+     out=dnbinom(y,size=alpha,prob=prob,log=TRUE)
+     return(sum(out))
+}
+
+#
+# Error Checking
+#
+if(missing(Data)) {pandterm("Requires Data argument -- list of X and y")}
+if(is.null(Data$X)) {pandterm("Requires Data element X")} else {X=Data$X}
+if(is.null(Data$y)) {pandterm("Requires Data element y")} else {y=Data$y}
+nvar = ncol(X)
+
+if (length(y) != nrow(X)) {pandterm("Mismatch in the number of observations in X and y")}
+nobs=length(y)
+
+#
+# check for prior elements
+#
+if(missing(Prior)) {
+    betabar=rep(0,nvar); A=BayesmConstant.A*diag(nvar) ;  a=BayesmConstant.agammaprior; b=BayesmConstant.bgammaprior;
+}
+else {
+    if(is.null(Prior$betabar)) {betabar=rep(0,nvar)} else {betabar=Prior$betabar}
+    if(is.null(Prior$A)) {A=BayesmConstant.A*diag(nvar)} else {A=Prior$A}
+    if(is.null(Prior$a)) {a=BayesmConstant.agammaprior} else {a=Prior$a}
+    if(is.null(Prior$b)) {b=BayesmConstant.bgammaprior} else {b=Prior$b}
+}
+
+if(length(betabar) != nvar) pandterm("betabar is of incorrect dimension")
+if(sum(dim(A)==c(nvar,nvar)) != 2) pandterm("A is of incorrect dimension")
+if((length(a) != 1) | (a <=0)) pandterm("a should be a positive number")
+if((length(b) != 1) | (b <=0)) pandterm("b should be a positive number")
+
+#
+# check for Mcmc 
+#
+if(missing(Mcmc)) pandterm("Requires Mcmc argument -- at least R")
+if(is.null(Mcmc$R)) {pandterm("Requires element R of Mcmc")} else {R=Mcmc$R}
+if(is.null(Mcmc$beta0)) {beta0=rep(0,nvar)} else {beta0=Mcmc$beta0}
+if(length(beta0) !=nvar) pandterm("beta0 is not of dimension nvar")
+if(is.null(Mcmc$keep)) {keep=BayesmConstant.keep} else {keep=Mcmc$keep}
+if(is.null(Mcmc$nprint)) {nprint=BayesmConstant.nprint} else {nprint=Mcmc$nprint}
+  if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
+if(is.null(Mcmc$s_alpha)) {cat("Using default s_alpha = 2.93",fill=TRUE); s_alpha=BayesmConstant.RRScaling} 
+    else {s_alpha = Mcmc$s_alpha} 
+if(is.null(Mcmc$s_beta)) {cat("Using default s_beta = 2.93/sqrt(nvar)",fill=TRUE); s_beta=BayesmConstant.RRScaling/sqrt(nvar)} 
+    else {s_beta = Mcmc$s_beta}
+# Keunwoo Kim 11/2014 #############################################
+if(is.null(Mcmc$alpha)) {fixalpha=FALSE} else {fixalpha=TRUE; alpha=Mcmc$alpha}
+if(fixalpha & alpha<=0) pandterm("alpha is not positive")
+###################################################################
+
+#
+# print out problem
+#
+cat(" ",fill=TRUE)
+cat("Starting Random Walk Metropolis Sampler for Negative Binomial Regression",fill=TRUE)
+cat("  ",nobs," obs; ",nvar," covariates (including intercept); ",fill=TRUE)
+cat("Prior Parameters:",fill=TRUE)
+cat("betabar",fill=TRUE)
+print(betabar)
+cat("A",fill=TRUE)
+print(A)
+cat("a",fill=TRUE)
+print(a)
+cat("b",fill=TRUE)
+print(b)
+cat(" ",fill=TRUE)
+cat("MCMC Parms: ",fill=TRUE)
+cat("R= ",R," keep= ",keep," nprint= ",nprint,fill=TRUE)
+cat("s_alpha = ",s_alpha,fill=TRUE)
+cat("s_beta = ",s_beta,fill=TRUE)
+cat(" ",fill=TRUE)
+
+par = rep(0,(nvar+1))
+cat(" Initializing RW Increment Covariance Matrix...",fill=TRUE)
+fsh()
+mle = optim(par,llnegbin, X=X, y=y, nvar=nvar, method="L-BFGS-B", upper=c(Inf,Inf,Inf,log(100000000)), hessian=TRUE, control=list(fnscale=-1))
+fsh()
+beta_mle=mle$par[1:nvar]
+alpha_mle = exp(mle$par[nvar+1])
+varcovinv = -mle$hessian
+beta = beta0
+betacvar = s_beta*solve(varcovinv[1:nvar,1:nvar])
+betaroot = t(chol(betacvar))
+if(!fixalpha) {alpha = alpha_mle}
+alphacvar = s_alpha/varcovinv[nvar+1,nvar+1]
+alphacroot = sqrt(alphacvar)
+cat("beta_mle = ",beta_mle,fill=TRUE)
+cat("alpha_mle = ",alpha_mle, fill = TRUE)
+fsh()
+
+###################################################################
+# Keunwoo Kim
+# 09/03/2014
+###################################################################
+if (fixalpha) {alpha=Mcmc$alpha}
+draws=rnegbinRw_rcpp_loop(y, X, betabar, chol(A), a, b, beta, alpha, fixalpha, betaroot, alphacroot, R, keep, nprint)
+###################################################################
+
+attributes(draws$betadraw)$class=c("bayesm.mat","mcmc")
+attributes(draws$betadraw)$mcpar=c(1,R,keep)
+attributes(draws$alphadraw)$class=c("bayesm.mat","mcmc")
+attributes(draws$alphadraw)$mcpar=c(1,R,keep)
+return(list(betadraw=draws$betadraw,alphadraw=draws$alphadraw,
+            acceptrbeta=draws$nacceptbeta/R*keep,acceptralpha=draws$nacceptalpha/R*keep))
+}
diff --git a/R/rnmixGibbs.R b/R/rnmixgibbs_rcpp.r
old mode 100755
new mode 100644
similarity index 57%
rename from R/rnmixGibbs.R
rename to R/rnmixgibbs_rcpp.r
index add5a6a..104bd4c
--- a/R/rnmixGibbs.R
+++ b/R/rnmixgibbs_rcpp.r
@@ -1,198 +1,174 @@
-rnmixGibbs= 
-function(Data,Prior,Mcmc)
-{
-#
-# Revision History: 
-#   P. Rossi 3/05
-#   add check to see if Mubar is a vector  9/05
-#   fixed bug in saving comps draw comps[[mkeep]]=  9/05
-#   fixed so that ncomp can be =1; added check that nobs >= 2*ncomp   12/06
-#   3/07 added classes
-#   added log-likelihood  9/08
-#
-# purpose: do Gibbs sampling inference for a mixture of multivariate normals
-#
-# arguments:
-#     Data is a list of y which is an n x k matrix of data -- each row
-#       is an iid draw from the normal mixture
-#     Prior is a list of (Mubar,A,nu,V,a,ncomp)
-#       ncomp is required
-#       if elements of the prior don't exist, defaults are assumed
-#     Mcmc is a list of R and keep (thinning parameter)
-# Output:
-#     list with elements
-#     pdraw -- R/keep x ncomp array of mixture prob draws
-#     zdraw -- R/keep x nobs array of indicators of mixture comp identity for each obs
-#     compsdraw -- list of R/keep lists of lists of comp parm draws
-#        e.g. compsdraw[[i]] is ith draw -- list of ncomp lists
-#             compsdraw[[i]][[j]] is list of parms for jth normal component
-#             if jcomp=compsdraw[[i]][j]]
-#                        ~N(jcomp[[1]],Sigma), Sigma = t(R)%*%R, R^{-1} = jcomp[[2]]
-#
-# Model:
-#        y_i ~ N(mu_ind,Sigma_ind)
-#        ind ~ iid multinomial(p)  p is a 1x ncomp vector of probs
-# Priors:
-#        mu_j ~ N(mubar,Sigma (x) A^-1)
-#        mubar=vec(Mubar)
-#        Sigma_j ~ IW(nu,V)
-#        note: this is the natural conjugate prior -- a special case of multivariate 
-#              regression
-#        p ~ Dirchlet(a)
-#
-#  check arguments
-#
-pandterm=function(message) {stop(message,call.=FALSE)}
-#
-# -----------------------------------------------------------------------------------------
-llnmix=function(Y,z,comps){
-#
-# evaluate likelihood for mixture of normals
-#
-zu=unique(z)
-ll=0.0
-for(i in 1:length(zu)){
-    Ysel=Y[z==zu[i],,drop=FALSE]
-    ll=ll+sum(apply(Ysel,1,lndMvn,mu=comps[[zu[i]]]$mu,rooti=comps[[zu[i]]]$rooti))
-}
-return(ll)
-}
-# -----------------------------------------------------------------------------------------
-if(missing(Data)) {pandterm("Requires Data argument -- list of y")}
-    if(is.null(Data$y)) {pandterm("Requires Data element y")}
-    y=Data$y
-#
-# check data for validity
-#
-if(!is.matrix(y)) {pandterm("y must be a matrix")}
-nobs=nrow(y)
-dimy=ncol(y)
-#
-# check for Prior
-#
-if(missing(Prior)) {pandterm("requires Prior argument ")}
-else
-   {
-    if(is.null(Prior$ncomp)) {pandterm("requires number of mix comps -- Prior$ncomp")}
-       else {ncomp=Prior$ncomp}
-    if(is.null(Prior$Mubar)) {Mubar=matrix(rep(0,dimy),nrow=1)} 
-       else {Mubar=Prior$Mubar; if(is.vector(Mubar)) {Mubar=matrix(Mubar,nrow=1)}}
-    if(is.null(Prior$A)) {A=matrix(c(.01),ncol=1)} 
-       else {A=Prior$A}
-    if(is.null(Prior$nu)) {nu=dimy+2} 
-       else {nu=Prior$nu}
-    if(is.null(Prior$V)) {V=nu*diag(dimy)} 
-       else {V=Prior$V}
-    if(is.null(Prior$a)) {a=c(rep(5,ncomp))}
-       else {a=Prior$a}
-   }
-#
-# check for adequate no. of observations
-#
-if(nobs<2*ncomp)
-   {pandterm("too few obs, nobs should be >= 2*ncomp")}
-#
-# check dimensions of Priors
-#
-if(ncol(A) != nrow(A) || ncol(A) != 1)
-   {pandterm(paste("bad dimensions for A",dim(A)))}
-if(!is.matrix(Mubar))
-   {pandterm("Mubar must be a matrix")}
-if(nrow(Mubar) != 1 || ncol(Mubar) != dimy) 
-   {pandterm(paste("bad dimensions for Mubar",dim(Mubar)))}
-if(ncol(V) != nrow(V) || ncol(V) != dimy)
-   {pandterm(paste("bad dimensions for V",dim(V)))}
-if(length(a) != ncomp)
-   {pandterm(paste("a wrong length, length= ",length(a)))}
-bada=FALSE
-for(i in 1:ncomp){if(a[i] < 0) bada=TRUE}
-if(bada) pandterm("invalid values in a vector")
-#
-# check MCMC argument
-#
-
-if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
-else
-   {
-    if(is.null(Mcmc$R)) 
-       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
-    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
-    if(is.null(Mcmc$LogLike)) {LogLike=FALSE} else {LogLike=Mcmc$LogLike}
-   }
-
-#
-# print out the problem
-#
-cat(" Starting Gibbs Sampler for Mixture of Normals",fill=TRUE)
-cat(" ",nobs," observations on ",dimy," dimensional data",fill=TRUE)
-cat("     using ",ncomp," mixture components",fill=TRUE)
-cat(" ",fill=TRUE)
-cat(" Prior Parms: ",fill=TRUE)
-cat("  mu_j ~ N(mubar,Sigma (x) A^-1)",fill=TRUE)
-cat("  mubar = ",fill=TRUE)
-print(Mubar)
-cat("  precision parm for prior variance of mu vectors (A)= ",A,fill=TRUE)
-cat("  Sigma_j ~ IW(nu,V) nu= ",nu,fill=TRUE)
-cat("  V =",fill=TRUE)
-print(V)
-cat("  Dirichlet parameters ",fill=TRUE)
-print(a)
-cat(" ",fill=TRUE)
-cat(" Mcmc Parms: R= ",R," keep= ",keep," LogLike= ",LogLike,fill=TRUE)
-
-pdraw=matrix(double(floor(R/keep)*ncomp),ncol=ncomp)
-zdraw=matrix(double(floor(R/keep)*nobs),ncol=nobs)
-compdraw=list()
-compsd=list()
-if(LogLike) ll=double(floor(R/keep))
-
-#
-# set initial values of z
-#
-z=rep(c(1:ncomp),(floor(nobs/ncomp)+1))
-z=z[1:nobs]
-cat(" ",fill=TRUE)
-cat("starting value for z",fill=TRUE)
-print(table(z))
-cat(" ",fill=TRUE)
-p=c(rep(1,ncomp))/ncomp # note this is not used
-
-
-#
-# start main iteration loop
-#
-itime=proc.time()[3]
-cat("MCMC Iteration (est time to end -min) ",fill=TRUE)
-fsh()
-for(rep in 1:R)
-{
-   out = rmixGibbs(y,Mubar,A,nu,V,a,p,z,compsd)
-   compsd=out$comps
-   p=out$p
-   z=out$z
-   if(rep%%100==0)
-     {
-      ctime=proc.time()[3]
-      timetoend=((ctime-itime)/rep)*(R-rep)
-      cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
-      fsh()
-      }
-   if(rep%%keep ==0)
-     {
-      mkeep=rep/keep
-      pdraw[mkeep,]=p
-      zdraw[mkeep,]=z
-      compdraw[[mkeep]]=compsd
-      if(LogLike) ll[mkeep]=llnmix(y,z,compsd)
-      }
-}
-ctime = proc.time()[3]
-cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
-
-nmix=list(probdraw=pdraw,zdraw=zdraw,compdraw=compdraw)
-attributes(nmix)$class="bayesm.nmix"
-if(LogLike) 
-	{return(list(ll=ll,nmix=nmix))}
-else
-	{return(list(nmix=nmix))}
-}
+rnmixGibbs= function(Data,Prior,Mcmc){
+#
+# Revision History: 
+#   P. Rossi 3/05
+#   add check to see if Mubar is a vector  9/05
+#   fixed bug in saving comps draw comps[[mkeep]]=  9/05
+#   fixed so that ncomp can be =1; added check that nobs >= 2*ncomp   12/06
+#   3/07 added classes
+#   added log-likelihood  9/08
+#   W. Taylor 4/15 - added nprint option to MCMC argument
+#
+# purpose: do Gibbs sampling inference for a mixture of multivariate normals
+#
+# arguments:
+#     Data is a list of y which is an n x k matrix of data -- each row
+#       is an iid draw from the normal mixture
+#     Prior is a list of (Mubar,A,nu,V,a,ncomp)
+#       ncomp is required
+#       if elements of the prior don't exist, defaults are assumed
+#     Mcmc is a list of R, keep (thinning parameter), and nprint
+# Output:
+#     list with elements
+#     pdraw -- R/keep x ncomp array of mixture prob draws
+#     zdraw -- R/keep x nobs array of indicators of mixture comp identity for each obs
+#     compsdraw -- list of R/keep lists of lists of comp parm draws
+#        e.g. compsdraw[[i]] is ith draw -- list of ncomp lists
+#             compsdraw[[i]][[j]] is list of parms for jth normal component
+#             if jcomp=compsdraw[[i]][[j]]
+#                        ~N(jcomp[[1]],Sigma), Sigma = t(R)%*%R, R^{-1} = jcomp[[2]]
+#
+# Model:
+#        y_i ~ N(mu_ind,Sigma_ind)
+#        ind ~ iid multinomial(p)  p is a 1x ncomp vector of probs
+# Priors:
+#        mu_j ~ N(mubar,Sigma (x) A^-1)
+#        mubar=vec(Mubar)
+#        Sigma_j ~ IW(nu,V)
+#        note: this is the natural conjugate prior -- a special case of multivariate 
+#              regression
+#        p ~ Dirichlet(a)
+#
+#  check arguments
+#
+#
+# -----------------------------------------------------------------------------------------
+llnmix=function(Y,z,comps){
+  #
+  # evaluate likelihood for mixture of normals
+  #
+  zu=unique(z)
+  ll=0.0
+  for(i in 1:length(zu)){
+    Ysel=Y[z==zu[i],,drop=FALSE]
+    ll=ll+sum(apply(Ysel,1,lndMvn,mu=comps[[zu[i]]]$mu,rooti=comps[[zu[i]]]$rooti))
+  }
+  return(ll)
+}
+# -----------------------------------------------------------------------------------------
+if(missing(Data)) {pandterm("Requires Data argument -- list of y")}
+if(is.null(Data$y)) {pandterm("Requires Data element y")}
+y=Data$y
+#
+# check data for validity
+#
+if(!is.matrix(y)) {pandterm("y must be a matrix")}
+nobs=nrow(y)
+dimy=ncol(y)
+#
+# check for Prior
+#
+if(missing(Prior)) {pandterm("requires Prior argument ")}
+else
+{
+  if(is.null(Prior$ncomp)) {pandterm("requires number of mix comps -- Prior$ncomp")}
+  else {ncomp=Prior$ncomp}
+  if(is.null(Prior$Mubar)) {Mubar=matrix(rep(0,dimy),nrow=1)} 
+  else {Mubar=Prior$Mubar; if(is.vector(Mubar)) {Mubar=matrix(Mubar,nrow=1)}}
+  if(is.null(Prior$A)) {A=matrix(BayesmConstant.A,ncol=1)} 
+  else {A=Prior$A}
+  if(is.null(Prior$nu)) {nu=dimy+BayesmConstant.nuInc} 
+  else {nu=Prior$nu}
+  if(is.null(Prior$V)) {V=nu*diag(dimy)} 
+  else {V=Prior$V}
+  if(is.null(Prior$a)) {a=c(rep(BayesmConstant.a,ncomp))}
+  else {a=Prior$a}
+}
+#
+# check for adequate no. of observations
+#
+if(nobs<2*ncomp)
+{pandterm("too few obs, nobs should be >= 2*ncomp")}
+#
+# check dimensions of Priors
+#
+if(ncol(A) != nrow(A) || ncol(A) != 1)
+{pandterm(paste("bad dimensions for A",dim(A)))}
+if(!is.matrix(Mubar))
+{pandterm("Mubar must be a matrix")}
+if(nrow(Mubar) != 1 || ncol(Mubar) != dimy) 
+{pandterm(paste("bad dimensions for Mubar",dim(Mubar)))}
+if(ncol(V) != nrow(V) || ncol(V) != dimy)
+{pandterm(paste("bad dimensions for V",dim(V)))}
+if(length(a) != ncomp)
+{pandterm(paste("a wrong length, length= ",length(a)))}
+bada=FALSE
+for(i in 1:ncomp){if(a[i] < 0) bada=TRUE}
+if(bada) pandterm("invalid values in a vector")
+#
+# check MCMC argument
+#
+
+if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
+else
+{
+  if(is.null(Mcmc$R)) 
+  {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
+  if(is.null(Mcmc$keep)) {keep=BayesmConstant.keep} else {keep=Mcmc$keep}
+  if(is.null(Mcmc$nprint)) {nprint=BayesmConstant.nprint} else {nprint=Mcmc$nprint}
+    if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
+  if(is.null(Mcmc$LogLike)) {LogLike=FALSE} else {LogLike=Mcmc$LogLike}
+}
+
+#
+# print out the problem
+#
+cat(" Starting Gibbs Sampler for Mixture of Normals",fill=TRUE)
+cat(" ",nobs," observations on ",dimy," dimensional data",fill=TRUE)
+cat("     using ",ncomp," mixture components",fill=TRUE)
+cat(" ",fill=TRUE)
+cat(" Prior Parms: ",fill=TRUE)
+cat("  mu_j ~ N(mubar,Sigma (x) A^-1)",fill=TRUE)
+cat("  mubar = ",fill=TRUE)
+print(Mubar)
+cat("  precision parm for prior variance of mu vectors (A)= ",A,fill=TRUE)
+cat("  Sigma_j ~ IW(nu,V) nu= ",nu,fill=TRUE)
+cat("  V =",fill=TRUE)
+print(V)
+cat("  Dirichlet parameters ",fill=TRUE)
+print(a)
+cat(" ",fill=TRUE)
+cat(" Mcmc Parms: R= ",R," keep= ",keep," nprint= ",nprint," LogLike= ",LogLike,fill=TRUE)
+
+# pdraw=matrix(double(floor(R/keep)*ncomp),ncol=ncomp)
+# zdraw=matrix(double(floor(R/keep)*nobs),ncol=nobs)
+# compdraw=list()
+compsd=list()
+if(LogLike) ll=double(floor(R/keep))
+
+#
+# set initial values of z
+#
+z=rep(c(1:ncomp),(floor(nobs/ncomp)+1))
+z=z[1:nobs]
+cat(" ",fill=TRUE)
+cat("starting value for z",fill=TRUE)
+print(table(z))
+cat(" ",fill=TRUE)
+p=c(rep(1,ncomp))/ncomp # note this is not used
+fsh()
+
+#Wayne Taylor 8/18/14#####################################################
+nmix = rnmixGibbs_rcpp_loop(y, Mubar, A, nu, V, a, p, z, R, keep, nprint);
+##########################################################################
+
+attributes(nmix)$class="bayesm.nmix"
+  if(LogLike){
+    zdraw = nmix$zdraw
+    compdraw = nmix$compdraw
+    ll = lapply(seq_along(compdraw), function(i) llnmix(y, zdraw[i,], compdraw[[i]]))
+    return(list(ll=ll,nmix=nmix))
+  }else{
+    return(list(nmix=nmix))
+  }
+}
\ No newline at end of file
diff --git a/R/rordprobitGibbs.R b/R/rordprobitgibbs_rcpp.r
similarity index 50%
rename from R/rordprobitGibbs.R
rename to R/rordprobitgibbs_rcpp.r
index f505d2a..2cc5e90 100755
--- a/R/rordprobitGibbs.R
+++ b/R/rordprobitgibbs_rcpp.r
@@ -1,290 +1,182 @@
-rordprobitGibbs=
- function(Data,Prior,Mcmc)
-{
-#
-# revision history:
-#   3/07  Hsiu-Wen Liu
-#   3/07  fixed naming of dstardraw rossi
-#    
-# purpose: 
-#   draw from posterior for ordered probit using Gibbs Sampler
-#   and metropolis RW
-#
-# Arguments:
-#   Data - list of X,y,k  
-#     X is nobs x nvar, y is nobs vector of 1,2,.,k (ordinal variable)
-#   Prior - list of A, betabar
-#     A is nvar x nvar prior preci matrix
-#     betabar is nvar x 1 prior mean
-#     Ad is ndstar x ndstar prior preci matrix of dstar (ncut is number of cut-offs being estimated)
-#     dstarbar is ndstar x 1 prior mean of dstar
-#   Mcmc
-#     R is number of draws
-#     keep is thinning parameter
-#     s is scale parameter of random work Metropolis    
-#      
-# Output:
-#   list of betadraws and cutdraws
-#
-# Model: 
-#    z=Xbeta + e  < 0  e ~N(0,1)
-#    y=1,..,k, if z~c(c[k], c[k+1])
-#
-#    cutoffs = c[1],..,c[k+1]
-#    dstar = dstar[1],dstar[k-2]
-#    set c[1]=-100, c[2]=0, ...,c[k+1]=100
-#
-#    c[3]=exp(dstar[1]),c[4]=c[3]+exp(dstar[2]),...,
-#    c[k]=c[k-1]+exp(datsr[k-2])
-#    
-# Note: 1. length of dstar = length of cutoffs - 3
-#       2. Be careful in assessing prior parameter, Ad.  .1 is too small for many applications.
-#
-# Prior: beta ~ N(betabar,A^-1)
-#        dstar ~ N(dstarbar, Ad^-1)
-#
-#
-# ----------------------------------------------------------------------
-# define functions needed
-#
-breg1=
-function(root,X,y,Abetabar) 
-{
-#
-# p.rossi 12/04
-#
-# Purpose: draw from posterior for linear regression, sigmasq=1.0
-# 
-# Arguments:
-#  root is chol((X'X+A)^-1)
-#  Abetabar = A*betabar
-#
-# Output:  draw from posterior
-# 
-# Model: y = Xbeta + e  e ~ N(0,I)
-# Prior:  beta ~ N(betabar,A^-1)
-#
-cov=crossprod(root,root)
-betatilde=cov%*%(crossprod(X,y)+Abetabar)
-betatilde+t(root)%*%rnorm(length(betatilde))
-}
-
-#  
-#  dstartoc is a fuction to transfer dstar to its cut-off value    
-
-    dstartoc=function(dstar) {c(-100, 0, cumsum(exp(dstar)), 100)} 
-
-# compute conditional likelihood of data given cut-offs
-#
-   lldstar=function(dstar,y,mu){
-           gamma=dstartoc(dstar)
-           arg = pnorm(gamma[y+1]-mu)-pnorm(gamma[y]-mu)
-           epsilon=1.0e-50
-           arg=ifelse(arg < epsilon,epsilon,arg)
-           return(sum(log(arg)))
-           }
-
-
-dstarRwMetrop=
-function(y,mu,olddstar,s,inc.root,dstarbar,oldll,rootdi){ 
-#
-# function to execute rw metropolis for the dstar
-# y is n vector with element = 1,...,j 
-# X is n x k matrix of x values 
-# RW increments are N(0,s^2*t(inc.root)%*%inc.root)
-# prior on dstar is N(dstarbar,Sigma)  Sigma^-1=rootdi*t(rootdi)
-#	inc.root, rootdi are upper triangular
-#	this means that we are using the UL decomp of Sigma^-1 for prior 
-# olddstar is the current
-     
-     stay=0   
-     dstarc=olddstar + s*t(inc.root)%*%(matrix(rnorm(ncut),ncol=1))
-     cll=lldstar(dstarc,y,mu)
-     clpost=cll+lndMvn(dstarc,dstarbar,rootdi)
-     ldiff=clpost-oldll-lndMvn(olddstar,dstarbar,rootdi)
-     alpha=min(1,exp(ldiff))
-
-     if(alpha < 1) {unif=runif(1)} else {unif=0}
-     if (unif <= alpha)
-          {dstardraw=dstarc; oldll=cll}
-     else 
-          {dstardraw=olddstar; stay=1}
-
-return(list(dstardraw=dstardraw,oldll=oldll, stay=stay))
-}   
-
-pandterm=function(message) {stop(message,call.=FALSE)}
-#
-# ----------------------------------------------------------------------
-#
-# check arguments
-#
-if(missing(Data)) {pandterm("Requires Data argument -- list of y and X")}
-    if(is.null(Data$X)) {pandterm("Requires Data element X")}
-    X=Data$X
-    if(is.null(Data$y)) {pandterm("Requires Data element y")}
-    y=Data$y
-    if(is.null(Data$k)) {pandterm("Requires Data element k")}
-    k=Data$k
-
-nvar=ncol(X)
-nobs=length(y)  
-ndstar = k-2         # number of dstar being estimated
-ncuts = k+1          # number of cut-offs (including zero and two ends)
-ncut = ncuts-3       # number of cut-offs being estimated c[1]=-100, c[2]=0, c[k+1]=100 
-
-#
-# check data for validity
-#
-if(length(y) != nrow(X) ) {pandterm("y and X not of same row dim")}
-if(  sum(unique(y) %in% (1:k) ) < length(unique(y)) )
-  {pandterm("some value of y is not vaild")}
-
-#
-
-#
-# check for Prior
-#
-if(missing(Prior))
-   { betabar=c(rep(0,nvar)); A=.01*diag(nvar); Ad=diag(ndstar); dstarbar=c(rep(0,ndstar))}
-else
-   {
-    if(is.null(Prior$betabar)) {betabar=c(rep(0,nvar))} 
-       else {betabar=Prior$betabar}
-    if(is.null(Prior$A)) {A=.01*diag(nvar)} 
-       else {A=Prior$A}
-    if(is.null(Prior$Ad)) {Ad=diag(ndstar)} 
-       else {Ad=Prior$Ad}
-    if(is.null(Prior$dstarbar)) {dstarbar=c(rep(0,ndstar))} 
-       else {dstarbar=Prior$dstarbar}
-   }
-#
-# check dimensions of Priors
-#
-
-if(ncol(A) != nrow(A) || ncol(A) != nvar || nrow(A) != nvar) 
-   {pandterm(paste("bad dimensions for A",dim(A)))}
-if(length(betabar) != nvar)
-   {pandterm(paste("betabar wrong length, length= ",length(betabar)))}
-if(ncol(Ad) != nrow(Ad) || ncol(Ad) != ndstar || nrow(Ad) != ndstar) 
-   {pandterm(paste("bad dimensions for Ad",dim(Ad)))}
-if(length(dstarbar) != ndstar)
-   {pandterm(paste("dstarbar wrong length, length= ",length(dstarbar)))}
-
-#
-# check MCMC argument
-#
-if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
-else
-   {
-    if(is.null(Mcmc$R)) 
-       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
-    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
-    if(is.null(Mcmc$s)) {s=2.93/sqrt(ndstar)} else {s=Mcmc$s} 
-    }
-#
-# print out problem
-#
-cat(" ", fill=TRUE)
-cat("Starting Gibbs Sampler for Ordered Probit Model",fill=TRUE)
-cat("   with ",nobs,"observations",fill=TRUE)
-cat(" ", fill=TRUE)
-cat("Table of y values",fill=TRUE)
-print(table(y))
-cat(" ",fill=TRUE)
-cat("Prior Parms: ",fill=TRUE)
-cat("betabar",fill=TRUE)
-print(betabar)
-cat(" ", fill=TRUE)
-cat("A",fill=TRUE)
-print(A)
-cat(" ", fill=TRUE)
-cat("dstarbar",fill=TRUE)
-print(dstarbar)
-cat(" ", fill=TRUE)
-cat("Ad",fill=TRUE)
-print(Ad)
-cat(" ", fill=TRUE)
-cat("MCMC parms: ",fill=TRUE)
-cat("R= ",R," keep= ",keep,"s= ",s, fill=TRUE) 
-cat(" ",fill=TRUE)
-
-betadraw=matrix(double(floor(R/keep)*nvar),ncol=nvar)
-cutdraw=matrix(double(floor(R/keep)*ncuts),ncol=ncuts)
-dstardraw=matrix(double(floor(R/keep)*ndstar),ncol=ndstar)
-staydraw=array(0,dim=c(R/keep))
-
-sigma=c(rep(1,nrow(X)))
-root=chol(chol2inv(chol((crossprod(X,X)+A))))
-Abetabar=crossprod(A,betabar)
-rootdi=chol(chol2inv(chol(Ad)))
-
-# use (-Hessian+Ad)^(-1) evaluated at betahat as the basis of the 
-# covariance matrix for the random walk Metropolis increments 
-    
-    betahat = chol2inv(chol(crossprod(X,X)))%*% crossprod(X,y)
-    dstarini = c(cumsum(c( rep(0.1, ndstar))))     # set initial value for dstar   
-    dstarout = optim(dstarini, lldstar, method = "BFGS", hessian=T,
-                control = list(fnscale = -1,maxit=500,
-                reltol = 1e-06, trace=0), mu=X%*%betahat, y=y)             
-    inc.root=chol(chol2inv(chol((-dstarout$hessian+Ad))))  # chol((H+Ad)^-1) 
-
-# set initial values for MCMC  
-    
-    olddstar = c(rep(0,ndstar))
-    beta = betahat    
-    cutoffs = dstartoc (olddstar)  
-    oldll = lldstar(olddstar,y,mu=X%*%betahat)
- 
-#
-#
-#	start main iteration loop
-#
-itime=proc.time()[3]
-cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
-fsh()
-#    print time to completion and draw # every 100th draw
-#
-for (rep in 1:R) 
-{
-   # draw z given beta(i-1), sigma, y, cut-offs     
-      z = rtrun (X%*%beta, sigma=sigma, a=cutoffs[y] , b=cutoffs[y+1])
-  
-   # draw beta given z and rest
-      beta= breg1(root,X,z, Abetabar)     
-   
-    # draw gamma given z
-      metropout = dstarRwMetrop(y,X%*%beta,olddstar,s,inc.root,dstarbar,oldll,rootdi)     
-      olddstar = metropout$dstardraw
-      oldll =  metropout$oldll
-      cutoffs = dstartoc (olddstar) 
-      stay = metropout$stay  
-
-
-#    print time to completion and draw # every 100th draw
-
-  if(rep%%100 == 0)
-    {ctime=proc.time()[3]
-    timetoend=((ctime-itime)/rep)*(R-rep)
-    cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
-    fsh()}
-
-  if(rep%%keep == 0) 
-    {mkeep=rep/keep; cutdraw[mkeep,]=cutoffs; dstardraw[mkeep,]=olddstar;betadraw[mkeep,]=beta;staydraw[mkeep]=stay }
-                
-}
-    accept=1-sum(staydraw)/(R/keep)
-
-ctime = proc.time()[3]
-cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
-
-cutdraw=cutdraw[,2:k]
-attributes(cutdraw)$class="bayesm.mat"
-attributes(betadraw)$class="bayesm.mat"
-attributes(dstardraw)$class="bayesm.mat"
-attributes(cutdraw)$mcpar=c(1,R,keep)
-attributes(betadraw)$mcpar=c(1,R,keep)
-attributes(dstardraw)$mcpar=c(1,R,keep)
-
-return(list(cutdraw=cutdraw,betadraw=betadraw, dstardraw=dstardraw, accept=accept))
-}
+rordprobitGibbs=function(Data,Prior,Mcmc){
+#
+# revision history:
+#   3/07  Hsiu-Wen Liu
+#   3/07  fixed naming of dstardraw rossi
+#    
+# purpose: 
+#   draw from posterior for ordered probit using Gibbs Sampler
+#   and metropolis RW
+#
+# Arguments:
+#   Data - list of X,y,k  
+#     X is nobs x nvar, y is nobs vector of 1,2,.,k (ordinal variable)
+#   Prior - list of A, betabar
+#     A is nvar x nvar prior preci matrix
+#     betabar is nvar x 1 prior mean
+#     Ad is ndstar x ndstar prior preci matrix of dstar (ncut is number of cut-offs being estimated)
+#     dstarbar is ndstar x 1 prior mean of dstar
+#   Mcmc
+#     R is number of draws
+#     keep is thinning parameter
+#     nprint - print estimated time remaining on every nprint'th draw
+#     s is scale parameter of random walk Metropolis
+#      
+# Output:
+#   list of betadraws and cutdraws
+#
+# Model: 
+#    z=Xbeta + e  < 0  e ~N(0,1)
+#    y=1,..,k, if z~c(c[k], c[k+1])
+#
+#    cutoffs = c[1],..,c[k+1]
+#    dstar = dstar[1],dstar[k-2]
+#    set c[1]=-100, c[2]=0, ...,c[k+1]=100
+#
+#    c[3]=exp(dstar[1]),c[4]=c[3]+exp(dstar[2]),...,
+#    c[k]=c[k-1]+exp(dstar[k-2])
+#    
+# Note: 1. length of dstar = length of cutoffs - 3
+#       2. Be careful in assessing prior parameter, Ad.  .1 is too small for many applications.
+#
+# Prior: beta ~ N(betabar,A^-1)
+#        dstar ~ N(dstarbar, Ad^-1)
+#
+#
+# ----------------------------------------------------------------------
+# define functions needed
+#  dstartoc is a function to transfer dstar to its cut-off value
+
+dstartoc=function(dstar) {c(-100, 0, cumsum(exp(dstar)), 100)} 
+
+# compute conditional likelihood of data given cut-offs
+#
+lldstar=function(dstar,y,mu){
+  gamma=dstartoc(dstar)
+  arg = pnorm(gamma[y+1]-mu)-pnorm(gamma[y]-mu)
+  epsilon=1.0e-50
+  arg=ifelse(arg < epsilon,epsilon,arg)
+  return(sum(log(arg)))
+}
+#
+# ----------------------------------------------------------------------
+#
+# check arguments
+#
+if(missing(Data)) {pandterm("Requires Data argument -- list of y and X")}
+    if(is.null(Data$X)) {pandterm("Requires Data element X")}
+    X=Data$X
+    if(is.null(Data$y)) {pandterm("Requires Data element y")}
+    y=Data$y
+    if(is.null(Data$k)) {pandterm("Requires Data element k")}
+    k=Data$k
+
+nvar=ncol(X)
+nobs=length(y)  
+ndstar = k-2         # number of dstar being estimated
+ncuts = k+1          # number of cut-offs (including zero and two ends)
+ncut = ncuts-3       # number of cut-offs being estimated c[1]=-100, c[2]=0, c[k+1]=100 
+
+#
+# check data for validity
+#
+if(length(y) != nrow(X) ) {pandterm("y and X not of same row dim")}
+if(  sum(unique(y) %in% (1:k) ) < length(unique(y)) )
+  {pandterm("some value of y is not vaild")}
+
+#
+# check for Prior
+#
+if(missing(Prior))
+   { betabar=c(rep(0,nvar)); A=BayesmConstant.A*diag(nvar); Ad=diag(ndstar); dstarbar=c(rep(0,ndstar))}
+else
+   {
+    if(is.null(Prior$betabar)) {betabar=c(rep(0,nvar))} 
+       else {betabar=Prior$betabar}
+    if(is.null(Prior$A)) {A=BayesmConstant.A*diag(nvar)} 
+       else {A=Prior$A}
+    if(is.null(Prior$Ad)) {Ad=diag(ndstar)} 
+       else {Ad=Prior$Ad}
+    if(is.null(Prior$dstarbar)) {dstarbar=c(rep(0,ndstar))} 
+       else {dstarbar=Prior$dstarbar}
+   }
+#
+# check dimensions of Priors
+#
+
+if(ncol(A) != nrow(A) || ncol(A) != nvar || nrow(A) != nvar) 
+   {pandterm(paste("bad dimensions for A",dim(A)))}
+if(length(betabar) != nvar)
+   {pandterm(paste("betabar wrong length, length= ",length(betabar)))}
+if(ncol(Ad) != nrow(Ad) || ncol(Ad) != ndstar || nrow(Ad) != ndstar) 
+   {pandterm(paste("bad dimensions for Ad",dim(Ad)))}
+if(length(dstarbar) != ndstar)
+   {pandterm(paste("dstarbar wrong length, length= ",length(dstarbar)))}
+
+#
+# check MCMC argument
+#
+if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
+else
+   {
+    if(is.null(Mcmc$R)) 
+       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
+    if(is.null(Mcmc$keep)) {keep=BayesmConstant.keep} else {keep=Mcmc$keep}
+    if(is.null(Mcmc$nprint)) {nprint=BayesmConstant.nprint} else {nprint=Mcmc$nprint}
+      if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
+    if(is.null(Mcmc$s)) {s=BayesmConstant.RRScaling/sqrt(ndstar)} else {s=Mcmc$s} 
+    }
+#
+# print out problem
+#
+cat(" ", fill=TRUE)
+cat("Starting Gibbs Sampler for Ordered Probit Model",fill=TRUE)
+cat("   with ",nobs,"observations",fill=TRUE)
+cat(" ", fill=TRUE)
+cat("Table of y values",fill=TRUE)
+print(table(y))
+cat(" ",fill=TRUE)
+cat("Prior Parms: ",fill=TRUE)
+cat("betabar",fill=TRUE)
+print(betabar)
+cat(" ", fill=TRUE)
+cat("A",fill=TRUE)
+print(A)
+cat(" ", fill=TRUE)
+cat("dstarbar",fill=TRUE)
+print(dstarbar)
+cat(" ", fill=TRUE)
+cat("Ad",fill=TRUE)
+print(Ad)
+cat(" ", fill=TRUE)
+cat("MCMC parms: ",fill=TRUE)
+cat("R= ",R," keep= ",keep," nprint= ",nprint,"s= ",s, fill=TRUE) 
+cat(" ",fill=TRUE)
+
+# use (-Hessian+Ad)^(-1) evaluated at betahat as the basis of the 
+# covariance matrix for the random walk Metropolis increments 
+    
+    betahat = chol2inv(chol(crossprod(X,X)))%*% crossprod(X,y)
+    dstarini = c(cumsum(c( rep(0.1, ndstar))))     # set initial value for dstar   
+    dstarout = optim(dstarini, lldstar, method = "BFGS", hessian=T,
+                control = list(fnscale = -1,maxit=500,
+                reltol = 1e-06, trace=0), mu=X%*%betahat, y=y)             
+    inc.root=chol(chol2inv(chol((-dstarout$hessian+Ad))))  # chol((H+Ad)^-1) 
+
+###################################################################
+# Keunwoo Kim
+# 08/20/2014
+###################################################################
+draws=rordprobitGibbs_rcpp_loop(y,X,k,A,betabar,Ad,s,inc.root,dstarbar,betahat,R,keep,nprint)
+###################################################################
+
+draws$cutdraw=draws$cutdraw[,2:k]
+attributes(draws$cutdraw)$class="bayesm.mat"
+attributes(draws$betadraw)$class="bayesm.mat"
+attributes(draws$dstardraw)$class="bayesm.mat"
+attributes(draws$cutdraw)$mcpar=c(1,R,keep)
+attributes(draws$betadraw)$mcpar=c(1,R,keep)
+attributes(draws$dstardraw)$mcpar=c(1,R,keep)
+
+return(draws)
+}
diff --git a/R/rscaleUsage.R b/R/rscaleUsage.R
deleted file mode 100755
index 5ebdcfe..0000000
--- a/R/rscaleUsage.R
+++ /dev/null
@@ -1,446 +0,0 @@
-rscaleUsage=
-function(Data,Prior,Mcmc) 
-{
-#
-# purpose: run scale-usage mcmc
-#    draws y,Sigma,mu,tau,sigma,Lambda,e
-#                                R. McCulloch 12/28/04
-#    added classes 3/07
-# 
-# arguments:
-#    Data:
-#     all components are required:
-#       k:  integer giving the scale of the responses, each observation is an integer from 1,2,...k
-#       x:  data, num rows=number of respondents, num columns = number of questions
-#    Prior:
-#     all components are optional
-#       nu,V: Sigma ~ IW(nu,V)
-#       mubar,Am: mu ~N(mubar,Am^{-1})
-#       gsigma: grid for sigma
-#       gl11,gl22,gl12: grids for ij element of Lamda
-#       Lambdanu,LambdaV: Lambda ~ IW(Lambdanu,LambdaV)
-#       ge: grid for e
-#    Mcmc:
-#     all components are optional (but you would typically want to specify R= number of draws)
-#       R: number of mcmc iterations
-#       keep: frequency with which draw is kept
-#       ndghk: number of draws for ghk
-#       printevery: how often to print out how many draws are done
-#       e,y,mu,Sigma,sigma,tau,Lamda: initial values for the state
-#       doe, ...doLambda: indicates whether draw should be made
-# output:
-#    List with draws of each of Sigma,mu,tau,sigma,Lambda,e
-#    eg. result$Sigma is the draws of Sigma
-#    Each component is a matrix expept e, which is a vector
-#    for the matrices Sigma and Lambda each row transpose of the Vec
-#    eg. result$Lambda has rows (Lambda11,Lamda21,Lamda12,Lamda22)
-
-#
-# define functions needed
-#
-# -----------------------------------------------------------------------------------
-rlpx = function(x,e,k,mu,tau,Sigma,sigma,nd=500) {
-n=nrow(x); p = ncol(x)
-cc = cgetC(e,k)
-L=t(chol(Sigma))
-lpv = rep(0,n)
-offset = p*log(k)
-for(i in 1:n) {
-   Li = sigma[i]*L
-   a = cc[x[i,]]-mu-tau[i]; b = cc[x[i,]+1]-mu-tau[i]
-   ghkres = rghk(Li,a,b,nd)
-   lghkres = log(ghkres)
-   if(is.nan(lghkres)) {
-      #print("nan in ghk:")
-      #print(paste('ghkres: ',ghkres))
-      lghkres = log(1e-320)
-   }
-   if(is.infinite(lghkres)) {
-      #print("infinite in ghk:")
-      #print(paste('ghkres: ',ghkres))
-      lghkres = log(1e-320)
-   }
-   lpv[i] = lghkres + offset
-}
-sum(lpv)
-}
-rghk = function(L,a,b,nd) {
-.C('ghk',as.double(L),as.double(a),as.double(b),as.integer(nrow(L)),
-          as.integer(nd),res=double(1))$res
-}
-condd = function(Sigma) {
-p = nrow(Sigma)
-Si = solve(Sigma)
-cbeta = matrix(0,p-1,p)
-for(i in 1:p) {
-ind = (1:p)[-i]
-cbeta[,i] = -Si[ind,i]/Si[i,i]
-}
-list(beta=cbeta,s=sqrt(1/diag(Si)))
-}
-pandterm = function(message) { stop(paste("in rscaleUsage: ",message),call.=FALSE) }
-myin = function(i,ind) {i %in% ind}
-getS = function(Lam,n,moms) {
-S=matrix(0.0,2,2)
-S[1,1] = (n-1)*moms[3] + n*moms[1]^2
-S[1,2] = (n-1)*moms[4] + n*moms[1]*(moms[2]-Lam[2,2])
-S[2,1] = S[1,2]
-S[2,2] = (n-1)*moms[5] + n*(moms[2]-Lam[2,2])^2
-S
-}
-llL = function(Lam,n,S,V,nu) {
-dlam = Lam[1,1]*Lam[2,2]-Lam[1,2]^2
-M = (S+V) %*%  chol2inv(chol(Lam))
-ll = -.5*(n+nu+3)*log(dlam) -.5*sum(diag(M))
-}
-ispd = function(mat,d=nrow(mat)) {
-if(!is.matrix(mat)) {
-res = FALSE
-} else if(!((nrow(mat)==d) & (ncol(mat)==d))) {
-res = FALSE
-} else {
-diff = (t(mat)+mat)/2 - mat
-perdiff = sum(diff^2)/sum(mat^2)
-res = ((det(mat)>0) & (perdiff < 1e-10))
-}
-res
-}
-#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-# print out components of inputs ----------------------------------------------
-cat('\nIn function rscaleUsage\n\n')
-if(!missing(Data)) {
-cat('   Data has components: ')
-cat(paste(names(Data),collapse=' ')[1],'\n')
-}
-if(!missing(Prior)) {
-cat('   Prior has components: ')
-cat(paste(names(Prior),collapse=' ')[1],'\n')
-}
-if(!missing(Mcmc)) {
-cat('   Mcmc has components: ')
-cat(paste(names(Mcmc),collapse=' ')[1],'\n')
-}
-cat('\n')
-# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-# process Data argument --------------------------
-if(missing(Data)) {pandterm("Requires Data argument - list of k=question scale and x = data")}
-if(is.null(Data$k)) {
-   pandterm("k not specified")
-} else {
-   k = as.integer(Data$k)
-   if(!((k>0) & (k<50))) {pandterm("Data$k must be integer between 1 and 50")}
-}
-if(is.null(Data$x)) {
-   pandterm('x (the data), not specified')
-} else {
-   if(!is.matrix(Data$x)) {pandterm('Data$x must be a matrix')}
-   x = matrix(as.integer(Data$x),nrow=nrow(Data$x))
-   checkx = sum(sapply(as.vector(x),myin,1:k))
-   if(!(checkx == nrow(x)*ncol(x))) {pandterm('each element of Data$x must be in 1,2...k')}
-   p = ncol(x)
-   n = nrow(x)
-   if((p<2) | (n<1)) {pandterm(paste('invalid dimensions for x: nrow,ncol: ',n,p))}
-}
-# ++++++++++++++++++++++++++++++++++++++++++++++++
-
-# process Mcmc argument ---------------------
-
-#run mcmc
-R = as.integer(1000)
-keep = as.integer(1)
-ndghk= as.integer(100)
-printevery = as.integer(10)
-if(!missing(Mcmc)) {
-if(!is.null(Mcmc$R))              { R = as.integer(Mcmc$R) }
-if(!is.null(Mcmc$keep))           { keep = as.integer(Mcmc$keep) }
-if(!is.null(Mcmc$ndghk))          { ndghk = as.integer(Mcmc$ndghk) }
-if(!is.null(Mcmc$printevery))     { printevery = as.integer(Mcmc$printevery) }
-}
-if(R<1) { pandterm('R must be positive')}
-if(keep<1) { pandterm('keep must be positive') }
-if(ndghk<1) { pandterm('ndghk must be positive') }
-if(printevery<1) { pandterm('printevery must be positive') }
-
-
-#state
-y = matrix(as.double(x),nrow=nrow(x))
-mu = apply(y,2,mean)
-Sigma = var(y)
-tau = rep(0,n)
-sigma = rep(1,n)
-#Lamda = matrix(c(3.7,-.22,-.22,.32),ncol=2)
-#Lamda = matrix(c((k/4)^2,(k/4)*.5*(-.2),0,.25),nrow=2); Lamda[1,2]=Lamda[2,1]
-Lamda = matrix(c(4,0,0,.5),ncol=2)
-e=0
-if(!missing(Mcmc)) {
-if(!is.null(Mcmc$y))         { y = Mcmc$y }
-if(!is.null(Mcmc$mu))        { mu = Mcmc$mu }
-if(!is.null(Mcmc$Sigma))     { Sigma = Mcmc$Sigma }
-if(!is.null(Mcmc$tau))       { tau = Mcmc$tau }
-if(!is.null(Mcmc$sigma))     { sigma = Mcmc$sigma }
-if(!is.null(Mcmc$Lambda))    { Lamda = Mcmc$Lambda }
-if(!is.null(Mcmc$e))         { e = Mcmc$e }
-}
-if(!ispd(Sigma,p)) { pandterm(paste('Sigma must be positive definite with dimension ',p)) }
-if(!ispd(Lamda,2)) { pandterm(paste('Lambda must be positive definite with dimension ',2)) }
-if(!is.vector(mu)) { pandterm('mu must be a vector') }
-if(length(mu) != p) { pandterm(paste('mu must have length ',p)) }
-if(!is.vector(tau)) { pandterm('tau must be a vector') }
-if(length(tau) != n) { pandterm(paste('tau must have length ',n)) }
-if(!is.vector(sigma)) { pandterm('sigma must be a vector') }
-if(length(sigma) != n) { pandterm(paste('sigma must have length ',n)) }
-if(!is.matrix(y)) { pandterm('y must be a matrix') }
-if(nrow(y) != n) { pandterm(paste('y must have',n,'rows')) }
-if(ncol(y) != p) { pandterm(paste('y must have',p,'columns')) }
-
-#do draws
-domu=TRUE
-doSigma=TRUE
-dosigma=TRUE
-dotau=TRUE
-doLamda=TRUE
-doe=TRUE
-if(!missing(Mcmc)) {
-if(!is.null(Mcmc$domu))        { domu = Mcmc$domu }
-if(!is.null(Mcmc$doSigma))     { doSigma = Mcmc$doSigma }
-if(!is.null(Mcmc$dotau))       { dotau = Mcmc$dotau }
-if(!is.null(Mcmc$dosigma))     { dosigma = Mcmc$dosigma }
-if(!is.null(Mcmc$doLambda))    { doLamda = Mcmc$doLambda }
-if(!is.null(Mcmc$doe))         { doe = Mcmc$doe }
-}
-
-
-#++++++++++++++++++++++++++++++++++++++
-
-#process Prior argument ----------------------------------
-nu = p+3
-V= nu*diag(p)
-mubar = matrix(rep(k/2,p),ncol=1)
-Am = .0001*diag(p)
-gs = 200
-gsigma = 6*(1:gs)/gs
-gl11 = .1 + 5.9*(1:gs)/gs
-gl22 = .1 + 2.0*(1:gs)/gs
-#gl12 = -.8 + 1.6*(1:gs)/gs
-gl12 = -2.0 + 4*(1:gs)/gs
-nuL=20
-VL = (nuL-3)*Lamda
-ge = -.1+.2*(0:gs)/gs
-
-if(!missing(Prior)) {
-if(!is.null(Prior$nu))       { nu = Prior$nu; V = nu*diag(p) }
-if(!is.null(Prior$V))        { V = Prior$V }
-if(!is.null(Prior$mubar))    { mubar = matrix(Prior$mubar,ncol=1) }
-if(!is.null(Prior$Am))       { Am = Prior$Am }
-if(!is.null(Prior$gsigma))   { gsigma = Prior$gsigma }
-if(!is.null(Prior$gl11))     { gl11 = Prior$gl11 }
-if(!is.null(Prior$gl22))     { gl22 = Prior$gl22 }
-if(!is.null(Prior$gl12))     { gl12 = Prior$gl12 }
-if(!is.null(Prior$Lambdanu)) { nuL = Prior$Lambdanu; VL = (nuL-3)*Lamda }
-if(!is.null(Prior$LambdaV))  { VL = Prior$LambdaV }
-if(!is.null(Prior$ge))       { ge = Prior$ge }
-}
-if(!ispd(V,p)) { pandterm(paste('V must be positive definite with dimension ',p)) }
-if(!ispd(Am,p)) { pandterm(paste('Am must be positive definite with dimension ',p)) }
-if(!ispd(VL,2)) { pandterm(paste('VL must be positive definite with dimension ',2)) }
-if(nrow(mubar) != p) { pandterm(paste('mubar must have length',p)) }
-#++++++++++++++++++++++++++++++++++++++++
-
-#print out run info -------------------------
-#
-# note in the documentation and in BSM, m is used instead of p
-#    for print-out purposes I'm using m   P. Rossi 12/06
-cat('   n,m,k: ', n,p,k,'\n')
-cat('   R,keep,ndghk,printevery: ', R,keep,ndghk,printevery,'\n')
-cat('\n')
-cat('   Data:\n')
-cat('      x[1,1],x[n,1],x[1,m],x[n,m]: ',x[1,1],x[n,1],x[1,p],x[n,p],'\n\n')
-cat('   Prior:\n')
-cat('      ','nu: ',nu,'\n')
-cat('      ','V[1,1]/nu,V[m,m]/nu: ',V[1,1]/nu,V[p,p]/nu,'\n')
-cat('      ','mubar[1],mubar[m]: ',mubar[1],mubar[p],'\n')
-cat('      ','Am[1,1],Am[m,m]: ',Am[1,1],Am[p,p],'\n')
-cat('      ','Lambdanu: ',nuL,'\n')
-cat('      ','LambdaV11,22/(Lambdanu-3): ',VL[1,1]/(nuL-3),VL[2,2]/(nuL-3),'\n')
-cat('      ','sigma grid, 1,',length(gsigma),': ',gsigma[1],', ',gsigma[length(gsigma)],'\n')
-cat('      ','Lambda11 grid, 1,',length(gl11),': ',gl11[1],', ',gl11[length(gl11)],'\n')
-cat('      ','Lambda12 grid, 1,',length(gl12),': ',gl12[1],', ',gl12[length(gl12)],'\n')
-cat('      ','Lambda22 grid, 1,',length(gl22),': ',gl22[1],', ',gl22[length(gl22)],'\n')
-cat('      ','e grid, 1,',length(ge),': ',ge[1],', ',ge[length(ge)],'\n')
-cat('      ','draw e: ',doe,'\n')
-cat('      ','draw Lambda: ',doLamda,'\n')
-#++++++++++++++++++++++++++++++++++++++++++++
-
-nk = floor(R/keep)
-ndpost = nk*keep
-drSigma=matrix(0.0,nk,p^2)
-drmu = matrix(0.0,nk,p)
-drtau = matrix(0.0,nk,n)
-drsigma = matrix(0.0,nk,n)
-drLamda = matrix(0.0,nk,4)
-dre = rep(0,nk)
-
-itime = proc.time()[3]
-cat("Mcmc Iteration (est time to end - min)",'\n')
-for(rep in 1:ndpost) {
-   if(1) { # y
-      cc = cgetC(e,k)
-      bs = condd(Sigma)
-      y = matrix(.C('dy',as.integer(p),as.integer(n),y=as.double(t(y)),as.integer(t(x)),as.double(cc),as.double(mu),as.double(bs$beta),as.double(bs$s),
-	                                        as.double(tau),as.double(sigma))$y,ncol=p,byrow=TRUE)
-   }
-   if(doSigma) { #Sigma
-      Res = (t(t(y)-mu)-tau)/sigma
-      S = crossprod(Res)
-      Sigma = rwishart(nu+n,chol2inv(chol(V+S)))$IW
-   }
-   if(domu) { #mu
-      yd = y-tau
-      Si = chol2inv(chol(Sigma))
-      Vmi = sum(1/sigma^2)*Si + Am
-      R = chol(Vmi)
-      Ri = backsolve(R,diag(p))
-      Vm = chol2inv(chol(Vmi))
-      mm = Vm %*% (Si %*% (t(yd) %*% matrix(1/sigma^2,ncol=1)) + Am %*% mubar)
-      mu = as.vector(mm + Ri %*% matrix(rnorm(p),ncol=1))
-   }
-   if(dotau) { #tau
-      Ai = Lamda[1,1] - (Lamda[1,2]^2)/Lamda[2,2]
-      A = 1.0/Ai
-      onev = matrix(1.0,p,1)
-      R = chol(Sigma)
-      xx = backsolve(R,onev,transpose=TRUE)
-      yy = backsolve(R,t(y)-mu,transpose=TRUE)
-      xtx = sum(xx^2)
-      xty = as.vector(t(xx) %*% yy)
-      beta = A*Lamda[1,2]/Lamda[2,2]
-      for(j in 1:n) {
-	 s2 = xtx/sigma[j]^2   + A
-         s2 = 1.0/s2 
-	 m = s2*((xty[j]/sigma[j]^2) + beta*(log(sigma[j])-Lamda[2,2]))
-	 tau[j] = m + sqrt(s2)*rnorm(1)
-      }
-   }
-   if(dosigma) { #sigma
-      R = chol(Sigma)
-      eps = backsolve(R,t(y-tau)-mu,transpose=TRUE)
-      ete = as.vector(matrix(rep(1,p),nrow=1) %*% eps^2)
-      a= Lamda[2,2]
-      b= Lamda[1,2]/Lamda[1,1]
-      s=sqrt(Lamda[2,2]-(Lamda[1,2]^2/Lamda[1,1]))
-      for(j in 1:n) {
-	 pv = -(p+1)*log(gsigma) -.5*ete[j]/gsigma^2 -.5*((log(gsigma)-(a+b*tau[j]))/s)^2
-	 pv = exp(pv-max(pv))
-	 pv = pv/sum(pv)
-	 sigma[j] = sample(gsigma,size=1,prob=pv)
-      }
-   }
-   
-   if(doLamda) { # Lamda
-      h=log(sigma)
-      dat = cbind(tau,h)
-      temp = var(dat)
-      moms = c(mean(tau),mean(h),temp[1,1],temp[1,2],temp[2,2])
-
-      SS = getS(Lamda,n,moms)
-      rgl11 = gl11[gl11 > (Lamda[1,2]^2/Lamda[2,2])]
-      ng = length(rgl11)
-      pv = rep(0,ng)
-      for(j in 1:ng) {
-         Lamda[1,1] = rgl11[j]
-         pv[j] = llL(Lamda,n,SS,VL,nuL)
-      }
-      pv = exp(pv-max(pv)); pv = pv/sum(pv)
-      Lamda[1,1] = sample(rgl11,size=1,prob=pv)
-
-      rgl12 = gl12[(gl12<sqrt(Lamda[1,1]*Lamda[2,2])) & (gl12>-sqrt(Lamda[1,1]*Lamda[2,2]))]
-      ng = length(rgl12)
-      pv = rep(0,ng)
-      for(j in 1:ng) {
-         Lamda[1,2] = rgl12[j]; Lamda[2,1]=Lamda[1,2]
-         pv[j] = llL(Lamda,n,SS,VL,nuL)
-      }
-      pv = exp(pv-max(pv)); pv = pv/sum(pv)
-      Lamda[1,2] = sample(rgl12,size=1,prob=pv)
-      Lamda[2,1]=Lamda[1,2]
-
-      rgl22 = gl22[gl22 > (Lamda[1,2]^2/Lamda[1,1])]
-      ng = length(rgl22)
-      pv = rep(0,ng)
-      for(j in 1:ng) {
-         Lamda[2,2] = rgl22[j]
-	 SS = getS(Lamda,n,moms)
-         pv[j] = llL(Lamda,n,SS,VL,nuL)
-      }
-      pv = exp(pv-max(pv)); pv = pv/sum(pv)
-      Lamda[2,2] = sample(rgl22,size=1,prob=pv)
-   }
-
-   if(doe) { # e
-      ng = length(ge)
-      ei = which.min(abs(e-ge))
-      if(ei==1) {
-         pi =2
-         qr = .5
-      } else if(ei==ng) {
-         pi = ng-1
-         qr = .5
-      } else {
-         pi = ei + rbinom(1,1,.5)*2-1
-         qr = 1
-      }
-      eold = ge[ei]
-      eprop = ge[pi]
-      llold = rlpx(x,eold,k,mu,tau,Sigma,sigma,ndghk)
-      llprop = rlpx(x,eprop,k,mu,tau,Sigma,sigma,ndghk)
-      lrat = llprop - llold + log(qr)
-      if(lrat>0) {
-         e = eprop
-      } else {
-         paccept = min(1,exp(lrat))
-         e = ifelse(rbinom(1,1,paccept),eprop,eold)
-      }
-   }
-   mkeep = rep/keep
-   if(mkeep == floor(mkeep)) {
-      drSigma[mkeep,] = Sigma
-      drmu[mkeep,] = mu
-      drtau[mkeep,] = tau
-      drsigma[mkeep,] = sigma 
-      drLamda[mkeep,] = Lamda
-      dre[mkeep] = e
-   }
-   if((rep/printevery)==floor(rep/printevery)) {
-      ctime = proc.time()[3]
-      timetoend = ((ctime-itime)/rep)*(ndpost-rep)
-      cat(rep,' (', round(timetoend/60,1), ') \n')
-      fsh()
-   }
-}
-ctime = proc.time()[3]
-cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
-
-R=ndpost
-mudraw=drmu; taudraw=drtau; sigmadraw=drsigma; Lambdadraw=drLamda;
-edraw=dre; Sigmadraw=drSigma
-attributes(mudraw)$class=c("bayesm.mat","mcmc")
-attributes(mudraw)$mcpar=c(1,R,keep)
-attributes(taudraw)$class=c("bayesm.mat","mcmc")
-attributes(taudraw)$mcpar=c(1,R,keep)
-attributes(sigmadraw)$class=c("bayesm.mat","mcmc")
-attributes(sigmadraw)$mcpar=c(1,R,keep)
-attributes(Lambdadraw)$class=c("bayesm.mat","mcmc")
-attributes(Lambdadraw)$mcpar=c(1,R,keep)
-attributes(edraw)$class=c("bayesm.mat","mcmc")
-attributes(edraw)$mcpar=c(1,R,keep)
-attributes(Sigmadraw)$class=c("bayesm.var","bayesm.mat","mcmc")
-attributes(Sigmadraw)$mcpar=c(1,R,keep)
-return(list(Sigmadraw=Sigmadraw,mudraw=mudraw,taudraw = taudraw,
- sigmadraw=sigmadraw,Lambdadraw=Lambdadraw,edraw=edraw))
-}
-
-
-
-
diff --git a/R/rscaleusage_rcpp.r b/R/rscaleusage_rcpp.r
new file mode 100644
index 0000000..683b3bf
--- /dev/null
+++ b/R/rscaleusage_rcpp.r
@@ -0,0 +1,254 @@
+rscaleUsage=
+function(Data,Prior,Mcmc) 
+{
+#
+# purpose: run scale-usage mcmc
+#    draws y,Sigma,mu,tau,sigma,Lambda,e
+#                                R. McCulloch 12/28/04
+#    added classes 3/07
+# 
+# arguments:
+#    Data:
+#     all components are required:
+#       k:  integer giving the scale of the responses, each observation is an integer from 1,2,...k
+#       x:  data, num rows=number of respondents, num columns = number of questions
+#    Prior:
+#     all components are optional
+#       nu,V: Sigma ~ IW(nu,V)
+#       mubar,Am: mu ~N(mubar,Am^{-1})
+#       gsigma: grid for sigma
+#       gl11,gl22,gl12: grids for ij element of Lambda
+#       Lambdanu,LambdaV: Lambda ~ IW(Lambdanu,LambdaV)
+#       ge: grid for e
+#    Mcmc:
+#     all components are optional (but you would typically want to specify R= number of draws)
+#       R: number of mcmc iterations
+#       keep: frequency with which draw is kept
+#       ndghk: number of draws for ghk
+#       nprint - print estimated time remaining on every nprint'th draw
+#       e,y,mu,Sigma,sigma,tau,Lambda: initial values for the state
+#       doe, ...doLambda: indicates whether draw should be made
+# output:
+#    List with draws of each of Sigma,mu,tau,sigma,Lambda,e
+#    eg. result$Sigma is the draws of Sigma
+#    Each component is a matrix except e, which is a vector
+#    for the matrices Sigma and Lambda each row is the transpose of the Vec
+#    eg. result$Lambda has rows (Lambda11,Lambda21,Lambda12,Lambda22)
+
+#
+# define functions needed
+#
+# -----------------------------------------------------------------------------------
+myin = function(i,ind) {i %in% ind}
+
+ispd = function(mat,d=nrow(mat)) {
+if(!is.matrix(mat)) {
+res = FALSE
+} else if(!((nrow(mat)==d) & (ncol(mat)==d))) {
+res = FALSE
+} else {
+diff = (t(mat)+mat)/2 - mat
+perdiff = sum(diff^2)/sum(mat^2)
+res = ((det(mat)>0) & (perdiff < 1e-10))
+}
+res
+}
+#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+# print out components of inputs ----------------------------------------------
+cat('\nIn function rscaleUsage\n\n')
+if(!missing(Data)) {
+cat('   Data has components: ')
+cat(paste(names(Data),collapse=' ')[1],'\n')
+}
+if(!missing(Prior)) {
+cat('   Prior has components: ')
+cat(paste(names(Prior),collapse=' ')[1],'\n')
+}
+if(!missing(Mcmc)) {
+cat('   Mcmc has components: ')
+cat(paste(names(Mcmc),collapse=' ')[1],'\n')
+}
+cat('\n')
+# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+# process Data argument --------------------------
+if(missing(Data)) {pandterm("Requires Data argument - list of k=question scale and x = data")}
+if(is.null(Data$k)) {
+   pandterm("k not specified")
+} else {
+   k = as.integer(Data$k)
+   if(!((k>0) & (k<50))) {pandterm("Data$k must be integer between 1 and 50")}
+}
+if(is.null(Data$x)) {
+   pandterm('x (the data), not specified')
+} else {
+   if(!is.matrix(Data$x)) {pandterm('Data$x must be a matrix')}
+   x = matrix(as.integer(Data$x),nrow=nrow(Data$x))
+   checkx = sum(sapply(as.vector(x),myin,1:k))
+   if(!(checkx == nrow(x)*ncol(x))) {pandterm('each element of Data$x must be in 1,2...k')}
+   p = ncol(x)
+   n = nrow(x)
+   if((p<2) | (n<1)) {pandterm(paste('invalid dimensions for x: nrow,ncol: ',n,p))}
+}
+# ++++++++++++++++++++++++++++++++++++++++++++++++
+
+# process Mcmc argument ---------------------
+
+#run mcmc
+R = 1000
+keep = BayesmConstant.keep
+ndghk= 100
+nprint = BayesmConstant.nprint
+if(!missing(Mcmc)) {
+if(!is.null(Mcmc$R))              { R = as.integer(Mcmc$R) }
+if(!is.null(Mcmc$keep))           { keep = as.integer(Mcmc$keep) }
+if(!is.null(Mcmc$ndghk))          { ndghk = as.integer(Mcmc$ndghk) }
+if(!is.null(Mcmc$nprint))         { nprint = as.integer(Mcmc$nprint) }
+}
+if(R<1) { pandterm('R must be positive')}
+if(keep<1) { pandterm('keep must be positive') }
+if(ndghk<1) { pandterm('ndghk must be positive') }
+if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
+
+#state
+y = matrix(as.double(x),nrow=nrow(x))
+mu = apply(y,2,mean)
+Sigma = var(y)
+tau = rep(0,n)
+sigma = rep(1,n)
+#Lambda = matrix(c(3.7,-.22,-.22,.32),ncol=2)
+#Lambda = matrix(c((k/4)^2,(k/4)*.5*(-.2),0,.25),nrow=2); Lambda[1,2]=Lambda[2,1]
+Lambda = matrix(c(4,0,0,.5),ncol=2)
+e=0
+if(!missing(Mcmc)) {
+if(!is.null(Mcmc$y))         { y = Mcmc$y }
+if(!is.null(Mcmc$mu))        { mu = Mcmc$mu }
+if(!is.null(Mcmc$Sigma))     { Sigma = Mcmc$Sigma }
+if(!is.null(Mcmc$tau))       { tau = Mcmc$tau }
+if(!is.null(Mcmc$sigma))     { sigma = Mcmc$sigma }
+if(!is.null(Mcmc$Lambda))    { Lambda = Mcmc$Lambda }
+if(!is.null(Mcmc$e))         { e = Mcmc$e }
+}
+if(!ispd(Sigma,p)) { pandterm(paste('Sigma must be positive definite with dimension ',p)) }
+if(!ispd(Lambda,2)) { pandterm(paste('Lambda must be positive definite with dimension ',2)) }
+if(!is.vector(mu)) { pandterm('mu must be a vector') }
+if(length(mu) != p) { pandterm(paste('mu must have length ',p)) }
+if(length(tau) != n) { pandterm(paste('tau must have length ',n)) }
+if(!is.vector(sigma)) { pandterm('sigma must be a vector') }
+if(length(sigma) != n) { pandterm(paste('sigma must have length ',n)) }
+if(!is.matrix(y)) { pandterm('y must be a matrix') }
+if(nrow(y) != n) { pandterm(paste('y must have',n,'rows')) }
+if(ncol(y) != p) { pandterm(paste('y must have',p,'columns')) }
+
+#do draws
+domu=TRUE
+doSigma=TRUE
+dosigma=TRUE
+dotau=TRUE
+doLambda=TRUE
+doe=TRUE
+if(!missing(Mcmc)) {
+if(!is.null(Mcmc$domu))        { domu = Mcmc$domu }
+if(!is.null(Mcmc$doSigma))     { doSigma = Mcmc$doSigma }
+if(!is.null(Mcmc$dotau))       { dotau = Mcmc$dotau }
+if(!is.null(Mcmc$dosigma))     { dosigma = Mcmc$dosigma }
+if(!is.null(Mcmc$doLambda))    { doLambda = Mcmc$doLambda }
+if(!is.null(Mcmc$doe))         { doe = Mcmc$doe }
+}
+
+
+#++++++++++++++++++++++++++++++++++++++
+
+#process Prior argument ----------------------------------
+nu = p+BayesmConstant.nuInc
+V= nu*diag(p)
+mubar = matrix(rep(k/2,p),ncol=1)
+Am = BayesmConstant.A*diag(p)
+gs = 200
+gsigma = 6*(1:gs)/gs
+gl11 = .1 + 5.9*(1:gs)/gs
+gl22 = .1 + 2.0*(1:gs)/gs
+#gl12 = -.8 + 1.6*(1:gs)/gs
+gl12 = -2.0 + 4*(1:gs)/gs
+nuL=20
+VL = (nuL-3)*Lambda
+ge = -.1+.2*(0:gs)/gs
+
+if(!missing(Prior)) {
+if(!is.null(Prior$nu))       { nu = Prior$nu; V = nu*diag(p) }
+if(!is.null(Prior$V))        { V = Prior$V }
+if(!is.null(Prior$mubar))    { mubar = matrix(Prior$mubar,ncol=1) }
+if(!is.null(Prior$Am))       { Am = Prior$Am }
+if(!is.null(Prior$gsigma))   { gsigma = Prior$gsigma }
+if(!is.null(Prior$gl11))     { gl11 = Prior$gl11 }
+if(!is.null(Prior$gl22))     { gl22 = Prior$gl22 }
+if(!is.null(Prior$gl12))     { gl12 = Prior$gl12 }
+if(!is.null(Prior$Lambdanu)) { nuL = Prior$Lambdanu; VL = (nuL-3)*Lambda }
+if(!is.null(Prior$LambdaV))  { VL = Prior$LambdaV }
+if(!is.null(Prior$ge))       { ge = Prior$ge }
+}
+if(!ispd(V,p)) { pandterm(paste('V must be positive definite with dimension ',p)) }
+if(!ispd(Am,p)) { pandterm(paste('Am must be positive definite with dimension ',p)) }
+if(!ispd(VL,2)) { pandterm(paste('VL must be positive definite with dimension ',2)) }
+if(nrow(mubar) != p) { pandterm(paste('mubar must have length',p)) }
+#++++++++++++++++++++++++++++++++++++++++
+
+#print out run info -------------------------
+#
+# note in the documentation and in BSM, m is used instead of p
+#    for print-out purposes I'm using m   P. Rossi 12/06
+cat('   n,m,k: ', n,p,k,'\n')
+cat('   R,keep,ndghk,nprint: ', R,keep,ndghk,nprint,'\n')
+cat('\n')
+cat('   Data:\n')
+cat('      x[1,1],x[n,1],x[1,m],x[n,m]: ',x[1,1],x[n,1],x[1,p],x[n,p],'\n\n')
+cat('   Prior:\n')
+cat('      ','nu: ',nu,'\n')
+cat('      ','V[1,1]/nu,V[m,m]/nu: ',V[1,1]/nu,V[p,p]/nu,'\n')
+cat('      ','mubar[1],mubar[m]: ',mubar[1],mubar[p],'\n')
+cat('      ','Am[1,1],Am[m,m]: ',Am[1,1],Am[p,p],'\n')
+cat('      ','Lambdanu: ',nuL,'\n')
+cat('      ','LambdaV11,22/(Lambdanu-3): ',VL[1,1]/(nuL-3),VL[2,2]/(nuL-3),'\n')
+cat('      ','sigma grid, 1,',length(gsigma),': ',gsigma[1],', ',gsigma[length(gsigma)],'\n')
+cat('      ','Lambda11 grid, 1,',length(gl11),': ',gl11[1],', ',gl11[length(gl11)],'\n')
+cat('      ','Lambda12 grid, 1,',length(gl12),': ',gl12[1],', ',gl12[length(gl12)],'\n')
+cat('      ','Lambda22 grid, 1,',length(gl22),': ',gl22[1],', ',gl22[length(gl22)],'\n')
+cat('      ','e grid, 1,',length(ge),': ',ge[1],', ',ge[length(ge)],'\n')
+cat('      ','draw e: ',doe,'\n')
+cat('      ','draw Lambda: ',doLambda,'\n')
+#++++++++++++++++++++++++++++++++++++++++++++
+
+###################################################################
+# Wayne Taylor
+# 3/14/2015
+###################################################################
+out = rscaleUsage_rcpp_loop(k,x,p,n,
+                            R,keep,ndghk,nprint,
+                            y,mu,Sigma,tau,sigma,Lambda,e,
+                            domu,doSigma,dosigma,dotau,doLambda,doe,
+                            nu,V,mubar,Am, 
+                            gsigma,gl11,gl22,gl12,
+                            nuL,VL,ge)
+
+R = out$ndpost
+###################################################################
+attributes(out$drmu)$class=c("bayesm.mat","mcmc")
+attributes(out$drmu)$mcpar=c(1,R,keep)
+attributes(out$drtau)$class=c("bayesm.mat","mcmc")
+attributes(out$drtau)$mcpar=c(1,R,keep)
+attributes(out$drsigma)$class=c("bayesm.mat","mcmc")
+attributes(out$drsigma)$mcpar=c(1,R,keep)
+attributes(out$drLambda)$class=c("bayesm.mat","mcmc")
+attributes(out$drLambda)$mcpar=c(1,R,keep)
+attributes(out$dre)$class=c("bayesm.mat","mcmc")
+attributes(out$dre)$mcpar=c(1,R,keep)
+attributes(out$drSigma)$class=c("bayesm.var","bayesm.mat","mcmc")
+attributes(out$drSigma)$mcpar=c(1,R,keep)
+return(list(Sigmadraw=out$drSigma,mudraw=out$drmu,taudraw = out$drtau,
+            sigmadraw=out$drsigma,Lambdadraw=out$drLambda,edraw=out$dre))
+}
+
+
+
+
diff --git a/R/rsurGibbs.R b/R/rsurgibbs_rcpp.r
old mode 100755
new mode 100644
similarity index 60%
rename from R/rsurGibbs.R
rename to R/rsurgibbs_rcpp.r
index 2308611..3b9f289
--- a/R/rsurGibbs.R
+++ b/R/rsurgibbs_rcpp.r
@@ -1,187 +1,162 @@
-rsurGibbs=
-function(Data,Prior,Mcmc)
-{
-# 
-# revision history:
-#          P. Rossi 9/05
-#          3/07 added classes
-# Purpose:
-#   implement Gibbs Sampler for SUR
-# 
-# Arguments:
-#   Data -- regdata
-#           regdata is a list of lists of data for each regression
-#           regdata[[i]] contains data for regression equation i
-#           regdata[[i]]$y is y, regdata[[i]]$X is X
-#           note: each regression can have differing numbers of X vars
-#                 but you must have same no of obs in each equation. 
-#   Prior -- list of prior hyperparameters
-#     betabar,A      prior mean, prior precision
-#     nu, V          prior on Sigma
-#   Mcmc -- list of MCMC parms
-#     R number of draws
-#     keep -- thinning parameter
-# 
-# Output: 
-#   list of betadraw,Sigmadraw
-#
-# Model:
-#   y_i = X_ibeta + e_i  
-#          y is nobs x 1
-#          X is nobs x k_i
-#          beta is k_i x 1 vector of coefficients
-#          i=1,nreg total regressions
-#
-#         (e_1,k,...,e_nreg,k) ~ N(0,Sigma) k=1,...,nobs
-#
-#   we can also write as stacked regression
-#   y = Xbeta+e
-#       y is nobs*nreg x 1,X is nobs*nreg x (sum(k_i))
-#   routine draws beta -- the stacked vector of all coefficients
-#
-# Priors:  beta ~ N(betabar,A^-1)
-#          Sigma ~ IW(nu,V)
-# 
-#
-# check arguments
-#
-pandterm=function(message) {stop(message,call.=FALSE)}
-if(missing(Data)) {pandterm("Requires Data argument -- list of regdata")}
-    if(is.null(Data$regdata)) {pandterm("Requires Data element regdata")}
-    regdata=Data$regdata
-#
-# check regdata for validity
-#
-nreg=length(regdata)
-nobs=length(regdata[[1]]$y)
-nvar=0
-indreg=double(nreg+1)
-y=NULL
-for (reg in 1:nreg) {
-   if(length(regdata[[reg]]$y) != nobs || nrow(regdata[[reg]]$X) != nobs)
-      {pandterm(paste("incorrect dimensions for regression",reg))}
-   else
-      {indreg[reg]=nvar+1
-       nvar=nvar+ncol(regdata[[reg]]$X); y=c(y,regdata[[reg]]$y)}
-} 
-indreg[nreg+1]=nvar+1
-#
-# check for Prior
-#
-if(missing(Prior))
-   { betabar=c(rep(0,nvar)); A=.01*diag(nvar); nu=nreg+3; V=nu*diag(nreg)}
-else
-   {
-    if(is.null(Prior$betabar)) {betabar=c(rep(0,nvar))} 
-       else {betabar=Prior$betabar}
-    if(is.null(Prior$A)) {A=.01*diag(nvar)} 
-       else {A=Prior$A}
-    if(is.null(Prior$nu)) {nu=nreg+3}
-       else {nu=Prior$nu}
-    if(is.null(Prior$V)) {V=nu*diag(nreg)}
-       else {ssq=Prior$V}
-   }
-#
-# check dimensions of Priors
-#
-if(ncol(A) != nrow(A) || ncol(A) != nvar || nrow(A) != nvar) 
-   {pandterm(paste("bad dimensions for A",dim(A)))}
-if(length(betabar) != nvar)
-   {pandterm(paste("betabar wrong length, length= ",length(betabar)))}
-#
-# check MCMC argument
-#
-if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
-else
-   {
-    if(is.null(Mcmc$R)) {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
-    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
-   }
-#
-# print out problem
-#
-cat(" ", fill=TRUE)
-cat("Starting Gibbs Sampler for SUR Regression Model",fill=TRUE)
-cat("  with ",nreg," regressions",fill=TRUE)
-cat("  and  ",nobs," observations for each regression",fill=TRUE)
-cat(" ", fill=TRUE)
-cat("Prior Parms: ",fill=TRUE)
-cat("betabar",fill=TRUE)
-print(betabar)
-cat("A",fill=TRUE)
-print(A)
-cat("nu = ",nu,fill=TRUE)
-cat("V = ",fill=TRUE)
-print(V)
-cat(" ", fill=TRUE)
-cat("MCMC parms: ",fill=TRUE)
-cat("R= ",R," keep= ",keep,fill=TRUE)
-cat(" ",fill=TRUE)
-
-Sigmadraw=matrix(double(floor(R*nreg*nreg/keep)),ncol=nreg*nreg)
-betadraw=matrix(double(floor(R*nvar/keep)),ncol=nvar)
-
-
-#
-# set initial value of Sigma
-#
-E=matrix(double(nobs*nreg),ncol=nreg)
-for (reg in 1:nreg) {
-    E[,reg]=lm(y~.-1,data=data.frame(y=regdata[[reg]]$y,regdata[[reg]]$X))$residuals
-}
-Sigma=crossprod(E)/nobs
-L=t(backsolve(chol(Sigma),diag(nreg)))
-Y=y
-dim(Y)=c(nobs,nreg)
-Xti=matrix(0,ncol=nvar,nrow=nreg*nobs)
-
-itime=proc.time()[3]
-cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
-fsh()
-
-for (rep in 1:R)
-{
-#
-#     first draw beta | Sigma
-#
-# compute Xtilde
-#
-  for (reg in 1:nreg){
-     Xti[,indreg[reg]:(indreg[reg+1]-1)]=L[,reg]%x%regdata[[reg]]$X
-  }
-  IR=backsolve(chol(crossprod(Xti)+A),diag(nvar))
-#
-# compute ytilde
-  yti=as.vector(Y%*%t(L))
-  btilde=crossprod(t(IR))%*%(crossprod(Xti,yti)+A%*%betabar)
-  beta = btilde + IR%*%rnorm(nvar)
-#
-#    now draw Sigma | beta
-#
-  for(reg in 1:nreg){
-     E[,reg]=regdata[[reg]]$y-regdata[[reg]]$X%*%beta[indreg[reg]:(indreg[reg+1]-1)]
-  }
-  Sigma=rwishart(nu+nobs,chol2inv(chol(crossprod(E)+V)))$IW
-  L=t(backsolve(chol(Sigma),diag(nreg)))
-#
-#       print time to completion and draw # every 100th draw
-#
-  if(rep%%100 == 0)
-    {ctime=proc.time()[3]
-    timetoend=((ctime-itime)/rep)*(R-rep)
-    cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
-    fsh()}
-
-  if(rep%%keep == 0) 
-    {mkeep=rep/keep; betadraw[mkeep,]=beta; Sigmadraw[mkeep,]=Sigma}
-}
-ctime = proc.time()[3]
-cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
-
-attributes(betadraw)$class=c("bayesm.mat","mcmc")
-attributes(betadraw)$mcpar=c(1,R,keep)
-attributes(Sigmadraw)$class=c("bayesm.var","bayesm.mat","mcmc")
-attributes(Sigmadraw)$mcpar=c(1,R,keep)
-
-return(list(betadraw=betadraw,Sigmadraw=Sigmadraw))
-}
+rsurGibbs=
+function(Data,Prior,Mcmc)
+{
+# 
+# revision history:
+#          P. Rossi 9/05
+#          3/07 added classes
+#          9/14 changed to improve computations by avoiding Kronecker products
+#          W. Taylor 4/15 - added nprint option to MCMC argument
+# Purpose:
+#   implement Gibbs Sampler for SUR
+# 
+# Arguments:
+#   Data -- regdata
+#           regdata is a list of lists of data for each regression
+#           regdata[[i]] contains data for regression equation i
+#           regdata[[i]]$y is y, regdata[[i]]$X is X
+#           note: each regression can have differing numbers of X vars
+#                 but you must have same no of obs in each equation. 
+#   Prior -- list of prior hyperparameters
+#     betabar,A      prior mean, prior precision
+#     nu, V          prior on Sigma
+#   Mcmc -- list of MCMC parms
+#     R number of draws
+#     keep -- thinning parameter
+#     nprint - print estimated time remaining on every nprint'th draw
+# 
+# Output: 
+#   list of betadraw,Sigmadraw
+#
+# Model:
+#   y_i = X_ibeta + e_i  
+#          y is nobs x 1
+#          X is nobs x k_i
+#          beta is k_i x 1 vector of coefficients
+#          i=1,nreg total regressions
+#
+#         (e_1,k,...,e_nreg,k) ~ N(0,Sigma) k=1,...,nobs
+#
+#   we can also write as stacked regression
+#   y = Xbeta+e
+#       y is nobs*nreg x 1,X is nobs*nreg x (sum(k_i))
+#   routine draws beta -- the stacked vector of all coefficients
+#
+# Priors:  beta ~ N(betabar,A^-1)
+#          Sigma ~ IW(nu,V)
+# 
+#
+# check arguments
+#
+if(missing(Data)) {pandterm("Requires Data argument -- list of regdata")}
+    if(is.null(Data$regdata)) {pandterm("Requires Data element regdata")}
+    regdata=Data$regdata
+#
+# check regdata for validity
+#
+nreg=length(regdata)
+nobs=length(regdata[[1]]$y)
+nvar=0
+indreg=double(nreg+1)
+y=NULL
+for (reg in 1:nreg) {
+   if(length(regdata[[reg]]$y) != nobs || nrow(regdata[[reg]]$X) != nobs)
+      {pandterm(paste("incorrect dimensions for regression",reg))}
+   else
+      {indreg[reg]=nvar+1
+       nvar=nvar+ncol(regdata[[reg]]$X); y=c(y,regdata[[reg]]$y)}
+} 
+indreg[nreg+1]=nvar+1
+#
+# check for Prior
+#
+if(missing(Prior))
+   { betabar=c(rep(0,nvar)); A=BayesmConstant.A*diag(nvar); nu=nreg+BayesmConstant.nuInc; V=nu*diag(nreg)}
+else
+   {
+    if(is.null(Prior$betabar)) {betabar=c(rep(0,nvar))} 
+       else {betabar=Prior$betabar}
+    if(is.null(Prior$A)) {A=BayesmConstant.A*diag(nvar)} 
+       else {A=Prior$A}
+    if(is.null(Prior$nu)) {nu=nreg+BayesmConstant.nuInc}
+       else {nu=Prior$nu}
+    if(is.null(Prior$V)) {V=nu*diag(nreg)}
+       else {ssq=Prior$V}
+   }
+#
+# check dimensions of Priors
+#
+if(ncol(A) != nrow(A) || ncol(A) != nvar || nrow(A) != nvar) 
+   {pandterm(paste("bad dimensions for A",dim(A)))}
+if(length(betabar) != nvar)
+   {pandterm(paste("betabar wrong length, length= ",length(betabar)))}
+#
+# check MCMC argument
+#
+if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
+else
+   {
+    if(is.null(Mcmc$R)) {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
+    if(is.null(Mcmc$keep)) {keep=BayesmConstant.keep} else {keep=Mcmc$keep}
+    if(is.null(Mcmc$nprint)) {nprint=BayesmConstant.nprint} else {nprint=Mcmc$nprint}
+      if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
+   }
+#
+# print out problem
+#
+cat(" ", fill=TRUE)
+cat("Starting Gibbs Sampler for SUR Regression Model",fill=TRUE)
+cat("  with ",nreg," regressions",fill=TRUE)
+cat("  and  ",nobs," observations for each regression",fill=TRUE)
+cat(" ", fill=TRUE)
+cat("Prior Parms: ",fill=TRUE)
+cat("betabar",fill=TRUE)
+print(betabar)
+cat("A",fill=TRUE)
+print(A)
+cat("nu = ",nu,fill=TRUE)
+cat("V = ",fill=TRUE)
+print(V)
+cat(" ", fill=TRUE)
+cat("MCMC parms: ",fill=TRUE)
+cat("R= ",R," keep= ",keep," nprint= ",nprint,fill=TRUE)
+cat(" ",fill=TRUE)
+
+#
+# set initial value of Sigma
+#
+E=matrix(double(nobs*nreg),ncol=nreg)
+for (reg in 1:nreg) {
+    E[,reg]=lm(y~.-1,data=data.frame(y=regdata[[reg]]$y,regdata[[reg]]$X))$residuals
+}
+Sigma=(crossprod(E)+diag(.01,nreg))/nobs
+Sigmainv=chol2inv(chol(Sigma))
+
+#
+# precompute various moments and indices into moment matrix and Abetabar
+nk=integer(nreg)
+Xstar=NULL
+Y=NULL
+for(i in 1:nreg){
+  nk[i]=ncol(regdata[[i]]$X)
+  Xstar=cbind(Xstar,regdata[[i]]$X)
+  Y=cbind(Y,regdata[[i]]$y)
+}
+cumnk=cumsum(nk)
+XspXs=crossprod(Xstar)
+Abetabar=A%*%betabar
+
+###################################################################
+# Keunwoo Kim
+# 09/19/2014
+###################################################################
+draws=rsurGibbs_rcpp_loop(regdata,indreg,cumnk,nk,XspXs,Sigmainv,A,Abetabar,nu,V,nvar,E,Y,R,keep,nprint)
+###################################################################
+
+attributes(draws$betadraw)$class=c("bayesm.mat","mcmc")
+attributes(draws$betadraw)$mcpar=c(1,R,keep)
+attributes(draws$Sigmadraw)$class=c("bayesm.var","bayesm.mat","mcmc")
+attributes(draws$Sigmadraw)$mcpar=c(1,R,keep)
+
+return(draws)
+}
diff --git a/R/rtrun.R b/R/rtrun.R
deleted file mode 100755
index 65aca38..0000000
--- a/R/rtrun.R
+++ /dev/null
@@ -1,11 +0,0 @@
-rtrun=
-function(mu,sigma,a,b){
-#
-# function to draw from univariate truncated norm
-# a is vector of lower bounds for truncation
-# b is vector of upper bounds for truncation
-#
-FA=pnorm(((a-mu)/sigma))
-FB=pnorm(((b-mu)/sigma))
-return(mu+sigma*qnorm(runif(length(mu))*(FB-FA)+FA))
-}
diff --git a/R/runireg.R b/R/runireg_rcpp.r
old mode 100755
new mode 100644
similarity index 57%
rename from R/runireg.R
rename to R/runireg_rcpp.r
index 7084405..f50a38a
--- a/R/runireg.R
+++ b/R/runireg_rcpp.r
@@ -1,151 +1,117 @@
-runireg=
-function(Data,Prior,Mcmc)
-{
-# 
-# revision history:
-#          P. Rossi 1/17/05
-#          revised 9/05 to put in Data,Prior,Mcmc calling convention
-#          3/07 added classes
-# Purpose:
-#   perform iid draws from posterior of regression model using
-#     conjugate prior
-# 
-# Arguments:
-#   Data -- list of data 
-#           y,X
-#   Prior -- list of prior hyperparameters
-#     betabar,A      prior mean, prior precision
-#     nu, ssq        prior on sigmasq
-#   Mcmc -- list of MCMC parms
-#     R number of draws
-#     keep -- thinning parameter
-# 
-# Output: 
-#   list of beta, sigmasq
-#
-# Model:
-#   y = Xbeta + e  e ~N(0,sigmasq)
-#          y is n x 1
-#          X is n x k
-#          beta is k x 1 vector of coefficients
-#
-# Priors:  beta ~ N(betabar,sigmasq*A^-1)
-#          sigmasq ~ (nu*ssq)/chisq_nu
-# 
-#
-# check arguments
-#
-pandterm=function(message) {stop(message,call.=FALSE)}
-if(missing(Data)) {pandterm("Requires Data argument -- list of y and X")}
-    if(is.null(Data$X)) {pandterm("Requires Data element X")}
-    X=Data$X
-    if(is.null(Data$y)) {pandterm("Requires Data element y")}
-    y=Data$y
-nvar=ncol(X)
-nobs=length(y)
-#
-# check data for validity
-#
-if(nobs != nrow(X) ) {pandterm("length(y) ne nrow(X)")}
-#
-# check for Prior
-#
-if(missing(Prior))
-   { betabar=c(rep(0,nvar)); A=.01*diag(nvar); nu=3; ssq=var(y)}
-else
-   {
-    if(is.null(Prior$betabar)) {betabar=c(rep(0,nvar))} 
-       else {betabar=Prior$betabar}
-    if(is.null(Prior$A)) {A=.01*diag(nvar)} 
-       else {A=Prior$A}
-    if(is.null(Prior$nu)) {nu=3}
-       else {nu=Prior$nu}
-    if(is.null(Prior$ssq)) {ssq=var(y)}
-       else {ssq=Prior$ssq}
-   }
-#
-# check dimensions of Priors
-#
-if(ncol(A) != nrow(A) || ncol(A) != nvar || nrow(A) != nvar) 
-   {pandterm(paste("bad dimensions for A",dim(A)))}
-if(length(betabar) != nvar)
-   {pandterm(paste("betabar wrong length, length= ",length(betabar)))}
-#
-# check MCMC argument
-#
-if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
-else
-   {
-    if(is.null(Mcmc$R)) 
-       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
-    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
-   }
-#
-# print out problem
-#
-cat(" ", fill=TRUE)
-cat("Starting IID Sampler for Univariate Regression Model",fill=TRUE)
-cat("  with ",nobs," observations",fill=TRUE)
-cat(" ", fill=TRUE)
-cat("Prior Parms: ",fill=TRUE)
-cat("betabar",fill=TRUE)
-print(betabar)
-cat("A",fill=TRUE)
-print(A)
-cat("nu = ",nu," ssq= ",ssq,fill=TRUE)
-cat(" ", fill=TRUE)
-cat("MCMC parms: ",fill=TRUE)
-cat("R= ",R," keep= ",keep,fill=TRUE)
-cat(" ",fill=TRUE)
-
-sigmasqdraw=double(floor(Mcmc$R/keep))
-betadraw=matrix(double(floor(Mcmc$R*nvar/keep)),ncol=nvar)
-
-itime=proc.time()[3]
-cat("IID Iteration (est time to end - min) ",fill=TRUE)
-fsh()
-
-for (rep in 1:Mcmc$R){
-
-#
-# first draw Sigma
-#
-RA=chol(A)
-W=rbind(X,RA)
-z=c(y,as.vector(RA%*%betabar))
-IR=backsolve(chol(crossprod(W)),diag(nvar))
-#      W'W=R'R ;  (W'W)^-1 = IR IR'  -- this is UL decomp
-btilde=crossprod(t(IR))%*%crossprod(W,z)
-res=z-W%*%btilde
-s=t(res)%*%res
-#
-# first draw Sigma
-#
-#
-sigmasq=(nu*ssq + s)/rchisq(1,nu+nobs)
-#
-# now draw beta given Sigma
-#	
-beta = btilde + as.vector(sqrt(sigmasq))*IR%*%rnorm(nvar)
-#
-#       print time to completion and draw # every 100th draw
-#
-  if(rep%%100 == 0)
-    {ctime=proc.time()[3]
-    timetoend=((ctime-itime)/rep)*(R-rep)
-    cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
-    fsh()}
-
-  if(rep%%keep == 0) 
-    {mkeep=rep/keep; betadraw[mkeep,]=beta; sigmasqdraw[mkeep]=sigmasq}
-}
-ctime = proc.time()[3]
-cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
-
-attributes(betadraw)$class=c("bayesm.mat","mcmc")
-attributes(betadraw)$mcpar=c(1,R,keep)
-attributes(sigmasqdraw)$class=c("bayesm.mat","mcmc")
-attributes(sigmasqdraw)$mcpar=c(1,R,keep)
-
-return(list(betadraw=betadraw,sigmasqdraw=sigmasqdraw))
-}
+runireg=
+function(Data,Prior,Mcmc)
+{
+# 
+# revision history:
+#          P. Rossi 1/17/05
+#          revised 9/05 to put in Data,Prior,Mcmc calling convention
+#          3/07 added classes
+#          W. Taylor 4/15 - added nprint option to MCMC argument
+# Purpose:
+#   perform iid draws from posterior of regression model using
+#     conjugate prior
+# 
+# Arguments:
+#   Data -- list of data 
+#           y,X
+#   Prior -- list of prior hyperparameters
+#     betabar,A      prior mean, prior precision
+#     nu, ssq        prior on sigmasq
+#   Mcmc -- list of MCMC parms
+#     R number of draws
+#     keep -- thinning parameter
+#     nprint - print estimated time remaining on every nprint'th draw
+# 
+# Output: 
+#   list of beta, sigmasq
+#
+# Model:
+#   y = Xbeta + e  e ~N(0,sigmasq)
+#          y is n x 1
+#          X is n x k
+#          beta is k x 1 vector of coefficients
+#
+# Priors:  beta ~ N(betabar,sigmasq*A^-1)
+#          sigmasq ~ (nu*ssq)/chisq_nu
+# 
+#
+# check arguments
+#
+if(missing(Data)) {pandterm("Requires Data argument -- list of y and X")}
+    if(is.null(Data$X)) {pandterm("Requires Data element X")}
+    X=Data$X
+    if(is.null(Data$y)) {pandterm("Requires Data element y")}
+    y=Data$y
+nvar=ncol(X)
+nobs=length(y)
+#
+# check data for validity
+#
+if(nobs != nrow(X) ) {pandterm("length(y) ne nrow(X)")}
+#
+# check for Prior
+#
+if(missing(Prior))
+   { betabar=c(rep(0,nvar)); A=BayesmConstant.A*diag(nvar); nu=BayesmConstant.nu; ssq=var(y)}
+else
+   {
+    if(is.null(Prior$betabar)) {betabar=c(rep(0,nvar))} 
+       else {betabar=Prior$betabar}
+    if(is.null(Prior$A)) {A=BayesmConstant.A*diag(nvar)} 
+       else {A=Prior$A}
+    if(is.null(Prior$nu)) {nu=BayesmConstant.nu}
+       else {nu=Prior$nu}
+    if(is.null(Prior$ssq)) {ssq=var(y)}
+       else {ssq=Prior$ssq}
+   }
+#
+# check dimensions of Priors
+#
+if(ncol(A) != nrow(A) || ncol(A) != nvar || nrow(A) != nvar) 
+   {pandterm(paste("bad dimensions for A",dim(A)))}
+if(length(betabar) != nvar)
+   {pandterm(paste("betabar wrong length, length= ",length(betabar)))}
+#
+# check MCMC argument
+#
+if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
+else
+   {
+    if(is.null(Mcmc$R)) 
+       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
+    if(is.null(Mcmc$keep)) {keep=BayesmConstant.keep} else {keep=Mcmc$keep}
+    if(is.null(Mcmc$nprint)) {nprint=BayesmConstant.nprint} else {nprint=Mcmc$nprint}
+      if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
+   }
+#
+# print out problem
+#
+cat(" ", fill=TRUE)
+cat("Starting IID Sampler for Univariate Regression Model",fill=TRUE)
+cat("  with ",nobs," observations",fill=TRUE)
+cat(" ", fill=TRUE)
+cat("Prior Parms: ",fill=TRUE)
+cat("betabar",fill=TRUE)
+print(betabar)
+cat("A",fill=TRUE)
+print(A)
+cat("nu = ",nu," ssq= ",ssq,fill=TRUE)
+cat(" ", fill=TRUE)
+cat("MCMC parms: ",fill=TRUE)
+cat("R= ",R," keep= ",keep," nprint= ",nprint,fill=TRUE)
+cat(" ",fill=TRUE)
+
+###################################################################
+# Keunwoo Kim
+# 08/05/2014
+###################################################################
+draws = runireg_rcpp_loop(y, X, betabar, A, nu, ssq, R, keep, nprint)
+###################################################################
+
+attributes(draws$betadraw)$class=c("bayesm.mat","mcmc")
+attributes(draws$betadraw)$mcpar=c(1,R,keep)
+attributes(draws$sigmasqdraw)$class=c("bayesm.mat","mcmc")
+attributes(draws$sigmasqdraw)$mcpar=c(1,R,keep)
+
+return(draws)
+}
diff --git a/R/runiregGibbs.R b/R/runireggibbs_rcpp.r
old mode 100755
new mode 100644
similarity index 63%
rename from R/runiregGibbs.R
rename to R/runireggibbs_rcpp.r
index ef8020d..7e4e259
--- a/R/runiregGibbs.R
+++ b/R/runireggibbs_rcpp.r
@@ -1,148 +1,118 @@
-runiregGibbs=
-function(Data,Prior,Mcmc)
-{
-# 
-# revision history:
-#          P. Rossi 1/17/05
-#          3/07 added classes
-# Purpose:
-#   perform Gibbs iterations for Univ Regression Model using
-#     prior with beta, sigma-sq indep
-# 
-# Arguments:
-#   Data -- list of data 
-#           y,X
-#   Prior -- list of prior hyperparameters
-#     betabar,A      prior mean, prior precision
-#     nu, ssq        prior on sigmasq
-#   Mcmc -- list of MCMC parms
-#     sigmasq=initial value for sigmasq
-#     R number of draws
-#     keep -- thinning parameter
-# 
-# Output: 
-#   list of beta, sigmasq
-#
-# Model:
-#   y = Xbeta + e  e ~N(0,sigmasq)
-#          y is n x 1
-#          X is n x k
-#          beta is k x 1 vector of coefficients
-#
-# Priors:  beta ~ N(betabar,A^-1)
-#          sigmasq ~ (nu*ssq)/chisq_nu
-# 
-#
-# check arguments
-#
-pandterm=function(message) {stop(message,call.=FALSE)}
-if(missing(Data)) {pandterm("Requires Data argument -- list of y and X")}
-    if(is.null(Data$X)) {pandterm("Requires Data element X")}
-    X=Data$X
-    if(is.null(Data$y)) {pandterm("Requires Data element y")}
-    y=Data$y
-nvar=ncol(X)
-nobs=length(y)
-#
-# check data for validity
-#
-if(nobs != nrow(X) ) {pandterm("length(y) ne nrow(X)")}
-#
-# check for Prior
-#
-if(missing(Prior))
-   { betabar=c(rep(0,nvar)); A=.01*diag(nvar); nu=3; ssq=var(y)}
-else
-   {
-    if(is.null(Prior$betabar)) {betabar=c(rep(0,nvar))} 
-       else {betabar=Prior$betabar}
-    if(is.null(Prior$A)) {A=.01*diag(nvar)} 
-       else {A=Prior$A}
-    if(is.null(Prior$nu)) {nu=3}
-       else {nu=Prior$nu}
-    if(is.null(Prior$ssq)) {ssq=var(y)}
-       else {ssq=Prior$ssq}
-   }
-#
-# check dimensions of Priors
-#
-if(ncol(A) != nrow(A) || ncol(A) != nvar || nrow(A) != nvar) 
-   {pandterm(paste("bad dimensions for A",dim(A)))}
-if(length(betabar) != nvar)
-   {pandterm(paste("betabar wrong length, length= ",length(betabar)))}
-#
-# check MCMC argument
-#
-if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
-else
-   {
-    if(is.null(Mcmc$R)) 
-       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
-    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
-    if(is.null(Mcmc$sigmasq)) {sigmasq=var(y)} else {sigmasq=Mcmc$sigmasq}
-   }
-#
-# print out problem
-#
-cat(" ", fill=TRUE)
-cat("Starting Gibbs Sampler for Univariate Regression Model",fill=TRUE)
-cat("  with ",nobs," observations",fill=TRUE)
-cat(" ", fill=TRUE)
-cat("Prior Parms: ",fill=TRUE)
-cat("betabar",fill=TRUE)
-print(betabar)
-cat("A",fill=TRUE)
-print(A)
-cat("nu = ",nu," ssq= ",ssq,fill=TRUE)
-cat(" ", fill=TRUE)
-cat("MCMC parms: ",fill=TRUE)
-cat("R= ",R," keep= ",keep,fill=TRUE)
-cat(" ",fill=TRUE)
-
-sigmasqdraw=double(floor(Mcmc$R/keep))
-betadraw=matrix(double(floor(Mcmc$R*nvar/keep)),ncol=nvar)
-XpX=crossprod(X)
-Xpy=crossprod(X,y)
-sigmasq=as.vector(sigmasq)
-
-itime=proc.time()[3]
-cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
-fsh()
-
-for (rep in 1:Mcmc$R)
-{
-#
-#     first draw beta | sigmasq
-#
-  IR=backsolve(chol(XpX/sigmasq+A),diag(nvar))
-  btilde=crossprod(t(IR))%*%(Xpy/sigmasq+A%*%betabar)
-  beta = btilde + IR%*%rnorm(nvar)
-#
-#    now draw sigmasq | beta
-#
-  res=y-X%*%beta
-  s=t(res)%*%res
-  sigmasq=(nu*ssq + s)/rchisq(1,nu+nobs)
-  sigmasq=as.vector(sigmasq)
-#
-#       print time to completion and draw # every 100th draw
-#
-  if(rep%%100 == 0)
-    {ctime=proc.time()[3]
-    timetoend=((ctime-itime)/rep)*(R-rep)
-    cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
-    fsh()}
-
-  if(rep%%keep == 0) 
-    {mkeep=rep/keep; betadraw[mkeep,]=beta; sigmasqdraw[mkeep]=sigmasq}
-}
-ctime = proc.time()[3]
-cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
-
-attributes(betadraw)$class=c("bayesm.mat","mcmc")
-attributes(betadraw)$mcpar=c(1,R,keep)
-attributes(sigmasqdraw)$class=c("bayesm.mat","mcmc")
-attributes(sigmasqdraw)$mcpar=c(1,R,keep)
-
-return(list(betadraw=betadraw,sigmasqdraw=sigmasqdraw))
-}
+runiregGibbs=
+function(Data,Prior,Mcmc)
+{
+# 
+# revision history:
+#          P. Rossi 1/17/05
+#          3/07 added classes
+#          W. Taylor 4/15 - added nprint option to MCMC argument
+# Purpose:
+#   perform Gibbs iterations for Univ Regression Model using
+#     prior with beta, sigma-sq indep
+# 
+# Arguments:
+#   Data -- list of data 
+#           y,X
+#   Prior -- list of prior hyperparameters
+#     betabar,A      prior mean, prior precision
+#     nu, ssq        prior on sigmasq
+#   Mcmc -- list of MCMC parms
+#     sigmasq=initial value for sigmasq
+#     R number of draws
+#     keep -- thinning parameter
+#     nprint - print estimated time remaining on every nprint'th draw
+# 
+# Output: 
+#   list of beta, sigmasq
+#
+# Model:
+#   y = Xbeta + e  e ~N(0,sigmasq)
+#          y is n x 1
+#          X is n x k
+#          beta is k x 1 vector of coefficients
+#
+# Priors:  beta ~ N(betabar,A^-1)
+#          sigmasq ~ (nu*ssq)/chisq_nu
+# 
+#
+# check arguments
+#
+if(missing(Data)) {pandterm("Requires Data argument -- list of y and X")}
+    if(is.null(Data$X)) {pandterm("Requires Data element X")}
+    X=Data$X
+    if(is.null(Data$y)) {pandterm("Requires Data element y")}
+    y=Data$y
+nvar=ncol(X)
+nobs=length(y)
+#
+# check data for validity
+#
+if(nobs != nrow(X) ) {pandterm("length(y) ne nrow(X)")}
+#
+# check for Prior
+#
+if(missing(Prior))
+   { betabar=c(rep(0,nvar)); A=.01*diag(nvar); nu=3; ssq=var(y)}
+else
+   {
+    if(is.null(Prior$betabar)) {betabar=c(rep(0,nvar))} 
+       else {betabar=Prior$betabar}
+    if(is.null(Prior$A)) {A=.01*diag(nvar)} 
+       else {A=Prior$A}
+    if(is.null(Prior$nu)) {nu=3}
+       else {nu=Prior$nu}
+    if(is.null(Prior$ssq)) {ssq=var(y)}
+       else {ssq=Prior$ssq}
+   }
+#
+# check dimensions of Priors
+#
+if(ncol(A) != nrow(A) || ncol(A) != nvar || nrow(A) != nvar) 
+   {pandterm(paste("bad dimensions for A",dim(A)))}
+if(length(betabar) != nvar)
+   {pandterm(paste("betabar wrong length, length= ",length(betabar)))}
+#
+# check MCMC argument
+#
+if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
+else
+   {
+    if(is.null(Mcmc$R)) 
+       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
+    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
+    if(is.null(Mcmc$nprint)) {nprint=100} else {nprint=Mcmc$nprint}
+      if(nprint<0) {pandterm('nprint must be an integer greater than or equal to 0')}
+    if(is.null(Mcmc$sigmasq)) {sigmasq=var(y)} else {sigmasq=Mcmc$sigmasq}
+   }
+#
+# print out problem
+#
+cat(" ", fill=TRUE)
+cat("Starting Gibbs Sampler for Univariate Regression Model",fill=TRUE)
+cat("  with ",nobs," observations",fill=TRUE)
+cat(" ", fill=TRUE)
+cat("Prior Parms: ",fill=TRUE)
+cat("betabar",fill=TRUE)
+print(betabar)
+cat("A",fill=TRUE)
+print(A)
+cat("nu = ",nu," ssq= ",ssq,fill=TRUE)
+cat(" ", fill=TRUE)
+cat("MCMC parms: ",fill=TRUE)
+cat("R= ",R," keep= ",keep," nprint= ",nprint,fill=TRUE)
+cat(" ",fill=TRUE)
+
+###################################################################
+# Keunwoo Kim
+# 08/05/2014
+###################################################################
+draws = runiregGibbs_rcpp_loop(y, X, betabar, A, nu, ssq, sigmasq, R, keep, nprint)
+###################################################################
+
+attributes(draws$betadraw)$class=c("bayesm.mat","mcmc")
+attributes(draws$betadraw)$mcpar=c(1,R,keep)
+attributes(draws$sigmasqdraw)$class=c("bayesm.mat","mcmc")
+attributes(draws$sigmasqdraw)$mcpar=c(1,R,keep)
+
+return(draws)
+}
diff --git a/R/rwishart.R b/R/rwishart.R
deleted file mode 100755
index cc372e9..0000000
--- a/R/rwishart.R
+++ /dev/null
@@ -1,30 +0,0 @@
-rwishart=
-function(nu,V){
-#
-# function to draw from Wishart (nu,V) and IW
-# 
-# W ~ W(nu,V)
-# E[W]=nuV
-#
-# WI=W^-1
-# E[WI]=V^-1/(nu-m-1)
-# 
-#
-m=nrow(V)
-df=(nu+nu-m+1)-(nu-m+1):nu
-if(m >1) {
-T=diag(sqrt(rchisq(c(rep(1,m)),df)))
-T[lower.tri(T)]=rnorm((m*(m+1)/2-m))}
-else
-{T=sqrt(rchisq(1,df))}
-U=chol(V)
-C=t(T)%*%U
-CI=backsolve(C,diag(m))
-#
-#   C is the upper triangular root of Wishart
-#      therefore, W=C'C  this is the LU decomposition 
-#      Inv(W) = CICI'  Note:  this is the UL decomp not LU!
-#
-return(list(W=crossprod(C),IW=crossprod(t(CI)),C=C,CI=CI))
-#  W is Wishart draw,  IW is W^-1
-}
diff --git a/R/simnhlogit.R b/R/simnhlogit.R
index 52c7977..688c4a1 100755
--- a/R/simnhlogit.R
+++ b/R/simnhlogit.R
@@ -1,6 +1,4 @@
-simnhlogit=
-function(theta,lnprices,Xexpend) 
-{
+simnhlogit=function(theta,lnprices,Xexpend) {
 #   function to simulate non-homothetic logit model
 #       creates y  a n x 1 vector with indicator of choice (1,...,m)
 #	lnprices is n x m array of log-prices faced
@@ -14,11 +12,6 @@ function(theta,lnprices,Xexpend)
 #           gamma  (k x 1)   expenditure function coefficients
 #           tau   -- scaling of v
 #	    
-root=function(c1,c2,tol,iterlim) {
-   u=double(length(c1))
-   .C("callroot",as.integer(length(c1)),as.double(c1),as.double(c2),as.double(tol),
-       as.integer(iterlim),r=as.double(u))$r}
-
    m=ncol(lnprices)
    n=nrow(lnprices)
    d=ncol(Xexpend)
@@ -29,7 +22,7 @@ root=function(c1,c2,tol,iterlim) {
    iotam=c(rep(1,m))
    c1=as.vector(Xexpend%*%gamma)%x%iotam-as.vector(t(lnprices))+alpha
    c2=c(rep(exp(k),n))   
-   u=root(c1,c2,.0000001,20)
+   u=callroot(c1,c2,.0000001,20)
    v=alpha - u*exp(k)-as.vector(t(lnprices))
    vmat=matrix(v,ncol=m,byrow=TRUE)
    vmat=tau*vmat
@@ -47,5 +40,4 @@ root=function(c1,c2,tol,iterlim) {
 
 return(list(y=y,Xexpend=Xexpend,lnprices=lnprices,theta=theta,prob=Prob))
 
-}
-
+}
\ No newline at end of file
diff --git a/R/summary.bayesm.var.R b/R/summary.bayesm.var.R
index b17f4a6..12aec11 100755
--- a/R/summary.bayesm.var.R
+++ b/R/summary.bayesm.var.R
@@ -32,6 +32,6 @@ summary.bayesm.var=function(object,names,burnin=trunc(.1*nrow(Vard)),tvalues,QUA
   plabels=paste(labels[,1],labels[,2],sep=",")
   uppertri=as.matrix(Vard[,ind])
   attributes(uppertri)$class="bayesm.mat"
-  summary(uppertri,names=plabels,tvalues=tvalues,QUANTILES=QUANTILES)
+  summary(uppertri,names=plabels,burnin=burnin,tvalues=tvalues,QUANTILES=QUANTILES)
   invisible()
 }
diff --git a/data/Scotch.rda b/data/Scotch.rda
index 72bc0bd..afbf6ef 100644
Binary files a/data/Scotch.rda and b/data/Scotch.rda differ
diff --git a/data/bank.rda b/data/bank.rda
index 6ea1677..87f50c5 100644
Binary files a/data/bank.rda and b/data/bank.rda differ
diff --git a/data/cheese.rda b/data/cheese.rda
index 771a103..ad12e4e 100644
Binary files a/data/cheese.rda and b/data/cheese.rda differ
diff --git a/data/customerSat.rda b/data/customerSat.rda
index 505b276..be380c0 100644
Binary files a/data/customerSat.rda and b/data/customerSat.rda differ
diff --git a/data/datalist b/data/datalist
deleted file mode 100644
index d03112d..0000000
--- a/data/datalist
+++ /dev/null
@@ -1,8 +0,0 @@
-Scotch
-bank
-cheese
-customerSat
-detailing
-margarine
-orangeJuice
-tuna
diff --git a/data/detailing.rda b/data/detailing.rda
index a0efcd2..bffb833 100644
Binary files a/data/detailing.rda and b/data/detailing.rda differ
diff --git a/data/margarine.rda b/data/margarine.rda
index 27c8b1d..34f4d21 100644
Binary files a/data/margarine.rda and b/data/margarine.rda differ
diff --git a/data/orangeJuice.rda b/data/orangeJuice.rda
index 286bec2..4e7ba7c 100644
Binary files a/data/orangeJuice.rda and b/data/orangeJuice.rda differ
diff --git a/data/tuna.rda b/data/tuna.rda
index 08dc626..583b98f 100644
Binary files a/data/tuna.rda and b/data/tuna.rda differ
diff --git a/inst/doc/Some_Useful_R_Pointers.pdf b/inst/doc/Some_Useful_R_Pointers.pdf
deleted file mode 100755
index d47520d..0000000
Binary files a/inst/doc/Some_Useful_R_Pointers.pdf and /dev/null differ
diff --git a/inst/doc/Tips_On_Using_bayesm.pdf b/inst/doc/Tips_On_Using_bayesm.pdf
deleted file mode 100755
index b1bfe71..0000000
Binary files a/inst/doc/Tips_On_Using_bayesm.pdf and /dev/null differ
diff --git a/inst/doc/bayesm-manual.pdf b/inst/doc/bayesm-manual.pdf
deleted file mode 100644
index 3180960..0000000
Binary files a/inst/doc/bayesm-manual.pdf and /dev/null differ
diff --git a/inst/include/bayesm.h b/inst/include/bayesm.h
new file mode 100644
index 0000000..da352e2
--- /dev/null
+++ b/inst/include/bayesm.h
@@ -0,0 +1,151 @@
+#ifndef __BAYESM_H__
+#define __BAYESM_H__
+
+#include <RcppArmadillo.h>
+#include <Rcpp.h>
+#include <stdio.h>
+#include <time.h>
+
+using namespace arma;
+using namespace Rcpp;
+
+//CUSTOM STRUCTS--------------------------------------------------------------------------------------------------
+//Used in rhierLinearMixture, rhierLinearModel, rhierMnlDP, rhierMnlRwMixture, rhierNegbinRw, and rsurGibbs
+struct moments{
+  vec y;
+  mat X;
+  mat XpX;
+  vec Xpy;
+  mat hess;
+};
+
+//Used in rhierLinearMixture, rhierLinearModel, rhierMnlRWMixture, and utilityFunctions.cpp
+struct unireg{
+    vec beta;
+    double sigmasq;
+  };
+
+//Used in rhierMnlDP, rhierMnlRwMixture, and utilityFunctions.cpp
+struct mnlMetropOnceOut{
+  vec betadraw;
+  int stay;
+  double oldll;
+};  
+  
+//Used in rDPGibbs, rhierMnlDP, rivDP, and utilityFunctions.cpp
+struct lambda{
+    vec mubar;
+    double Amu;
+    double nu;
+    mat V;
+};
+
+//Used in rDPGibbs, rhierMnlDP, rivDP, and utilityFunctions.cpp
+struct priorAlpha{
+  double power;
+  double alphamin;
+  double alphamax;
+  int n;
+};
+
+//Used  in rDPGibbs, rhierMnlDP, rivDP, and utilityFunctions.cpp
+struct murooti{
+  vec mu;
+  mat rooti;
+};
+
+//Used in rDPGibbs, rhierMnlDP, rivDP, and utilityFunctions.cpp
+struct thetaStarIndex{
+  ivec indic;
+  std::vector<murooti> thetaStar_vector;
+};
+
+//Used in rhierMnlDP, rivDP
+struct DPOut{
+  ivec indic;
+  std::vector<murooti> thetaStar_vector;
+  std::vector<murooti> thetaNp1_vector;
+  double alpha;
+  int Istar;
+  lambda lambda_struct;
+};
+
+//EXPOSED FUNCTIONS-----------------------------------------------------------------------------------------------
+List rwishart(int const& nu, mat const& V);
+
+List rmultireg(mat const& Y, mat const& X, mat const& Bbar, mat const& A, int nu, mat const& V);
+
+vec rdirichlet(vec const& alpha);
+
+double llmnl(vec const& beta, vec const& y, mat const& X);
+
+mat lndIChisq(double nu, double ssq, mat const& X);
+
+double lndMvst(vec const& x, int nu, vec const& mu, mat const& rooti, bool NORMC);
+
+double lndMvn(vec const& x, vec const& mu, mat const& rooti);
+
+double lndIWishart(double nu, mat const& V, mat const& IW);
+
+vec rmvst(int nu, vec const& mu, mat const& root);
+
+vec breg(vec const& y, mat const& X, vec const& betabar, mat const& A);
+
+vec cgetC(double e, int k);
+
+List rmixGibbs( mat const& y,  mat const& Bbar, mat const& A, int nu, mat const& V,  vec const& a, vec const& p,  vec const& z);
+  //rmixGibbs contains the following support functions, which are called ONLY THROUGH rmixGibbs: drawCompsFromLabels, drawLabelsFromComps, and drawPFromLabels
+
+//SUPPORT FUNCTIONS (contained in utilityFunctions.cpp)-----------------------------------------------------------
+//Used in rmvpGibbs and rmnpGibbs
+vec condmom(vec const& x, vec const& mu, mat const& sigmai, int p, int j);
+
+double rtrun1(double mu, double sigma,double trunpt, int above);
+
+//Used in rhierLinearModel, rhierLinearMixture and rhierMnlRWMixture
+mat drawDelta(mat const& x,mat const& y,vec const& z,List const& comps,vec const& deltabar,mat const& Ad);
+
+unireg runiregG(vec const& y, mat const& X, mat const& XpX, vec const& Xpy, double sigmasq, mat const& A, vec const& Abetabar, int nu, double ssq);
+
+//Used in rnegbinRW and rhierNegbinRw
+double llnegbin(vec const& y, vec const& lambda, double alpha, bool constant);
+
+double lpostbeta(double alpha, vec const& beta, mat const& X, vec const& y, vec const& betabar, mat const& rootA);
+
+double lpostalpha(double alpha, vec const& beta, mat const& X, vec const& y, double a, double b);
+
+//Used in rbprobitGibbs and rordprobitGibbs
+vec breg1(mat const& root, mat const& X, vec const& y, vec const& Abetabar);
+
+vec rtrunVec(vec const& mu,vec const& sigma, vec const& a, vec const& b);
+
+//Used in rhierMnlDP and rhierMnlRwMixture
+mnlMetropOnceOut mnlMetropOnce(vec const& y, mat const& X, vec const& oldbeta, double oldll,double s, mat const& incroot, vec const& betabar, mat const& rootpi);
+
+//Used in rDPGibbs, rhierMnlDP, rivDP
+int rmultinomF(vec const& p);
+
+mat yden(std::vector<murooti> const& thetaStar, mat const& y);
+
+ivec numcomp(ivec const& indic, int k);
+
+murooti thetaD(mat const& y, lambda const& lambda_struct);
+
+thetaStarIndex thetaStarDraw(ivec indic, std::vector<murooti> thetaStar_vector, mat const& y, mat ydenmat, vec const& q0v, double alpha, lambda const& lambda_struct, int maxuniq);
+
+vec q0(mat const& y, lambda const& lambda_struct);
+
+vec seq_rcpp(double from, double to, int len); //kept _rcpp due to conflict with base seq function
+
+double alphaD(priorAlpha const& priorAlpha_struct, int Istar, int gridsize);
+
+murooti GD(lambda const& lambda_struct);
+
+lambda lambdaD(lambda const& lambda_struct, std::vector<murooti> const& thetaStar_vector, vec const& alim, vec const& nulim, vec const& vlim, int gridsize);
+
+//FUNCTION TIMING (contained in functionTiming.cpp)---------------------------------------------------------------
+void startMcmcTimer();
+void infoMcmcTimer(int rep, int R);
+void endMcmcTimer();
+
+#endif
diff --git a/man/bank.Rd b/man/bank.Rd
old mode 100755
new mode 100644
index e2dbe00..3155e72
--- a/man/bank.Rd
+++ b/man/bank.Rd
@@ -1,127 +1,127 @@
-\name{bank}
-\alias{bank}
-\docType{data}
-\title{ Bank Card Conjoint Data of Allenby and Ginter (1995)}
-\description{
-  Data from a conjoint experiment in which two partial profiles of 
-  credit cards were presented to 946 respondents. The variable 
-  bank\$choiceAtt\$choice indicates which profile was chosen.  The
-  profiles are coded as the difference in attribute levels. Thus,
-  a "-1" means the profile coded as a choice of "0" has the attribute.
-  A value of 0 means that the attribute was not present in the 
-  comparison.
-  
-  data on age,income and gender (female=1) are also recorded in 
-  bank\$demo
-}
-\usage{data(bank)}
-\format{
-  This R object is a list of two data frames, list(choiceAtt,demo).
-
-  List of 2 
-
- \$ choiceAtt:`data.frame':	14799 obs. of  16 variables:\cr
-  \ldots\$ id           : int [1:14799] 1 1 1 1 1 1 1 1 1 1 \cr
-  \ldots\$ choice       : int [1:14799] 1 1 1 1 1 1 1 1 0 1 \cr
-  \ldots\$ Med\_FInt     : int [1:14799] 1 1 1 0 0 0 0 0 0 0 \cr
-  \ldots\$ Low\_FInt     : int [1:14799] 0 0 0 0 0 0 0 0 0 0 \cr
-  \ldots\$ Med\_VInt     : int [1:14799] 0 0 0 0 0 0 0 0 0 0 \cr
-  \ldots\$ Rewrd\_2      : int [1:14799] -1 1 0 0 0 0 0 1 -1 0 \cr
-  \ldots\$ Rewrd\_3      : int [1:14799] 0 -1 1 0 0 0 0 0 1 -1 \cr
-  \ldots\$ Rewrd\_4      : int [1:14799] 0 0 -1 0 0 0 0 0 0 1 \cr
-  \ldots\$ Med\_Fee      : int [1:14799] 0 0 0 1 1 -1 -1 0 0 0 \cr
-  \ldots\$ Low\_Fee      : int [1:14799] 0 0 0 0 0 1 1 0 0 0 \cr
-  \ldots\$ Bank\_B       : int [1:14799] 0 0 0 -1 1 -1 1 0 0 0 \cr
-  \ldots\$ Out\_State    : int [1:14799] 0 0 0 0 -1 0 -1 0 0 0 \cr
-  \ldots\$ Med\_Rebate   : int [1:14799] 0 0 0 0 0 0 0 0 0 0 \cr
-  \ldots\$ High\_Rebate  : int [1:14799] 0 0 0 0 0 0 0 0 0 0 \cr
-  \ldots\$ High\_CredLine: int [1:14799] 0 0 0 0 0 0 0 -1 -1 -1 \cr
-  \ldots\$ Long\_Grace   : int [1:14799] 0 0 0 0 0 0 0 0 0 0 
-
- \$ demo     :`data.frame':	946 obs. of  4 variables:\cr
-  \ldots\$ id    : int [1:946] 1 2 3 4 6 7 8 9 10 11 \cr
-  \ldots\$ age   : int [1:946] 60 40 75 40 30 30 50 50 50 40 \cr
-  \ldots\$ income: int [1:946] 20 40 30 40 30 60 50 100 50 40 \cr
-  \ldots\$ gender: int [1:946] 1 1 0 0 0 0 1 0 0 0 \cr
-}
-\details{
-  Each respondent was presented with between 13 and 17 paired comparisons. Thus, this
-  dataset has a panel structure.
-}
-\source{
-  Allenby and Ginter (1995), "Using Extremes to Design Products and Segment
-  Markets," \emph{JMR}, 392-403.
-}
-\references{ Appendix A, \emph{Bayesian Statistics and Marketing}
-  by Rossi,Allenby and McCulloch. \cr
-  \url{http://www.perossi.org/home/bsm-1l}
-}
-\examples{
-data(bank)
-cat(" table of Binary Dep Var", fill=TRUE)
-print(table(bank$choiceAtt[,2]))
-cat(" table of Attribute Variables",fill=TRUE)
-mat=apply(as.matrix(bank$choiceAtt[,3:16]),2,table)
-print(mat)
-cat(" means of Demographic Variables",fill=TRUE)
-mat=apply(as.matrix(bank$demo[,2:3]),2,mean)
-print(mat)
-
-## example of processing for use with rhierBinLogit
-##
-if(0)
-{
-choiceAtt=bank$choiceAtt
-Z=bank$demo
-
-## center demo data so that mean of random-effects
-## distribution can be interpreted as the average respondent
-
-Z[,1]=rep(1,nrow(Z))
-Z[,2]=Z[,2]-mean(Z[,2])
-Z[,3]=Z[,3]-mean(Z[,3])
-Z[,4]=Z[,4]-mean(Z[,4])
-Z=as.matrix(Z)
-
-hh=levels(factor(choiceAtt$id))
-nhh=length(hh)
-lgtdata=NULL
-for (i in 1:nhh) {
-	y=choiceAtt[choiceAtt[,1]==hh[i],2]
-	nobs=length(y)
-	X=as.matrix(choiceAtt[choiceAtt[,1]==hh[i],c(3:16)])
-	lgtdata[[i]]=list(y=y,X=X)
-		}
-
-cat("Finished Reading data",fill=TRUE)
-fsh()
-
-Data=list(lgtdata=lgtdata,Z=Z)
-Mcmc=list(R=10000,sbeta=0.2,keep=20)
-set.seed(66)
-out=rhierBinLogit(Data=Data,Mcmc=Mcmc)
-
-begin=5000/20
-end=10000/20
-
-summary(out$Deltadraw,burnin=begin)
-summary(out$Vbetadraw,burnin=begin)
-
-if(0){
-## plotting examples
-
-## plot grand means of random effects distribution (first row of Delta)
-index=4*c(0:13)+1
-matplot(out$Deltadraw[,index],type="l",xlab="Iterations/20",ylab="",
-main="Average Respondent Part-Worths")
-
-## plot hierarchical coefs
-plot(out$betadraw)
-
-## plot log-likelihood
-plot(out$llike,type="l",xlab="Iterations/20",ylab="",main="Log Likelihood")
-
-}
-}
-}
-\keyword{datasets}
+\name{bank}
+\alias{bank}
+\docType{data}
+\title{ Bank Card Conjoint Data of Allenby and Ginter (1995)}
+\description{
+  Data from a conjoint experiment in which two partial profiles of 
+  credit cards were presented to 946 respondents. The variable 
+  bank$choiceAtt$choice indicates which profile was chosen.  The
+  profiles are coded as the difference in attribute levels. Thus,
+  a "-1" means the profile coded as a choice of "0" has the attribute.
+  A value of 0 means that the attribute was not present in the 
+  comparison.
+  
+  Data on age, income and gender (female=1) are also recorded in 
+  bank$demo
+}
+\usage{data(bank)}
+\format{
+  This R object is a list of two data frames, list(choiceAtt,demo).
+
+  List of 2 
+
+ $ choiceAtt:`data.frame':	14799 obs. of  16 variables:\cr
+  \ldots$ id           : int [1:14799] 1 1 1 1 1 1 1 1 1 1 \cr
+  \ldots$ choice       : int [1:14799] 1 1 1 1 1 1 1 1 0 1 \cr
+  \ldots$ Med_FInt     : int [1:14799] 1 1 1 0 0 0 0 0 0 0 \cr
+  \ldots$ Low_FInt     : int [1:14799] 0 0 0 0 0 0 0 0 0 0 \cr
+  \ldots$ Med_VInt     : int [1:14799] 0 0 0 0 0 0 0 0 0 0 \cr
+  \ldots$ Rewrd_2      : int [1:14799] -1 1 0 0 0 0 0 1 -1 0 \cr
+  \ldots$ Rewrd_3      : int [1:14799] 0 -1 1 0 0 0 0 0 1 -1 \cr
+  \ldots$ Rewrd_4      : int [1:14799] 0 0 -1 0 0 0 0 0 0 1 \cr
+  \ldots$ Med_Fee      : int [1:14799] 0 0 0 1 1 -1 -1 0 0 0 \cr
+  \ldots$ Low_Fee      : int [1:14799] 0 0 0 0 0 1 1 0 0 0 \cr
+  \ldots$ Bank_B       : int [1:14799] 0 0 0 -1 1 -1 1 0 0 0 \cr
+  \ldots$ Out_State    : int [1:14799] 0 0 0 0 -1 0 -1 0 0 0 \cr
+  \ldots$ Med_Rebate   : int [1:14799] 0 0 0 0 0 0 0 0 0 0 \cr
+  \ldots$ High_Rebate  : int [1:14799] 0 0 0 0 0 0 0 0 0 0 \cr
+  \ldots$ High_CredLine: int [1:14799] 0 0 0 0 0 0 0 -1 -1 -1 \cr
+  \ldots$ Long_Grace   : int [1:14799] 0 0 0 0 0 0 0 0 0 0 
+
+ $ demo     :`data.frame':	946 obs. of  4 variables:\cr
+  \ldots$ id    : int [1:946] 1 2 3 4 6 7 8 9 10 11 \cr
+  \ldots$ age   : int [1:946] 60 40 75 40 30 30 50 50 50 40 \cr
+  \ldots$ income: int [1:946] 20 40 30 40 30 60 50 100 50 40 \cr
+  \ldots$ gender: int [1:946] 1 1 0 0 0 0 1 0 0 0 \cr
+}
+\details{
+  Each respondent was presented with between 13 and 17 paired comparisons. Thus, this
+  dataset has a panel structure.
+}
+\source{
+  Allenby and Ginter (1995), "Using Extremes to Design Products and Segment
+  Markets," \emph{JMR}, 392-403.
+}
+\references{ Appendix A, \emph{Bayesian Statistics and Marketing}
+  by Rossi, Allenby and McCulloch. \cr
+  \url{http://www.perossi.org/home/bsm-1}
+}
+\examples{
+data(bank)
+cat(" table of Binary Dep Var", fill=TRUE)
+print(table(bank$choiceAtt[,2]))
+cat(" table of Attribute Variables",fill=TRUE)
+mat=apply(as.matrix(bank$choiceAtt[,3:16]),2,table)
+print(mat)
+cat(" means of Demographic Variables",fill=TRUE)
+mat=apply(as.matrix(bank$demo[,2:3]),2,mean)
+print(mat)
+
+## example of processing for use with rhierBinLogit
+##
+if(0)
+{
+choiceAtt=bank$choiceAtt
+Z=bank$demo
+
+## center demo data so that mean of random-effects
+## distribution can be interpreted as the average respondent
+
+Z[,1]=rep(1,nrow(Z))
+Z[,2]=Z[,2]-mean(Z[,2])
+Z[,3]=Z[,3]-mean(Z[,3])
+Z[,4]=Z[,4]-mean(Z[,4])
+Z=as.matrix(Z)
+
+hh=levels(factor(choiceAtt$id))
+nhh=length(hh)
+lgtdata=NULL
+for (i in 1:nhh) {
+	y=choiceAtt[choiceAtt[,1]==hh[i],2]
+	nobs=length(y)
+	X=as.matrix(choiceAtt[choiceAtt[,1]==hh[i],c(3:16)])
+	lgtdata[[i]]=list(y=y,X=X)
+		}
+
+cat("Finished Reading data",fill=TRUE)
+fsh()
+
+Data=list(lgtdata=lgtdata,Z=Z)
+Mcmc=list(R=10000,sbeta=0.2,keep=20)
+set.seed(66)
+out=rhierBinLogit(Data=Data,Mcmc=Mcmc)
+
+begin=5000/20
+end=10000/20
+
+summary(out$Deltadraw,burnin=begin)
+summary(out$Vbetadraw,burnin=begin)
+
+if(0){
+## plotting examples
+
+## plot grand means of random effects distribution (first row of Delta)
+index=4*c(0:13)+1
+matplot(out$Deltadraw[,index],type="l",xlab="Iterations/20",ylab="",
+main="Average Respondent Part-Worths")
+
+## plot hierarchical coefs
+plot(out$betadraw)
+
+## plot log-likelihood
+plot(out$llike,type="l",xlab="Iterations/20",ylab="",main="Log Likelihood")
+
+}
+}
+}
+\keyword{datasets}
diff --git a/man/breg.Rd b/man/breg.Rd
old mode 100755
new mode 100644
index cf9c5fe..864f8f0
--- a/man/breg.Rd
+++ b/man/breg.Rd
@@ -31,7 +31,7 @@ breg(y, X, betabar, A)
 }
 
 \references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
-  by Rossi,Allenby and McCulloch. \cr
+  by Rossi, Allenby and McCulloch. \cr
   \url{http://www.perossi.org/home/bsm-1}
 }
 
diff --git a/man/cgetC.Rd b/man/cgetC.Rd
old mode 100755
new mode 100644
index 778cb05..02b9235
--- a/man/cgetC.Rd
+++ b/man/cgetC.Rd
@@ -27,7 +27,7 @@ cgetC(e, k)
  Rossi et al (2001), \dQuote{Overcoming Scale Usage Heterogeneity,} \emph{JASA}96, 20-31.
 }
 
-\author{ Rob McCulloch and Peter Rossi, Graduate School of Business, University of Chicago.
+\author{ Rob McCulloch and Peter Rossi, Anderson School, UCLA.
   \email{perossichi at gmail.com}.
 }
 
diff --git a/man/cheese.Rd b/man/cheese.Rd
old mode 100755
new mode 100644
index 41a65f8..eb486f8
--- a/man/cheese.Rd
+++ b/man/cheese.Rd
@@ -1,83 +1,83 @@
-\name{cheese}
-\alias{cheese}
-\docType{data}
-\title{ Sliced Cheese Data}
-\description{
-  Panel data with sales volume for a package of Borden Sliced Cheese
-  as well as a measure of display activity and price.  Weekly data aggregated
-  to the "key" account or retailer/market level.
-}
-\usage{data(cheese)}
-\format{
-  A data frame with 5555 observations on the following 4 variables.
-  \describe{
-    \item{\code{RETAILER}}{a list of 88 retailers}
-    \item{\code{VOLUME}}{unit sales}
-    \item{\code{DISP}}{a measure of display activity -- per cent ACV on display}
-    \item{\code{PRICE}}{in \$}
-  }
-}
-\source{
-  Boatwright et al (1999), "Account-Level Modeling for Trade Promotion," 
-  \emph{JASA} 94, 1063-1073.
-}
-\references{
- Chapter 3, \emph{Bayesian Statistics and Marketing} by Rossi et al. \cr
- \url{http://www.perossi.org/home/bsm-1l}
-}
-\examples{
-data(cheese)
-cat(" Quantiles of the Variables ",fill=TRUE)
-mat=apply(as.matrix(cheese[,2:4]),2,quantile)
-print(mat)
-
-##
-## example of processing for use with rhierLinearModel
-##
-if(0)
-{
-
-retailer=levels(cheese$RETAILER)
-nreg=length(retailer)
-nvar=3
-regdata=NULL
-for (reg in 1:nreg) {
-	y=log(cheese$VOLUME[cheese$RETAILER==retailer[reg]])
-	iota=c(rep(1,length(y)))
-	X=cbind(iota,cheese$DISP[cheese$RETAILER==retailer[reg]],
-		log(cheese$PRICE[cheese$RETAILER==retailer[reg]]))
-	regdata[[reg]]=list(y=y,X=X)
-}
-Z=matrix(c(rep(1,nreg)),ncol=1)
-nz=ncol(Z)
-##
-## run each individual regression and store results
-##
-lscoef=matrix(double(nreg*nvar),ncol=nvar)
-for (reg in 1:nreg) {
-	coef=lsfit(regdata[[reg]]$X,regdata[[reg]]$y,intercept=FALSE)$coef
-	if (var(regdata[[reg]]$X[,2])==0)  { lscoef[reg,1]=coef[1]; lscoef[reg,3]=coef[2]}
-	else {lscoef[reg,]=coef }
-}
-
-R=2000
-Data=list(regdata=regdata,Z=Z)
-Mcmc=list(R=R,keep=1)
-
-set.seed(66)
-out=rhierLinearModel(Data=Data,Mcmc=Mcmc)
-
-cat("Summary of Delta Draws",fill=TRUE)
-summary(out$Deltadraw)
-cat("Summary of Vbeta Draws",fill=TRUE)
-summary(out$Vbetadraw)
-
-if(0){
-#
-# plot hier coefs
-plot(out$betadraw)
-}
-
-}
-}
-\keyword{datasets}
+\name{cheese}
+\alias{cheese}
+\docType{data}
+\title{ Sliced Cheese Data}
+\description{
+  Panel data with sales volume for a package of Borden Sliced Cheese
+  as well as a measure of display activity and price.  Weekly data aggregated
+  to the "key" account or retailer/market level.
+}
+\usage{data(cheese)}
+\format{
+  A data frame with 5555 observations on the following 4 variables.
+  \describe{
+    \item{\code{RETAILER}}{a list of 88 retailers}
+    \item{\code{VOLUME}}{unit sales}
+    \item{\code{DISP}}{a measure of display activity -- per cent ACV on display}
+    \item{\code{PRICE}}{in $}
+  }
+}
+\source{
+  Boatwright et al (1999), "Account-Level Modeling for Trade Promotion," 
+  \emph{JASA} 94, 1063-1073.
+}
+\references{
+ Chapter 3, \emph{Bayesian Statistics and Marketing} by Rossi, Allenby and McCulloch. \cr
+ \url{http://www.perossi.org/home/bsm-1}
+}
+\examples{
+data(cheese)
+cat(" Quantiles of the Variables ",fill=TRUE)
+mat=apply(as.matrix(cheese[,2:4]),2,quantile)
+print(mat)
+
+##
+## example of processing for use with rhierLinearModel
+##
+if(0)
+{
+
+retailer=levels(cheese$RETAILER)
+nreg=length(retailer)
+nvar=3
+regdata=NULL
+for (reg in 1:nreg) {
+	y=log(cheese$VOLUME[cheese$RETAILER==retailer[reg]])
+	iota=c(rep(1,length(y)))
+	X=cbind(iota,cheese$DISP[cheese$RETAILER==retailer[reg]],
+		log(cheese$PRICE[cheese$RETAILER==retailer[reg]]))
+	regdata[[reg]]=list(y=y,X=X)
+}
+Z=matrix(c(rep(1,nreg)),ncol=1)
+nz=ncol(Z)
+##
+## run each individual regression and store results
+##
+lscoef=matrix(double(nreg*nvar),ncol=nvar)
+for (reg in 1:nreg) {
+	coef=lsfit(regdata[[reg]]$X,regdata[[reg]]$y,intercept=FALSE)$coef
+	if (var(regdata[[reg]]$X[,2])==0)  { lscoef[reg,1]=coef[1]; lscoef[reg,3]=coef[2]}
+	else {lscoef[reg,]=coef }
+}
+
+R=2000
+Data=list(regdata=regdata,Z=Z)
+Mcmc=list(R=R,keep=1)
+
+set.seed(66)
+out=rhierLinearModel(Data=Data,Mcmc=Mcmc)
+
+cat("Summary of Delta Draws",fill=TRUE)
+summary(out$Deltadraw)
+cat("Summary of Vbeta Draws",fill=TRUE)
+summary(out$Vbetadraw)
+
+if(0){
+#
+# plot hier coefs
+plot(out$betadraw)
+}
+
+}
+}
+\keyword{datasets}
diff --git a/man/clusterMix.Rd b/man/clusterMix.Rd
old mode 100755
new mode 100644
index c76be47..65f4c0f
--- a/man/clusterMix.Rd
+++ b/man/clusterMix.Rd
@@ -1,88 +1,88 @@
-\name{clusterMix}
-\alias{clusterMix}
-\concept{normal mixture}
-\concept{clustering}
-\title{ Cluster Observations Based on Indicator MCMC Draws }
-\description{
-  \code{clusterMix} uses MCMC draws of indicator variables from a normal
-  component mixture model to cluster observations based on a similarity matrix.
-}
-\usage{
-clusterMix(zdraw, cutoff = 0.9, SILENT = FALSE)
-}
-\arguments{
-  \item{zdraw}{ R x nobs array of draws of indicators }
-  \item{cutoff}{ cutoff probability for similarity  (def=.9)}
-  \item{SILENT}{ logical flag for silent operation (def= FALSE) }
-}
-\details{
-
-   define a similarity matrix, Sim, Sim[i,j]=1 if observations i and j are in same component.
-   Compute the posterior mean of Sim over indicator draws.
-
-   clustering is achieved by two means:
-
-   Method A:
-   Find the indicator draw whose similarity matrix minimizes, loss(E[Sim]-Sim(z)),  
-   where loss is absolute deviation.
-
-   Method B:
-   Define a Similarity matrix by setting any element of E[Sim] = 1 if E[Sim] > cutoff.
-   Compute the clustering scheme associated with this "windsorized" Similarity matrix.
-}
-\value{
-  \item{clustera}{indicator function for clustering based on method A above}
-  \item{clusterb}{indicator function for clustering based on method B above}
-}
-\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
-  by Rossi, Allenby and McCulloch Chapter 3. \cr
-  \url{http://www.perossi.org/home/bsm-1}
-}
-
-\author{ Peter Rossi, Graduate School of Business, University of Chicago
-  \email{perossichi at gmail.com}.
-}
-
-\section{Warning}{
-  This routine is a utility routine that does \strong{not} check the
-  input arguments for proper dimensions and type.
-}
-\seealso{ \code{\link{rnmixGibbs}}  }
-
-\keyword{ models }
-\keyword{ multivariate }
-\examples{
-##
-if(nchar(Sys.getenv("LONG_TEST")) != 0) 
-{
-## simulate data from mixture of normals
-n=500
-pvec=c(.5,.5)
-mu1=c(2,2)
-mu2=c(-2,-2)
-Sigma1=matrix(c(1,.5,.5,1),ncol=2)
-Sigma2=matrix(c(1,.5,.5,1),ncol=2)
-comps=NULL
-comps[[1]]=list(mu1,backsolve(chol(Sigma1),diag(2)))
-comps[[2]]=list(mu2,backsolve(chol(Sigma2),diag(2)))
-dm=rmixture(n,pvec,comps)
-## run MCMC on normal mixture
-R=2000
-Data=list(y=dm$x)
-ncomp=2
-Prior=list(ncomp=ncomp,a=c(rep(100,ncomp)))
-Mcmc=list(R=R,keep=1)
-out=rnmixGibbs(Data=Data,Prior=Prior,Mcmc=Mcmc)
-begin=500
-end=R
-## find clusters
-outclusterMix=clusterMix(out$zdraw[begin:end,])
-##
-## check on clustering versus "truth"
-##  note: there could be switched labels
-##
-table(outclusterMix$clustera,dm$z)
-table(outclusterMix$clusterb,dm$z)
-}
-##
-}
+\name{clusterMix}
+\alias{clusterMix}
+\concept{normal mixture}
+\concept{clustering}
+\title{ Cluster Observations Based on Indicator MCMC Draws }
+\description{
+  \code{clusterMix} uses MCMC draws of indicator variables from a normal
+  component mixture model to cluster observations based on a similarity matrix.
+}
+\usage{
+clusterMix(zdraw, cutoff = 0.9, SILENT = FALSE, nprint = BayesmConstant.nprint)
+}
+\arguments{
+  \item{zdraw}{ R x nobs array of draws of indicators }
+  \item{cutoff}{ cutoff probability for similarity  (def: .9)}
+  \item{SILENT}{ logical flag for silent operation (def: FALSE) }
+  \item{nprint}{ print every nprint'th draw (def: 100) }
+}
+\details{
+
+   Define a similarity matrix, Sim, Sim[i,j]=1 if observations i and j are in same component.
+   Compute the posterior mean of Sim over indicator draws.
+
+   Clustering is achieved by two means:
+
+   Method A:
+   Find the indicator draw whose similarity matrix minimizes, loss(E[Sim]-Sim(z)),  
+   where loss is absolute deviation.
+
+   Method B:
+   Define a Similarity matrix by setting any element of E[Sim] = 1 if E[Sim] > cutoff.
+   Compute the clustering scheme associated with this "winsorized" Similarity matrix.
+}
+\value{
+  \item{clustera}{indicator function for clustering based on method A above}
+  \item{clusterb}{indicator function for clustering based on method B above}
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Rossi, Allenby and McCulloch Chapter 3. \cr
+  \url{http://www.perossi.org/home/bsm-1}
+}
+
+\author{ Peter Rossi, Anderson School, UCLA, <perossichi at gmail.com>.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+\seealso{ \code{\link{rnmixGibbs}}  }
+
+\keyword{ models }
+\keyword{ multivariate }
+\examples{
+##
+if(nchar(Sys.getenv("LONG_TEST")) != 0) 
+{
+## simulate data from mixture of normals
+n=500
+pvec=c(.5,.5)
+mu1=c(2,2)
+mu2=c(-2,-2)
+Sigma1=matrix(c(1,.5,.5,1),ncol=2)
+Sigma2=matrix(c(1,.5,.5,1),ncol=2)
+comps=NULL
+comps[[1]]=list(mu1,backsolve(chol(Sigma1),diag(2)))
+comps[[2]]=list(mu2,backsolve(chol(Sigma2),diag(2)))
+dm=rmixture(n,pvec,comps)
+## run MCMC on normal mixture
+R=2000
+Data=list(y=dm$x)
+ncomp=2
+Prior=list(ncomp=ncomp,a=c(rep(100,ncomp)))
+Mcmc=list(R=R,keep=1)
+out=rnmixGibbs(Data=Data,Prior=Prior,Mcmc=Mcmc)
+begin=500
+end=R
+## find clusters
+outclusterMix=clusterMix(out$nmix$zdraw[begin:end,])
+##
+## check on clustering versus "truth"
+##  note: there could be switched labels
+##
+table(outclusterMix$clustera,dm$z)
+table(outclusterMix$clusterb,dm$z)
+}
+##
+}
diff --git a/man/condMom.Rd b/man/condMom.Rd
old mode 100755
new mode 100644
index 2b6a4ec..de0afa0
--- a/man/condMom.Rd
+++ b/man/condMom.Rd
@@ -18,7 +18,7 @@ condMom(x, mu, sigi, i)
   \item{i}{ conditional distribution of ith element }
 }
 \details{
-  \eqn{x} \eqn{\sim}{~} \eqn{MVN(mu,Sigma)}.
+  \eqn{x} \eqn{\sim}{~} \eqn{MVN(mu,sigi^{-1})}.
 
   \code{condMom} computes moments of \eqn{x_i} given \eqn{x_{-i}}.
 }
diff --git a/man/customerSat.Rd b/man/customerSat.Rd
old mode 100755
new mode 100644
index 305aa5f..ebf677c
--- a/man/customerSat.Rd
+++ b/man/customerSat.Rd
@@ -1,38 +1,38 @@
-\name{customerSat}
-\alias{customerSat}
-\docType{data}
-\title{ Customer Satisfaction Data}
-\description{
-  Responses to a satisfaction survey for a Yellow Pages advertising product.
-  All responses are on a 10 point scale from 1 to 10 (10 is "Excellent"
-  and 1 is "Poor")
-}
-\usage{data(customerSat)}
-\format{
-  A data frame with 1811 observations on the following 10 variables.
-  \describe{
-    \item{\code{q1}}{Overall Satisfaction}
-    \item{\code{q2}}{Setting Competitive Prices}
-    \item{\code{q3}}{Holding Price Increase to a Minimum}
-    \item{\code{q4}}{Appropriate Pricing given Volume}
-    \item{\code{q5}}{Demonstrating Effectiveness of Purchase}
-    \item{\code{q6}}{Reach a Large \# of Customers}
-    \item{\code{q7}}{Reach of Advertising}
-    \item{\code{q8}}{Long-term Exposure}
-    \item{\code{q9}}{Distribution}
-    \item{\code{q10}}{Distribution to Right Geographic Areas}
-  }
-}
-\source{
-  Rossi et al (2001), "Overcoming Scale Usage Heterogeneity,"
-  \emph{JASA} 96, 20-31.
-}
-\references{
-  Case Study 3, \emph{Bayesian Statistics and Marketing} by Rossi et al.\cr 
-  \url{http://www.perossi.org/home/bsm-1*}
-}
-\examples{
-data(customerSat)
-apply(as.matrix(customerSat),2,table)
-}
-\keyword{datasets}
+\name{customerSat}
+\alias{customerSat}
+\docType{data}
+\title{ Customer Satisfaction Data}
+\description{
+  Responses to a satisfaction survey for a Yellow Pages advertising product.
+  All responses are on a 10 point scale from 1 to 10 (10 is "Excellent"
+  and 1 is "Poor")
+}
+\usage{data(customerSat)}
+\format{
+  A data frame with 1811 observations on the following 10 variables.
+  \describe{
+    \item{\code{q1}}{Overall Satisfaction}
+    \item{\code{q2}}{Setting Competitive Prices}
+    \item{\code{q3}}{Holding Price Increase to a Minimum}
+    \item{\code{q4}}{Appropriate Pricing given Volume}
+    \item{\code{q5}}{Demonstrating Effectiveness of Purchase}
+    \item{\code{q6}}{Reach a Large # of Customers}
+    \item{\code{q7}}{Reach of Advertising}
+    \item{\code{q8}}{Long-term Exposure}
+    \item{\code{q9}}{Distribution}
+    \item{\code{q10}}{Distribution to Right Geographic Areas}
+  }
+}
+\source{
+  Rossi et al (2001), "Overcoming Scale Usage Heterogeneity,"
+  \emph{JASA} 96, 20-31.
+}
+\references{
+  Case Study 3, \emph{Bayesian Statistics and Marketing} by Rossi et al.\cr 
+  \url{http://www.perossi.org/home/bsm-1}
+}
+\examples{
+data(customerSat)
+apply(as.matrix(customerSat),2,table)
+}
+\keyword{datasets}
diff --git a/man/detailing.Rd b/man/detailing.Rd
old mode 100755
new mode 100644
index eb8840c..39ce6ec
--- a/man/detailing.Rd
+++ b/man/detailing.Rd
@@ -4,7 +4,7 @@
 \title{ Physician Detailing Data from Manchanda et al (2004)}
 \description{
   Monthly data on detailing (sales calls) on 1000 physicians. 23 mos of data
-  for each Physician. Includes physician covariates. Dependent Variable (\code{scripts}) is the
+  for each physician. Includes physician covariates. Dependent variable (\code{scripts}) is the
   number of new prescriptions ordered by the physician for the drug detailed.
 }
 \usage{data(detailing)}
@@ -13,22 +13,22 @@
  
  List of 2:
 
- \$ counts:`data.frame':	23000 obs. of  4 variables:\cr
-  \ldots\$ id        : int [1:23000] 1 1 1 1 1 1 1 1 1 1 \cr
-  \ldots\$ scripts       : int [1:23000] 3 12 3 6 5 2 5 1 5 3 \cr
-  \ldots\$ detailing     : int [1:23000] 1 1 1 2 1 0 2 2 1 1 \cr
-  \ldots\$ lagged\_scripts: int [1:23000] 4 3 12 3 6 5 2 5 1 5 
+ $ counts:`data.frame':	23000 obs. of  4 variables:\cr
+  \ldots$ id        : int [1:23000] 1 1 1 1 1 1 1 1 1 1 \cr
+  \ldots$ scripts       : int [1:23000] 3 12 3 6 5 2 5 1 5 3 \cr
+  \ldots$ detailing     : int [1:23000] 1 1 1 2 1 0 2 2 1 1 \cr
+  \ldots$ lagged_scripts: int [1:23000] 4 3 12 3 6 5 2 5 1 5 
 
- \$ demo  :`data.frame':	1000 obs. of  4 variables:\cr
-  \ldots\$ id          : int [1:1000] 1 2 3 4 5 6 7 8 9 10 \cr
-  \ldots\$ generalphys : int [1:1000] 1 0 1 1 0 1 1 1 1 1 \cr
-  \ldots\$ specialist: int [1:1000] 0 1 0 0 1 0 0 0 0 0  \cr
-  \ldots\$ mean\_samples: num [1:1000] 0.722 0.491 0.339 3.196 0.348 
+ $ demo  :`data.frame':	1000 obs. of  4 variables:\cr
+  \ldots$ id          : int [1:1000] 1 2 3 4 5 6 7 8 9 10 \cr
+  \ldots$ generalphys : int [1:1000] 1 0 1 1 0 1 1 1 1 1 \cr
+  \ldots$ specialist: int [1:1000] 0 1 0 0 1 0 0 0 0 0  \cr
+  \ldots$ mean_samples: num [1:1000] 0.722 0.491 0.339 3.196 0.348 
 }
 \details{
   generalphys is dummy for if doctor is a "general practitioner," specialist is dummy for
   if the physician is a specialist in the theraputic class for which the drug is 
-  intended, mean\_samples is the mean number of free drug samples given the doctor
+  intended, mean_samples is the mean number of free drug samples given the doctor
   over the sample.
 }
 \source{
diff --git a/man/eMixMargDen.Rd b/man/eMixMargDen.Rd
old mode 100755
new mode 100644
index fa4ff3d..928379d
--- a/man/eMixMargDen.Rd
+++ b/man/eMixMargDen.Rd
@@ -25,8 +25,8 @@ eMixMargDen(grid, probdraw, compdraw)
 \details{
   length(compdraw) is number of MCMC draws. \cr
   compdraw[[i]] is a list draws of mu and inv Chol root for each of mixture components. \cr
-  compdraw[[i]][[j]] is jth component.  compdraw[[i]][[j]]\$mu is mean vector; compdraw[[i]][[j]]\$rooti
-  is the UL decomp of \eqn{Sigma^{-1}}.
+  compdraw[[i]][[j]] is jth component.  compdraw[[i]][[j]]$mu is mean vector; compdraw[[i]][[j]]$rooti
+  is the UL decomp of \eqn{\Sigma^{-1}}.
 }
 
 \value{
diff --git a/man/ghkvec.Rd b/man/ghkvec.Rd
index de05d68..5a90b0f 100755
--- a/man/ghkvec.Rd
+++ b/man/ghkvec.Rd
@@ -1,49 +1,66 @@
-\name{ghkvec}
-\alias{ghkvec}
-\concept{multivariate normal distribution}
-\concept{GHK method}
-\concept{integral}
-
-\title{ Compute GHK approximation to Multivariate Normal Integrals }
-\description{
-  \code{ghkvec} computes the GHK approximation to the integral of a
-  multivariate normal density over a half plane defined by a set
-  of truncation points.  
-}
-\usage{
-ghkvec(L, trunpt, above, r)
-}
-\arguments{
-  \item{L}{ lower triangular Cholesky root of Covariance matrix }
-  \item{trunpt}{ vector of truncation points}
-  \item{above}{ vector of indicators for truncation above(1) or below(0) }
-  \item{r}{ number of draws to use in GHK }
-}
-\value{
-  approximation to integral
-}
-\note{
-  \code{ghkvec} can accept a vector of truncations and compute more than one
-  integral.  That is, length(trunpt)/length(above) number of different integrals,
-  each with the same Sigma and mean 0 but different truncation points. See 
-  example below for an example with two integrals at different truncation points.
-}
-\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
-  by Rossi,Allenby and McCulloch,  Chapter 2. \cr
-  \url{http://www.perossi.org/home/bsm-1}
-}
-
-\author{ Peter Rossi,Anderson School, UCLA,
-  \email{perossichi at gmail.com}.
-}
-
-\examples{
-##
-
-Sigma=matrix(c(1,.5,.5,1),ncol=2)
-L=t(chol(Sigma))
-trunpt=c(0,0,1,1)
-above=c(1,1)
-ghkvec(L,trunpt,above,100)
-}
-\keyword{ distribution }
+\name{ghkvec}
+\alias{ghkvec}
+\concept{multivariate normal distribution}
+\concept{GHK method}
+\concept{integral}
+
+\title{ Compute GHK approximation to Multivariate Normal Integrals }
+\description{
+  \code{ghkvec} computes the GHK approximation to the integral of a
+  multivariate normal density over a half plane defined by a set
+  of truncation points.  
+}
+\usage{
+ghkvec(L, trunpt, above, r, HALTON=TRUE, pn)
+}
+\arguments{
+  \item{L}{ lower triangular Cholesky root of covariance matrix }
+  \item{trunpt}{ vector of truncation points}
+  \item{above}{ vector of indicators for truncation above(1) or below(0) }
+  \item{r}{ number of draws to use in GHK }
+  \item{HALTON}{ if TRUE, use Halton sequence. If FALSE, use R::runif random number generator (optional / def: TRUE)}
+  \item{pn}{ prime number used for Halton sequence (optional / def: the smallest prime numbers, i.e. 2, 3, 5, ...)}  
+}
+\value{
+  approximation to integral
+}
+\note{
+  \code{ghkvec} can accept a vector of truncations and compute more than one
+  integral.  That is, length(trunpt)/length(above) number of different integrals,
+  each with the same Sigma and mean 0 but different truncation points. See 
+  example below for an example with two integrals at different truncation points. \cr
+  
+  User can choose what random number to use for the numerical integration: pseudo-random numbers by \code{R::runif} or quasi-random numbers by Halton sequence. Generally, the quasi-random sequence (e.g., Halton) is more uniformly distributed within domain, so it shows lower error and improved convergence than the pseudo-random sequence (Morokoff and Caflisch, 1995). \cr 
+  
+  For the prime numbers generating Halton sequence, we suggest to use the first smallest prime numbers. Halton (1960) and Kocis and Whiten (1997) prove that their discrepancy measures (how uniformly the sample points are distributed) have the upper bounds, which decrease as the generating prime number decreases. \cr
+  
+  Note: For a high dimensional integration (10 or more dimensions), we suggest to use the pseudo-random number generator (\code{R::runif}). According to Kocis and Whiten (1997), Halton sequences may be highly correlated when the dimension is 10 or more.
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Rossi, Allenby and McCulloch,  Chapter 2. \cr
+  \url{http://www.perossi.org/home/bsm-1} \cr
+  
+  For Halton sequence, see Halton (1960, Numerische Mathematik), Morokoff and Caflisch (1995, Journal of Computational Physics), and Kocis and Whiten (1997, ACM Transactions on Mathematical Software).
+}
+
+\author{
+  Peter Rossi, Anderson School, UCLA, \email{perossichi at gmail.com}.\cr
+  Keunwoo Kim, Anderson School, UCLA, \email{keunwoo.kim at gmail.com}
+}
+
+\examples{
+Sigma=matrix(c(1,.5,.5,1),ncol=2)
+L=t(chol(Sigma))
+trunpt=c(0,0,1,1)
+above=c(1,1)
+
+# drawn by Halton sequence
+ghkvec(L,trunpt,above,r=100)
+
+# use prime number 11 and 13
+ghkvec(L,trunpt,above,r=100,HALTON=TRUE,pn=c(11,13))
+
+# drawn by R::runif
+ghkvec(L,trunpt,above,r=100,HALTON=FALSE)
+}
+\keyword{ distribution }
diff --git a/man/llmnl.Rd b/man/llmnl.Rd
old mode 100755
new mode 100644
index b057155..5998df6
--- a/man/llmnl.Rd
+++ b/man/llmnl.Rd
@@ -17,7 +17,7 @@ llmnl(beta,y, X)
   \item{X}{ n*p x k Design matrix (use \code{createX} to make) }
 }
 \details{
-  Let \eqn{mu_i=X_i \beta}, then \eqn{Pr(y_i=j) = exp(mu_{i,j})/\sum_kexp(mu_{i,k})}.\cr
+  Let \eqn{\mu_i=X_i beta}, then \eqn{Pr(y_i=j) = exp(\mu_{i,j})/\sum_kexp(\mu_{i,k})}.\cr
   \eqn{X_i} is the submatrix of X corresponding to the
   ith observation.  X has n*p rows.  
   
diff --git a/man/llnhlogit.Rd b/man/llnhlogit.Rd
old mode 100755
new mode 100644
index 3e9b1cd..2efae67
--- a/man/llnhlogit.Rd
+++ b/man/llnhlogit.Rd
@@ -5,7 +5,7 @@
 
 \title{ Evaluate Log Likelihood for non-homothetic Logit Model  }
 \description{
-  \code{llmnp} evaluates log-likelihood for the Non-homothetic Logit model.
+  \code{llnhlogit} evaluates log-likelihood for the Non-homothetic Logit model.
 }
 
 \usage{
@@ -20,11 +20,17 @@ llnhlogit(theta, choice, lnprices, Xexpend)
 }
 
 \details{
-  Non-homothetic logit model with: \eqn{ln(psi_i(U)) = alpha_i - e^{k_i}U} \cr
+  Non-homothetic logit model, \eqn{Pr(i) = exp(tau v_i)/sum_j(exp(tau v_j))} \cr
+  
+  \eqn{v_i = alpha_i - e^{kappaStar_i}u^i - lnp_i} 
+  tau is the scale parameter of extreme value error distribution.\cr
+  \eqn{u^i} solves \eqn{u^i = psi_i(u^i)E/p_i}.\cr
+  \eqn{ln(psi_i(U)) = alpha_i - e^{kappaStar_i}U}. \cr
+  \eqn{lnE = gamma'Xexpend}.\cr
 
   Structure of theta vector \cr
   alpha: (p x 1) vector of utility intercepts.\cr
-  k: (p x 1) vector of utility rotation parms. \cr
+  kappaStar: (p x 1) vector of utility rotation parms expressed on natural log scale. \cr
   gamma: (k x 1) -- expenditure variable coefs.\cr
   tau: (1 x 1) -- logit scale parameter.\cr
 }
@@ -50,6 +56,17 @@ llnhlogit(theta, choice, lnprices, Xexpend)
 \seealso{ \code{\link{simnhlogit}} }
 \examples{
 ##
-\dontrun{ll=llnhlogit(theta,choice,lnprices,Xexpend)}
+N=1000
+p=3
+k=1
+theta = c(rep(1,p),seq(from=-1,to=1,length=p),rep(2,k),.5)
+lnprices = matrix(runif(N*p),ncol=p)
+Xexpend = matrix(runif(N*k),ncol=k)
+simdata = simnhlogit(theta,lnprices,Xexpend)
+#
+# let's evaluate likelihood at true theta
+#
+llstar = llnhlogit(theta,simdata$y,simdata$lnprices,simdata$Xexpend)
 }
-\keyword{ models }
+
+\keyword{ models }
\ No newline at end of file
diff --git a/man/lndIChisq.Rd b/man/lndIChisq.Rd
old mode 100755
new mode 100644
index d833cf3..9546508
--- a/man/lndIChisq.Rd
+++ b/man/lndIChisq.Rd
@@ -8,15 +8,15 @@
   \code{lndIChisq} computes the log of an Inverted Chi-Squared Density.
 }
 \usage{
-lndIChisq(nu, ssq, x)
+lndIChisq(nu, ssq, X)
 }
 \arguments{
   \item{nu}{ d.f. parameter }
   \item{ssq}{ scale parameter }
-  \item{x}{ ordinate for density evaluation }
+  \item{X}{ ordinate for density evaluation (this must be a matrix)}
 }
 \details{
-  \eqn{Z= \nu*ssq/\chi^2_{\nu}}, \eqn{Z} \eqn{\sim}{~} Inverted Chi-Squared.  \cr
+  \eqn{Z= nu*ssq/\chi^2_{nu}}, \eqn{Z} \eqn{\sim}{~} Inverted Chi-Squared.  \cr
   \code{lndIChisq} computes the complete log-density, including normalizing constants.
 }
 \value{
@@ -39,6 +39,6 @@ lndIChisq(nu, ssq, x)
 \seealso{ \code{\link{dchisq}} }
 \examples{
 ##
-lndIChisq(3,1,2)
+lndIChisq(3,1,matrix(2))
 }
 \keyword{ distribution }
diff --git a/man/lndMvn.Rd b/man/lndMvn.Rd
old mode 100755
new mode 100644
index 2419440..5c4c695
--- a/man/lndMvn.Rd
+++ b/man/lndMvn.Rd
@@ -16,7 +16,7 @@ lndMvn(x, mu, rooti)
 \arguments{
   \item{x}{ density ordinate }
   \item{mu}{ mu vector }
-  \item{rooti}{ inv of Upper Triangular Cholesky root of Sigma }
+  \item{rooti}{ inv of Upper Triangular Cholesky root of \eqn{\Sigma} }
 }
 \details{
   \eqn{z} \eqn{\sim}{~} \eqn{N(mu,\Sigma)}
diff --git a/man/lndMvst.Rd b/man/lndMvst.Rd
old mode 100755
new mode 100644
index c9a6166..a588c04
--- a/man/lndMvst.Rd
+++ b/man/lndMvst.Rd
@@ -16,8 +16,8 @@ lndMvst(x, nu, mu, rooti,NORMC)
   \item{x}{ density ordinate }
   \item{nu}{ d.f. parameter }
   \item{mu}{ mu vector }
-  \item{rooti}{ inv of Cholesky root of Sigma }
-  \item{NORMC}{ include normalizing constant, def: FALSE }
+  \item{rooti}{ inv of Cholesky root of \eqn{\Sigma} }
+  \item{NORMC}{ include normalizing constant (def: FALSE) }
 }
 
 \details{
diff --git a/man/logMargDenNR.Rd b/man/logMargDenNR.Rd
index 8ba48cf..7d499a4 100755
--- a/man/logMargDenNR.Rd
+++ b/man/logMargDenNR.Rd
@@ -1,35 +1,35 @@
-\name{logMargDenNR}
-\alias{logMargDenNR}
-\concept{Newton-Raftery approximation}
-\concept{bayes}
-\concept{marginal likelihood}
-\concept{density}
-
-\title{ Compute Log Marginal Density Using Newton-Raftery Approx }
-\description{
-  \code{logMargDenNR} computes log marginal density using the Newton-Raftery approximation.\cr
-  Note: this approximation can be influenced by outliers in the vector of log-likelihoods. Use 
-  with \strong{care} . 
-}
-\usage{
-logMargDenNR(ll)
-}
-\arguments{
-  \item{ll}{ vector of log-likelihoods evaluated at length(ll) MCMC draws }
-}
-\value{
-  approximation to log marginal density value.
-}
-\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
-  by Rossi, Allenby and McCulloch, Chapter 6. \cr
-  \url{http://www.perossi.org/home/bsm-1l}
-}
-\author{ Peter Rossi, Anderson School, UCLA,
-  \email{perossichi at gmail.com}.
-}
-\section{Warning}{
-  This routine is a utility routine that does \strong{not} check the
-  input arguments for proper dimensions and type.
-}
-
-\keyword{ distribution }
+\name{logMargDenNR}
+\alias{logMargDenNR}
+\concept{Newton-Raftery approximation}
+\concept{bayes}
+\concept{marginal likelihood}
+\concept{density}
+
+\title{ Compute Log Marginal Density Using Newton-Raftery Approx }
+\description{
+  \code{logMargDenNR} computes log marginal density using the Newton-Raftery approximation.\cr
+  Note: this approximation can be influenced by outliers in the vector of log-likelihoods. Use 
+  with \strong{care} . 
+}
+\usage{
+logMargDenNR(ll)
+}
+\arguments{
+  \item{ll}{ vector of log-likelihoods evaluated at length(ll) MCMC draws }
+}
+\value{
+  approximation to log marginal density value.
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Rossi, Allenby and McCulloch, Chapter 6. \cr
+  \url{http://www.perossi.org/home/bsm-1}
+}
+\author{ Peter Rossi, Anderson School, UCLA,
+  \email{perossichi at gmail.com}.
+}
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\keyword{ distribution }
diff --git a/man/margarine.Rd b/man/margarine.Rd
old mode 100755
new mode 100644
index a82b519..e1441d9
--- a/man/margarine.Rd
+++ b/man/margarine.Rd
@@ -11,40 +11,40 @@
   This is an R object that is a list of two data frames, list(choicePrice,demos)
 
  List of 2 \cr
- \$ choicePrice:`data.frame':	4470 obs. of  12 variables:\cr
-  \ldots \$ hhid    : int [1:4470] 2100016 2100016 2100016 2100016 \cr
-  \ldots \$ choice  : num [1:4470] 1 1 1 1 1 4 1 1 4 1 \cr
-  \ldots \$ PPk\_Stk : num [1:4470] 0.66 0.63 0.29 0.62 0.5 0.58 0.29  \cr
-  \ldots \$ PBB\_Stk : num [1:4470] 0.67 0.67 0.5 0.61 0.58 0.45 0.51  \cr
-  \ldots \$ PFl\_Stk : num [1:4470] 1.09 0.99 0.99 0.99 0.99 0.99 0.99 \cr
-  \ldots \$ PHse\_Stk: num [1:4470] 0.57 0.57 0.57 0.57 0.45 0.45 0.29 \cr
-  \ldots \$ PGen\_Stk: num [1:4470] 0.36 0.36 0.36 0.36 0.33 0.33 0.33 \cr
-  \ldots \$ PImp\_Stk: num [1:4470] 0.93 1.03 0.69 0.75 0.72 0.72 0.72 \cr
-  \ldots \$ PSS\_Tub : num [1:4470] 0.85 0.85 0.79 0.85 0.85 0.85 0.85 \cr
-  \ldots \$ PPk\_Tub : num [1:4470] 1.09 1.09 1.09 1.09 1.07 1.07 1.07 \cr
-  \ldots \$ PFl\_Tub : num [1:4470] 1.19 1.19 1.19 1.19 1.19 1.19 1.19 \cr
-  \ldots \$ PHse\_Tub: num [1:4470] 0.33 0.37 0.59 0.59 0.59 0.59 0.59 \cr
+ $ choicePrice:`data.frame':	4470 obs. of  12 variables:\cr
+  \ldots $ hhid    : int [1:4470] 2100016 2100016 2100016 2100016 \cr
+  \ldots $ choice  : num [1:4470] 1 1 1 1 1 4 1 1 4 1 \cr
+  \ldots $ PPk_Stk : num [1:4470] 0.66 0.63 0.29 0.62 0.5 0.58 0.29  \cr
+  \ldots $ PBB_Stk : num [1:4470] 0.67 0.67 0.5 0.61 0.58 0.45 0.51  \cr
+  \ldots $ PFl_Stk : num [1:4470] 1.09 0.99 0.99 0.99 0.99 0.99 0.99 \cr
+  \ldots $ PHse_Stk: num [1:4470] 0.57 0.57 0.57 0.57 0.45 0.45 0.29 \cr
+  \ldots $ PGen_Stk: num [1:4470] 0.36 0.36 0.36 0.36 0.33 0.33 0.33 \cr
+  \ldots $ PImp_Stk: num [1:4470] 0.93 1.03 0.69 0.75 0.72 0.72 0.72 \cr
+  \ldots $ PSS_Tub : num [1:4470] 0.85 0.85 0.79 0.85 0.85 0.85 0.85 \cr
+  \ldots $ PPk_Tub : num [1:4470] 1.09 1.09 1.09 1.09 1.07 1.07 1.07 \cr
+  \ldots $ PFl_Tub : num [1:4470] 1.19 1.19 1.19 1.19 1.19 1.19 1.19 \cr
+  \ldots $ PHse_Tub: num [1:4470] 0.33 0.37 0.59 0.59 0.59 0.59 0.59 \cr
 
   Pk is Parkay; BB is BlueBonnett, Fl is Fleischmanns, Hse is house,
-  Gen is generic, Imp is Imperial, SS is Shed Spread.  \_Stk indicates 
-  stick, \_Tub indicates Tub form.
+  Gen is generic, Imp is Imperial, SS is Shed Spread.  _Stk indicates 
+  stick, _Tub indicates Tub form.
 
- \$ demos      :`data.frame':	516 obs. of  8 variables:\cr
-  \ldots \$ hhid     : num [1:516] 2100016 2100024 2100495 2100560 \cr
-  \ldots \$ Income   : num [1:516] 32.5 17.5 37.5 17.5 87.5 12.5 \cr
-  \ldots \$ Fs3\_4    : int [1:516] 0 1 0 0 0 0 0 0 0 0 \cr
-  \ldots \$ Fs5      : int [1:516] 0 0 0 0 0 0 0 0 1 0 \cr
-  \ldots \$ Fam\_Size : int [1:516] 2 3 2 1 1 2 2 2 5 2 \cr
-  \ldots \$ college  : int [1:516] 1 1 0 0 1 0 1 0 1 1 \cr
-  \ldots \$ whtcollar: int [1:516] 0 1 0 1 1 0 0 0 1 1 \cr
-  \ldots \$ retired  : int [1:516] 1 1 1 0 0 1 0 1 0 0 \cr
+ $ demos      :`data.frame':	516 obs. of  8 variables:\cr
+  \ldots $ hhid     : num [1:516] 2100016 2100024 2100495 2100560 \cr
+  \ldots $ Income   : num [1:516] 32.5 17.5 37.5 17.5 87.5 12.5 \cr
+  \ldots $ Fs3_4    : int [1:516] 0 1 0 0 0 0 0 0 0 0 \cr
+  \ldots $ Fs5      : int [1:516] 0 0 0 0 0 0 0 0 1 0 \cr
+  \ldots $ Fam_Size : int [1:516] 2 3 2 1 1 2 2 2 5 2 \cr
+  \ldots $ college  : int [1:516] 1 1 0 0 1 0 1 0 1 1 \cr
+  \ldots $ whtcollar: int [1:516] 0 1 0 1 1 0 0 0 1 1 \cr
+  \ldots $ retired  : int [1:516] 1 1 1 0 0 1 0 1 0 0 \cr
 
-  Fs3\_4 is dummy (family size 3-4). Fs5 is dummy for family size >= 5.
+  Fs3_4 is dummy (family size 3-4). Fs5 is dummy for family size >= 5.
   college,whtcollar,retired are dummies reflecting these statuses.
 }
 \details{
   choice is a multinomial indicator of one of the 10 brands (in order listed under format). 
-  All prices are in \$.
+  All prices are in $.
 }
 \source{
   Allenby and Rossi (1991), "Quality Perceptions and Asymmetric Switching Between Brands," 
diff --git a/man/mixDen.Rd b/man/mixDen.Rd
old mode 100755
new mode 100644
index e4bc664..4ae7d45
--- a/man/mixDen.Rd
+++ b/man/mixDen.Rd
@@ -19,8 +19,8 @@ mixDen(x, pvec, comps)
 }
 \details{
   length(comps) is the number of mixture components.  comps[[j]] is a list of
-  parameters of the jth component. comps[[j]]\$mu is mean vector; comps[[j]]\$rooti
-  is the UL decomp of \eqn{Sigma^{-1}}.
+  parameters of the jth component. comps[[j]]$mu is mean vector; comps[[j]]$rooti
+  is the UL decomp of \eqn{\Sigma^{-1}}.
 }
 
 \value{
diff --git a/man/mixDenBi.Rd b/man/mixDenBi.Rd
old mode 100755
new mode 100644
index 8c8f821..46d0230
--- a/man/mixDenBi.Rd
+++ b/man/mixDenBi.Rd
@@ -22,8 +22,8 @@ mixDenBi(i, j, xi, xj, pvec, comps)
 }
 \details{
   length(comps) is the number of mixture components.  comps[[j]] is a list of
-  parameters of the jth component. comps[[j]]\$mu is mean vector; comps[[j]]\$rooti
-  is the UL decomp of \eqn{Sigma^{-1}}.
+  parameters of the jth component. comps[[j]]$mu is mean vector; comps[[j]]$rooti
+  is the UL decomp of \eqn{\Sigma^{-1}}.
 }
 
 \value{
diff --git a/man/mnlHess.Rd b/man/mnlHess.Rd
old mode 100755
new mode 100644
index 6c1bb3a..e3d616b
--- a/man/mnlHess.Rd
+++ b/man/mnlHess.Rd
@@ -1,44 +1,44 @@
-\name{mnlHess}
-\alias{mnlHess}
-\concept{multinomial logit}
-\concept{hessian}
-
-
-\title{ Computes -Expected Hessian for Multinomial Logit}
-\description{
-  \code{mnlHess} computes -Expected[Hessian] for Multinomial Logit Model
-}
-\usage{
-mnlHess(beta,y, X)
-}
-\arguments{
-  \item{beta}{ k x 1 vector of coefficients }
-  \item{y}{ n x 1 vector of choices, (1, \ldots,p) }
-  \item{X}{ n*p x k Design matrix }
-}
-\details{
-  See \code{\link{llmnl}} for information on structure of X array.  Use \code{\link{createX}} to make X.
-}
-\value{
-  k x k matrix
-}
-\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
-  by Rossi, Allenby and McCulloch, Chapter 3. \cr
-  \url{http://www.perossi.org/home/bsm-1l}
-}
-
-\author{ Peter Rossi, Anderson School, UCLA,
-  \email{perossichi at gmail.com}.
-}
-
-\section{Warning}{
-  This routine is a utility routine that does \strong{not} check the
-  input arguments for proper dimensions and type.
-}
-
-\seealso{ \code{\link{llmnl}}, \code{\link{createX}}, \code{\link{rmnlIndepMetrop}} }
-\examples{
-##
-\dontrun{mnlHess(beta,y,X)}
-}
-\keyword{ models }
+\name{mnlHess}
+\alias{mnlHess}
+\concept{multinomial logit}
+\concept{hessian}
+
+
+\title{ Computes -Expected Hessian for Multinomial Logit}
+\description{
+  \code{mnlHess} computes -Expected[Hessian] for Multinomial Logit Model
+}
+\usage{
+mnlHess(beta,y,X)
+}
+\arguments{
+  \item{beta}{ k x 1 vector of coefficients }
+  \item{y}{ n x 1 vector of choices, (1, \ldots,p) }
+  \item{X}{ n*p x k Design matrix }
+}
+\details{
+  See \code{\link{llmnl}} for information on structure of X array.  Use \code{\link{createX}} to make X.
+}
+\value{
+  k x k matrix
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Rossi, Allenby and McCulloch, Chapter 3. \cr
+  \url{http://www.perossi.org/home/bsm-1}
+}
+
+\author{ Peter Rossi, Anderson School, UCLA,
+  \email{perossichi at gmail.com}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{llmnl}}, \code{\link{createX}}, \code{\link{rmnlIndepMetrop}} }
+\examples{
+##
+\dontrun{mnlHess(beta,y,X)}
+}
+\keyword{ models }
diff --git a/man/momMix.Rd b/man/momMix.Rd
old mode 100755
new mode 100644
index fdb80c9..737cf3e
--- a/man/momMix.Rd
+++ b/man/momMix.Rd
@@ -22,7 +22,7 @@ momMix(probdraw, compdraw)
   compdraw is a list of lists of lists with mixture components.  \cr
   compdraw[[i]] is ith draw. \cr
   compdraw[[i]][[j]][[1]] is the mean parameter vector for the jth component, ith MCMC draw. \cr
-  compdraw[[i]][[j]][[2]] is the UL decomposition of \eqn{Sigma^{-1}} for the jth component, ith MCMC draw. 
+  compdraw[[i]][[j]][[2]] is the UL decomposition of \eqn{\Sigma^{-1}} for the jth component, ith MCMC draw. 
  
 }
 \value{
diff --git a/man/orangeJuice.Rd b/man/orangeJuice.Rd
old mode 100755
new mode 100644
index 3bade1e..4bb3a01
--- a/man/orangeJuice.Rd
+++ b/man/orangeJuice.Rd
@@ -11,45 +11,45 @@
  This R object is a list of two data frames, list(yx,storedemo).\cr
 
  List of 2 \cr
- \$ yx       :'data.frame':	106139 obs. of  19 variables:\cr
-  \ldots \$ store   : int [1:106139] 2 2 2 2 2 2 2 2 2 2 \cr
-  \ldots \$ brand   : int [1:106139] 1 1 1 1 1 1 1 1 1 1 \cr
-  \ldots \$ week    : int [1:106139] 40 46 47 48 50 51 52 53 54 57 \cr
-  \ldots \$ logmove : num [1:106139] 9.02 8.72 8.25 8.99 9.09 \cr
-  \ldots \$ constant: int [1:106139] 1 1 1 1 1 1 1 1 1 1 \cr
-  \ldots \$ price1  : num [1:106139] 0.0605 0.0605 0.0605 0.0605 0.0605 \cr
-  \ldots \$ price2  : num [1:106139] 0.0605 0.0603 0.0603 0.0603 0.0603 \cr
-  \ldots \$ price3  : num [1:106139] 0.0420 0.0452 0.0452 0.0498 0.0436 \cr
-  \ldots \$ price4  : num [1:106139] 0.0295 0.0467 0.0467 0.0373 0.0311 \cr
-  \ldots \$ price5  : num [1:106139] 0.0495 0.0495 0.0373 0.0495 0.0495 \cr
-  \ldots \$ price6  : num [1:106139] 0.0530 0.0478 0.0530 0.0530 0.0530 \cr
-  \ldots \$ price7  : num [1:106139] 0.0389 0.0458 0.0458 0.0458 0.0466 \cr
-  \ldots \$ price8  : num [1:106139] 0.0414 0.0280 0.0414 0.0414 0.0414 \cr
-  \ldots \$ price9  : num [1:106139] 0.0289 0.0430 0.0481 0.0423 0.0423 \cr
-  \ldots \$ price10 : num [1:106139] 0.0248 0.0420 0.0327 0.0327 0.0327 \cr
-  \ldots \$ price11 : num [1:106139] 0.0390 0.0390 0.0390 0.0390 0.0382 \cr
-  \ldots \$ deal    : int [1:106139] 1 0 0 0 0 0 1 1 1 1 \cr
-  \ldots \$ feat    : num [1:106139] 0 0 0 0 0 0 0 0 0 0 \cr
-  \ldots \$ profit  : num [1:106139] 38.0 30.1 30.0 29.9 29.9 \cr
+ $ yx       :'data.frame':	106139 obs. of  19 variables:\cr
+  \ldots $ store   : int [1:106139] 2 2 2 2 2 2 2 2 2 2 \cr
+  \ldots $ brand   : int [1:106139] 1 1 1 1 1 1 1 1 1 1 \cr
+  \ldots $ week    : int [1:106139] 40 46 47 48 50 51 52 53 54 57 \cr
+  \ldots $ logmove : num [1:106139] 9.02 8.72 8.25 8.99 9.09 \cr
+  \ldots $ constant: int [1:106139] 1 1 1 1 1 1 1 1 1 1 \cr
+  \ldots $ price1  : num [1:106139] 0.0605 0.0605 0.0605 0.0605 0.0605 \cr
+  \ldots $ price2  : num [1:106139] 0.0605 0.0603 0.0603 0.0603 0.0603 \cr
+  \ldots $ price3  : num [1:106139] 0.0420 0.0452 0.0452 0.0498 0.0436 \cr
+  \ldots $ price4  : num [1:106139] 0.0295 0.0467 0.0467 0.0373 0.0311 \cr
+  \ldots $ price5  : num [1:106139] 0.0495 0.0495 0.0373 0.0495 0.0495 \cr
+  \ldots $ price6  : num [1:106139] 0.0530 0.0478 0.0530 0.0530 0.0530 \cr
+  \ldots $ price7  : num [1:106139] 0.0389 0.0458 0.0458 0.0458 0.0466 \cr
+  \ldots $ price8  : num [1:106139] 0.0414 0.0280 0.0414 0.0414 0.0414 \cr
+  \ldots $ price9  : num [1:106139] 0.0289 0.0430 0.0481 0.0423 0.0423 \cr
+  \ldots $ price10 : num [1:106139] 0.0248 0.0420 0.0327 0.0327 0.0327 \cr
+  \ldots $ price11 : num [1:106139] 0.0390 0.0390 0.0390 0.0390 0.0382 \cr
+  \ldots $ deal    : int [1:106139] 1 0 0 0 0 0 1 1 1 1 \cr
+  \ldots $ feat    : num [1:106139] 0 0 0 0 0 0 0 0 0 0 \cr
+  \ldots $ profit  : num [1:106139] 38.0 30.1 30.0 29.9 29.9 \cr
 
      1 Tropicana Premium 64 oz;   2 Tropicana Premium 96 oz;  3 Florida's Natural 64 oz; \cr   
      4 Tropicana 64 oz;           5 Minute Maid 64 oz;        6 Minute Maid 96 oz; \cr
      7 Citrus Hill 64 oz;         8 Tree Fresh 64 oz;         9 Florida Gold 64 oz; \cr       
      10 Dominicks 64 oz;          11 Dominicks 128 oz.  \cr
 
- \$ storedemo:'data.frame':	83 obs. of  12 variables:\cr
-  \ldots \$ STORE   : int [1:83] 2 5 8 9 12 14 18 21 28 32 \cr
-  \ldots \$ AGE60   : num [1:83] 0.233 0.117 0.252 0.269 0.178 \cr
-  \ldots \$ EDUC    : num [1:83] 0.2489 0.3212 0.0952 0.2222 0.2534 \cr
-  \ldots \$ ETHNIC  : num [1:83] 0.1143 0.0539 0.0352 0.0326 0.3807 \cr
-  \ldots \$ INCOME  : num [1:83] 10.6 10.9 10.6 10.8 10.0 \cr
-  \ldots \$ HHLARGE : num [1:83] 0.1040 0.1031 0.1317 0.0968 0.0572 \cr
-  \ldots \$ WORKWOM : num [1:83] 0.304 0.411 0.283 0.359 0.391 \cr
-  \ldots \$ HVAL150 : num [1:83] 0.4639 0.5359 0.0542 0.5057 0.3866 \cr
-  \ldots \$ SSTRDIST: num [1:83] 2.11 3.80 2.64 1.10 9.20 \cr
-  \ldots \$ SSTRVOL : num [1:83] 1.143 0.682 1.500 0.667 1.111 \cr
-  \ldots \$ CPDIST5 : num [1:83] 1.93 1.60 2.91 1.82 0.84 \cr
-  \ldots \$ CPWVOL5 : num [1:83] 0.377 0.736 0.641 0.441 0.106 \cr
+ $ storedemo:'data.frame':	83 obs. of  12 variables:\cr
+  \ldots $ STORE   : int [1:83] 2 5 8 9 12 14 18 21 28 32 \cr
+  \ldots $ AGE60   : num [1:83] 0.233 0.117 0.252 0.269 0.178 \cr
+  \ldots $ EDUC    : num [1:83] 0.2489 0.3212 0.0952 0.2222 0.2534 \cr
+  \ldots $ ETHNIC  : num [1:83] 0.1143 0.0539 0.0352 0.0326 0.3807 \cr
+  \ldots $ INCOME  : num [1:83] 10.6 10.9 10.6 10.8 10.0 \cr
+  \ldots $ HHLARGE : num [1:83] 0.1040 0.1031 0.1317 0.0968 0.0572 \cr
+  \ldots $ WORKWOM : num [1:83] 0.304 0.411 0.283 0.359 0.391 \cr
+  \ldots $ HVAL150 : num [1:83] 0.4639 0.5359 0.0542 0.5057 0.3866 \cr
+  \ldots $ SSTRDIST: num [1:83] 2.11 3.80 2.64 1.10 9.20 \cr
+  \ldots $ SSTRVOL : num [1:83] 1.143 0.682 1.500 0.667 1.111 \cr
+  \ldots $ CPDIST5 : num [1:83] 1.93 1.60 2.91 1.82 0.84 \cr
+  \ldots $ CPWVOL5 : num [1:83] 0.377 0.736 0.641 0.441 0.106 \cr
 }
 \details{
   \describe{
@@ -68,7 +68,7 @@
     \item{\code{INCOME}}{median income}
     \item{\code{HHLARGE}}{percentage of households with 5 or more persons}
     \item{\code{WORKWOM}}{percentage of women with full-time jobs}
-    \item{\code{HVAL150}}{percentage of households worth more than \$150,000}
+    \item{\code{HVAL150}}{percentage of households worth more than $150,000}
     \item{\code{SSTRDIST}}{distance to the nearest warehouse store}
     \item{\code{SSTRVOL}}{ratio of sales of this store to the nearest warehouse store}
     \item{\code{CPDIST5}}{average distance in miles to the nearest 5 supermarkets}
diff --git a/man/plot.bayesm.hcoef.Rd b/man/plot.bayesm.hcoef.Rd
old mode 100755
new mode 100644
index cf9b202..c20735d
--- a/man/plot.bayesm.hcoef.Rd
+++ b/man/plot.bayesm.hcoef.Rd
@@ -15,7 +15,7 @@
 \arguments{
   \item{x}{ An object of S3 class, bayesm.hcoef }
   \item{names}{ a list of names for the variables in the hierarchical model}
-  \item{burnin}{ no draws to burnin, def: .1*R }
+  \item{burnin}{ no draws to burnin (def: .1*R)}
   \item{...}{ standard graphics parameters }
 }
 \details{
diff --git a/man/plot.bayesm.mat.Rd b/man/plot.bayesm.mat.Rd
old mode 100755
new mode 100644
index d158f51..d1cef39
--- a/man/plot.bayesm.mat.Rd
+++ b/man/plot.bayesm.mat.Rd
@@ -14,12 +14,12 @@
 \arguments{
   \item{x}{ An object of either S3 class, bayesm.mat, or S3 class, mcmc }
   \item{names}{optional character vector of names for coefficients}
-  \item{burnin}{number of draws to discard for burn-in, def: .1*nrow(X)}
+  \item{burnin}{number of draws to discard for burn-in (def: .1*nrow(X))}
   \item{tvalues}{vector of true values}
-  \item{TRACEPLOT}{ logical, TRUE provide sequence plots of draws and acfs, def: TRUE }
-  \item{DEN}{ logical, TRUE use density scale on histograms, def: TRUE }
-  \item{INT}{ logical, TRUE put various intervals and points on graph, def: TRUE }
-  \item{CHECK_NDRAWS}{ logical, TRUE check that there are at least 100 draws, def: TRUE }
+  \item{TRACEPLOT}{ logical, TRUE provide sequence plots of draws and acfs (def: TRUE)}
+  \item{DEN}{ logical, TRUE use density scale on histograms (def: TRUE)}
+  \item{INT}{ logical, TRUE put various intervals and points on graph (def: TRUE)}
+  \item{CHECK_NDRAWS}{ logical, TRUE check that there are at least 100 draws (def: TRUE)}
   \item{...}{ standard graphics parameters }
 }
 \details{
diff --git a/man/plot.bayesm.nmix.Rd b/man/plot.bayesm.nmix.Rd
old mode 100755
new mode 100644
index 151d99a..a9eb459
--- a/man/plot.bayesm.nmix.Rd
+++ b/man/plot.bayesm.nmix.Rd
@@ -15,15 +15,15 @@
 \arguments{
   \item{x}{ An object of  S3 class bayesm.nmix }
   \item{names}{optional character vector of names for each of the dimensions}
-  \item{burnin}{number of draws to discard for burn-in, def: .1*nrow(X)}
+  \item{burnin}{number of draws to discard for burn-in (def: .1*nrow(X))}
   \item{Grid}{matrix of grid points for densities, def: mean +/- nstd std deviations (if Data no supplied), 
                range of Data if supplied)}
-  \item{bi.sel}{list of vectors, each giving pairs for bivariate distributions, def: list(c(1,2))}
-  \item{nstd}{number of standard deviations for default Grid, def: 2}
-  \item{marg}{logical, if TRUE display marginals, def: TRUE}
+  \item{bi.sel}{list of vectors, each giving pairs for bivariate distributions (def: list(c(1,2)))}
+  \item{nstd}{number of standard deviations for default Grid (def: 2)}
+  \item{marg}{logical, if TRUE display marginals (def: TRUE)}
   \item{Data}{matrix of data points, used to paint histograms on marginals and for grid  }
-  \item{ngrid}{number of grid points for density estimates, def:50}
-  \item{ndraw}{number of draws to average Mcmc estimates over, def:200}
+  \item{ngrid}{number of grid points for density estimates (def: 50)}
+  \item{ndraw}{number of draws to average Mcmc estimates over (def: 200)}
   \item{...}{ standard graphics parameters }
 }
 \details{
diff --git a/man/rDPGibbs.Rd b/man/rDPGibbs.Rd
old mode 100755
new mode 100644
index 2c59ebd..93ec339
--- a/man/rDPGibbs.Rd
+++ b/man/rDPGibbs.Rd
@@ -18,32 +18,32 @@ rDPGibbs(Prior, Data, Mcmc)
 }
 
 \arguments{
-  \item{Prior}{ list(Prioralpha,lambda\_hyper) }
+  \item{Prior}{ list(Prioralpha,lambda_hyper) }
   \item{Data}{ list(y) }
-  \item{Mcmc}{ list(R,keep,maxuniq,SCALE,gridsize) }
+  \item{Mcmc}{ list(R,keep,nprint,maxuniq,SCALE,gridsize) }
 }
 
 \details{
 
 Model: \cr
-        \eqn{y_i} \eqn{\sim}{~} \eqn{N(mu_i,Sigma_i)}. \cr
+        \eqn{y_i} \eqn{\sim}{~} \eqn{N(\mu_i,\Sigma_i)}. \cr
 
 Priors:\cr
-        \eqn{theta_i=(mu_i,Sigma_i)} \eqn{\sim}{~} \eqn{DP(G_0(lambda),alpha)}\cr
-        \eqn{G_0(lambda):}\cr
-        \eqn{mu_i | Sigma_i} \eqn{\sim}{~} \eqn{N(0,Sigma_i (x) a^{-1})}\cr
-        \eqn{Sigma_i} \eqn{\sim}{~} \eqn{IW(nu,nu*v*I)}
+        \eqn{\theta_i=(\mu_i,\Sigma_i)} \eqn{\sim}{~} \eqn{DP(G_0(\lambda),alpha)}\cr
+        \eqn{G_0(\lambda):}\cr
+        \eqn{\mu_i | \Sigma_i} \eqn{\sim}{~} \eqn{N(0,\Sigma_i (x) a^{-1})}\cr
+        \eqn{\Sigma_i} \eqn{\sim}{~} \eqn{IW(nu,nu*v*I)}
         
-        \eqn{lambda(a,nu,v):}\cr
+        \eqn{\lambda(a,nu,v):}\cr
         \eqn{a} \eqn{\sim}{~} uniform on grid[alim[1],alimb[2]]\cr
         \eqn{nu} \eqn{\sim}{~} uniform on grid[dim(data)-1 + exp(nulim[1]),dim(data)-1 +exp(nulim[2])]\cr
         \eqn{v} \eqn{\sim}{~} uniform on grid[vlim[1],vlim[2]]
        
-        \eqn{alpha} \eqn{\sim}{~} \eqn{(1-(alpha-alphamin)/(alphamax-alphamin))^power} \cr
-        alpha= alphamin then expected number of components = Istarmin \cr
-        alpha= alphamax then expected number of components = Istarmax \cr
+        \eqn{alpha} \eqn{\sim}{~} \eqn{(1-(\alpha-alphamin)/(alphamax-alphamin))^{power}} \cr
+        \eqn{alpha}= alphamin then expected number of components = Istarmin \cr
+        \eqn{alpha}= alphamax then expected number of components = Istarmax \cr
 
-list arguments
+List arguments contain:
 
 Data:\cr
   \itemize{
@@ -52,46 +52,46 @@ Data:\cr
 
 Prioralpha:\cr
  \itemize{
-  \item{\code{Istarmin}}{expected number of components at lower bound of support of alpha}
-  \item{\code{Istarmax}}{expected number of components at upper bound of support of alpha}
-  \item{\code{power}}{power parameter for alpha prior}
+  \item{\code{Istarmin}}{ expected number of components at lower bound of support of alpha (def: 1)}
+  \item{\code{Istarmax}}{ expected number of components at upper bound of support of alpha}
+  \item{\code{power}}{ power parameter for alpha prior (def: .8)}
   }
  
-lambda\_hyper:\cr
+lambda_hyper:\cr
   \itemize{
-   \item{\code{alim}}{defines support of a distribution,def:c(.01,10) }
-   \item{\code{nulim}}{defines support of nu distribution, def:c(.01,3)} 
-   \item{\code{vlim}}{defines support of v distribution, def:c(.1,4)} 
+   \item{\code{alim}}{ defines support of a distribution (def: (.01,10))}
+   \item{\code{nulim}}{ defines support of nu distribution (def: (.01,3))} 
+   \item{\code{vlim}}{ defines support of v distribution (def: (.1,4))} 
   }
 Mcmc:\cr
  \itemize{
-   \item{\code{R}}{number of mcmc draws}
-   \item{\code{keep}}{thinning parm, keep every keepth draw}
-   \item{\code{maxuniq}}{storage constraint on the number of unique components}
-   \item{\code{SCALE}}{should data be scaled by mean,std deviation before posterior draws, def: TRUE}
-   \item{\code{gridsize}}{number of discrete points for hyperparameter priors,def: 20}
+   \item{\code{R}}{ number of mcmc draws}
+   \item{\code{keep}}{ thinning parm, keep every keepth draw}
+   \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
+   \item{\code{maxuniq}}{ storage constraint on the number of unique components (def: 200)}
+   \item{\code{SCALE}}{ should data be scaled by mean,std deviation before posterior draws, (def: TRUE)}
+   \item{\code{gridsize}}{ number of discrete points for hyperparameter priors (def: 20)}
   }
 
-output:\cr
 the basic output are draws from the predictive distribution of the data in the object, \code{nmix}. 
 The average of these draws is the Bayesian analogue of a density estimate.
 
 nmix:\cr
   \itemize{
-   \item{\code{probdraw}}{R/keep x 1 matrix of 1s}
-   \item{\code{zdraw}}{R/keep x N matrix of draws of indicators of which component each obs is assigned to}
-   \item{\code{compdraw}}{R/keep list of draws of normals}
+   \item{\code{probdraw}}{ R/keep x 1 matrix of 1s}
+   \item{\code{zdraw}}{ R/keep x N matrix of draws of indicators of which component each obs is assigned to}
+   \item{\code{compdraw}}{ R/keep list of draws of normals}
   }
   Output of the components is in the form of a list of lists. \cr
   compdraw[[i]] is ith draw -- list of lists. \cr
   compdraw[[i]][[1]] is list of parms for a draw from predictive. \cr
   compdraw[[i]][1]][[1]] is the mean vector. compdraw[[i]][[1]][[2]] is the inverse of Cholesky root.
-  \eqn{Sigma} = t(R)\%*\%R, \eqn{R^{-1}} = compdraw[[i]][[1]][[2]].
+  \eqn{\Sigma} = t(R)\%*\%R, \eqn{R^{-1}} = compdraw[[i]][[1]][[2]].
 }
 
 
 \note{
-    we parameterize the prior on \eqn{Sigma_i} such that \eqn{mode(Sigma)= nu/(nu+2) vI}.
+    we parameterize the prior on \eqn{\Sigma_i} such that \eqn{mode(\Sigma)= nu/(nu+2) vI}.
     The support of nu enforces valid IW density; \eqn{nulim[1] > 0}
 
     We use the structure for \code{nmix} that is compatible with the \code{bayesm} routines for finite mixtures of normals.
@@ -115,11 +115,11 @@ nmix:\cr
 
 
 \value{
- \item{nmix}{a list containing: probdraw,zdraw,compdraw}
- \item{alphadraw}{vector of draws of DP process tightness parameter}
- \item{nudraw}{vector of draws of base prior hyperparameter}
- \item{adraw}{vector of draws of base prior hyperparameter}
- \item{vdraw}{vector of draws of base prior hyperparameter}
+ \item{nmix}{ a list containing: probdraw,zdraw,compdraw}
+ \item{alphadraw}{ vector of draws of DP process tightness parameter}
+ \item{nudraw}{ vector of draws of base prior hyperparameter}
+ \item{adraw}{ vector of draws of base prior hyperparameter}
+ \item{vdraw}{ vector of draws of base prior hyperparameter}
 }
 
 \author{ Peter Rossi, Anderson School, UCLA,
@@ -154,9 +154,11 @@ rgi=c(0,20); grid=matrix(seq(from=rgi[1],to=rgi[2],length.out=50),ncol=1)
 deltax=(rgi[2]-rgi[1])/nrow(grid)
 plot(out1$nmix,Grid=grid,Data=y1)
 ## plot true density with historgram
-plot(range(grid[,1]),1.5*range(dchisq(grid[,1],df=chisqdf)),type="n",xlab=paste("Chisq ; ",N," obs",sep=""), ylab="")
+plot(range(grid[,1]),1.5*range(dchisq(grid[,1],df=chisqdf)),
+  type="n",xlab=paste("Chisq ; ",N," obs",sep=""), ylab="")
 hist(y1,xlim=rgi,freq=FALSE,col="yellow",breaks=20,add=TRUE)
-lines(grid[,1],dchisq(grid[,1],df=chisqdf)/(sum(dchisq(grid[,1],df=chisqdf))*deltax),col="blue",lwd=2)
+lines(grid[,1],dchisq(grid[,1],df=chisqdf)/
+  (sum(dchisq(grid[,1],df=chisqdf))*deltax),col="blue",lwd=2)
 }
 
 
@@ -199,7 +201,8 @@ plot(out2$nmix,Grid=grid,Data=y2)
 ## plot true bivariate density
 tden=matrix(double(50*50),ncol=50)
 for (i in 1:50){ for (j in 1:50) 
-      {tden[i,j]=exp(-0.5*(A*(x1[i]^2)*(x2[j]^2)+(x1[i]^2)+(x2[j]^2)-2*B*x1[i]*x2[j]-2*C1*x1[i]-2*C2*x2[j]))}
+      {tden[i,j]=exp(-0.5*(A*(x1[i]^2)*(x2[j]^2)+
+      (x1[i]^2)+(x2[j]^2)-2*B*x1[i]*x2[j]-2*C1*x1[i]-2*C2*x2[j]))}
 }
 tden=tden/sum(tden)
 image(x1,x2,tden,col=terrain.colors(100),xlab="",ylab="")
diff --git a/man/rbayesBLP.Rd b/man/rbayesBLP.Rd
new file mode 100644
index 0000000..d084b4c
--- /dev/null
+++ b/man/rbayesBLP.Rd
@@ -0,0 +1,236 @@
+\name{rbayesBLP}
+\alias{rbayesBLP}
+\concept{bayes}
+\concept{random coefficient logit}
+\concept{BLP}
+\concept{Metropolis-Hastings}
+
+\title{ Bayesian Analysis of Random Coefficient Logit Models Using Aggregate Data }
+\description{
+  \code{rbayesBLP} implements a hybrid MCMC algorithm for aggregate level sales data in a market with differentiated products. Version 3.0-1 contains an error for use of instruments with this function. This will be fixed in version 3.0-2.
+}
+\usage{
+rbayesBLP(Data, Prior, Mcmc)
+}
+\arguments{
+  \item{Data}{ list(X,share,J,Z) (X, share, and J: required). }
+  \item{Prior}{ list(sigmasqR,theta_hat,A,deltabar,Ad,nu0,s0_sq,VOmega) (optional).}
+  \item{Mcmc}{ list(R,H,initial_theta_bar,initial_r,initial_tau_sq,initial_Omega,initial_delta,s,cand_cov,tol,keep,nprint) (R and H: required).}
+}
+\details{
+  Model: \cr
+        \eqn{u_ijt = X_jt \theta_i + \eta_jt + e_ijt}\cr
+        \eqn{e_ijt} \eqn{\sim}{~} type I Extreme Value (logit)\cr
+        \eqn{\theta_i} \eqn{\sim}{~}  \eqn{N(\theta_bar, \Sigma)}\cr
+        \eqn{\eta_jt} \eqn{\sim}{~} \eqn{N(0, \tau_sq)}\cr 
+        This structure implies a logit model for each consumer (\eqn{\theta}). Aggregate shares \code{share} are produced by integrating this consumer level logit model over the assumed normal distribution of \eqn{\theta}. 
+
+ Priors:\cr
+        \eqn{r} \eqn{\sim}{~} \eqn{N(0,diag(sigmasqR))}.\cr
+        \eqn{\theta_bar} \eqn{\sim}{~} \eqn{N(\theta_hat,A^-1)}.\cr
+        \eqn{\tau_sq} \eqn{\sim}{~} \eqn{nu0*s0_sq / \chi^2 (nu0)}\cr
+        
+        Note: we observe the aggregate level market share, not individual level choice.\cr
+        
+        Note: \eqn{r} is the vector of nonzero elements of cholesky root of \eqn{\Sigma}. Instead of \eqn{\Sigma} we draw \eqn{r}, which is one-to-one correspondence with the positive-definite \eqn{\Sigma}.
+        
+ Model (with IV): \cr
+        \eqn{u_ijt = X_jt \theta_i + \eta_jt + e_ijt}\cr
+        \eqn{e_ijt} \eqn{\sim}{~} type I Extreme Value (logit)\cr
+        \eqn{\theta_i} \eqn{\sim}{~}  \eqn{N(\theta_bar, \Sigma)}\cr
+        
+        \eqn{X_jt = [X_exo_jt, X_endo_jt]}\cr
+        \eqn{X_endo_jt = Z_jt \delta_jt + \zeta_jt}\cr
+        \eqn{vec(\zeta_jt, \eta_jt)} \eqn{\sim}{~} \eqn{N(0, \Omega)}\cr
+
+ Priors (with IV):\cr
+        \eqn{r} \eqn{\sim}{~} \eqn{N(0,diag(sigmasqR))}.\cr
+        \eqn{\theta_bar} \eqn{\sim}{~} \eqn{N(\theta_hat,A^-1)}.\cr
+        \eqn{\delta} \eqn{\sim}{~} \eqn{N(deltabar,Ad^-1)}.\cr
+        \eqn{\Omega} \eqn{\sim}{~} \eqn{IW(nu0, VOmega)}\cr
+          
+ Step 1 (\eqn{\Sigma}):\cr
+        Given \eqn{\theta_bar} and \eqn{\tau_sq}, draw \eqn{r} via Metropolis-Hasting.\cr
+        Convert the drawn \eqn{r} to \eqn{\Sigma}.\cr
+        
+        Note: if user does not specify the Metropolis-Hasting increment parameters (\code{s} and \code{cand_cov}), \code{rbayesBLP} automatically tunes the parameters.
+        
+ Step 2 (\eqn{\theta_bar}, \eqn{\tau_sq}):\cr
+        Given \eqn{\Sigma}, draw \eqn{\theta_bar} and \eqn{\tau_sq} via Gibbs sampler.\cr
+        
+ Step 2 (with IV: \eqn{\theta_bar}, \eqn{\delta}, \eqn{\Omega}):\cr
+        Given \eqn{\Sigma}, draw \eqn{\theta_bar}, \eqn{\delta}, and \eqn{\Omega} via IV Gibbs sampler.\cr
+        
+  List arguments contain:\cr
+  
+  Data
+  \itemize{
+    \item{\code{J}}{ number of alternatives without outside option}
+    \item{\code{X}}{ J*T by K matrix (no outside option, which is normalized to 0). If IV is used, the last column is endogeneous variable.}
+    \item{\code{share}}{ J*T vector (no outside option)}
+    \item{\code{Z}}{ J*T by I matrix of instrumental variables (optional)}
+  }
+  Note: both the \code{share} vector and the \code{X} matrix are organized by the jt index. j varies faster than t, i.e. (j=1,t=1),(j=2,t=1), ..., (j=J,t=1), ..., (j=J,t=T)\cr
+  
+  Prior
+  \itemize{
+    \item{\code{sigmasqR}}{ K*(K+1)/2 vector for \eqn{r} prior variance (def: diffuse prior for \eqn{\Sigma})}    
+    \item{\code{theta_hat}}{ K vector for \eqn{\theta_bar} prior mean (def: 0 vector)}
+    \item{\code{A}}{ K by K matrix for \eqn{\theta_bar} prior precision (def: 0.01*diag(K))}
+    \item{\code{deltabar}}{ I vector for \eqn{\delta} prior mean (def: 0 vector)}
+    \item{\code{Ad}}{ I by I matrix for \eqn{\delta} prior precision (def: 0.01*diag(I))}
+    \item{\code{nu0}}{ d.f. parameter for \eqn{\tau_sq} and \eqn{\Omega} prior (def: K+1)}
+    \item{\code{s0_sq}}{ scale parameter for \eqn{\tau_sq} prior (def: 1)}
+    \item{\code{VOmega}}{ 2 by 2 matrix parameter for \eqn{\Omega} prior (def: matrix(c(1,0.5,0.5,1),2,2))}
+  }
+  Mcmc
+  \itemize{    
+    \item{\code{R}}{ number of MCMC draws}
+    \item{\code{H}}{ number of random draws used for Monte-Carlo integration}
+    \item{\code{initial_theta_bar}}{ initial value of \eqn{\theta_bar} (def: 0 vector)}
+    \item{\code{initial_r}}{ initial value of \eqn{r} (def: 0 vector)}
+    \item{\code{initial_tau_sq}}{ initial value of \eqn{\tau_sq} (def: 0.1)}
+    \item{\code{initial_Omega}}{ initial value of \eqn{\Omega} (def: diag(2))}
+    \item{\code{initial_delta}}{ initial value of \eqn{\delta} (def: 0 vector)}
+    \item{\code{s}}{ scale parameter of Metropolis-Hasting increment (def: automatically tuned)}
+    \item{\code{cand_cov}}{ var-cov matrix of Metropolis-Hasting increment (def: automatically tuned)}
+    \item{\code{tol}}{ convergence tolerance for the contraction mapping (def: 1e-6)}
+    \item{\code{keep}}{ MCMC thinning parameter: keep every keepth draw (def: 1)}
+    \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}    
+   }
+   Tuning Metropolis-Hastings algorithm:\cr
+   
+   r_cand = r_old + s*N(0,cand_cov)\cr
+   Fix the candidate covariance matrix as cand_cov0 = diag(rep(0.1, K), rep(1, K*(K-1)/2)).\cr
+   Start from s0 = 2.38/sqrt(dim(r))\cr
+   
+   Repeat\{\cr
+   Run 500 MCMC chain.\cr   
+   If acceptance rate < 30\% => update s1 = s0/5.\cr
+   If acceptance rate > 50\% => update s1 = s0*3.\cr
+   (Store r draws if acceptance rate is 20~80\%.)\cr
+   s0 = s1\cr
+   \} until acceptance rate is 30~50\%
+   
+   Scale matrix C = s1*sqrt(cand_cov0)\cr
+   Correlation matrix R = Corr(r draws)\cr
+   Use C*R*C as s^2*cand_cov.\cr
+   
+   
+   
+}
+\value{
+  a list containing
+  \item{thetabardraw}{K by R/keep matrix of random coefficient mean draws}
+  \item{Sigmadraw}{K*K by R/keep matrix of random coefficient variance draws}
+  \item{rdraw}{K*K by R/keep matrix of \eqn{r} draws (same information as in \code{Sigmadraw})}
+  \item{tausqdraw}{R/keep vector of aggregate demand shock variance draws}
+  \item{Omegadraw}{2*2 by R/keep matrix of correlated endogenous shock variance draws}
+  \item{deltadraw}{I by R/keep matrix of endogenous structural equation coefficient draws}
+  \item{acceptrate}{scalar acceptance rate of the Metropolis-Hastings}
+  \item{s}{scale parameter used for Metropolis-Hasting}
+  \item{cand_cov}{var-cov matrix used for Metropolis-Hasting}
+}
+\references{ For further discussion, see \emph{Bayesian Analysis of Random Coefficient Logit Models Using Aggregate Data}
+  by Jiang, Manchanda and Rossi, Journal of Econometrics, 2009. \cr
+  \url{http://www.sciencedirect.com/science/article/pii/S0304407608002297}
+}
+
+\author{ Keunwoo Kim, Anderson School, UCLA,
+  \email{keunwoo.kim at gmail.com}.
+}
+
+\examples{
+
+if(nchar(Sys.getenv("LONG_TEST")) != 0) {
+###
+### Simulate aggregate level data
+###
+simulData <- function(para, others, Hbatch){
+  #
+  # Keunwoo Kim, UCLA Anderson
+  #
+  ### parameters
+  theta_bar <- para$theta_bar
+  Sigma <- para$Sigma
+  tau_sq <- para$tau_sq
+	
+  T <- others$T	
+  J <- others$J	
+  p <- others$p	
+  H <- others$H	
+  K <- J + p	
+  
+  # Hbatch does the integration for computing market shares in batches of
+  #        size Hbatch
+
+  ### build X	
+  X <- matrix(runif(T*J*p), T*J, p)
+  inter <- NULL
+  for (t in 1:T){
+    inter <- rbind(inter, diag(J))
+  }
+  X <- cbind(inter, X)
+
+  ### draw eta ~ N(0, tau_sq)	
+  eta <- rnorm(T*J)*sqrt(tau_sq)
+  X <- cbind(X, eta)
+	
+  share <- rep(0, J*T)
+  for (HH in 1:(H/Hbatch)){
+    ### draw theta ~ N(theta_bar, Sigma)
+    cho <- chol(Sigma)
+    theta <- matrix(rnorm(K*Hbatch), nrow=K, ncol=Hbatch)
+    theta <- t(cho)\%*\%theta + theta_bar
+
+    ### utility
+    V <- X\%*\%rbind(theta, 1)
+    expV <- exp(V)
+    expSum <- matrix(colSums(matrix(expV, J, T*Hbatch)), T, Hbatch)
+    expSum <- expSum \%x\% matrix(1, J, 1)
+    choiceProb <- expV / (1 + expSum)
+    share <- share +  rowSums(choiceProb) / H
+  }	
+	
+  ### the last K+1'th column is eta, which is unobservable.
+  X	<- X[,c(1:K)]	
+  return (list(X=X, share=share))
+}
+
+### true parameter
+theta_bar_true <- c(-2, -3, -4, -5)
+Sigma_true <- rbind(c(3,2,1.5,1),c(2,4,-1,1.5),c(1.5,-1,4,-0.5),c(1,1.5,-0.5,3))
+cho <- chol(Sigma_true)
+r_true <- c(log(diag(cho)),cho[1,2:4],cho[2,3:4],cho[3,4]) 
+tau_sq_true <- 1
+
+### simulate data
+set.seed(66)
+T <- 300;J <- 3;p <- 1;K <- 4;H <- 1000000;Hbatch <- 5000
+dat <- simulData(para=list(theta_bar=theta_bar_true, Sigma=Sigma_true, tau_sq=tau_sq_true),
+        others=list(T=T, J=J, p=p, H=H), Hbatch)
+X <- dat$X
+share <- dat$share
+
+### Mcmc run
+R <- 2000;H <- 50
+Data1 <- list(X=X, share=share, J=J)
+Mcmc1 <- list(R=R, H=H, nprint=0)
+set.seed(66)
+out <- rbayesBLP(Data=Data1, Mcmc=Mcmc1)
+
+### acceptance rate
+out$acceptrate
+
+### summary of draws
+summary(out$thetabardraw)
+summary(out$Sigmadraw)
+summary(out$tausqdraw)
+
+### plotting draws
+plot(out$thetabardraw)
+plot(out$Sigmadraw)
+plot(out$tausqdraw)
+}
+}
+
diff --git a/man/rbiNormGibbs.Rd b/man/rbiNormGibbs.Rd
old mode 100755
new mode 100644
index 0172336..b5ab145
--- a/man/rbiNormGibbs.Rd
+++ b/man/rbiNormGibbs.Rd
@@ -21,7 +21,7 @@ rbiNormGibbs(initx = 2, inity = -2, rho, burnin = 100, R = 500)
   \item{R}{ number of MCMC draws (def:500) }
 }
 \details{
-  (theta1,theta2) ~ N((0,0), Sigma=matrix(c(1,rho,rho,1),ncol=2))
+  \eqn{(\theta_1,\theta_2) ~ N((0,0)}, \eqn{\Sigma}=matrix(c(1,rho,rho,1),ncol=2))
 }
 \value{
  R x 2 array of draws
diff --git a/man/rbprobitGibbs.Rd b/man/rbprobitGibbs.Rd
old mode 100755
new mode 100644
index cf8f4cf..10d7a02
--- a/man/rbprobitGibbs.Rd
+++ b/man/rbprobitGibbs.Rd
@@ -17,7 +17,7 @@ rbprobitGibbs(Data, Prior, Mcmc)
 \arguments{
   \item{Data}{ list(X,y)}
   \item{Prior}{ list(betabar,A)}
-  \item{Mcmc}{ list(R,keep)  }
+  \item{Mcmc}{ list(R,keep,nprint)  }
 }
 
 \details{
@@ -33,6 +33,7 @@ rbprobitGibbs(Data, Prior, Mcmc)
     \item{\code{A}}{k x k prior precision matrix (def: .01I)} 
     \item{\code{R}}{ number of MCMC draws }
     \item{\code{keep}}{ thinning parameter - keep every keepth draw (def: 1)}
+    \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
   }
 }
 
diff --git a/man/rhierBinLogit.Rd b/man/rhierBinLogit.Rd
old mode 100755
new mode 100644
index d8fd028..2ecd659
--- a/man/rhierBinLogit.Rd
+++ b/man/rhierBinLogit.Rd
@@ -8,7 +8,7 @@
 \title{ MCMC Algorithm for Hierarchical Binary Logit }
 \description{
   \code{rhierBinLogit} implements an MCMC algorithm for hierarchical binary logits with
-  a normal heterogeneity distribution.  This is a hybrid sampler with a RW Metropolis step
+  a normal heterogeneity distribution. This is a hybrid sampler with a RW Metropolis step
   for unit-level logit parameters.
 
   \code{rhierBinLogit} is designed for use on choice-based conjoint data with partial profiles.
@@ -25,10 +25,10 @@ rhierBinLogit(Data, Prior, Mcmc)
 }
 \details{
   Model: \cr
-  \eqn{y_{hi} = 1} with \eqn{pr=exp(x_{hi}'beta_h)/(1+exp(x_{hi}'beta_h)}.  \eqn{beta_h} is nvar x 1.\cr
+  \eqn{y_{hi} = 1} with \eqn{\Pr=exp(x_{hi}'\beta_h)/(1+exp(x_{hi}'\beta_h)}.  \eqn{\beta_h} is nvar x 1.\cr
   h=1,\ldots,length(lgtdata) units or "respondents" for survey data.
 
-  \eqn{beta_h}= ZDelta[h,] + \eqn{u_h}. \cr
+  \eqn{\beta_h}= ZDelta[h,] + \eqn{u_h}. \cr
   Note: here ZDelta refers to Z\%*\%Delta, ZDelta[h,] is hth row of this product.\cr
   Delta is an nz x nvar array. 
 
diff --git a/man/rhierLinearMixture.Rd b/man/rhierLinearMixture.Rd
old mode 100755
new mode 100644
index e6920aa..de449e4
--- a/man/rhierLinearMixture.Rd
+++ b/man/rhierLinearMixture.Rd
@@ -1,152 +1,156 @@
-\name{rhierLinearMixture}
-\alias{rhierLinearMixture}
-\concept{bayes}
-\concept{MCMC}
-\concept{Gibbs Sampling}
-\concept{mixture of normals}
-\concept{normal mixture}
-\concept{heterogeneity}
-\concept{regresssion}
-\concept{hierarchical models}
-\concept{linear model}
-
-\title{ Gibbs Sampler for Hierarchical Linear Model }
-\description{
-  \code{rhierLinearMixture} implements a Gibbs Sampler for hierarchical linear models with a mixture of normals prior.
-}
-\usage{
-rhierLinearMixture(Data, Prior, Mcmc)
-}
-\arguments{
-  \item{Data}{ list(regdata,Z) (Z optional). }
-  \item{Prior}{ list(deltabar,Ad,mubar,Amu,nu,V,nu.e,ssq,ncomp)  (all but ncomp are optional).}
-  \item{Mcmc}{ list(R,keep) (R required).}
-}
-\details{
-  Model: length(regdata) regression equations. \cr
-        \eqn{y_i = X_ibeta_i + e_i}. \eqn{e_i} \eqn{\sim}{~} \eqn{N(0,tau_i)}.  nvar X vars in each equation. 
-
- Priors:\cr
-        \eqn{tau_i} \eqn{\sim}{~} nu.e*\eqn{ssq_i/\chi^2_{nu.e}}.  \eqn{tau_i} is the variance of \eqn{e_i}.\cr
-
-        \eqn{beta_i}= ZDelta[i,] + \eqn{u_i}. \cr
-        Note: here ZDelta refers to Z\%*\%D, ZDelta[i,] is ith row of this product.\cr
-        Delta is an nz x nvar array. 
-
-        \eqn{u_i} \eqn{\sim}{~} \eqn{N(mu_{ind},Sigma_{ind})}. \eqn{ind} \eqn{\sim}{~} multinomial(pvec). \cr
-
-        \eqn{pvec} \eqn{\sim}{~} dirichlet (a)\cr
-        \eqn{delta= vec(Delta)} \eqn{\sim}{~} \eqn{N(deltabar,A_d^{-1})}\cr
-        \eqn{mu_j} \eqn{\sim}{~} \eqn{N(mubar,Sigma_j (x) Amu^{-1})}\cr
-        \eqn{Sigma_j} \eqn{\sim}{~} IW(nu,V) \cr
-
-
-  List arguments contain:
-  \itemize{
-    \item{\code{regdata}}{ list of lists with X,y matrices for each of length(regdata) regressions}
-    \item{\code{regdata[[i]]$X}}{ X matrix for equation i }
-    \item{\code{regdata[[i]]$y}}{ y vector for equation i }
-    \item{\code{deltabar}}{nz*nvar vector of prior means (def: 0)}
-    \item{\code{Ad}}{ prior prec matrix for vec(Delta) (def: .01I)}
-    \item{\code{mubar}}{ nvar x 1 prior mean vector for normal comp mean (def: 0)}
-    \item{\code{Amu}}{ prior precision for normal comp mean (def: .01I)}
-    \item{\code{nu}}{ d.f. parm for IW prior on norm comp Sigma (def: nvar+3)}
-    \item{\code{V}}{ pds location parm for IW prior on norm comp Sigma (def: nuI)}
-    \item{\code{nu.e}}{ d.f. parm for regression error variance prior (def: 3)}
-    \item{\code{ssq}}{ scale parm for regression error var prior (def: var(\eqn{y_i}))}
-    \item{\code{ncomp}}{ number of components used in normal mixture }
-    \item{\code{R}}{ number of MCMC draws}
-    \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)}
-   }
-}
-\value{
-  a list containing
-  \item{taudraw}{R/keep x nreg array of error variance draws}
-  \item{betadraw}{nreg x nvar x R/keep array of individual regression coef draws}
-  \item{Deltadraw}{R/keep x nz x nvar array of Deltadraws}
-  \item{nmix}{list of three elements, (probdraw, NULL, compdraw)}
-}
-\note{
-  More on \code{probdraw} component of nmix return value list: \cr
-  this is an R/keep by ncomp array of draws of mixture component probs (pvec)\cr
-  More on \code{compdraw} component of nmix return value list: 
-  \describe{
-  \item{compdraw[[i]]}{the ith draw of components for mixtures}
-  \item{compdraw[[i]][[j]]}{ith draw of the jth normal mixture comp}
-  \item{compdraw[[i]][[j]][[1]]}{ith draw of jth normal mixture comp mean vector}
-  \item{compdraw[[i]][[j]][[2]]}{ith draw of jth normal mixture cov parm (rooti)}
-  }
-
-  Note: Z should \strong{not} include an intercept and should be centered for ease of interpretation.\cr
-  
-  Be careful in assessing the prior parameter, Amu.  .01 can be too small for some applications. See 
-  Rossi et al, chapter 5 for full discussion.\cr
-
-} 
-\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
-  by Rossi, Allenby and McCulloch, Chapter 3. \cr
-  \url{http://www.perossi.org/home/bsm-1}
-}
-
-\author{ Peter Rossi, Anderson School, UCLA,
-  \email{perossichi at gmail.com}.
-}
-\seealso{ \code{\link{rhierLinearModel}} }
-\examples{
-##
-if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=2000} else {R=10}
-
-set.seed(66)
-nreg=300; nobs=500; nvar=3; nz=2
-
-Z=matrix(runif(nreg*nz),ncol=nz) 
-Z=t(t(Z)-apply(Z,2,mean))
-Delta=matrix(c(1,-1,2,0,1,0),ncol=nz)
-tau0=.1
-iota=c(rep(1,nobs))
-
-## create arguments for rmixture
-
-tcomps=NULL
-a=matrix(c(1,0,0,0.5773503,1.1547005,0,-0.4082483,0.4082483,1.2247449),ncol=3)
-tcomps[[1]]=list(mu=c(0,-1,-2),rooti=a) 
-tcomps[[2]]=list(mu=c(0,-1,-2)*2,rooti=a)
-tcomps[[3]]=list(mu=c(0,-1,-2)*4,rooti=a)
-tpvec=c(.4,.2,.4)                               
-
-regdata=NULL						  # simulated data with Z
-betas=matrix(double(nreg*nvar),ncol=nvar)
-tind=double(nreg)
-
-for (reg in 1:nreg) {
-tempout=rmixture(1,tpvec,tcomps)
-betas[reg,]=Delta\%*\%Z[reg,]+as.vector(tempout$x)
-tind[reg]=tempout$z
-X=cbind(iota,matrix(runif(nobs*(nvar-1)),ncol=(nvar-1)))
-tau=tau0*runif(1,min=0.5,max=1)
-y=X\%*\%betas[reg,]+sqrt(tau)*rnorm(nobs)
-regdata[[reg]]=list(y=y,X=X,beta=betas[reg,],tau=tau)
-}
-
-## run rhierLinearMixture
-
-Data1=list(regdata=regdata,Z=Z)
-Prior1=list(ncomp=3)
-Mcmc1=list(R=R,keep=1)
-
-out1=rhierLinearMixture(Data=Data1,Prior=Prior1,Mcmc=Mcmc1)
-
-cat("Summary of Delta draws",fill=TRUE)
-summary(out1$Deltadraw,tvalues=as.vector(Delta))
-cat("Summary of Normal Mixture Distribution",fill=TRUE)
-summary(out1$nmix)
-
-if(0){
-## plotting examples 
-plot(out1$betadraw)
-plot(out1$nmix)
-plot(out1$Deltadraw)
-}
-
-}
-\keyword{ regression }
+\name{rhierLinearMixture}
+\alias{rhierLinearMixture}
+\concept{bayes}
+\concept{MCMC}
+\concept{Gibbs Sampling}
+\concept{mixture of normals}
+\concept{normal mixture}
+\concept{heterogeneity}
+\concept{regression}
+\concept{hierarchical models}
+\concept{linear model}
+
+\title{ Gibbs Sampler for Hierarchical Linear Model }
+\description{
+  \code{rhierLinearMixture} implements a Gibbs Sampler for hierarchical linear models with a mixture of normals prior.
+}
+\usage{
+rhierLinearMixture(Data, Prior, Mcmc)
+}
+\arguments{
+  \item{Data}{ list(regdata,Z) (Z optional). }
+  \item{Prior}{ list(deltabar,Ad,mubar,Amu,nu,V,nu.e,ssq,ncomp)  (all but ncomp are optional).}
+  \item{Mcmc}{ list(R,keep,nprint) (R required).}
+}
+\details{
+  Model: length(regdata) regression equations. \cr
+        \eqn{y_i = X_i\beta_i + e_i}. \eqn{e_i} \eqn{\sim}{~} \eqn{N(0,\tau_i)}.  \code{nvar} is the number of X vars in each equation. 
+
+ Priors:\cr
+        \eqn{\tau_i} \eqn{\sim}{~} \eqn{nu.e*ssq_i/\chi^2_{nu.e}}.  \eqn{\tau_i} is the variance of \eqn{e_i}.\cr
+        \eqn{B = Z\Delta + U} or \cr
+        \eqn{\beta_i = \Delta' Z[i,]' + u_i}. \cr
+        \eqn{\Delta} is an nz x nvar array. \cr
+
+
+        \eqn{u_i} \eqn{\sim}{~} \eqn{N(\mu_{ind},\Sigma_{ind})}\cr
+        \eqn{ind} \eqn{\sim}{~} \eqn{multinomial(pvec)} \cr
+
+        \eqn{pvec} \eqn{\sim}{~} \eqn{dirichlet(a)}\cr
+        \eqn{delta= vec(\Delta)} \eqn{\sim}{~} \eqn{N(deltabar,A_d^{-1})}\cr
+        \eqn{\mu_j} \eqn{\sim}{~} \eqn{N(mubar,\Sigma_j (x) Amu^{-1})}\cr
+        \eqn{\Sigma_j} \eqn{\sim}{~} \eqn{IW(nu,V)} \cr
+
+
+  List arguments contain:
+  \itemize{
+    \item{\code{regdata}}{ list of lists with X,y matrices for each of length(regdata) regressions}
+    \item{\code{regdata[[i]]$X}}{ X matrix for equation i }
+    \item{\code{regdata[[i]]$y}}{ y vector for equation i }
+    \item{\code{deltabar}}{nz*nvar vector of prior means (def: 0)}
+    \item{\code{Ad}}{ prior prec matrix for vec(Delta) (def: .01I)}
+    \item{\code{mubar}}{ nvar x 1 prior mean vector for normal comp mean (def: 0)}
+    \item{\code{Amu}}{ prior precision for normal comp mean (def: .01I)}
+    \item{\code{nu}}{ d.f. parm for IW prior on norm comp Sigma (def: nvar+3)}
+    \item{\code{V}}{ pds location parm for IW prior on norm comp Sigma (def: nuI)}
+    \item{\code{nu.e}}{ d.f. parm for regression error variance prior (def: 3)}
+    \item{\code{ssq}}{ scale parm for regression error var prior (def: var(\eqn{y_i}))}
+    \item{\code{a}}{ Dirichlet prior parameter (def: 5)}
+    \item{\code{ncomp}}{ number of components used in normal mixture }
+    \item{\code{R}}{ number of MCMC draws}
+    \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)}
+    \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
+   }
+}
+\value{
+  a list containing
+  \item{taudraw}{R/keep x nreg array of error variance draws}
+  \item{betadraw}{nreg x nvar x R/keep array of individual regression coef draws}
+  \item{Deltadraw}{R/keep x nz x nvar array of Deltadraws}
+  \item{nmix}{list of three elements, (probdraw, NULL, compdraw)}
+}
+\note{
+  More on \code{probdraw} component of nmix return value list: \cr
+  this is an R/keep by ncomp array of draws of mixture component probs (pvec)\cr
+  More on \code{compdraw} component of nmix return value list: 
+  \describe{
+  \item{compdraw[[i]]}{the ith draw of components for mixtures}
+  \item{compdraw[[i]][[j]]}{ith draw of the jth normal mixture comp}
+  \item{compdraw[[i]][[j]][[1]]}{ith draw of jth normal mixture comp mean vector}
+  \item{compdraw[[i]][[j]][[2]]}{ith draw of jth normal mixture cov parm (rooti)}
+  }
+
+  Note: Z should \strong{not} include an intercept and should be centered for ease of interpretation. The mean of each of the \code{nreg} \eqn{\beta} s is the mean of the normal mixture.  Use \code{summary()} to compute this mean from the \code{compdraw} output.  
+  
+  
+  Be careful in assessing the prior parameter, Amu.  .01 can be too small for some applications. See 
+  Rossi et al, chapter 5 for full discussion.\cr
+
+} 
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Rossi, Allenby and McCulloch, Chapter 3. \cr
+  \url{http://www.perossi.org/home/bsm-1}
+}
+
+\author{ Peter Rossi, Anderson School, UCLA,
+  \email{perossichi at gmail.com}.
+}
+\seealso{ \code{\link{rhierLinearModel}} }
+\examples{
+##
+if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=2000} else {R=10}
+
+set.seed(66)
+nreg=300; nobs=500; nvar=3; nz=2
+
+Z=matrix(runif(nreg*nz),ncol=nz) 
+Z=t(t(Z)-apply(Z,2,mean))
+Delta=matrix(c(1,-1,2,0,1,0),ncol=nz)
+tau0=.1
+iota=c(rep(1,nobs))
+
+## create arguments for rmixture
+
+tcomps=NULL
+a=matrix(c(1,0,0,0.5773503,1.1547005,0,-0.4082483,0.4082483,1.2247449),ncol=3)
+tcomps[[1]]=list(mu=c(0,-1,-2),rooti=a) 
+tcomps[[2]]=list(mu=c(0,-1,-2)*2,rooti=a)
+tcomps[[3]]=list(mu=c(0,-1,-2)*4,rooti=a)
+tpvec=c(.4,.2,.4)                               
+
+regdata=NULL						  # simulated data with Z
+betas=matrix(double(nreg*nvar),ncol=nvar)
+tind=double(nreg)
+
+for (reg in 1:nreg) {
+tempout=rmixture(1,tpvec,tcomps)
+betas[reg,]=Delta\%*\%Z[reg,]+as.vector(tempout$x)
+tind[reg]=tempout$z
+X=cbind(iota,matrix(runif(nobs*(nvar-1)),ncol=(nvar-1)))
+tau=tau0*runif(1,min=0.5,max=1)
+y=X\%*\%betas[reg,]+sqrt(tau)*rnorm(nobs)
+regdata[[reg]]=list(y=y,X=X,beta=betas[reg,],tau=tau)
+}
+
+## run rhierLinearMixture
+
+Data1=list(regdata=regdata,Z=Z)
+Prior1=list(ncomp=3)
+Mcmc1=list(R=R,keep=1)
+
+out1=rhierLinearMixture(Data=Data1,Prior=Prior1,Mcmc=Mcmc1)
+
+cat("Summary of Delta draws",fill=TRUE)
+summary(out1$Deltadraw,tvalues=as.vector(Delta))
+cat("Summary of Normal Mixture Distribution",fill=TRUE)
+summary(out1$nmix)
+
+if(0){
+## plotting examples 
+plot(out1$betadraw)
+plot(out1$nmix)
+plot(out1$Deltadraw)
+}
+
+}
+\keyword{ regression }
diff --git a/man/rhierLinearModel.Rd b/man/rhierLinearModel.Rd
old mode 100755
new mode 100644
index a463a93..eeb0ca8
--- a/man/rhierLinearModel.Rd
+++ b/man/rhierLinearModel.Rd
@@ -1,100 +1,101 @@
-\name{rhierLinearModel}
-\alias{rhierLinearModel}
-\concept{bayes}
-\concept{MCMC}
-\concept{Gibbs Sampling}
-\concept{hierarchical models}
-\concept{linear model}
-
-\title{ Gibbs Sampler for Hierarchical Linear Model }
-\description{
-  \code{rhierLinearModel} implements a Gibbs Sampler for hierarchical linear models with a normal prior.
-}
-\usage{
-rhierLinearModel(Data, Prior, Mcmc)
-}
-\arguments{
-  \item{Data}{ list(regdata,Z) (Z optional). }
-  \item{Prior}{ list(Deltabar,A,nu.e,ssq,nu,V)  (optional).}
-  \item{Mcmc}{ list(R,keep) (R required).}
-}
-\details{
-  Model: length(regdata) regression equations. \cr
-        \eqn{y_i = X_ibeta_i + e_i}. \eqn{e_i} \eqn{\sim}{~} \eqn{N(0,tau_i)}.  nvar X vars in each equation. 
-
- Priors:\cr
-        \eqn{tau_i} \eqn{\sim}{~} nu.e*\eqn{ssq_i/\chi^2_{nu.e}}.  \eqn{tau_i} is the variance of \eqn{e_i}.\cr
-        \eqn{beta_i} \eqn{\sim}{~} N(ZDelta[i,],\eqn{V_{beta}}). \cr
-               Note:  ZDelta is the matrix Z * Delta; [i,] refers to ith row of this product.
-
-          \eqn{vec(Delta)} given \eqn{V_{beta}} \eqn{\sim}{~} \eqn{N(vec(Deltabar),V_{beta} (x) A^{-1})}.\cr
-          \eqn{V_{beta}} \eqn{\sim}{~} \eqn{IW(nu,V)}. \cr
-              \eqn{Delta, Deltabar} are nz x nvar.  \eqn{A} is nz x nz.  \eqn{V_{beta}} is nvar x nvar.
-        
-          Note: if you don't have any z vars, set Z=iota (nreg x 1).
-
-  List arguments contain:
-  \itemize{
-    \item{\code{regdata}}{ list of lists with X,y matrices for each of length(regdata) regressions}
-    \item{\code{regdata[[i]]$X}}{ X matrix for equation i }
-    \item{\code{regdata[[i]]$y}}{ y vector for equation i }
-    \item{\code{Deltabar}}{ nz x nvar matrix of prior means (def: 0)}
-    \item{\code{A}}{ nz x nz matrix for prior precision (def: .01I)}
-    \item{\code{nu.e}}{ d.f. parm for regression error variance prior (def: 3)}
-    \item{\code{ssq}}{ scale parm for regression error var prior (def: var(\eqn{y_i}))}
-    \item{\code{nu}}{ d.f. parm for Vbeta prior (def: nvar+3)}
-    \item{\code{V}}{ Scale location matrix for Vbeta prior (def: nu*I)}
-    \item{\code{R}}{ number of MCMC draws}
-    \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)}
-   }
-}
-\value{
-  a list containing
-  \item{betadraw}{nreg x nvar x R/keep array of individual regression coef draws}
-  \item{taudraw}{R/keep x nreg array of error variance draws}
-  \item{Deltadraw}{R/keep x nz x nvar array of Deltadraws}
-  \item{Vbetadraw}{R/keep x nvar*nvar array of Vbeta draws}
-}
-\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
-  by Rossi, Allenby and McCulloch, Chapter 3. \cr
-  \url{http://www.perossi.org/home/bsm-1}
-}
-
-\author{ Peter Rossi, Anderson School, UCLA,
-  \email{perossichi at gmail.comu}.
-}
-\seealso{ \code{\link{rhierLinearMixture}} }
-\examples{
-##
-if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=2000} else {R=10}
-
-nreg=100; nobs=100; nvar=3
-Vbeta=matrix(c(1,.5,0,.5,2,.7,0,.7,1),ncol=3)
-Z=cbind(c(rep(1,nreg)),3*runif(nreg)); Z[,2]=Z[,2]-mean(Z[,2])
-nz=ncol(Z)
-Delta=matrix(c(1,-1,2,0,1,0),ncol=2)
-Delta=t(Delta) # first row of Delta is means of betas
-Beta=matrix(rnorm(nreg*nvar),nrow=nreg)\%*\%chol(Vbeta)+Z\%*\%Delta
-tau=.1
-iota=c(rep(1,nobs))
-regdata=NULL
-for (reg in 1:nreg) { X=cbind(iota,matrix(runif(nobs*(nvar-1)),ncol=(nvar-1)))
-	y=X\%*\%Beta[reg,]+sqrt(tau)*rnorm(nobs); regdata[[reg]]=list(y=y,X=X) }
-
-Data1=list(regdata=regdata,Z=Z)
-Mcmc1=list(R=R,keep=1)
-out=rhierLinearModel(Data=Data1,Mcmc=Mcmc1)
-
-cat("Summary of Delta draws",fill=TRUE)
-summary(out$Deltadraw,tvalues=as.vector(Delta))
-cat("Summary of Vbeta draws",fill=TRUE)
-summary(out$Vbetadraw,tvalues=as.vector(Vbeta[upper.tri(Vbeta,diag=TRUE)]))
-
-if(0){
-## plotting examples
-plot(out$betadraw)
-plot(out$Deltadraw)
-}
-
-}
-\keyword{ regression }
+\name{rhierLinearModel}
+\alias{rhierLinearModel}
+\concept{bayes}
+\concept{MCMC}
+\concept{Gibbs Sampling}
+\concept{hierarchical models}
+\concept{linear model}
+
+\title{ Gibbs Sampler for Hierarchical Linear Model }
+\description{
+  \code{rhierLinearModel} implements a Gibbs Sampler for hierarchical linear models with a normal prior.
+}
+\usage{
+rhierLinearModel(Data, Prior, Mcmc)
+}
+\arguments{
+  \item{Data}{ list(regdata,Z) (Z optional). }
+  \item{Prior}{ list(Deltabar,A,nu.e,ssq,nu,V)  (optional).}
+  \item{Mcmc}{ list(R,keep,nprint) (R required).}
+}
+\details{
+  Model: length(regdata) regression equations. \cr
+        \eqn{y_i = X_i\beta_i + e_i}. \eqn{e_i} \eqn{\sim}{~} \eqn{N(0,\tau_i)}.  nvar X vars in each equation. 
+
+ Priors:\cr
+        \eqn{\tau_i} \eqn{\sim}{~} nu.e*\eqn{ssq_i/\chi^2_{nu.e}}.  \eqn{\tau_i} is the variance of \eqn{e_i}.\cr
+        \eqn{\beta_i} \eqn{\sim}{~} N(Z\eqn{\Delta}[i,],\eqn{V_{\beta}}). \cr
+               Note:  Z\eqn{\Delta} is the matrix Z * \eqn{\Delta}; [i,] refers to ith row of this product.\cr
+
+          \eqn{vec(\Delta)} given \eqn{V_{\beta}} \eqn{\sim}{~} \eqn{N(vec(Deltabar),V_{\beta} (x) A^{-1})}.\cr
+          \eqn{V_{\beta}} \eqn{\sim}{~} \eqn{IW(nu,V)}. \cr
+              \eqn{Delta, Deltabar} are nz x nvar.  \eqn{A} is nz x nz.  \eqn{V_{\beta}} is nvar x nvar.
+        
+          Note: if you don't have any Z vars, omit Z in the \code{Data} argument and a vector of ones will be inserted for you.  In this case (of no Z vars), the matrix \eqn{\Delta} will be 1 x nvar and should be interpreted as the mean of all unit \eqn{\beta} s.
+
+  List arguments contain:
+  \itemize{
+    \item{\code{regdata}}{ list of lists with X,y matrices for each of length(regdata) regressions}
+    \item{\code{regdata[[i]]$X}}{ X matrix for equation i }
+    \item{\code{regdata[[i]]$y}}{ y vector for equation i }
+    \item{\code{Deltabar}}{ nz x nvar matrix of prior means (def: 0)}
+    \item{\code{A}}{ nz x nz matrix for prior precision (def: .01I)}
+    \item{\code{nu.e}}{ d.f. parm for regression error variance prior (def: 3)}
+    \item{\code{ssq}}{ scale parm for regression error var prior (def: var(\eqn{y_i}))}
+    \item{\code{nu}}{ d.f. parm for Vbeta prior (def: nvar+3)}
+    \item{\code{V}}{ Scale location matrix for Vbeta prior (def: nu*I)}
+    \item{\code{R}}{ number of MCMC draws}
+    \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)}
+    \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
+   }
+}
+\value{
+  a list containing
+  \item{betadraw}{nreg x nvar x R/keep array of individual regression coef draws}
+  \item{taudraw}{R/keep x nreg array of error variance draws}
+  \item{Deltadraw}{R/keep x nz x nvar array of Deltadraws}
+  \item{Vbetadraw}{R/keep x nvar*nvar array of Vbeta draws}
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Rossi, Allenby and McCulloch, Chapter 3. \cr
+  \url{http://www.perossi.org/home/bsm-1}
+}
+
+\author{ Peter Rossi, Anderson School, UCLA,
+  \email{perossichi at gmail.com}.
+}
+\seealso{ \code{\link{rhierLinearMixture}} }
+\examples{
+##
+if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=2000} else {R=10}
+
+nreg=100; nobs=100; nvar=3
+Vbeta=matrix(c(1,.5,0,.5,2,.7,0,.7,1),ncol=3)
+Z=cbind(c(rep(1,nreg)),3*runif(nreg)); Z[,2]=Z[,2]-mean(Z[,2])
+nz=ncol(Z)
+Delta=matrix(c(1,-1,2,0,1,0),ncol=2)
+Delta=t(Delta) # first row of Delta is means of betas
+Beta=matrix(rnorm(nreg*nvar),nrow=nreg)\%*\%chol(Vbeta)+Z\%*\%Delta
+tau=.1
+iota=c(rep(1,nobs))
+regdata=NULL
+for (reg in 1:nreg) { X=cbind(iota,matrix(runif(nobs*(nvar-1)),ncol=(nvar-1)))
+	y=X\%*\%Beta[reg,]+sqrt(tau)*rnorm(nobs); regdata[[reg]]=list(y=y,X=X) }
+
+Data1=list(regdata=regdata,Z=Z)
+Mcmc1=list(R=R,keep=1)
+out=rhierLinearModel(Data=Data1,Mcmc=Mcmc1)
+
+cat("Summary of Delta draws",fill=TRUE)
+summary(out$Deltadraw,tvalues=as.vector(Delta))
+cat("Summary of Vbeta draws",fill=TRUE)
+summary(out$Vbetadraw,tvalues=as.vector(Vbeta[upper.tri(Vbeta,diag=TRUE)]))
+
+if(0){
+## plotting examples
+plot(out$betadraw)
+plot(out$Deltadraw)
+}
+
+}
+\keyword{ regression }
diff --git a/man/rhierMnlDP.Rd b/man/rhierMnlDP.Rd
old mode 100755
new mode 100644
index bbbe062..f0f5e23
--- a/man/rhierMnlDP.Rd
+++ b/man/rhierMnlDP.Rd
@@ -1,226 +1,228 @@
-\name{rhierMnlDP}
-\alias{rhierMnlDP}
-\concept{bayes}
-\concept{MCMC}
-\concept{Multinomial Logit}
-\concept{normal mixture}
-\concept{Dirichlet Process Prior}
-\concept{heterogeneity}
-\concept{hierarchical models}
-
-\title{ MCMC Algorithm for Hierarchical Multinomial Logit with Dirichlet Process Prior Heterogeneity}
-\description{
-  \code{rhierMnlDP} is a MCMC algorithm for a hierarchical multinomial logit with a Dirichlet Process Prior for the distribution of heteorogeneity.  A base normal model is used so that the DP can be interpreted as allowing for a mixture of normals with as many components as there are panel units.  This is a hybrid Gibbs Sampler with a RW Metropolis step for the MNL 
-  coefficients for each panel unit.  This procedure can be interpreted as a Bayesian semi-parameteric method in the sense that the DP prior can accomodate heterogeniety of an unknown form.
-}
-\usage{
-rhierMnlDP(Data, Prior, Mcmc)
-}
-\arguments{
-  \item{Data}{ list(p,lgtdata,Z) ( Z is optional) }
-  \item{Prior}{ list(deltabar,Ad,Prioralpha,lambda\_hyper) (all are optional)}
-  \item{Mcmc}{ list(s,w,R,keep) (R required)}
-}
-\details{
-  Model: \cr
-  \eqn{y_i} \eqn{\sim}{~} \eqn{MNL(X_i,beta_i)}.  i=1,\ldots, length(lgtdata). \eqn{theta_i} is nvar x 1.
-
-  \eqn{beta_i}= ZDelta[i,] + \eqn{u_i}. \cr
-  Note: here ZDelta refers to Z\%*\%D, ZDelta[i,] is ith row of this product.\cr
-  Delta is an nz x nvar array. 
-
-  \eqn{beta_i} \eqn{\sim}{~} \eqn{N(mu_i,Sigma_i)}. \cr
-
-  Priors: \cr
-        \eqn{theta_i=(mu_i,Sigma_i)} \eqn{\sim}{~} \eqn{DP(G_0(lambda),alpha)}\cr
-        \eqn{G_0(lambda):}\cr
-        \eqn{mu_i | Sigma_i} \eqn{\sim}{~} \eqn{N(0,Sigma_i (x) a^{-1})}\cr
-        \eqn{Sigma_i} \eqn{\sim}{~} \eqn{IW(nu,nu*v*I)}
-        
-        \eqn{lambda(a,nu,v):}\cr
-        \eqn{a} \eqn{\sim}{~} uniform[alim[1],alimb[2]]\cr
-        \eqn{nu} \eqn{\sim}{~}  dim(data)-1 + exp(z) \cr
-        \eqn{z} \eqn{\sim}{~}  uniform[dim(data)-1+nulim[1],nulim[2]]\cr
-        \eqn{v} \eqn{\sim}{~} uniform[vlim[1],vlim[2]]
-       
-        \eqn{alpha} \eqn{\sim}{~} \eqn{(1-(alpha-alphamin)/(alphamax-alphamin))^power} \cr
-        alpha= alphamin then expected number of components = Istarmin \cr
-        alpha= alphamax then expected number of components = Istarmax \cr
-
-
-  Lists contain: \cr
-
-Data:\cr
-  \itemize{
-    \item{\code{p}}{ p is number of choice alternatives}
-    \item{\code{lgtdata}}{list of lists with each cross-section unit MNL data}
-    \item{\code{lgtdata[[i]]$y}}{ \eqn{n_i} vector of multinomial outcomes (1,\ldots,m)}
-    \item{\code{lgtdata[[i]]$X}}{ \eqn{n_i} by nvar design matrix for ith unit}
-  }
-Prior: \cr
-    \itemize{
-      \item{\code{deltabar}}{nz*nvar vector of prior means (def: 0)}
-      \item{\code{Ad}}{ prior prec matrix for vec(D) (def: .01I)}
-   }
-Prioralpha:\cr
- \itemize{
-  \item{\code{Istarmin}}{expected number of components at lower bound of support of alpha def(1)}
-  \item{\code{Istarmax}}{expected number of components at upper bound of support of alpha (def: min(50,.1*nlgt))}
-  \item{\code{power}}{power parameter for alpha prior (def: .8)}
-  }
- 
-lambda\_hyper:\cr
-  \itemize{
-   \item{\code{alim}}{defines support of a distribution,def:c(.01,2) }
-   \item{\code{nulim}}{defines support of nu distribution, def:c(.01,3)} 
-   \item{\code{vlim}}{defines support of v distribution, def:c(.1,4)} 
-  }
-
-Mcmc:\cr
- \itemize{
-   \item{\code{R}}{number of mcmc draws}
-   \item{\code{keep}}{thinning parm, keep every keepth draw}
-   \item{\code{maxuniq}}{storage constraint on the number of unique components}
-   \item{\code{gridsize}}{number of discrete points for hyperparameter priors,def: 20}
-  }
-
-}
-\value{
-  a list containing:
-  \item{Deltadraw}{R/keep  x nz*nvar matrix of draws of Delta, first row is initial value}
-  \item{betadraw}{ nlgt x nvar x R/keep array of draws of betas}
-  \item{nmix}{ list of 3 components, probdraw, NULL, compdraw }
-  \item{adraw}{R/keep draws of hyperparm a}
-  \item{vdraw}{R/keep draws of hyperparm v}
-  \item{nudraw}{R/keep draws of hyperparm nu}
-  \item{Istardraw}{R/keep draws of number of unique components}
-  \item{alphadraw}{R/keep draws of number of DP tightness parameter}
-  \item{loglike}{R/keep draws of log-likelihood}
-}
-\note{
-
-  As is well known, Bayesian density estimation involves computing the predictive distribution of a "new" unit parameter,
-  \eqn{theta_{n+1}} (here "n"=nlgt). This is done by averaging the normal base distribution over draws from the distribution of \eqn{theta_{n+1}} given \eqn{theta_1}, ..., \eqn{theta_n},alpha,lambda,Data.
-  To facilitate this, we store those draws from the predictive distribution of \eqn{theta_{n+1}} in a list structure compatible  with other \code{bayesm} routines that implement a finite mixture of normals.
-
-  More on nmix list:\cr 
-  contains the draws from the predictive distribution of a "new" observations parameters.  These are simply the parameters of one normal distribution.  We enforce compatibility with a mixture of k components in order to utilize generic summary 
-  plotting functions.  
-
-  Therefore,\code{probdraw} is a vector of ones.  \code{zdraw} (indicator draws) is omitted as it is not necessary for density estimation. \code{compdraw} contains the draws of the \eqn{theta_{n+1}} as a list of list of lists.
-
-  More on \code{compdraw} component of return value list:
-  \itemize{
-  \item{compdraw[[i]]}{ith draw of components for mixtures}
-  \item{compdraw[[i]][[1]]}{ith draw of the thetanp1}
-  \item{compdraw[[i]][[1]][[1]]}{ith draw of mean vector}
-  \item{compdraw[[i]][[1]][[2]]}{ith draw of parm (rooti)}
-  }
-
-  We parameterize the prior on \eqn{Sigma_i} such that \eqn{mode(Sigma)= nu/(nu+2) vI}.
-    The support of nu enforces a non-degenerate IW density; \eqn{nulim[1] > 0}.
-
-    The default choices of alim,nulim, and vlim determine the location and approximate size of candidate
-    "atoms" or possible normal components. The defaults are sensible given a reasonable scaling of the X variables.
-    You want to insure that alim is set for a wide enough range of values (remember a is a precision
-    parameter) and the v is big enough to propose Sigma matrices wide enough to cover the data range.  
-
-    A careful analyst should look at the posterior distribution of a, nu, v to make sure that the support is
-    set correctly in alim, nulim, vlim.  In other words, if we see the posterior bunched up at one end of these
-    support ranges, we should widen the range and rerun.  
-
-   If you want to force the procedure to use many small atoms, then set nulim to consider only large values and 
-   set vlim to consider only small scaling constants.  Set alphamax to a large number.  This will create a very
-   "lumpy" density estimate somewhat like the classical Kernel density estimates. Of course, this is not advised 
-   if you have a prior belief that densities are relatively smooth.
-
-  Note: Z should \strong{not} include an intercept and is centered for ease of interpretation.\cr
-  
-  Large R values may be required (>20,000).
-
-} 
-\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
-  by Rossi, Allenby and McCulloch, Chapter 5. \cr
-  \url{http://www.perossi.org/home/bsm-1}
-}
-
-\author{ Peter Rossi, Anderson School, UCLA,
-  \email{perossichi at gmail.com}.
-}
-  
-\seealso{ \code{\link{rhierMnlRwMixture}} }
-\examples{
-##
-if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=20000} else {R=10}
-
-set.seed(66)
-p=3                                # num of choice alterns
-ncoef=3  
-nlgt=300                           # num of cross sectional units
-nz=2
-Z=matrix(runif(nz*nlgt),ncol=nz)
-Z=t(t(Z)-apply(Z,2,mean))          # demean Z
-ncomp=3                                # no of mixture components
-Delta=matrix(c(1,0,1,0,1,2),ncol=2)
-comps=NULL
-comps[[1]]=list(mu=c(0,-1,-2),rooti=diag(rep(2,3)))
-comps[[2]]=list(mu=c(0,-1,-2)*2,rooti=diag(rep(2,3)))
-comps[[3]]=list(mu=c(0,-1,-2)*4,rooti=diag(rep(2,3)))
-pvec=c(.4,.2,.4)
-
-simmnlwX= function(n,X,beta) {
-  ##  simulate from MNL model conditional on X matrix
-  k=length(beta)
-  Xbeta=X\%*\%beta
-  j=nrow(Xbeta)/n
-  Xbeta=matrix(Xbeta,byrow=TRUE,ncol=j)
-  Prob=exp(Xbeta)
-  iota=c(rep(1,j))
-  denom=Prob\%*\%iota
-  Prob=Prob/as.vector(denom)
-  y=vector("double",n)
-  ind=1:j
-  for (i in 1:n) 
-      {yvec=rmultinom(1,1,Prob[i,]); y[i]=ind\%*\%yvec}
-  return(list(y=y,X=X,beta=beta,prob=Prob))
-}
-
-## simulate data with a mixture of 3 normals
-simlgtdata=NULL
-ni=rep(50,300)
-for (i in 1:nlgt) 
-{  betai=Delta\%*\%Z[i,]+as.vector(rmixture(1,pvec,comps)$x)
-   Xa=matrix(runif(ni[i]*p,min=-1.5,max=0),ncol=p)
-   X=createX(p,na=1,nd=NULL,Xa=Xa,Xd=NULL,base=1)
-   outa=simmnlwX(ni[i],X,betai)
-   simlgtdata[[i]]=list(y=outa$y,X=X,beta=betai)
-}
-
-## plot betas
-if(1){
-## set if(1) above to produce plots
-bmat=matrix(0,nlgt,ncoef)
-for(i in 1:nlgt) {bmat[i,]=simlgtdata[[i]]$beta}
-par(mfrow=c(ncoef,1))
-for(i in 1:ncoef) hist(bmat[,i],breaks=30,col="magenta")
-}
-
-##   set Data and Mcmc lists
-keep=5
-Mcmc1=list(R=R,keep=keep)
-Data1=list(p=p,lgtdata=simlgtdata,Z=Z)
-
-out=rhierMnlDP(Data=Data1,Mcmc=Mcmc1)
-
-cat("Summary of Delta draws",fill=TRUE)
-summary(out$Deltadraw,tvalues=as.vector(Delta))
-
-if(0) {
-## plotting examples
-plot(out$betadraw)
-plot(out$nmix)
-}
-
-}
-
-\keyword{models}
+\name{rhierMnlDP}
+\alias{rhierMnlDP}
+\concept{bayes}
+\concept{MCMC}
+\concept{Multinomial Logit}
+\concept{normal mixture}
+\concept{Dirichlet Process Prior}
+\concept{heterogeneity}
+\concept{hierarchical models}
+
+\title{ MCMC Algorithm for Hierarchical Multinomial Logit with Dirichlet Process Prior Heterogeneity}
+\description{
+  \code{rhierMnlDP} is a MCMC algorithm for a hierarchical multinomial logit with a Dirichlet Process Prior for the distribution of heterogeneity.  A base normal model is used so that the DP can be interpreted as allowing for a mixture of normals with as many components as there are panel units.  This is a hybrid Gibbs Sampler with a RW Metropolis step for the MNL 
+  coefficients for each panel unit.  This procedure can be interpreted as a Bayesian semi-parametric method in the sense that the DP prior can accommodate heterogeneity of an unknown form.
+}
+\usage{
+rhierMnlDP(Data, Prior, Mcmc)
+}
+\arguments{
+  \item{Data}{ list(p,lgtdata,Z) ( Z is optional) }
+  \item{Prior}{ list(deltabar,Ad,Prioralpha,lambda_hyper) (all are optional)}
+  \item{Mcmc}{ list(s,w,R,keep,nprint) (R required)}
+}
+\details{
+  Model: \cr
+  \eqn{y_i} \eqn{\sim}{~} \eqn{MNL(X_i,\beta_i)}.  i=1,\ldots, length(lgtdata). \eqn{\theta_i} is nvar x 1.
+
+  \eqn{\beta_i}= Z\eqn{\Delta}[i,] + \eqn{u_i}. \cr
+  Note:  Z\eqn{\Delta} is the matrix Z * \eqn{\Delta}; [i,] refers to ith row of this product.\cr
+  Delta is an nz x nvar array. 
+
+  \eqn{\beta_i} \eqn{\sim}{~} \eqn{N(\mu_i,\Sigma_i)}. \cr
+
+  Priors: \cr
+        \eqn{\theta_i=(\mu_i,\Sigma_i)} \eqn{\sim}{~} \eqn{DP(G_0(\lambda),alpha)}\cr
+        \eqn{G_0(\lambda):}\cr
+        \eqn{\mu_i | \Sigma_i} \eqn{\sim}{~} \eqn{N(0,\Sigma_i (x) a^{-1})}\cr
+        \eqn{\Sigma_i} \eqn{\sim}{~} \eqn{IW(nu,nu*v*I)}\cr
+        \eqn{delta= vec(\Delta)} \eqn{\sim}{~} \eqn{N(deltabar,A_d^{-1})}\cr
+        
+        \eqn{\lambda(a,nu,v):}\cr
+        \eqn{a} \eqn{\sim}{~} uniform[alim[1],alim[2]]\cr
+        \eqn{nu} \eqn{\sim}{~}  dim(data)-1 + exp(z) \cr
+        \eqn{z} \eqn{\sim}{~}  uniform[dim(data)-1+nulim[1],nulim[2]]\cr
+        \eqn{v} \eqn{\sim}{~} uniform[vlim[1],vlim[2]]
+       
+        \eqn{alpha} \eqn{\sim}{~} \eqn{(1-(alpha-alphamin)/(alphamax-alphamin))^{power}} \cr
+        alpha = alphamin then expected number of components = Istarmin \cr
+        alpha = alphamax then expected number of components = Istarmax \cr
+
+
+  Lists contain: \cr
+
+Data:\cr
+  \itemize{
+    \item{\code{p}}{ p is number of choice alternatives}
+    \item{\code{lgtdata}}{list of lists with each cross-section unit MNL data}
+    \item{\code{lgtdata[[i]]$y}}{ \eqn{n_i} vector of multinomial outcomes (1,\ldots,m)}
+    \item{\code{lgtdata[[i]]$X}}{ \eqn{n_i} by nvar design matrix for ith unit}
+  }
+Prior: \cr
+    \itemize{
+      \item{\code{deltabar}}{nz*nvar vector of prior means (def: 0)}
+      \item{\code{Ad}}{ prior prec matrix for vec(D) (def: .01I)}
+   }
+Prioralpha:\cr
+ \itemize{
+  \item{\code{Istarmin}}{expected number of components at lower bound of support of alpha def(1)}
+  \item{\code{Istarmax}}{expected number of components at upper bound of support of alpha (def: min(50,.1*nlgt))}
+  \item{\code{power}}{power parameter for alpha prior (def: .8)}
+  }
+ 
+lambda_hyper:\cr
+  \itemize{
+   \item{\code{alim}}{defines support of a distribution (def: (.01,2))}
+   \item{\code{nulim}}{defines support of nu distribution (def: (.01,3))} 
+   \item{\code{vlim}}{defines support of v distribution (def: (.1,4))} 
+  }
+
+Mcmc:\cr
+ \itemize{
+   \item{\code{R}}{ number of mcmc draws}
+   \item{\code{keep}}{ thinning parm, keep every keepth draw}
+   \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
+   \item{\code{maxuniq}}{ storage constraint on the number of unique components}
+   \item{\code{gridsize}}{ number of discrete points for hyperparameter priors (def: 20)}
+  }
+
+}
+\value{
+  a list containing:
+  \item{Deltadraw}{R/keep  x nz*nvar matrix of draws of Delta, first row is initial value}
+  \item{betadraw}{ nlgt x nvar x R/keep array of draws of betas}
+  \item{nmix}{ list of 3 components, probdraw, NULL, compdraw }
+  \item{adraw}{R/keep draws of hyperparm a}
+  \item{vdraw}{R/keep draws of hyperparm v}
+  \item{nudraw}{R/keep draws of hyperparm nu}
+  \item{Istardraw}{R/keep draws of number of unique components}
+  \item{alphadraw}{R/keep draws of number of DP tightness parameter}
+  \item{loglike}{R/keep draws of log-likelihood}
+}
+\note{
+
+  As is well known, Bayesian density estimation involves computing the predictive distribution of a "new" unit parameter,
+  \eqn{\theta_{n+1}} (here "n"=nlgt). This is done by averaging the normal base distribution over draws from the distribution of \eqn{\theta_{n+1}} given \eqn{\theta_1}, ..., \eqn{\theta_n},alpha,lambda,Data.
+  To facilitate this, we store those draws from the predictive distribution of \eqn{\theta_{n+1}} in a list structure compatible  with other \code{bayesm} routines that implement a finite mixture of normals.
+
+  More on nmix list:\cr 
+  contains the draws from the predictive distribution of a "new" observation's parameters.  These are simply the parameters of one normal distribution.  We enforce compatibility with a mixture of k components in order to utilize generic summary 
+  plotting functions.  
+
+  Therefore,\code{probdraw} is a vector of ones.  \code{zdraw} (indicator draws) is omitted as it is not necessary for density estimation. \code{compdraw} contains the draws of the \eqn{\theta_{n+1}} as a list of list of lists.
+
+  More on \code{compdraw} component of return value list:
+  \itemize{
+  \item{compdraw[[i]]}{ith draw of components for mixtures}
+  \item{compdraw[[i]][[1]]}{ith draw of the thetanp1}
+  \item{compdraw[[i]][[1]][[1]]}{ith draw of mean vector}
+  \item{compdraw[[i]][[1]][[2]]}{ith draw of parm (rooti)}
+  }
+
+  We parameterize the prior on \eqn{\Sigma_i} such that \eqn{mode(\Sigma)= nu/(nu+2) vI}.
+    The support of nu enforces a non-degenerate IW density; \eqn{nulim[1] > 0}.
+
+    The default choices of alim,nulim, and vlim determine the location and approximate size of candidate
+    "atoms" or possible normal components. The defaults are sensible given a reasonable scaling of the X variables.
+    You want to insure that alim is set for a wide enough range of values (remember a is a precision
+    parameter) and the v is big enough to propose Sigma matrices wide enough to cover the data range.  
+
+    A careful analyst should look at the posterior distribution of a, nu, v to make sure that the support is
+    set correctly in alim, nulim, vlim.  In other words, if we see the posterior bunched up at one end of these
+    support ranges, we should widen the range and rerun.  
+
+   If you want to force the procedure to use many small atoms, then set nulim to consider only large values and 
+   set vlim to consider only small scaling constants.  Set alphamax to a large number.  This will create a very
+   "lumpy" density estimate somewhat like the classical Kernel density estimates. Of course, this is not advised 
+   if you have a prior belief that densities are relatively smooth.
+
+  Note: Z should \strong{not} include an intercept and is centered for ease of interpretation. The mean of each of the \code{nlgt} \eqn{\beta} s is the mean of the normal mixture.  Use \code{summary()} to compute this mean from the \code{compdraw} output.\cr
+  
+  Large R values may be required (>20,000).
+
+} 
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Rossi, Allenby and McCulloch, Chapter 5. \cr
+  \url{http://www.perossi.org/home/bsm-1}
+}
+
+\author{ Peter Rossi, Anderson School, UCLA,
+  \email{perossichi at gmail.com}.
+}
+  
+\seealso{ \code{\link{rhierMnlRwMixture}} }
+\examples{
+##
+if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=20000} else {R=10}
+
+set.seed(66)
+p=3                                # num of choice alterns
+ncoef=3  
+nlgt=300                           # num of cross sectional units
+nz=2
+Z=matrix(runif(nz*nlgt),ncol=nz)
+Z=t(t(Z)-apply(Z,2,mean))          # demean Z
+ncomp=3                                # no of mixture components
+Delta=matrix(c(1,0,1,0,1,2),ncol=2)
+comps=NULL
+comps[[1]]=list(mu=c(0,-1,-2),rooti=diag(rep(2,3)))
+comps[[2]]=list(mu=c(0,-1,-2)*2,rooti=diag(rep(2,3)))
+comps[[3]]=list(mu=c(0,-1,-2)*4,rooti=diag(rep(2,3)))
+pvec=c(.4,.2,.4)
+
+simmnlwX= function(n,X,beta) {
+  ##  simulate from MNL model conditional on X matrix
+  k=length(beta)
+  Xbeta=X\%*\%beta
+  j=nrow(Xbeta)/n
+  Xbeta=matrix(Xbeta,byrow=TRUE,ncol=j)
+  Prob=exp(Xbeta)
+  iota=c(rep(1,j))
+  denom=Prob\%*\%iota
+  Prob=Prob/as.vector(denom)
+  y=vector("double",n)
+  ind=1:j
+  for (i in 1:n) 
+      {yvec=rmultinom(1,1,Prob[i,]); y[i]=ind\%*\%yvec}
+  return(list(y=y,X=X,beta=beta,prob=Prob))
+}
+
+## simulate data with a mixture of 3 normals
+simlgtdata=NULL
+ni=rep(50,300)
+for (i in 1:nlgt) 
+{  betai=Delta\%*\%Z[i,]+as.vector(rmixture(1,pvec,comps)$x)
+   Xa=matrix(runif(ni[i]*p,min=-1.5,max=0),ncol=p)
+   X=createX(p,na=1,nd=NULL,Xa=Xa,Xd=NULL,base=1)
+   outa=simmnlwX(ni[i],X,betai)
+   simlgtdata[[i]]=list(y=outa$y,X=X,beta=betai)
+}
+
+## plot betas
+if(1){
+## set if(1) above to produce plots
+bmat=matrix(0,nlgt,ncoef)
+for(i in 1:nlgt) {bmat[i,]=simlgtdata[[i]]$beta}
+par(mfrow=c(ncoef,1))
+for(i in 1:ncoef) hist(bmat[,i],breaks=30,col="magenta")
+}
+
+##   set Data and Mcmc lists
+keep=5
+Mcmc1=list(R=R,keep=keep)
+Data1=list(p=p,lgtdata=simlgtdata,Z=Z)
+
+out=rhierMnlDP(Data=Data1,Mcmc=Mcmc1)
+
+cat("Summary of Delta draws",fill=TRUE)
+summary(out$Deltadraw,tvalues=as.vector(Delta))
+
+if(0) {
+## plotting examples
+plot(out$betadraw)
+plot(out$nmix)
+}
+
+}
+
+\keyword{models}
diff --git a/man/rhierMnlRwMixture.Rd b/man/rhierMnlRwMixture.Rd
old mode 100755
new mode 100644
index c4124e7..8ee950a
--- a/man/rhierMnlRwMixture.Rd
+++ b/man/rhierMnlRwMixture.Rd
@@ -1,182 +1,184 @@
-\name{rhierMnlRwMixture}
-\alias{rhierMnlRwMixture}
-\concept{bayes}
-\concept{MCMC}
-\concept{Multinomial Logit}
-\concept{mixture of normals}
-\concept{normal mixture}
-\concept{heterogeneity}
-\concept{hierarchical models}
-
-\title{ MCMC Algorithm for Hierarchical Multinomial Logit with Mixture of Normals Heterogeneity}
-\description{
-  \code{rhierMnlRwMixture} is a MCMC algorithm for a hierarchical multinomial logit with a mixture of normals 
-  heterogeneity distribution.  This is a hybrid Gibbs Sampler with a RW Metropolis step for the MNL 
-  coefficients for each panel unit.
-}
-\usage{
-rhierMnlRwMixture(Data, Prior, Mcmc)
-}
-\arguments{
-  \item{Data}{ list(p,lgtdata,Z) ( Z is optional) }
-  \item{Prior}{ list(a,deltabar,Ad,mubar,Amu,nu,V,ncomp) (all but ncomp are optional)}
-  \item{Mcmc}{ list(s,w,R,keep) (R required)}
-}
-\details{
-  Model: \cr
-  \eqn{y_i} \eqn{\sim}{~} \eqn{MNL(X_i,beta_i)}.  i=1,\ldots, length(lgtdata). \eqn{theta_i} is nvar x 1.
-
-  \eqn{beta_i}= ZDelta[i,] + \eqn{u_i}. \cr
-  Note: here ZDelta refers to Z\%*\%D, ZDelta[i,] is ith row of this product.\cr
-  Delta is an nz x nvar array. 
-
-  \eqn{u_i} \eqn{\sim}{~} \eqn{N(mu_{ind},Sigma_{ind})}. \eqn{ind} \eqn{\sim}{~} multinomial(pvec). \cr
-
-  Priors: \cr
-  \eqn{pvec} \eqn{\sim}{~} dirichlet (a)\cr
-  \eqn{delta= vec(Delta)} \eqn{\sim}{~} \eqn{N(deltabar,A_d^{-1})}\cr
-  \eqn{mu_j} \eqn{\sim}{~} \eqn{N(mubar,Sigma_j (x) Amu^{-1})}\cr
-  \eqn{Sigma_j} \eqn{\sim}{~} IW(nu,V) \cr
-
-  Lists contain:
-  \itemize{
-    \item{\code{p}}{ p is number of choice alternatives}
-    \item{\code{lgtdata}}{list of lists with each cross-section unit MNL data}
-    \item{\code{lgtdata[[i]]$y}}{ \eqn{n_i} vector of multinomial outcomes (1,\ldots,m)}
-    \item{\code{lgtdata[[i]]$X}}{ \eqn{n_i}*p by nvar design matrix for ith unit}
-    \item{\code{a}}{vector of length ncomp of Dirichlet prior parms (def: rep(5,ncomp))}
-    \item{\code{deltabar}}{nz*nvar vector of prior means (def: 0)}
-    \item{\code{Ad}}{ prior prec matrix for vec(D) (def: .01I)}
-    \item{\code{mubar}}{ nvar x 1 prior mean vector for normal comp mean (def: 0)}
-    \item{\code{Amu}}{ prior precision for normal comp mean (def: .01I)}
-    \item{\code{nu}}{ d.f. parm for IW prior on norm comp Sigma (def: nvar+3)}
-    \item{\code{V}}{ pds location parm for IW prior on norm comp Sigma (def: nuI)}
-    \item{\code{ncomp}}{ number of components used in normal mixture }
-    \item{\code{s}}{ scaling parm for RW Metropolis (def: 2.93/sqrt(nvar))}
-    \item{\code{w}}{ fractional likelihood weighting parm (def: .1)}
-    \item{\code{R}}{ number of MCMC draws}
-    \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)}
-  }
-}
-\value{
-  a list containing:
-  \item{Deltadraw}{R/keep  x nz*nvar matrix of draws of Delta, first row is initial value}
-  \item{betadraw}{ nlgt x nvar x R/keep array of draws of betas}
-  \item{nmix}{ list of 3 components, probdraw, NULL, compdraw }
-  \item{loglike}{ log-likelihood for each kept draw (length R/keep)}
-}
-\note{
-  More on \code{probdraw} component of nmix list:\cr 
-  R/keep x ncomp matrix of draws of probs of mixture components (pvec)  \cr
-  More on \code{compdraw} component of return value list: \cr
-  \itemize{
-  \item{compdraw[[i]]}{ the ith draw of components for mixtures}
-  \item{compdraw[[i]][[j]]}{ ith draw of the jth normal mixture comp}
-  \item{compdraw[[i]][[j]][[1]]}{ ith draw of jth normal mixture comp mean vector}
-  \item{compdraw[[i]][[j]][[2]]}{ ith draw of jth normal mixture cov parm (rooti) }
-  }
-
-  Note: Z should \strong{not} include an intercept and is centered for ease of interpretation.\cr
-  
-  Be careful in assessing prior parameter, Amu.  .01 is too small for many applications. See 
-  Rossi et al, chapter 5 for full discussion.\cr
-
-  Note: as of version 2.0-2 of \code{bayesm}, the fractional weight parameter has been changed
-  to a weight between 0 and 1.  w is the fractional weight on the normalized pooled likelihood.
-  This differs from what is in Rossi et al chapter 5, i.e.
-
-  \eqn{like_i^(1-w) x like_pooled^((n_i/N)*w)}
-
- 
-  Large R values may be required (>20,000).
-
-} 
-\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
-  by Rossi, Allenby and McCulloch, Chapter 5. \cr
-  \url{http://www.perossi.org/home/bsm-1}
-}
-
-\author{ Peter Rossi, Anderson School, UCLA,
-  \email{perossichi at gmail.com}.
-}
-  
-\seealso{ \code{\link{rmnlIndepMetrop}} }
-\examples{
-##
-if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=10000} else {R=10}
-
-set.seed(66)
-p=3                                # num of choice alterns
-ncoef=3  
-nlgt=300                           # num of cross sectional units
-nz=2
-Z=matrix(runif(nz*nlgt),ncol=nz)
-Z=t(t(Z)-apply(Z,2,mean))          # demean Z
-ncomp=3                                # no of mixture components
-Delta=matrix(c(1,0,1,0,1,2),ncol=2)
-comps=NULL
-comps[[1]]=list(mu=c(0,-1,-2),rooti=diag(rep(1,3)))
-comps[[2]]=list(mu=c(0,-1,-2)*2,rooti=diag(rep(1,3)))
-comps[[3]]=list(mu=c(0,-1,-2)*4,rooti=diag(rep(1,3)))
-pvec=c(.4,.2,.4)
-
-simmnlwX= function(n,X,beta) {
-  ##  simulate from MNL model conditional on X matrix
-  k=length(beta)
-  Xbeta=X\%*\%beta
-  j=nrow(Xbeta)/n
-  Xbeta=matrix(Xbeta,byrow=TRUE,ncol=j)
-  Prob=exp(Xbeta)
-  iota=c(rep(1,j))
-  denom=Prob\%*\%iota
-  Prob=Prob/as.vector(denom)
-  y=vector("double",n)
-  ind=1:j
-  for (i in 1:n) 
-      {yvec=rmultinom(1,1,Prob[i,]); y[i]=ind\%*\%yvec}
-  return(list(y=y,X=X,beta=beta,prob=Prob))
-}
-
-## simulate data
-simlgtdata=NULL
-ni=rep(50,300)
-for (i in 1:nlgt) 
-{  betai=Delta\%*\%Z[i,]+as.vector(rmixture(1,pvec,comps)$x)
-   Xa=matrix(runif(ni[i]*p,min=-1.5,max=0),ncol=p)
-   X=createX(p,na=1,nd=NULL,Xa=Xa,Xd=NULL,base=1)
-   outa=simmnlwX(ni[i],X,betai)
-   simlgtdata[[i]]=list(y=outa$y,X=X,beta=betai)
-}
-
-## plot betas
-if(0){
-## set if(1) above to produce plots
-bmat=matrix(0,nlgt,ncoef)
-for(i in 1:nlgt) {bmat[i,]=simlgtdata[[i]]$beta}
-par(mfrow=c(ncoef,1))
-for(i in 1:ncoef) hist(bmat[,i],breaks=30,col="magenta")
-}
-
-##   set parms for priors and Z
-Prior1=list(ncomp=5)
-
-keep=5
-Mcmc1=list(R=R,keep=keep)
-Data1=list(p=p,lgtdata=simlgtdata,Z=Z)
-
-out=rhierMnlRwMixture(Data=Data1,Prior=Prior1,Mcmc=Mcmc1)
-
-cat("Summary of Delta draws",fill=TRUE)
-summary(out$Deltadraw,tvalues=as.vector(Delta))
-cat("Summary of Normal Mixture Distribution",fill=TRUE)
-summary(out$nmix)
-
-if(0) {
-## plotting examples
-plot(out$betadraw)
-plot(out$nmix)
-}
-
-}
-
-\keyword{models}
+\name{rhierMnlRwMixture}
+\alias{rhierMnlRwMixture}
+\concept{bayes}
+\concept{MCMC}
+\concept{Multinomial Logit}
+\concept{mixture of normals}
+\concept{normal mixture}
+\concept{heterogeneity}
+\concept{hierarchical models}
+
+\title{ MCMC Algorithm for Hierarchical Multinomial Logit with Mixture of Normals Heterogeneity}
+\description{
+  \code{rhierMnlRwMixture} is a MCMC algorithm for a hierarchical multinomial logit with a mixture of normals 
+  heterogeneity distribution.  This is a hybrid Gibbs Sampler with a RW Metropolis step for the MNL 
+  coefficients for each panel unit.
+}
+\usage{
+rhierMnlRwMixture(Data, Prior, Mcmc)
+}
+\arguments{
+  \item{Data}{ list(p,lgtdata,Z) ( Z is optional) }
+  \item{Prior}{ list(a,deltabar,Ad,mubar,Amu,nu,V,ncomp) (all but ncomp are optional)}
+  \item{Mcmc}{ list(s,w,R,keep,nprint) (R required)}
+}
+\details{
+  Model: \cr
+  \eqn{y_i} \eqn{\sim}{~} \eqn{MNL(X_i,\beta_i)}.  i=1,\ldots, length(lgtdata). \eqn{\beta_i} is nvar x 1.
+
+  \eqn{\beta_i}= Z\eqn{\Delta}[i,] + \eqn{u_i}. \cr
+  Note:  Z\eqn{\Delta} is the matrix Z * \eqn{\Delta}; [i,] refers to ith row of this product.\cr
+  Delta is an nz x nvar array. 
+
+  \eqn{u_i} \eqn{\sim}{~} \eqn{N(\mu_{ind},\Sigma_{ind})}. \eqn{ind} \eqn{\sim}{~} multinomial(pvec). \cr
+
+  Priors: \cr
+  \eqn{pvec} \eqn{\sim}{~} dirichlet (a)\cr
+  \eqn{delta= vec(\Delta)} \eqn{\sim}{~} \eqn{N(deltabar,A_d^{-1})}\cr
+  \eqn{\mu_j} \eqn{\sim}{~} \eqn{N(mubar,\Sigma_j (x) Amu^{-1})}\cr
+  \eqn{\Sigma_j} \eqn{\sim}{~} IW(nu,V) \cr
+
+  Lists contain:
+  \itemize{
+    \item{\code{p}}{ p is number of choice alternatives}
+    \item{\code{lgtdata}}{list of lists with each cross-section unit MNL data}
+    \item{\code{lgtdata[[i]]$y}}{ \eqn{n_i} vector of multinomial outcomes (1,\ldots,m)}
+    \item{\code{lgtdata[[i]]$X}}{ \eqn{n_i}*p by nvar design matrix for ith unit}
+    \item{\code{a}}{vector of length ncomp of Dirichlet prior parms (def: rep(5,ncomp))}
+    \item{\code{deltabar}}{nz*nvar vector of prior means (def: 0)}
+    \item{\code{Ad}}{ prior prec matrix for vec(D) (def: .01I)}
+    \item{\code{mubar}}{ nvar x 1 prior mean vector for normal comp mean (def: 0)}
+    \item{\code{Amu}}{ prior precision for normal comp mean (def: .01I)}
+    \item{\code{nu}}{ d.f. parm for IW prior on norm comp Sigma (def: nvar+3)}
+    \item{\code{V}}{ pds location parm for IW prior on norm comp Sigma (def: nuI)}
+    \item{\code{a}}{ Dirichlet prior parameter (def: rep(5,ncomp))}
+    \item{\code{ncomp}}{ number of components used in normal mixture }
+    \item{\code{s}}{ scaling parm for RW Metropolis (def: 2.93/sqrt(nvar))}
+    \item{\code{w}}{ fractional likelihood weighting parm (def: .1)}
+    \item{\code{R}}{ number of MCMC draws}
+    \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)}
+    \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
+  }
+}
+\value{
+  a list containing:
+  \item{Deltadraw}{R/keep  x nz*nvar matrix of draws of Delta, first row is initial value}
+  \item{betadraw}{ nlgt x nvar x R/keep array of draws of betas}
+  \item{nmix}{ list of 3 components, probdraw, NULL, compdraw }
+  \item{loglike}{ log-likelihood for each kept draw (length R/keep)}
+}
+\note{
+  More on \code{probdraw} component of nmix list:\cr 
+  R/keep x ncomp matrix of draws of probs of mixture components (pvec)  \cr
+  More on \code{compdraw} component of return value list: \cr
+  \itemize{
+  \item{compdraw[[i]]}{ the ith draw of components for mixtures}
+  \item{compdraw[[i]][[j]]}{ ith draw of the jth normal mixture comp}
+  \item{compdraw[[i]][[j]][[1]]}{ ith draw of jth normal mixture comp mean vector}
+  \item{compdraw[[i]][[j]][[2]]}{ ith draw of jth normal mixture cov parm (rooti) }
+  }
+
+  Note: Z should \strong{not} include an intercept and is centered for ease of interpretation. The mean of each of the \code{nlgt} \eqn{\beta}s is the mean of the normal mixture.  Use \code{summary()} to compute this mean from the \code{compdraw} output.\cr
+  
+  Be careful in assessing prior parameter, Amu.  .01 is too small for many applications. See 
+  Rossi et al, chapter 5 for full discussion.\cr
+
+  Note: as of version 2.0-2 of \code{bayesm}, the fractional weight parameter has been changed
+  to a weight between 0 and 1.  w is the fractional weight on the normalized pooled likelihood.
+  This differs from what is in Rossi et al chapter 5, i.e.
+
+  \eqn{like_i^{(1-w)} x like_pooled^{((n_i/N)*w)}}
+
+ 
+  Large R values may be required (>20,000).
+
+} 
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Rossi, Allenby and McCulloch, Chapter 5. \cr
+  \url{http://www.perossi.org/home/bsm-1}
+}
+
+\author{ Peter Rossi, Anderson School, UCLA,
+  \email{perossichi at gmail.com}.
+}
+  
+\seealso{ \code{\link{rmnlIndepMetrop}} }
+\examples{
+##
+if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=10000} else {R=10}
+
+set.seed(66)
+p=3                                # num of choice alterns
+ncoef=3  
+nlgt=300                           # num of cross sectional units
+nz=2
+Z=matrix(runif(nz*nlgt),ncol=nz)
+Z=t(t(Z)-apply(Z,2,mean))          # demean Z
+ncomp=3                                # no of mixture components
+Delta=matrix(c(1,0,1,0,1,2),ncol=2)
+comps=NULL
+comps[[1]]=list(mu=c(0,-1,-2),rooti=diag(rep(1,3)))
+comps[[2]]=list(mu=c(0,-1,-2)*2,rooti=diag(rep(1,3)))
+comps[[3]]=list(mu=c(0,-1,-2)*4,rooti=diag(rep(1,3)))
+pvec=c(.4,.2,.4)
+
+simmnlwX= function(n,X,beta) {
+  ##  simulate from MNL model conditional on X matrix
+  k=length(beta)
+  Xbeta=X\%*\%beta
+  j=nrow(Xbeta)/n
+  Xbeta=matrix(Xbeta,byrow=TRUE,ncol=j)
+  Prob=exp(Xbeta)
+  iota=c(rep(1,j))
+  denom=Prob\%*\%iota
+  Prob=Prob/as.vector(denom)
+  y=vector("double",n)
+  ind=1:j
+  for (i in 1:n) 
+      {yvec=rmultinom(1,1,Prob[i,]); y[i]=ind\%*\%yvec}
+  return(list(y=y,X=X,beta=beta,prob=Prob))
+}
+
+## simulate data
+simlgtdata=NULL
+ni=rep(50,300)
+for (i in 1:nlgt) 
+{  betai=Delta\%*\%Z[i,]+as.vector(rmixture(1,pvec,comps)$x)
+   Xa=matrix(runif(ni[i]*p,min=-1.5,max=0),ncol=p)
+   X=createX(p,na=1,nd=NULL,Xa=Xa,Xd=NULL,base=1)
+   outa=simmnlwX(ni[i],X,betai)
+   simlgtdata[[i]]=list(y=outa$y,X=X,beta=betai)
+}
+
+## plot betas
+if(0){
+## set if(1) above to produce plots
+bmat=matrix(0,nlgt,ncoef)
+for(i in 1:nlgt) {bmat[i,]=simlgtdata[[i]]$beta}
+par(mfrow=c(ncoef,1))
+for(i in 1:ncoef) hist(bmat[,i],breaks=30,col="magenta")
+}
+
+##   set parms for priors and Z
+Prior1=list(ncomp=5)
+
+keep=5
+Mcmc1=list(R=R,keep=keep)
+Data1=list(p=p,lgtdata=simlgtdata,Z=Z)
+
+out=rhierMnlRwMixture(Data=Data1,Prior=Prior1,Mcmc=Mcmc1)
+
+cat("Summary of Delta draws",fill=TRUE)
+summary(out$Deltadraw,tvalues=as.vector(Delta))
+cat("Summary of Normal Mixture Distribution",fill=TRUE)
+summary(out$nmix)
+
+if(0) {
+## plotting examples
+plot(out$betadraw)
+plot(out$nmix)
+}
+
+}
+
+\keyword{models}
diff --git a/man/rhierNegbinRw.Rd b/man/rhierNegbinRw.Rd
old mode 100755
new mode 100644
index 2068518..48f6fcc
--- a/man/rhierNegbinRw.Rd
+++ b/man/rhierNegbinRw.Rd
@@ -1,141 +1,143 @@
-\name{rhierNegbinRw}
-\alias{rhierNegbinRw}
-\concept{MCMC}
-\concept{hierarchical NBD regression}
-\concept{Negative Binomial regression}
-\concept{Poisson regression}
-\concept{Metropolis algorithm}
-\concept{bayes}
-\title{ MCMC Algorithm for Negative Binomial Regression }
-\description{
- \code{rhierNegbinRw} implements an MCMC strategy for the hierarchical Negative 
- Binomial (NBD) regression model. Metropolis steps for each unit level set of 
- regression parameters are automatically tuned by optimization. Over-dispersion
- parameter (alpha) is common across units.
-}
-\usage{
-rhierNegbinRw(Data, Prior, Mcmc)
-}
-\arguments{
-  \item{Data}{ list(regdata,Z) }
-  \item{Prior}{ list(Deltabar,Adelta,nu,V,a,b) }
-  \item{Mcmc}{ list(R,keep,s\_beta,s\_alpha,c,Vbeta0,Delta0) }
-}
-\details{
-  Model:   \eqn{y_i} \eqn{\sim}{~} NBD(mean=lambda, over-dispersion=alpha).  \cr
-           \eqn{lambda=exp(X_ibeta_i)}
-
-  Prior:   \eqn{beta_i} \eqn{\sim}{~} \eqn{N(Delta'z_i,Vbeta)}.
-
-           \eqn{vec(Delta|Vbeta)} \eqn{\sim}{~} \eqn{N(vec(Deltabar),Vbeta (x) Adelta)}. \cr
-           \eqn{Vbeta} \eqn{\sim}{~} \eqn{IW(nu,V)}. \cr
-           \eqn{alpha} \eqn{\sim}{~} \eqn{Gamma(a,b)}. \cr
-            note: prior mean of \eqn{alpha = a/b}, \eqn{variance = a/(b^2)}
-
-  list arguments contain:
-  \itemize{
-    \item{\code{regdata}}{ list of lists with data on each of nreg units}
-    \item{\code{regdata[[i]]$X}}{ nobs\_i x nvar matrix of X variables}
-    \item{\code{regdata[[i]]$y}}{ nobs\_i x 1 vector of count responses}
-    \item{\code{Z}}{nreg x nz mat of unit chars (def: vector of ones)}
-    \item{\code{Deltabar}}{ nz x nvar prior mean matrix (def: 0)}
-    \item{\code{Adelta}}{ nz x nz pds prior prec matrix (def: .01I)}
-    \item{\code{nu}}{ d.f. parm for IWishart (def: nvar+3)}
-    \item{\code{V}}{location matrix of IWishart prior (def: nuI)}
-    \item{\code{a}}{ Gamma prior parm (def: .5)}
-    \item{\code{b}}{ Gamma prior parm (def: .1)}
-    \item{\code{R}}{ number of MCMC draws}
-    \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)}
-    \item{\code{s\_beta}}{ scaling for beta| alpha RW inc cov (def: 2.93/sqrt(nvar))}
-    \item{\code{s\_alpha}}{ scaling for alpha | beta RW inc cov (def: 2.93)}
-    \item{\code{c}}{ fractional likelihood weighting parm (def:2)}
-    \item{\code{Vbeta0}}{ starting value for Vbeta (def: I)}
-    \item{\code{Delta0}}{ starting value for Delta (def: 0)}
-  }
-}
-\value{
-  a list containing: 
-  \item{llike}{R/keep vector of values of log-likelihood}
-  \item{betadraw}{nreg x nvar x R/keep array of beta draws}
-  \item{alphadraw}{R/keep vector of alpha draws}
-  \item{acceptrbeta}{acceptance rate of the beta draws}
-  \item{acceptralpha}{acceptance rate of the alpha draws}
-}
-\note{
-  The NBD regression encompasses Poisson regression in the sense that as alpha goes to
-  infinity the NBD distribution tends to the Poisson.\cr
-  For "small" values of alpha, the dependent variable can be extremely variable so that 
-  a large number of observations may be required to obtain precise inferences. 
-
-  For ease of interpretation, we recommend demeaning Z variables.
-}
-
-\seealso{ \code{\link{rnegbinRw}} }
-\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
-  by Rossi, Allenby and McCulloch, Chapter 5. \cr
-  \url{http://www.perossi.org/home/bsm-1}
-}
-
-\author{ Sridhar Narayanam & Peter Rossi, Anderson School, UCLA,
-  \email{perossichi at gmail.com}.
-}
-\examples{
-##
-if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=2000} else {R=10}
-##
-set.seed(66)
-simnegbin = 
-function(X, beta, alpha) {
-#   Simulate from the Negative Binomial Regression
-lambda = exp(X \%*\% beta)
-y=NULL
-for (j in 1:length(lambda))
-    y = c(y,rnbinom(1,mu = lambda[j],size = alpha))
-return(y)
-}
-
-nreg = 100        # Number of cross sectional units
-T = 50            # Number of observations per unit
-nobs = nreg*T
-nvar=2            # Number of X variables
-nz=2              # Number of Z variables
-              
-# Construct the Z matrix
-Z = cbind(rep(1,nreg),rnorm(nreg,mean=1,sd=0.125))
-
-Delta = cbind(c(4,2), c(0.1,-1))
-alpha = 5
-Vbeta = rbind(c(2,1),c(1,2))
-
-# Construct the regdata (containing X)
-simnegbindata = NULL
-for (i in 1:nreg) {
-    betai = as.vector(Z[i,]\%*\%Delta) + chol(Vbeta)\%*\%rnorm(nvar)
-    X = cbind(rep(1,T),rnorm(T,mean=2,sd=0.25))
-    simnegbindata[[i]] = list(y=simnegbin(X,betai,alpha), X=X,beta=betai)
-}
-
-Beta = NULL
-for (i in 1:nreg) {Beta=rbind(Beta,matrix(simnegbindata[[i]]$beta,nrow=1))}
-    
-Data1 = list(regdata=simnegbindata, Z=Z)
-Mcmc1 = list(R=R)
-
-out = rhierNegbinRw(Data=Data1, Mcmc=Mcmc1)
-
-cat("Summary of Delta draws",fill=TRUE)
-summary(out$Deltadraw,tvalues=as.vector(Delta))
-cat("Summary of Vbeta draws",fill=TRUE)
-summary(out$Vbetadraw,tvalues=as.vector(Vbeta[upper.tri(Vbeta,diag=TRUE)]))
-cat("Summary of alpha draws",fill=TRUE)
-summary(out$alpha,tvalues=alpha)
-
-if(0){
-## plotting examples
-plot(out$betadraw)
-plot(out$alpha,tvalues=alpha)
-plot(out$Deltadraw,tvalues=as.vector(Delta))
-}
-}
-
-\keyword{models}
+\name{rhierNegbinRw}
+\alias{rhierNegbinRw}
+\concept{MCMC}
+\concept{hierarchical NBD regression}
+\concept{Negative Binomial regression}
+\concept{Poisson regression}
+\concept{Metropolis algorithm}
+\concept{bayes}
+\title{ MCMC Algorithm for Negative Binomial Regression }
+\description{
+ \code{rhierNegbinRw} implements an MCMC strategy for the hierarchical Negative 
+ Binomial (NBD) regression model. Metropolis steps for each unit level set of 
+ regression parameters are automatically tuned by optimization. Over-dispersion
+ parameter (alpha) is common across units.
+}
+\usage{
+rhierNegbinRw(Data, Prior, Mcmc)
+}
+\arguments{
+  \item{Data}{ list(regdata,Z) }
+  \item{Prior}{ list(Deltabar,Adelta,nu,V,a,b) }
+  \item{Mcmc}{ list(R,keep,nprint,s_beta,s_alpha,c,Vbeta0,Delta0) }
+}
+\details{
+  Model:   \eqn{y_i} \eqn{\sim}{~} NBD(mean=\eqn{\lambda}, over-dispersion=alpha).  \cr
+           
+           \eqn{\lambda=exp(X_i\beta_i)}
+
+  Prior:   \eqn{\beta_i} \eqn{\sim}{~} \eqn{N(\Delta'z_i,Vbeta)}.
+
+           \eqn{vec(\Delta|Vbeta)} \eqn{\sim}{~} \eqn{N(vec(Deltabar),Vbeta (x) Adelta)}. \cr
+           \eqn{Vbeta} \eqn{\sim}{~} \eqn{IW(nu,V)}. \cr
+           \eqn{alpha} \eqn{\sim}{~} \eqn{Gamma(a,b)}. \cr
+            note: prior mean of \eqn{alpha = a/b}, \eqn{variance = a/(b^2)}
+
+  list arguments contain:
+  \itemize{
+    \item{\code{regdata}}{ list of lists with data on each of nreg units}
+    \item{\code{regdata[[i]]$X}}{ nobs\_i x nvar matrix of X variables}
+    \item{\code{regdata[[i]]$y}}{ nobs\_i x 1 vector of count responses}
+    \item{\code{Z}}{nreg x nz mat of unit chars (def: vector of ones)}
+    \item{\code{Deltabar}}{ nz x nvar prior mean matrix (def: 0)}
+    \item{\code{Adelta}}{ nz x nz pds prior prec matrix (def: .01I)}
+    \item{\code{nu}}{ d.f. parm for IWishart (def: nvar+3)}
+    \item{\code{V}}{location matrix of IWishart prior (def: nuI)}
+    \item{\code{a}}{ Gamma prior parm (def: .5)}
+    \item{\code{b}}{ Gamma prior parm (def: .1)}
+    \item{\code{R}}{ number of MCMC draws}
+    \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)}
+    \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
+    \item{\code{s_beta}}{ scaling for beta| alpha RW inc cov (def: 2.93/sqrt(nvar))}
+    \item{\code{s_alpha}}{ scaling for alpha | beta RW inc cov (def: 2.93)}
+    \item{\code{c}}{ fractional likelihood weighting parm (def:2)}
+    \item{\code{Vbeta0}}{ starting value for Vbeta (def: I)}
+    \item{\code{Delta0}}{ starting value for Delta (def: 0)}
+  }
+}
+\value{
+  a list containing: 
+  \item{llike}{R/keep vector of values of log-likelihood}
+  \item{betadraw}{nreg x nvar x R/keep array of beta draws}
+  \item{alphadraw}{R/keep vector of alpha draws}
+  \item{acceptrbeta}{acceptance rate of the beta draws}
+  \item{acceptralpha}{acceptance rate of the alpha draws}
+}
+\note{
+  The NBD regression encompasses Poisson regression in the sense that as alpha goes to
+  infinity the NBD distribution tends to the Poisson.\cr
+  For "small" values of alpha, the dependent variable can be extremely variable so that 
+  a large number of observations may be required to obtain precise inferences. 
+
+  For ease of interpretation, we recommend demeaning Z variables.
+}
+
+\seealso{ \code{\link{rnegbinRw}} }
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Rossi, Allenby and McCulloch, Chapter 5. \cr
+  \url{http://www.perossi.org/home/bsm-1}
+}
+
+\author{ Sridhar Narayanam & Peter Rossi, Anderson School, UCLA,
+  \email{perossichi at gmail.com}.
+}
+\examples{
+##
+if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=2000} else {R=10}
+##
+set.seed(66)
+simnegbin = 
+function(X, beta, alpha) {
+#   Simulate from the Negative Binomial Regression
+lambda = exp(X \%*\% beta)
+y=NULL
+for (j in 1:length(lambda))
+    y = c(y,rnbinom(1,mu = lambda[j],size = alpha))
+return(y)
+}
+
+nreg = 100        # Number of cross sectional units
+T = 50            # Number of observations per unit
+nobs = nreg*T
+nvar=2            # Number of X variables
+nz=2              # Number of Z variables
+              
+# Construct the Z matrix
+Z = cbind(rep(1,nreg),rnorm(nreg,mean=1,sd=0.125))
+
+Delta = cbind(c(4,2), c(0.1,-1))
+alpha = 5
+Vbeta = rbind(c(2,1),c(1,2))
+
+# Construct the regdata (containing X)
+simnegbindata = NULL
+for (i in 1:nreg) {
+    betai = as.vector(Z[i,]\%*\%Delta) + chol(Vbeta)\%*\%rnorm(nvar)
+    X = cbind(rep(1,T),rnorm(T,mean=2,sd=0.25))
+    simnegbindata[[i]] = list(y=simnegbin(X,betai,alpha), X=X,beta=betai)
+}
+
+Beta = NULL
+for (i in 1:nreg) {Beta=rbind(Beta,matrix(simnegbindata[[i]]$beta,nrow=1))}
+    
+Data1 = list(regdata=simnegbindata, Z=Z)
+Mcmc1 = list(R=R)
+
+out = rhierNegbinRw(Data=Data1, Mcmc=Mcmc1)
+
+cat("Summary of Delta draws",fill=TRUE)
+summary(out$Deltadraw,tvalues=as.vector(Delta))
+cat("Summary of Vbeta draws",fill=TRUE)
+summary(out$Vbetadraw,tvalues=as.vector(Vbeta[upper.tri(Vbeta,diag=TRUE)]))
+cat("Summary of alpha draws",fill=TRUE)
+summary(out$alpha,tvalues=alpha)
+
+if(0){
+## plotting examples
+plot(out$betadraw)
+plot(out$alpha,tvalues=alpha)
+plot(out$Deltadraw,tvalues=as.vector(Delta))
+}
+}
+
+\keyword{models}
diff --git a/man/rivDP.Rd b/man/rivDP.Rd
old mode 100755
new mode 100644
index d1fa5ca..351f915
--- a/man/rivDP.Rd
+++ b/man/rivDP.Rd
@@ -19,54 +19,82 @@ rivDP(Data, Prior, Mcmc)
 }
 \arguments{
   \item{Data}{ list(z,w,x,y) }
-  \item{Prior}{ list(md,Ad,mbg,Abg,lambda,Prioralpha) (optional) } 
-  \item{Mcmc}{ list(R,keep,SCALE) (R required) }
+  \item{Prior}{ list(md,Ad,mbg,Abg,lambda,Prioralpha,lambda_hyper) (optional) } 
+  \item{Mcmc}{ list(R,keep,nprint,maxuniq,SCALE,gridsize) (R required) }
 }
 \details{
   Model:\cr
-  \eqn{x=z'delta + e1}. \cr
-  \eqn{y=beta*x + w'gamma + e2}. \cr
-  \eqn{e1,e2} \eqn{\sim}{~} \eqn{N(theta_{i})}.  \eqn{theta_{i}} represents \eqn{mu_{i},Sigma_{i}}
+  \eqn{x=z'\delta + e1}. \cr
+  \eqn{y=\beta*x + w'\gamma + e2}. \cr
+  \eqn{e1,e2} \eqn{\sim}{~} \eqn{N(\theta_{i})}.  \eqn{\theta_{i}} represents \eqn{\mu_{i},\Sigma_{i}}
   
   Note: Error terms have non-zero means.  DO NOT include intercepts in the z or w matrices.  This is different
         from \code{rivGibbs} which requires intercepts to be included explicitly.
 
   Priors:\cr
-  \eqn{delta} \eqn{\sim}{~} \eqn{N(md,Ad^{-1})}.  \eqn{vec(beta,gamma)} \eqn{\sim}{~} \eqn{N(mbg,Abg^{-1})} \cr
+  \eqn{\delta} \eqn{\sim}{~} \eqn{N(md,Ad^{-1})}.  \eqn{vec(\beta,\gamma)} \eqn{\sim}{~} \eqn{N(mbg,Abg^{-1})} \cr
 
-  \eqn{theta_{i}\sim{~}G} \cr
+  \eqn{\theta_{i}} \eqn{\sim}{~} \eqn{G} \cr
 
   \eqn{G} \eqn{\sim}{~} \eqn{DP(alpha,G_{0})} \cr
  
-  \eqn{G_{0}} is the natural conjugate prior for \eqn{(mu,Sigma)}: \cr
-  \eqn{Sigma} \eqn{\sim}{~} \eqn{IW(nu,vI)} and  \eqn{mu | Sigma} \eqn{\sim}{~} \eqn{N(0,1/amu Sigma)} \cr
-  These parameters are collected together in the list \code{lambda}.  It is highly
+  \eqn{G_{0}} is the natural conjugate prior for \eqn{(\mu,\Sigma)}: \cr
+  \eqn{\Sigma} \eqn{\sim}{~} \eqn{IW(nu,vI)} and  \eqn{\mu|\Sigma} \eqn{\sim}{~} \eqn{N(0,\Sigma (x) a^{-1})} \cr
+  These parameters are collected together in the list \eqn{\lambda}.  It is highly
        recommended that you use the default settings for these hyper-parameters.\cr
+  
+  \eqn{\lambda(a,nu,v):}\cr
+  
+      \eqn{a} \eqn{\sim}{~} uniform[alim[1],alim[2]]\cr
+      \eqn{nu} \eqn{\sim}{~} dim(data)-1 + exp(z) \cr
+      \eqn{z} \eqn{\sim}{~} uniform[dim(data)-1+nulim[1],nulim[2]]\cr
+      \eqn{v} \eqn{\sim}{~} uniform[vlim[1],vlim[2]]
+       
 
   \eqn{alpha} \eqn{\sim}{~} \eqn{(1-(alpha-alpha_{min})/(alpha_{max}-alpha{min}))^{power}} \cr
    where \eqn{alpha_{min}} and \eqn{alpha_{max}} are set using the arguments in the reference
    below.  It is highly recommended that you use the default values for the hyperparameters
    of the prior on alpha
 
-  List arguments contain:
+List arguments contain:
+  
+Data:\cr
   \itemize{
     \item{\code{z}}{ matrix of obs on instruments}
     \item{\code{y}}{ vector of obs on lhs var in structural equation}
     \item{\code{x}}{ "endogenous" var in structural eqn}
-    \item{\code{w}}{ matrix of obs on "exogenous" vars in the structural eqn}
+    \item{\code{w}}{ matrix of obs on "exogenous" vars in the structural eqn}}
+Prior:\cr    
+  \itemize{
     \item{\code{md}}{ prior mean of delta (def: 0)}
     \item{\code{Ad}}{ pds prior prec for prior on delta (def: .01I)}
     \item{\code{mbg}}{ prior mean vector for prior on beta,gamma (def: 0)}
-    \item{\code{Abg}}{ pds prior prec  for prior on beta,gamma (def: .01I)}
-    \item{\code{lambda}}{ list of hyperparameters for theta prior- use default settings }
-    \item{\code{Prioralpha}}{ list of hyperparameters for theta prior- use default settings }
+    \item{\code{Abg}}{ pds prior prec  for prior on beta,gamma (def: .01I)}}
+Prioralpha:\cr
+ \itemize{
+  \item{\code{Istarmin}}{ expected number of components at lower bound of support of alpha (def: 1)}
+  \item{\code{Istarmax}}{ expected number of components at upper bound of support of alpha}
+  \item{\code{power}}{ power parameter for alpha prior (def: .8)}
+  }
+ 
+lambda_hyper:\cr
+  \itemize{
+   \item{\code{alim}}{ defines support of a distribution, def: c(.01,10) }
+   \item{\code{nulim}}{ defines support of nu distribution, def:c(.01,3)} 
+   \item{\code{vlim}}{ defines support of v distribution, def:c(.1,4)} 
+  }
+  
+MCMC:\cr  
+  \itemize{
     \item{\code{R}}{ number of MCMC draws}
-    \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)} 
-    \item{\code{SCALE}}{ scale data, def: TRUE}
+    \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)}
+    \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
+    \item{\code{maxuniq}}{ storage constraint on the number of unique components (def: 200)}
+    \item{\code{SCALE}}{ scale data (def: TRUE)}
     \item{\code{gridsize}}{ gridsize parm for alpha draws (def: 20)} 
   }
 
-  output includes object \code{nmix} of class "bayesm.nmix" which contains draws of predictive distribution of 
+output includes object \code{nmix} of class "bayesm.nmix" which contains draws of predictive distribution of 
   errors (a Bayesian analogue of a density estimate for the error terms).\cr
   nmix:\cr
   \itemize{
diff --git a/man/rivGibbs.Rd b/man/rivGibbs.Rd
old mode 100755
new mode 100644
index 18c7670..669d360
--- a/man/rivGibbs.Rd
+++ b/man/rivGibbs.Rd
@@ -1,98 +1,99 @@
-\name{rivGibbs}
-\alias{rivGibbs}
-\concept{Instrumental Variables}
-\concept{Gibbs Sampler}
-\concept{bayes}
-\concept{endogeneity}
-\concept{simultaneity}
-\concept{MCMC}
-
-\title{ Gibbs Sampler for Linear "IV" Model}
-\description{
-  \code{rivGibbs} is a Gibbs Sampler for a linear structural equation with an arbitrary number of instruments.
-}
-\usage{
-rivGibbs(Data, Prior, Mcmc)
-}
-\arguments{
-  \item{Data}{ list(z,w,x,y) }
-  \item{Prior}{ list(md,Ad,mbg,Abg,nu,V) (optional) } 
-  \item{Mcmc}{ list(R,keep) (R required) }
-}
-\details{
-  Model:\cr
-  \eqn{x=z'delta + e1}. \cr
-  \eqn{y=beta*x + w'gamma + e2}. \cr
-  \eqn{e1,e2} \eqn{\sim}{~} \eqn{N(0,Sigma)}. 
-  
-  Note: if intercepts are desired in either equation, include vector of ones in z or w
-
-  Priors:\cr
-  \eqn{delta} \eqn{\sim}{~} \eqn{N(md,Ad^{-1})}.  \eqn{vec(beta,gamma)} \eqn{\sim}{~} \eqn{N(mbg,Abg^{-1})} \cr
-  \eqn{Sigma} \eqn{\sim}{~} IW(nu,V)
-
-  List arguments contain:
-  \itemize{
-    \item{\code{z}}{ matrix of obs on instruments}
-    \item{\code{y}}{ vector of obs on lhs var in structural equation}
-    \item{\code{x}}{ "endogenous" var in structural eqn}
-    \item{\code{w}}{ matrix of obs on "exogenous" vars in the structural eqn}
-    \item{\code{md}}{ prior mean of delta (def: 0)}
-    \item{\code{Ad}}{ pds prior prec for prior on delta (def: .01I)}
-    \item{\code{mbg}}{ prior mean vector for prior on beta,gamma (def: 0)}
-    \item{\code{Abg}}{ pds prior prec  for prior on beta,gamma (def: .01I)}
-    \item{\code{nu}}{ d.f. parm for IW prior on Sigma (def: 5)}
-    \item{\code{V}}{ pds location matrix for IW prior on Sigma (def: nuI)}
-    \item{\code{R}}{ number of MCMC draws}
-    \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)} 
-  }
-}
-\value{
-  a list containing:
-  \item{deltadraw}{R/keep x dim(delta) array of delta draws}
-  \item{betadraw}{R/keep x 1 vector of beta draws}
-  \item{gammadraw}{R/keep x dim(gamma) array of gamma draws }
-  \item{Sigmadraw}{R/keep x 4 array of Sigma draws}
-}
-\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
-  by Rossi, Allenby and McCulloch, Chapter 5. \cr
-  \url{http://www.perossi.org/home/bsm-1}
-}
-
-\author{ Rob McCulloch and Peter Rossi, Anderson School, UCLA,
-  \email{perossichi at gmail.com}.
-}
-\examples{
-##
-if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=2000} else {R=10}
-
-set.seed(66)
-simIV = function(delta,beta,Sigma,n,z,w,gamma) {
-eps = matrix(rnorm(2*n),ncol=2) \%*\% chol(Sigma)
-x = z \%*\% delta + eps[,1]; y = beta*x +  eps[,2] + w\%*\%gamma
-list(x=as.vector(x),y=as.vector(y)) }
-n = 200 ; p=1 # number of instruments
-z = cbind(rep(1,n),matrix(runif(n*p),ncol=p))
-w = matrix(1,n,1)
-rho=.8
-Sigma = matrix(c(1,rho,rho,1),ncol=2)
-delta = c(1,4); beta = .5; gamma = c(1)
-simiv = simIV(delta,beta,Sigma,n,z,w,gamma)
-
-Mcmc1=list();  Data1 = list()
-Data1$z = z; Data1$w=w; Data1$x=simiv$x; Data1$y=simiv$y
-Mcmc1$R = R
-Mcmc1$keep=1
-out=rivGibbs(Data=Data1,Mcmc=Mcmc1)
-
-cat("Summary of Beta draws",fill=TRUE)
-summary(out$betadraw,tvalues=beta)
-cat("Summary of Sigma draws",fill=TRUE)
-summary(out$Sigmadraw,tvalues=as.vector(Sigma[upper.tri(Sigma,diag=TRUE)]))
-
-if(0){
-## plotting examples
-plot(out$betadraw)
-}
-}
-\keyword{ models }
+\name{rivGibbs}
+\alias{rivGibbs}
+\concept{Instrumental Variables}
+\concept{Gibbs Sampler}
+\concept{bayes}
+\concept{endogeneity}
+\concept{simultaneity}
+\concept{MCMC}
+
+\title{ Gibbs Sampler for Linear "IV" Model}
+\description{
+  \code{rivGibbs} is a Gibbs Sampler for a linear structural equation with an arbitrary number of instruments.
+}
+\usage{
+rivGibbs(Data, Prior, Mcmc)
+}
+\arguments{
+  \item{Data}{ list(z,w,x,y) }
+  \item{Prior}{ list(md,Ad,mbg,Abg,nu,V) (optional) } 
+  \item{Mcmc}{ list(R,keep,nprint) (R required) }
+}
+\details{
+  Model:\cr
+  \eqn{x=z'\delta + e1}. \cr
+  \eqn{y=\beta*x + w'\gamma + e2}. \cr
+  \eqn{e1,e2} \eqn{\sim}{~} \eqn{N(0,\Sigma)}. 
+  
+  Note: if intercepts are desired in either equation, include vector of ones in z or w
+
+  Priors:\cr
+  \eqn{\delta} \eqn{\sim}{~} \eqn{N(md,Ad^{-1})}.  \eqn{vec(\beta,\gamma)} \eqn{\sim}{~} \eqn{N(mbg,Abg^{-1})} \cr
+  \eqn{\Sigma} \eqn{\sim}{~} IW(nu,V)
+
+  List arguments contain:
+  \itemize{
+    \item{\code{z}}{ matrix of obs on instruments}
+    \item{\code{y}}{ vector of obs on lhs var in structural equation}
+    \item{\code{x}}{ "endogenous" var in structural eqn}
+    \item{\code{w}}{ matrix of obs on "exogenous" vars in the structural eqn}
+    \item{\code{md}}{ prior mean of delta (def: 0)}
+    \item{\code{Ad}}{ pds prior prec for prior on delta (def: .01I)}
+    \item{\code{mbg}}{ prior mean vector for prior on beta,gamma (def: 0)}
+    \item{\code{Abg}}{ pds prior prec  for prior on beta,gamma (def: .01I)}
+    \item{\code{nu}}{ d.f. parm for IW prior on Sigma (def: 5)}
+    \item{\code{V}}{ pds location matrix for IW prior on Sigma (def: nuI)}
+    \item{\code{R}}{ number of MCMC draws}
+    \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)}
+    \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
+  }
+}
+\value{
+  a list containing:
+  \item{deltadraw}{R/keep x dim(delta) array of delta draws}
+  \item{betadraw}{R/keep x 1 vector of beta draws}
+  \item{gammadraw}{R/keep x dim(gamma) array of gamma draws }
+  \item{Sigmadraw}{R/keep x 4 array of Sigma draws}
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Rossi, Allenby and McCulloch, Chapter 5. \cr
+  \url{http://www.perossi.org/home/bsm-1}
+}
+
+\author{ Rob McCulloch and Peter Rossi, Anderson School, UCLA,
+  \email{perossichi at gmail.com}.
+}
+\examples{
+##
+if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=2000} else {R=10}
+
+set.seed(66)
+simIV = function(delta,beta,Sigma,n,z,w,gamma) {
+eps = matrix(rnorm(2*n),ncol=2) \%*\% chol(Sigma)
+x = z \%*\% delta + eps[,1]; y = beta*x +  eps[,2] + w\%*\%gamma
+list(x=as.vector(x),y=as.vector(y)) }
+n = 200 ; p=1 # number of instruments
+z = cbind(rep(1,n),matrix(runif(n*p),ncol=p))
+w = matrix(1,n,1)
+rho=.8
+Sigma = matrix(c(1,rho,rho,1),ncol=2)
+delta = c(1,4); beta = .5; gamma = c(1)
+simiv = simIV(delta,beta,Sigma,n,z,w,gamma)
+
+Mcmc1=list();  Data1 = list()
+Data1$z = z; Data1$w=w; Data1$x=simiv$x; Data1$y=simiv$y
+Mcmc1$R = R
+Mcmc1$keep=1
+out=rivGibbs(Data=Data1,Mcmc=Mcmc1)
+
+cat("Summary of Beta draws",fill=TRUE)
+summary(out$betadraw,tvalues=beta)
+cat("Summary of Sigma draws",fill=TRUE)
+summary(out$Sigmadraw,tvalues=as.vector(Sigma[upper.tri(Sigma,diag=TRUE)]))
+
+if(0){
+## plotting examples
+plot(out$betadraw)
+}
+}
+\keyword{ models }
diff --git a/man/rmixGibbs.Rd b/man/rmixGibbs.Rd
old mode 100755
new mode 100644
index 4b6a542..a39722d
--- a/man/rmixGibbs.Rd
+++ b/man/rmixGibbs.Rd
@@ -5,7 +5,7 @@
   \code{rmixGibbs} makes one draw using the Gibbs Sampler for a mixture of multivariate normals.
 }
 \usage{
-rmixGibbs(y, Bbar, A, nu, V, a, p, z, comps)
+rmixGibbs(y, Bbar, A, nu, V, a, p, z)
 }
 \arguments{
   \item{y}{ data array - rows are obs }
@@ -16,7 +16,6 @@ rmixGibbs(y, Bbar, A, nu, V, a, p, z, comps)
   \item{a}{ Dirichlet prior parms }
   \item{p}{ prior prob of each mixture component }
   \item{z}{ component identities for each observation -- "indicators"}
-  \item{comps}{ list of components for the normal mixture   }
 }
 \details{
   \code{rmixGibbs} is not designed to be called directly. Instead, use \code{rnmixGibbs} wrapper function.
diff --git a/man/rmixture.Rd b/man/rmixture.Rd
old mode 100755
new mode 100644
index 43ecc23..e7dc8e5
--- a/man/rmixture.Rd
+++ b/man/rmixture.Rd
@@ -17,7 +17,7 @@ rmixture(n, pvec, comps)
 }
 \details{
   comps is a list of length, ncomp = length(pvec). comps[[j]][[1]] is mean vector for the jth component. 
-  comps[[j]][[2]] is the inverse of the cholesky root of Sigma for that component
+  comps[[j]][[2]] is the inverse of the cholesky root of \eqn{\Sigma} for that component
 }
 \value{
   A list containing \ldots
diff --git a/man/rmnlIndepMetrop.Rd b/man/rmnlIndepMetrop.Rd
old mode 100755
new mode 100644
index 72db34c..04765d0
--- a/man/rmnlIndepMetrop.Rd
+++ b/man/rmnlIndepMetrop.Rd
@@ -1,93 +1,94 @@
-\name{rmnlIndepMetrop}
-\alias{rmnlIndepMetrop}
-\concept{MCMC}
-\concept{multinomial logit}
-\concept{Metropolis algorithm}
-\concept{bayes}
-\title{ MCMC Algorithm for Multinomial Logit Model }
-\description{
-  \code{rmnIndepMetrop} implements Independence Metropolis for the MNL.
-}
-\usage{
-rmnlIndepMetrop(Data, Prior, Mcmc)
-}
-\arguments{
-  \item{Data}{ list(p,y,X)}
-  \item{Prior}{ list(A,betabar)  optional}
-  \item{Mcmc}{ list(R,keep,nu) }
-}
-\details{
-  Model:   y \eqn{\sim}{~} MNL(X,beta). \eqn{Pr(y=j) = exp(x_j'beta)/\sum_k{e^{x_k'beta}}}. \cr
-
-  Prior:   \eqn{beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})} \cr
-
-  list arguments contain:
-  \itemize{
-    \item{\code{p}}{number of alternatives}
-    \item{\code{y}}{ nobs vector of multinomial outcomes (1,\ldots, p)}
-    \item{\code{X}}{nobs*p x nvar matrix}
-    \item{\code{A}}{ nvar x nvar pds prior prec matrix (def: .01I)}
-    \item{\code{betabar}}{ nvar x 1 prior mean (def: 0)}
-    \item{\code{R}}{ number of MCMC draws}
-    \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)}
-    \item{\code{nu}}{ degrees of freedom parameter for independence t density (def: 6) }
-  }
-}
-\value{
-  a list containing: 
-  \item{betadraw}{R/keep x nvar array of beta draws}
-  \item{loglike}{R/keep vector of loglike values for each draw}
-  \item{acceptr}{acceptance rate of Metropolis draws}
-}
-\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
-  by Rossi, Allenby and McCulloch, Chapter 5. \cr
-  \url{http://www.perossi.org/home/bsm-1l}
-}
-\author{ Peter Rossi, Anderson School, UCLA,
-  \email{perossichi at gmail.com}.
-}
-\seealso{ \code{\link{rhierMnlRwMixture}} }
-\examples{
-##
-
-if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=2000} else {R=10}
-
-set.seed(66)
-n=200; p=3; beta=c(1,-1,1.5,.5)
-
-simmnl= function(p,n,beta) {
-  #   note: create X array with 2 alt.spec vars
-  k=length(beta)
-  X1=matrix(runif(n*p,min=-1,max=1),ncol=p)
-  X2=matrix(runif(n*p,min=-1,max=1),ncol=p)
-  X=createX(p,na=2,nd=NULL,Xd=NULL,Xa=cbind(X1,X2),base=1)
-  Xbeta=X\%*\%beta # now do probs
-  p=nrow(Xbeta)/n
-  Xbeta=matrix(Xbeta,byrow=TRUE,ncol=p)
-  Prob=exp(Xbeta)
-  iota=c(rep(1,p))
-  denom=Prob\%*\%iota
-  Prob=Prob/as.vector(denom)
-  # draw y
-  y=vector("double",n)
-  ind=1:p
-  for (i in 1:n) 
-        { yvec=rmultinom(1,1,Prob[i,]); y[i]=ind\%*\%yvec }
-   return(list(y=y,X=X,beta=beta,prob=Prob))
-}
-
-simout=simmnl(p,n,beta)
-
-Data1=list(y=simout$y,X=simout$X,p=p); Mcmc1=list(R=R,keep=1)
-out=rmnlIndepMetrop(Data=Data1,Mcmc=Mcmc1)
-
-cat("Summary of beta draws",fill=TRUE)
-summary(out$betadraw,tvalues=beta)
-
-if(0){
-## plotting examples
-plot(out$betadraw)
-}
-
-}
-\keyword{ models }
+\name{rmnlIndepMetrop}
+\alias{rmnlIndepMetrop}
+\concept{MCMC}
+\concept{multinomial logit}
+\concept{Metropolis algorithm}
+\concept{bayes}
+\title{ MCMC Algorithm for Multinomial Logit Model }
+\description{
+  \code{rmnlIndepMetrop} implements Independence Metropolis for the MNL.
+}
+\usage{
+rmnlIndepMetrop(Data, Prior, Mcmc)
+}
+\arguments{
+  \item{Data}{ list(p,y,X)}
+  \item{Prior}{ list(A,betabar)  optional}
+  \item{Mcmc}{ list(R,keep,nprint,nu) }
+}
+\details{
+  Model:   y \eqn{\sim}{~} MNL(X,\eqn{\beta}). \eqn{\Pr(y=j) = exp(x_j'\beta)/\sum_k{e^{x_k'\beta}}}. \cr
+
+  Prior:   \eqn{\beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})} \cr
+
+  list arguments contain:
+  \itemize{
+    \item{\code{p}}{ number of alternatives}
+    \item{\code{y}}{ nobs vector of multinomial outcomes (1,\ldots, p)}
+    \item{\code{X}}{ nobs*p x nvar matrix}
+    \item{\code{A}}{ nvar x nvar pds prior prec matrix (def: .01I)}
+    \item{\code{betabar}}{ nvar x 1 prior mean (def: 0)}
+    \item{\code{R}}{ number of MCMC draws}
+    \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)}
+    \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
+    \item{\code{nu}}{ degrees of freedom parameter for independence t density (def: 6) }
+  }
+}
+\value{
+  a list containing: 
+  \item{betadraw}{R/keep x nvar array of beta draws}
+  \item{loglike}{R/keep vector of loglike values for each draw}
+  \item{acceptr}{acceptance rate of Metropolis draws}
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Rossi, Allenby and McCulloch, Chapter 5. \cr
+  \url{http://www.perossi.org/home/bsm-1}
+}
+\author{ Peter Rossi, Anderson School, UCLA,
+  \email{perossichi at gmail.com}.
+}
+\seealso{ \code{\link{rhierMnlRwMixture}} }
+\examples{
+##
+
+if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=2000} else {R=10}
+
+set.seed(66)
+n=200; p=3; beta=c(1,-1,1.5,.5)
+
+simmnl= function(p,n,beta) {
+  #   note: create X array with 2 alt.spec vars
+  k=length(beta)
+  X1=matrix(runif(n*p,min=-1,max=1),ncol=p)
+  X2=matrix(runif(n*p,min=-1,max=1),ncol=p)
+  X=createX(p,na=2,nd=NULL,Xd=NULL,Xa=cbind(X1,X2),base=1)
+  Xbeta=X\%*\%beta # now do probs
+  p=nrow(Xbeta)/n
+  Xbeta=matrix(Xbeta,byrow=TRUE,ncol=p)
+  Prob=exp(Xbeta)
+  iota=c(rep(1,p))
+  denom=Prob\%*\%iota
+  Prob=Prob/as.vector(denom)
+  # draw y
+  y=vector("double",n)
+  ind=1:p
+  for (i in 1:n) 
+        { yvec=rmultinom(1,1,Prob[i,]); y[i]=ind\%*\%yvec }
+   return(list(y=y,X=X,beta=beta,prob=Prob))
+}
+
+simout=simmnl(p,n,beta)
+
+Data1=list(y=simout$y,X=simout$X,p=p); Mcmc1=list(R=R,keep=1)
+out=rmnlIndepMetrop(Data=Data1,Mcmc=Mcmc1)
+
+cat("Summary of beta draws",fill=TRUE)
+summary(out$betadraw,tvalues=beta)
+
+if(0){
+## plotting examples
+plot(out$betadraw)
+}
+
+}
+\keyword{ models }
diff --git a/man/rmnpGibbs.Rd b/man/rmnpGibbs.Rd
old mode 100755
new mode 100644
index 0725212..a557bf5
--- a/man/rmnpGibbs.Rd
+++ b/man/rmnpGibbs.Rd
@@ -1,119 +1,120 @@
-\name{rmnpGibbs}
-\alias{rmnpGibbs}
-\concept{bayes}
-\concept{multinomial probit}
-\concept{MCMC}
-\concept{Gibbs Sampling}
-
-\title{ Gibbs Sampler for Multinomial Probit }
-\description{
-  \code{rmnpGibbs} implements the McCulloch/Rossi Gibbs Sampler for the multinomial probit model.
-}
-
-\usage{
-rmnpGibbs(Data, Prior, Mcmc)
-}
-
-\arguments{
-  \item{Data}{ list(p, y, X)}
-  \item{Prior}{ list(betabar,A,nu,V) (optional)}
-  \item{Mcmc}{ list(beta0,sigma0,R,keep) (R required) }
-}
-
-\details{
-  model:  \cr
-    \eqn{w_i = X_i\beta + e}.    \eqn{e} \eqn{\sim}{~} \eqn{N(0,Sigma)}.     note: \eqn{w_i, e} are (p-1) x 1.\cr
-    \eqn{y_i = j},  if \eqn{w_{ij} > max(0,w_{i,-j})}  j=1,\ldots,p-1.  \eqn{w_{i,-j}} means elements of \eqn{w_i}
-     other than the jth. \cr
-    \eqn{y_i = p},  if all \eqn{w_i < 0}.\cr
-  
-  priors:\cr
-    \eqn{beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})} \cr
-    \eqn{Sigma} \eqn{\sim}{~} IW(nu,V)\cr
-
-  to make up X matrix use \code{\link{createX}} with \code{DIFF=TRUE}.
-
-  List arguments contain  
-  \itemize{
-    \item{\code{p}}{number of choices or possible multinomial outcomes}
-    \item{\code{y}}{n x 1 vector of multinomial outcomes}
-    \item{\code{X}}{n*(p-1) x k Design Matrix}
-    \item{\code{betabar}}{k x 1 prior mean (def: 0)}
-    \item{\code{A}}{k x k prior precision matrix (def: .01I)} 
-    \item{\code{nu}}{ d.f. parm for IWishart prior (def: (p-1) + 3)}
-    \item{\code{V}}{ pds location parm for IWishart prior (def: nu*I)}
-    \item{\code{beta0}}{ initial value for beta}
-    \item{\code{sigma0}}{ initial value for sigma }
-    \item{\code{R}}{ number of MCMC draws }
-    \item{\code{keep}}{ thinning parameter - keep every keepth draw (def: 1)}
-  }
-}
-
-\value{
-  a list containing: 
-  \item{betadraw }{R/keep x k array of betadraws}
-  \item{sigmadraw}{R/keep x (p-1)*(p-1) array of sigma draws -- each row is in vector form}
-}
-\note{
-  beta is not identified.  beta/sqrt(\eqn{sigma_{11}}) and Sigma/\eqn{sigma_{11}} are.  See Allenby et al or
-  example below for details.
-}
-
-\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
-  by Rossi, Allenby and McCulloch, Chapter 4. \cr
-  \url{http://www.perossi.org/home/bsm-1l}
-}
-
-\author{ Peter Rossi, Anderson School, UCLA,
-  \email{perossichi at gmail.com}.
-}
-
-\seealso{ \code{\link{rmvpGibbs}} }
-\examples{
-##
-if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=2000} else {R=10}
-
-set.seed(66)
-p=3
-n=500
-beta=c(-1,1,1,2)
-Sigma=matrix(c(1,.5,.5,1),ncol=2)
-k=length(beta)
-X1=matrix(runif(n*p,min=0,max=2),ncol=p); X2=matrix(runif(n*p,min=0,max=2),ncol=p)
-X=createX(p,na=2,nd=NULL,Xa=cbind(X1,X2),Xd=NULL,DIFF=TRUE,base=p)
-
-simmnp= function(X,p,n,beta,sigma) {
-  indmax=function(x) {which(max(x)==x)}
-  Xbeta=X\%*\%beta
-  w=as.vector(crossprod(chol(sigma),matrix(rnorm((p-1)*n),ncol=n)))+ Xbeta
-  w=matrix(w,ncol=(p-1),byrow=TRUE)
-  maxw=apply(w,1,max)
-  y=apply(w,1,indmax)
-  y=ifelse(maxw < 0,p,y)
-  return(list(y=y,X=X,beta=beta,sigma=sigma))
-}
-
-simout=simmnp(X,p,500,beta,Sigma)
-
-Data1=list(p=p,y=simout$y,X=simout$X)
-Mcmc1=list(R=R,keep=1)
-
-out=rmnpGibbs(Data=Data1,Mcmc=Mcmc1)
-
-cat(" Summary of Betadraws ",fill=TRUE)
-betatilde=out$betadraw/sqrt(out$sigmadraw[,1])
-attributes(betatilde)$class="bayesm.mat"
-summary(betatilde,tvalues=beta)
-
-cat(" Summary of Sigmadraws ",fill=TRUE)
-sigmadraw=out$sigmadraw/out$sigmadraw[,1]
-attributes(sigmadraw)$class="bayesm.var"
-summary(sigmadraw,tvalues=as.vector(Sigma[upper.tri(Sigma,diag=TRUE)]))
-
-
-if(0){
-## plotting examples
-plot(betatilde,tvalues=beta)
-}
-}
-\keyword{ models }
+\name{rmnpGibbs}
+\alias{rmnpGibbs}
+\concept{bayes}
+\concept{multinomial probit}
+\concept{MCMC}
+\concept{Gibbs Sampling}
+
+\title{ Gibbs Sampler for Multinomial Probit }
+\description{
+  \code{rmnpGibbs} implements the McCulloch/Rossi Gibbs Sampler for the multinomial probit model.
+}
+
+\usage{
+rmnpGibbs(Data, Prior, Mcmc)
+}
+
+\arguments{
+  \item{Data}{ list(p, y, X)}
+  \item{Prior}{ list(betabar,A,nu,V) (optional)}
+  \item{Mcmc}{ list(beta0,sigma0,R,keep,nprint) (R required) }
+}
+
+\details{
+  model:  \cr
+    \eqn{w_i = X_i\beta + e}.    \eqn{e} \eqn{\sim}{~} \eqn{N(0,\Sigma)}.     note: \eqn{w_i, e} are (p-1) x 1.\cr
+    \eqn{y_i = j},  if \eqn{w_{ij} > max(0,w_{i,-j})}  j=1,\ldots,p-1.  \eqn{w_{i,-j}} means elements of \eqn{w_i}
+     other than the jth. \cr
+    \eqn{y_i = p},  if all \eqn{w_i < 0}.\cr
+  
+  priors:\cr
+    \eqn{\beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})} \cr
+    \eqn{\Sigma} \eqn{\sim}{~} IW(nu,V)\cr
+
+  to make up X matrix use \code{\link{createX}} with \code{DIFF=TRUE}.
+
+  List arguments contain  
+  \itemize{
+    \item{\code{p}}{number of choices or possible multinomial outcomes}
+    \item{\code{y}}{n x 1 vector of multinomial outcomes}
+    \item{\code{X}}{n*(p-1) x k Design Matrix}
+    \item{\code{betabar}}{k x 1 prior mean (def: 0)}
+    \item{\code{A}}{k x k prior precision matrix (def: .01I)} 
+    \item{\code{nu}}{ d.f. parm for IWishart prior (def: (p-1) + 3)}
+    \item{\code{V}}{ pds location parm for IWishart prior (def: nu*I)}
+    \item{\code{beta0}}{ initial value for beta}
+    \item{\code{sigma0}}{ initial value for sigma }
+    \item{\code{R}}{ number of MCMC draws }
+    \item{\code{keep}}{ thinning parameter - keep every keepth draw (def: 1)}
+    \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
+  }
+}
+
+\value{
+  a list containing: 
+  \item{betadraw }{R/keep x k array of betadraws}
+  \item{sigmadraw}{R/keep x (p-1)*(p-1) array of sigma draws -- each row is in vector form}
+}
+\note{
+  \eqn{\beta} is not identified.  \eqn{\beta}/sqrt(\eqn{\sigma_{11}}) and \eqn{\Sigma}/\eqn{\sigma_{11}} are.  See Allenby et al or
+  example below for details.
+}
+
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Rossi, Allenby and McCulloch, Chapter 4. \cr
+  \url{http://www.perossi.org/home/bsm-1}
+}
+
+\author{ Peter Rossi, Anderson School, UCLA,
+  \email{perossichi at gmail.com}.
+}
+
+\seealso{ \code{\link{rmvpGibbs}} }
+\examples{
+##
+if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=2000} else {R=10}
+
+set.seed(66)
+p=3
+n=500
+beta=c(-1,1,1,2)
+Sigma=matrix(c(1,.5,.5,1),ncol=2)
+k=length(beta)
+X1=matrix(runif(n*p,min=0,max=2),ncol=p); X2=matrix(runif(n*p,min=0,max=2),ncol=p)
+X=createX(p,na=2,nd=NULL,Xa=cbind(X1,X2),Xd=NULL,DIFF=TRUE,base=p)
+
+simmnp= function(X,p,n,beta,sigma) {
+  indmax=function(x) {which(max(x)==x)}
+  Xbeta=X\%*\%beta
+  w=as.vector(crossprod(chol(sigma),matrix(rnorm((p-1)*n),ncol=n)))+ Xbeta
+  w=matrix(w,ncol=(p-1),byrow=TRUE)
+  maxw=apply(w,1,max)
+  y=apply(w,1,indmax)
+  y=ifelse(maxw < 0,p,y)
+  return(list(y=y,X=X,beta=beta,sigma=sigma))
+}
+
+simout=simmnp(X,p,500,beta,Sigma)
+
+Data1=list(p=p,y=simout$y,X=simout$X)
+Mcmc1=list(R=R,keep=1)
+
+out=rmnpGibbs(Data=Data1,Mcmc=Mcmc1)
+
+cat(" Summary of Betadraws ",fill=TRUE)
+betatilde=out$betadraw/sqrt(out$sigmadraw[,1])
+attributes(betatilde)$class="bayesm.mat"
+summary(betatilde,tvalues=beta)
+
+cat(" Summary of Sigmadraws ",fill=TRUE)
+sigmadraw=out$sigmadraw/out$sigmadraw[,1]
+attributes(sigmadraw)$class="bayesm.var"
+summary(sigmadraw,tvalues=as.vector(Sigma[upper.tri(Sigma,diag=TRUE)]))
+
+
+if(0){
+## plotting examples
+plot(betatilde,tvalues=beta)
+}
+}
+\keyword{ models }
diff --git a/man/rmultireg.Rd b/man/rmultireg.Rd
old mode 100755
new mode 100644
index e47a666..f18e48f
--- a/man/rmultireg.Rd
+++ b/man/rmultireg.Rd
@@ -23,11 +23,11 @@ rmultireg(Y, X, Bbar, A, nu, V)
   \item{V}{ m x m pdf location parameter for prior on Sigma }
 }
 \details{
-  Model: \eqn{Y=XB+U}.  \eqn{cov(u_i) = Sigma}.  \eqn{B} is k x m matrix of coefficients. \eqn{Sigma} is m x m covariance.
+  Model: \eqn{Y=XB+U}.  \eqn{cov(u_i) = \Sigma}.  \eqn{B} is k x m matrix of coefficients. \eqn{\Sigma} is m x m covariance.
 
-  Priors:  \eqn{beta} given \eqn{Sigma}  \eqn{\sim}{~} \eqn{N(betabar,Sigma (x) A^{-1})}. 
-  \eqn{betabar=vec(Bbar)};  \eqn{beta = vec(B)} \cr
-          \eqn{Sigma} \eqn{\sim}{~} IW(nu,V). 
+  Priors:  \eqn{\beta} given \eqn{\Sigma}  \eqn{\sim}{~} \eqn{N(betabar,\Sigma (x) A^{-1})}. 
+  \eqn{betabar=vec(Bbar)};  \eqn{\beta = vec(B)} \cr
+          \eqn{\Sigma} \eqn{\sim}{~} IW(nu,V). 
 }
 \value{
   A list of the components of a draw from the posterior
diff --git a/man/rmvpGibbs.Rd b/man/rmvpGibbs.Rd
old mode 100755
new mode 100644
index dc421da..365c7c1
--- a/man/rmvpGibbs.Rd
+++ b/man/rmvpGibbs.Rd
@@ -17,17 +17,17 @@ rmvpGibbs(Data, Prior, Mcmc)
 \arguments{
   \item{Data}{ list(p,y,X)}
   \item{Prior}{ list(betabar,A,nu,V) (optional)}
-  \item{Mcmc}{ list(beta0,sigma0,R,keep) (R required) }
+  \item{Mcmc}{ list(beta0,sigma0,R,keep,nprint) (R required) }
 }
 
 \details{
   model:  \cr
-    \eqn{w_i = X_i beta + e}.    \eqn{e} \eqn{\sim}{~} N(0,Sigma).     note: \eqn{w_i} is p x 1.\cr
+    \eqn{w_i = X_i\beta + e}.    \eqn{e} \eqn{\sim}{~} N(0,\eqn{\Sigma}).     note: \eqn{w_i} is p x 1.\cr
     \eqn{y_{ij} = 1},  if \eqn{w_{ij} > 0}, else \eqn{y_i=0}.  j=1,\ldots,p.   \cr
   
   priors:\cr
-    \eqn{beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})}\cr
-    \eqn{Sigma} \eqn{\sim}{~} IW(nu,V)\cr
+    \eqn{\beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})}\cr
+    \eqn{\Sigma} \eqn{\sim}{~} IW(nu,V)\cr
 
   to make up X matrix use \code{createX}
 
@@ -44,6 +44,7 @@ rmvpGibbs(Data, Prior, Mcmc)
     \item{\code{sigma0}}{ initial value for sigma }
     \item{\code{R}}{ number of MCMC draws }
     \item{\code{keep}}{ thinning parameter - keep every keepth draw (def: 1)}
+    \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
   }
 }
 
diff --git a/man/rnegbinRw.Rd b/man/rnegbinRw.Rd
old mode 100755
new mode 100644
index 5a77230..68d6fdf
--- a/man/rnegbinRw.Rd
+++ b/man/rnegbinRw.Rd
@@ -18,13 +18,13 @@ rnegbinRw(Data, Prior, Mcmc)
 \arguments{
   \item{Data}{ list(y,X) }
   \item{Prior}{ list(betabar,A,a,b) }
-  \item{Mcmc}{ list(R,keep,s\_beta,s\_alpha,beta0 }
+  \item{Mcmc}{ list(R,keep,nprint,s_beta,s_alpha,beta0) }
 }
 \details{
-  Model:   \eqn{y} \eqn{\sim}{~} \eqn{NBD(mean=lambda, over-dispersion=alpha)}.  \cr
-           \eqn{lambda=exp(x'beta)}
+  Model:   \eqn{y} \eqn{\sim}{~} \eqn{NBD(mean=\lambda, over-dispersion=alpha)}.  \cr
+           \eqn{\lambda=exp(x'\beta)}
 
-  Prior:   \eqn{beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})} \cr
+  Prior:   \eqn{\beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})} \cr
            \eqn{alpha} \eqn{\sim}{~} \eqn{Gamma(a,b)}. \cr
             note: prior mean of \eqn{alpha = a/b}, \eqn{variance = a/(b^2)}
 
@@ -38,8 +38,9 @@ rnegbinRw(Data, Prior, Mcmc)
     \item{\code{b}}{ Gamma prior parm (def: .1)}
     \item{\code{R}}{ number of MCMC draws}
     \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)}
-    \item{\code{s\_beta}}{ scaling for beta| alpha RW inc cov matrix (def: 2.93/sqrt(nvar)}
-    \item{\code{s\_alpha}}{ scaling for alpha | beta RW inc cov matrix (def: 2.93)}
+    \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
+    \item{\code{s_beta}}{ scaling for beta| alpha RW inc cov matrix (def: 2.93/sqrt(nvar))}
+    \item{\code{s_alpha}}{ scaling for alpha | beta RW inc cov matrix (def: 2.93)}
   }
 }
 \value{
diff --git a/man/rnmixGibbs.Rd b/man/rnmixGibbs.Rd
old mode 100755
new mode 100644
index d59f61b..80a2666
--- a/man/rnmixGibbs.Rd
+++ b/man/rnmixGibbs.Rd
@@ -15,16 +15,16 @@ rnmixGibbs(Data, Prior, Mcmc)
 \arguments{
   \item{Data}{ list(y) }
   \item{Prior}{ list(Mubar,A,nu,V,a,ncomp) (only ncomp required)}
-  \item{Mcmc}{ list(R,keep,Loglike) (R required) }
+  \item{Mcmc}{ list(R,keep,nprint,Loglike) (R required) }
 }
 \details{
   Model: \cr
-        \eqn{y_i} \eqn{\sim}{~} \eqn{N(mu_{ind_i},Sigma_{ind_i})}. \cr
+        \eqn{y_i} \eqn{\sim}{~} \eqn{N(\mu_{ind_i},\Sigma_{ind_i})}. \cr
         ind \eqn{\sim}{~} iid multinomial(p).  p is a ncomp x 1 vector of probs. 
 
   Priors:\cr
-        \eqn{mu_j} \eqn{\sim}{~} \eqn{N(mubar,Sigma_j (x) A^{-1})}. \eqn{mubar=vec(Mubar)}. \cr
-        \eqn{Sigma_j} \eqn{\sim}{~} IW(nu,V).\cr
+        \eqn{\mu_j} \eqn{\sim}{~} \eqn{N(mubar,\Sigma_j (x) A^{-1})}. \eqn{mubar=vec(Mubar)}. \cr
+        \eqn{\Sigma_j} \eqn{\sim}{~} IW(nu,V).\cr
         note: this is the natural conjugate prior -- a special case of multivariate 
              regression.\cr
         \eqn{p} \eqn{\sim}{~} Dirchlet(a).
@@ -32,8 +32,8 @@ rnmixGibbs(Data, Prior, Mcmc)
   Output of the components is in the form of a list of lists. \cr
   compsdraw[[i]] is ith draw -- list of ncomp lists. \cr
   compsdraw[[i]][[j]] is list of parms for jth normal component. \cr
-  jcomp=compsdraw[[i]][j]]. Then jth comp \eqn{\sim}{~} \eqn{N(jcomp[[1]],Sigma)}, 
-  \eqn{Sigma} = t(R)\%*\%R, \eqn{R^{-1}} = jcomp[[2]].
+  jcomp=compsdraw[[i]][[j]]. Then jth comp \eqn{\sim}{~} \eqn{N(jcomp[[1]],\Sigma)}, 
+  \eqn{\Sigma} = t(R)\%*\%R, \eqn{R^{-1}} = jcomp[[2]].
 
   List arguments contain:
   \itemize{
@@ -46,6 +46,7 @@ rnmixGibbs(Data, Prior, Mcmc)
     \item{ncomp}{ number of normal components to be included }
     \item{R}{ number of MCMC draws }
     \item{keep}{ MCMC thinning parm: keep every keepth draw (def: 1)}
+    \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
     \item{LogLike}{ logical flag for compute log-likelihood (def: FALSE)}
   }
 }
diff --git a/man/rordprobitGibbs.Rd b/man/rordprobitGibbs.Rd
old mode 100755
new mode 100644
index 3bf1a9f..53d7b33
--- a/man/rordprobitGibbs.Rd
+++ b/man/rordprobitGibbs.Rd
@@ -1,115 +1,116 @@
-\name{rordprobitGibbs}
-\alias{rordprobitGibbs}
-\concept{bayes}
-\concept{MCMC}
-\concept{probit}
-\concept{Gibbs Sampling}
-
-\title{ Gibbs Sampler for Ordered Probit }
-\description{
-  \code{rordprobitGibbs} implements a Gibbs Sampler for the ordered probit model.
-
-}
-\usage{
-rordprobitGibbs(Data, Prior, Mcmc)
-}
-
-\arguments{
-  \item{Data}{ list(X, y, k)}
-  \item{Prior}{ list(betabar, A, dstarbar, Ad)}
-  \item{Mcmc}{ list(R, keep, s, change, draw)  }
-}
-
-\details{
-  Model: \eqn{z = X\beta + e}.  \eqn{e} \eqn{\sim}{~} \eqn{N(0,I)}. 
-          y=1,..,k. cutoff=c( c [1] ,..c [k+1] ).   \cr 
-          y=k, if c [k] <= z < c [k+1] .     
-
-  Prior:  \eqn{\beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})}. 
-          \eqn{dstar} \eqn{\sim}{~} \eqn{N(dstarbar,Ad^{-1})}.
-
-  List arguments contain  
-  \describe{
-    \item{\code{X}}{n x nvar Design Matrix}
-    \item{\code{y}}{n x 1 vector of observations, (1,...,k)} 
-    \item{\code{k}}{the largest possible value of y} 
-    \item{\code{betabar}}{nvar x 1 prior mean (def: 0)}
-    \item{\code{A}}{nvar x nvar prior precision matrix (def: .01I)} 
-    \item{\code{dstarbar}}{ndstar x 1 prior mean, ndstar=k-2 (def: 0)}
-    \item{\code{Ad}}{ndstar x ndstar prior precision matrix (def:I)} 
-    \item{\code{s}}{ scaling parm for RW Metropolis (def: 2.93/sqrt(nvar))}
-    \item{\code{R}}{ number of MCMC draws }
-    \item{\code{keep}}{ thinning parameter - keep every keepth draw (def: 1)}
-  }
-}
-
-\value{
-  \item{betadraw }{R/keep x k matrix of betadraws}
-  \item{cutdraw }{R/keep x (k-1) matrix of cutdraws}
-  \item{dstardraw }{R/keep x (k-2) matrix of dstardraws}
-  \item{accept }{a value of acceptance rate in RW Metropolis}
-}
-\note{ 
-   set c[1]=-100. c[k+1]=100. c[2] is set to 0 for identification.   \cr
-
-   The relationship between cut-offs and dstar is    \cr
-   c[3] = exp(dstar[1]), c[4]=c[3]+exp(dstar[2]),..., c[k] = c[k-1] + exp(datsr[k-2])              
-  
-   Be careful in assessing prior parameter, Ad.  .1 is too small for many applications. 
-} 
-
-\references{  \emph{Bayesian Statistics and Marketing}
-  by Rossi, Allenby and McCulloch\cr
-  \url{http://www.perossi.org/home/bsm-1}
-}
-
-\author{ Peter Rossi, Anderson School, UCLA,
-  \email{perossichi at gmail.com}.
-}
-
-\seealso{ \code{\link{rbprobitGibbs}} }
-\examples{
-##
-## rordprobitGibbs example
-##
-if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=2000} else {R=10}
-
-## simulate data for ordered probit model
-
-   simordprobit=function(X, betas, cutoff){
-    z = X\%*\%betas + rnorm(nobs)   
-    y = cut(z, br = cutoff, right=TRUE, include.lowest = TRUE, labels = FALSE)  
-    return(list(y = y, X = X, k=(length(cutoff)-1), betas= betas, cutoff=cutoff ))
-   }
-
-   set.seed(66)  
-   nobs=300 
-   X=cbind(rep(1,nobs),runif(nobs, min=0, max=5),runif(nobs,min=0, max=5))
-   k=5
-   betas=c(0.5, 1, -0.5)       
-   cutoff=c(-100, 0, 1.0, 1.8, 3.2,  100)
-   simout=simordprobit(X, betas, cutoff)   
-   Data=list(X=simout$X,y=simout$y, k=k)
-
-## set Mcmc for ordered probit model
-   
-   Mcmc=list(R=R)   
-   out=rordprobitGibbs(Data=Data,Mcmc=Mcmc)
-  
-   cat(" ", fill=TRUE)
-   cat("acceptance rate= ",accept=out$accept,fill=TRUE)
- 
-## outputs of betadraw and cut-off draws
-  
-   cat(" Summary of betadraws",fill=TRUE)
-   summary(out$betadraw,tvalues=betas)
-   cat(" Summary of cut-off draws",fill=TRUE) 
-   summary(out$cutdraw,tvalues=cutoff[2:k])
-
-if(0){
-## plotting examples
-plot(out$cutdraw)
-}
-
-}
-\keyword{ models }
+\name{rordprobitGibbs}
+\alias{rordprobitGibbs}
+\concept{bayes}
+\concept{MCMC}
+\concept{probit}
+\concept{Gibbs Sampling}
+
+\title{ Gibbs Sampler for Ordered Probit }
+\description{
+  \code{rordprobitGibbs} implements a Gibbs Sampler for the ordered probit model.
+
+}
+\usage{
+rordprobitGibbs(Data, Prior, Mcmc)
+}
+
+\arguments{
+  \item{Data}{ list(X, y, k)}
+  \item{Prior}{ list(betabar, A, dstarbar, Ad)}
+  \item{Mcmc}{ list(R, keep, nprint, s, change, draw)  }
+}
+
+\details{
+  Model: \eqn{z = X\beta + e}.  \eqn{e} \eqn{\sim}{~} \eqn{N(0,I)}. 
+          y=1,..,k. cutoff=c( c [1] ,..c [k+1] ).   \cr 
+          y=k, if c [k] <= z < c [k+1] .     
+
+  Prior:  \eqn{\beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})}. 
+          \eqn{dstar} \eqn{\sim}{~} \eqn{N(dstarbar,Ad^{-1})}.
+
+  List arguments contain  
+  \describe{
+    \item{\code{X}}{n x nvar Design Matrix}
+    \item{\code{y}}{n x 1 vector of observations, (1,...,k)} 
+    \item{\code{k}}{the largest possible value of y} 
+    \item{\code{betabar}}{nvar x 1 prior mean (def: 0)}
+    \item{\code{A}}{nvar x nvar prior precision matrix (def: .01I)} 
+    \item{\code{dstarbar}}{ndstar x 1 prior mean, ndstar=k-2 (def: 0)}
+    \item{\code{Ad}}{ndstar x ndstar prior precision matrix (def:I)} 
+    \item{\code{s}}{ scaling parm for RW Metropolis (def: 2.93/sqrt(nvar))}
+    \item{\code{R}}{ number of MCMC draws }
+    \item{\code{keep}}{ thinning parameter - keep every keepth draw (def: 1)}
+    \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
+  }
+}
+
+\value{
+  \item{betadraw }{R/keep x k matrix of betadraws}
+  \item{cutdraw }{R/keep x (k-1) matrix of cutdraws}
+  \item{dstardraw }{R/keep x (k-2) matrix of dstardraws}
+  \item{accept }{a value of acceptance rate in RW Metropolis}
+}
+\note{ 
+   set c[1]=-100. c[k+1]=100. c[2] is set to 0 for identification.   \cr
+
+   The relationship between cut-offs and dstar is    \cr
+   c[3] = exp(dstar[1]), c[4]=c[3]+exp(dstar[2]),..., c[k] = c[k-1] + exp(dstar[k-2])              
+  
+   Be careful in assessing prior parameter, Ad.  .1 is too small for many applications. 
+} 
+
+\references{  \emph{Bayesian Statistics and Marketing}
+  by Rossi, Allenby and McCulloch\cr
+  \url{http://www.perossi.org/home/bsm-1}
+}
+
+\author{ Peter Rossi, Anderson School, UCLA,
+  \email{perossichi at gmail.com}.
+}
+
+\seealso{ \code{\link{rbprobitGibbs}} }
+\examples{
+##
+## rordprobitGibbs example
+##
+if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=2000} else {R=10}
+
+## simulate data for ordered probit model
+
+   simordprobit=function(X, betas, cutoff){
+    z = X\%*\%betas + rnorm(nobs)   
+    y = cut(z, br = cutoff, right=TRUE, include.lowest = TRUE, labels = FALSE)  
+    return(list(y = y, X = X, k=(length(cutoff)-1), betas= betas, cutoff=cutoff ))
+   }
+
+   set.seed(66)  
+   nobs=300 
+   X=cbind(rep(1,nobs),runif(nobs, min=0, max=5),runif(nobs,min=0, max=5))
+   k=5
+   betas=c(0.5, 1, -0.5)       
+   cutoff=c(-100, 0, 1.0, 1.8, 3.2,  100)
+   simout=simordprobit(X, betas, cutoff)   
+   Data=list(X=simout$X,y=simout$y, k=k)
+
+## set Mcmc for ordered probit model
+   
+   Mcmc=list(R=R)   
+   out=rordprobitGibbs(Data=Data,Mcmc=Mcmc)
+  
+   cat(" ", fill=TRUE)
+   cat("acceptance rate= ",accept=out$accept,fill=TRUE)
+ 
+## outputs of betadraw and cut-off draws
+  
+   cat(" Summary of betadraws",fill=TRUE)
+   summary(out$betadraw,tvalues=betas)
+   cat(" Summary of cut-off draws",fill=TRUE) 
+   summary(out$cutdraw,tvalues=cutoff[2:k])
+
+if(0){
+## plotting examples
+plot(out$cutdraw)
+}
+
+}
+\keyword{ models }
diff --git a/man/rscaleUsage.Rd b/man/rscaleUsage.Rd
old mode 100755
new mode 100644
index 45892bd..92a11bb
--- a/man/rscaleUsage.Rd
+++ b/man/rscaleUsage.Rd
@@ -16,7 +16,7 @@ rscaleUsage(Data,Prior, Mcmc)
 \arguments{
   \item{Data}{ list(k,x)}
   \item{Prior}{ list(nu,V,mubar,Am,gsigma,gl11,gl22,gl12,Lambdanu,LambdaV,ge) (optional) }
-  \item{Mcmc}{ list(R,keep,ndghk,printevery,e,y,mu,Sigma,sigma,tau,Lambda) (optional) }
+  \item{Mcmc}{ list(R,keep,ndghk,nprint,e,y,mu,Sigma,sigma,tau,Lambda) (optional) }
 }
 \details{
   Model: n=nrow(x) individuals respond to m=ncol(x) questions. all questions are on a scale 1, \ldots, k.
@@ -29,7 +29,7 @@ rscaleUsage(Data,Prior, Mcmc)
 
 
   Priors:\cr
-  \eqn{(tau_i,ln(sigma_i))} \eqn{\sim}{~} \eqn{N(phi,Lamda)}.  \eqn{phi=(0,lambda_{22})}. \cr
+  \eqn{(tau_i,ln(sigma_i))} \eqn{\sim}{~} \eqn{N(\phi,Lamda)}.  \eqn{\phi=(0,lambda_{22})}. \cr
   mu \eqn{\sim}{~} \eqn{N(mubar, Am{^-1})}.\cr
   Sigma \eqn{\sim}{~} IW(nu,V).\cr
   Lambda \eqn{\sim}{~} IW(Lambdanu,LambdaV).\cr
diff --git a/man/rsurGibbs.Rd b/man/rsurGibbs.Rd
old mode 100755
new mode 100644
index a035f97..b4aa572
--- a/man/rsurGibbs.Rd
+++ b/man/rsurGibbs.Rd
@@ -21,16 +21,16 @@ rsurGibbs(Data, Prior, Mcmc)
   \item{Mcmc}{ list(R,keep)}
 }
 \details{
-  Model: \eqn{y_i = X_ibeta_i + e_i}.  i=1,\ldots,m. m regressions. \cr
-  (e(1,k), \ldots, e(m,k)) \eqn{\sim}{~} \eqn{N(0,Sigma)}. k=1, \ldots, nobs. 
+  Model: \eqn{y_i = X_i\beta_i + e_i}.  i=1,\ldots,m. m regressions. \cr
+  (e(1,k), \ldots, e(m,k)) \eqn{\sim}{~} \eqn{N(0,\Sigma)}. k=1, \ldots, nobs. 
 
   We can also write as the stacked model: \cr
-  \eqn{y = Xbeta + e} where y is a nobs*m long vector and k=length(beta)=sum(length(betai)).
+  \eqn{y = X\beta + e} where y is a nobs*m long vector and k=length(beta)=sum(length(betai)).
 
   Note: we must have the same number of observations in each equation but we can have different numbers
   of X variables 
 
-  Priors: \eqn{beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})}.  \eqn{Sigma} \eqn{\sim}{~} \eqn{IW(nu,V)}.
+  Priors: \eqn{\beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})}.  \eqn{\Sigma} \eqn{\sim}{~} \eqn{IW(nu,V)}.
 
   List arguments contain  
   \itemize{
@@ -41,6 +41,7 @@ rsurGibbs(Data, Prior, Mcmc)
     \item{\code{V}}{ scale parm for Inverted Wishart prior (def: nu*I)}
     \item{\code{R}}{ number of MCMC draws }
     \item{\code{keep}}{ thinning parameter - keep every keepth draw }
+    \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
   }
 }
 \value{
diff --git a/man/runireg.Rd b/man/runireg.Rd
old mode 100755
new mode 100644
index cdaaa1a..ee5b04c
--- a/man/runireg.Rd
+++ b/man/runireg.Rd
@@ -13,13 +13,13 @@ runireg(Data, Prior, Mcmc)
 \arguments{
   \item{Data}{ list(y,X)}
   \item{Prior}{ list(betabar,A, nu, ssq) }
-  \item{Mcmc}{ list(R,keep)}
+  \item{Mcmc}{ list(R,keep,nprint)}
 }
 \details{
-  Model: \eqn{y = Xbeta + e}.  \eqn{e} \eqn{\sim}{~} \eqn{N(0,sigmasq)}. \cr
+  Model: \eqn{y = X\beta + e}.  \eqn{e} \eqn{\sim}{~} \eqn{N(0,\sigma^2)}. \cr
 
-  Priors: \eqn{beta} \eqn{\sim}{~} \eqn{N(betabar,sigmasq*A^{-1})}. 
- \eqn{sigmasq} \eqn{\sim}{~} \eqn{(nu*ssq)/chisq_{nu}}.
+  Priors: \eqn{\beta} \eqn{\sim}{~} \eqn{N(betabar,\sigma^2*A^{-1})}. 
+ \eqn{\sigma^2} \eqn{\sim}{~} \eqn{(nu*ssq)/\chi^2_{nu}}.
   List arguments contain  
   \itemize{
     \item{\code{X}}{n x k Design Matrix}
@@ -29,7 +29,8 @@ runireg(Data, Prior, Mcmc)
     \item{\code{nu}}{ d.f. parm for Inverted Chi-square prior (def: 3)}
     \item{\code{ssq}}{ scale parm for Inverted Chi-square prior (def: var(y))}
     \item{\code{R}}{ number of draws }
-    \item{\code{keep}}{ thinning parameter - keep every keepth draw }
+    \item{\code{keep}}{ thinning parameter - keep every keepth draw}
+    \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
   }
 }
 \value{
diff --git a/man/runiregGibbs.Rd b/man/runiregGibbs.Rd
old mode 100755
new mode 100644
index 10fad0e..974fbd5
--- a/man/runiregGibbs.Rd
+++ b/man/runiregGibbs.Rd
@@ -15,12 +15,12 @@ runiregGibbs(Data, Prior, Mcmc)
 \arguments{
   \item{Data}{ list(y,X)}
   \item{Prior}{ list(betabar,A, nu, ssq) }
-  \item{Mcmc}{ list(sigmasq,R,keep)}
+  \item{Mcmc}{ list(sigmasq,R,keep,nprint)}
 }
 \details{
-  Model: \eqn{y = Xbeta + e}.  \eqn{e} \eqn{\sim}{~} \eqn{N(0,sigmasq)}. \cr
+  Model: \eqn{y = X\beta + e}.  \eqn{e} \eqn{\sim}{~} \eqn{N(0,\sigma^2)}. \cr
 
-  Priors: \eqn{beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})}.  \eqn{sigmasq} \eqn{\sim}{~} \eqn{(nu*ssq)/chisq_{nu}}.
+  Priors: \eqn{\beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})}.  \eqn{\sigma^2} \eqn{\sim}{~} \eqn{(nu*ssq)/\chi^2_{nu}}.
   List arguments contain  
   \itemize{
     \item{\code{X}}{n x k Design Matrix}
@@ -31,6 +31,7 @@ runiregGibbs(Data, Prior, Mcmc)
     \item{\code{ssq}}{ scale parm for Inverted Chi-square prior (def:var(y))}
     \item{\code{R}}{ number of MCMC draws }
     \item{\code{keep}}{ thinning parameter - keep every keepth draw }
+    \item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
   }
 }
 \value{
diff --git a/man/rwishart.Rd b/man/rwishart.Rd
old mode 100755
new mode 100644
index 13df02d..3eb5e1d
--- a/man/rwishart.Rd
+++ b/man/rwishart.Rd
@@ -20,7 +20,7 @@ rwishart(nu, V)
 
   If you want to use an Inverted Wishart prior, you \emph{must invert the location matrix} 
   before calling \code{rwishart}, e.g. \cr
-  \eqn{Sigma} \eqn{\sim}{~} IW(nu,V);  \eqn{Sigma^{-1}} \eqn{\sim}{~} \eqn{W(nu,V^{-1})}.
+  \eqn{\Sigma} \eqn{\sim}{~} IW(nu,V);  \eqn{\Sigma^{-1}} \eqn{\sim}{~} \eqn{W(nu,V^{-1})}.
 }
 \value{
   \item{W}{ Wishart draw }
diff --git a/man/simnhlogit.Rd b/man/simnhlogit.Rd
index 876cc04..2110866 100755
--- a/man/simnhlogit.Rd
+++ b/man/simnhlogit.Rd
@@ -1,45 +1,56 @@
-\name{simnhlogit}
-\alias{simnhlogit}
-\concept{logit}
-\concept{non-homothetic}
-\title{ Simulate from Non-homothetic Logit Model }
-\description{
-  \code{simnhlogit} simulates from the non-homothetic logit model
-}
-\usage{
-simnhlogit(theta, lnprices, Xexpend)
-}
-\arguments{
-  \item{theta}{ coefficient vector }
-  \item{lnprices}{ n x p array of prices }
-  \item{Xexpend}{ n x k array of values of expenditure variables}
-}
-\details{
-  For detail on parameterization, see \code{llnhlogit}.
-}
-\value{
-  a list containing: 
-  \item{y}{n x 1 vector of multinomial outcomes (1, \ldots, p)}
-  \item{Xexpend}{expenditure variables}
-  \item{lnprices}{ price array }
-  \item{theta}{coefficients}
-  \item{prob}{n x p array of choice probabilities}
-}
-
-\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
-  by Rossi, Allenby and McCulloch, Chapter 4. \cr
-  \url{http://www.perossi.org/home/bsm-1}
-}
-
-\author{ Peter Rossi, Anderson School, UCLA,
-  \email{perossichi at gmail.com}.
-}
-
-\section{Warning}{
-  This routine is a utility routine that does \strong{not} check the
-  input arguments for proper dimensions and type.
-}
-
-\seealso{ \code{\link{llnhlogit}} }
-
-\keyword{ models }
+\name{simnhlogit}
+\alias{simnhlogit}
+\concept{logit}
+\concept{non-homothetic}
+\title{ Simulate from Non-homothetic Logit Model }
+\description{
+  \code{simnhlogit} simulates from the non-homothetic logit model
+}
+\usage{
+simnhlogit(theta, lnprices, Xexpend)
+}
+\arguments{
+  \item{theta}{ coefficient vector }
+  \item{lnprices}{ n x p array of prices }
+  \item{Xexpend}{ n x k array of values of expenditure variables}
+}
+\details{
+  For details on parameterization, see \code{llnhlogit}.
+}
+\value{
+  a list containing: 
+  \item{y}{n x 1 vector of multinomial outcomes (1, \ldots, p)}
+  \item{Xexpend}{expenditure variables}
+  \item{lnprices}{ price array }
+  \item{theta}{coefficients}
+  \item{prob}{n x p array of choice probabilities}
+}
+
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Rossi, Allenby and McCulloch, Chapter 4. \cr
+  \url{http://www.perossi.org/home/bsm-1}
+}
+
+\author{ Peter Rossi, Anderson School, UCLA,
+  \email{perossichi at gmail.com}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{llnhlogit}} }
+
+\examples{
+##
+N=1000
+p=3
+k=1
+theta = c(rep(1,p),seq(from=-1,to=1,length=p),rep(2,k),.5)
+lnprices = matrix(runif(N*p),ncol=p)
+Xexpend = matrix(runif(N*k),ncol=k)
+simdata = simnhlogit(theta,lnprices,Xexpend)
+}
+
+\keyword{ models }
diff --git a/man/summary.bayesm.mat.Rd b/man/summary.bayesm.mat.Rd
old mode 100755
new mode 100644
index 56d9464..3e7c8d4
--- a/man/summary.bayesm.mat.Rd
+++ b/man/summary.bayesm.mat.Rd
@@ -6,15 +6,16 @@
   array of draws
 }
 \usage{
-\method{summary}{bayesm.mat}(object, names, burnin = trunc(0.1 * nrow(X)), tvalues, QUANTILES = TRUE, TRAILER = TRUE,...)
+\method{summary}{bayesm.mat}(object, names, burnin = trunc(0.1 * nrow(X)), 
+  tvalues, QUANTILES = TRUE, TRAILER = TRUE,...)
 }
 \arguments{
   \item{object}{ \code{object} (hereafter \code{X}) is an array of draws, usually an object of class "bayesm.mat" }
   \item{names}{ optional character vector of names for the columns of \code{X}}
-  \item{burnin}{ number of draws to burn-in, def: .1*nrow(X) }
+  \item{burnin}{ number of draws to burn-in (def: .1*nrow(X))}
   \item{tvalues}{ optional vector of "true" values for use in simulation examples }
-  \item{QUANTILES}{ logical for should quantiles be displayed, def: TRUE }
-  \item{TRAILER}{ logical for should a trailer be displayed, def: TRUE }
+  \item{QUANTILES}{ logical for should quantiles be displayed (def: TRUE)}
+  \item{TRAILER}{ logical for should a trailer be displayed (def: TRUE)}
   \item{...}{ optional arguments for generic function }
 }
 \details{
diff --git a/man/summary.bayesm.nmix.Rd b/man/summary.bayesm.nmix.Rd
old mode 100755
new mode 100644
index ab3ba5e..cd2359e
--- a/man/summary.bayesm.nmix.Rd
+++ b/man/summary.bayesm.nmix.Rd
@@ -15,7 +15,7 @@
 \arguments{
   \item{object}{ an object of class "bayesm.nmix" -- a list of lists of draws}
   \item{names}{ optional character vector of names fo reach dimension of the density}
-  \item{burnin}{ number of draws to burn-in, def: .1*nrow(probdraw)}
+  \item{burnin}{ number of draws to burn-in (def: .1*nrow(probdraw))}
   \item{...}{ parms to send to summary} 
 }
 \details{
diff --git a/man/summary.bayesm.var.Rd b/man/summary.bayesm.var.Rd
old mode 100755
new mode 100644
index 3349ae1..7e550d8
--- a/man/summary.bayesm.var.Rd
+++ b/man/summary.bayesm.var.Rd
@@ -11,9 +11,9 @@
 \arguments{
   \item{object}{ \code{object} (herafter, \code{Vard}) is an array of draws of a covariance matrix }
   \item{names}{ optional character vector of names for the columns of \code{Vard}}
-  \item{burnin}{ number of draws to burn-in, def: .1*nrow(Vard) }
+  \item{burnin}{ number of draws to burn-in (def: .1*nrow(Vard))}
   \item{tvalues}{ optional vector of "true" values for use in simulation examples }
-  \item{QUANTILES}{ logical for should quantiles be displayed, def: TRUE }
+  \item{QUANTILES}{ logical for should quantiles be displayed (def: TRUE)}
   \item{...}{ optional arguments for generic function }
 }
 \details{
diff --git a/man/tuna.Rd b/man/tuna.Rd
old mode 100755
new mode 100644
index 6f95806..056e1b3
--- a/man/tuna.Rd
+++ b/man/tuna.Rd
@@ -1,111 +1,111 @@
-\name{tuna}
-\alias{tuna}
-\docType{data}
-\title{Data on Canned Tuna Sales}
-\description{
-  Volume of canned tuna sales as well as a measure of display activity, log price and log wholesale price.  
-  Weekly data aggregated to the chain level.  This data is extracted from the Dominick's Finer Foods database
-  maintained by the University of Chicago \url{http://http://research.chicagogsb.edu/marketing/databases/dominicks/dataset.aspx}.
-  Brands are seven of the top 10 UPCs in the canned tuna product category.
-}
-\usage{data(tuna)}
-\format{
-  A data frame with 338 observations on the following 30 variables.
-  \describe{
-  \item{\code{WEEK}}{a numeric vector}
-    \item{\code{MOVE1}}{unit sales of Star Kist 6 oz.}
-    \item{\code{MOVE2}}{unit sales of Chicken of the Sea 6 oz.}
-    \item{\code{MOVE3}}{unit sales of Bumble Bee Solid 6.12 oz.}
-    \item{\code{MOVE4}}{unit sales of Bumble Bee Chunk 6.12 oz.}
-    \item{\code{MOVE5}}{unit sales of Geisha 6 oz.}
-    \item{\code{MOVE6}}{unit sales of Bumble Bee Large Cans.}
-    \item{\code{MOVE7}}{unit sales of HH Chunk Lite 6.5 oz.}
-    \item{\code{NSALE1}}{a measure of display activity of Star Kist 6 oz.}
-    \item{\code{NSALE2}}{a measure of display activity of Chicken of the Sea 6 oz.}
-    \item{\code{NSALE3}}{a measure of display activity of Bumble Bee Solid 6.12 oz.}
-    \item{\code{NSALE4}}{a measure of display activity of Bumble Bee Chunk 6.12 oz.}
-    \item{\code{NSALE5}}{a measure of display activity of Geisha 6 oz.}
-    \item{\code{NSALE6}}{a measure of display activity of Bumble Bee Large Cans.}
-    \item{\code{NSALE7}}{a measure of display activity of HH Chunk Lite 6.5 oz.}
-    \item{\code{LPRICE1}}{log of price of Star Kist 6 oz.}
-    \item{\code{LPRICE2}}{log of price of Chicken of the Sea 6 oz.}
-    \item{\code{LPRICE3}}{log of price of Bumble Bee Solid 6.12 oz.}
-    \item{\code{LPRICE4}}{log of price of Bumble Bee Chunk 6.12 oz.}
-    \item{\code{LPRICE5}}{log of price of Geisha 6 oz.}
-    \item{\code{LPRICE6}}{log of price of Bumble Bee Large Cans.}
-    \item{\code{LPRICE7}}{log of price of HH Chunk Lite 6.5 oz.}
-    \item{\code{LWHPRIC1}}{log of wholesale price of Star Kist 6 oz.}
-    \item{\code{LWHPRIC2}}{log of wholesale price of Chicken of the Sea 6 oz.}
-    \item{\code{LWHPRIC3}}{log of wholesale price of Bumble Bee Solid 6.12 oz.}
-    \item{\code{LWHPRIC4}}{log of wholesale price of Bumble Bee Chunk 6.12 oz.}
-    \item{\code{LWHPRIC5}}{log of wholesale price of Geisha 6 oz.}
-    \item{\code{LWHPRIC6}}{log of wholesale price of Bumble Bee Large Cans.}
-    \item{\code{LWHPRIC7}}{log of wholesale price of HH Chunk Lite 6.5 oz.}
-    \item{\code{FULLCUST}}{total customers visits}
-  }
-}
-\source{
-  Chevalier, A. Judith, Anil K. Kashyap and Peter E. Rossi
-  (2003), "Why Don't Prices Rise During Periods of Peak Demand? Evidence from Scanner Data," 
-  \emph{The American Economic Review} , 93(1), 15-37.
-}
-\references{
- Chapter 7, \emph{Bayesian Statistics and Marketing} by Rossi et al. \cr
- \url{hhttp://www.perossi.org/home/bsm-1}
-}
-\examples{
-data(tuna)
-cat(" Quantiles of sales",fill=TRUE)
-mat=apply(as.matrix(tuna[,2:5]),2,quantile)
-print(mat)
-
-##
-## example of processing for use with rivGibbs
-##
-if(0)
-{
-  data(tuna)                          
-  t = dim(tuna)[1]    
-  customers = tuna[,30]                 
-  sales = tuna[,2:8]                                                        
-  lnprice = tuna[,16:22]      
-  lnwhPrice= tuna[,23:29]      
-  share=sales/mean(customers)
-  shareout=as.vector(1-rowSums(share))
-  lnprob=log(share/shareout)  
-
-# create w matrix
-
-  I1=as.matrix(rep(1, t))
-  I0=as.matrix(rep(0, t))
-  intercept=rep(I1, 4)
-  brand1=rbind(I1, I0, I0, I0)
-  brand2=rbind(I0, I1, I0, I0)
-  brand3=rbind(I0, I0, I1, I0)
-  w=cbind(intercept, brand1, brand2, brand3)  
-  
-## choose brand 1 to 4        
-          
-  y=as.vector(as.matrix(lnprob[,1:4])) 
-  X=as.vector(as.matrix(lnprice[,1:4]))     
-  lnwhPrice=as.vector(as.matrix (lnwhPrice[1:4]))   
-  z=cbind(w, lnwhPrice)
-                        
-  Data=list(z=z, w=w, x=X, y=y)
-  Mcmc=list(R=R, keep=1)
-  set.seed(66)
-  out=rivGibbs(Data=Data,Mcmc=Mcmc)
-
-  cat(" betadraws ",fill=TRUE)
-  summary(out$betadraw)
-
-
-if(0){
-## plotting examples
-plot(out$betadraw)
-}
-}
-
-  
-}
-\keyword{datasets}
+\name{tuna}
+\alias{tuna}
+\docType{data}
+\title{Data on Canned Tuna Sales}
+\description{
+  Volume of canned tuna sales as well as a measure of display activity, log price and log wholesale price.  
+  Weekly data aggregated to the chain level.  This data is extracted from the Dominick's Finer Foods database
+  maintained by the Kilts center for marketing at the University of Chicago's Booth School of Business.
+  Brands are seven of the top 10 UPCs in the canned tuna product category.
+}
+\usage{data(tuna)}
+\format{
+  A data frame with 338 observations on the following 30 variables.
+  \describe{
+  \item{\code{WEEK}}{a numeric vector}
+    \item{\code{MOVE1}}{unit sales of Star Kist 6 oz.}
+    \item{\code{MOVE2}}{unit sales of Chicken of the Sea 6 oz.}
+    \item{\code{MOVE3}}{unit sales of Bumble Bee Solid 6.12 oz.}
+    \item{\code{MOVE4}}{unit sales of Bumble Bee Chunk 6.12 oz.}
+    \item{\code{MOVE5}}{unit sales of Geisha 6 oz.}
+    \item{\code{MOVE6}}{unit sales of Bumble Bee Large Cans.}
+    \item{\code{MOVE7}}{unit sales of HH Chunk Lite 6.5 oz.}
+    \item{\code{NSALE1}}{a measure of display activity of Star Kist 6 oz.}
+    \item{\code{NSALE2}}{a measure of display activity of Chicken of the Sea 6 oz.}
+    \item{\code{NSALE3}}{a measure of display activity of Bumble Bee Solid 6.12 oz.}
+    \item{\code{NSALE4}}{a measure of display activity of Bumble Bee Chunk 6.12 oz.}
+    \item{\code{NSALE5}}{a measure of display activity of Geisha 6 oz.}
+    \item{\code{NSALE6}}{a measure of display activity of Bumble Bee Large Cans.}
+    \item{\code{NSALE7}}{a measure of display activity of HH Chunk Lite 6.5 oz.}
+    \item{\code{LPRICE1}}{log of price of Star Kist 6 oz.}
+    \item{\code{LPRICE2}}{log of price of Chicken of the Sea 6 oz.}
+    \item{\code{LPRICE3}}{log of price of Bumble Bee Solid 6.12 oz.}
+    \item{\code{LPRICE4}}{log of price of Bumble Bee Chunk 6.12 oz.}
+    \item{\code{LPRICE5}}{log of price of Geisha 6 oz.}
+    \item{\code{LPRICE6}}{log of price of Bumble Bee Large Cans.}
+    \item{\code{LPRICE7}}{log of price of HH Chunk Lite 6.5 oz.}
+    \item{\code{LWHPRIC1}}{log of wholesale price of Star Kist 6 oz.}
+    \item{\code{LWHPRIC2}}{log of wholesale price of Chicken of the Sea 6 oz.}
+    \item{\code{LWHPRIC3}}{log of wholesale price of Bumble Bee Solid 6.12 oz.}
+    \item{\code{LWHPRIC4}}{log of wholesale price of Bumble Bee Chunk 6.12 oz.}
+    \item{\code{LWHPRIC5}}{log of wholesale price of Geisha 6 oz.}
+    \item{\code{LWHPRIC6}}{log of wholesale price of Bumble Bee Large Cans.}
+    \item{\code{LWHPRIC7}}{log of wholesale price of HH Chunk Lite 6.5 oz.}
+    \item{\code{FULLCUST}}{total customers visits}
+  }
+}
+\source{
+  Chevalier, A. Judith, Anil K. Kashyap and Peter E. Rossi
+  (2003), "Why Don't Prices Rise During Periods of Peak Demand? Evidence from Scanner Data," 
+  \emph{The American Economic Review} , 93(1), 15-37.
+}
+\references{
+ Chapter 7, \emph{Bayesian Statistics and Marketing} by Rossi, Allenby and McCulloch. \cr
+ \url{http://www.perossi.org/home/bsm-1}
+}
+\examples{
+data(tuna)
+cat(" Quantiles of sales",fill=TRUE)
+mat=apply(as.matrix(tuna[,2:5]),2,quantile)
+print(mat)
+
+##
+## example of processing for use with rivGibbs
+##
+if(0)
+{
+  data(tuna)                          
+  t = dim(tuna)[1]    
+  customers = tuna[,30]                 
+  sales = tuna[,2:8]                                                        
+  lnprice = tuna[,16:22]      
+  lnwhPrice= tuna[,23:29]      
+  share=sales/mean(customers)
+  shareout=as.vector(1-rowSums(share))
+  lnprob=log(share/shareout)  
+
+# create w matrix
+
+  I1=as.matrix(rep(1, t))
+  I0=as.matrix(rep(0, t))
+  intercept=rep(I1, 4)
+  brand1=rbind(I1, I0, I0, I0)
+  brand2=rbind(I0, I1, I0, I0)
+  brand3=rbind(I0, I0, I1, I0)
+  w=cbind(intercept, brand1, brand2, brand3)  
+  
+## choose brand 1 to 4        
+          
+  y=as.vector(as.matrix(lnprob[,1:4])) 
+  X=as.vector(as.matrix(lnprice[,1:4]))     
+  lnwhPrice=as.vector(as.matrix (lnwhPrice[1:4]))   
+  z=cbind(w, lnwhPrice)
+                        
+  Data=list(z=z, w=w, x=X, y=y)
+  Mcmc=list(R=R, keep=1)
+  set.seed(66)
+  out=rivGibbs(Data=Data,Mcmc=Mcmc)
+
+  cat(" betadraws ",fill=TRUE)
+  summary(out$betadraw)
+
+
+if(0){
+## plotting examples
+plot(out$betadraw)
+}
+}
+
+  
+}
+\keyword{datasets}
diff --git a/src/Makevars b/src/Makevars
new file mode 100644
index 0000000..c316a57
--- /dev/null
+++ b/src/Makevars
@@ -0,0 +1,2 @@
+PKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS)
+PKG_CPPFLAGS = -I../inst/include/
diff --git a/src/Makevars.win b/src/Makevars.win
new file mode 100644
index 0000000..037acc6
--- /dev/null
+++ b/src/Makevars.win
@@ -0,0 +1,2 @@
+PKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS)
+PKG_CPPFLAGS = -I../inst/include/ 
diff --git a/src/bayesBLP_rcpp_loop.cpp b/src/bayesBLP_rcpp_loop.cpp
new file mode 100644
index 0000000..480586a
--- /dev/null
+++ b/src/bayesBLP_rcpp_loop.cpp
@@ -0,0 +1,508 @@
+#include "bayesm.h"
+ 
+//SUPPORT FUNCTIONS SPECIFIC TO MAIN FUNCTION--------------------------------------------------------------------------------------
+mat r2Sigma(vec const& r, int K){
+//
+// Keunwoo Kim 10/28/2014
+//
+// Purpose: 
+//      convert r (vector) into Sigma (matrix)
+//      Sigma is parameterized through its Cholesky factor L (Sigma = L*L'),
+//      so any real-valued r maps to a positive-definite Sigma.
+//
+// Arguments:
+//      r : K*(K+1)/2 length vector
+//          first K entries are the log of the diagonal of L; the remaining
+//          K*(K-1)/2 entries are the strict lower triangle of L, column by column
+//      K : number of parameters (=nrow(Sigma))
+//
+// Output: 
+//      Sigma (K by K matrix)
+//
+  int k, i, j;
+  mat L = zeros<mat>(K, K);
+  // exponentiate the first K elements so the diagonal of L is strictly positive
+  L.diag() = exp(r(span(0,K-1)));
+  k = 0;
+  // fill the strict lower triangle of L column by column from the tail of r
+  for (i=0; i<K-1; i++){
+	  for (j=i+1; j<K; j++){
+		  L(j,i) = r[K+k];
+		  k = k + 1;
+	  }
+  }
+  // Sigma = L*L' is symmetric positive definite by construction
+  return (L*trans(L));
+}
+
+double logJacob(mat const& choiceProb, int J){
+//
+// Keunwoo Kim 10/28/2014
+//
+// Purpose: 
+//      compute log(det(Jacobian)) of mapping from unobserved shock to share
+//      (change-of-variables)
+//
+// Arguments:
+//      choiceProb: T*J by H matrix of individual choice probabilities
+//                  (H Monte-Carlo draws per market/alternative)
+//      J: number of alternatives (without outside option)
+//
+// Output: 
+//      log(det(Jacobian)) of mapping from unobserved shock to share;
+//      computed as MINUS the accumulated log|det| of the block-diagonal
+//      share Jacobian (the negation inverts the direction of the mapping)
+//
+  int t;
+  mat blockMat;
+  double detblockMat;
+
+  int H = choiceProb.n_cols;
+  int T = choiceProb.n_rows / J;
+
+  mat onesJJ = ones<mat>(J, J);  
+  // equivalent to struc = kron(eye(T, T), onesJJ)
+  // struc selects the J x J within-market blocks of the T*J x T*J Jacobian
+  mat struc = zeros<mat>(J*T,J*T);
+  for (t=0; t<T; t++){
+    struc(span(t*J,t*J+J-1),span(t*J,t*J+J-1)) = onesJJ;
+  } 
+  
+  // subtracting the identity leaves only the OFF-diagonal positions of each block
+  struc = struc - eye<mat>(T*J, T*J);
+  // off-diagonal entries: -E[p_j * p_k] averaged over the H simulation draws
+  mat offDiag = -choiceProb*trans(choiceProb)/H;
+  mat Jac = struc%offDiag; 
+  // diagonal entries: E[p_j * (1 - p_j)] averaged over the H draws
+  Jac.diag() = sum(choiceProb%(1-choiceProb), 1)/H;
+
+  // the Jacobian is block-diagonal by market, so its log|det| is the sum of
+  // the per-market block log|det|s
+  double sumlogJacob = 0;
+  for (t=0; t<T; t++){
+    blockMat = Jac(span(t*J, (t+1)*J-1), span(t*J, (t+1)*J-1)); 
+    detblockMat = det(blockMat);
+    // abs() cannot be used on a scalar here; log(sqrt(x^2)) yields log|det|
+    sumlogJacob = sumlogJacob + log(sqrt(detblockMat*detblockMat));      
+  }
+  
+  return (-sumlogJacob);  
+}
+
+mat share2mu(mat const& Sigma, mat const& X, mat const& v, vec const& share, int J, double tol){
+//
+// Keunwoo Kim 10/28/2014
+//
+// Purpose: 
+//      contraction mapping (BLP): invert observed market shares into the
+//      mean utilities mu that rationalize them, given Sigma
+//
+// Arguments:
+//      Sigma: var-cov matrix of random coefficients
+//      X: J*T by K      
+//      v: random draws from standard normal (K by H), used for
+//         Monte-Carlo integration over the random coefficients
+//      share: observed share (length J*T)
+//      J: number of alternatives (without outside option)
+//      tol: convergence tolerance for the contraction mapping
+//
+// Output: 
+//      a matrix of mean utility (first column) and 
+//      individual choice probabilities (second~last column)
+//
+  int t;
+  mat expU, temp1, expSum, choiceProb;
+  vec share_hat;  
+  
+  int H = v.n_cols;
+  int T = X.n_rows/J;
+  mat temp2(T*J,H);
+  
+  // T*J by H: individual-level utility deviations from the mean utility,
+  // X * chol(Sigma)' * v simulates the random-coefficient heterogeneity
+  mat u = X*(trans(chol(Sigma))*v);
+  int iter = 0;
+  // starting values chosen so the first relative change (0.5) exceeds any
+  // sensible tol and the loop body executes at least once
+  vec mu0 = ones<vec>(J*T);
+  vec mu1 = mu0/2;
+  
+  // relative change between successive iterates (convergence criterion)
+  vec rel = (mu1 - mu0)/mu0;
+  double max_rel = max(abs(rel));
+  while (max_rel > tol){
+	  mu0 = mu1;
+	  expU = exp(u + mu0*ones<mat>(1,H));
+
+	  // logit denominators: 1 + sum of exp-utilities within each market/draw
+	  temp1 = reshape(expU, J, T*H);
+	  expSum = 1 + sum(temp1, 0);
+	  expSum = reshape(expSum, T, H);
+	  // equivalent to expSum = kron(expSum, ones<vec>(J));    
+    for (t=0; t<T; t++){
+      temp2(span(t*J, t*J+J-1), span::all) = ones<vec>(J)*expSum(t, span::all);
+    }
+    expSum = temp2;
+	  choiceProb = expU/expSum;
+	  // predicted shares: average choice probabilities over the H draws
+	  share_hat = sum(choiceProb, 1)/H;
+
+	  // BLP contraction step: mu <- mu + log(observed share / predicted share)
+	  mu1 = mu0 + log(share/share_hat);
+	  iter = iter + 1;
+	  rel = (mu0 - mu1)/mu0;
+	  max_rel = max(abs(rel));
+  }
+  // pack converged mean utilities and the final choice probabilities together
+  mat rtn = zeros(J*T, H+1);
+  rtn(span::all,0) = mu1;
+  rtn(span::all,span(1,H)) = choiceProb;
+
+  return (rtn);
+}
+
+List rivDraw(vec const& mu, vec const& Xend, mat const& z, mat const& Xexo, vec const& theta_hat, mat const& A, 
+                  vec const& deltabar, mat const& Ad, mat const& V, int nu, vec const& delta_old, mat const& Omega_old){
+//
+// Keunwoo Kim 05/21/2015
+//
+// Purpose: draw from posterior for linear I.V. model
+//          (one full Gibbs cycle: thetabar | delta,Omega; delta | thetabar,Omega;
+//           Omega | thetabar,delta)
+//
+// Arguments:
+//        mu is vector of obs on lhs var in structural equation
+//        Xend is "endogenous" var in structural eqn
+//        Xexo is matrix of obs on "exogenous" vars in the structural eqn
+//        z is matrix of obs on instruments
+//
+//        deltabar is prior mean of delta
+//        Ad is prior prec
+//        theta_hat is prior mean vector for theta2,theta1
+//        A is prior prec of same
+//        nu,V parms for IW on Omega
+//
+//        delta_old is the starting value from the previous chain
+//        Omega_old is the starting value from the previous chain
+//
+// Output: list of draws of delta,thetabar,Omega
+// 
+// Model:
+//    Xend=z'delta + e1
+//    mu=thetabar1*Xend + Xexo'thetabar2 + e2
+//        e1,e2 ~ N(0,Omega)
+//
+// Prior:
+//   delta ~ N(deltabar,Ad^-1)
+//   thetabar = vec(theta2,theta1) ~ N(theta_hat,A^-1)
+//   Omega ~ IW(nu,V)
+// 
+  vec e1, ee2, bg, u, theta2;
+  mat xt, Res, S, B, L, Li, z2, zt1, zt2, ucholinv, VSinv, mut;
+  double sig,theta1;
+  List out;
+  int i;  
+
+  int n = mu.size();
+  int dimd = z.n_cols;
+  int dimg = Xexo.n_cols;
+  vec thetabar(dimg+1);
+
+  mat C = eye(2,2);
+
+  // set initial values
+  mat Omega = Omega_old;
+  vec delta = delta_old;
+
+  mat xtd(2*n, dimd);  
+  vec zvec = vectorise(trans(z));  
+     
+  //
+  // draw beta,gamma
+  //
+  // condition the structural equation on the first-stage residual e1:
+  // mu | e1 is normal with mean shift ee2 = (Omega_12/Omega_11)*e1 and
+  // conditional sd sig; standardize by sig and run a Bayes regression (breg)
+  e1 = Xend - z*delta;
+  ee2 = (Omega(0,1)/Omega(0,0)) * e1;
+  sig = sqrt(Omega(1,1)-((Omega(0,1)*Omega(0,1))/Omega(0,0)));
+  mut = (mu-ee2)/sig;
+  xt = join_rows(Xend,Xexo)/sig;
+  bg = breg(mut,xt,theta_hat,A);
+  // bg[0] is the coefficient on the endogenous variable; the rest is theta2
+  theta1 = bg[0];
+  theta2 = bg(span(1,bg.size()-1));
+    
+  //
+  // draw delta
+  //
+  // reduced-form error covariance B = C*Omega*C' with C lower-unitriangular
+  // carrying theta1; whiten both equations with Li = inv(chol(B)') and stack
+  // the two whitened equations per observation (rows 2i and 2i+1 of xtd)
+  C(1,0) = theta1;
+  B = C*Omega*trans(C);
+  L = trans(chol(B));
+  Li = solve(trimatl(L),eye(2,2));
+  u = mu - Xexo*theta2;
+  mut = vectorise(Li * trans(join_rows(Xend,u)));
+  z2 = trans(join_rows(zvec, theta1*zvec));
+  z2 = Li*z2;
+  zt1 = z2(0,span::all);
+  zt2 = z2(1,span::all);
+  zt1.reshape(dimd,n);    
+  zt1 = trans(zt1);
+  zt2.reshape(dimd,n);    
+  zt2 = trans(zt2);
+  // interleave the two whitened design rows for each observation
+  for (i=0; i<n; i++){
+    xtd(2*i,span::all) = zt1(i,span::all);
+    xtd(2*i+1,span::all) = zt2(i,span::all);
+  }
+  delta = breg(mut,xtd,deltabar,Ad);
+    
+  //
+  // draw Sigma
+  //
+  // residual cross-product S from both equations at the current draws
+  Res = join_rows(Xend-z*delta, mu-theta1*Xend-Xexo*theta2);
+  S = trans(Res)*Res;
+    
+  // compute the inverse of V+S
+  ucholinv = solve(trimatu(chol(V+S)), eye(2,2));
+  VSinv = ucholinv*trans(ucholinv);
+    
+  // Omega | rest ~ IW(nu+n, V+S): draw the Wishart of the inverse and
+  // take the "IW" component returned by rwishart
+  out = rwishart(nu+n, VSinv);
+  Omega = as<mat>(out["IW"]);
+  
+  // return thetabar ordered as (theta2, theta1) to match the caller's layout
+  thetabar(span(0,dimg-1)) = theta2;
+  thetabar[dimg] = theta1;
+  
+  return List::create(
+      Named("deltadraw") = delta,
+      Named("thetabardraw") = thetabar,      
+      Named("Omegadraw") = Omega
+  );   
+}
+
+//MAIN FUNCTION---------------------------------------------------------------------------------------
+// [[Rcpp::export]]
+List bayesBLP_rcpp_loop(bool IV, mat const& X, mat const& Z, vec const& share, 
+                        int J, int T, mat const& v, int R,
+                        vec const& sigmasqR, 
+                        mat const& A, vec const& theta_hat, 
+                        vec const& deltabar, mat const& Ad,
+                        int nu0, double s0_sq, mat const& VOmega, 
+                        double ssq, mat const& cand_cov, 
+                        vec const& theta_bar_initial, vec const& r_initial, 
+                        double tau_sq_initial, mat const& Omega_initial, vec const& delta_initial,
+                        double tol, int keep, int nprint){
+//
+// Keunwoo Kim 05/21/2015
+//
+// Purpose: 
+//      draw theta_bar and Sigma via hybrid Gibbs sampler (Jiang, Manchanda, and Rossi, 2009)
+//
+// Arguments:
+//    Observation
+//      IV: whether to use instrumental variable (TRUE or FALSE)
+//      X: J*T by H (If IV is TRUE, the last column is endogenous variable.)
+//      z: instrumental variables (If IV is FALSE, it is not used.)
+//      share: vector of length J*T
+//
+//    Dimension
+//      J: number of alternatives
+//      T: number of time
+//      R: number of Gibbs sampling
+//
+//    Prior
+//      sigmasqR
+//      theta_hat
+//      A
+//      deltabar (used when IV is TRUE)
+//      Ad (used when IV is TRUE)
+//      nu0
+//      s0_sq (used when IV is FALSE)
+//      VOmega (used when IV is TRUE)
+//
+//    Metropolis-Hastings
+//      ssq: scaling parameter
+//      cand_cov: var-cov matrix of random walk
+//
+//    Initial values
+//      theta_bar_initial
+//      r_initial
+//      tau_sq_initial (used when IV is FALSE)
+//      Omega_initial (used when IV is TRUE)
+//      delta_initial (used when IV is TRUE)
+//
+//    Contraction mapping
+//      tol: convergence tolerance for the contraction mapping
+//      v: draws used for Monte-Carlo integration
+//
+// Output:
+//      a List of theta_bar, r (Sigma), tau_sq, Omega, and delta  draws
+//      number of acceptance and loglikelihood
+//
+// Model & Prior: 
+//      shown in the below comments.
+
+  int nu1, mkeep, I, jt;
+  mat prob_t, Sigma_new, b, S, Sigma, Sigma_inv, rel, expU, share_hat, choiceProb, expSum, L, ucholinv, XXAinv, out_cont,
+      Xexo, Xend, Omega_all, delta_all, zetaeta_old, zetaeta_new, rootiOmega;
+  vec r_new, mu_new, theta_tilde, z, mu, err, mu0, mu1, eta_new, eta_old, tau_sq_all, zeta;
+  double alpha, ll_new, ll_old, sumLogJaco_new, prior_new, prior_old, s1_sq, acceptrate;
+  List ivout;
+  
+  double pi = M_PI;
+  int K = theta_hat.size();  
+  
+  if (IV==TRUE){
+    Xexo = X(span::all, span(0,K-2));
+    Xend = X(span::all, K-1);
+    I = Z.n_cols;
+  }
+  
+  // number of MC integration draws
+  int H = v.n_cols;
+
+  // Allocate matrix for draws to be stored during MCMC
+  if (IV==TRUE){
+    Omega_all = zeros<mat>(4,R/keep);
+    delta_all = zeros<mat>(I,R/keep);
+  }else{
+    tau_sq_all = zeros<vec>(R/keep);
+  }
+  mat theta_bar_all = zeros<mat>(K,R/keep);
+  mat r_all = zeros<mat>(K*(K+1)/2,R/keep);
+  mat Sigma_all = zeros<mat>(K*K,R/keep);  
+  vec ll_all = zeros<vec>(R/keep);
+
+  // list to be returned to R  
+  List rtn;
+
+  // initial values
+  vec theta_bar = theta_bar_initial;
+  mat Omega = Omega_initial;
+  vec delta = delta_initial;
+  vec r_old = r_initial;
+  double tau_sq = tau_sq_initial;  
+  mat Sigma_old = r2Sigma(r_old, K);
+
+  //===================================================================
+  // get initial mu and sumLogJaco: Contraction Mapping
+  //===================================================================
+  // convert shares into mu
+  out_cont = share2mu(Sigma_old, X, v, share, J, tol);
+  mu = out_cont(span::all,0);
+  choiceProb = out_cont(span::all,span(1,H));
+
+  // Jacobian
+  double sumLogJaco_old = logJacob(choiceProb, J);
+  vec mu_old = mu;
+
+  //===================================================================
+  // Start MCMC
+  //===================================================================
+  if (nprint>0) startMcmcTimer();
+  double n_accept = 0.0;
+  for (int rep=0; rep<R; rep++){
+	  //========================================================================
+	  // STEP 1
+	  // Draw r (for Sigma): Metropolis Hasting
+	  // r_new = r_old + N(0, ssq*cand_cov)
+	  // Prior:
+	  // r ~ N(0, diag(sigmasqR)), that is, independent prior
+	  //========================================================================
+	  // get candidate
+	  r_new = r_old + trans(chol(ssq*cand_cov))*randn<vec>(K*(K+1)/2);
+	  Sigma_new = r2Sigma(r_new, K);
+	  // convert share into mu_new
+    out_cont = share2mu(Sigma_new, X, v, share, J, tol);
+    mu_new = out_cont(span::all,0);
+    choiceProb = out_cont(span::all,span(1,H));
+	  // get eta_new
+	  eta_new = mu_new - X*theta_bar;
+	  // get eta_old
+	  eta_old = mu_old - X*theta_bar;
+    
+    if (IV==TRUE){
+      // get zeta
+      zeta = Xend - Z*delta;
+      // get ll_old
+      zetaeta_old = join_rows(zeta, eta_old);
+      rootiOmega = solve(trimatu(chol(Omega)), eye(2,2));
+      ll_old = 0;
+      for (jt=0; jt<J*T; jt++){
+        ll_old = ll_old + lndMvn(vectorise(zetaeta_old(jt, span::all)), 
+                          zeros<vec>(2), 
+                          rootiOmega);
+      }    
+      ll_old = ll_old + sumLogJaco_old;
+      // get ll_new    
+      zetaeta_new = join_rows(zeta, eta_new);
+	    sumLogJaco_new = logJacob(choiceProb, J);
+      ll_new = 0;
+      for (jt=0; jt<J*T; jt++){
+        ll_new = ll_new + lndMvn(vectorise(zetaeta_new(jt, span::all)), 
+                          zeros<vec>(2), 
+                          rootiOmega);
+      }    
+      ll_new = ll_new + sumLogJaco_new;
+    }else{
+      // get ll_old
+	    ll_old = sum(log((1/sqrt(2*pi*tau_sq)) * exp(-(eta_old%eta_old)/(2*tau_sq)))) + sumLogJaco_old;
+	    // get ll_new	  
+	    sumLogJaco_new = logJacob(choiceProb, J);
+	    ll_new = sum(log((1/sqrt(2*pi*tau_sq)) * exp(-(eta_new%eta_new)/(2*tau_sq)))) + sumLogJaco_new;
+    }
+    
+	  // priors
+	  prior_new = sum(log((1/sqrt(2*pi*sigmasqR)) % exp(-(r_new%r_new)/(2*sigmasqR))));
+	  prior_old = sum(log((1/sqrt(2*pi*sigmasqR)) % exp(-(r_old%r_old)/(2*sigmasqR))));
+
+	  alpha = exp(ll_new + prior_new - ll_old - prior_old);
+	  if (alpha>1) {alpha = 1;}
+
+	  if (runif(1)[0]<=alpha) {
+		  r_old = r_new;
+		  Sigma_old = Sigma_new;
+		  mu_old = mu_new;
+		  sumLogJaco_old = sumLogJaco_new;
+		  n_accept = n_accept + 1; 
+	  }	
+	  //========================================================================	
+	  // STEP 2
+	  // Draw theta_bar & tau^2 (or Omega & delta): Gibbs Sampler
+    // mu = X*theta_bar + eta, eta~N(0,tau_sq)
+    // (For IV case, see the comments in rivDraw above.)
+	  // Prior:
+	  // 1. theta_bar ~ N(theta_hat, A^-1)
+	  // 2. tau_sq ~ nu0*s0_sq/chisq(nu0)
+	  // Posterior:
+	  // 1. theta_bar | tau_sq ~ N(theta_tilde, (X^t X/tau_sq + A)^-1)
+	  // theta_tilde = (X^t X/tau_sq + A)^-1 * (tau_sq^-1*X^t mu + A*theta_hat)
+	  // 2. tau_sq | theta_bar ~ nu1*s1_sq/chisq(nu1)
+	  // nu1 = nu0 + n (n=J*T)
+	  // s1_sq = [nu0*s0_sq + (mu-X^t theta_bar)^t (mu-X^t theta_bar)]/[nu0 + n]
+	  //========================================================================    
+    if (IV==TRUE){
+      ivout = rivDraw(mu_old, Xend, Z, Xexo, theta_hat, A, 
+                          deltabar, Ad, VOmega, nu0, delta, Omega);    
+      delta = as<vec>(ivout["deltadraw"]);
+      theta_bar = as<vec>(ivout["thetabardraw"]);
+      Omega = as<mat>(ivout["Omegadraw"]);
+    }else{    
+      // compute the inverse of (trans(X)*X)/tau_sq + A
+      ucholinv = solve(trimatu(chol((trans(X)*X)/tau_sq + A)), eye(K,K));
+      XXAinv = ucholinv*trans(ucholinv);
+ 
+      theta_tilde = XXAinv * (trans(X)*mu_old/tau_sq + A*theta_hat);
+	    theta_bar = theta_tilde + ucholinv*vec(rnorm(K));
+
+	    nu1 = nu0 + J*T;
+	    err = mu_old - X*theta_bar;
+	    s1_sq = (nu0*s0_sq + sum(err%err))/nu1;
+	    z = vec(rnorm(nu1));	  
+	    tau_sq = nu1*s1_sq/sum(z%z);
+    }
+
+	  //
+    // print time to completion and draw # every nprint'th draw
+    //
+    if (nprint>0) if ((rep+1)%nprint==0) infoMcmcTimer(rep, R);
+    
+	  //========================================================================
+	  // STEP 3
+	  // Store Draws
+	  //========================================================================
+	  if((rep+1)%keep==0){
+      mkeep = (rep+1)/keep;      
+      if (IV==TRUE){
+        Omega_all(span::all,mkeep-1) = vectorise(Omega);      
+        delta_all(span::all,mkeep-1) = delta;
+      }else{
+        tau_sq_all[mkeep-1] = tau_sq;
+      }
+	    theta_bar_all(span::all,mkeep-1) = theta_bar; 
+      r_all(span::all,mkeep-1) = r_old;  
+	    Sigma_all(span::all,mkeep-1) = vectorise(r2Sigma(r_old, K));	  
+	    ll_all[mkeep-1] = ll_old;
+	  }
+  }
+  acceptrate = n_accept/R;
+  rtn["tausqdraw"] = tau_sq_all;
+  rtn["Omegadraw"] = Omega_all;  
+  rtn["deltadraw"] = delta_all;
+  rtn["thetabardraw"] = theta_bar_all;
+  rtn["rdraw"] = r_all;
+  rtn["Sigmadraw"] = Sigma_all;  
+  rtn["ll"] = ll_all;
+  rtn["acceptrate"] = acceptrate;
+
+  if (nprint>0) endMcmcTimer();
+  return (rtn);
+}
diff --git a/src/bayesmc.c b/src/bayesmc.c
deleted file mode 100755
index c1e47de..0000000
--- a/src/bayesmc.c
+++ /dev/null
@@ -1,359 +0,0 @@
-#include <R.h>
-#include <Rmath.h>
-#include <math.h>
-
-void condmom(double *x, double *mu, double *sigi, int p, int j, double *m, double *csig)
-{
-/*	function to compute moments of x[j] | x[-j]  */
-
-	int ind,i,jm1;
-	double csigsq;
-	jm1=j-1;
-	ind = p*jm1;
-	csigsq = 1./sigi[ind+jm1];
-
-	*m = 0.0;
-	for (i=0 ; i < p; ++i)
-		{
-		if (i != jm1) 
-			{*m +=  - csigsq*sigi[ind+i]*(x[i]-mu[i]);}
-		}
-	*m=mu[jm1]+*m ;
-	*csig=sqrt(csigsq);
-}
-
-double rtrun(double mu, double sigma,double trunpt, int above) 
-{
-/*	function to draw truncated normal
-		above=1 means from above b=trunpt, a=-inf
-		above=0 means from below a=trunpt, b= +inf   
-		modified by rossi 6/05 to check arg to qnorm
-*/
-	double FA,FB,rnd,result,arg ;
-	if (above) {
-		FA=0.0; FB=pnorm(((trunpt-mu)/(sigma)),0.0,1.0,1,0);
-			}
-	else {
-		FB=1.0; FA=pnorm(((trunpt-mu)/(sigma)),0.0,1.0,1,0);
-		}
-	
-	GetRNGstate();
-	rnd=unif_rand();
-	arg=rnd*(FB-FA)+FA;
-	if(arg > .999999999) arg=.999999999;
-	if(arg < .0000000001) arg=.0000000001;
-	result = mu + sigma*qnorm(arg,0.0,1.0,1,0);
-	PutRNGstate();
-	return result;
-}
-
-
-void drawwi(double *w, double *mu, double *sigmai,int *p, int *y)
-{
-/*	function to draw w_i by Gibbing's thru p vector   */
-
-	int i,j,above;
-	double bound;
-	double mean, csig;
-
-		for (i=0; i < *p; ++i) 
-		{	
-			bound=0.0;
-		    	for (j=0; j < *p ; ++j) 
-				{ if (j != i) {bound=fmax2(bound,w[j]); }}
-			if (*y == i+1) 	
-				above = 0;
-			else 
-				above = 1;
-
-		condmom(w,mu,sigmai,*p,(i+1),&mean,&csig);
-		w[i]=rtrun(mean,csig,bound,above);
-
-		}
-}
-
-void draww(double *w, double *mu, double *sigmai, int *n, int *p, int *y) 
-{
-/*	function to gibbs down entire w vector for all n obs  */
-	int i, ind;
-	for (i=0; i < *n; ++i)
-	{
-		ind= *p * i;
-		drawwi(w+ind,mu+ind,sigmai,p,y+i);
-	}
-}
-
-
-void drawwi_mvp(double *w, double *mu, double *sigmai,int *p, int *y)
-{
-/*	function to draw w_i for Multivariate Probit  */
-
-	int i,above;
-	double mean, csig;
-
-		for (i=0; i < *p; ++i) 
-		{	
-			if (y[i]) 	
-				above = 0;
-			else 
-				above = 1;
-
-		condmom(w,mu,sigmai,*p,(i+1),&mean,&csig);
-		w[i]=rtrun(mean,csig,0.0,above);
-
-		}
-}
-
-void draww_mvp(double *w, double *mu, double *sigmai, int *n, int *p, int *y) 
-{
-/*	function to gibbs down entire w vector for all n obs  */
-	int i, ind;
-	for (i=0; i < *n; ++i)
-	{
-		ind= *p * i;
-		drawwi_mvp(w+ind,mu+ind,sigmai,p,y+ind);
-	}
-}
-
-double root(double c1, double  c2, double *tol,int *iterlim)
-{
-/*	function to find root of c1 - c2u = lnu */
-   int iter;
-   double uold, unew;
-   uold=1.;
-   unew=0.00001;
-   iter=0;
-   while (iter <= *iterlim && fabs(uold-unew) > *tol )
-      {
-      uold=unew;
-      unew=uold + (uold*(c1 -c2*uold -  log(uold)))/(1. + c2*uold); 
-      if(unew < 1.0e-50) unew=1.0e-50;
-      iter=iter+1;
-      }
-   return unew;
-}
-   
-void callroot(int *n,double *c1, double *c2, double *tol, int *iterlim,double *u)
-{
-   int i;
-   for (i=0;i < *n; ++i)
-   {
-	u[i]=root(c1[i],c2[i],tol,iterlim);
-   }
-}
-
-
-
-void ghk_oneside(double *L, double* trunpt, int *above, int *dim, int *n, double *res)
-/*	routine to implement ghk with a region defined by truncation only on one- side
- 						r mcculloch 8/04
-        if above=1, then we truncate component i from above at point trunpt[i-1]
-        L is lower triangular root of Sigma
-	random vector is assumed to have zero mean
-    	n is number of draws to use in GHK	
-	modified 6/05 by rossi to check arg into qnorm
-*/
-{
-   int i,j,k;
-   double mu,tpz,u,prod,pa,pb,arg;
-   double *z;
-   z = (double *)R_alloc(*dim,sizeof(double));
-   GetRNGstate();
-   *res = 0.0;
-   for(i=0;i<*n;i++) {
-      prod=1.0;
-      for(j=0;j<*dim;j++) {
-         mu=0.0; for(k=0;k<j;k++) mu += L[k*(*dim)+j]*z[k];
-	 tpz = (trunpt[j]-mu)/L[j*(*dim)+j];
-	 if(above[j]) {
-	    pa=0.0; pb = pnorm(tpz,0.0,1.0,1,0);   
-	 }
-	 else {
-	    pb=1.0; pa = pnorm(tpz,0.0,1.0,1,0);
-	 }
-	 prod *= pb-pa;
-	 u = unif_rand();
-	 arg=u*pb+(1.-u)*pa;
-	 if(arg > .999999999) arg=.999999999;
-	 if(arg < .0000000001) arg=.0000000001;
-	 z[j] = qnorm(arg,0.0,1.0,1,0);
-      }
-      *res += prod;
-   }
-   *res /= (double)(*n);
-   PutRNGstate();
-}
-void ghk(double *L, double* a, double *b, int *dim, int *n, double *res)
-/*	routine to implement ghk with a region : a[i-1] <= x_i <= b[i-1]
- 						r mcculloch 8/04
-        L is lower triangular root of Sigma
-	random vector is assumed to have zero mean
-    	n is number of draws to use in GHK	
-	modified 6/05 by rossi to check arg into qnorm
-*/
-{
-   int i,j,k;
-   double aa,bb,pa,pb,u,prod,mu,arg;
-   double *z;
-   z = (double *)R_alloc(*dim,sizeof(double));
-   GetRNGstate();
-   *res=0.0;
-   for(i=0;i<*n;i++) {
-      prod = 1.0;
-      for(j=0;j<*dim;j++) {
-         mu=0.0; for(k=0;k<j;k++) mu += L[k*(*dim)+j]*z[k];
-	 aa=(a[j]-mu)/L[j*(*dim)+j]; bb = (b[j]-mu)/L[j*(*dim)+j];
-	 pa = pnorm(aa,0.0,1.0,1,0); pb = pnorm(bb,0.0,1.0,1,0);
-	 prod *= pb-pa;
-	 u = unif_rand();
-	 arg=u*pb+(1.-u)*pa;
-	 if(arg > .999999999) arg=.999999999;
-	 if(arg < .0000000001) arg=.0000000001;
-	 z[j] = qnorm(arg,0.0,1.0,1,0);
-      }
-      *res += prod;
-   }
-   *res /= (double)(*n);
-   PutRNGstate();
-}
-
-void ghk_vec(int *n,double *L, double *trunpt,int *above, int *dim, int *r, double *res)
-{
-/* routine to call ghk_oneside for n different truncation points stacked in to the
-   vector trunpt  -- puts n results in vector res
-                                          p rossi 12/04
-*/
-	int i, ind;
-	for (i=0; i < *n; ++i)
-	{
-		ind = *dim * i;
-		ghk_oneside(L,trunpt + ind,above,dim,r,res+i);
-	}
-}
-
-void cuttov(double *ut,double *v, int *dim)
-/*
-purpose: write upper triangular (ut) to vector (v), goes down columns, omitting zeros
-arguments:
-   ut: upper triangular matrix, stored as series of columns (including the zeros)
-   v: vector ut is copied to, on input must have correct length
-   dim: ut is dim x dim, v is dim*(dim+1)/2
-*/
-{
-   int ind=0;
-   int i,j;
-   for(i=0;i<(*dim);i++) {
-      for(j=0;j<=i;j++) {
-         v[ind] = ut[i*(*dim)+j];
-         ind += 1;
-      }
-   }
-}
-void cvtout(double *v, double *ut, int *dim)
-/*
-purpose: write vector (v) to upper triangular (inverse of cuttov above)
-arguments:
-   v: vector
-   ut: upper triangulare matrix, columns stacked, zeros included
-   dim: ut is dim x dim, v is dim*(dim+1)/2
-*/
-{
-   int ind=0;
-   int i,j;
-   for(i=0;i<(*dim);i++) {
-      for(j=(i+1);j<(*dim);j++) ut[i*(*dim)+j]=0.0;
-      for(j=0;j<=i;j++) {
-         ut[i*(*dim)+j] = v[ind];
-         ind += 1;
-      }
-   }
-}
-void clmvn(double *x, double *mu, double *riv, int *dim, double *res)
-/*
-purpose:
-   calculate log of multivariate density 
-   evaluated at x
-   mean is mu, and covariance matrix t(R)%*%R and riv is vector version of the inverse of R
-arguments:
-   x: compute log(f(x))
-   mu, riv: x~N(mu,t(R)%*%R), riv is vector version of R^{-1}
-   dim: dimension of x
-   res: place to put result
-*/
-{
-   int i,j;
-   double sum = 0.0;
-   double prod = 1.0;
-   double z;
-   int ind = 0;
-   for(i=0;i<(*dim);i++) {
-      z = 0.0;
-      for(j=0;j<=i;j++) {z += riv[ind]*(x[j]-mu[j]); ind += 1;}
-      sum += z*z;
-      prod *= riv[ind-1];
-   }
-   *res = log(prod) -.5*sum;
-}
-void crdisc(double *p, int *res)
-/*
-purpose: draw from a discrete distribution
-arguments:
-   p: vector of probabilities
-   res: draw is in {1,2,...length(p)}, giving the draw's category
-*/
-{
-   double u,sum;
-   GetRNGstate();
-   u = unif_rand();
-   *res = 1;
-   sum = p[*res -1];
-   while(sum<u) {sum += p[*res];(*res) +=1;}
-   PutRNGstate();
-}
-void crcomp(double *x, double *mu, double *riv, double *p, int *dim, int *nc, int *res)
-/*
-purpose: draw component of x, where x is drawn form mixture of multivariate normal components
-arguments:
-   x: observed vector x
-   mu: matrix of class means, each column gives a mean vector
-   riv: matrix of class covariances, each column gives a vector version of R^{-1}, Sigma = t(R)%*%R
-   p: prior class probabilities
-   dim: dimension of x (and mu, and Sigma)
-   nc: number of classes
-   res: result
-note:
-   mu is column stacked version of a dim x nc matrix
-   riv is column stacked version of a dim*(dim+1)/2 x nc matrix
-*/
-{
-   double *post;
-   double max,sum;
-   int dim_riv = (*dim)*((*dim)+1)/2;
-   int i;
-   post = (double *)R_alloc(*nc,sizeof(double));
-   clmvn(x,mu,riv,dim,post);
-   max = *post;
-   for(i=1;i<(*nc);i++) {
-      clmvn(x,mu+i*(*dim),riv+i*dim_riv,dim,post+i);
-      if(*(post+i) > max) max = *(post+i);
-   }
-   sum = 0.0;
-   for(i=0;i<(*nc);i++) { post[i] = exp(post[i]-max)*p[i]; sum += post[i];}
-   for(i=0;i<(*nc);i++) post[i] /= sum;
-   crdisc(post,res);
-}
-void crcomps(double *x, double *mu, double *riv, double *p, int *dim, int *nc, int *nob, int *res)
-/*
-purpose: x represents a matrix, whose columns are draws from a normal mixture, draw component membership for each x
-arguments:
-   all the same as crcomp, except x is now column stacked version of dim x nob matrix
-   and nob is the number of observations
-   res is now of length nob
-*/
-{
-   int i;
-   for(i=0;i<(*nob);i++) {
-      crcomp(x+i*(*dim),mu,riv,p,dim,nc,res+i);
-   }
-}
-
diff --git a/src/bayesmcpp.cpp b/src/bayesmcpp.cpp
deleted file mode 100755
index b15c6e8..0000000
--- a/src/bayesmcpp.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-// R.McCulloch, 12/04  code for scale usage R function (rScaleUsage)
-//  changed to R error function, P. Rossi 05/12
-#include <iostream>
-#include <algorithm>
-
-extern "C" {
-#include <R.h>
-#include <Rmath.h>
-};
-
-extern "C" void getC(double *ep,int *kp, double *m1p, double *m2p,  double *c);
-extern "C" void dy(int *p, int *nob,  double *y, int *x, double *c, double *mu, double *beta, double *s, double *tau, double *sigma);
-
-void getC(double *ep,int *kp, double *m1p, double *m2p, double *c)
-{
-   double e = *ep;
-   int k = *kp;
-   double m1 = *m1p;
-   double m2 = *m2p;
-
-   //first sum to get s's, this is a waste since it should be done
-   //once but I don't want to see this things anywhere else and it should take no time
-   double s0 = (double)(k-1);
-   double s1=0.0,s2=0.0,s3=0.0,s4=0.0;
-   for(int i=1;i<k;i++) {s1+=i; s2+=i*i; s3+= i*i*i; s4+=i*i*i*i;}
-
-   // now make quadratic for b (just as in Peter's code)
-   double aq = s0*s2-s1*s1;
-   double bq = 2*e*s0*s3-2*e*s1*s2;
-   double cq = m1*m1 - m2*s0 + e*e*s0*s4 - e*e*s2*s2;
-
-   //get a and b
-   double det = bq*bq - 4*aq*cq;
-   if(det<0) error("no solution for c's given e and m1, m2 \n");
-   double b=(-bq+sqrt(det))/(2.0*aq);
-   double a=(m1-b*s1-e*s2)/s0;
-
-   //make c
-   c[0]= -1000.0;
-   c[k]= 1000.0;
-   for(int i=1;i<k;i++) c[i] = a+b*i+e*i*i;
-   
-   std::sort(c,c+k+1);
-
-}
-
-
-
-void d1y(int p, double *y, int *x, double *c, double *mu, double *beta, double *s, double tau, double sigma)
-{
-   //std::cout << "int main of d1y" << std::endl;
-
-   GetRNGstate();
-   double cm,cs; //cm = conditional mean, cs = condtional standard deviation
-   double u;    // uniform for truncated normal draw
-   double a,b;  // standardized truncation points
-   double pa,pb; // cdf at truncation points
-
-   //loop over coordinates of y
-   for(int i=0;i<p;i++) {
-      //compute conditonal mean and standard deviation
-      cs = s[i]*sigma;
-      cm = mu[i]+tau;
-      for(int j=0;j<i;j++) cm += (*(beta+i*(p-1)+j))*(y[j]-mu[j]-tau);
-      for(int j=(i+1);j<p;j++) cm += (*(beta+i*(p-1)+j-1))*(y[j]-mu[j]-tau);
-      //draw truncated normal
-      // y~N(cm,cs^2) I[c[x[i]-1],c[x[i])
-      a = (c[x[i]-1]-cm)/cs;  b = (c[x[i]]-cm)/cs;
-      pa = pnorm(a,0.0,1.0,1,0); pb = pnorm(b,0.0,1.0,1,0);
-      u = unif_rand();
-      y[i] = cm + cs*qnorm(u*pb + (1-u)*pa,0.0,1.0,1,0);
-   }
-   PutRNGstate();
-}
-
-void dy(int *p, int *nob, double *y, int *x, double *c, double *mu, double *beta, double *s, double *tau, double *sigma)
-{
-   for(int i=0;i<(*nob);i++) {
-      d1y(*p,y+i*(*p),x+i*(*p),c,mu,beta,s,*(tau+i),*(sigma+i));
-   }
-}
diff --git a/src/breg_rcpp.cpp b/src/breg_rcpp.cpp
new file mode 100644
index 0000000..780c2a5
--- /dev/null
+++ b/src/breg_rcpp.cpp
@@ -0,0 +1,23 @@
+#include "bayesm.h"
+
+//[[Rcpp::export]]
+vec breg(vec const& y, mat const& X, vec const& betabar, mat const& A) {
+
+// Keunwoo Kim 06/20/2014
+
+// Purpose: draw from posterior for linear regression, sigmasq=1.0
+
+// Output: draw from posterior
+ 
+// Model: y = Xbeta + e  e ~ N(0,I)
+
+// Prior:  beta ~ N(betabar,A^-1)
+
+  int k = betabar.size();
+  mat RA = chol(A);
+  mat W = join_cols(X, RA); //same as rbind(X,RA)
+  vec z = join_cols(y, RA*betabar);
+  mat IR = solve(trimatu(chol(trans(W)*W)), eye(k,k)); //trimatu interprets the matrix as upper triangular and makes solve more efficient
+  
+  return ((IR*trans(IR))*(trans(W)*z) + IR*vec(rnorm(k)));
+} 
diff --git a/src/cgetC_rcpp.cpp b/src/cgetC_rcpp.cpp
new file mode 100644
index 0000000..2fa6f1c
--- /dev/null
+++ b/src/cgetC_rcpp.cpp
@@ -0,0 +1,47 @@
+#include "bayesm.h"
+ 
+// [[Rcpp::export]]
+vec cgetC(double e, int k){
+
+//Wayne Taylor 4/29/15
+
+//purpose: get a list of cutoffs for use with scale usage problems
+
+//arguments:
+//   e: the "e" parameter from the paper
+//   k: the point scale, eg. items are rated from 1,2,...k
+// output:
+//   vector of grid points
+
+  vec temp = zeros<vec>(k-1);
+  for(int i = 0; i<(k-1); i++) temp[i] = i + 1.5;
+  double m1 = sum(temp);
+  temp = pow(temp,2);
+  double m2 = sum(temp);
+    
+  vec c = zeros<vec>(k+1);
+  
+  //first sum to get s's, this is a waste since it should be done
+  //once but I don't want to see these things anywhere else and it should take no time
+  double s0 = k-1;
+  double s1=0.0,s2=0.0,s3=0.0,s4=0.0;
+  for(int i=1;i<k;i++) {s1+=i; s2+=i*i; s3+= i*i*i; s4+=i*i*i*i;}
+
+  // now make quadratic for b (just as in Peter's code)
+  double aq = s0*s2-s1*s1;
+  double bq = 2*e*s0*s3-2*e*s1*s2;
+  double cq = m1*m1 - m2*s0 + e*e*s0*s4 - e*e*s2*s2;
+
+  //get a and b
+  double det = bq*bq - 4*aq*cq;
+  if(det<0) stop("no solution for c's given e and m1, m2 \n");
+  double b=(-bq+sqrt(det))/(2.0*aq);
+  double a=(m1-b*s1-e*s2)/s0;
+
+  //make c
+  c[0]= -1000.0;
+  c[k]= 1000.0;
+  for(int i=1;i<k;i++) c[i] = a+b*i+e*i*i;
+   
+  return(sort(c));
+}
diff --git a/src/clusterMix_rcpp_loop.cpp b/src/clusterMix_rcpp_loop.cpp
new file mode 100644
index 0000000..fb04345
--- /dev/null
+++ b/src/clusterMix_rcpp_loop.cpp
@@ -0,0 +1,145 @@
+#include "bayesm.h"
+ 
+//EXTRA FUNCTIONS SPECIFIC TO THE MAIN FUNCTION--------------------------------------------
+mat ztoSim(vec const& z){
+
+// function to convert indicator vector to Similarity matrix
+// Sim is n x n matrix, Sim[i,j]=1 if pair(i,j) are in same group
+// z is n x 1 vector of indicators (1,...,p)
+
+  int n = z.size();
+  vec onevec = ones<vec>(n); // equivalent to zvec=c(rep(z,n)) in R
+  vec zvec = kron(onevec, z);
+  vec zcomp = kron(z, onevec);// equivalent to as.numeric((zvec==zcomp)) in R
+  mat Sim = zeros<mat>(n*n,1);
+  
+  for (int i=0; i<n*n; i++){
+    if (zvec[i]==zcomp[i]) Sim(i,0) = 1;
+  }
+  
+  Sim.reshape(n,n);
+  
+  return (Sim);
+}
+
+vec Simtoz(mat const& Sim){
+
+// function to convert Similarity matrix to indicator vector
+// Sim is n x n matrix, Sim[i,j]=1 if pair(i,j) are in same group
+// z is vector of indicators from (1,...,p) of group memberships (dim n)
+
+  int count, i, j;
+  int n = Sim.n_cols;
+  vec z = zeros<vec>(n);  
+  int groupn = 1;
+  
+  for (i=0; i<n; i++){
+    count = 0;
+    for (j=0; j<n; j++){    
+      if ((z[j]==0) & (Sim(j,i)==1)){
+        z[j] = groupn;
+        count = count + 1;
+      }      
+    }
+    if (count>0){
+      groupn = groupn + 1;
+    }
+  }
+  
+  return (z);
+} 
+
+//MAIN FUNCTION---------------------------------------------------------------------------------------
+// [[Rcpp::export]]
+List clusterMix_rcpp_loop(mat const& zdraw, double cutoff, bool SILENT, int nprint){
+
+// Keunwoo Kim 10/06/2014
+
+// Purpose: 
+//    cluster observations based on draws of indicators of
+//    normal mixture components
+
+// Arguments:
+//    zdraw is a R x nobs matrix of draws of indicators (typically output from rnmixGibbs)
+//    the rth row of zdraw contains rth draw of indicators for each observations
+//    each element of zdraw takes on up to p values for up to p groups. The maximum
+//    number of groups is nobs.  Typically, however, the number of groups will be small
+//    and equal to the number of components used in the normal mixture fit.
+
+//    cutoff is a cutoff used in determining one clustering scheme; it must be 
+//    a number between .5 and 1.
+
+//    nprint - print every nprint'th draw
+
+// Output: 
+//    two clustering schemes each with a vector of length nobs which gives the assignment
+//    of each observation to a cluster
+
+//    clustera (finds zdraw with similarity matrix closest to posterior mean of similarity)
+//    clusterb (finds clustering scheme by assigning ones if posterior mean of similarity matrix exceeds cutoff and computing associated z )
+ 
+  int rep, i;
+  uword index; // type uword means unsigned integer. Necessary for finding the index of min.    
+  int nobs = zdraw.n_cols;    
+  char buf[32];
+  
+  // compute posterior mean of Similarity matrix
+  if (!SILENT){
+    Rcout << "Computing Posterior Expectation of Similarity Matrix\n";
+    Rcout << "processing draws ...\n";
+  }
+  
+  mat Pmean = zeros<mat>(nobs, nobs);
+  int R = zdraw.n_rows;
+  
+  for (rep=0; rep<R; rep++){
+    Pmean = Pmean + ztoSim(trans(zdraw(rep,span::all)));
+    if (!SILENT){
+      if ((rep+1)%nprint==0){        
+        sprintf(buf, "  %d\n", rep+1);
+        Rcout <<  buf;
+      }
+    }
+  }
+  
+  Pmean = Pmean/R;
+  
+  // now find index for draw which minimizes discrepancy between
+  // post exp of similarity and sim implied by that z
+  if (!SILENT){
+    Rcout << " \n";
+    Rcout << "Look for zdraw which minimizes loss \n";
+    Rcout << "processing draws ... \n";
+  }
+  
+  vec loss = zeros<vec>(R);
+  
+  for (rep=0; rep<R; rep++){
+    loss[rep] = accu(abs(Pmean-ztoSim(trans(zdraw(rep,span::all)))));
+    if (!SILENT){
+      if ((rep+1)%nprint==0){
+        sprintf(buf, "  %d\n", rep+1);
+        Rcout <<  buf;
+      }
+    }
+  }
+  
+  loss.min(index);  
+  vec clustera = trans(zdraw(index,span::all));
+  
+  // now do clustering by assigning Similarity to any (i,j) pair for which
+  // Pmean > cutoff
+  vec Pmeanvec = vectorise(Pmean);
+  mat Sim = zeros<mat>(nobs*nobs,1);
+  
+  for (i=0; i<nobs*nobs; i++){
+    if (Pmeanvec[i]>=cutoff) Sim(i,0) = 1;      
+  }
+  
+  Sim.reshape(nobs,nobs);  
+  vec clusterb = Simtoz(Sim);
+
+  return List::create(
+      Named("clustera") = clustera,
+      Named("clusterb") = clusterb);
+}
diff --git a/src/functionTiming.cpp b/src/functionTiming.cpp
new file mode 100644
index 0000000..8ee9912
--- /dev/null
+++ b/src/functionTiming.cpp
@@ -0,0 +1,30 @@
+#include "bayesm.h"
+ 
+//The functions below are used to print the output from MCMC draws for many of the bayesm functions
+
+time_t itime;
+char buf[100];
+
+void startMcmcTimer() {
+    itime = time(NULL);
+    Rcout << " MCMC Iteration (est time to end - min) \n";
+}
+
+void infoMcmcTimer(int rep, int R) {
+    time_t ctime = time(NULL);    
+    char buf[32];
+    
+    double timetoend = difftime(ctime, itime) / 60.0 * (R - rep - 1) / (rep+1);
+    sprintf(buf, " %d (%.1f)\n", rep+1, timetoend);
+    Rcout <<  buf;
+}
+
+void endMcmcTimer() {
+    time_t ctime = time(NULL);
+    char buf[32];
+
+    sprintf(buf, " Total Time Elapsed: %.2f \n", difftime(ctime, itime) / 60.0);     
+    Rcout << buf;
+
+    itime = 0;
+}
diff --git a/src/ghkvec_rcpp.cpp b/src/ghkvec_rcpp.cpp
new file mode 100644
index 0000000..af1f06c
--- /dev/null
+++ b/src/ghkvec_rcpp.cpp
@@ -0,0 +1,196 @@
+#include "bayesm.h"
+ 
+//SUPPORT FUNCTIONS SPECIFIC TO MAIN FUNCTION--------------------------------------------------------------------------------------
+vec HaltonSeq(int pn, int r, int burnin, bool rand){
+
+// Keunwoo Kim 10/28/2014
+
+// Purpose: 
+//    create a random Halton sequence
+
+// Arguments:
+//    pn: prime number
+//    r: number of draws
+//    burnin: number of initial burn
+//    rand: if TRUE, add a random scalar to sequence
+
+// Output: 
+//    a vector of Halton sequence, size r
+ 
+  int t;
+  vec add;
+  
+  // start at 0
+  vec seq = zeros<vec>(r+burnin+1);
+  // how many numbers I have drawn so far.
+  // I have 1 draw (0) now.
+  int index = 1;
+  // if done==1, it is done.
+  int done = 0;
+  int factor = pn;
+  do{		
+		for (t=0; t<pn-1; t++){
+      if (done==0){
+        add = seq(span(0, index-1)) + ones<vec>(index)*(t+1)/factor;
+        if ((t+2)*index-1>r+burnin){
+          seq(span((t+1)*index, r+burnin)) = add(span(0, r+burnin-(t+1)*index));
+          done = 1;
+        }else{
+          seq(span((t+1)*index, (t+2)*index-1)) = add;
+          if ((t+2)*index==r+burnin+1){
+            done = 1;
+          }
+        }
+      }
+		}		
+		factor = factor*pn;
+    index = index*pn;
+	}while (done==0);
+
+  // exclude the first 0 and some initial draws
+  seq = seq(span(burnin+1,burnin+r));	
+  
+  if (rand==TRUE){
+    // make it random
+    seq = seq + runif(1)[0];
+    for (int i=0; i<r; i++){
+      if (seq[i]>=1) seq[i] = seq[i]-1;
+    }
+  }
+	return (seq);
+}
+
+bool IsPrime(int number){
+
+// Keunwoo Kim 5/14/2015
+
+// This function is to check whether a number is prime or not.
+// This is used for setting default prime numbers.
+
+  for (int f=2; f<number; f++){
+    if (number%f==0 && f!=number){
+      return false;
+    }
+  }
+  return true;
+}
+
+double ghk_oneside(vec const& L, vec const& trunpt, vec const& above, int r, bool HALTON, vec const& pn, int burnin){
+//
+// routine to implement ghk with a region defined by truncation only on one-side
+// if above=1, then we truncate component i from above at point trunpt[i-1]
+// L is lower triangular root of Sigma
+// random vector is assumed to have zero mean
+// n is number of draws to use in GHK	
+//
+  int i, j, k;
+  double mu, tpz, u, prod, pa, pb, arg;
+    
+  int dim = trunpt.size();
+  vec z = zeros<vec>(dim);  
+  double res = 0;
+  
+  // choose R::runif vs. Halton draws
+  vec udraw(r*dim);
+  mat udrawHalton(dim, r);
+  if (HALTON){
+    for (j=0; j<dim; j++){
+      udrawHalton(j, span::all) = trans(HaltonSeq(pn[j], r, burnin, TRUE));
+    }
+    udraw = vectorise(udrawHalton);
+  }else{
+    for (i=0; i<r*dim; i++){
+      udraw[i] = runif(1)[0];
+    }    
+  }
+  
+  // main integration
+  for (i=0; i<r; i++){
+    prod = 1.0;
+    for (j=0; j<dim; j++){
+      mu = 0.0; 
+      for (k=0; k<j; k++){
+        mu = mu + L[k*dim+j] * z[k];
+      }
+      tpz = (trunpt[j]-mu) / L[j*dim+j];
+      if (above[j]>0){
+        pa = 0.0;
+        pb = R::pnorm(tpz,0,1,1,0);
+      }else{
+        pb = 1.0;
+        pa = R::pnorm(tpz,0,1,1,0);
+      }
+      prod = prod * (pb-pa);      
+      u = udraw[i*dim+j];
+      arg = u*pb + (1.0-u)*pa;
+      if (arg > .999999999) arg=.999999999;
+      if (arg < .0000000001) arg=.0000000001;
+      z[j] = R::qnorm(arg,0,1,1,0);
+    }
+    res = res + prod;
+  }
+  res = res / r;  
+  return (res);  
+}
+
+//MAIN FUNCTION---------------------------------------------------------------------------------------
+// [[Rcpp::export]]
+vec ghkvec(mat const& L, vec const& trunpt, vec const& above, int r, bool HALTON=true, 
+                vec pn=IntegerVector::create(0)){
+
+// Keunwoo Kim 5/14/2015
+
+// Purpose: 
+//      routine to call ghk_oneside for n different truncation points stacked in to the
+//      vector trunpt -- puts n results in vector res
+
+// Arguments:
+//      L: lower Cholesky root of cov. matrix
+//      trunpt: vector of truncation points
+//      above: truncation above(1) or below(0)
+//      r: number of draws
+//      HALTON: TRUE or FALSE. If FALSE, use R::runif random number generator.
+//      pn: prime number used in Halton sequence.
+//      burnin: number of initial burn-in draws. Only applied when HALTON is TRUE. (not used any more)
+
+// Output: 
+//      a vector of integration values
+
+
+  int dim = above.size();
+  int n = trunpt.size()/dim;
+
+  //
+  // handling default arguments
+  //
+  // generate prime numbers
+  if (HALTON==true && pn[0]==0){
+    Rcout << "Halton sequence is generated by the smallest prime numbers: \n";
+    Rcout << "   ";
+    pn = zeros<vec>(dim);
+    
+    int cand = 2;
+    int which = 0;
+    while (pn[dim-1]==0){      
+      if (IsPrime(cand)){
+        pn[which] = cand;
+        which = which + 1;        
+        Rprintf("%d ", cand);
+      }
+      cand = cand + 1;
+    }
+    Rcout << "\n";
+  }
+  // burn-in
+  //if (HALTON==true && burnin==NA_INTEGER){    
+  //  burnin = max(pn);
+  //  Rprintf("Initial %d (= max of prime numbers) draws are burned. \n", burnin);
+  //}
+  int burnin = 0;
+  
+  vec res(n);
+  for (int i=0; i<n; i++){    
+    res[i] = ghk_oneside(vectorise(L), trunpt(span(i*dim, (i+1)*dim-1)), above, r, HALTON, pn, burnin);
+  }  
+  return (res);
+}
diff --git a/src/llmnl_rcpp.cpp b/src/llmnl_rcpp.cpp
new file mode 100644
index 0000000..aa263a8
--- /dev/null
+++ b/src/llmnl_rcpp.cpp
@@ -0,0 +1,23 @@
+#include "bayesm.h"
+ 
+//[[Rcpp::export]]
+double llmnl(vec const& beta, vec const& y, mat const& X){
+  
+// Wayne Taylor 9/7/2014
+
+// Evaluates log-likelihood for the multinomial logit model
+
+  int n = y.size();
+  int j = X.n_rows/n;
+  mat Xbeta = X*beta;
+      
+  vec xby = zeros<vec>(n);
+  vec denom = zeros<vec>(n);
+  
+  for(int i = 0; i<n;i++){      
+    for(int p=0;p<j;p++) denom[i]=denom[i]+exp(Xbeta[i*j+p]);
+    xby[i] = Xbeta[i*j+y[i]-1];
+  }
+  
+  return(sum(xby - log(denom)));
+}
diff --git a/src/lndIChisq_rcpp.cpp b/src/lndIChisq_rcpp.cpp
new file mode 100644
index 0000000..3470666
--- /dev/null
+++ b/src/lndIChisq_rcpp.cpp
@@ -0,0 +1,12 @@
+#include "bayesm.h"
+ 
+// [[Rcpp::export]]
+mat lndIChisq(double nu, double ssq, mat const& X) {
+
+// Keunwoo Kim 07/24/2014
+
+// Purpose: evaluate log-density of scaled Inverse Chi-sq density of random variable Z=nu*ssq/chisq(nu)
+   
+  return(-lgamma(nu/2)+(nu/2)*log((nu*ssq)/2)-((nu/2)+1)*log(X)-(nu*ssq)/(2*X));
+}
+
diff --git a/src/lndIWishart_rcpp.cpp b/src/lndIWishart_rcpp.cpp
new file mode 100644
index 0000000..f396245
--- /dev/null
+++ b/src/lndIWishart_rcpp.cpp
@@ -0,0 +1,36 @@
+#include "bayesm.h"
+ 
+// [[Rcpp::export]]
+double lndIWishart(double nu, mat const& V, mat const& IW){
+
+// Keunwoo Kim 07/24/2014
+
+// Purpose: evaluate log-density of inverted Wishart with normalizing constant
+
+// Arguments: 
+//        nu is d. f. parm
+//        V is location matrix
+//        IW is the value at which the density should be evaluated
+
+// Note: in this parameterization, E[IW]=V/(nu-k-1)
+
+  int k = V.n_cols;
+  mat Uiw = chol(IW);
+  mat Uiwi = solve(trimatu(Uiw), eye(k,k)); //trimatu interprets the matrix as upper triangular and makes solve more efficient
+  mat IWi = Uiwi*trans(Uiwi);
+  mat cholV = chol(V);
+  double lndetVd2 = sum(log(cholV.diag()));
+  double lndetIWd2 = sum(log(Uiw.diag()));
+  
+  // first evaluate constant
+  double cnst = ((nu*k)/2)*log(2.0)+((k*(k-1))/4.0)*log(M_PI); // (k*(k-1))/4 is recognized as integer. "4.0" allows it to be recognized as a double.
+  vec seq_1_k = cumsum(ones<vec>(k)); // build c(1:k) through cumsum function
+  vec arg = (nu+1-seq_1_k)/2.0;
+  
+  // lgamma cannot receive arma::vec input. Compute cnst+sum(lgamma(arg)).
+  for (int i=0; i<k; i++){
+    cnst = cnst+lgamma(arg[i]);
+  }
+  
+  return (-cnst+nu*lndetVd2-(nu+k+1)*lndetIWd2-.5*sum((V*IWi.diag())));
+}
diff --git a/src/lndMvn_rcpp.cpp b/src/lndMvn_rcpp.cpp
new file mode 100644
index 0000000..2df8c34
--- /dev/null
+++ b/src/lndMvn_rcpp.cpp
@@ -0,0 +1,18 @@
+#include "bayesm.h"
+ 
+// [[Rcpp::export]]
+double lndMvn(vec const& x, vec const& mu, mat const& rooti){
+
+//Wayne Taylor 9/7/2014
+
+// function to evaluate log of MV Normal density with  mean mu, var Sigma
+// Sigma=t(root)%*%root   (root is upper tri cholesky root)
+// Sigma^-1=rooti%*%t(rooti)   
+// rooti is in the inverse of upper triangular chol root of sigma
+//          note: this is the UL decomp of sigmai not LU!
+//                Sigma=root'root   root=inv(rooti)
+
+  vec z = vectorise(trans(rooti)*(x-mu));
+  
+  return((-(x.size()/2.0)*log(2*M_PI) -.5*(trans(z)*z) + sum(log(diagvec(rooti))))[0]);
+}
diff --git a/src/lndMvst_rcpp.cpp b/src/lndMvst_rcpp.cpp
new file mode 100644
index 0000000..b444d04
--- /dev/null
+++ b/src/lndMvst_rcpp.cpp
@@ -0,0 +1,25 @@
+#include "bayesm.h"
+ 
+// [[Rcpp::export]]
+double lndMvst(vec const& x, int nu, vec const& mu, mat const& rooti, bool NORMC = false){
+
+// Wayne Taylor 9/7/2014
+
+// function to evaluate log of MVstudent t density with nu df, mean mu,
+// and with sigmai=rooti%*%t(rooti)   note: this is the UL decomp of sigmai not LU!
+// rooti is in the inverse of upper triangular chol root of sigma
+// or Sigma=root'root   root=inv(rooti)
+
+  int dim = x.size();
+  double constant;
+  
+  if(NORMC){
+    constant = (nu/2.0)*log((double)nu)+lgamma((nu+dim)/2.0)-(dim/2.0)*log(M_PI)-lgamma(nu/2.0); //"2.0"" is used versus "2" so that the division is not truncated as an "int"
+  } else {
+    constant = 0.0;
+  }
+  
+  vec z = vectorise(trans(rooti)*(x-mu));
+  
+  return((constant-((dim+nu)/2.0)*log(nu+trans(z)*z)+sum(log(diagvec(rooti))))[0]);
+}
diff --git a/src/rDPGibbs_rcpp_loop.cpp b/src/rDPGibbs_rcpp_loop.cpp
new file mode 100644
index 0000000..a9a9003
--- /dev/null
+++ b/src/rDPGibbs_rcpp_loop.cpp
@@ -0,0 +1,168 @@
+#include "bayesm.h"
+ 
+//[[Rcpp::export]]
+List rDPGibbs_rcpp_loop(int R, int keep, int nprint,
+                        mat y, List const& lambda_hyper, bool SCALE, int maxuniq, List const& PrioralphaList, int gridsize,
+                        double BayesmConstantA, int BayesmConstantnuInc, double BayesmConstantDPalpha) {
+
+// Wayne Taylor 2/4/2015
+
+  int dimy = y.n_cols;
+  int n = y.n_rows;
+  
+  //initialize indic, thetaStar, thetaNp1
+  ivec indic = ones<ivec>(n);
+  
+  std::vector<murooti> thetaStar_vector(1);
+  murooti thetaStar0_struct;
+    thetaStar0_struct.mu = zeros<vec>(dimy);
+    thetaStar0_struct.rooti = eye(dimy,dimy);
+  thetaStar_vector[0] = thetaStar0_struct;
+
+  //convert Prioralpha from List to struct
+  priorAlpha priorAlpha_struct;
+    priorAlpha_struct.power = PrioralphaList["power"];
+    priorAlpha_struct.alphamin = PrioralphaList["alphamin"];
+    priorAlpha_struct.alphamax = PrioralphaList["alphamax"];
+    priorAlpha_struct.n = PrioralphaList["n"];
+
+ //initialize lambda
+  lambda lambda_struct;
+    lambda_struct.mubar = zeros<vec>(dimy);
+    lambda_struct.Amu = BayesmConstantA;
+    lambda_struct.nu = dimy+BayesmConstantnuInc;
+    lambda_struct.V = lambda_struct.nu*eye(dimy,dimy);
+
+  //initialize alpha
+  double alpha = BayesmConstantDPalpha;
+  
+  //intialize remaining variables
+  thetaStarIndex thetaStarDrawOut_struct;
+  std::vector<murooti> new_utheta_vector(1), thetaNp1_vector(1);
+  murooti thetaNp10_struct, out_struct;
+  mat ydenmat;
+  vec q0v, probs;
+  uvec ind;
+  int nunique, indsize;
+  uvec spanall(dimy); for(int i = 0; i<dimy ; i++) spanall[i] = i; //creates a uvec of [0,1,...,dimy-1]
+  double nu;
+
+  //allocate storage
+  vec alphadraw = zeros<vec>(R/keep);
+  vec Istardraw = zeros<vec>(R/keep);
+  vec adraw = zeros<vec>(R/keep);
+  vec nudraw = zeros<vec>(R/keep);
+  vec vdraw = zeros<vec>(R/keep);
+  List thetaNp1draw(R/keep);
+  imat inddraw = zeros<imat>(R/keep,n);
+
+  //do scaling
+  rowvec dvec, ybar;
+  if(SCALE){
+    dvec = 1/sqrt(var(y,0,0)); //norm_type=0 performs normalisation using N-1, dim=0 is by column
+    ybar = mean(y,0);
+    y.each_row() -= ybar; //subtract ybar from each row
+    y.each_row() %= dvec; //divide each row by dvec
+  } 
+  //note on scaling
+  //we model scaled y, z_i=D(y_i-ybar)   D=diag(1/sigma1, ..., 1/sigma_dimy)
+  
+  //if p_z= 1/R sum(phi(z|mu,Sigma))
+  // p_y=1/R sum(phi(y|D^-1mu+ybar,D^-1SigmaD^-1)
+  // rooti_y=Drooti_z
+  
+  //you might want to use quantiles instead, like median and (10,90)
+
+  // start main iteration loop
+  int mkeep = 0;
+  
+  if(nprint>0) startMcmcTimer();
+
+  for(int rep = 0; rep<R; rep++) {
+    
+    q0v = q0(y,lambda_struct);
+   
+    nunique = thetaStar_vector.size();
+  
+    if(nunique > maxuniq) stop("maximum number of unique thetas exceeded");
+   
+    //ydenmat is a length(thetaStar) x n array of density values given f(y[j,] | thetaStar[[i]]
+    //  note: due to remix step (below) we must recompute ydenmat each time!
+    ydenmat = zeros<mat>(maxuniq,n);
+                     
+    ydenmat(span(0,nunique-1),span::all) = yden(thetaStar_vector,y);
+  
+    thetaStarDrawOut_struct = thetaStarDraw(indic, thetaStar_vector, y, ydenmat, q0v, alpha, lambda_struct, maxuniq);
+    thetaStar_vector = thetaStarDrawOut_struct.thetaStar_vector;
+    indic = thetaStarDrawOut_struct.indic;
+    nunique = thetaStar_vector.size();
+  
+    //thetaNp1 and remix
+    probs = zeros<vec>(nunique+1);
+    for(int j = 0; j < nunique; j++){
+      ind = find(indic == (j+1));
+      indsize = ind.size();
+      probs[j] = indsize/(alpha + n + 0.0);
+      new_utheta_vector[0] = thetaD(y(ind,spanall),lambda_struct);
+      thetaStar_vector[j] = new_utheta_vector[0];
+    }
+                  
+    probs[nunique] = alpha/(alpha+n+0.0);
+    int ind = rmultinomF(probs);
+    int probssize = probs.size();
+    if(ind == probssize) {
+      out_struct = GD(lambda_struct);
+      thetaNp10_struct.mu = out_struct.mu;
+      thetaNp10_struct.rooti = out_struct.rooti;
+      thetaNp1_vector[0] = thetaNp10_struct;
+    } else {
+      out_struct = thetaStar_vector[ind-1];
+      thetaNp10_struct.mu = out_struct.mu;
+      thetaNp10_struct.rooti = out_struct.rooti;
+      thetaNp1_vector[0] = thetaNp10_struct;
+    }
+  
+    //draw alpha
+    alpha = alphaD(priorAlpha_struct,nunique,gridsize);
+  
+    //draw lambda
+    lambda_struct = lambdaD(lambda_struct,thetaStar_vector,lambda_hyper["alim"],lambda_hyper["nulim"],lambda_hyper["vlim"],gridsize);
+  
+    //print time to completion
+    if (nprint>0) if ((rep+1)%nprint==0) infoMcmcTimer(rep, R);
+   
+    //save every keepth draw
+    if((rep+1)%keep==0){
+      mkeep = (rep+1)/keep;
+      alphadraw[mkeep-1] = alpha;
+      Istardraw[mkeep-1] = nunique;
+      adraw[mkeep-1] = lambda_struct.Amu;
+      nu = lambda_struct.nu;
+      nudraw[mkeep-1] = nu;
+      mat V = lambda_struct.V;
+      vdraw[mkeep-1] = V(0,0)/(nu+0.0);
+      inddraw(mkeep-1,span::all) = trans(indic);
+      
+      thetaNp10_struct = thetaNp1_vector[0];
+      if(SCALE){
+        thetaNp10_struct.mu = thetaNp10_struct.mu/trans(dvec)+trans(ybar);
+        thetaNp10_struct.rooti = diagmat(dvec)*thetaNp10_struct.rooti;
+      }
+
+      //here we put the draws into the list of lists of list format useful for finite mixture of normals utilities
+      //we have to convetr to a NumericVector for the plotting functions to work
+      thetaNp1draw[mkeep-1] = List::create(List::create(Named("mu") = NumericVector(thetaNp10_struct.mu.begin(),thetaNp10_struct.mu.end()),Named("rooti") = thetaNp10_struct.rooti));
+     }  
+  }
+
+  if(nprint>0) endMcmcTimer();
+  
+  return List::create(
+    Named("inddraw") = inddraw,
+    Named("thetaNp1draw") = thetaNp1draw,
+    Named("alphadraw") = alphadraw,
+    Named("Istardraw") = Istardraw,
+    Named("adraw") = adraw,
+    Named("nudraw") = nudraw,
+    Named("vdraw") = vdraw);
+}
diff --git a/src/rbprobitGibbs_rcpp_loop.cpp b/src/rbprobitGibbs_rcpp_loop.cpp
new file mode 100644
index 0000000..580624b
--- /dev/null
+++ b/src/rbprobitGibbs_rcpp_loop.cpp
@@ -0,0 +1,55 @@
+#include "bayesm.h"
+ 
+// [[Rcpp::export]]
+List rbprobitGibbs_rcpp_loop(vec const& y, mat const& X, vec const& Abetabar, mat const& root, 
+                        vec beta, vec const& sigma, vec const& a, vec const& b, int R, int keep, int nprint){
+
+// Keunwoo Kim 09/09/2014
+
+// Purpose: draw from posterior for binary probit using Gibbs Sampler
+
+// Arguments:
+//  X is nobs x nvar, y is nobs vector of 0,1
+//  A is nvar x nvar prior preci matrix
+//  betabar is nvar x 1 prior mean
+//  R is number of draws
+//  keep is thinning parameter
+//  nprint - prints the estimated time remaining for every nprint'th draw
+
+// Output: list of betadraws
+ 
+// Model: y = 1 if  w=Xbeta+e>0  e~N(0,1)
+
+// Prior: beta ~ N(betabar,A^-1)
+ 
+  int mkeep;
+  vec mu;
+  vec z;
+
+  int nvar = X.n_cols;
+  
+  mat betadraw(R/keep, nvar);
+  
+  if (nprint>0) startMcmcTimer();
+  
+  //start main iteration loop
+  for (int rep=0; rep<R; rep++){
+    
+    // draw z given beta(i-1)
+    mu = X*beta;
+    z = rtrunVec(mu, sigma, a, b);
+    beta = breg1(root, X, z, Abetabar);
+
+    // print time to completion and draw # every nprint'th draw
+    if (nprint>0) if ((rep+1)%nprint==0) infoMcmcTimer(rep, R);
+    
+    if((rep+1)%keep==0){
+      mkeep = (rep+1)/keep;
+      betadraw(mkeep-1, span::all) = trans(beta);      
+    }
+  }
+  
+  if (nprint>0) endMcmcTimer();
+  
+  return List::create(Named("betadraw") = betadraw);
+}
diff --git a/src/rcppexports.cpp b/src/rcppexports.cpp
new file mode 100644
index 0000000..06d6776
--- /dev/null
+++ b/src/rcppexports.cpp
@@ -0,0 +1,758 @@
+// This file was generated by Rcpp::compileAttributes
+// Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
+
+#include "../inst/include/bayesm.h"
+#include <RcppArmadillo.h>
+#include <Rcpp.h>
+
+using namespace Rcpp;
+
// NOTE(review): everything below is machine-generated by Rcpp::compileAttributes
// from the // [[Rcpp::export]] attributes in src/*.cpp. Do not edit by hand;
// rerun Rcpp::compileAttributes() after changing any exported signature.
// Pattern for each export: (1) a forward declaration of the C++ function, and
// (2) a RcppExport SEXP wrapper that converts each SEXP argument via
// Rcpp::traits::input_parameter, calls the function, and returns Rcpp::wrap(result).
// bayesBLP_rcpp_loop
List bayesBLP_rcpp_loop(bool IV, mat const& X, mat const& Z, vec const& share, int J, int T, mat const& v, int R, vec const& sigmasqR, mat const& A, vec const& theta_hat, vec const& deltabar, mat const& Ad, int nu0, double s0_sq, mat const& VOmega, double ssq, mat const& cand_cov, vec const& theta_bar_initial, vec const& r_initial, double tau_sq_initial, mat const& Omega_initial, vec const& delta_initial, double tol, int keep, int nprint);
RcppExport SEXP bayesm_bayesBLP_rcpp_loop(SEXP IVSEXP, SEXP XSEXP, SEXP ZSEXP, SEXP shareSEXP, SEXP JSEXP, SEXP TSEXP, SEXP vSEXP, SEXP RSEXP, SEXP sigmasqRSEXP, SEXP ASEXP, SEXP theta_hatSEXP, SEXP deltabarSEXP, SEXP AdSEXP, SEXP nu0SEXP, SEXP s0_sqSEXP, SEXP VOmegaSEXP, SEXP ssqSEXP, SEXP cand_covSEXP, SEXP theta_bar_initialSEXP, SEXP r_initialSEXP, SEXP tau_sq_initialSEXP, SEXP Omega_initialSEXP, SEXP delta_initialSEXP, SEXP tolSEXP, SEXP keepSEXP, SEXP nprintSEXP) {
BEGIN_RCPP
    Rcpp::RObject __result;
    Rcpp::RNGScope __rngScope;
    Rcpp::traits::input_parameter< bool >::type IV(IVSEXP);
    Rcpp::traits::input_parameter< mat const& >::type X(XSEXP);
    Rcpp::traits::input_parameter< mat const& >::type Z(ZSEXP);
    Rcpp::traits::input_parameter< vec const& >::type share(shareSEXP);
    Rcpp::traits::input_parameter< int >::type J(JSEXP);
    Rcpp::traits::input_parameter< int >::type T(TSEXP);
    Rcpp::traits::input_parameter< mat const& >::type v(vSEXP);
    Rcpp::traits::input_parameter< int >::type R(RSEXP);
    Rcpp::traits::input_parameter< vec const& >::type sigmasqR(sigmasqRSEXP);
    Rcpp::traits::input_parameter< mat const& >::type A(ASEXP);
    Rcpp::traits::input_parameter< vec const& >::type theta_hat(theta_hatSEXP);
    Rcpp::traits::input_parameter< vec const& >::type deltabar(deltabarSEXP);
    Rcpp::traits::input_parameter< mat const& >::type Ad(AdSEXP);
    Rcpp::traits::input_parameter< int >::type nu0(nu0SEXP);
    Rcpp::traits::input_parameter< double >::type s0_sq(s0_sqSEXP);
    Rcpp::traits::input_parameter< mat const& >::type VOmega(VOmegaSEXP);
    Rcpp::traits::input_parameter< double >::type ssq(ssqSEXP);
    Rcpp::traits::input_parameter< mat const& >::type cand_cov(cand_covSEXP);
    Rcpp::traits::input_parameter< vec const& >::type theta_bar_initial(theta_bar_initialSEXP);
    Rcpp::traits::input_parameter< vec const& >::type r_initial(r_initialSEXP);
    Rcpp::traits::input_parameter< double >::type tau_sq_initial(tau_sq_initialSEXP);
    Rcpp::traits::input_parameter< mat const& >::type Omega_initial(Omega_initialSEXP);
    Rcpp::traits::input_parameter< vec const& >::type delta_initial(delta_initialSEXP);
    Rcpp::traits::input_parameter< double >::type tol(tolSEXP);
    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
    __result = Rcpp::wrap(bayesBLP_rcpp_loop(IV, X, Z, share, J, T, v, R, sigmasqR, A, theta_hat, deltabar, Ad, nu0, s0_sq, VOmega, ssq, cand_cov, theta_bar_initial, r_initial, tau_sq_initial, Omega_initial, delta_initial, tol, keep, nprint));
    return __result;
END_RCPP
}
// breg
vec breg(vec const& y, mat const& X, vec const& betabar, mat const& A);
RcppExport SEXP bayesm_breg(SEXP ySEXP, SEXP XSEXP, SEXP betabarSEXP, SEXP ASEXP) {
BEGIN_RCPP
    Rcpp::RObject __result;
    Rcpp::RNGScope __rngScope;
    Rcpp::traits::input_parameter< vec const& >::type y(ySEXP);
    Rcpp::traits::input_parameter< mat const& >::type X(XSEXP);
    Rcpp::traits::input_parameter< vec const& >::type betabar(betabarSEXP);
    Rcpp::traits::input_parameter< mat const& >::type A(ASEXP);
    __result = Rcpp::wrap(breg(y, X, betabar, A));
    return __result;
END_RCPP
}
// cgetC
vec cgetC(double e, int k);
RcppExport SEXP bayesm_cgetC(SEXP eSEXP, SEXP kSEXP) {
BEGIN_RCPP
    Rcpp::RObject __result;
    Rcpp::RNGScope __rngScope;
    Rcpp::traits::input_parameter< double >::type e(eSEXP);
    Rcpp::traits::input_parameter< int >::type k(kSEXP);
    __result = Rcpp::wrap(cgetC(e, k));
    return __result;
END_RCPP
}
// clusterMix_rcpp_loop
List clusterMix_rcpp_loop(mat const& zdraw, double cutoff, bool SILENT, int nprint);
RcppExport SEXP bayesm_clusterMix_rcpp_loop(SEXP zdrawSEXP, SEXP cutoffSEXP, SEXP SILENTSEXP, SEXP nprintSEXP) {
BEGIN_RCPP
    Rcpp::RObject __result;
    Rcpp::RNGScope __rngScope;
    Rcpp::traits::input_parameter< mat const& >::type zdraw(zdrawSEXP);
    Rcpp::traits::input_parameter< double >::type cutoff(cutoffSEXP);
    Rcpp::traits::input_parameter< bool >::type SILENT(SILENTSEXP);
    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
    __result = Rcpp::wrap(clusterMix_rcpp_loop(zdraw, cutoff, SILENT, nprint));
    return __result;
END_RCPP
}
// ghkvec
vec ghkvec(mat const& L, vec const& trunpt, vec const& above, int r, bool HALTON, vec pn);
RcppExport SEXP bayesm_ghkvec(SEXP LSEXP, SEXP trunptSEXP, SEXP aboveSEXP, SEXP rSEXP, SEXP HALTONSEXP, SEXP pnSEXP) {
BEGIN_RCPP
    Rcpp::RObject __result;
    Rcpp::RNGScope __rngScope;
    Rcpp::traits::input_parameter< mat const& >::type L(LSEXP);
    Rcpp::traits::input_parameter< vec const& >::type trunpt(trunptSEXP);
    Rcpp::traits::input_parameter< vec const& >::type above(aboveSEXP);
    Rcpp::traits::input_parameter< int >::type r(rSEXP);
    Rcpp::traits::input_parameter< bool >::type HALTON(HALTONSEXP);
    Rcpp::traits::input_parameter< vec >::type pn(pnSEXP);
    __result = Rcpp::wrap(ghkvec(L, trunpt, above, r, HALTON, pn));
    return __result;
END_RCPP
}
// llmnl
double llmnl(vec const& beta, vec const& y, mat const& X);
RcppExport SEXP bayesm_llmnl(SEXP betaSEXP, SEXP ySEXP, SEXP XSEXP) {
BEGIN_RCPP
    Rcpp::RObject __result;
    Rcpp::RNGScope __rngScope;
    Rcpp::traits::input_parameter< vec const& >::type beta(betaSEXP);
    Rcpp::traits::input_parameter< vec const& >::type y(ySEXP);
    Rcpp::traits::input_parameter< mat const& >::type X(XSEXP);
    __result = Rcpp::wrap(llmnl(beta, y, X));
    return __result;
END_RCPP
}
// lndIChisq
mat lndIChisq(double nu, double ssq, mat const& X);
RcppExport SEXP bayesm_lndIChisq(SEXP nuSEXP, SEXP ssqSEXP, SEXP XSEXP) {
BEGIN_RCPP
    Rcpp::RObject __result;
    Rcpp::RNGScope __rngScope;
    Rcpp::traits::input_parameter< double >::type nu(nuSEXP);
    Rcpp::traits::input_parameter< double >::type ssq(ssqSEXP);
    Rcpp::traits::input_parameter< mat const& >::type X(XSEXP);
    __result = Rcpp::wrap(lndIChisq(nu, ssq, X));
    return __result;
END_RCPP
}
// lndIWishart
double lndIWishart(double nu, mat const& V, mat const& IW);
RcppExport SEXP bayesm_lndIWishart(SEXP nuSEXP, SEXP VSEXP, SEXP IWSEXP) {
BEGIN_RCPP
    Rcpp::RObject __result;
    Rcpp::RNGScope __rngScope;
    Rcpp::traits::input_parameter< double >::type nu(nuSEXP);
    Rcpp::traits::input_parameter< mat const& >::type V(VSEXP);
    Rcpp::traits::input_parameter< mat const& >::type IW(IWSEXP);
    __result = Rcpp::wrap(lndIWishart(nu, V, IW));
    return __result;
END_RCPP
}
// lndMvn
double lndMvn(vec const& x, vec const& mu, mat const& rooti);
RcppExport SEXP bayesm_lndMvn(SEXP xSEXP, SEXP muSEXP, SEXP rootiSEXP) {
BEGIN_RCPP
    Rcpp::RObject __result;
    Rcpp::RNGScope __rngScope;
    Rcpp::traits::input_parameter< vec const& >::type x(xSEXP);
    Rcpp::traits::input_parameter< vec const& >::type mu(muSEXP);
    Rcpp::traits::input_parameter< mat const& >::type rooti(rootiSEXP);
    __result = Rcpp::wrap(lndMvn(x, mu, rooti));
    return __result;
END_RCPP
}
// lndMvst
double lndMvst(vec const& x, int nu, vec const& mu, mat const& rooti, bool NORMC);
RcppExport SEXP bayesm_lndMvst(SEXP xSEXP, SEXP nuSEXP, SEXP muSEXP, SEXP rootiSEXP, SEXP NORMCSEXP) {
BEGIN_RCPP
    Rcpp::RObject __result;
    Rcpp::RNGScope __rngScope;
    Rcpp::traits::input_parameter< vec const& >::type x(xSEXP);
    Rcpp::traits::input_parameter< int >::type nu(nuSEXP);
    Rcpp::traits::input_parameter< vec const& >::type mu(muSEXP);
    Rcpp::traits::input_parameter< mat const& >::type rooti(rootiSEXP);
    Rcpp::traits::input_parameter< bool >::type NORMC(NORMCSEXP);
    __result = Rcpp::wrap(lndMvst(x, nu, mu, rooti, NORMC));
    return __result;
END_RCPP
}
// rbprobitGibbs_rcpp_loop
List rbprobitGibbs_rcpp_loop(vec const& y, mat const& X, vec const& Abetabar, mat const& root, vec beta, vec const& sigma, vec const& a, vec const& b, int R, int keep, int nprint);
RcppExport SEXP bayesm_rbprobitGibbs_rcpp_loop(SEXP ySEXP, SEXP XSEXP, SEXP AbetabarSEXP, SEXP rootSEXP, SEXP betaSEXP, SEXP sigmaSEXP, SEXP aSEXP, SEXP bSEXP, SEXP RSEXP, SEXP keepSEXP, SEXP nprintSEXP) {
BEGIN_RCPP
    Rcpp::RObject __result;
    Rcpp::RNGScope __rngScope;
    Rcpp::traits::input_parameter< vec const& >::type y(ySEXP);
    Rcpp::traits::input_parameter< mat const& >::type X(XSEXP);
    Rcpp::traits::input_parameter< vec const& >::type Abetabar(AbetabarSEXP);
    Rcpp::traits::input_parameter< mat const& >::type root(rootSEXP);
    Rcpp::traits::input_parameter< vec >::type beta(betaSEXP);
    Rcpp::traits::input_parameter< vec const& >::type sigma(sigmaSEXP);
    Rcpp::traits::input_parameter< vec const& >::type a(aSEXP);
    Rcpp::traits::input_parameter< vec const& >::type b(bSEXP);
    Rcpp::traits::input_parameter< int >::type R(RSEXP);
    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
    __result = Rcpp::wrap(rbprobitGibbs_rcpp_loop(y, X, Abetabar, root, beta, sigma, a, b, R, keep, nprint));
    return __result;
END_RCPP
}
// rdirichlet
vec rdirichlet(vec const& alpha);
RcppExport SEXP bayesm_rdirichlet(SEXP alphaSEXP) {
BEGIN_RCPP
    Rcpp::RObject __result;
    Rcpp::RNGScope __rngScope;
    Rcpp::traits::input_parameter< vec const& >::type alpha(alphaSEXP);
    __result = Rcpp::wrap(rdirichlet(alpha));
    return __result;
END_RCPP
}
// rDPGibbs_rcpp_loop
List rDPGibbs_rcpp_loop(int R, int keep, int nprint, mat y, List const& lambda_hyper, bool SCALE, int maxuniq, List const& PrioralphaList, int gridsize, double BayesmConstantA, int BayesmConstantnuInc, double BayesmConstantDPalpha);
RcppExport SEXP bayesm_rDPGibbs_rcpp_loop(SEXP RSEXP, SEXP keepSEXP, SEXP nprintSEXP, SEXP ySEXP, SEXP lambda_hyperSEXP, SEXP SCALESEXP, SEXP maxuniqSEXP, SEXP PrioralphaListSEXP, SEXP gridsizeSEXP, SEXP BayesmConstantASEXP, SEXP BayesmConstantnuIncSEXP, SEXP BayesmConstantDPalphaSEXP) {
BEGIN_RCPP
    Rcpp::RObject __result;
    Rcpp::RNGScope __rngScope;
    Rcpp::traits::input_parameter< int >::type R(RSEXP);
    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
    Rcpp::traits::input_parameter< mat >::type y(ySEXP);
    Rcpp::traits::input_parameter< List const& >::type lambda_hyper(lambda_hyperSEXP);
    Rcpp::traits::input_parameter< bool >::type SCALE(SCALESEXP);
    Rcpp::traits::input_parameter< int >::type maxuniq(maxuniqSEXP);
    Rcpp::traits::input_parameter< List const& >::type PrioralphaList(PrioralphaListSEXP);
    Rcpp::traits::input_parameter< int >::type gridsize(gridsizeSEXP);
    Rcpp::traits::input_parameter< double >::type BayesmConstantA(BayesmConstantASEXP);
    Rcpp::traits::input_parameter< int >::type BayesmConstantnuInc(BayesmConstantnuIncSEXP);
    Rcpp::traits::input_parameter< double >::type BayesmConstantDPalpha(BayesmConstantDPalphaSEXP);
    __result = Rcpp::wrap(rDPGibbs_rcpp_loop(R, keep, nprint, y, lambda_hyper, SCALE, maxuniq, PrioralphaList, gridsize, BayesmConstantA, BayesmConstantnuInc, BayesmConstantDPalpha));
    return __result;
END_RCPP
}
// rhierLinearMixture_rcpp_loop
List rhierLinearMixture_rcpp_loop(List const& regdata, mat const& Z, vec const& deltabar, mat const& Ad, mat const& mubar, mat const& Amu, int const& nu, mat const& V, int nu_e, vec const& ssq, int R, int keep, int nprint, bool drawdelta, mat olddelta, vec const& a, vec oldprob, vec ind, vec tau);
RcppExport SEXP bayesm_rhierLinearMixture_rcpp_loop(SEXP regdataSEXP, SEXP ZSEXP, SEXP deltabarSEXP, SEXP AdSEXP, SEXP mubarSEXP, SEXP AmuSEXP, SEXP nuSEXP, SEXP VSEXP, SEXP nu_eSEXP, SEXP ssqSEXP, SEXP RSEXP, SEXP keepSEXP, SEXP nprintSEXP, SEXP drawdeltaSEXP, SEXP olddeltaSEXP, SEXP aSEXP, SEXP oldprobSEXP, SEXP indSEXP, SEXP tauSEXP) {
BEGIN_RCPP
    Rcpp::RObject __result;
    Rcpp::RNGScope __rngScope;
    Rcpp::traits::input_parameter< List const& >::type regdata(regdataSEXP);
    Rcpp::traits::input_parameter< mat const& >::type Z(ZSEXP);
    Rcpp::traits::input_parameter< vec const& >::type deltabar(deltabarSEXP);
    Rcpp::traits::input_parameter< mat const& >::type Ad(AdSEXP);
    Rcpp::traits::input_parameter< mat const& >::type mubar(mubarSEXP);
    Rcpp::traits::input_parameter< mat const& >::type Amu(AmuSEXP);
    Rcpp::traits::input_parameter< int const& >::type nu(nuSEXP);
    Rcpp::traits::input_parameter< mat const& >::type V(VSEXP);
    Rcpp::traits::input_parameter< int >::type nu_e(nu_eSEXP);
    Rcpp::traits::input_parameter< vec const& >::type ssq(ssqSEXP);
    Rcpp::traits::input_parameter< int >::type R(RSEXP);
    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
    Rcpp::traits::input_parameter< bool >::type drawdelta(drawdeltaSEXP);
    Rcpp::traits::input_parameter< mat >::type olddelta(olddeltaSEXP);
    Rcpp::traits::input_parameter< vec const& >::type a(aSEXP);
    Rcpp::traits::input_parameter< vec >::type oldprob(oldprobSEXP);
    Rcpp::traits::input_parameter< vec >::type ind(indSEXP);
    Rcpp::traits::input_parameter< vec >::type tau(tauSEXP);
    __result = Rcpp::wrap(rhierLinearMixture_rcpp_loop(regdata, Z, deltabar, Ad, mubar, Amu, nu, V, nu_e, ssq, R, keep, nprint, drawdelta, olddelta, a, oldprob, ind, tau));
    return __result;
END_RCPP
}
// rhierLinearModel_rcpp_loop
List rhierLinearModel_rcpp_loop(List const& regdata, mat const& Z, mat const& Deltabar, mat const& A, int nu, mat const& V, int nu_e, vec const& ssq, vec tau, mat Delta, mat Vbeta, int R, int keep, int nprint);
RcppExport SEXP bayesm_rhierLinearModel_rcpp_loop(SEXP regdataSEXP, SEXP ZSEXP, SEXP DeltabarSEXP, SEXP ASEXP, SEXP nuSEXP, SEXP VSEXP, SEXP nu_eSEXP, SEXP ssqSEXP, SEXP tauSEXP, SEXP DeltaSEXP, SEXP VbetaSEXP, SEXP RSEXP, SEXP keepSEXP, SEXP nprintSEXP) {
BEGIN_RCPP
    Rcpp::RObject __result;
    Rcpp::RNGScope __rngScope;
    Rcpp::traits::input_parameter< List const& >::type regdata(regdataSEXP);
    Rcpp::traits::input_parameter< mat const& >::type Z(ZSEXP);
    Rcpp::traits::input_parameter< mat const& >::type Deltabar(DeltabarSEXP);
    Rcpp::traits::input_parameter< mat const& >::type A(ASEXP);
    Rcpp::traits::input_parameter< int >::type nu(nuSEXP);
    Rcpp::traits::input_parameter< mat const& >::type V(VSEXP);
    Rcpp::traits::input_parameter< int >::type nu_e(nu_eSEXP);
    Rcpp::traits::input_parameter< vec const& >::type ssq(ssqSEXP);
    Rcpp::traits::input_parameter< vec >::type tau(tauSEXP);
    Rcpp::traits::input_parameter< mat >::type Delta(DeltaSEXP);
    Rcpp::traits::input_parameter< mat >::type Vbeta(VbetaSEXP);
    Rcpp::traits::input_parameter< int >::type R(RSEXP);
    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
    __result = Rcpp::wrap(rhierLinearModel_rcpp_loop(regdata, Z, Deltabar, A, nu, V, nu_e, ssq, tau, Delta, Vbeta, R, keep, nprint));
    return __result;
END_RCPP
}
// rhierMnlDP_rcpp_loop
List rhierMnlDP_rcpp_loop(int R, int keep, int nprint, List const& lgtdata, mat const& Z, vec const& deltabar, mat const& Ad, List const& PrioralphaList, List const& lambda_hyper, bool drawdelta, int nvar, mat oldbetas, double s, int maxuniq, int gridsize, double BayesmConstantA, int BayesmConstantnuInc, double BayesmConstantDPalpha);
RcppExport SEXP bayesm_rhierMnlDP_rcpp_loop(SEXP RSEXP, SEXP keepSEXP, SEXP nprintSEXP, SEXP lgtdataSEXP, SEXP ZSEXP, SEXP deltabarSEXP, SEXP AdSEXP, SEXP PrioralphaListSEXP, SEXP lambda_hyperSEXP, SEXP drawdeltaSEXP, SEXP nvarSEXP, SEXP oldbetasSEXP, SEXP sSEXP, SEXP maxuniqSEXP, SEXP gridsizeSEXP, SEXP BayesmConstantASEXP, SEXP BayesmConstantnuIncSEXP, SEXP BayesmConstantDPalphaSEXP) {
BEGIN_RCPP
    Rcpp::RObject __result;
    Rcpp::RNGScope __rngScope;
    Rcpp::traits::input_parameter< int >::type R(RSEXP);
    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
    Rcpp::traits::input_parameter< List const& >::type lgtdata(lgtdataSEXP);
    Rcpp::traits::input_parameter< mat const& >::type Z(ZSEXP);
    Rcpp::traits::input_parameter< vec const& >::type deltabar(deltabarSEXP);
    Rcpp::traits::input_parameter< mat const& >::type Ad(AdSEXP);
    Rcpp::traits::input_parameter< List const& >::type PrioralphaList(PrioralphaListSEXP);
    Rcpp::traits::input_parameter< List const& >::type lambda_hyper(lambda_hyperSEXP);
    Rcpp::traits::input_parameter< bool >::type drawdelta(drawdeltaSEXP);
    Rcpp::traits::input_parameter< int >::type nvar(nvarSEXP);
    Rcpp::traits::input_parameter< mat >::type oldbetas(oldbetasSEXP);
    Rcpp::traits::input_parameter< double >::type s(sSEXP);
    Rcpp::traits::input_parameter< int >::type maxuniq(maxuniqSEXP);
    Rcpp::traits::input_parameter< int >::type gridsize(gridsizeSEXP);
    Rcpp::traits::input_parameter< double >::type BayesmConstantA(BayesmConstantASEXP);
    Rcpp::traits::input_parameter< int >::type BayesmConstantnuInc(BayesmConstantnuIncSEXP);
    Rcpp::traits::input_parameter< double >::type BayesmConstantDPalpha(BayesmConstantDPalphaSEXP);
    __result = Rcpp::wrap(rhierMnlDP_rcpp_loop(R, keep, nprint, lgtdata, Z, deltabar, Ad, PrioralphaList, lambda_hyper, drawdelta, nvar, oldbetas, s, maxuniq, gridsize, BayesmConstantA, BayesmConstantnuInc, BayesmConstantDPalpha));
    return __result;
END_RCPP
}
// rhierMnlRwMixture_rcpp_loop
List rhierMnlRwMixture_rcpp_loop(List const& lgtdata, mat const& Z, vec const& deltabar, mat const& Ad, mat const& mubar, mat const& Amu, int const& nu, mat const& V, double s, int R, int keep, int nprint, bool drawdelta, mat olddelta, vec const& a, vec oldprob, mat oldbetas, vec ind);
RcppExport SEXP bayesm_rhierMnlRwMixture_rcpp_loop(SEXP lgtdataSEXP, SEXP ZSEXP, SEXP deltabarSEXP, SEXP AdSEXP, SEXP mubarSEXP, SEXP AmuSEXP, SEXP nuSEXP, SEXP VSEXP, SEXP sSEXP, SEXP RSEXP, SEXP keepSEXP, SEXP nprintSEXP, SEXP drawdeltaSEXP, SEXP olddeltaSEXP, SEXP aSEXP, SEXP oldprobSEXP, SEXP oldbetasSEXP, SEXP indSEXP) {
BEGIN_RCPP
    Rcpp::RObject __result;
    Rcpp::RNGScope __rngScope;
    Rcpp::traits::input_parameter< List const& >::type lgtdata(lgtdataSEXP);
    Rcpp::traits::input_parameter< mat const& >::type Z(ZSEXP);
    Rcpp::traits::input_parameter< vec const& >::type deltabar(deltabarSEXP);
    Rcpp::traits::input_parameter< mat const& >::type Ad(AdSEXP);
    Rcpp::traits::input_parameter< mat const& >::type mubar(mubarSEXP);
    Rcpp::traits::input_parameter< mat const& >::type Amu(AmuSEXP);
    Rcpp::traits::input_parameter< int const& >::type nu(nuSEXP);
    Rcpp::traits::input_parameter< mat const& >::type V(VSEXP);
    Rcpp::traits::input_parameter< double >::type s(sSEXP);
    Rcpp::traits::input_parameter< int >::type R(RSEXP);
    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
    Rcpp::traits::input_parameter< bool >::type drawdelta(drawdeltaSEXP);
    Rcpp::traits::input_parameter< mat >::type olddelta(olddeltaSEXP);
    Rcpp::traits::input_parameter< vec const& >::type a(aSEXP);
    Rcpp::traits::input_parameter< vec >::type oldprob(oldprobSEXP);
    Rcpp::traits::input_parameter< mat >::type oldbetas(oldbetasSEXP);
    Rcpp::traits::input_parameter< vec >::type ind(indSEXP);
    __result = Rcpp::wrap(rhierMnlRwMixture_rcpp_loop(lgtdata, Z, deltabar, Ad, mubar, Amu, nu, V, s, R, keep, nprint, drawdelta, olddelta, a, oldprob, oldbetas, ind));
    return __result;
END_RCPP
}
// rhierNegbinRw_rcpp_loop
List rhierNegbinRw_rcpp_loop(List const& regdata, List const& hessdata, mat const& Z, mat Beta, mat Delta, mat const& Deltabar, mat const& Adelta, int nu, mat const& V, double a, double b, int R, int keep, double sbeta, double alphacroot, int nprint, mat rootA, double alpha, bool fixalpha);
RcppExport SEXP bayesm_rhierNegbinRw_rcpp_loop(SEXP regdataSEXP, SEXP hessdataSEXP, SEXP ZSEXP, SEXP BetaSEXP, SEXP DeltaSEXP, SEXP DeltabarSEXP, SEXP AdeltaSEXP, SEXP nuSEXP, SEXP VSEXP, SEXP aSEXP, SEXP bSEXP, SEXP RSEXP, SEXP keepSEXP, SEXP sbetaSEXP, SEXP alphacrootSEXP, SEXP nprintSEXP, SEXP rootASEXP, SEXP alphaSEXP, SEXP fixalphaSEXP) {
BEGIN_RCPP
    Rcpp::RObject __result;
    Rcpp::RNGScope __rngScope;
    Rcpp::traits::input_parameter< List const& >::type regdata(regdataSEXP);
    Rcpp::traits::input_parameter< List const& >::type hessdata(hessdataSEXP);
    Rcpp::traits::input_parameter< mat const& >::type Z(ZSEXP);
    Rcpp::traits::input_parameter< mat >::type Beta(BetaSEXP);
    Rcpp::traits::input_parameter< mat >::type Delta(DeltaSEXP);
    Rcpp::traits::input_parameter< mat const& >::type Deltabar(DeltabarSEXP);
    Rcpp::traits::input_parameter< mat const& >::type Adelta(AdeltaSEXP);
    Rcpp::traits::input_parameter< int >::type nu(nuSEXP);
    Rcpp::traits::input_parameter< mat const& >::type V(VSEXP);
    Rcpp::traits::input_parameter< double >::type a(aSEXP);
    Rcpp::traits::input_parameter< double >::type b(bSEXP);
    Rcpp::traits::input_parameter< int >::type R(RSEXP);
    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
    Rcpp::traits::input_parameter< double >::type sbeta(sbetaSEXP);
    Rcpp::traits::input_parameter< double >::type alphacroot(alphacrootSEXP);
    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
    Rcpp::traits::input_parameter< mat >::type rootA(rootASEXP);
    Rcpp::traits::input_parameter< double >::type alpha(alphaSEXP);
    Rcpp::traits::input_parameter< bool >::type fixalpha(fixalphaSEXP);
    __result = Rcpp::wrap(rhierNegbinRw_rcpp_loop(regdata, hessdata, Z, Beta, Delta, Deltabar, Adelta, nu, V, a, b, R, keep, sbeta, alphacroot, nprint, rootA, alpha, fixalpha));
    return __result;
END_RCPP
}
+// rivDP_rcpp_loop
+List rivDP_rcpp_loop(int R, int keep, int nprint, int dimd, vec const& mbg, mat const& Abg, vec const& md, mat const& Ad, vec const& y, bool isgamma, mat const& z, vec const& x, mat const& w, vec delta, List const& PrioralphaList, int gridsize, bool SCALE, int maxuniq, double scalex, double scaley, List const& lambda_hyper, double BayesmConstantA, int BayesmConstantnu);
+RcppExport SEXP bayesm_rivDP_rcpp_loop(SEXP RSEXP, SEXP keepSEXP, SEXP nprintSEXP, SEXP dimdSEXP, SEXP mbgSEXP, SEXP AbgSEXP, SEXP mdSEXP, SEXP AdSEXP, SEXP ySEXP, SEXP isgammaSEXP, SEXP zSEXP, SEXP xSEXP, SEXP wSEXP, SEXP deltaSEXP, SEXP PrioralphaListSEXP, SEXP gridsizeSEXP, SEXP SCALESEXP, SEXP maxuniqSEXP, SEXP scalexSEXP, SEXP scaleySEXP, SEXP lambda_hyperSEXP, SEXP BayesmConstantASEXP, SEXP BayesmConstantnuSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< int >::type R(RSEXP);
+    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
+    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
+    Rcpp::traits::input_parameter< int >::type dimd(dimdSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type mbg(mbgSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type Abg(AbgSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type md(mdSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type Ad(AdSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type y(ySEXP);
+    Rcpp::traits::input_parameter< bool >::type isgamma(isgammaSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type z(zSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type x(xSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type w(wSEXP);
+    Rcpp::traits::input_parameter< vec >::type delta(deltaSEXP);
+    Rcpp::traits::input_parameter< List const& >::type PrioralphaList(PrioralphaListSEXP);
+    Rcpp::traits::input_parameter< int >::type gridsize(gridsizeSEXP);
+    Rcpp::traits::input_parameter< bool >::type SCALE(SCALESEXP);
+    Rcpp::traits::input_parameter< int >::type maxuniq(maxuniqSEXP);
+    Rcpp::traits::input_parameter< double >::type scalex(scalexSEXP);
+    Rcpp::traits::input_parameter< double >::type scaley(scaleySEXP);
+    Rcpp::traits::input_parameter< List const& >::type lambda_hyper(lambda_hyperSEXP);
+    Rcpp::traits::input_parameter< double >::type BayesmConstantA(BayesmConstantASEXP);
+    Rcpp::traits::input_parameter< int >::type BayesmConstantnu(BayesmConstantnuSEXP);
+    __result = Rcpp::wrap(rivDP_rcpp_loop(R, keep, nprint, dimd, mbg, Abg, md, Ad, y, isgamma, z, x, w, delta, PrioralphaList, gridsize, SCALE, maxuniq, scalex, scaley, lambda_hyper, BayesmConstantA, BayesmConstantnu));
+    return __result;
+END_RCPP
+}
+// rivGibbs_rcpp_loop
+List rivGibbs_rcpp_loop(vec const& y, vec const& x, mat const& z, mat const& w, vec const& mbg, mat const& Abg, vec const& md, mat const& Ad, mat const& V, int nu, int R, int keep, int nprint);
+RcppExport SEXP bayesm_rivGibbs_rcpp_loop(SEXP ySEXP, SEXP xSEXP, SEXP zSEXP, SEXP wSEXP, SEXP mbgSEXP, SEXP AbgSEXP, SEXP mdSEXP, SEXP AdSEXP, SEXP VSEXP, SEXP nuSEXP, SEXP RSEXP, SEXP keepSEXP, SEXP nprintSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< vec const& >::type y(ySEXP);
+    Rcpp::traits::input_parameter< vec const& >::type x(xSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type z(zSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type w(wSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type mbg(mbgSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type Abg(AbgSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type md(mdSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type Ad(AdSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type V(VSEXP);
+    Rcpp::traits::input_parameter< int >::type nu(nuSEXP);
+    Rcpp::traits::input_parameter< int >::type R(RSEXP);
+    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
+    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
+    __result = Rcpp::wrap(rivGibbs_rcpp_loop(y, x, z, w, mbg, Abg, md, Ad, V, nu, R, keep, nprint));
+    return __result;
+END_RCPP
+}
+// rmixGibbs
+List rmixGibbs(mat const& y, mat const& Bbar, mat const& A, int nu, mat const& V, vec const& a, vec const& p, vec const& z);
+RcppExport SEXP bayesm_rmixGibbs(SEXP ySEXP, SEXP BbarSEXP, SEXP ASEXP, SEXP nuSEXP, SEXP VSEXP, SEXP aSEXP, SEXP pSEXP, SEXP zSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< mat const& >::type y(ySEXP);
+    Rcpp::traits::input_parameter< mat const& >::type Bbar(BbarSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type A(ASEXP);
+    Rcpp::traits::input_parameter< int >::type nu(nuSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type V(VSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type a(aSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type p(pSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type z(zSEXP);
+    __result = Rcpp::wrap(rmixGibbs(y, Bbar, A, nu, V, a, p, z));
+    return __result;
+END_RCPP
+}
+// rmixture
+List rmixture(int n, vec pvec, List comps);
+RcppExport SEXP bayesm_rmixture(SEXP nSEXP, SEXP pvecSEXP, SEXP compsSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< int >::type n(nSEXP);
+    Rcpp::traits::input_parameter< vec >::type pvec(pvecSEXP);
+    Rcpp::traits::input_parameter< List >::type comps(compsSEXP);
+    __result = Rcpp::wrap(rmixture(n, pvec, comps));
+    return __result;
+END_RCPP
+}
+// rmnlIndepMetrop_rcpp_loop
+List rmnlIndepMetrop_rcpp_loop(int R, int keep, int nu, vec const& betastar, mat const& root, vec const& y, mat const& X, vec const& betabar, mat const& rootpi, mat const& rooti, double oldlimp, double oldlpost, int nprint);
+RcppExport SEXP bayesm_rmnlIndepMetrop_rcpp_loop(SEXP RSEXP, SEXP keepSEXP, SEXP nuSEXP, SEXP betastarSEXP, SEXP rootSEXP, SEXP ySEXP, SEXP XSEXP, SEXP betabarSEXP, SEXP rootpiSEXP, SEXP rootiSEXP, SEXP oldlimpSEXP, SEXP oldlpostSEXP, SEXP nprintSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< int >::type R(RSEXP);
+    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
+    Rcpp::traits::input_parameter< int >::type nu(nuSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type betastar(betastarSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type root(rootSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type y(ySEXP);
+    Rcpp::traits::input_parameter< mat const& >::type X(XSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type betabar(betabarSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type rootpi(rootpiSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type rooti(rootiSEXP);
+    Rcpp::traits::input_parameter< double >::type oldlimp(oldlimpSEXP);
+    Rcpp::traits::input_parameter< double >::type oldlpost(oldlpostSEXP);
+    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
+    __result = Rcpp::wrap(rmnlIndepMetrop_rcpp_loop(R, keep, nu, betastar, root, y, X, betabar, rootpi, rooti, oldlimp, oldlpost, nprint));
+    return __result;
+END_RCPP
+}
+// rmnpGibbs_rcpp_loop
+List rmnpGibbs_rcpp_loop(int R, int keep, int nprint, int pm1, ivec const& y, mat const& X, vec const& beta0, mat const& sigma0, mat const& V, int nu, vec const& betabar, mat const& A);
+RcppExport SEXP bayesm_rmnpGibbs_rcpp_loop(SEXP RSEXP, SEXP keepSEXP, SEXP nprintSEXP, SEXP pm1SEXP, SEXP ySEXP, SEXP XSEXP, SEXP beta0SEXP, SEXP sigma0SEXP, SEXP VSEXP, SEXP nuSEXP, SEXP betabarSEXP, SEXP ASEXP) {
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< int >::type R(RSEXP);
+    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
+    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
+    Rcpp::traits::input_parameter< int >::type pm1(pm1SEXP);
+    Rcpp::traits::input_parameter< ivec const& >::type y(ySEXP);
+    Rcpp::traits::input_parameter< mat const& >::type X(XSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type beta0(beta0SEXP);
+    Rcpp::traits::input_parameter< mat const& >::type sigma0(sigma0SEXP);
+    Rcpp::traits::input_parameter< mat const& >::type V(VSEXP);
+    Rcpp::traits::input_parameter< int >::type nu(nuSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type betabar(betabarSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type A(ASEXP);
+    __result = Rcpp::wrap(rmnpGibbs_rcpp_loop(R, keep, nprint, pm1, y, X, beta0, sigma0, V, nu, betabar, A));
+    return __result;
+END_RCPP
+}
+// rmultireg
+List rmultireg(mat const& Y, mat const& X, mat const& Bbar, mat const& A, int nu, mat const& V);
+RcppExport SEXP bayesm_rmultireg(SEXP YSEXP, SEXP XSEXP, SEXP BbarSEXP, SEXP ASEXP, SEXP nuSEXP, SEXP VSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< mat const& >::type Y(YSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type X(XSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type Bbar(BbarSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type A(ASEXP);
+    Rcpp::traits::input_parameter< int >::type nu(nuSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type V(VSEXP);
+    __result = Rcpp::wrap(rmultireg(Y, X, Bbar, A, nu, V));
+    return __result;
+END_RCPP
+}
+// rmvpGibbs_rcpp_loop
+List rmvpGibbs_rcpp_loop(int R, int keep, int nprint, int p, ivec const& y, mat const& X, vec const& beta0, mat const& sigma0, mat const& V, int nu, vec const& betabar, mat const& A);
+RcppExport SEXP bayesm_rmvpGibbs_rcpp_loop(SEXP RSEXP, SEXP keepSEXP, SEXP nprintSEXP, SEXP pSEXP, SEXP ySEXP, SEXP XSEXP, SEXP beta0SEXP, SEXP sigma0SEXP, SEXP VSEXP, SEXP nuSEXP, SEXP betabarSEXP, SEXP ASEXP) {
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< int >::type R(RSEXP);
+    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
+    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
+    Rcpp::traits::input_parameter< int >::type p(pSEXP);
+    Rcpp::traits::input_parameter< ivec const& >::type y(ySEXP);
+    Rcpp::traits::input_parameter< mat const& >::type X(XSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type beta0(beta0SEXP);
+    Rcpp::traits::input_parameter< mat const& >::type sigma0(sigma0SEXP);
+    Rcpp::traits::input_parameter< mat const& >::type V(VSEXP);
+    Rcpp::traits::input_parameter< int >::type nu(nuSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type betabar(betabarSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type A(ASEXP);
+    __result = Rcpp::wrap(rmvpGibbs_rcpp_loop(R, keep, nprint, p, y, X, beta0, sigma0, V, nu, betabar, A));
+    return __result;
+END_RCPP
+}
+// rmvst
+vec rmvst(int nu, vec const& mu, mat const& root);
+RcppExport SEXP bayesm_rmvst(SEXP nuSEXP, SEXP muSEXP, SEXP rootSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< int >::type nu(nuSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type mu(muSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type root(rootSEXP);
+    __result = Rcpp::wrap(rmvst(nu, mu, root));
+    return __result;
+END_RCPP
+}
+// rnegbinRw_rcpp_loop
+List rnegbinRw_rcpp_loop(vec const& y, mat const& X, vec const& betabar, mat const& rootA, double a, double b, vec beta, double alpha, bool fixalpha, mat const& betaroot, double const& alphacroot, int R, int keep, int nprint);
+RcppExport SEXP bayesm_rnegbinRw_rcpp_loop(SEXP ySEXP, SEXP XSEXP, SEXP betabarSEXP, SEXP rootASEXP, SEXP aSEXP, SEXP bSEXP, SEXP betaSEXP, SEXP alphaSEXP, SEXP fixalphaSEXP, SEXP betarootSEXP, SEXP alphacrootSEXP, SEXP RSEXP, SEXP keepSEXP, SEXP nprintSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< vec const& >::type y(ySEXP);
+    Rcpp::traits::input_parameter< mat const& >::type X(XSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type betabar(betabarSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type rootA(rootASEXP);
+    Rcpp::traits::input_parameter< double >::type a(aSEXP);
+    Rcpp::traits::input_parameter< double >::type b(bSEXP);
+    Rcpp::traits::input_parameter< vec >::type beta(betaSEXP);
+    Rcpp::traits::input_parameter< double >::type alpha(alphaSEXP);
+    Rcpp::traits::input_parameter< bool >::type fixalpha(fixalphaSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type betaroot(betarootSEXP);
+    Rcpp::traits::input_parameter< double const& >::type alphacroot(alphacrootSEXP);
+    Rcpp::traits::input_parameter< int >::type R(RSEXP);
+    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
+    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
+    __result = Rcpp::wrap(rnegbinRw_rcpp_loop(y, X, betabar, rootA, a, b, beta, alpha, fixalpha, betaroot, alphacroot, R, keep, nprint));
+    return __result;
+END_RCPP
+}
+// rnmixGibbs_rcpp_loop
+List rnmixGibbs_rcpp_loop(mat const& y, mat const& Mubar, mat const& A, int nu, mat const& V, vec const& a, vec p, vec z, int const& R, int const& keep, int const& nprint);
+RcppExport SEXP bayesm_rnmixGibbs_rcpp_loop(SEXP ySEXP, SEXP MubarSEXP, SEXP ASEXP, SEXP nuSEXP, SEXP VSEXP, SEXP aSEXP, SEXP pSEXP, SEXP zSEXP, SEXP RSEXP, SEXP keepSEXP, SEXP nprintSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< mat const& >::type y(ySEXP);
+    Rcpp::traits::input_parameter< mat const& >::type Mubar(MubarSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type A(ASEXP);
+    Rcpp::traits::input_parameter< int >::type nu(nuSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type V(VSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type a(aSEXP);
+    Rcpp::traits::input_parameter< vec >::type p(pSEXP);
+    Rcpp::traits::input_parameter< vec >::type z(zSEXP);
+    Rcpp::traits::input_parameter< int const& >::type R(RSEXP);
+    Rcpp::traits::input_parameter< int const& >::type keep(keepSEXP);
+    Rcpp::traits::input_parameter< int const& >::type nprint(nprintSEXP);
+    __result = Rcpp::wrap(rnmixGibbs_rcpp_loop(y, Mubar, A, nu, V, a, p, z, R, keep, nprint));
+    return __result;
+END_RCPP
+}
+// rordprobitGibbs_rcpp_loop
+List rordprobitGibbs_rcpp_loop(vec const& y, mat const& X, int k, mat const& A, vec const& betabar, mat const& Ad, double s, mat const& inc_root, vec const& dstarbar, vec const& betahat, int R, int keep, int nprint);
+RcppExport SEXP bayesm_rordprobitGibbs_rcpp_loop(SEXP ySEXP, SEXP XSEXP, SEXP kSEXP, SEXP ASEXP, SEXP betabarSEXP, SEXP AdSEXP, SEXP sSEXP, SEXP inc_rootSEXP, SEXP dstarbarSEXP, SEXP betahatSEXP, SEXP RSEXP, SEXP keepSEXP, SEXP nprintSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< vec const& >::type y(ySEXP);
+    Rcpp::traits::input_parameter< mat const& >::type X(XSEXP);
+    Rcpp::traits::input_parameter< int >::type k(kSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type A(ASEXP);
+    Rcpp::traits::input_parameter< vec const& >::type betabar(betabarSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type Ad(AdSEXP);
+    Rcpp::traits::input_parameter< double >::type s(sSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type inc_root(inc_rootSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type dstarbar(dstarbarSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type betahat(betahatSEXP);
+    Rcpp::traits::input_parameter< int >::type R(RSEXP);
+    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
+    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
+    __result = Rcpp::wrap(rordprobitGibbs_rcpp_loop(y, X, k, A, betabar, Ad, s, inc_root, dstarbar, betahat, R, keep, nprint));
+    return __result;
+END_RCPP
+}
+// rscaleUsage_rcpp_loop
+List rscaleUsage_rcpp_loop(int k, mat const& x, int p, int n, int R, int keep, int ndghk, int nprint, mat y, vec mu, mat Sigma, vec tau, vec sigma, mat Lambda, double e, bool domu, bool doSigma, bool dosigma, bool dotau, bool doLambda, bool doe, int nu, mat const& V, mat const& mubar, mat const& Am, vec const& gsigma, vec const& gl11, vec const& gl22, vec const& gl12, int nuL, mat const& VL, vec const& ge);
+RcppExport SEXP bayesm_rscaleUsage_rcpp_loop(SEXP kSEXP, SEXP xSEXP, SEXP pSEXP, SEXP nSEXP, SEXP RSEXP, SEXP keepSEXP, SEXP ndghkSEXP, SEXP nprintSEXP, SEXP ySEXP, SEXP muSEXP, SEXP SigmaSEXP, SEXP tauSEXP, SEXP sigmaSEXP, SEXP LambdaSEXP, SEXP eSEXP, SEXP domuSEXP, SEXP doSigmaSEXP, SEXP dosigmaSEXP, SEXP dotauSEXP, SEXP doLambdaSEXP, SEXP doeSEXP, SEXP nuSEXP, SEXP VSEXP, SEXP mubarSEXP, SEXP AmSEXP, SEXP gsigmaSEXP, SEXP gl11SEXP, SEXP gl22SEXP, SEXP gl12SEXP, SEXP nuLSEXP, SEXP VLSE [...]
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< int >::type k(kSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type x(xSEXP);
+    Rcpp::traits::input_parameter< int >::type p(pSEXP);
+    Rcpp::traits::input_parameter< int >::type n(nSEXP);
+    Rcpp::traits::input_parameter< int >::type R(RSEXP);
+    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
+    Rcpp::traits::input_parameter< int >::type ndghk(ndghkSEXP);
+    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
+    Rcpp::traits::input_parameter< mat >::type y(ySEXP);
+    Rcpp::traits::input_parameter< vec >::type mu(muSEXP);
+    Rcpp::traits::input_parameter< mat >::type Sigma(SigmaSEXP);
+    Rcpp::traits::input_parameter< vec >::type tau(tauSEXP);
+    Rcpp::traits::input_parameter< vec >::type sigma(sigmaSEXP);
+    Rcpp::traits::input_parameter< mat >::type Lambda(LambdaSEXP);
+    Rcpp::traits::input_parameter< double >::type e(eSEXP);
+    Rcpp::traits::input_parameter< bool >::type domu(domuSEXP);
+    Rcpp::traits::input_parameter< bool >::type doSigma(doSigmaSEXP);
+    Rcpp::traits::input_parameter< bool >::type dosigma(dosigmaSEXP);
+    Rcpp::traits::input_parameter< bool >::type dotau(dotauSEXP);
+    Rcpp::traits::input_parameter< bool >::type doLambda(doLambdaSEXP);
+    Rcpp::traits::input_parameter< bool >::type doe(doeSEXP);
+    Rcpp::traits::input_parameter< int >::type nu(nuSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type V(VSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type mubar(mubarSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type Am(AmSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type gsigma(gsigmaSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type gl11(gl11SEXP);
+    Rcpp::traits::input_parameter< vec const& >::type gl22(gl22SEXP);
+    Rcpp::traits::input_parameter< vec const& >::type gl12(gl12SEXP);
+    Rcpp::traits::input_parameter< int >::type nuL(nuLSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type VL(VLSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type ge(geSEXP);
+    __result = Rcpp::wrap(rscaleUsage_rcpp_loop(k, x, p, n, R, keep, ndghk, nprint, y, mu, Sigma, tau, sigma, Lambda, e, domu, doSigma, dosigma, dotau, doLambda, doe, nu, V, mubar, Am, gsigma, gl11, gl22, gl12, nuL, VL, ge));
+    return __result;
+END_RCPP
+}
+// rsurGibbs_rcpp_loop
+List rsurGibbs_rcpp_loop(List const& regdata, vec const& indreg, vec const& cumnk, vec const& nk, mat const& XspXs, mat Sigmainv, mat const& A, vec const& Abetabar, int nu, mat const& V, int nvar, mat E, mat const& Y, int R, int keep, int nprint);
+RcppExport SEXP bayesm_rsurGibbs_rcpp_loop(SEXP regdataSEXP, SEXP indregSEXP, SEXP cumnkSEXP, SEXP nkSEXP, SEXP XspXsSEXP, SEXP SigmainvSEXP, SEXP ASEXP, SEXP AbetabarSEXP, SEXP nuSEXP, SEXP VSEXP, SEXP nvarSEXP, SEXP ESEXP, SEXP YSEXP, SEXP RSEXP, SEXP keepSEXP, SEXP nprintSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< List const& >::type regdata(regdataSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type indreg(indregSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type cumnk(cumnkSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type nk(nkSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type XspXs(XspXsSEXP);
+    Rcpp::traits::input_parameter< mat >::type Sigmainv(SigmainvSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type A(ASEXP);
+    Rcpp::traits::input_parameter< vec const& >::type Abetabar(AbetabarSEXP);
+    Rcpp::traits::input_parameter< int >::type nu(nuSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type V(VSEXP);
+    Rcpp::traits::input_parameter< int >::type nvar(nvarSEXP);
+    Rcpp::traits::input_parameter< mat >::type E(ESEXP);
+    Rcpp::traits::input_parameter< mat const& >::type Y(YSEXP);
+    Rcpp::traits::input_parameter< int >::type R(RSEXP);
+    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
+    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
+    __result = Rcpp::wrap(rsurGibbs_rcpp_loop(regdata, indreg, cumnk, nk, XspXs, Sigmainv, A, Abetabar, nu, V, nvar, E, Y, R, keep, nprint));
+    return __result;
+END_RCPP
+}
+// rtrun
+NumericVector rtrun(NumericVector const& mu, NumericVector const& sigma, NumericVector const& a, NumericVector const& b);
+RcppExport SEXP bayesm_rtrun(SEXP muSEXP, SEXP sigmaSEXP, SEXP aSEXP, SEXP bSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< NumericVector const& >::type mu(muSEXP);
+    Rcpp::traits::input_parameter< NumericVector const& >::type sigma(sigmaSEXP);
+    Rcpp::traits::input_parameter< NumericVector const& >::type a(aSEXP);
+    Rcpp::traits::input_parameter< NumericVector const& >::type b(bSEXP);
+    __result = Rcpp::wrap(rtrun(mu, sigma, a, b));
+    return __result;
+END_RCPP
+}
+// runireg_rcpp_loop
+List runireg_rcpp_loop(vec const& y, mat const& X, vec const& betabar, mat const& A, int nu, double ssq, int R, int keep, int nprint);
+RcppExport SEXP bayesm_runireg_rcpp_loop(SEXP ySEXP, SEXP XSEXP, SEXP betabarSEXP, SEXP ASEXP, SEXP nuSEXP, SEXP ssqSEXP, SEXP RSEXP, SEXP keepSEXP, SEXP nprintSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< vec const& >::type y(ySEXP);
+    Rcpp::traits::input_parameter< mat const& >::type X(XSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type betabar(betabarSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type A(ASEXP);
+    Rcpp::traits::input_parameter< int >::type nu(nuSEXP);
+    Rcpp::traits::input_parameter< double >::type ssq(ssqSEXP);
+    Rcpp::traits::input_parameter< int >::type R(RSEXP);
+    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
+    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
+    __result = Rcpp::wrap(runireg_rcpp_loop(y, X, betabar, A, nu, ssq, R, keep, nprint));
+    return __result;
+END_RCPP
+}
+// runiregGibbs_rcpp_loop
+List runiregGibbs_rcpp_loop(vec const& y, mat const& X, vec const& betabar, mat const& A, int nu, double ssq, double sigmasq, int R, int keep, int nprint);
+RcppExport SEXP bayesm_runiregGibbs_rcpp_loop(SEXP ySEXP, SEXP XSEXP, SEXP betabarSEXP, SEXP ASEXP, SEXP nuSEXP, SEXP ssqSEXP, SEXP sigmasqSEXP, SEXP RSEXP, SEXP keepSEXP, SEXP nprintSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< vec const& >::type y(ySEXP);
+    Rcpp::traits::input_parameter< mat const& >::type X(XSEXP);
+    Rcpp::traits::input_parameter< vec const& >::type betabar(betabarSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type A(ASEXP);
+    Rcpp::traits::input_parameter< int >::type nu(nuSEXP);
+    Rcpp::traits::input_parameter< double >::type ssq(ssqSEXP);
+    Rcpp::traits::input_parameter< double >::type sigmasq(sigmasqSEXP);
+    Rcpp::traits::input_parameter< int >::type R(RSEXP);
+    Rcpp::traits::input_parameter< int >::type keep(keepSEXP);
+    Rcpp::traits::input_parameter< int >::type nprint(nprintSEXP);
+    __result = Rcpp::wrap(runiregGibbs_rcpp_loop(y, X, betabar, A, nu, ssq, sigmasq, R, keep, nprint));
+    return __result;
+END_RCPP
+}
+// rwishart
+List rwishart(int const& nu, mat const& V);
+RcppExport SEXP bayesm_rwishart(SEXP nuSEXP, SEXP VSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< int const& >::type nu(nuSEXP);
+    Rcpp::traits::input_parameter< mat const& >::type V(VSEXP);
+    __result = Rcpp::wrap(rwishart(nu, V));
+    return __result;
+END_RCPP
+}
+// callroot
+vec callroot(vec const& c1, vec const& c2, double tol, int iterlim);
+RcppExport SEXP bayesm_callroot(SEXP c1SEXP, SEXP c2SEXP, SEXP tolSEXP, SEXP iterlimSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject __result;
+    Rcpp::RNGScope __rngScope;
+    Rcpp::traits::input_parameter< vec const& >::type c1(c1SEXP);
+    Rcpp::traits::input_parameter< vec const& >::type c2(c2SEXP);
+    Rcpp::traits::input_parameter< double >::type tol(tolSEXP);
+    Rcpp::traits::input_parameter< int >::type iterlim(iterlimSEXP);
+    __result = Rcpp::wrap(callroot(c1, c2, tol, iterlim));
+    return __result;
+END_RCPP
+}
diff --git a/src/rdirichlet_rcpp.cpp b/src/rdirichlet_rcpp.cpp
new file mode 100644
index 0000000..b06e28a
--- /dev/null
+++ b/src/rdirichlet_rcpp.cpp
@@ -0,0 +1,19 @@
+#include "bayesm.h"
+ 
+// [[Rcpp::export]]
+vec rdirichlet(vec const& alpha){
+  
+// Wayne Taylor 4/7/2015
+
+// Purpose:
+// draw from Dirichlet(alpha)
+
+  int dim = alpha.size();
+  vec y = zeros<vec>(dim);
+  
+  for(int i = 0; i<dim; i++) {    
+      y[i] = rgamma(1,alpha[i])[0]; //rgamma returns a NumericVector, so adding [0] extracts the first element and treats it as type "double"
+    }
+  
+  return(y/sum(y));
+}
diff --git a/src/rhierLinearMixture_rcpp_loop.cpp b/src/rhierLinearMixture_rcpp_loop.cpp
new file mode 100644
index 0000000..4cd2bd0
--- /dev/null
+++ b/src/rhierLinearMixture_rcpp_loop.cpp
@@ -0,0 +1,120 @@
+#include "bayesm.h"
+ 
+//[[Rcpp::export]]
+List rhierLinearMixture_rcpp_loop(List const& regdata, mat const& Z,
+                                  vec const& deltabar, mat const& Ad, mat const& mubar, mat const& Amu,
+                                  int const& nu, mat const& V, int nu_e, vec const& ssq,
+                                  int R, int keep, int nprint, bool drawdelta,
+                                  mat olddelta,  vec const& a, vec oldprob, vec ind, vec tau){
+
+// Wayne Taylor 10/02/2014
+
+  int nreg = regdata.size();
+  int nvar = V.n_cols;
+  int nz = Z.n_cols;
+  
+  mat rootpi, betabar, Abeta, Abetabar;
+  int mkeep;
+  unireg runiregout_struct;
+  List regdatai, nmix;
+  
+  // convert List to std::vector of type "moments"
+  std::vector<moments> regdata_vector;
+  moments regdatai_struct;
+  
+  // store vector with struct
+  for (int reg = 0; reg<nreg; reg++){
+    regdatai = regdata[reg];
+    
+    regdatai_struct.y = as<vec>(regdatai["y"]);
+    regdatai_struct.X = as<mat>(regdatai["X"]);
+    regdatai_struct.XpX = as<mat>(regdatai["XpX"]);
+    regdatai_struct.Xpy = as<vec>(regdatai["Xpy"]);
+    regdata_vector.push_back(regdatai_struct);    
+  }
+  
+  // allocate space for draws
+  mat oldbetas = zeros<mat>(nreg,nvar);
+  mat taudraw(R/keep, nreg);
+  cube betadraw(nreg, nvar, R/keep);
+  mat probdraw(R/keep, oldprob.size());
+  mat Deltadraw(1,1); if(drawdelta) Deltadraw.zeros(R/keep, nz*nvar);//enlarge Deltadraw only if the space is required
+  List compdraw(R/keep);
+  
+  if (nprint>0) startMcmcTimer();
+
+  for (int rep = 0; rep<R; rep++){
+   
+   //first draw comps,ind,p | {beta_i}, delta
+   // ind,p need initialization comps is drawn first in sub-Gibbs
+   List mgout;
+   if(drawdelta) {
+      olddelta.reshape(nvar,nz);
+      mgout = rmixGibbs(oldbetas-Z*trans(olddelta),mubar,Amu,nu,V,a,oldprob,ind);
+    } else {
+      mgout = rmixGibbs(oldbetas,mubar,Amu,nu,V,a,oldprob,ind);
+    }
+   
+   List oldcomp = mgout["comps"];
+   oldprob = as<vec>(mgout["p"]); //conversion from Rcpp to Armadillo requires explict declaration of variable type using as<>
+   ind = as<vec>(mgout["z"]); //conversion from Rcpp to Armadillo requires explict declaration of variable type using as<>
+   
+  //now draw delta | {beta_i}, ind, comps
+   if(drawdelta) olddelta = drawDelta(Z,oldbetas,ind,oldcomp,deltabar,Ad);
+   
+  //loop over all regression equations drawing beta_i | ind[i],z[i,],mu[ind[i]],rooti[ind[i]]
+      for(int reg = 0; reg<nreg; reg++){
+        List oldcompreg = oldcomp[ind[reg]-1];
+        rootpi = as<mat>(oldcompreg[1]);
+        
+        //note: beta_i = Delta*z_i + u_i  Delta is nvar x nz
+        if(drawdelta){
+          olddelta.reshape(nvar,nz);
+          betabar = as<vec>(oldcompreg[0])+olddelta*vectorise(Z(reg,span::all));
+        } else {
+          betabar = as<vec>(oldcompreg[0]);
+        }
+      
+        Abeta = trans(rootpi)*rootpi;
+        Abetabar = Abeta*betabar;
+
+        runiregout_struct = runiregG(regdata_vector[reg].y, regdata_vector[reg].X,
+                                regdata_vector[reg].XpX, regdata_vector[reg].Xpy, 
+                                tau[reg], Abeta, Abetabar, nu_e, ssq[reg]);
+      
+        oldbetas(reg,span::all) = trans(runiregout_struct.beta);
+        tau[reg] = runiregout_struct.sigmasq;
+      }
+      
+  //print time to completion and draw # every nprint'th draw
+  if (nprint>0) if ((rep+1)%nprint==0) infoMcmcTimer(rep, R);
+    
+    if((rep+1)%keep==0){
+      mkeep = (rep+1)/keep;
+      taudraw(mkeep-1, span::all) = trans(tau);
+      betadraw.slice(mkeep-1) = oldbetas;
+      probdraw(mkeep-1, span::all) = trans(oldprob);
+      if(drawdelta) Deltadraw(mkeep-1, span::all) = trans(vectorise(olddelta));
+      compdraw[mkeep-1] = oldcomp;
+    }
+  }
+  
+  if (nprint>0) endMcmcTimer();
+  
+  nmix = List::create(Named("probdraw") = probdraw,
+  				  Named("zdraw") = R_NilValue, //sets the value to NULL in R
+					  Named("compdraw") = compdraw);
+	
+  if(drawdelta){
+    return(List::create(
+      Named("taudraw") = taudraw,
+      Named("Deltadraw") = Deltadraw,
+      Named("betadraw") = betadraw,
+      Named("nmix") = nmix));
+	} else {
+    return(List::create(
+      Named("taudraw") = taudraw,
+      Named("betadraw") = betadraw,
+      Named("nmix") = nmix));
+  }
+}
diff --git a/src/rhierLinearModel_rcpp_loop.cpp b/src/rhierLinearModel_rcpp_loop.cpp
new file mode 100644
index 0000000..9d9c8e4
--- /dev/null
+++ b/src/rhierLinearModel_rcpp_loop.cpp
@@ -0,0 +1,137 @@
+#include "bayesm.h"
+
+// [[Rcpp::export]]
+List rhierLinearModel_rcpp_loop(List const& regdata, mat const& Z, mat const& Deltabar, mat const& A, int nu, 
+                          mat const& V, int nu_e, vec const& ssq, vec tau, mat Delta, mat Vbeta, int R, int keep, int nprint){
+
+// Keunwoo Kim 09/16/2014
+
+// Purpose: run hiearchical regression model
+
+// Arguments:
+//   Data list of regdata,Z 
+//     regdata is a list of lists each list with members y, X
+//        e.g. regdata[[i]]=list(y=y,X=X)
+//     X has nvar columns
+//     Z is nreg=length(regdata) x nz
+
+//   Prior list of prior hyperparameters
+//     Deltabar,A, nu.e,ssq,nu,V
+//          note: ssq is a nreg x 1 vector!
+
+//   Mcmc
+//     list of Mcmc parameters
+//     R is number of draws
+//     keep is thining parameter -- keep every keepth draw
+//     nprint - print estimated time remaining on every nprint'th draw
+
+// Output: 
+//   list of 
+//   betadraw -- nreg x nvar x R/keep array of individual regression betas
+//   taudraw -- R/keep x nreg  array of error variances for each regression
+//   Deltadraw -- R/keep x nz x nvar array of Delta draws
+//   Vbetadraw -- R/keep x nvar*nvar array of Vbeta draws
+
+// Model:
+// nreg regression equations 
+//        y_i = X_ibeta_i + epsilon_i  
+//        epsilon_i ~ N(0,tau_i)
+//             nvar X vars in each equation
+
+// Prior:
+//        tau_i ~ nu.e*ssq_i/chisq(nu.e)  tau_i is the variance of epsilon_i
+//        beta_i ~ N(ZDelta[i,],V_beta)
+//               Note:  ZDelta is the matrix Z * Delta; [i,] refers to ith row of this product!
+
+//          vec(Delta) | V_beta ~ N(vec(Deltabar),Vbeta (x) A^-1)
+//          V_beta ~ IW(nu,V)  or V_beta^-1 ~ W(nu,V^-1)
+//              Delta, Deltabar are nz x nvar
+//              A is nz x nz
+//              Vbeta is nvar x nvar
+        
+//          NOTE: if you don't have any z vars, set Z=iota (nreg x 1)
+ 
+// Update Note:
+//        (Keunwoo Kim 04/07/2015)
+//        Changed "rmultireg" to return List object, which is the original function.
+//        Efficiency is almost same as when the output is a struct object.
+//        Nothing different from "rmultireg1" in the previous R version.
+
+  int reg, mkeep;
+  mat Abeta, betabar, ucholinv, Abetabar;
+  List regdatai, rmregout;
+  unireg regout_struct;
+  
+  int nreg = regdata.size();
+  int nvar = V.n_cols;
+  int nz = Z.n_cols;
+  
+  // convert List to std::vector of struct
+  std::vector<moments> regdata_vector;
+  moments regdatai_struct;
+  
+  // store vector with struct
+  for (reg=0; reg<nreg; reg++){
+    regdatai = regdata[reg];
+    
+    regdatai_struct.y = as<vec>(regdatai["y"]);
+    regdatai_struct.X = as<mat>(regdatai["X"]);
+    regdatai_struct.XpX = as<mat>(regdatai["XpX"]);
+    regdatai_struct.Xpy = as<vec>(regdatai["Xpy"]);
+    regdata_vector.push_back(regdatai_struct);    
+  } 
+  
+  mat betas(nreg, nvar);
+  mat Vbetadraw(R/keep, nvar*nvar);
+  mat Deltadraw(R/keep, nz*nvar);
+  mat taudraw(R/keep, nreg);
+  cube betadraw(nreg, nvar, R/keep);
+
+  if (nprint>0) startMcmcTimer();
+  
+  //start main iteration loop
+  for (int rep=0; rep<R; rep++){    
+
+    // compute the inverse of Vbeta
+    ucholinv = solve(trimatu(chol(Vbeta)), eye(nvar,nvar)); //trimatu interprets the matrix as upper triangular and makes solve more efficient
+    Abeta = ucholinv*trans(ucholinv);
+    
+    betabar = Z*Delta;
+    Abetabar = Abeta*trans(betabar);
+    
+    //loop over all regressions
+    for (reg=0; reg<nreg; reg++){      
+    
+      regout_struct = runiregG(regdata_vector[reg].y, regdata_vector[reg].X, 
+                                regdata_vector[reg].XpX, regdata_vector[reg].Xpy, 
+                                tau[reg], Abeta, Abetabar(span::all,reg), 
+                                nu_e, ssq[reg]);
+      betas(reg,span::all) = trans(regout_struct.beta);
+      tau[reg] = regout_struct.sigmasq;
+    }
+    
+    //draw Vbeta, Delta | {beta_i}
+    rmregout = rmultireg(betas,Z,Deltabar,A,nu,V);
+    Vbeta = as<mat>(rmregout["Sigma"]); //conversion from Rcpp to Armadillo requires explict declaration of variable type using as<>
+    Delta = as<mat>(rmregout["B"]);
+  
+    //print time to completion and draw # every nprint'th draw
+    if (nprint>0) if ((rep+1)%nprint==0) infoMcmcTimer(rep, R);
+    
+    if((rep+1)%keep==0){
+      mkeep = (rep+1)/keep;
+      Vbetadraw(mkeep-1, span::all) = trans(vectorise(Vbeta));
+      Deltadraw(mkeep-1, span::all) = trans(vectorise(Delta));
+      taudraw(mkeep-1, span::all) = trans(tau);
+      betadraw.slice(mkeep-1) = betas;
+    }    
+  }
+  
+  if (nprint>0) endMcmcTimer();
+  
+  return List::create(
+    Named("Vbetadraw") = Vbetadraw,
+    Named("Deltadraw") = Deltadraw,
+	  Named("betadraw") = betadraw,
+	  Named("taudraw") = taudraw);
+}
diff --git a/src/rhierMnlDP_rcpp_loop.cpp b/src/rhierMnlDP_rcpp_loop.cpp
new file mode 100644
index 0000000..be8dbac
--- /dev/null
+++ b/src/rhierMnlDP_rcpp_loop.cpp
@@ -0,0 +1,343 @@
+#include "bayesm.h"
+ 
+//FUNCTIONS SPECIFIC TO MAIN FUNCTION------------------------------------------------------
+mat drawDelta(mat const& x,mat const& y,ivec const& z,std::vector<murooti> const& comps_vector,vec const& deltabar,mat const& Ad){
+
+// Wayne Taylor 2/21/2015
+
+// delta = vec(D)
+//  given z and comps (z[i] gives component indicator for the ith observation, 
+//   comps is a list of mu and rooti)
+// y is n x p
+// x is n x k
+// y = xD' + U , rows of U are indep with covs Sigma_i given by z and comps
+
+  int p = y.n_cols;
+  int k = x.n_cols;
+  int ncomp  = comps_vector.size();
+  mat xtx = zeros<mat>(k*p,k*p);
+  mat xty = zeros<mat>(p,k); //this is the unvecced version, reshaped after the sum
+  
+  //Create the index vectors, the colAll vectors are equal to span::all but with uvecs (as required by .submat)
+  uvec colAlly(p), colAllx(k);
+  for(int i = 0; i<p; i++) colAlly(i) = i;
+  for(int i = 0; i<k; i++) colAllx(i) = i;
+  
+  //Loop through the components
+  for(int compi = 0; compi<ncomp; compi++){
+    
+    //Create an index vector ind, to be used like y[ind,]
+    uvec ind = find(z == (compi+1));
+  
+    //If there are observations in this component
+    if(ind.size()>0){
+      mat yi = y.submat(ind,colAlly);
+      mat xi = x.submat(ind,colAllx);
+      
+      murooti compsi_struct = comps_vector[compi];
+      yi.each_row() -= trans(compsi_struct.mu); //the subtraction operation is repeated on each row of yi
+      mat sigi = compsi_struct.rooti*trans(compsi_struct.rooti);
+      xtx = xtx + kron(trans(xi)*xi,sigi);
+      xty = xty + (sigi * (trans(yi)*xi));
+    }
+  }
+  xty.reshape(xty.n_rows*xty.n_cols,1);
+  
+  //vec(t(D)) ~ N(V^{-1}(xty + Ad*deltabar),V^{-1}) where V = (xtx+Ad)
+  // compute the inverse of xtx+Ad
+  mat ucholinv = solve(trimatu(chol(xtx+Ad)), eye(k*p,k*p)); //trimatu interprets the matrix as upper triangular and makes solve more efficient
+  mat Vinv = ucholinv*trans(ucholinv);
+  
+  return(Vinv*(xty+Ad*deltabar) + trans(chol(Vinv))*as<vec>(rnorm(deltabar.size())));
+}
+
+DPOut rDPGibbs1(mat y, lambda lambda_struct, std::vector<murooti> thetaStar_vector, int maxuniq, ivec indic, 
+            vec q0v, double alpha, priorAlpha const& priorAlpha_struct, int gridsize, List const& lambda_hyper){
+
+// Wayne Taylor 2/21/2015
+
+//revision history:
+//created from rDPGibbs by Rossi 3/08
+
+//do one draw of DP Gibbs sampler with normal base
+
+//Model:
+//  y_i ~ N(y|thetai)
+//  thetai|G ~ G
+//  G|lambda,alpha ~ DP(G|G0(lambda),alpha)
+
+//Priors:
+//  alpha: starting value
+//  lambda:
+//    G0 ~ N(mubar,Sigma (x) Amu^-1)
+//    mubar=vec(mubar)
+//    Sigma ~ IW(nu,nu*V) V=v*I  note: mode(Sigma)=nu/(nu+2)*v*I
+//    mubar=0
+//    amu is uniform on grid specified by alim
+//    nu is log uniform, nu=d-1+exp(Z) z is uniform on seq defined bvy nulim
+//    v is uniform on sequence specificd by vlim
+
+//  priorAlpha_struct:
+//    alpha ~ (1-(alpha-alphamin)/(alphamax-alphamin))^power
+//    alphamin=exp(digamma(Istarmin)-log(gamma+log(N)))
+//    alphamax=exp(digamma(Istarmax)-log(gamma+log(N)))
+//    gamma= .5772156649015328606
+
+//output:
+//  ind - vector of indicators for which observations are associated with which comp in thetaStar
+//  thetaStar - list of unique normal component parms
+//  lambda  - list of of (a,nu,V)
+//  alpha 
+//  thetaNp1 - one draw from predictive given thetaStar, lambda,alphama
+
+  int n = y.n_rows;
+  int dimy = y.n_cols;
+  int nunique, indsize, indp, probssize;
+  vec probs;
+  uvec ind;
+  mat ydenmat;
+  uvec spanall(dimy); for(int i = 0; i<dimy ; i++) spanall[i] = i; //creates a uvec of [0,1,...,dimy-1]
+  thetaStarIndex thetaStarDrawOut_struct;
+  std::vector<murooti> new_utheta_vector(1), thetaNp1_vector(1);
+  murooti thetaNp10_struct, outGD_struct;
+
+  for(int rep = 0; rep<1; rep++) { //note we only do one loop!
+    
+    q0v = q0(y,lambda_struct);
+   
+    nunique = thetaStar_vector.size();
+  
+    if(nunique > maxuniq) stop("maximum number of unique thetas exceeded");
+   
+    //ydenmat is a length(thetaStar) x n array of density values given f(y[j,] | thetaStar[[i]]
+    //  note: due to remix step (below) we must recompute ydenmat each time!
+    ydenmat = zeros<mat>(maxuniq,n);
+                     
+    ydenmat(span(0,nunique-1),span::all) = yden(thetaStar_vector,y);
+  
+    thetaStarDrawOut_struct = thetaStarDraw(indic, thetaStar_vector, y, ydenmat, q0v, alpha, lambda_struct, maxuniq);
+    thetaStar_vector = thetaStarDrawOut_struct.thetaStar_vector;
+    indic = thetaStarDrawOut_struct.indic;
+    nunique = thetaStar_vector.size();
+  
+    //thetaNp1 and remix
+    probs = zeros<vec>(nunique+1);
+    for(int j = 0; j < nunique; j++){
+      ind = find(indic == (j+1));
+      indsize = ind.size();
+      probs[j] = indsize/(alpha + n + 0.0);
+      new_utheta_vector[0] = thetaD(y(ind,spanall),lambda_struct);
+      thetaStar_vector[j] = new_utheta_vector[0];
+    }
+                  
+    probs[nunique] = alpha/(alpha+n+0.0);
+    indp = rmultinomF(probs);
+    probssize = probs.size();
+    if(indp == probssize) {
+      outGD_struct = GD(lambda_struct);
+      thetaNp10_struct.mu = outGD_struct.mu;
+      thetaNp10_struct.rooti = outGD_struct.rooti;
+      thetaNp1_vector[0] = thetaNp10_struct;
+    } else {
+      outGD_struct = thetaStar_vector[indp-1];
+      thetaNp10_struct.mu = outGD_struct.mu;
+      thetaNp10_struct.rooti = outGD_struct.rooti;
+      thetaNp1_vector[0] = thetaNp10_struct;
+    }
+  
+    //draw alpha
+    alpha = alphaD(priorAlpha_struct,nunique,gridsize);
+  
+    //draw lambda
+    lambda_struct = lambdaD(lambda_struct,thetaStar_vector,lambda_hyper["alim"],lambda_hyper["nulim"],lambda_hyper["vlim"],gridsize);
+  }
+
+  //note indic is the vector of indicators for each obs correspond to which thetaStar
+  DPOut out_struct;
+    out_struct.thetaStar_vector = thetaStar_vector;
+    out_struct.thetaNp1_vector = thetaNp1_vector;
+    out_struct.alpha = alpha;
+    out_struct.lambda_struct = lambda_struct;
+    out_struct.indic = indic;
+
+  return(out_struct);
+}
+
+//MAIN FUNCTION-------------------------------------------------------------------------------------
+//[[Rcpp::export]]
+List rhierMnlDP_rcpp_loop(int R, int keep, int nprint,
+                          List const& lgtdata, mat const& Z,
+                          vec const& deltabar, mat const& Ad, List const& PrioralphaList, List const& lambda_hyper,
+                          bool drawdelta, int nvar, mat oldbetas, double s,
+                          int maxuniq, int gridsize,
+                          double BayesmConstantA, int BayesmConstantnuInc, double BayesmConstantDPalpha){
+
+// Wayne Taylor 2/21/2015
+
+  //Initialize variable placeholders
+  int mkeep, Istar;
+  vec betabar, q0v;
+  mat rootpi, ucholinv, incroot, V;
+  List compdraw(R/keep), nmix;
+  DPOut mgout_struct;
+  mnlMetropOnceOut metropout_struct;
+  murooti thetaStarLgt_struct;
+  
+  int nz = Z.n_cols;
+  int nlgt = lgtdata.size();
+  
+  // convert List to std::vector of struct
+  List lgtdatai;
+  std::vector<moments> lgtdata_vector;
+  moments lgtdatai_struct;
+  for (int lgt = 0; lgt<nlgt; lgt++){
+    lgtdatai = lgtdata[lgt];
+    
+    lgtdatai_struct.y = as<vec>(lgtdatai["y"]);
+    lgtdatai_struct.X = as<mat>(lgtdatai["X"]);
+    lgtdatai_struct.hess = as<mat>(lgtdatai["hess"]);
+    lgtdata_vector.push_back(lgtdatai_struct);    
+  }
+  
+  //initialize indicator vector, delta, thetaStar, thetaNp10, alpha, oldprob
+  ivec indic = ones<ivec>(nlgt);
+  
+  mat olddelta;
+  if (drawdelta) olddelta = zeros<vec>(nz*nvar);
+   
+  std::vector<murooti> thetaStar_vector(1);
+  murooti thetaNp10_struct, thetaStar0_struct;
+    thetaStar0_struct.mu = zeros<vec>(nvar);
+    thetaStar0_struct.rooti = eye(nvar,nvar);
+  thetaStar_vector[0] = thetaStar0_struct;
+  
+  double alpha = BayesmConstantDPalpha;
+
+  //fix oldprob (only one comp)
+  double oldprob = 1.0;
+
+  //convert Prioralpha from List to struct
+  priorAlpha priorAlpha_struct;
+    priorAlpha_struct.power = PrioralphaList["power"];
+    priorAlpha_struct.alphamin = PrioralphaList["alphamin"];
+    priorAlpha_struct.alphamax = PrioralphaList["alphamax"];
+    priorAlpha_struct.n = PrioralphaList["n"];
+
+ //initialize lambda
+  lambda lambda_struct;
+    lambda_struct.mubar = zeros<vec>(nvar);
+    lambda_struct.Amu = BayesmConstantA;
+    lambda_struct.nu = nvar+BayesmConstantnuInc;
+    lambda_struct.V = lambda_struct.nu*eye(nvar,nvar);
+  
+  //allocate space for draws
+  mat Deltadraw(1,1); if(drawdelta) Deltadraw.zeros(R/keep, nz*nvar);//enlarge Deltadraw only if the space is required
+  cube betadraw(nlgt, nvar, R/keep);
+  vec probdraw = zeros<vec>(R/keep);
+  vec oldll = zeros<vec>(nlgt);
+  vec loglike = zeros<vec>(R/keep);
+  vec Istardraw = zeros<vec>(R/keep);
+  vec alphadraw = zeros<vec>(R/keep);
+  vec nudraw = zeros<vec>(R/keep);
+  vec vdraw = zeros<vec>(R/keep);
+  vec adraw = zeros<vec>(R/keep);
+  
+  if (nprint>0) startMcmcTimer();
+  
+  //start main iteration loop
+  for(int rep = 0; rep<R; rep++) {
+    
+    //first draw comps,indic,p | {beta_i}, delta
+    //  indic,p need initialization comps is drawn first in sub-Gibbs
+    if(drawdelta){
+      olddelta.reshape(nvar,nz);
+      mgout_struct = rDPGibbs1(oldbetas-Z*trans(olddelta),lambda_struct,thetaStar_vector,maxuniq,indic,q0v,alpha,priorAlpha_struct,gridsize,lambda_hyper);
+    } else {
+      mgout_struct = rDPGibbs1(oldbetas,lambda_struct,thetaStar_vector,maxuniq,indic,q0v,alpha,priorAlpha_struct,gridsize,lambda_hyper);
+    }
+  
+    indic = mgout_struct.indic;
+    lambda_struct = mgout_struct.lambda_struct;
+    alpha = mgout_struct.alpha;
+    thetaStar_vector = mgout_struct.thetaStar_vector;
+    Istar = thetaStar_vector.size();
+
+    //now draw delta | {beta_i}, ind, comps
+    if(drawdelta) {olddelta = drawDelta(Z,oldbetas,indic,thetaStar_vector,deltabar,Ad);}
+  
+    //loop over all lgt equations drawing beta_i | ind[i],z[i,],mu[ind[i]],rooti[ind[i]]
+    for (int lgt=0; lgt<nlgt; lgt++){
+      thetaStarLgt_struct = thetaStar_vector[indic[lgt]-1];
+      rootpi = thetaStarLgt_struct.rooti;
+      
+      //note: beta_i = Delta*z_i + u_i  Delta is nvar x nz
+      if(drawdelta){
+        olddelta.reshape(nvar,nz);
+        betabar = thetaStarLgt_struct.mu + olddelta * trans(Z(lgt,span::all));
+      } else {
+        betabar = thetaStarLgt_struct.mu;
+      }
+      
+      if (rep == 0) oldll[lgt] = llmnl(vectorise(oldbetas(lgt,span::all)),lgtdata_vector[lgt].y,lgtdata_vector[lgt].X);
+      
+      //compute inc.root
+      ucholinv = solve(trimatu(chol(lgtdata_vector[lgt].hess+rootpi*trans(rootpi))), eye(nvar,nvar)); //trimatu interprets the matrix as upper triangular and makes solve more efficient
+      incroot = chol(ucholinv*trans(ucholinv));
+      
+      metropout_struct = mnlMetropOnce(lgtdata_vector[lgt].y,lgtdata_vector[lgt].X,vectorise(oldbetas(lgt,span::all)),
+                                           oldll[lgt],s,incroot,betabar,rootpi);
+      
+      oldbetas(lgt,span::all) = trans(metropout_struct.betadraw);
+      oldll[lgt] = metropout_struct.oldll;   
+    }
+  
+    //print time to completion and draw # every nprint'th draw
+    if (nprint>0) if((rep+1)%nprint==0) infoMcmcTimer(rep, R);
+      
+    if((rep+1)%keep==0){
+      mkeep = (rep+1)/keep;
+      betadraw.slice(mkeep-1) = oldbetas;
+      probdraw[mkeep-1] = oldprob;
+      alphadraw[mkeep-1] = alpha;
+      Istardraw[mkeep-1] = Istar;
+      adraw[mkeep-1] = lambda_struct.Amu;
+      nudraw[mkeep-1] = lambda_struct.nu;
+      V = lambda_struct.V;
+      vdraw[mkeep-1] = V(0,0)/(lambda_struct.nu+0.0);
+      loglike[mkeep-1] = sum(oldll);
+      if(drawdelta) Deltadraw(mkeep-1, span::all) = trans(vectorise(olddelta));
+      thetaNp10_struct = mgout_struct.thetaNp1_vector[0];
+      //we have to convert to a NumericVector for the plotting functions to work
+      compdraw[mkeep-1] = List::create(List::create(Named("mu") = NumericVector(thetaNp10_struct.mu.begin(),thetaNp10_struct.mu.end()),Named("rooti") = thetaNp10_struct.rooti));
+    }
+  }
+  
+  if (nprint>0) endMcmcTimer();
+  
+  nmix = List::create(Named("probdraw") = probdraw,
+    			  Named("zdraw") = R_NilValue, //sets the value to NULL in R
+					  Named("compdraw") = compdraw);
+	
+  if(drawdelta){
+    return(List::create(
+      Named("Deltadraw") = Deltadraw,
+      Named("betadraw") = betadraw,
+      Named("nmix") = nmix,
+      Named("alphadraw") = alphadraw,
+      Named("Istardraw") = Istardraw,
+      Named("adraw") = adraw,
+      Named("nudraw") = nudraw,
+      Named("vdraw") = vdraw,
+      Named("loglike") = loglike));
+	} else {
+    return(List::create(
+      Named("betadraw") = betadraw,
+      Named("nmix") = nmix,
+      Named("alphadraw") = alphadraw,
+      Named("Istardraw") = Istardraw,
+      Named("adraw") = adraw,
+      Named("nudraw") = nudraw,
+      Named("vdraw") = vdraw,
+      Named("loglike") = loglike));
+	}
+}
diff --git a/src/rhierMnlRwMixture_rcpp_loop.cpp b/src/rhierMnlRwMixture_rcpp_loop.cpp
new file mode 100644
index 0000000..fc35168
--- /dev/null
+++ b/src/rhierMnlRwMixture_rcpp_loop.cpp
@@ -0,0 +1,119 @@
+#include "bayesm.h"
+ 
+//[[Rcpp::export]]
+List rhierMnlRwMixture_rcpp_loop(List const& lgtdata, mat const& Z,
+                                  vec const& deltabar, mat const& Ad, mat const& mubar, mat const& Amu,
+                                  int const& nu, mat const& V, double s,
+                                  int R, int keep, int nprint, bool drawdelta,
+                                  mat olddelta,  vec const& a, vec oldprob, mat oldbetas, vec ind){
+
+// Wayne Taylor 10/01/2014
+
+  int nlgt = lgtdata.size();
+  int nvar = V.n_cols;
+  int nz = Z.n_cols;
+  
+  mat rootpi, betabar, ucholinv, incroot;
+  int mkeep;
+  mnlMetropOnceOut metropout_struct;
+  List lgtdatai, nmix;
+  
+  // convert List to std::vector of struct
+  std::vector<moments> lgtdata_vector;
+  moments lgtdatai_struct;
+  for (int lgt = 0; lgt<nlgt; lgt++){
+    lgtdatai = lgtdata[lgt];
+    
+    lgtdatai_struct.y = as<vec>(lgtdatai["y"]);
+    lgtdatai_struct.X = as<mat>(lgtdatai["X"]);
+    lgtdatai_struct.hess = as<mat>(lgtdatai["hess"]);
+    lgtdata_vector.push_back(lgtdatai_struct);    
+  }
+    
+  // allocate space for draws
+  vec oldll = zeros<vec>(nlgt);
+  cube betadraw(nlgt, nvar, R/keep);
+  mat probdraw(R/keep, oldprob.size());
+  vec loglike(R/keep);
+  mat Deltadraw(1,1); if(drawdelta) Deltadraw.zeros(R/keep, nz*nvar);//enlarge Deltadraw only if the space is required
+  List compdraw(R/keep);
+  
+  if (nprint>0) startMcmcTimer();
+    
+  for (int rep = 0; rep<R; rep++){
+    
+    //first draw comps,ind,p | {beta_i}, delta
+    // ind,p need initialization comps is drawn first in sub-Gibbs
+    List mgout;
+    if(drawdelta) {
+      olddelta.reshape(nvar,nz);
+      mgout = rmixGibbs (oldbetas-Z*trans(olddelta),mubar,Amu,nu,V,a,oldprob,ind);
+    } else {
+      mgout = rmixGibbs(oldbetas,mubar,Amu,nu,V,a,oldprob,ind);
+    }
+    
+    List oldcomp = mgout["comps"];
+    oldprob = as<vec>(mgout["p"]); //conversion from Rcpp to Armadillo requires explict declaration of variable type using as<>
+    ind = as<vec>(mgout["z"]);
+    
+    //now draw delta | {beta_i}, ind, comps
+    if(drawdelta) olddelta = drawDelta(Z,oldbetas,ind,oldcomp,deltabar,Ad);
+    
+    //loop over all LGT equations drawing beta_i | ind[i],z[i,],mu[ind[i]],rooti[ind[i]]
+      for(int lgt = 0; lgt<nlgt; lgt++){
+        List oldcomplgt = oldcomp[ind[lgt]-1];
+        rootpi = as<mat>(oldcomplgt[1]);
+        
+        //note: beta_i = Delta*z_i + u_i  Delta is nvar x nz
+        if(drawdelta){
+          olddelta.reshape(nvar,nz);
+          betabar = as<vec>(oldcomplgt[0])+olddelta*vectorise(Z(lgt,span::all));
+        } else {
+          betabar = as<vec>(oldcomplgt[0]);
+        }
+        
+        if (rep == 0) oldll[lgt] = llmnl(vectorise(oldbetas(lgt,span::all)),lgtdata_vector[lgt].y,lgtdata_vector[lgt].X);
+        
+        //compute inc.root
+        ucholinv = solve(trimatu(chol(lgtdata_vector[lgt].hess+rootpi*trans(rootpi))), eye(nvar,nvar)); //trimatu interprets the matrix as upper triangular and makes solve more efficient
+        incroot = chol(ucholinv*trans(ucholinv));
+                
+        metropout_struct = mnlMetropOnce(lgtdata_vector[lgt].y,lgtdata_vector[lgt].X,vectorise(oldbetas(lgt,span::all)),
+                                         oldll[lgt],s,incroot,betabar,rootpi);
+         
+         oldbetas(lgt,span::all) = trans(metropout_struct.betadraw);
+         oldll[lgt] = metropout_struct.oldll;  
+      }
+      
+    //print time to completion and draw # every nprint'th draw
+    if (nprint>0) if ((rep+1)%nprint==0) infoMcmcTimer(rep, R);
+    
+    if((rep+1)%keep==0){
+      mkeep = (rep+1)/keep;
+      betadraw.slice(mkeep-1) = oldbetas;
+      probdraw(mkeep-1, span::all) = trans(oldprob);
+      loglike[mkeep-1] = sum(oldll);
+      if(drawdelta) Deltadraw(mkeep-1, span::all) = trans(vectorise(olddelta));
+      compdraw[mkeep-1] = oldcomp;
+    }
+  }
+  
+  if (nprint>0) endMcmcTimer();
+  
+  nmix = List::create(Named("probdraw") = probdraw,
+    		  Named("zdraw") = R_NilValue, //sets the value to NULL in R
+				  Named("compdraw") = compdraw);
+
+  if(drawdelta){
+    return(List::create(
+      Named("Deltadraw") = Deltadraw,
+      Named("betadraw") = betadraw,
+      Named("nmix") = nmix,
+      Named("loglike") = loglike));  
+  } else {
+    return(List::create(
+      Named("betadraw") = betadraw,
+      Named("nmix") = nmix,
+      Named("loglike") = loglike));
+  }
+}
diff --git a/src/rhierNegbinRw_rcpp_loop.cpp b/src/rhierNegbinRw_rcpp_loop.cpp
new file mode 100644
index 0000000..2a65009
--- /dev/null
+++ b/src/rhierNegbinRw_rcpp_loop.cpp
@@ -0,0 +1,169 @@
+#include "bayesm.h"
+ 
+//EXTRA FUNCTIONS SPECIFIC TO THE MAIN FUNCTION--------------------------------------------
+double llnegbinpooled(std::vector<moments> regdata_vector, mat Beta, double alpha){
+  
+// Wayne Taylor 12/01/2014
+
+// "Unlists" the regdata and calculates the negative binomial loglikelihood using individual-level betas
+  
+  int nreg = regdata_vector.size();
+  double ll = 0.0;
+  
+  for(int reg = 0; reg<nreg; reg++){
+  vec lambda = exp(regdata_vector[reg].X*trans(Beta(reg,span::all)));
+  ll = ll + llnegbin(regdata_vector[reg].y,lambda,alpha,TRUE);
+  }
+  
+  return(ll);
+}
+
+// [[Rcpp::export]]
+List rhierNegbinRw_rcpp_loop(List const& regdata, List const& hessdata, mat const& Z, mat Beta, mat Delta,
+                             mat const& Deltabar, mat const& Adelta, int nu, mat const& V, double a, double b,
+                             int R, int keep, double sbeta, double alphacroot, int nprint, mat rootA,
+                             double alpha, bool fixalpha){
+                            
+// Wayne Taylor 12/01/2014                          
+
+//   Model
+//       (y_i|lambda_i,alpha) ~ Negative Binomial(Mean = lambda_i, Overdispersion par = alpha)
+//
+//       ln(lambda_i) =  X_i * beta_i
+//
+//       beta_i = Delta'*z_i + nu_i
+//               nu_i~N(0,Vbeta)
+//       Note: rootA = the Cholesky root of the inverse of Vbeta
+//
+//   Priors
+//       vec(Delta|Vbeta) ~ N(vec(Deltabar), Vbeta (x) (Adelta^-1))
+//       Vbeta ~ Inv Wishart(nu, V)
+//       alpha ~ Gamma(a,b) where mean = a/b and variance = a/(b^2)
+//
+//   Arguments
+//       Data = list of regdata,Z 
+//           regdata is a list of lists each list with members y, X
+//              e.g. regdata[[i]]=list(y=y,X=X)
+//              X has nvar columns including a first column of ones
+//              Z is nreg=length(regdata) x nz with a first column of ones
+//
+//       Prior - list containing the prior parameters
+//           Deltabar, Adelta - mean of Delta prior, inverse of variance covariance of Delta prior
+//           nu, V - parameters of Vbeta prior
+//           a, b - parameters of alpha prior
+//
+//       Mcmc - list containing
+//           R is number of draws
+//           keep is thinning parameter (def = 1)
+//           nprint - print estimated time remaining on every nprint'th draw (def = 100)
+//           s_beta - scaling parameter for beta RW (def = 2.93/sqrt(nvar))
+//           s_alpha - scaling parameter for alpha RW (def = 2.93)
+//           w - fractional weighting parameter (def = .1)
+//           Vbeta0, Delta0 - initial guesses for parameters, if not supplied default values are used
+
+  double ldiff, acc, unif, logalphac, oldlpostalpha, clpostalpha;
+  int mkeep, rep;
+  int nreg = regdata.size();
+  int nz = Z.n_cols;
+  int nvar = rootA.n_cols;  
+  int nacceptbeta = 0;
+  int nacceptalpha = 0; 
+  
+  mat Vbetainv = trans(rootA)*rootA;
+
+  // allocate space for draws
+  vec oldlpostbeta = zeros<vec>(nreg);
+  vec clpostbeta = zeros<vec>(nreg);
+  cube Betadraw = zeros<cube>(nreg, nvar, R/keep);
+  vec alphadraw = zeros<vec>(R/keep);
+  vec llike = zeros<vec>(R/keep);
+  mat Vbetadraw = zeros<mat>(R/keep,nvar*nvar);
+  mat Deltadraw = zeros<mat>(R/keep,nvar*nz);
+  
+  // convert regdata and hessdata Lists to std::vector of struct
+  std::vector<moments> regdata_vector;
+  moments regdatai_struct;
+  List regdatai,hessi;
+
+  // store vector with struct
+  for (int reg = 0; reg<nreg; reg++){
+    regdatai = regdata[reg];
+    hessi = hessdata[reg];
+  
+    regdatai_struct.y = as<vec>(regdatai["y"]);
+    regdatai_struct.X = as<mat>(regdatai["X"]);
+    regdatai_struct.hess = as<mat>(hessi["hess"]);
+    regdata_vector.push_back(regdatai_struct);    
+  }
+
+  if (nprint>0) startMcmcTimer();
+  
+  //  start main iteration loop
+  for (rep = 0; rep < R; rep++){
+    
+    mat betabar = Z*Delta;
+    
+    // Draw betai
+    for(int reg = 0; reg<nreg; reg++){
+        vec betabari = trans(betabar(reg,span::all));
+        mat betacvar = sbeta*solve(regdata_vector[reg].hess+Vbetainv,eye(nvar,nvar));
+        mat betaroot = trans(chol(betacvar));
+        vec betac = vectorise(Beta(reg,span::all)) + betaroot*vec(rnorm(nvar));
+       
+        oldlpostbeta[reg] = lpostbeta(alpha, trans(Beta(reg,span::all)), regdata_vector[reg].X, regdata_vector[reg].y, betabari, rootA);
+        clpostbeta[reg] = lpostbeta(alpha, betac, regdata_vector[reg].X, regdata_vector[reg].y, betabari, rootA);
+        ldiff = clpostbeta[reg] - oldlpostbeta[reg];
+        acc = exp(ldiff);
+        if (acc > 1) acc = 1;    
+        if(acc < 1) {unif=runif(1)[0];} else {unif=0;} //runif returns a NumericVector, so using [0] allows for conversion to double by extracting the first element
+        if (unif <= acc){
+          Beta(reg,span::all) = trans(betac);
+          nacceptbeta = nacceptbeta + 1;
+        }
+    }
+    
+    // Draw alpha
+    if (!fixalpha){
+      logalphac = log(alpha) + alphacroot*rnorm(1)[0]; //rnorm returns a NumericVector, so using [0] allows for conversion to double
+      oldlpostalpha = llnegbinpooled(regdata_vector,Beta,alpha)+(a-1)*log(alpha) - b*alpha;
+      clpostalpha = llnegbinpooled(regdata_vector,Beta,exp(logalphac))+(a-1)*logalphac - b*exp(logalphac);
+      ldiff = clpostalpha - oldlpostalpha;
+      acc = exp(ldiff);
+      if (acc > 1) acc = 1;    
+      if(acc < 1) {unif=runif(1)[0];} else {unif=0;} //runif returns a NumericVector, so using [0] allows for conversion to double by extracting the first element
+      if (unif <= acc){
+        alpha = exp(logalphac);
+        nacceptalpha = nacceptalpha + 1;
+      }
+    }  
+
+    // Draw Vbeta and Delta using rmultireg
+    List temp = rmultireg(Beta,Z,Deltabar,Adelta,nu,V);
+    mat Vbeta = as<mat>(temp["Sigma"]); //conversion from Rcpp to Armadillo requires explict declaration of variable type using as<>
+    Vbetainv = solve(Vbeta,eye(nvar,nvar));
+    rootA = chol(Vbetainv);
+    Delta = as<mat>(temp["B"]);
+
+    if (nprint>0) if ((rep+1)%nprint==0) infoMcmcTimer(rep, R);    
+    
+    if((rep+1)%keep==0){
+      mkeep = (rep+1)/keep;
+      Betadraw.slice(mkeep-1) = Beta;
+      alphadraw[mkeep-1] = alpha;
+      Vbetadraw(mkeep-1,span::all) = trans(vectorise(Vbeta));
+      Deltadraw(mkeep-1,span::all) = trans(vectorise(Delta));
+      llike[mkeep-1] = llnegbinpooled(regdata_vector,Beta,alpha);
+      } 
+  }
+  
+  if (nprint>0) endMcmcTimer();
+  
+  return List::create(
+    Named("llike") = llike,
+    Named("Betadraw") = Betadraw,
+    Named("alphadraw") = alphadraw,      
+    Named("Vbetadraw") = Vbetadraw,
+    Named("Deltadraw") = Deltadraw,
+    Named("acceptrbeta") = nacceptbeta/(R*nreg*1.0)*100,
+    Named("acceptralpha") = nacceptalpha/(R*1.0)*100);
+}
diff --git a/src/rivDP_rcpp_loop.cpp b/src/rivDP_rcpp_loop.cpp
new file mode 100644
index 0000000..3989e40
--- /dev/null
+++ b/src/rivDP_rcpp_loop.cpp
@@ -0,0 +1,426 @@
+#include "bayesm.h"
+ 
+//FUNCTIONS SPECIFIC TO MAIN FUNCTION------------------------------------------------------
+//Container for the stacked regression systems built by get_ytxt/get_ytxtd below
+struct ytxtxtd{
+  vec yt;  //stacked standardized responses
+  mat xt;  //stacked regressors for the (beta,gamma) draw; must be a mat (not a
+           //vec as originally declared) because get_ytxt stores join_rows(xk,wk)
+           //here, which has more than one column whenever w is supplied
+  mat xtd; //stacked instrument design matrix for the delta draw
+};
+
+ytxtxtd get_ytxt(vec const& y, mat const& z, mat const& delta, mat const& x, mat const& w,
+                              int ncomp,ivec const& indic, std::vector<murooti> const& thetaStar_vector){
+
+  // Wayne Taylor 3/14/2015
+
+  // Builds the stacked (yt,xt) system used for the (beta,gamma) draw:
+  // for each mixture component k, the observations assigned to it
+  // (indic == k+1) are standardized by that component's conditional
+  // error moments and appended to yt/xt.
+  
+  int dimz = z.n_cols;
+  int dimx = x.n_cols;
+  
+  //variable type initializaion
+  double sig;
+  mat wk, zk, xk, rooti, Sigma, xt;
+  vec yk, mu, e1, ee2, yt;
+  uvec ind, colAllw, colAllz(dimz), colAllx(dimx);
+
+  //Create the index vectors, the colAll vectors are equal to span::all but with uvecs (as required by .submat)
+  for(int i = 0; i<dimz; i++) colAllz(i) = i;
+  for(int i = 0; i<dimx; i++) colAllx(i) = i;
+
+  bool isw = false;
+  if(!w.is_empty()){
+    isw = true;
+    int ncolw = w.n_cols;
+    //size the OUTER colAllw; the original declared a new local `uvec colAllw(ncolw)`
+    //here, which shadowed the outer vector and left it empty, so the submat
+    //extraction below silently dropped every column of w
+    colAllw.set_size(ncolw);
+    for(int i = 0; i<ncolw; i++) colAllw(i) = i;
+  }
+  
+  for (int k = 0; k < ncomp; k++){ 
+    
+    //Create an index vector ind, to be used like y[ind,]
+    ind = find(indic == (k+1));
+  
+    //If there are observations in this component
+    if(ind.size()>0){
+      
+      if(isw) wk = w.submat(ind,colAllw);
+      zk = z.submat(ind,colAllz);
+      yk = y(ind);
+      xk = x.submat(ind,colAllx);
+      
+      murooti thetaStark_struct = thetaStar_vector[k];
+      mu = thetaStark_struct.mu;
+      rooti = thetaStark_struct.rooti;
+      
+      //recover Sigma from its inverse Cholesky root
+      Sigma = solve(rooti,eye(2,2));
+      Sigma = trans(Sigma)*Sigma;
+    
+      e1 = xk-zk*delta;  
+      //conditional mean and sd of the structural error given e1
+      ee2 = mu[1] + (Sigma(0,1)/Sigma(0,0))*(e1-mu[0]);
+      sig = sqrt(Sigma(1,1)-pow(Sigma(0,1),2.0)/Sigma(0,0));
+      yt = join_cols(yt,(yk-ee2)/sig); //analogous to rbind()
+      
+      if(isw) {
+        //NOTE(review): with w present xt has dimx+ncolw columns — confirm
+        //ytxtxtd::xt is declared as mat so this assignment keeps all columns
+        xt = join_cols(xt,join_rows(xk,wk)/sig);
+      } else {
+        xt = join_cols(xt,xk/sig);
+      }
+    }
+  }
+  
+  ytxtxtd out_struct;
+    out_struct.yt = yt;
+    out_struct.xt = xt;
+    
+  return(out_struct);
+}
+
+ytxtxtd get_ytxtd(vec const& y, mat const& z, double beta, vec const& gamma, mat const& x, mat const& w,
+                              int ncomp, ivec const& indic,std::vector<murooti> const& thetaStar_vector, int dimd){
+
+  // Wayne Taylor 3/14/2015
+
+  // Builds the stacked (yt,xtd) system used for the delta draw: each
+  // observation contributes two rows (one per structural equation),
+  // standardized by the component-specific error covariance.
+
+  int dimx = x.n_cols;
+
+  //variable type initializaion
+  int indsize, indicsize;
+  vec zveck, yk, mu, ytk, u, yt;
+  mat C, wk, zk, xk, rooti, Sigma, B, L, Li, z2, zt1, zt2, xtd;
+  uvec colAllw, colAllz(dimd), colAllx(dimx), ind, seqindk, negseqindk;
+
+  //Create index vectors (uvec) for submatrix views
+  indicsize = indic.size();
+  //here the uvecs are declared once, and within each loop the correctly sized vector is extracted as needed
+  uvec seqind(indicsize);for(int i = 0;i<indicsize;i++){seqind[i] = i*2;} //element 0,2,4,. . .
+  uvec negseqind(indicsize);for(int i = 0;i<indicsize;i++){negseqind[i] = (i*2)+1;} //element 1,3,5,...
+  
+  //colAll vectors are equal to span::all but with uvecs (as required by .submat)
+  for(int i = 0; i<dimd; i++) colAllz(i) = i;
+  for(int i = 0; i<dimx; i++) colAllx(i) = i;
+  
+  bool isw = false;
+  if(!w.is_empty()){
+    isw = true;
+    int ncolw = w.n_cols;
+    //size the OUTER colAllw; the original declared a new local `uvec colAllw(ncolw)`
+    //here, which shadowed the outer vector and left it empty, so the submat
+    //extraction below silently dropped every column of w
+    colAllw.set_size(ncolw);
+    for(int i = 0; i<ncolw; i++) colAllw(i) = i;
+  }
+  
+  C = eye(2,2); C(1,0) = beta;
+  
+  for(int k = 0;k<ncomp;k++){
+      //Create an index vector ind, to be used like y[ind,]
+    ind = find(indic == (k+1));
+    indsize = ind.size();
+  
+    //If there are observations in this component
+    if(indsize>0){
+  
+      mat xtdk(2*indsize,dimd);    
+      
+      //extract the properly sized vector section
+      seqindk = seqind.subvec(0,indsize-1);
+      negseqindk = negseqind.subvec(0,indsize-1);
+      
+      if(isw) wk = w.submat(ind,colAllw);
+      zk = z.submat(ind,colAllz);
+      zveck = vectorise(trans(zk));
+      yk = y(ind);
+      xk = x.submat(ind,colAllx);
+  
+      murooti thetaStark_struct = thetaStar_vector[k];
+      mu = thetaStark_struct.mu;
+      rooti = thetaStark_struct.rooti;
+
+      //recover Sigma from its inverse Cholesky root
+      Sigma = solve(rooti,eye(2,2));
+      Sigma = trans(Sigma)*Sigma;
+      
+      B = C*Sigma*trans(C);
+      L = trans(chol(B));
+      Li = solve(trimatl(L),eye(2,2)); // L is lower triangular, trimatl interprets the matrix as lower triangular and makes solve more efficient
+      if(isw) {
+        u = vectorise(yk-wk*gamma-mu[1]-beta*mu[0]);
+      } else {
+        u = vectorise(yk-mu[1]-beta*mu[0]);
+      }
+      
+      ytk = vectorise(Li * join_cols(trans(xk-mu[0]),trans(u)));
+      
+      z2 = trans(join_rows(zveck,beta*zveck)); //join_rows is analogous to cbind()
+      z2 = Li*z2;
+      zt1 = z2(0,span::all);
+      zt2 = z2(1,span::all);
+      
+      zt1.reshape(dimd,indsize);
+      zt1 = trans(zt1);
+      zt2.reshape(dimd,indsize);
+      zt2=trans(zt2);
+      
+      //interleave: rows 0,2,4,... hold equation 1, rows 1,3,5,... equation 2
+      xtdk(seqindk,colAllz) = zt1;
+      xtdk(negseqindk,colAllz) = zt2;
+      
+      yt = join_cols(yt,ytk);
+      xtd = join_cols(xtd,xtdk);    
+    }
+  }
+
+  ytxtxtd out_struct;
+    out_struct.yt = yt;
+    out_struct.xtd = xtd;
+    
+  return(out_struct);  
+}
+
+DPOut rthetaDP(int maxuniq, double alpha, lambda lambda_struct, priorAlpha const& priorAlpha_struct, 
+                              std::vector<murooti> thetaStar_vector, ivec indic, vec const& q0v, mat const& y, int gridsize,
+                              List lambda_hyper){
+ 
+  // Wayne Taylor 3/14/2015
+
+//  function to make one draw from DP process 
+
+//  P. Rossi 1/06
+//  added draw of alpha 2/06
+//  removed lambdaD,etaD and function arguments 5/06
+//  removed thetaStar argument to .Call and creation of newthetaStar 7/06
+//  removed q0 computations as eta is not drawn  7/06
+//  changed for new version of thetadraw and removed calculation of thetaStar before
+//    .Call  7/07
+
+//      y(i) ~ f(y|theta[[i]],eta)
+//      theta ~ DP(alpha,G(lambda))
+
+//output:
+//   list with components:
+//      thetaDraws: list, [[i]] is a list of the ith draw of the n theta's
+//                  where n is the length of the input theta and nrow(y)
+//      thetaNp1Draws: list, [[i]] is ith draw of theta_{n+1}
+//args:
+//   maxuniq: the maximum number of unique thetaStar values -- an error will be raised
+//            if this is exceeded
+//   alpha,lambda: starting values (or fixed DP prior values if not drawn).
+//   Prioralpha: list of hyperparms of alpha prior
+//   theta: list of starting value for theta's
+//   thetaStar: list of unique values of theta, thetaStar[[i]]
+//   indic:  n vector of indicator for which unique theta (in thetaStar)
+//   y: is a matrix nxk
+//         thetaStar: list of unique values of theta, thetaStar[[i]]
+//   q0v:a double vector with the same number of rows as y, giving \Int f(y(i)|theta,eta) dG_{lambda}(theta).
+
+  int n = y.n_rows;
+  int dimy = y.n_cols;
+  
+  //variable type initializaion
+  int nunique, indsize, indp, probssize;
+  vec probs;
+  uvec ind;
+  mat ydenmat;
+  uvec spanall(dimy); for(int i = 0; i<dimy ; i++) spanall[i] = i; //creates a uvec of [0,1,...,dimy-1]
+  thetaStarIndex thetaStarDrawOut_struct;
+  std::vector<murooti> new_utheta(1), thetaNp1_vector(1);
+  murooti thetaNp10_struct, outGD;
+  
+  //NOTE(review): p holds the CRP weights but is never referenced below —
+  //appears to be dead code carried over from the R version; confirm before removing
+  vec p(n);
+  p[n-1] =  alpha/(alpha+(n-1));
+  for(int i = 0; i<(n-1); i++){
+   p[i] = 1/(alpha+(n-1));
+  }
+
+  nunique = thetaStar_vector.size();
+  
+  if(nunique > maxuniq) stop("maximum number of unique thetas exceeded");
+   
+  //ydenmat is a length(thetaStar) x n array of density values given f(y[j,] | thetaStar[[i]]
+  //  note: due to remix step (below) we must recompute ydenmat each time!
+  ydenmat = zeros<mat>(maxuniq,n);
+  
+  ydenmat(span(0,nunique-1),span::all) = yden(thetaStar_vector,y);
+
+  //update the set of unique thetas and each observation's assignment to them
+  thetaStarDrawOut_struct = thetaStarDraw(indic, thetaStar_vector, y, ydenmat, q0v, alpha, lambda_struct, maxuniq);
+  thetaStar_vector = thetaStarDrawOut_struct.thetaStar_vector;
+  indic = thetaStarDrawOut_struct.indic;
+  nunique = thetaStar_vector.size();
+
+  //thetaNp1 and remix
+  //remix: redraw each unique theta using only the observations assigned to it
+  probs = zeros<vec>(nunique+1);
+  for(int j = 0; j < nunique; j++){
+    ind = find(indic == (j+1));
+    indsize = ind.size();
+    probs[j] = indsize/(alpha + n + 0.0);
+    new_utheta[0] = thetaD(y(ind,spanall),lambda_struct);
+    thetaStar_vector[j] = new_utheta[0];
+  }
+  
+  //theta_{n+1}: last cell draws fresh from the base measure G(lambda),
+  //otherwise reuse the selected existing unique theta
+  probs[nunique] = alpha/(alpha+n+0.0);
+  indp = rmultinomF(probs);
+  probssize = probs.size();
+  if(indp == probssize) {
+    outGD = GD(lambda_struct);
+    thetaNp10_struct.mu = outGD.mu;
+    thetaNp10_struct.rooti = outGD.rooti;
+    thetaNp1_vector[0] = thetaNp10_struct;
+  } else {
+    outGD = thetaStar_vector[indp-1];
+    thetaNp10_struct.mu = outGD.mu;
+    thetaNp10_struct.rooti = outGD.rooti;
+    thetaNp1_vector[0] = thetaNp10_struct;
+  }
+    
+  //draw alpha
+  alpha = alphaD(priorAlpha_struct,nunique,gridsize);
+
+  //draw lambda
+  lambda_struct = lambdaD(lambda_struct,thetaStar_vector,lambda_hyper["alim"],lambda_hyper["nulim"],lambda_hyper["vlim"],gridsize);
+
+  DPOut out_struct;
+    out_struct.indic = indic;
+    out_struct.thetaStar_vector = thetaStar_vector;
+    out_struct.thetaNp1_vector = thetaNp1_vector;
+    out_struct.alpha = alpha;
+    out_struct.Istar = nunique;
+    out_struct.lambda_struct = lambda_struct;
+    
+  return(out_struct);
+}
+
+//RCPP SECTION----
+//[[Rcpp::export]]
+List rivDP_rcpp_loop(int R, int keep, int nprint,
+                     int dimd, vec const& mbg, mat const& Abg, vec const& md, mat const& Ad,
+                     vec const& y, bool isgamma, mat const& z, vec const& x, mat const& w, vec delta,
+                     List const& PrioralphaList, int gridsize, bool SCALE, int maxuniq, double scalex, double scaley,
+                     List const& lambda_hyper,double BayesmConstantA, int BayesmConstantnu){
+
+  // Wayne Taylor 3/14/2015
+
+  // MCMC loop for linear IV regression with a DP-mixture error distribution.
+  // Each sweep cycles: (beta,gamma) | delta,theta -> delta | beta,gamma,theta ->
+  // DP update of (theta, alpha, lambda); every keep'th draw is stored and the
+  // stored draws are optionally rescaled back to the original data units.
+
+  int n = y.size();
+  int dimg = 1;
+  if(isgamma) dimg = w.n_cols;
+
+  //variable type initializaion
+  int Istar, bgsize, mkeep;
+  double beta;
+  vec gammaVec, q0v, bg;
+  mat errMat, wEmpty, V;
+  wEmpty.reset(); //enforce 0 elements
+  ytxtxtd out_struct;
+  
+  //initialize indicator vector, thetaStar, ncomp, alpha
+  ivec indic = ones<ivec>(n);
+
+  //start with a single component: standard normal theta
+  std::vector<murooti> thetaStar_vector(1), thetaNp1_vector(1);
+  murooti thetaNp10_struct, thetaStar0_struct;
+    thetaStar0_struct.mu = zeros<vec>(2);
+    thetaStar0_struct.rooti = eye(2,2);
+  thetaStar_vector[0] = thetaStar0_struct;
+  
+  //Initialize lambda
+  lambda lambda_struct;
+    lambda_struct.mubar = zeros<vec>(2);
+    lambda_struct.Amu = BayesmConstantA;
+    lambda_struct.nu = BayesmConstantnu;
+    lambda_struct.V = lambda_struct.nu*eye(2,2);  
+    
+  //convert Prioralpha from List to struct
+  priorAlpha priorAlpha_struct;
+    priorAlpha_struct.power = PrioralphaList["power"];
+    priorAlpha_struct.alphamin = PrioralphaList["alphamin"];
+    priorAlpha_struct.alphamax = PrioralphaList["alphamax"];
+    priorAlpha_struct.n = PrioralphaList["n"];  
+  
+  int ncomp = 1;
+  
+  double alpha = 1.0;
+  
+  //allocate space for draws
+  mat deltadraw = zeros<mat>(R/keep,dimd);
+  vec betadraw = zeros<vec>(R/keep);
+  vec alphadraw = zeros<vec>(R/keep);
+  vec Istardraw = zeros<vec>(R/keep);
+  mat gammadraw = zeros<mat>(R/keep,dimg);
+  List thetaNp1draw(R/keep);
+  vec nudraw = zeros<vec>(R/keep);
+  vec vdraw = zeros<vec>(R/keep);
+  vec adraw = zeros<vec>(R/keep);
+
+  if(nprint>0) startMcmcTimer();
+
+  for(int rep = 0; rep < R; rep++) {
+    
+    //draw beta and gamma
+    if(isgamma){
+      out_struct = get_ytxt(y,z,delta,x,w,ncomp,indic,thetaStar_vector);
+    } else {
+      out_struct = get_ytxt(y,z,delta,x,wEmpty,ncomp,indic,thetaStar_vector);
+    }
+    
+    //Bayes regression draw given normal prior (mbg,Abg); first element is beta
+    bg = breg(out_struct.yt,out_struct.xt,mbg,Abg); 
+    beta = bg[0];
+    bgsize = bg.size()-1;
+    if(isgamma) gammaVec = bg.subvec(1,bgsize);
+
+    //draw delta
+    //gammaVec is empty when isgamma==FALSE; get_ytxtd only uses it when w is non-empty
+    if(isgamma){
+      out_struct=get_ytxtd(y,z,beta,gammaVec,x,w,ncomp,indic,thetaStar_vector,dimd);
+    } else {
+      out_struct=get_ytxtd(y,z,beta,gammaVec,x,wEmpty,ncomp,indic,thetaStar_vector,dimd);
+    }
+  
+    delta = breg(out_struct.yt,out_struct.xtd,md,Ad);
+        
+    //DP process stuff- theta | lambda
+    //errMat holds the two residual series (first-stage, structural) per obs
+    if(isgamma) {
+      errMat = join_rows(x-z*delta,y-beta*x-w*gammaVec);
+    } else {
+      errMat = join_rows(x-z*delta,y-beta*x);
+    }
+    
+    q0v = q0(errMat,lambda_struct);
+    
+    DPOut DPout_struct = rthetaDP(maxuniq,alpha,lambda_struct,priorAlpha_struct,thetaStar_vector,indic,q0v,errMat,gridsize,lambda_hyper);
+    
+    indic = DPout_struct.indic;
+    thetaStar_vector = DPout_struct.thetaStar_vector;
+    alpha = DPout_struct.alpha;
+    Istar = DPout_struct.Istar;
+    thetaNp1_vector = DPout_struct.thetaNp1_vector;
+    thetaNp10_struct = thetaNp1_vector[0];
+    ncomp=thetaStar_vector.size();
+    lambda_struct = DPout_struct.lambda_struct;
+    
+    if (nprint>0) if((rep+1)%nprint==0) infoMcmcTimer(rep, R);
+    
+    if((rep+1)%keep==0){
+      mkeep = (rep+1)/keep;
+      deltadraw(mkeep-1,span::all) = trans(delta);
+      betadraw[mkeep-1] = beta;
+      alphadraw[mkeep-1] = alpha;
+      Istardraw[mkeep-1] = Istar;
+      if(isgamma) gammadraw(mkeep-1,span::all) = trans(gammaVec);
+      //We need to convert from to NumericVector so that the nmix plotting works properly (it does not work for an nx1 matrix)
+      thetaNp1draw[mkeep-1] = List::create(List::create(Named("mu") = NumericVector(thetaNp10_struct.mu.begin(),thetaNp10_struct.mu.end()),Named("rooti") = thetaNp10_struct.rooti));
+      adraw[mkeep-1] = lambda_struct.Amu;
+      nudraw[mkeep-1] = lambda_struct.nu;
+      V = lambda_struct.V;
+      vdraw[mkeep-1] = V(0,0)/(lambda_struct.nu+0.0);
+    }
+  }
+  
+  //rescale
+  //undo the standardization applied to x and y by the R wrapper (SCALE option)
+  if(SCALE){
+    deltadraw=deltadraw*scalex;
+    betadraw=betadraw*scaley/scalex;
+    if(isgamma) gammadraw=gammadraw*scaley;
+  }  
+  
+  if (nprint>0) endMcmcTimer();
+  
+  return List::create(
+    Named("deltadraw") = deltadraw,
+    Named("betadraw") = betadraw,
+    Named("alphadraw") = alphadraw,
+    Named("Istardraw") = Istardraw,
+    Named("gammadraw") = gammadraw,
+    Named("thetaNp1draw") = thetaNp1draw,
+    Named("adraw") = adraw,
+    Named("nudraw") = nudraw,
+    Named("vdraw") = vdraw);
+}
diff --git a/src/rivgibbs_rcpp_loop.cpp b/src/rivgibbs_rcpp_loop.cpp
new file mode 100644
index 0000000..5ab47ac
--- /dev/null
+++ b/src/rivgibbs_rcpp_loop.cpp
@@ -0,0 +1,135 @@
+#include "bayesm.h"
+ 
+// [[Rcpp::export]]
+List rivGibbs_rcpp_loop(vec const& y, vec const& x, mat const& z, mat const& w, vec const& mbg, mat const& Abg, 
+                  vec const& md, mat const& Ad, mat const& V, int nu, int R, int keep, int nprint){
+
+// Keunwoo Kim 09/09/2014
+
+// Purpose: draw from posterior for linear I.V. model
+
+// Arguments:
+//   Data -- list of z,w,x,y
+//        y is vector of obs on lhs var in structural equation
+//        x is "endogenous" var in structural eqn
+//        w is matrix of obs on "exogenous" vars in the structural eqn
+//        z is matrix of obs on instruments
+//   Prior -- list of md,Ad,mbg,Abg,nu,V
+//        md is prior mean of delta
+//        Ad is prior prec
+//        mbg is prior mean vector for beta,gamma
+//        Abg is prior prec of same
+//        nu,V parms for IW on Sigma
+
+//   Mcmc -- list of R,keep 
+//        R is number of draws
+//        keep is thinning parameter
+//        nprint - print estimated time remaining on every nprint'th draw
+
+// Output: list of draws of delta,beta,gamma and Sigma
+ 
+// Model:
+//    x=z'delta + e1
+//    y=beta*x + w'gamma + e2
+//        e1,e2 ~ N(0,Sigma)
+//
+// Prior:
+//   delta ~ N(md,Ad^-1)
+//   vec(beta,gamma) ~ N(mbg,Abg^-1)
+//   Sigma ~ IW(nu,V)
+// 
+
+  vec e1, ee2, bg, u, gamma;
+  mat xt, Res, S, B, L, Li, z2, zt1, zt2, ucholinv, VSinv, yt;
+  double sig,beta;
+  List out;
+  int i, mkeep;
+
+  int n = y.size();
+  int dimd = z.n_cols;
+  int dimg = w.n_cols;
+
+  mat deltadraw(R/keep, dimd);
+  vec betadraw(R/keep);
+  mat gammadraw(R/keep, dimg);
+  mat Sigmadraw(R/keep, 4);  
+  mat C = eye(2,2); //eye creates a diagonal matrix
+
+  // set initial values
+  mat Sigma = eye(2,2);
+  vec delta = 0.1 * ones<vec>(dimd);
+
+  if (nprint>0) startMcmcTimer();  
+  
+  mat xtd(2*n, dimd);  
+  vec zvec = vectorise(trans(z));
+  
+  // start main iteration loop
+  for (int rep=0; rep<R; rep++){   
+    
+    // draw beta,gamma
+    // condition on e1: y | x has mean shifted by (Sigma_12/Sigma_11)e1 and sd sig
+    e1 = x - z*delta;
+    ee2 = (Sigma(0,1)/Sigma(0,0)) * e1;
+    sig = sqrt(Sigma(1,1)-((Sigma(0,1)*Sigma(0,1))/Sigma(0,0)));
+    yt = (y-ee2)/sig;
+    xt = join_rows(x,w)/sig; //similar to cbind(x,w)
+    bg = breg(yt,xt,mbg,Abg);
+    beta = bg[0];
+    gamma = bg(span(1,bg.size()-1));
+    
+    // draw delta
+    // standardize the 2-equation system by Li = L^-1 where LL' = C Sigma C'
+    C(1,0) = beta;
+    B = C*Sigma*trans(C);
+    L = trans(chol(B));
+    Li = solve(trimatl(L),eye(2,2)); //trimatl interprets the matrix as lower triangular and makes solve more efficient
+    u = y - w*gamma;
+    yt = vectorise(Li * trans(join_rows(x,u)));
+    z2 = trans(join_rows(zvec, beta*zvec));
+    z2 = Li*z2;
+    zt1 = z2(0,span::all);
+    zt2 = z2(1,span::all);
+    zt1.reshape(dimd,n);    
+    zt1 = trans(zt1);
+    zt2.reshape(dimd,n);    
+    zt2 = trans(zt2);
+    //rows 2i and 2i+1 of xtd hold the two standardized equations for obs i
+    for (i=0; i<n; i++){
+      xtd(2*i,span::all) = zt1(i,span::all);
+      xtd(2*i+1,span::all) = zt2(i,span::all);
+    }
+    delta = breg(yt,xtd,md,Ad);
+    
+    // draw Sigma
+    Res = join_rows(x-z*delta, y-beta*x-w*gamma); //analogous to cbind() 
+    S = trans(Res)*Res;
+    
+    // compute the inverse of V+S
+    ucholinv = solve(trimatu(chol(V+S)), eye(2,2));
+    VSinv = ucholinv*trans(ucholinv);
+    
+    out = rwishart(nu+n, VSinv);
+    Sigma = as<mat>(out["IW"]); //conversion from Rcpp to Armadillo requires explict declaration of variable type using as<>
+    
+    // print time to completion and draw # every nprint'th draw
+    if (nprint>0) if ((rep+1)%nprint==0) infoMcmcTimer(rep, R);
+    
+    if((rep+1)%keep==0){
+      mkeep = (rep+1)/keep;
+      deltadraw(mkeep-1, span::all) = trans(delta);
+      betadraw[mkeep-1] = beta;
+      gammadraw(mkeep-1, span::all) = trans(gamma);
+      Sigmadraw(mkeep-1, span::all) = trans(vectorise(Sigma));
+    }    
+  }
+  
+  if (nprint>0) endMcmcTimer();
+  
+  return List::create(
+      Named("deltadraw") = deltadraw,
+      Named("betadraw") = NumericVector(betadraw.begin(),betadraw.end()),
+      Named("gammadraw") = gammadraw,
+      Named("Sigmadraw") = Sigmadraw);   
+}
+
+
+
+
diff --git a/src/rmixGibbs_rcpp.cpp b/src/rmixGibbs_rcpp.cpp
new file mode 100644
index 0000000..085af5c
--- /dev/null
+++ b/src/rmixGibbs_rcpp.cpp
@@ -0,0 +1,178 @@
+#include "bayesm.h"
+ 
+//W. Taylor: we considered moving the output to struct formats but the efficiency
+//  gains were limited and the conversions back and forth between Lists and struct were cumbersome
+
+List drawCompsFromLabels(mat const& y,  mat const& Bbar, 
+                         mat const& A, int nu, 
+                         mat const& V,  int ncomp,
+                         vec const& z){
+                           
+// Wayne Taylor 3/18/2015
+
+// Draw each normal component given the current label vector z.
+// Components with at least one assigned observation are drawn from the
+// conjugate posterior via rmultireg; empty components are drawn from the
+// prior. Each list element holds mu and rooti (inverse of chol(Sigma)).
+  
+  int nobs = z.n_rows;
+  List componentList(ncomp);
+  
+  //count observations per component (z labels are 1-based)
+  vec counts = zeros<vec>(ncomp);
+  for(int obs = 0; obs<nobs; obs++) counts[z[obs]-1] += 1;
+  
+  for(int k = 0; k<ncomp; k++){
+    
+    vec mu;
+    mat rooti;
+    
+    if(counts[k] > 0) {
+      //posterior draw using the observations labeled k+1
+      mat yk = y.rows(find(z==(k+1)));
+      mat Xk = ones(counts[k], 1);
+      
+      List post = rmultireg(yk, Xk, Bbar, A, nu, V);
+      
+      mat sigma = as<mat>(post["Sigma"]); //as<> converts from Rcpp to Armadillo
+      rooti = solve(trimatu(chol(sigma)),eye(sigma.n_rows,sigma.n_cols)); //trimatu speeds up solve for triangular input
+      mu = as<vec>(post["B"]);
+      
+    } else {
+      //prior draw: Sigma ~ IW(nu,V), then mu | Sigma ~ N(vec(Bbar), Sigma/A(0,0))
+      mat S = solve(trimatu(chol(V)),eye(V.n_rows,V.n_cols));
+      S = S * trans(S);
+      
+      List rw = rwishart(nu, S);
+      mat IW = as<mat>(rw["IW"]);
+      mat CI = as<mat>(rw["CI"]);
+      
+      rooti = solve(trimatu(chol(IW)),eye(IW.n_rows,IW.n_cols));
+      vec b = vectorise(Bbar);
+      vec r = rnorm(b.n_rows,0,1);
+      
+      mu = b + (CI * r) / sqrt(A(0,0));
+    }
+    
+    //store mu as a NumericVector so it is not interpreted as a matrix on the R side
+    componentList(k) = List::create(
+      Named("mu") = NumericVector(mu.begin(),mu.end()),
+      Named("rooti") = rooti);
+  }
+
+  return(componentList);
+}
+
+vec drawLabelsFromComps(mat const& y, vec const& p, List comps) {
+  
+// Wayne Taylor 3/18/2015
+
+// Function to determine which label is associated with each y value:
+// computes the log MVN density of each row of y under every component,
+// weights by p, and draws a 1-based label from the resulting multinomial.
+  
+  double logprod;
+  vec mu, u;
+  mat rooti;
+  List compsk;
+  
+  int n = y.n_rows;
+  vec res = zeros<vec>(n);
+  int ncomp  = comps.size();
+  mat prob(n,ncomp);
+  
+  for(int k = 0; k<ncomp; k++) {
+    compsk = comps[k];
+    mu = as<vec>(compsk["mu"]); //conversion from Rcpp to Armadillo requires explict declaration of variable type using as<>
+    rooti = as<mat>(compsk["rooti"]);
+
+    //Find log of MVN density using matrices
+    logprod = log(prod(diagvec(rooti)));
+    mat z(y);
+    z.each_row() -= trans(mu); //subtracts mu from each row in z
+    z = trans(rooti) * trans(z);
+    z = -(y.n_cols/2.0) * log(2*M_PI) + logprod - .5 * sum(z % z, 0); // operator % performs element-wise multiplication
+      
+    prob.col(k) =  trans(z);
+  }
+
+  prob = exp(prob);
+  prob.each_row() %= trans(p); //element-wise multiplication
+
+  // Cumulatively add each row and take a uniform draw between 0 and the cumulative sum
+  prob = cumsum(prob, 1);
+  u = as<vec>(runif(n)) % prob.col(ncomp-1);
+  
+  // Scan each row's cumulative probabilities until the draw is covered.
+  // An integer column index replaces the original's use of a double (res[i])
+  // as a matrix subscript inside a side-effecting loop condition; the
+  // resulting labels are 1-based.
+  for(int i = 0; i<n; i++) {
+    int col = 0;
+    while(u[i] > prob(i, col)) col++;
+    res[i] = col + 1;
+  }
+  
+  return(res);
+}
+
+vec drawPFromLabels(vec const& a, vec const& z) {
+  
+// Wayne Taylor 9/10/2014
+
+// Draw the mixture probabilities from their Dirichlet posterior: the prior
+// parameters in a are updated with the component counts implied by z.
+  
+  int nobs = z.n_rows;
+  vec postA(a);
+  
+  //z labels are 1-based; tally one count per assigned observation
+  for(int obs = 0; obs<nobs; obs++) postA[z[obs]-1] += 1;
+  
+  return rdirichlet(postA);
+}
+
+//[[Rcpp::export]]
+List rmixGibbs( mat const& y,  mat const& Bbar, 
+                mat const& A, int nu, 
+                mat const& V,  vec const& a, 
+                vec const& p,  vec const& z) {
+
+// Wayne Taylor 9/10/2014
+
+// Revision History: R. McCulloch 11/04, P. Rossi 3/05 (backsolve, docs)
+//
+// One Gibbs pass for a mixture of multivariate normals:
+//   (1) draw the normal components given the current labels z,
+//   (2) draw new labels given the components and probabilities p,
+//   (3) draw the mixture probabilities from their Dirichlet posterior.
+//
+// y: data, one iid observation per row
+// Bbar,A,nu,V: common conjugate prior for each component's mean/variance,
+//   beta ~ N(vec(Bbar), Sigma (x) A^-1), Sigma ~ IW(nu,V)
+//   (for Sigma ~ roughly A, use a large nu and rwishart(nu,nu(A)^-1)$IW)
+// a: Dirichlet prior parameters for p
+// p: current component probabilities
+// z: current 1-based component labels, one per observation
+//
+// Returns a list with updated p, z, and comps, where comps[[i]] holds the
+// ith component's mu and rooti (Sigma = t(R)%*%R with R^-1 = rooti).
+
+  List updatedComps = drawCompsFromLabels(y, Bbar, A, nu, V, a.size(), z);
+  
+  vec updatedZ = drawLabelsFromComps(y, p, updatedComps);
+  
+  vec updatedP = drawPFromLabels(a, updatedZ);
+
+  return List::create(
+    Named("p") = updatedP,
+    Named("z") = updatedZ,
+    Named("comps") = updatedComps);
+}
diff --git a/src/rmixture_rcpp.cpp b/src/rmixture_rcpp.cpp
new file mode 100644
index 0000000..694fb2d
--- /dev/null
+++ b/src/rmixture_rcpp.cpp
@@ -0,0 +1,65 @@
+#include "bayesm.h"
+ 
+//FUNCTION SPECIFIC TO MAIN FUNCTION--------------------------------
+vec rcomp(List comp) {
+  
+// Wayne Taylor 9/10/14
+
+// Draw once from the multivariate normal described by comp:
+// comp[[1]] is the mean mu, comp[[2]] is R^{-1} where Sigma = t(R)%*%R.
+
+  vec mu = comp[0];
+  mat rooti = comp[1];
+  int dim = rooti.n_cols;
+
+  //invert the upper-triangular rooti to recover R (trimatu speeds up solve)
+  mat R = solve(trimatu(rooti),eye(dim,dim));
+  vec draws = as<vec>(rnorm(mu.size()));
+
+  return vectorise(mu + trans(R)*draws);
+}
+
+//[[Rcpp::export]]
+List rmixture(int n, vec pvec, List comps) {
+                           
+// Wayne Taylor 9/10/2014
+
+// revision history:
+//   commented by rossi 3/05
+//
+// purpose: iid draws from mixture of multivariate normals
+// arguments:
+//     n: number of draws
+//     pvec: prior probabilities of normal components
+//     comps: list, each member is a list comp with ith normal component
+//                     ~N(comp[[1]],Sigma), Sigma = t(R)%*%R, R^{-1} = comp[[2]]
+// output:
+//  list of x (n by length(comp[[1]]) matrix of draws) and z latent indicators of
+//  component
+
+  //replicate pvec across rows so each draw has the same component probabilities
+  mat prob(n,pvec.size());
+  for(int i = 0; i<n; i++) prob(i,span::all) = trans(pvec);
+  
+  // Cumulatively add each row and take a uniform draw between 0 and the cumulative sum
+  prob = cumsum(prob, 1);
+  vec u = as<vec>(runif(n)) % prob.col(pvec.size()-1);
+  
+  // Scan each row's cumulative probabilities until the draw is covered.
+  // An integer index replaces the original's use of a double (z[i]) as a
+  // matrix subscript inside a side-effecting loop condition; labels are 1-based.
+  vec z = zeros<vec>(n);
+  for(int i = 0; i<n; i++){
+    int comp = 0;
+    while(u[i] > prob(i,comp)) comp++;
+    z[i] = comp + 1;
+  }
+
+  List comp0 = comps[0];
+  vec mu0 = comp0[0];
+  mat x(n,mu0.size());
+  
+  //Draw from MVN from comp determined from z index
+  //Note z starts at 1, not 0
+  for(int i = 0; i<n; i++) {
+    x(i,span::all) = trans(rcomp(comps[z[i]-1]));
+  }
+
+  return List::create(
+    Named("x") = x, 
+    Named("z") = z);
+}
diff --git a/src/rmnlIndepMetrop_rcpp_loop.cpp b/src/rmnlIndepMetrop_rcpp_loop.cpp
new file mode 100644
index 0000000..4fc97cf
--- /dev/null
+++ b/src/rmnlIndepMetrop_rcpp_loop.cpp
@@ -0,0 +1,63 @@
+#include "bayesm.h"
+ 
+//[[Rcpp::export]]
+List rmnlIndepMetrop_rcpp_loop(int R, int keep, int nu,
+                                vec const& betastar, mat const& root,vec const& y,mat const& X,
+                                vec const& betabar,mat const& rootpi,mat const& rooti,
+                                double oldlimp,double oldlpost,int nprint) {
+
+// Wayne Taylor 9/7/2014
+
+// Independence Metropolis sampler for the MNL model: candidates are drawn
+// from a multivariate-t centered at betastar; acceptance compares the
+// posterior (llmnl + normal prior) against the t importance density.
+
+  int mkeep = 0;
+  int naccept = 0;    
+  int ncolX = X.n_cols;
+  
+  mat betadraw(R/keep, ncolX);
+  vec loglike(R/keep);
+  vec betac = zeros<vec>(ncolX);
+  rowvec beta = zeros<rowvec>(ncolX);
+  double cloglike, clpost, climp, ldiff, alpha, unif;
+  //initialize oldloglike at the starting beta (zeros) — the R caller presumably
+  //computes oldlpost at this same starting point (confirm). The original left
+  //oldloglike uninitialized, so loglike could store garbage if no candidate was
+  //accepted before the first kept draw.
+  double oldloglike = llmnl(betac,y,X);
+  vec alphaminv;
+  
+  if(nprint>0) startMcmcTimer();
+  
+  // start main iteration loop
+  for(int rep = 0; rep<R; rep++) {
+    
+    betac = rmvst(nu,betastar,root); //candidate from the mvt proposal
+    cloglike = llmnl(betac,y,X);
+    clpost = cloglike+lndMvn(betac,betabar,rootpi);
+    climp = lndMvst(betac,nu,betastar,rooti,false);
+    ldiff = clpost+oldlimp-oldlpost-climp;
+    alphaminv << 1 << exp(ldiff); //intializes variables in the alphaminv vec: c(1,exp(ldiff))
+    alpha = min(alphaminv);
+  
+    if(alpha < 1.0) {
+        unif = runif(1)[0]; //runif returns a NumericVector, so using [0] allows for conversion to double
+      }else{
+        unif = 0.0;
+      }
+    if (unif <= alpha){ 
+      //accept the candidate and update the chain state
+      beta = trans(betac);
+      oldloglike = cloglike;
+      oldlpost = clpost;
+      oldlimp = climp;
+      naccept++;
+    }
+          
+    if (nprint>0) if ((rep+1)%nprint==0) infoMcmcTimer(rep, R);
+    
+    if((rep+1)%keep==0){
+      mkeep = (rep+1)/keep;
+      betadraw(mkeep-1,span::all) = beta;
+      loglike[mkeep-1] = oldloglike;
+    }
+  }
+  
+  if(nprint>0) endMcmcTimer();
+      
+  return List::create(
+    Named("betadraw") = betadraw, 
+    Named("loglike") = loglike, 
+    Named("naccept") = naccept);
+}
diff --git a/src/rmnpGibbs_rcpp_loop.cpp b/src/rmnpGibbs_rcpp_loop.cpp
new file mode 100644
index 0000000..4c09f52
--- /dev/null
+++ b/src/rmnpGibbs_rcpp_loop.cpp
@@ -0,0 +1,143 @@
+#include "bayesm.h"
+ 
+//EXTRA FUNCTIONS SPECIFIC TO THE MAIN FUNCTION--------------------------------------------
+vec drawwi(vec const& w, vec const& mu, mat const& sigmai, int p, int y){
+
+// Wayne Taylor 9/8/2014
+
+// Gibbs through the p elements of w_i: each element is drawn from its
+// univariate conditional (via condmom) truncated at the running maximum
+// of the other elements; the element matching choice y is truncated below,
+// all others above.
+
+  vec outwi = w;
+
+  for(int i = 0; i<p; i++){
+    //bound = max(0, w_j for all j != i)
+    double bound = 0.0;
+    for(int j = 0; j<p; j++){
+      if(j!=i && outwi[j] > bound) bound = outwi[j];
+    }
+
+    //truncate above unless element i+1 corresponds to the observed choice y
+    int above = (y==(i+1)) ? 0 : 1;
+
+    vec CMout = condmom(outwi,mu,sigmai,p,i+1);
+    outwi[i] = rtrun1(CMout[0],CMout[1],bound,above);
+  }
+
+  return (outwi);
+}
+
+vec draww(vec const& w, vec const& mu, mat const& sigmai, ivec const& y){
+
+// Wayne Taylor 9/8/2014 
+
+// Gibbs the full stacked latent vector: observation i occupies the length-p
+// slice [p*i, p*i+p-1] of w and mu, and each slice is updated by drawwi.
+  
+  int n = y.n_rows;
+  int p = sigmai.n_cols;
+  vec outw = zeros<vec>(w.n_rows);
+  
+  for(int i = 0; i<n; i++){
+    int first = p*i;
+    int last  = first+p-1;
+    outw.subvec(first,last) = drawwi(w.subvec(first,last),mu.subvec(first,last),sigmai,p,y[i]);
+  }
+
+  return (outw);
+}
+
+//MAIN FUNCTION---------------------------------------------------------------------------------------
+//[[Rcpp::export]]
+List rmnpGibbs_rcpp_loop(int R, int keep, int nprint, int pm1, 
+                         ivec const& y, mat const& X, vec const& beta0, mat const& sigma0, 
+                         mat const& V, int nu, vec const& betabar, mat const& A) {
+
+// Wayne Taylor 9/24/2014
+
+// Gibbs sampler for the multinomial probit model: each sweep cycles
+//   w | beta,Sigma (latent utilities), beta | w,Sigma, and Sigma^-1 | w,beta,
+// storing every keep'th draw of beta and Sigma.
+
+  int n = y.n_rows;
+  int k = X.n_cols;
+  int Xrows = X.n_rows;
+  
+  //allocate space for draws
+  mat sigmadraw = zeros<mat>(R/keep, pm1*pm1);
+  mat betadraw = zeros<mat>(R/keep,k);
+  vec wnew = zeros<vec>(Xrows);
+  
+  //set initial values of w,beta, sigma (or root of inv)
+  vec wold = wnew;
+  vec betaold = beta0;
+  
+  mat C = chol(solve(trimatu(sigma0),eye(sigma0.n_cols,sigma0.n_cols))); //trimatu interprets the matrix as upper triangular and makes solve more efficient
+  //C is upper triangular root of sigma^-1 (G) = C'C
+  
+  mat sigmai, zmat, epsilon, S, IW, ucholinv, VSinv;
+  vec betanew;
+  List W;
+  
+  // start main iteration loop
+  int mkeep = 0;
+  
+  if(nprint>0) startMcmcTimer();
+  
+    for(int rep = 0; rep<R; rep++) {
+      
+      //draw w given beta(rep-1),sigma(rep-1)
+      sigmai = trans(C)*C;
+      //    draw latent vector
+    
+      //    w is n x (p-1) vector
+      //       X ix n(p-1) x k  matrix
+      //       y is multinomial 1,..., p
+      //       beta is k x 1 vector
+      //       sigmai is (p-1) x (p-1) 
+          
+      wnew = draww(wold,X*betaold,sigmai,y);
+      
+      //draw beta given w(rep) and sigma(rep-1)
+      //  note:  if Sigma^-1 (G) = C'C then Var(Ce)=CSigmaC' = I
+      //  first, transform w_i = X_ibeta + e_i by premultiply by C
+      
+      //reshape so each obs's (p-1)-block of [w X] is premultiplied by C at once
+      zmat = join_rows(wnew,X);
+      zmat.reshape(pm1,n*(k+1));
+      zmat = C*zmat;
+      zmat.reshape(Xrows,k+1);
+      
+      //column 0 is the standardized w; remaining columns are the standardized X
+      betanew = breg(zmat(span::all,0),zmat(span::all,span(1,k)),betabar,A);
+      
+      //draw sigmai given w and beta
+      epsilon = wnew-X*betanew;
+      epsilon.reshape(pm1,n);  
+      S = epsilon*trans(epsilon);
+      
+      //same as chol2inv(chol(V+S))
+      ucholinv = solve(trimatu(chol(V+S)), eye(S.n_cols,S.n_cols));
+      VSinv = ucholinv*trans(ucholinv);
+      
+      W = rwishart(nu+n,VSinv);
+      C = as<mat>(W["C"]); //conversion from Rcpp to Armadillo requires explict declaration of variable type using as<>
+      
+      //print time to completion
+      if (nprint>0) if ((rep+1)%nprint==0) infoMcmcTimer(rep, R);
+      
+      //save every keepth draw
+        if((rep+1)%keep==0){
+          mkeep = (rep+1)/keep;
+          betadraw(mkeep-1,span::all) = trans(betanew);
+          IW  = as<mat>(W["IW"]);
+          sigmadraw(mkeep-1,span::all) = trans(vectorise(IW));
+         }
+        
+      //advance the chain state for the next sweep
+      wold = wnew;
+      betaold = betanew;
+    }
+  
+  if(nprint>0) endMcmcTimer();
+      
+  return List::create(
+    Named("betadraw") = betadraw, 
+    Named("sigmadraw") = sigmadraw);
+}
diff --git a/src/rmultireg_rcpp.cpp b/src/rmultireg_rcpp.cpp
new file mode 100644
index 0000000..ac6f1cf
--- /dev/null
+++ b/src/rmultireg_rcpp.cpp
@@ -0,0 +1,70 @@
+#include "bayesm.h"
+ 
+// [[Rcpp::export]]
+List rmultireg(mat const& Y, mat const& X, mat const& Bbar, mat const& A, int nu, mat const& V) {
+
+// Keunwoo Kim 09/09/2014
+
+// Purpose: draw from posterior for Multivariate Regression Model with natural conjugate prior
+
+// Arguments:
+//  Y is n x m matrix
+//  X is n x k
+//  Bbar is the prior mean of regression coefficients  (k x m)
+//  A is prior precision matrix
+//  nu, V are parameters for prior on Sigma
+
+// Output: list of B, Sigma draws of matrix of coefficients and Sigma matrix
+ 
+// Model: 
+//  Y=XB+U  cov(u_i) = Sigma
+//  B is k x m matrix of coefficients
+
+// Prior:  
+//  beta|Sigma  ~ N(betabar,Sigma (x) A^-1)
+//  betabar=vec(Bbar)
+//  beta = vec(B) 
+//  Sigma ~ IW(nu,V) or Sigma^-1 ~ W(nu, V^-1)
+
+  int n = Y.n_rows;
+  int m = Y.n_cols;
+  int k = X.n_cols;
+  
+  //first draw Sigma
+  //the prior is folded in by stacking pseudo-observations (RA, RA*Bbar) below the data
+  mat RA = chol(A);
+  mat W = join_cols(X, RA); //analogous to rbind() in R
+  mat Z = join_cols(Y, RA*Bbar);
+  // note:  Y,X,A,Bbar must be matrices!
+  mat IR = solve(trimatu(chol(trans(W)*W)), eye(k,k)); //trimatu interprets the matrix as upper triangular and makes solve more efficient
+  // W'W = R'R  &  (W'W)^-1 = IRIR'  -- this is the UL decomp!
+  mat Btilde = (IR*trans(IR)) * (trans(W)*Z);
+  // IRIR'(W'Z) = (X'X+A)^-1(X'Y + ABbar)
+  mat E = Z-W*Btilde;
+  mat S = trans(E)*E;
+  // E'E
+  
+  // compute the inverse of V+S
+  mat ucholinv = solve(trimatu(chol(V+S)), eye(m,m));
+  mat VSinv = ucholinv*trans(ucholinv);
+  
+  //posterior: Sigma^-1 ~ W(nu+n, (V+S)^-1); rwout["IW"] is the Sigma draw
+  List rwout = rwishart(nu+n, VSinv);
+  
+  // now draw B given Sigma
+  //   note beta ~ N(vec(Btilde),Sigma (x) Covxxa)
+  //       Cov=(X'X + A)^-1  = IR t(IR)  
+  //       Sigma=CICI'    
+  //       therefore, cov(beta)= Omega = CICI' (x) IR IR' = (CI (x) IR) (CI (x) IR)'
+  //  so to draw beta we do beta= vec(Btilde) +(CI (x) IR)vec(Z_mk)  
+  //  		Z_mk is m x k matrix of N(0,1)
+  //	since vec(ABC) = (C' (x) A)vec(B), we have 
+  //		B = Btilde + IR Z_mk CI'
+
+  mat CI = rwout["CI"]; //there is no need to use as<mat>(rwout["CI"]) since CI is being initiated as a mat in the same line
+  mat draw = mat(rnorm(k*m));
+  draw.reshape(k,m);
+  mat B = Btilde + IR*draw*trans(CI);
+    
+  return List::create(
+      Named("B") = B, 
+      Named("Sigma") = rwout["IW"]);
+}
diff --git a/src/rmvpGibbs_rcpp_loop.cpp b/src/rmvpGibbs_rcpp_loop.cpp
new file mode 100644
index 0000000..1b29921
--- /dev/null
+++ b/src/rmvpGibbs_rcpp_loop.cpp
@@ -0,0 +1,135 @@
+#include "bayesm.h"
+ 
+//EXTRA FUNCTIONS SPECIFIC TO THE MAIN FUNCTION--------------------------------------------
+//Draw w_i for the multivariate probit by Gibbs sampling through the p-vector.
+//  w      : current latent utilities for this observation (length p)
+//  mu     : conditional means (X*beta) for this observation
+//  sigmai : p x p inverse covariance matrix
+//  y      : binary outcomes; y[i]!=0 truncates w[i] below by 0, else above by 0
+//NOTE(review): y is now passed by const reference (it was passed by value,
+//copying the vector on every observation); call sites are unaffected.
+vec drawwi_mvp(vec const& w, vec const& mu, mat const& sigmai, int p, ivec const& y){
+  
+//Wayne Taylor 9/8/2014
+  
+  int above;
+  vec outwi = w;
+
+  for(int i = 0; i<p; i++){
+    //truncation side: y[i]==1 -> w[i] > 0 (above=0); y[i]==0 -> w[i] < 0 (above=1)
+    above = y[i] ? 0 : 1;
+
+    //univariate conditional moments of element i given the others
+    vec CMout = condmom(outwi,mu,sigmai,p,i+1);
+    outwi[i] = rtrun1(CMout[0],CMout[1],0.0,above);
+  }
+
+  return (outwi);
+}
+
+vec draww_mvp(vec const& w, vec const& mu, mat const& sigmai, ivec const& y){
+  
+// Wayne Taylor 9/8/2014
+  
+//function to gibbs down entire w vector for all n obs
+//w, mu, y are stacked n*p vectors; sigmai is the shared p x p inverse covariance
+
+  int p = sigmai.n_cols;
+  int n = w.size()/p;
+  int ind; 
+  vec outw = zeros<vec>(w.size());
+  
+  for(int i = 0; i<n; i++){
+    ind = p*i; //offset of observation i within the stacked vectors
+		outw.subvec(ind,ind+p-1) = drawwi_mvp(w.subvec(ind,ind+p-1),mu.subvec(ind,ind+p-1),sigmai,p,y.subvec(ind,ind+p-1));
+	}
+
+  return (outw);
+}
+
+//MAIN FUNCTION---------------------------------------------------------------------------------------
+//[[Rcpp::export]]
+List rmvpGibbs_rcpp_loop(int R, int keep, int nprint, int p, 
+                         ivec const& y, mat const& X, vec const& beta0, mat const& sigma0, 
+                         mat const& V, int nu, vec const& betabar, mat const& A) {
+                           
+// Wayne Taylor 9/24/2014
+
+// Gibbs sampler loop for the multivariate probit: cycles latent utilities w,
+// coefficients beta, and the covariance root C, keeping every keep'th draw of
+// beta and vec(Sigma).
+
+  int n = y.size()/p;
+  int k = X.n_cols;
+  
+  //allocate space for draws
+  mat sigmadraw = zeros<mat>(R/keep, p*p);
+  mat betadraw = zeros<mat>(R/keep,k);
+  vec wnew = zeros<vec>(X.n_rows);
+  
+  //set initial values of w,beta, sigma (or root of inv)
+  vec wold = wnew;
+  vec betaold = beta0;
+  mat C = chol(solve(trimatu(sigma0),eye(sigma0.n_cols,sigma0.n_cols))); //C is upper triangular root of sigma^-1 (G) = C'C
+                                                                         //trimatu interprets the matrix as upper triangular and makes solve more efficient
+  
+  mat sigmai, zmat, epsilon, S, IW, ucholinv, VSinv; 
+  vec betanew;
+  List W;
+  
+  // start main iteration loop
+  int mkeep = 0;
+  
+  if(nprint>0) startMcmcTimer();
+  
+    for(int rep = 0; rep<R; rep++) {
+    
+      //draw w given beta(rep-1),sigma(rep-1)
+      sigmai = trans(C)*C;
+  
+      //draw latent vector
+      
+      //w is a stacked n*p vector of latent utilities
+      //   X is an n*p x k matrix
+      //   y is a stacked n*p vector of binary (0,1) outcomes 
+      //   beta is k x 1 vector
+      //   sigmai is p x p
+      //   (the blocks here are p-dimensional -- matching the reshapes by p below --
+      //    unlike the MNP sampler, which works with p-1 dimensional differences)
+          
+      wnew = draww_mvp(wold,X*betaold,sigmai,y);
+  
+      //draw beta given w(rep) and sigma(rep-1)
+      //  note:  if Sigma^-1 (G) = C'C then Var(Ce)=CSigmaC' = I
+      //  first, transform w_i = X_ibeta + e_i by premultiply by C
+      
+      zmat = join_rows(wnew,X); //similar to cbind(wnew,X)
+      zmat.reshape(p,n*(k+1));
+      zmat = C*zmat;
+      zmat.reshape(X.n_rows,k+1);
+      
+      //column 0 is the transformed w; columns 1..k the transformed X
+      betanew = breg(zmat(span::all,0),zmat(span::all,span(1,k)),betabar,A);
+      
+      //draw sigmai given w and beta
+      epsilon = wnew-X*betanew;
+      epsilon.reshape(p,n);  
+      S = epsilon*trans(epsilon);
+      
+      //same as chol2inv(chol(V+S))
+      ucholinv = solve(trimatu(chol(V+S)), eye(p,p));
+      VSinv = ucholinv*trans(ucholinv);
+      
+      W = rwishart(nu+n,VSinv);
+      C = as<mat>(W["C"]); //conversion from Rcpp to Armadillo requires explict declaration of variable type using as<>
+      
+      //print time to completion
+      if (nprint>0) if ((rep+1)%nprint==0) infoMcmcTimer(rep, R);
+      
+      //save every keepth draw
+        if((rep+1)%keep==0){
+          mkeep = (rep+1)/keep;
+          betadraw(mkeep-1,span::all) = trans(betanew);
+          IW  = as<mat>(W["IW"]);
+          sigmadraw(mkeep-1,span::all) = trans(vectorise(IW));
+         }
+        
+      wold = wnew;
+      betaold = betanew;
+    }
+  
+  if(nprint>0) endMcmcTimer();
+      
+  return List::create(
+    Named("betadraw") = betadraw, 
+    Named("sigmadraw") = sigmadraw);
+}
diff --git a/src/rmvst_rcpp.cpp b/src/rmvst_rcpp.cpp
new file mode 100644
index 0000000..a26af41
--- /dev/null
+++ b/src/rmvst_rcpp.cpp
@@ -0,0 +1,15 @@
+#include "bayesm.h"
+ 
+// [[Rcpp::export]]
+vec rmvst(int nu, vec const& mu, mat const& root){
+  
+// Wayne Taylor 9/7/2014
+
+// function to draw from MV s-t  with nu df, mean mu, Sigma=t(root)%*%root
+//  root is upper triangular cholesky root
+
+  vec rnormd = rnorm(mu.size());
+  vec nvec = trans(root)*rnormd;
+  
+  //standard construction: mu + (root' z)/sqrt(chisq_nu/nu), z ~ N(0,I)
+  return(nvec/sqrt(rchisq(1,nu)[0]/nu) + mu); //rchisq returns a vectorized object, so using [0] allows for the conversion to double
+}
diff --git a/src/rnegbinRw_rcpp_loop.cpp b/src/rnegbinRw_rcpp_loop.cpp
new file mode 100644
index 0000000..a883877
--- /dev/null
+++ b/src/rnegbinRw_rcpp_loop.cpp
@@ -0,0 +1,98 @@
+#include "bayesm.h"
+ 
+// [[Rcpp::export]]
+List rnegbinRw_rcpp_loop(vec const& y, mat const& X, vec const& betabar, mat const& rootA, double a, double b, 
+                          vec beta, double alpha, bool fixalpha,
+                          mat const& betaroot, double const& alphacroot, int R, int keep, int nprint){
+
+// Keunwoo Kim 11/02/2014
+
+// Arguments:
+//       Data
+//           X is nobs X nvar matrix
+//           y is nobs vector
+
+//       Prior - list containing the prior parameters
+//           betabar, rootA - mean of beta prior, chol-root of inverse of variance covariance of beta prior
+//           a, b - parameters of alpha prior
+
+//       Mcmc - list containing
+//           R is number of draws
+//           keep is thinning parameter (def = 1)
+//           nprint - print estimated time remaining on every nprint'th draw (def = 100)
+//           betaroot - step size for beta RW
+//           alphacroot - step size for alpha RW
+//           beta - initial guesses for beta
+//           alpha - initial guess for alpha
+//           fixalpha - if TRUE, fix alpha and draw only beta
+//
+// Output: list of betadraw, alphadraw and the Metropolis acceptance counts
+//         nacceptbeta, nacceptalpha
+// 
+// Model:
+//       (y|lambda,alpha) ~ Negative Binomial(Mean = lambda, Overdispersion par = alpha)
+//       ln(lambda) =  X * beta
+//
+// Prior:
+//       beta ~ N(betabar, A^-1)
+//       alpha ~ Gamma(a,b) where mean = a/b and variance = a/(b^2)
+//
+  vec betac;
+  double ldiff, acc, unif, logalphac, oldlpostalpha, oldlpostbeta, clpostbeta, clpostalpha;
+  int mkeep, rep;
+  
+  int nvar = X.n_cols;  
+  int nacceptbeta = 0;
+  int nacceptalpha = 0;  
+
+  vec alphadraw(R/keep);
+  mat betadraw(R/keep, nvar);
+  
+  if (nprint>0) startMcmcTimer();
+  
+  //start main iteration loop
+  for (rep=0; rep<R; rep++){
+    
+    // Draw beta: random-walk Metropolis, candidate betac = beta + betaroot*z, z ~ N(0,I)
+    betac = beta + betaroot*vec(rnorm(nvar));
+    oldlpostbeta = lpostbeta(alpha, beta, X, y, betabar, rootA);
+    clpostbeta = lpostbeta(alpha, betac, X, y, betabar, rootA);
+    ldiff = clpostbeta - oldlpostbeta;
+    acc = exp(ldiff); //accept with probability min(1, exp(ldiff))
+    if (acc > 1) acc = 1;    
+    if(acc < 1) {unif=runif(1)[0];} else {unif=0;} //runif returns a NumericVector, so using [0] allows for conversion to double by extracting the first element
+    if (unif <= acc){
+      beta = betac;
+      nacceptbeta = nacceptbeta + 1;
+    } 
+    
+    // Draw alpha: random walk on log(alpha) so the candidate stays positive
+    if (!fixalpha){
+      logalphac = log(alpha) + alphacroot*rnorm(1)[0]; //rnorm returns a NumericVector, so using [0] allows for conversion to double
+      oldlpostalpha = lpostalpha(alpha, beta, X, y, a, b);
+      clpostalpha = lpostalpha(exp(logalphac), beta, X, y, a, b);
+      ldiff = clpostalpha - oldlpostalpha;
+      acc = exp(ldiff);
+      if (acc > 1) acc = 1;    
+      if(acc < 1) {unif=runif(1)[0];} else {unif=0;} //runif returns a NumericVector, so using [0] allows for conversion to double by extracting the first element
+      if (unif <= acc){
+        alpha = exp(logalphac);
+        nacceptalpha = nacceptalpha + 1;
+      }
+    }
+
+    if (nprint>0) if ((rep+1)%nprint==0) infoMcmcTimer(rep, R);    
+    
+    if((rep+1)%keep==0){
+      mkeep = (rep+1)/keep;
+      betadraw(mkeep-1, span::all) = trans(beta);
+      alphadraw[mkeep-1] = alpha;           
+    } 
+  }
+    
+  if (nprint>0) endMcmcTimer();
+  return List::create(
+      Named("betadraw") = betadraw,
+      Named("alphadraw") = alphadraw,      
+      Named("nacceptbeta") = nacceptbeta,
+      Named("nacceptalpha") = nacceptalpha);
+}
diff --git a/src/rnmixGibbs_rcpp_loop.cpp b/src/rnmixGibbs_rcpp_loop.cpp
new file mode 100644
index 0000000..514728d
--- /dev/null
+++ b/src/rnmixGibbs_rcpp_loop.cpp
@@ -0,0 +1,46 @@
+#include "bayesm.h"
+ 
+//[[Rcpp::export]]
+List rnmixGibbs_rcpp_loop(mat const& y, mat const& Mubar, 
+                     mat const& A, int nu, 
+                     mat const& V, vec const& a, 
+                     vec p, vec z,
+                     int const& R, int const& keep, int const& nprint) {
+
+// Wayne Taylor 9/10/2014
+
+// Gibbs loop for the normal mixture model: each sweep calls rmixGibbs (defined
+// elsewhere in this package), which returns updated component parameters
+// ("comps"), mixture probabilities p, and indicators z; every keep'th sweep
+// is stored.
+
+  int mkeep = 0;    
+  
+  mat pdraw(R/keep,p.size());
+  mat zdraw(R/keep,z.size());
+  List compdraw(R/keep);
+  
+  if(nprint>0) startMcmcTimer();
+  
+  // start main iteration loop
+  for(int rep = 0; rep<R; rep++) {
+    
+    List out = rmixGibbs(y, Mubar, A, nu, V, a, p, z);
+    
+    List compsd = out["comps"];
+    p = as<vec>(out["p"]); //conversion from Rcpp to Armadillo requires explict declaration of variable type using as<>
+    z = as<vec>(out["z"]);
+          
+    // print time to completion and draw # every nprint'th draw
+    if (nprint>0) if ((rep+1)%nprint==0) infoMcmcTimer(rep, R);
+            
+    if((rep+1)%keep==0){
+      mkeep = (rep+1)/keep;
+      pdraw(mkeep-1,span::all) = trans(p);
+      zdraw(mkeep-1,span::all) = trans(z);
+      compdraw[mkeep-1] = compsd;
+    }
+  }
+  
+  if(nprint>0) endMcmcTimer();
+      
+  return List::create(
+    Named("probdraw") = pdraw, 
+    Named("zdraw")    = zdraw, 
+    Named("compdraw") = compdraw);
+}
diff --git a/src/rordprobitGibbs_rcpp_loop.cpp b/src/rordprobitGibbs_rcpp_loop.cpp
new file mode 100644
index 0000000..12a56b9
--- /dev/null
+++ b/src/rordprobitGibbs_rcpp_loop.cpp
@@ -0,0 +1,211 @@
+#include "bayesm.h"
+ 
+//EXTRA FUNCTIONS SPECIFIC TO THE MAIN FUNCTION--------------------------------------------
+//dstartoc is a function to transfer dstar to its cut-off value    
+//c[0] = -100 and c[ndstar+2] = 100 act as effectively infinite end sentinels;
+//c[1] is pinned at 0; interior cut-offs are cumulative sums of exp(dstar),
+//which guarantees a strictly increasing sequence for any real dstar
+vec dstartoc(vec const& dstar){
+  int ndstar = dstar.size();
+  vec c(ndstar+3);
+  c[0] = -100;
+  c[1] = 0;
+  c(span(2,ndstar+1)) = cumsum(exp(dstar));
+  c[ndstar+2] = 100;
+  
+  return (c);
+} 
+
+// compute conditional likelihood of data given cut-offs
+// y[i] in {1,...,k} selects the interval (gamma[y[i]-1], gamma[y[i]]] for the
+// latent normal with mean mu[i]; cell probabilities are floored to avoid log(0)
+double lldstar(vec const& dstar, vec const& y, vec const& mu){
+  vec gamma = dstartoc(dstar);
+  
+  int ny = y.size();
+  NumericVector gamma1(ny);
+  NumericVector gamma2(ny);
+  for (int i=0; i<ny; i++){
+    gamma1[i] = gamma(y[i]);
+    gamma2[i] = gamma(y[i]-1);
+  }
+  NumericVector temp = pnorm(gamma1-as<NumericVector>(wrap(mu)))-pnorm(gamma2-as<NumericVector>(wrap(mu))); //pnorm takes Rcpp type NumericVector, NOT arma objects of type vec
+  vec arg = as<vec>(temp);
+  //BUG FIX: this was previously "1.0/(10^-50)". In C++ '^' is bitwise XOR, so
+  //10^-50 evaluates to -60 and epsilon was a NEGATIVE number (-1/60); the guard
+  //below then never fired for zero-probability cells, allowing log(0) = -Inf.
+  //The intended floor is 1e-50.
+  double epsilon = 1.0e-50;
+  for (int j=0; j<ny; j++){
+    if (arg[j]<epsilon){
+      arg[j] = epsilon;
+    }
+  }
+  return (sum(log(arg)));
+}
+
+List dstarRwMetrop(vec const& y, vec const& mu, vec const& olddstar, double s, mat const& inc_root, 
+                    vec const& dstarbar, double oldll, mat const& rootdi, int ncut){ 
+
+// function to execute rw metropolis for the dstar
+// y is n vector with element = 1,...,j 
+// X is n x k matrix of x values 
+// RW increments are N(0,s^2*t(inc.root)%*%inc.root)
+// prior on dstar is N(dstarbar,Sigma)  Sigma^-1=rootdi*t(rootdi)
+//  inc.root, rootdi are upper triangular
+//  this means that we are using the UL decomp of Sigma^-1 for prior 
+// olddstar is the current value of dstar; oldll is its cached log-likelihood
+// (returned unchanged on rejection, updated to the candidate's on acceptance)
+// returns stay=1 when the candidate is rejected (caller uses this for the acceptance rate)
+//
+  int stay = 0;
+  double unif;
+  vec dstardraw;
+
+  //candidate: RW step scaled by s through the increment root
+  vec dstarc = olddstar + s*trans(inc_root)*vec(rnorm(ncut));
+  double cll = lldstar(dstarc, y, mu);
+  double clpost = cll + lndMvn(dstarc, dstarbar, rootdi);
+  double ldiff = clpost - oldll - lndMvn(olddstar, dstarbar, rootdi);
+  double alpha = exp(ldiff); //MH acceptance probability min(1, exp(ldiff))
+  
+  if (alpha>1){
+    alpha = 1.0;
+  } 
+
+  if (alpha<1){
+    unif = runif(1)[0]; //runif returns a NumericVector, so using [0] allows for conversion to double by extracting the first element
+  }
+  else{
+    unif = 0;
+  }
+  
+  if (unif<=alpha){
+    dstardraw = dstarc; 
+    oldll = cll;
+  }
+  else{
+    dstardraw = olddstar;
+    stay = 1;
+  }
+  
+  return List::create(
+      Named("dstardraw") = dstardraw,
+      Named("oldll") = oldll,
+      Named("stay") = stay
+  );
+}   
+
+//MAIN FUNCTION---------------------------------------------------------------------------------------
+// [[Rcpp::export]]
+List rordprobitGibbs_rcpp_loop(vec const& y, mat const& X, int k, mat const& A, vec const& betabar, mat const& Ad, 
+                          double s, mat const& inc_root, vec const& dstarbar, vec const& betahat, 
+                          int R, int keep, int nprint){
+
+// Keunwoo Kim 09/09/2014
+
+// Purpose: draw from posterior for ordered probit using Gibbs Sampler and metropolis RW
+
+// Arguments:
+//  Data
+//    X is nobs x nvar, y is nobs vector of 1,2,.,k (ordinal variable)
+//  Prior
+//    A is nvar x nvar prior preci matrix
+//    betabar is nvar x 1 prior mean
+//    Ad is ndstar x ndstar prior preci matrix of dstar (ncut is number of cut-offs being estimated)
+//    dstarbar is ndstar x 1 prior mean of dstar
+//  Mcmc
+//    R is number of draws
+//    keep is thinning parameter
+//    nprint - prints the estimated time remaining for every nprint'th draw
+//    s is scale parameter of random walk Metropolis
+
+// Output: list of betadraws and cutdraws
+ 
+// Model: 
+//    z=Xbeta + e  < 0  e ~N(0,1)
+//    y=1,..,k, if z~c(c[k], c[k+1])
+
+//    cutoffs = c[1],..,c[k+1]
+//    dstar = dstar[1],dstar[k-2]
+//    set c[1]=-100, c[2]=0, ...,c[k+1]=100
+
+//    c[3]=exp(dstar[1]),c[4]=c[3]+exp(dstar[2]),...,
+//    c[k]=c[k-1]+exp(dstar[k-2])
+    
+// Note: 1. length of dstar = length of cutoffs - 3
+//       2. Be careful in assessing prior parameter, Ad.  .1 is too small for many applications.
+
+// Prior: 
+//  beta ~ N(betabar,A^-1)
+//  dstar ~ N(dstarbar, Ad^-1)
+
+  int stay, i, mkeep;
+  vec z;
+  List metropout;
+ 
+  int nvar = X.n_cols;
+  int ncuts = k+1;
+  int ncut = ncuts-3;
+  int ndstar = k-2;
+  int ny = y.size();
+
+  mat betadraw(R/keep, nvar);
+  mat cutdraw(R/keep, ncuts);
+  mat dstardraw(R/keep, ndstar);
+  vec staydraw(R/keep);
+  vec cutoff1(ny);
+  vec cutoff2(ny);
+  vec sigma(X.n_rows); sigma.ones();
+  
+  // compute the inverse of trans(X)*X+A
+  mat ucholinv = solve(trimatu(chol(trans(X)*X+A)), eye(nvar,nvar)); //trimatu interprets the matrix as upper triangular and makes solve more efficient
+  mat XXAinv = ucholinv*trans(ucholinv);
+
+  mat root = chol(XXAinv);
+  vec Abetabar = trans(A)*betabar;
+  
+  // compute the inverse of Ad
+  ucholinv = solve(trimatu(chol(Ad)), eye(ndstar,ndstar));
+  mat Adinv = ucholinv*trans(ucholinv);
+  
+  mat rootdi = chol(Adinv);
+  
+  // set initial values for MCMC  
+  vec olddstar(ndstar); 
+  olddstar.zeros();
+  vec beta = betahat;    
+  vec cutoffs = dstartoc(olddstar);  
+  double oldll = lldstar(olddstar, y, X*betahat);
+  
+  if (nprint>0) startMcmcTimer();
+  
+  //start main iteration loop
+  for (int rep=0; rep<R; rep++){
+    
+    //draw z given beta(i-1), sigma, y, cut-offs
+    //y[i] in 1..k selects the truncation interval (cutoffs[y[i]-1], cutoffs[y[i]])
+    for (i=0; i<ny; i++){
+      cutoff1[i] = cutoffs[y[i]-1];
+      cutoff2[i] = cutoffs[y[i]];
+    }
+    z = rtrunVec(X*beta, sigma, cutoff1, cutoff2);
+
+    //draw beta given z and rest
+    beta = breg1(root,X,z,Abetabar);
+   
+    //draw gamma given z
+    metropout = dstarRwMetrop(y,X*beta,olddstar,s,inc_root,dstarbar,oldll,rootdi, ncut);   
+    olddstar = as<vec>(metropout["dstardraw"]); //conversion from Rcpp to Armadillo requires explict declaration of variable type using as<>
+    oldll =  as<double>(metropout["oldll"]);
+    cutoffs = dstartoc(olddstar);
+    stay = as<int>(metropout["stay"]);  
+
+    //print time to completion and draw # every nprint'th draw
+    if (nprint>0) if ((rep+1)%nprint==0) infoMcmcTimer(rep, R);
+    
+    if((rep+1)%keep==0){
+      mkeep = (rep+1)/keep;
+      cutdraw(mkeep-1,span::all) = trans(cutoffs);
+      dstardraw(mkeep-1,span::all) = trans(olddstar);
+      betadraw(mkeep-1,span::all) = trans(beta);
+      staydraw[mkeep-1] = stay;
+    }                
+  }
+  //fraction of accepted dstar RW moves among the kept draws
+  double accept = 1-sum(staydraw)/(R/keep);
+  if (nprint>0) endMcmcTimer();
+
+  return List::create(
+      Named("cutdraw") = cutdraw,
+      Named("dstardraw") = dstardraw,
+      Named("betadraw") = betadraw,
+      Named("accept") = accept
+  );
+}
diff --git a/src/rscaleUsage_rcpp_loop.cpp b/src/rscaleUsage_rcpp_loop.cpp
new file mode 100644
index 0000000..ff183c0
--- /dev/null
+++ b/src/rscaleUsage_rcpp_loop.cpp
@@ -0,0 +1,442 @@
+#include "bayesm.h"
+#include <RcppArmadilloExtensions/sample.h> //used for "sample" function
+ 
+//SUPPORT FUNCTIONS SPECIFIC TO MAIN FUNCTION--------------------------------------------------------------------------------------
+double ghk(mat const& L, vec const& a, vec const& b, int const& n, int const& dim){
+
+//  Wayne Taylor 4/29/15
+
+//routine to implement ghk with a region : a[i-1] <= x_i <= b[i-1]
+//r mcculloch 8/04
+//L is lower triangular root of Sigma random vector is assumed to have zero mean
+//n is number of draws to use in GHK
+//dim is the dimension of L
+//modified 6/05 by rossi to check arg into qnorm
+//converted to rcpp 5/15
+//returns the Monte Carlo average (over n draws) of the product of conditional
+//interval probabilities, i.e. the GHK estimate of P(a <= x <= b)
+
+  int i,j;
+  
+  NumericVector aa(1),bb(1),pa(1),pb(1),arg(1);
+  double u,prod,mu;
+  vec z(dim);
+  
+  double res=0.0;
+  
+  for(i=0;i<n;i++) {
+      
+    prod = 1.0;
+      
+    for(j=0;j<dim;j++) {
+         
+      //conditional mean of coordinate j given the already-drawn z's
+      mu = 0.0; 
+      if(j>0) mu = as_scalar(L(j,span(0,j-1))*z(span(0,j-1))); //previously done via a loop for(k=0;k<j;k++) mu += L(k*dim+j)*z[k];
+      
+      aa[0] = (a[j]-mu)/L(j,j); //when using one element length NumericVectors, use [0] as much as possible
+      bb[0] = (b[j]-mu)/L(j,j);
+      
+      pa[0] = pnorm(aa,0.0,1.0)[0];
+      pb[0] = pnorm(bb,0.0,1.0)[0];
+       
+      prod *= pb[0]-pa[0];
+      
+      u = unif_rand(); //unif_rand() is slightly faster than runif() for single draws double
+      
+      //inverse-cdf draw of z[j] restricted to the conditional interval
+      arg[0] = u*pb[0]+(1.0-u)*pa[0];
+      
+      if(arg[0] > .999999999) arg[0]=.999999999;
+      if(arg[0] < .0000000001) arg[0]=.0000000001;
+      
+      z[j] = qnorm(arg,0.0,1.0)[0];
+    }
+    
+    res += prod;
+  }
+  
+  res /= n; 
+  
+  return (res);
+}
+
+mat dy(mat y, mat const& x, vec const& c, vec const& mu, mat const& beta, vec const& s, vec const& tau, vec const& sigma){
+
+// Wayne Taylor 4/29/15
+
+// Gibbs update of the latent responses y: each y(n,i) is drawn from a
+// univariate normal (mean cm, sd cs) truncated to the interval
+// (c[x(n,i)-1], c[x(n,i)]) implied by the observed discrete response x(n,i).
+// y is intentionally passed by value; the updated copy is returned.
+
+  //Variable declaration
+  double sigman,taun;
+  rowvec yn;
+  vec xn;
+  
+  int p = y.n_cols;
+  int nobs = y.n_rows;
+   
+  //cm = conditional mean, cs = condtional standard deviation
+  //u =uniform for truncated normal draw
+  double cm,cs,u;
+  
+  // standardized truncation points (a,b)
+  // cdf at truncation points (pa,pb)
+  NumericVector a(1),b(1),pa(1),pb(1);
+  double qout;
+
+  //loop over coordinates of y - first by rows, then by columns
+  for(int n = 0; n<nobs; n++){
+    
+    sigman = sigma[n];
+    taun = tau[n];
+    yn = trans(vectorise(y(n,span::all)));
+    xn = vectorise(x(n,span::all));
+    
+    for(int i = 0; i<p; i++) {
+      
+      //compute conditonal mean and standard deviation
+      cs = s[i]*sigman;
+      cm = mu[i]+taun;
+      
+      //column-major linear index i*(p-1)+j reads column i of beta
+      //(assumes beta is the (p-1) x p matrix of conditional regression
+      // coefficients produced by condd -- verify against caller)
+      for(int j=0;j<i;j++) cm += (beta(i*(p-1)+j))*(yn[j]-mu[j]-taun);
+      for(int j=(i+1);j<p;j++) cm += (beta(i*(p-1)+j-1))*(yn[j]-mu[j]-taun);
+
+      //draw truncated normal
+      // y~N(cm,cs^2) I[c[x[i]-1],c[x[i])
+      //a = (c[x(_,i)-1]-cm)/cs;  b = (c[x(_,i)]-cm)/cs;
+      a[0] = ((c[xn[i]-1]-cm)/cs); //when using one element length NumericVectors, use [0] as much as possible
+      b[0] = ((c[xn[i]]-cm)/cs);
+      
+      pa[0] = pnorm(a,0.0,1.0)[0];
+      pb[0] = pnorm(b,0.0,1.0)[0];
+      
+      u = unif_rand(); //unif_rand() is slightly faster than runif() for single draws double
+      
+      qout = qnorm(u*pb + (1-u)*pa,0.0,1.0)[0];
+      yn[i] = cm + cs*qout;
+    }
+    
+    //put yn values back into y
+    y(n,span::all) = yn;
+  }
+   
+  return(y);
+}
+
+double rlpx(mat const& x, double e,int k, vec const& mu,vec const& tau,mat const& Sigma,vec const& sigma,int nd=500) {
+  
+//Wayne Taylor 4/29/15
+
+//log-likelihood of the discrete data x at scale parameter e: cut-offs cc are
+//derived from (e,k) via cgetC, and each row's probability of falling in its
+//hyper-rectangle (a,b) is approximated by GHK with nd draws
+
+  int n = x.n_rows;
+  int p = x.n_cols;
+  vec cc = cgetC(e,k);
+  mat L = trans(chol(Sigma));
+  vec lpv = zeros<vec>(n);
+  double offset = p*log((double)k); //constant shift p*log(k) added to every observation
+
+  vec a,b;
+  double ghkres,lghkres;
+  uvec xia(p),xib(p);
+  mat Li;
+  
+  for(int i = 0; i<n; i++){
+    Li = sigma[i]*L; //observation-specific scaling of the covariance root
+  
+    for(int u = 0;u<p;u++){
+      xia[u] = x(i,u)-1;
+      xib[u] = x(i,u);
+    }
+    
+    //truncation region implied by the discrete responses, centered at mu + tau[i]
+    a = cc.elem(xia)-mu-tau[i];
+    b = cc.elem(xib)-mu-tau[i];
+    
+    ghkres = ghk(Li,a,b,nd,L.n_rows);
+    lghkres = trunc_log(ghkres); //natural log, truncated to avoid +/- infinity. Note on my machine it truncates to ~log(1e-308)
+    lpv[i] = lghkres + offset;
+  }
+  
+  return(sum(lpv));
+}
+
+List condd(mat const& Sigma) {
+  
+//Wayne Taylor 4/29/15
+  
+//for each coordinate i of a p-vector with covariance Sigma, compute the
+//coefficients of the conditional regression of coordinate i on the others
+//(column i of beta) and the conditional standard deviation (s[i]), read off
+//the precision matrix Si: beta_i = -Si[-i,i]/Si[i,i], s_i = 1/sqrt(Si[i,i])
+  
+  int p = Sigma.n_rows;
+  mat Si = solve(Sigma,eye(p,p));
+  int cbetarows = p-1;
+  mat cbeta = zeros<mat>(cbetarows,p);
+  uvec ui(1),ind(p-1);
+  int counter;
+
+  uvec cbetaAllRow(cbetarows);
+  for(int i = 0; i<cbetarows; i++) cbetaAllRow[i] = i;    
+
+  for(int i = 0; i<p; i++){
+    ui[0] = i;
+    
+    //ind = all row indices except i (the "-i" selection)
+    counter = 0;
+    for(int j = 0; j<cbetarows; j++){
+      if(j==i) counter = counter + 1;
+      ind[j] = counter;
+      counter = counter + 1;  
+    }
+
+    cbeta(cbetaAllRow,ui) = -Si(ind,ui)/as_scalar(Si(ui,ui));
+  }
+  
+  return List::create(
+    Named("beta") = cbeta,
+    Named("s") = sqrt(1/Si.diag()));  
+}
+
+mat getS(mat const& Lam, int n, vec const& moms){
+  
+//Wayne Taylor 4/29/15
+  
+//builds the 2x2 cross-product matrix S used in the Lambda full conditional
+//from summary moments; per the caller, moms = (mean(tau), mean(h),
+//var(tau), cov(tau,h), var(h)) with h = log(sigma)
+  
+  mat S = zeros<mat>(2,2);
+  
+  S(0,0) = (n-1)*moms[2] + n*pow(moms[0],2);
+  S(0,1) = (n-1)*moms[3] + n*moms[0]*(moms[1]-Lam(1,1));
+  S(1,0) = S(0,1); //symmetric
+  S(1,1) = (n-1)*moms[4] + n*pow(moms[1]-Lam(1,1),2);
+
+  return(S);
+}
+
+double llL(mat const& Lam, int n, mat const& S, mat const& V,int nu){
+  
+//Wayne Taylor 4/29/15  
+
+//log of the Lambda full-conditional kernel (inverted-Wishart form):
+//  -.5*(n+nu+3)*log|Lambda| - .5*tr((S+V)*Lambda^-1)
+//dlam is the determinant of the 2x2 Lambda computed directly
+
+  int d = Lam.n_cols;
+  double dlam = Lam(0,0)*Lam(1,1)-pow(Lam(0,1),2);
+  mat M = (S+V) *  solve(Lam,eye(d,d));
+  double ll = -.5*(n+nu+3)*log(dlam) -.5*sum(M.diag());
+  
+  return(ll);
+}
+
+//MAIN FUNCTION------------------------------------------------------------------------------------
+//[[Rcpp::export]]
+List rscaleUsage_rcpp_loop(int k, mat const& x, int p, int n,
+                           int R, int keep, int ndghk, int nprint,
+                           mat y, vec mu, mat Sigma, vec tau, vec sigma, mat Lambda, double e,
+                           bool domu, bool doSigma, bool dosigma, bool dotau, bool doLambda, bool doe,
+                           int nu, mat const& V, mat const& mubar, mat const& Am,
+                           vec const& gsigma, vec const& gl11,vec const& gl22, vec const& gl12,
+                           int nuL, mat const& VL, vec const& ge){
+
+// R.McCulloch, 12/04  code for scale usage R function (rScaleUsage)
+//  changed to R error function, P. Rossi 05/12
+//  converted to rcpp W. Taylor 04/15
+// MCMC loop for the scale-usage model: sweeps latent y, Sigma, mu, tau, sigma,
+// Lambda, and the grid parameter e, storing every keep'th draw.
+
+  //variable declaration
+  int mkeep, ng, ei, pi;
+  double eprop, eold;
+  double Ai, A, xtx, beta, s2, m, a, b, s, qr, llold, llprop, lrat, paccept;
+  vec cc, xty, ete, pv, h, moms(5),rgl11, rgl12a, rgl12, rgl22, absege, minvec;
+  uvec eiu;
+  mat Res, S, yd, Si, Vmi, Rm, Ri, Vm, mm, onev, xx, ytemp, yy, eps, dat, temp, SS;
+  List bs, rwout;
+  
+  rowvec onesp = ones<rowvec>(p);
+  int nk = R/keep;
+  int ndpost = nk*keep;
+  
+  mat drSigma = zeros<mat>(nk,pow(p,2.0));
+  mat drmu = zeros<mat>(nk,p);
+  mat drtau = zeros<mat>(nk,n);
+  mat drsigma = zeros<mat>(nk,n);
+  mat drLambda = zeros<mat>(nk,4);
+  vec dre = zeros<vec>(nk);
+
+  if(nprint>0) startMcmcTimer();
+
+  for(int rep = 0; rep < ndpost; rep++) {
+    
+    //draw latent y given the current cut-offs and conditional moments
+    cc = cgetC(e,k);
+    bs = condd(Sigma);
+    y = dy(y,x,cc,mu,as<mat>(bs["beta"]),as<vec>(bs["s"]),tau,sigma);
+    
+    //draw Sigma
+    if(doSigma) {
+      Res = y;
+      Res.each_row() -= trans(mu);
+      Res.each_col() -= tau;
+      Res.each_col() /= sigma;
+      
+      S = trans(Res)*Res;
+      rwout = rwishart(nu+n,solve(V+S,eye(p,p)));
+      Sigma = as<mat>(rwout["IW"]);
+    }
+  
+    //draw mu
+    if(domu) {
+      yd = y;
+      yd.each_col() -= tau;
+      Si = solve(Sigma,eye(p,p));
+      Vmi = as_scalar(sum(1/pow(sigma,2)))*Si + Am;
+      Rm = chol(Vmi);
+      Ri = solve(trimatu(Rm),eye(p,p));
+      Vm = solve(Vmi,eye(p,p));
+      mm = Vm * (Si * (trans(yd) * (1/pow(sigma,2))) + Am * mubar);
+      mu = vectorise(mm + Ri * as<vec>(rnorm(p)));
+    }
+      
+    //draw tau
+    if(dotau) {
+      Ai = Lambda(0,0) - pow(Lambda(0,1),2)/Lambda(1,1);
+      A = 1.0/Ai;
+      onev = ones<mat>(p,1);
+      Rm = chol(Sigma);
+      xx = trans(solve(trans(Rm),onev));
+      ytemp = trans(y);
+      ytemp.each_col() -= mu;
+      yy = trans(solve(trans(Rm),ytemp));
+      xtx = accu(pow(xx,2)); //To get a sum of all the elements regardless of the argument type (ie. matrix or vector), use accu()
+      xty = vectorise(xx*trans(yy));
+      beta = A*Lambda(0,1)/Lambda(1,1);
+      
+      for(int j = 0; j<n; j++){
+        s2 = xtx/pow(sigma[j],2) + A;
+        s2 = 1.0/s2;
+        m = s2*((xty[j]/pow(sigma[j],2)) + beta*(log(sigma[j])-Lambda(1,1)));
+        tau[j] = m + sqrt(s2)*rnorm(1)[0];
+      }
+    }
+     
+    //draw sigma (griddy Gibbs over the grid gsigma)
+    if(dosigma) {
+      Rm = chol(Sigma);
+      ytemp = y;
+      ytemp.each_col() -= tau;
+      ytemp = trans(ytemp);
+      ytemp.each_col() -= mu;
+      eps = solve(trans(Rm),ytemp);
+      onesp = ones<rowvec>(p);
+      ete = vectorise(onesp * pow(eps,2));
+      
+      a = Lambda(1,1);
+      b = Lambda(0,1)/Lambda(0,0);
+      s = sqrt(Lambda(1,1)-pow(Lambda(0,1),2)/Lambda(0,0));
+
+      for(int j = 0; j<n; j++){
+        pv = -(p+1)*log(gsigma) -.5*ete[j]/pow(gsigma,2) -.5*pow((log(gsigma)-(a+b*tau[j]))/s,2);
+        pv = exp(pv-max(pv));
+        pv = pv/sum(pv);
+        //see http://gallery.rcpp.org/articles/using-the-Rcpp-based-sample-implementation/ for using sample
+        sigma[j] = Rcpp::RcppArmadillo::sample(NumericVector(gsigma.begin(),gsigma.end()),1,false,NumericVector(pv.begin(),pv.end()))[0];
+      }
+    }
+     
+    //draw Lambda (griddy Gibbs, one element at a time, respecting positive-definiteness)
+    if(doLambda) {
+      h = log(sigma);
+      dat = join_rows(tau,h);        
+      temp = cov(dat);
+      moms << mean(tau) << mean(h) << temp(0,0) << temp(0,1) << temp(1,1); //element initialization
+     
+      SS = getS(Lambda,n,moms);
+      rgl11 = gl11.elem(find(gl11 > pow(Lambda(0,1),2)/Lambda(1,1)));
+      ng = rgl11.size();
+      pv = zeros<vec>(ng);
+      
+      for(int j  = 0; j<ng; j++){
+        Lambda(0,0) = rgl11[j];
+        pv[j] = llL(Lambda,n,SS,VL,nuL);
+      }
+      
+      pv = exp(pv-max(pv));
+      pv = pv/sum(pv);
+      Lambda(0,0) = Rcpp::RcppArmadillo::sample(NumericVector(rgl11.begin(),rgl11.end()),1,false,NumericVector(pv.begin(),pv.end()))[0];
+      
+      //cannot do multiple conditions per find() so it is done in two stages
+      rgl12a = gl12.elem(find(gl12 < sqrt(Lambda(0,0)*Lambda(1,1))));
+      rgl12 = rgl12a.elem(find(rgl12a > -sqrt(Lambda(0,0)*Lambda(1,1))));
+      ng = rgl12.size();
+      pv = zeros<vec>(ng);
+      
+      for(int j = 0; j<ng;j++){
+        Lambda(0,1) = rgl12[j];
+        Lambda(1,0) = Lambda(0,1);
+        pv[j] = llL(Lambda,n,SS,VL,nuL);
+      }
+      
+      pv = exp(pv-max(pv));
+      pv = pv/sum(pv);
+      
+      Lambda(0,1) = Rcpp::RcppArmadillo::sample(NumericVector(rgl12.begin(),rgl12.end()),1,false,NumericVector(pv.begin(),pv.end()))[0];
+      Lambda(1,0) = Lambda(0,1);
+      
+      rgl22 = gl22.elem(find(gl22 > pow(Lambda(0,1),2)/Lambda(0,0)));
+      ng = rgl22.size();
+      pv = zeros<vec>(ng);
+      
+      for(int j = 0;j<ng;j++){
+        Lambda(1,1) = rgl22[j];
+        SS = getS(Lambda,n,moms);
+        pv[j] = llL(Lambda,n,SS,VL,nuL);
+      }
+      
+      pv = exp(pv-max(pv));
+      pv = pv/sum(pv);
+      Lambda(1,1) = Rcpp::RcppArmadillo::sample(NumericVector(rgl22.begin(),rgl22.end()),1,false,NumericVector(pv.begin(),pv.end()))[0];
+    }
+    
+    //draw e (discrete random-walk Metropolis on the grid ge)
+    if(doe) {
+      ng = ge.size();
+      absege = abs(e-ge); 
+      eiu = find(absege == min(absege));
+      ei = eiu[0]; //0-based index of current e on the grid
+      
+      //BUG FIX: ei is a 0-based C++ index, but the edge tests were carried over
+      //from the 1-based R code (ei==1 / ei==ng). ei==ng was unreachable, and at
+      //ei==0 the else-branch could propose pi=-1, indexing ge out of bounds.
+      if(ei == 0){
+        pi = 1;
+        qr = .5;
+      } else if (ei == ng-1) {
+        pi = ng-2;
+        qr = .5;
+      } else {
+        pi = ei + rbinom(1,1,.5)[0]*2-1;
+        qr = 1;
+      }
+
+      eold = ge[ei];
+      eprop = ge[pi];
+      
+      llold = rlpx(x,eold,k,mu,tau,Sigma,sigma,ndghk);
+      llprop = rlpx(x,eprop,k,mu,tau,Sigma,sigma,ndghk);
+      lrat = llprop - llold + log(qr);
+      
+      if(lrat>0) {
+        e = eprop;
+      } else {
+        minvec << 1 << exp(lrat);
+        paccept = min(minvec);
+        
+        if(rbinom(1,1,paccept)[0]==1){
+          e = eprop;
+        } else {
+          e = eold;
+        }
+      }
+    }
+     
+    if (nprint>0) if((rep+1)%nprint==0) infoMcmcTimer(rep, R); 
+  
+    if((rep+1)%keep==0){
+      mkeep = (rep+1)/keep;
+      drSigma(mkeep-1,span::all) = trans(vectorise(Sigma));
+      drmu(mkeep-1,span::all) = trans(mu);
+      drtau(mkeep-1,span::all) = trans(tau);
+      drsigma(mkeep-1,span::all) = trans(sigma); 
+      drLambda(mkeep-1,span::all) = trans(vectorise(Lambda));
+      dre[mkeep-1] = e;
+    }
+  }
+  
+  if (nprint>0) endMcmcTimer();
+  
+  return List::create(
+    Named("ndpost") = ndpost,
+    Named("drmu") = drmu,
+    Named("drtau") = drtau,
+    Named("drsigma") = drsigma,
+    Named("drLambda") = drLambda,
+    Named("dre") = dre,
+    Named("drSigma") = drSigma);
+}
diff --git a/src/rsurGibbs_rcpp_loop.cpp b/src/rsurGibbs_rcpp_loop.cpp
new file mode 100644
index 0000000..267cca7
--- /dev/null
+++ b/src/rsurGibbs_rcpp_loop.cpp
@@ -0,0 +1,129 @@
+#include "bayesm.h"
+ 
+// [[Rcpp::export]]
+List rsurGibbs_rcpp_loop(List const& regdata, vec const& indreg, vec const& cumnk, vec const& nk, mat const& XspXs, 
+                              mat Sigmainv, mat const& A, vec const& Abetabar, int nu, mat const& V, int nvar, 
+                              mat E, mat const& Y, int R, int keep, int nprint){
+
+// Keunwoo Kim 09/19/2014
+
+// Purpose: implement Gibbs Sampler for SUR
+
+// Arguments:
+//   Data -- regdata
+//           regdata is a list of lists of data for each regression
+//           regdata[[i]] contains data for regression equation i
+//           regdata[[i]]$y is y, regdata[[i]]$X is X
+//           note: each regression can have differing numbers of X vars
+//                 but you must have same no of obs in each equation. 
+//   Prior -- list of prior hyperparameters
+//     betabar,A      prior mean, prior precision
+//     nu, V          prior on Sigma
+//   Mcmc -- list of MCMC parms
+//     R number of draws
+//     keep -- thinning parameter
+//     nprint - print estimated time remaining on every nprint'th draw
+
+// Output: list of betadraw,Sigmadraw
+ 
+// Model:
+//   y_i = X_ibeta + e_i  
+//          y is nobs x 1
+//          X is nobs x k_i
+//          beta is k_i x 1 vector of coefficients
+//          i=1,nreg total regressions
+
+//         (e_1,k,...,e_nreg,k) ~ N(0,Sigma) k=1,...,nobs
+
+//   we can also write as stacked regression
+//   y = Xbeta+e
+//       y is nobs*nreg x 1,X is nobs*nreg x (sum(k_i))
+//   routine draws beta -- the stacked vector of all coefficients
+
+// Prior:
+//          beta ~ N(betabar,A^-1)
+//          Sigma ~ IW(nu,V)
+
+  int reg, mkeep, i, j;
+  vec beta, btilde, yti;
+  mat IR, ucholinv, EEVinv, Sigma, Xtipyti, Ydti;
+  List regdatai, rwout;
+  
+  int nreg = regdata.size();  
+  
+  // convert List to std::vector of struct
+  std::vector<moments> regdata_vector;
+  moments regdatai_struct;
+  
+  // store vector with struct
+  for (reg=0; reg<nreg; reg++){
+    regdatai = regdata[reg];
+    
+    regdatai_struct.y = as<vec>(regdatai["y"]);
+    regdatai_struct.X = as<mat>(regdatai["X"]);    
+    regdata_vector.push_back(regdatai_struct);    
+  }
+  
+  int nobs = (regdatai_struct.y).size(); // taken from the last equation; all equations must have equal nobs (see header note)
+  
+  mat XtipXti = zeros<mat>(sum(nk), sum(nk));
+  mat Sigmadraw(R/keep, nreg*nreg);
+  mat betadraw(R/keep, nvar);
+
+  if (nprint>0) startMcmcTimer();
+
+  for (int rep=0; rep<R; rep++){
+    
+    
+    //first draw beta | Sigma
+    
+    // compute Xtilde'Xtilde, block (i,j) = Sigmainv[i,j] * X_i'X_j (precomputed in XspXs)
+    for (i=0; i<nreg; i++){
+      for (j=0; j<nreg; j++){
+        XtipXti(span(cumnk[i]-nk[i],cumnk[i]-1), span(cumnk[j]-nk[j],cumnk[j]-1)) =
+                  Sigmainv(i,j) * XspXs(span(cumnk[i]-nk[i],cumnk[i]-1), span(cumnk[j]-nk[j],cumnk[j]-1));              
+      }      
+    }    
+    
+    // now compute Xtilde'ytilde
+    Ydti = Y*Sigmainv;
+    Xtipyti = trans(regdata_vector[0].X)*Ydti(span::all,0);
+    for (reg=1; reg<nreg; reg++){
+      Xtipyti = join_cols(Xtipyti, trans(regdata_vector[reg].X)*Ydti(span::all,reg)); //join_cols is analogous to rbind()
+    }     
+    
+    IR = solve(trimatu(chol(XtipXti + A)), eye(nvar,nvar)); //trimatu interprets the matrix as upper triangular and makes solve more efficient
+    btilde = (IR*trans(IR)) * (Xtipyti + Abetabar); // posterior mean; IR*IR' = (Xt'Xt+A)^-1
+    beta = btilde + IR*vec(rnorm(nvar));
+    
+    
+    //now draw Sigma | beta
+    for (reg=0; reg<nreg; reg++){
+      E(span::all,reg) = regdata_vector[reg].y - 
+                          regdata_vector[reg].X * beta(span(indreg[reg]-1,indreg[reg+1]-2)); // indreg appears to hold 1-based (R-style) start indices of each equation's coefficients -- TODO confirm against caller
+    }
+    
+    // compute the inverse of E'E+V
+    ucholinv = solve(trimatu(chol(trans(E)*E+V)), eye(nreg,nreg));
+    EEVinv = ucholinv*trans(ucholinv);
+    
+    rwout = rwishart(nu+nobs, EEVinv);
+    Sigma = as<mat>(rwout["IW"]); //conversion from Rcpp to Armadillo requires explicit declaration of variable type using as<>
+    Sigmainv = as<mat>(rwout["W"]);
+    
+    //print time to completion and draw # every nprint'th draw
+    if (nprint>0) if ((rep+1)%nprint==0) infoMcmcTimer(rep, R);
+    
+    if((rep+1)%keep==0){
+      mkeep = (rep+1)/keep;
+      betadraw(mkeep-1, span::all) = trans(beta);
+      Sigmadraw(mkeep-1, span::all) = trans(vectorise(Sigma));      
+    }
+  }
+  
+  if (nprint>0) endMcmcTimer();
+  
+  return List::create(
+      Named("betadraw") = betadraw,
+      Named("Sigmadraw") = Sigmadraw);
+}
diff --git a/src/rtrun_rcpp.cpp b/src/rtrun_rcpp.cpp
new file mode 100644
index 0000000..aa35996
--- /dev/null
+++ b/src/rtrun_rcpp.cpp
@@ -0,0 +1,17 @@
+#include "bayesm.h"
+ 
+// [[Rcpp::export]]
+NumericVector rtrun(NumericVector const& mu, NumericVector const& sigma, 
+                         NumericVector const& a, NumericVector const& b){
+// Wayne Taylor 9/7/2014
+// Draws one truncated-normal variate per element by the inverse-CDF method.
+// a/b are elementwise lower/upper truncation bounds for N(mu,sigma^2).
+  NumericVector FA = pnorm((a-mu)/sigma);
+  NumericVector FB = pnorm((b-mu)/sigma);
+  NumericVector u  = runif(mu.size())*(FB-FA)+FA;
+  // Clamp u away from 0 and 1 so qnorm cannot return +/-Inf when the
+  // truncation region lies in a far tail (FA==FB to machine precision);
+  // this is the same guard rtrun1() applies in utilityFunctions.cpp.
+  u = pmin(pmax(u, 1e-10), 1.0-1e-10);
+  return(mu+sigma*qnorm(u));
+}
diff --git a/src/runiregGibbs_rcpp_loop.cpp b/src/runiregGibbs_rcpp_loop.cpp
new file mode 100644
index 0000000..9fbf0f3
--- /dev/null
+++ b/src/runiregGibbs_rcpp_loop.cpp
@@ -0,0 +1,74 @@
+#include "bayesm.h"
+ 
+// [[Rcpp::export]]
+List runiregGibbs_rcpp_loop(vec const& y, mat const& X, vec const& betabar, mat const& A, int nu, double ssq, 
+                      double sigmasq, int R, int keep, int nprint) {
+
+// Keunwoo Kim 09/09/2014
+
+// Purpose: perform iid draws from posterior of regression model using conjugate prior
+
+// Arguments:
+//  y,X
+//  betabar,A      prior mean, prior precision
+//  nu, ssq        prior on sigmasq
+//  R number of draws
+//  keep thinning parameter
+//  sigmasq is the initial value and is updated in place each sweep
+
+// Output: list of beta, sigmasq
+ 
+// Model: 
+//  y = Xbeta + e  e ~N(0,sigmasq)
+//  y is n x 1
+//  X is n x k
+//  beta is k x 1 vector of coefficients
+
+// Prior: 
+//  beta ~ N(betabar,sigmasq*A^-1)
+//  sigmasq ~ (nu*ssq)/chisq_nu
+// 
+  int mkeep;
+  double s;
+  mat RA, W, IR;
+  vec z, btilde, beta;
+  
+  int nvar = X.n_cols;
+  int nobs = y.size();
+  
+  vec sigmasqdraw(R/keep);
+  mat betadraw(R/keep, nvar);
+  
+  mat XpX = trans(X)*X;
+  vec Xpy = trans(X)*y;
+  
+  vec Abetabar = A*betabar;
+  
+  if (nprint>0) startMcmcTimer();
+
+  for (int rep=0; rep<R; rep++){   
+    
+    //first draw beta | sigmasq
+    IR = solve(trimatu(chol(XpX/sigmasq+A)), eye(nvar,nvar)); //trimatu interprets the matrix as upper triangular and makes solve more efficient
+    btilde = (IR*trans(IR)) * (Xpy/sigmasq+Abetabar); // posterior mean; IR*IR' = (X'X/sigmasq+A)^-1
+    beta = btilde + IR*vec(rnorm(nvar));
+    
+    //now draw sigmasq | beta
+    s = sum(square(y-X*beta));
+    sigmasq = (nu*ssq+s) / rchisq(1,nu+nobs)[0]; //rchisq returns a vectorized object, so using [0] allows for the conversion to double
+    
+    //print time to completion and draw # every nprint'th draw
+    if (nprint>0) if ((rep+1)%nprint==0) infoMcmcTimer(rep, R);
+    
+    if((rep+1)%keep==0){
+      mkeep = (rep+1)/keep;
+      betadraw(mkeep-1, span::all) = trans(beta);
+      sigmasqdraw[mkeep-1] = sigmasq;
+    }   
+  }  
+  
+  if (nprint>0) endMcmcTimer();
+  
+  return List::create(
+      Named("betadraw") = betadraw, 
+      Named("sigmasqdraw") = NumericVector(sigmasqdraw.begin(),sigmasqdraw.end()));
+}
diff --git a/src/runireg_rcpp_loop.cpp b/src/runireg_rcpp_loop.cpp
new file mode 100644
index 0000000..0ba9f4d
--- /dev/null
+++ b/src/runireg_rcpp_loop.cpp
@@ -0,0 +1,74 @@
+#include "bayesm.h"
+ 
+// [[Rcpp::export]]
+List runireg_rcpp_loop(vec const& y, mat const& X, vec const& betabar, mat const& A, int nu, double ssq, 
+                  int R, int keep, int nprint) {
+
+// Keunwoo Kim 09/09/2014
+
+// Purpose: perform iid draws from posterior of regression model using conjugate prior
+
+// Arguments:
+//  y,X
+//  betabar,A      prior mean, prior precision
+//  nu, ssq        prior on sigmasq
+//  R number of draws
+//  keep thinning parameter
+
+// Output: list of beta, sigmasq
+ 
+// Model: 
+//  y = Xbeta + e  e ~N(0,sigmasq)
+//  y is n x 1
+//  X is n x k
+//  beta is k x 1 vector of coefficients
+
+// Prior:  
+//  beta ~ N(betabar,sigmasq*A^-1)
+//  sigmasq ~ (nu*ssq)/chisq_nu
+
+  int mkeep;
+  double s, sigmasq;
+  mat RA, W, IR;
+  vec z, btilde, res, beta;
+  
+  int nvar = X.n_cols;
+  int nobs = y.size();
+  
+  vec sigmasqdraw(R/keep);
+  mat betadraw(R/keep, nvar);
+  
+  if (nprint>0) startMcmcTimer();
+
+  for (int rep=0; rep<R; rep++){    
+    RA = chol(A); // NOTE(review): RA, W, z, IR, btilde, res, s are loop-invariant (y, X, A fixed) and could be hoisted above the loop
+    W = join_cols(X, RA); //analogous to rbind() in R
+    z = join_cols(y, RA*betabar);
+    // W'W=R'R ;  (W'W)^-1 = IR IR'  -- this is UL decomp
+    IR = solve(trimatu(chol(trans(W)*W)), eye(nvar,nvar)); //trimatu interprets the matrix as upper triangular and makes solve more efficient
+    btilde = (IR*trans(IR)) * (trans(W)*z);
+    res = z-W*btilde;
+    s = as_scalar(trans(res)*res); //converts the matrix to a scalar
+    
+    // first draw Sigma
+    sigmasq = (nu*ssq+s) / rchisq(1,nu+nobs)[0]; //rchisq returns a vectorized object, so using [0] allows for the conversion to double
+  
+    // now draw beta given Sigma
+    beta = btilde + sqrt(sigmasq)*(IR*vec(rnorm(nvar)));
+    
+    // print time to completion and draw # every nprint'th draw
+    if (nprint>0) if ((rep+1)%nprint==0) infoMcmcTimer(rep, R);
+    
+    if ((rep+1)%keep==0){
+      mkeep = (rep+1)/keep;
+      betadraw(mkeep-1, span::all) = trans(beta);
+      sigmasqdraw[mkeep-1] = sigmasq;
+    }    
+  }  
+  
+  if (nprint>0) endMcmcTimer();
+  
+  return List::create(
+      Named("betadraw") = betadraw, 
+      Named("sigmasqdraw") = NumericVector(sigmasqdraw.begin(),sigmasqdraw.end()));
+}
diff --git a/src/rwishart_rcpp.cpp b/src/rwishart_rcpp.cpp
new file mode 100644
index 0000000..9a0f319
--- /dev/null
+++ b/src/rwishart_rcpp.cpp
@@ -0,0 +1,43 @@
+#include "bayesm.h"
+ 
+// [[Rcpp::export]]
+List rwishart(int const& nu, mat const& V){
+
+// Wayne Taylor 4/7/2015
+
+// Function to draw from Wishart (nu,V) and IW
+ 
+// W ~ W(nu,V)
+// E[W]=nuV
+
+// WI=W^-1
+// E[WI]=V^-1/(nu-m-1)
+  
+  // T has sqrt chisqs on diagonal and normals below diagonal
+  int m = V.n_rows;
+  mat T = zeros(m,m); // Bartlett decomposition; assumes nu > m-1 so each chisq df below is positive -- TODO confirm callers enforce this
+  
+  for(int i = 0; i < m; i++) {
+    T(i,i) = sqrt(rchisq(1,nu-i)[0]); //rchisq returns a vectorized object, so using [0] allows for the conversion to double
+  }
+  
+  for(int j = 0; j < m; j++) {  
+    for(int i = j+1; i < m; i++) {    
+      T(i,j) = rnorm(1)[0]; //rnorm returns a NumericVector, so using [0] allows for conversion to double
+  }}
+  
+  mat C = trans(T)*chol(V);
+  mat CI = solve(trimatu(C),eye(m,m)); //trimatu interprets the matrix as upper triangular and makes solve more efficient
+  
+  // C is the upper triangular root of Wishart therefore, W=C'C
+  // this is the LU decomposition Inv(W) = CICI' Note: this is
+  // the UL decomp not LU!
+  
+  // W is Wishart draw, IW is W^-1
+  
+  return List::create(
+    Named("W") = trans(C) * C,
+    Named("IW") = CI * trans(CI),
+    Named("C") = C,
+    Named("CI") = CI);
+}
diff --git a/src/thetadraw.c b/src/thetadraw.c
deleted file mode 100755
index 2c63ede..0000000
--- a/src/thetadraw.c
+++ /dev/null
@@ -1,151 +0,0 @@
-#include <R.h>
-#include <Rmath.h>
-#include <math.h>
-#include <Rinternals.h>
-#include <Rdefines.h>
-
-/* modified by rossi 7/06 to remove thetaStar argument and copy */
-/* modified by rossi 7/06 to remove theta copy and modify directly */
-/* modified by rossi 8/10 to fix error in allocation of size of newrow */
-
-/* function to make multinomial draws */
-
-int rmultin(double *probs, int nprob )
-{
-	double cumprob,rnd;
-	int i;
-	GetRNGstate();
-	rnd=unif_rand();
-	cumprob=0.0;
-	for (i=0; i < nprob; i++){
-		if(rnd > cumprob && rnd <= cumprob+probs[i])
-		{ break; }
-		else
-		{cumprob=cumprob+probs[i];}
-	}
-	PutRNGstate(); 
-	return i+1 ;
-}
-		
-
-/* gets a row of a matrix and returns this as a matrix by setting dim */
-
-SEXP getrow(SEXP mat, int row, int nrow, int ncol){
-   int i,ind;
-   SEXP ans, ndim;
-   PROTECT(ans=NEW_NUMERIC(ncol));
-   PROTECT(ndim=NEW_INTEGER(2));
-   for(i =0; i < ncol; i++){
-	   ind=i*nrow+row;
-	   NUMERIC_POINTER(ans)[i]=NUMERIC_POINTER(mat)[ind];
-   }
-   INTEGER_POINTER(ndim)[0]=1;
-   INTEGER_POINTER(ndim)[1]=ncol;
-   SET_DIM(ans,ndim);
-   UNPROTECT(2);
-return(ans);
-}
-
-
-/* theta draw routine to be used with .Call */
-
-SEXP  thetadraw( SEXP y,  SEXP ydenmatO, SEXP indicO, SEXP q0v, SEXP p,
-	SEXP theta,  SEXP lambda, SEXP eta,
-                  SEXP thetaD, SEXP yden,
-		  SEXP maxuniqS,SEXP nuniqueS,
-                  SEXP rho) {
-   int nunique,n,ncol,j,i,maxuniq,inc,index,ii,jj,ind ;
-   SEXP R_fc_thetaD, R_fc_yden, yrow, ydim, onetheta, lofone, newrow,
-	ydenmat, ydendim ;
-   double *probs;
-   int *indmi;
-   int *indic;
-   double sprob;
-
-   nunique=INTEGER_VALUE(nuniqueS);
-   n=length(theta);
-   maxuniq=INTEGER_VALUE(maxuniqS);
-
-   /* create new lists for use and output */ 
-   PROTECT(lofone=NEW_LIST(1));
-  
-   /* create R function call object, lang4 creates a pairwise (linked) list with
-      4 values -- function, first arg, sec arg, third arg.  R_NilValue is a placeholder until
-      we associate first argument (which varies in our case) */
-   PROTECT(R_fc_thetaD=lang4(thetaD,R_NilValue,lambda,eta));
-   PROTECT(R_fc_yden=lang4(yden,R_NilValue,y,eta));
-
-   PROTECT(ydim=GET_DIM(y));
-   ncol=INTEGER_POINTER(ydim)[1];
-   PROTECT(yrow=NEW_NUMERIC(ncol));
-   PROTECT(newrow=NEW_NUMERIC(n));
-   PROTECT(ydenmat=NEW_NUMERIC(maxuniq*n));
-   PROTECT(ydendim=NEW_INTEGER(2));
-   INTEGER_POINTER(ydendim)[0]=maxuniq;
-   INTEGER_POINTER(ydendim)[1]=n;
-
-   /* copy iformation from R objects that will be modified      
-      note that we must access elements in the lists (generic vectors) by using VECTOR_ELT
-      we can't use the pointer and deferencing directly like we can for numeric and integer
-      vectors */
-   for(j=0;j < maxuniq*n; j++){NUMERIC_POINTER(ydenmat)[j]=NUMERIC_POINTER(ydenmatO)[j];}
-   SET_DIM(ydenmat,ydendim); 
-
-   /* allocate space for local vectors */
-   probs=(double *)R_alloc(n,sizeof(double));
-   indmi=(int *)R_alloc((n-1),sizeof(int));
-   indic=(int *)R_alloc(n,sizeof(int));
-   
-   /* copy information from R object indicO to indic */
-   for(j=0;j < n; j++) {indic[j]=NUMERIC_POINTER(indicO)[j];}
-
-   /* start loop over observations */
-
-   for(i=0;i < n; i++){
-	 probs[n-1]=NUMERIC_POINTER(q0v)[i]*NUMERIC_POINTER(p)[n-1];
-
-	 /* make up indmi -- vector of length n-1 consisting of -i as in R notation --
-	    1, ...,i-1, ,i+1,...,n */
-	 inc=0;
-	 for(j=0;j < (n-1); j++){
-		 if(j==i) {inc=inc+1;};
-		 indmi[j]=inc;
-		 inc=inc+1;
-	 }
-	 for(j=0;j < (n-1); j++){
-		 ii=indic[indmi[j]]; jj=i;      /* find element ydenmat(ii,jj+1) */
-		 index=jj*maxuniq+(ii-1);
-		 probs[j]=NUMERIC_POINTER(p)[j]*NUMERIC_POINTER(ydenmat)[index];
-	 }
-	 sprob=0.0;
-	 for(j=0;j<n;j++){sprob=sprob+probs[j];}
-	 for(j=0;j<n;j++){probs[j]=probs[j]/sprob;}
-	 ind=rmultin(probs,n);
-          
-	 if(ind == n){
-                 yrow=getrow(y,i,n,ncol);
-                 SETCADR(R_fc_thetaD,yrow);   /* set the second argument to yrow -- head of the tail */
-        	 onetheta=eval(R_fc_thetaD,rho);
-                 SET_ELEMENT(theta,i,onetheta);
-		 if((nunique) > (maxuniq-1)) {error("max number of unique thetas exceeded");}
-		                             /* check to make sure we don't exceed max number of unique theta */
-	         SET_ELEMENT(lofone,0,onetheta);
-	         SETCADR(R_fc_yden,lofone);
-	         newrow=eval(R_fc_yden,rho);
-	         for(j=0;j<n; j++)
-		    { NUMERIC_POINTER(ydenmat)[j*maxuniq+nunique]=NUMERIC_POINTER(newrow)[j];}
-		 indic[i]=nunique+1;
-		 nunique=nunique+1;
-	 }
-	 else {
-		 onetheta=VECTOR_ELT(theta,indmi[ind-1]);
-		 SET_ELEMENT(theta,i,onetheta);
-		 indic[i]=indic[indmi[ind-1]];
-	 }
-    }
-
-    UNPROTECT(8);
-    return(nuniqueS);     /* returns argument -- function now is called for its effect on theta */
-}
- 
-
diff --git a/src/utilityFunctions.cpp b/src/utilityFunctions.cpp
new file mode 100644
index 0000000..b99fbca
--- /dev/null
+++ b/src/utilityFunctions.cpp
@@ -0,0 +1,789 @@
+#include "bayesm.h"
+ 
+//Used in rmvpGibbs and rmnpGibbs---------------------------------------------------------------------------------
+vec condmom(vec const& x, vec const& mu, mat const& sigmai, int p, int j){
+  
+// Wayne Taylor 9/24/2014
+
+//function to compute moments of x[j] | x[-j]
+//output is a vec: the first element is the conditional mean
+//                 the second element is the conditional sd
+
+  vec out(2);
+  int jm1 = j-1; // j is 1-based (R convention)
+  int ind = p*jm1; // linear (column-major) offset of column j-1 in the p x p precision matrix sigmai
+  
+  double csigsq = 1./sigmai(ind+jm1);
+  double m = 0.0;
+  
+  for(int i = 0; i<p; i++) if (i!=jm1) m += - csigsq*sigmai(ind+i)*(x[i]-mu[i]);
+  
+  out[0] = mu[jm1]+m;
+  out[1] = sqrt(csigsq);
+  
+  return (out);
+}
+
+double rtrun1(double mu, double sigma,double trunpt, int above) {
+
+// Wayne Taylor 9/8/2014
+  
+//function to draw truncated normal
+//above=1 means from above b=trunpt, a=-inf
+//above=0 means from below a=trunpt, b= +inf   
+//modified by rossi 6/05 to check arg to qnorm
+
+	double FA,FB,rnd,result,arg;
+	if (above) {
+		FA = 0.0; FB = R::pnorm(((trunpt-mu)/(sigma)),0.0,1.0,1,0);
+	} else {
+		FB = 1.0; FA = R::pnorm(((trunpt-mu)/(sigma)),0.0,1.0,1,0);
+	}
+	
+  rnd = runif(1)[0]; //runif returns a NumericVector, so using [0] allows for conversion to double
+	arg = rnd*(FB-FA)+FA;
+	if(arg > .999999999) arg = .999999999; // guard: qnorm(1) = +Inf
+	if(arg < .0000000001) arg = .0000000001; // guard: qnorm(0) = -Inf
+	result = mu + sigma*R::qnorm(arg,0.0,1.0,1,0);
+
+	return (result);
+}
+
+//Used in rhierLinearModel, rhierLinearMixture and rhierMnlRWMixture------------------------------------------------------
+mat drawDelta(mat const& x,mat const& y,vec const& z,List const& comps,vec const& deltabar,mat const& Ad){
+
+// Wayne Taylor 10/01/2014
+
+// delta = vec(D)
+//  given z and comps (z[i] gives component indicator for the ith observation, 
+//   comps is a list of mu and rooti)
+// y is n x p
+// x is n x k
+// y = xD' + U , rows of U are indep with covs Sigma_i given by z and comps
+
+  int p = y.n_cols;
+  int k = x.n_cols;
+  int ncomp  = comps.length();
+  mat xtx = zeros<mat>(k*p,k*p);
+  mat xty = zeros<mat>(p,k); //this is the unvecced version, reshaped after the sum
+  
+  //Create the index vectors, the colAll vectors are equal to span::all but with uvecs (as required by .submat)
+  uvec colAlly(p), colAllx(k);
+  for(int i = 0; i<p; i++) colAlly(i) = i;
+  for(int i = 0; i<k; i++) colAllx(i) = i;
+  
+  //Loop through the components
+  for(int compi = 0; compi<ncomp; compi++){
+    
+    //Create an index vector ind, to be used like y[ind,]
+    uvec ind = find(z == (compi+1)); // z holds 1-based component labels
+  
+    //If there are observations in this component
+    if(ind.size()>0){
+      mat yi = y.submat(ind,colAlly);
+      mat xi = x.submat(ind,colAllx);
+      
+      List compsi = comps[compi];
+      rowvec mui = as<rowvec>(compsi[0]); //conversion from Rcpp to Armadillo requires explicit declaration of variable type using as<>
+      mat rootii = trimatu(as<mat>(compsi[1])); //trimatu interprets the matrix as upper triangular
+      yi.each_row() -= mui; //subtracts mui from each row of yi
+      mat sigi = rootii*trans(rootii);
+      xtx = xtx + kron(trans(xi)*xi,sigi);
+      xty = xty + (sigi * (trans(yi)*xi));
+    }
+  }
+  xty.reshape(xty.n_rows*xty.n_cols,1);
+  
+  //vec(t(D)) ~ N(V^{-1}(xty + Ad*deltabar),V^{-1}) where V = (xtx+Ad)
+  // compute the inverse of xtx+Ad
+  mat ucholinv = solve(trimatu(chol(xtx+Ad)), eye(k*p,k*p)); //trimatu interprets the matrix as upper triangular and makes solve more efficient
+  mat Vinv = ucholinv*trans(ucholinv);
+  
+  return(Vinv*(xty+Ad*deltabar) + trans(chol(Vinv))*as<vec>(rnorm(deltabar.size())));
+}
+
+unireg runiregG(vec const& y, mat const& X, mat const& XpX, vec const& Xpy, double sigmasq, mat const& A, 
+              vec const& Abetabar, int nu, double ssq) {
+
+// Keunwoo Kim 09/16/2014
+
+// Purpose: 
+//  perform one Gibbs iteration for Univ Regression Model
+//  only does one iteration so can be used in rhierLinearModel
+
+// Model:
+//  y = Xbeta + e  e ~N(0,sigmasq)
+//  y is n x 1
+//  X is n x k
+//  beta is k x 1 vector of coefficients
+
+// Prior:  
+//  beta ~ N(betabar,A^-1)
+//  sigmasq ~ (nu*ssq)/chisq_nu
+
+  unireg out_struct;
+  
+  int n = y.size();
+  int k = XpX.n_cols;
+  
+  //first draw beta | sigmasq
+  mat IR = solve(trimatu(chol(XpX/sigmasq+A)), eye(k,k)); //trimatu interprets the matrix as upper triangular and makes solve more efficient
+  vec btilde = (IR*trans(IR)) * (Xpy/sigmasq + Abetabar); // posterior mean given sigmasq
+  vec beta = btilde + IR*vec(rnorm(k));
+  
+  //now draw sigmasq | beta
+  double s = sum(square(y-X*beta));
+  sigmasq = (s + nu*ssq)/rchisq(1,nu+n)[0]; //rchisq returns a vectorized object, so using [0] allows for the conversion to double
+  
+  out_struct.beta = beta;
+  out_struct.sigmasq = sigmasq;  
+
+  return (out_struct);
+}
+
+//Used in rnegbinRW and rhierNegbinRw-------------------------------------------------------------------------------------
+double llnegbin(vec const& y, vec const& lambda, double alpha, bool constant){
+
+// Keunwoo Kim 11/02/2014
+
+// Computes the log-likelihood
+
+// Arguments
+//      y - a vector of observation
+//      lambda - a vector of mean parameter (=exp(X*beta))
+//      alpha - dispersion parameter
+//      constant - TRUE(FALSE) if it computes (un)normalized log-likelihood
+
+// PMF
+//      pmf(y) = (y+alpha-1)Choose(y) * p^alpha * (1-p)^y
+//      (y+alpha-1)Choose(y) = (alpha)*(alpha+1)*...*(alpha+y-1) / y! when y>=1 (0 when y=0)
+
+  int i;
+  int nobs = y.size();  
+  vec prob = alpha/(alpha+lambda);    
+  vec logp(nobs);
+  if (constant){
+    // normalized log-likelihood
+    for (i=0; i<nobs; i++){
+      // the fourth argument "1" indicates log-density
+      logp[i] = R::dnbinom(y[i], alpha, prob[i], 1);
+    }    
+  }else{
+    // unnormalized log-likelihood
+    logp = sum(alpha*log(prob) + y % log(1-prob)); //% does element-wise multiplication; NOTE(review): the rhs sum(...) is a scalar assigned into the vec logp before sum(logp) is returned -- verify this is the intended expression rather than the elementwise vector
+  }
+  return (sum(logp));
+}
+
+double lpostbeta(double alpha, vec const& beta, mat const& X, vec const& y, vec const& betabar, mat const& rootA){
+
+// Keunwoo Kim 11/02/2014
+
+// Computes log posterior for beta | alpha
+
+// Arguments
+//        alpha - dispersion parameter of negative-binomial
+//        beta - parameter of our interests
+//        X, y - observation from data
+//        betabar - mean of beta prior
+//        rootA - t(rootA)%*%rootA = A (A^-1 is var-cov matrix of beta prior)
+
+// Prior
+//        beta ~ N(betabar, A^-1)
+
+  vec lambda = exp(X*beta);
+  double ll = llnegbin(y, lambda, alpha, FALSE); // unnormalized ll is sufficient for MH ratios
+
+  // unnormalized prior
+  vec z = rootA*(beta-betabar);
+  double lprior = - 0.5*sum(z%z);
+  
+  return (ll+lprior);
+}
+
+double lpostalpha(double alpha, vec const& beta, mat const& X, vec const& y, double a, double b){
+
+// Keunwoo Kim 11/02/2014
+
+// Computes log posterior for alpha | beta
+
+// Arguments
+//        alpha - dispersion parameter of negative-binomial
+//        beta - parameter of our interests
+//        X, y - observation from data
+//        a,b - parameters for Gamma distribution, alpha prior
+
+// Prior
+//        alpha ~ Gamma(a,b)
+//        pdf(alpha) = b^a / Gamma(a) * alpha^(a-1) * e^(-b*alpha)
+
+  vec lambda = exp(X*beta);
+  double ll = llnegbin(y, lambda, alpha, TRUE);
+  // unnormalized prior (log of alpha^(a-1) * e^(-b*alpha))
+  double lprior = (a-1)*log(alpha) - b*alpha;  
+  
+  return (ll+lprior);
+}
+
+//Used in rbprobitGibbs and rordprobitGibbs-----------------------------------------------------------------------
+vec breg1(mat const& root, mat const& X, vec const& y, vec const& Abetabar) {
+
+// Keunwoo Kim 06/20/2014
+
+// Purpose: draw from posterior for linear regression, sigmasq=1.0
+
+// Arguments:
+//  root = chol((X'X+A)^-1)
+//  Abetabar = A*betabar
+
+// Output: draw from posterior
+
+// Model: y = Xbeta + e  e ~ N(0,I)
+
+// Prior: beta ~ N(betabar,A^-1)
+
+  mat cov = trans(root)*root;  // = (X'X+A)^-1, the posterior covariance
+    
+  return (cov*(trans(X)*y+Abetabar) + trans(root)*vec(rnorm(root.n_cols)));
+}
+
+vec rtrunVec(vec const& mu,vec const& sigma, vec const& a, vec const& b){
+  
+// Keunwoo Kim 06/20/2014  
+
+//function to draw from univariate truncated norm
+//a is vector of lower bounds for truncation
+//b is vector of upper bounds for truncation
+
+  int n = mu.size();
+  vec FA(n);
+  vec FB(n);
+  vec out(n);
+  for (int i=0; i<n; i++) {
+    FA[i] = R::pnorm((a[i]-mu[i])/sigma[i],0,1,1,0);
+    FB[i] = R::pnorm((b[i]-mu[i])/sigma[i],0,1,1,0);
+    out[i] = mu[i]+sigma[i]*R::qnorm(R::runif(0,1)*(FB[i]-FA[i])+FA[i],0,1,1,0); // NOTE(review): unlike rtrun1, no clamping of the qnorm argument -- can return +/-Inf when the truncation region is in a far tail
+  }
+
+  return(out);
+}
+
+//Used in rhierMnlDP and rhierMnlRwMixture------------------------------------------------------------------------
+mnlMetropOnceOut mnlMetropOnce(vec const& y, mat const& X, vec const& oldbeta, 
+                                                 double oldll,double s, mat const& incroot, 
+                                                 vec const& betabar, mat const& rootpi){ 
+// Wayne Taylor 10/01/2014
+
+// function to execute rw metropolis for the MNL
+// y is n vector with element = 1,...,j indicating which alt chosen
+// X is nj x k matrix of xvalues for each of j alt on each of n occasions
+// RW increments are N(0,s^2*t(inc.root)%*%inc.root)
+// prior on beta is N(betabar,Sigma)  Sigma^-1=rootpi*t(rootpi)
+//  inc.root, rootpi are upper triangular
+//  this means that we are using the UL decomp of Sigma^-1 for prior 
+// oldbeta is the current
+
+
+mnlMetropOnceOut metropout_struct;
+
+double unif;
+vec betadraw, alphaminv;
+
+int stay = 0;
+vec betac = oldbeta + s*trans(incroot)*as<vec>(rnorm(X.n_cols)); // RW candidate
+double cll = llmnl(betac,y,X);
+double clpost = cll+lndMvn(betac,betabar,rootpi);
+double ldiff = clpost-oldll-lndMvn(oldbeta,betabar,rootpi);
+alphaminv << 1 << exp(ldiff);
+double alpha = min(alphaminv); // MH acceptance probability min(1,exp(ldiff))
+
+     if(alpha < 1) {
+       unif = runif(1)[0]; //runif returns a NumericVector, so using [0] allows for conversion to double
+      } else { 
+        unif=0;}
+     if (unif <= alpha) {
+       betadraw = betac;
+       oldll = cll;
+      } else {
+        betadraw = oldbeta;
+        stay = 1;
+      }
+
+metropout_struct.betadraw = betadraw;
+metropout_struct.stay = stay;  
+metropout_struct.oldll = oldll; // log-likelihood of the returned draw
+
+return (metropout_struct);
+}
+
+//Used in rDPGibbs, rhierMnlDP, rivDP-----------------------------------------------------------------------------
+int rmultinomF(vec const& p){
+  
+// Wayne Taylor 1/28/2015
+
+// one multinomial draw from probability vector p via inverse CDF
+
+  vec csp = cumsum(p);
+  double rnd = runif(1)[0]; //runif returns a NumericVector, so using [0] allows for conversion to double
+  int res = 0;
+  int psize = p.size();
+  
+  for(int i = 0; i < psize; i++){
+    if(rnd > csp[i]) res = res+1;
+  }
+  
+  return(res+1); // 1-based category index
+}
+
+mat yden(std::vector<murooti> const& thetaStar_vector, mat const& y){
+
+// Wayne Taylor 2/4/2015
+  
+// function to compute f(y | theta) 
+// computes f for all values of theta in theta list of lists
+      
+// arguments:
+//  thetaStar is a list of lists.  thetaStar[[i]] is a list with components, mu, rooti
+//  y |theta[[i]] ~ N(mu,(rooti %*% t(rooti))^-1)  rooti is inverse of Chol root of Sigma
+
+// output:
+//  length(thetaStar) x n array of values of f(y[j,]|thetaStar[[i]]
+  
+  int nunique = thetaStar_vector.size();
+  int n = y.n_rows;
+  int k = y.n_cols;
+  mat ydenmat = zeros<mat>(nunique,n);
+  
+  vec mu;
+  mat rooti, transy, quads;
+  
+  for(int i = 0; i < nunique; i++){
+    //now compute vectorized version of lndMvn 
+    //compute y_i'RIRI'y_i for all i
+        
+    mu = thetaStar_vector[i].mu;
+    rooti = thetaStar_vector[i].rooti;
+  
+    transy = trans(y);
+    transy.each_col() -= mu; //column-wise subtraction
+    
+    quads = sum(square(trans(rooti) * transy),0); //same as colSums
+    ydenmat(i,span::all) = exp(-(k/2.0)*log(2*M_PI) + sum(log(rooti.diag())) - .5*quads); // vectorized MVN density (lndMvn), exponentiated
+  }
+  
+  return(ydenmat);
+}
+
+ivec numcomp(ivec const& indic, int k){
+
+// Wayne Taylor 1/28/2015
+  
+  //find the number of times each of k integers is in the vector indic
+  ivec ncomp(k);
+  
+  for(int comp = 0; comp < k; comp++){
+    ncomp[comp]=sum(indic == (comp+1)); // labels in indic are 1-based
+  }
+  
+  return(ncomp);
+}
+
+murooti thetaD(mat const& y, lambda const& lambda_struct){
+
+// Wayne Taylor 2/4/2015
+  
+// function to draw from posterior of theta given data y and base prior G0(lambda)
+      
+// here y ~ N(mu,Sigma)
+// theta = list(mu=mu,rooti=chol(Sigma)^-1)
+// mu|Sigma ~ N(mubar,Sigma (x) Amu-1)
+// Sigma ~ IW(nu,V)
+      
+// arguments: 
+//  y is n x k matrix of obs
+//  lambda is list(mubar,Amu,nu,V)
+
+// output:
+//  one draw of theta, list(mu,rooti)
+//  Sigma=inv(rooti)%*%t(inv(rooti))
+      
+// note: we assume that y is a matrix. if there is only one obs, y is a 1 x k matrix
+
+  mat X = ones<mat>(y.n_rows,1);
+  mat A(1,1); A.fill(lambda_struct.Amu);
+  
+  List rout = rmultireg(y,X,trans(lambda_struct.mubar),A,lambda_struct.nu,lambda_struct.V);
+  
+  murooti out_struct;
+    out_struct.mu = as<vec>(rout["B"]); //conversion from Rcpp to Armadillo requires explicit declaration of variable type using as<>
+    out_struct.rooti = solve(chol(trimatu(as<mat>(rout["Sigma"]))),eye(y.n_cols,y.n_cols)); // NOTE(review): trimatu is applied to Sigma BEFORE chol, so solve gets no triangular hint here; likely intended solve(trimatu(chol(Sigma)),...) -- appears to give the same result since chol reads only the upper triangle, but confirm
+  
+  return(out_struct);
+}
+
+thetaStarIndex thetaStarDraw(ivec indic, std::vector<murooti> thetaStar_vector, mat const& y, mat ydenmat, vec const& q0v, double alpha, 
+                        lambda const& lambda_struct, int maxuniq) {
+                          
+// Wayne Taylor 2/4/2015
+                               
+// indic is n x 1 vector of indicator of which of thetaStar is assigned to each observation
+// thetaStar is list of the current components (some of which may never be used)
+// y is n x d matrix of observations
+// ydenmat is maxuniq x n matrix to store density evaluations - we assume first 
+// length(Thetastar) rows are filled in with density evals
+// q0v is vector of bayes factors for new component and each observation
+// alpha is DP process tightness prior
+// lambda is list of priors for the base DP process measure
+// maxuniq maximum number of mixture components
+// yden is function to fill out an array
+// thetaD is function to draw theta from posterior of theta given y and G0
+// note: indic, thetaStar_vector, ydenmat are passed by value; updated copies are returned in the output struct
+   
+  int n = indic.size();
+  ivec ncomp, indicC;
+  int k, inc, cntNonzero;
+  std::vector<murooti> listofone_vector(1);
+  std::vector<murooti> thetaStarC_vector;
+  
+  //draw theta_i given theta_-i
+  for(int i = 0; i<n; i++){
+   k = thetaStar_vector.size();
+   vec probs(k+1);
+   probs[k] = q0v[i]*(alpha/(alpha+(n-1))); // weight on spawning a new component
+   
+   //same as to indicmi = indic[-i]
+   ivec indicmi = zeros<ivec>(n-1);
+   inc = 0;
+   for(int j = 0; j<(n-1); j++){
+     if(j == i) {inc = inc + 1;}
+     indicmi[j] = indic[inc];
+     inc = inc+1;
+   }
+   
+   ncomp = numcomp(indicmi,k);
+   
+   for(int comp = 0; comp<k; comp++){
+     probs[comp] = ydenmat(comp,i)*ncomp[comp]/(alpha+(n-1));
+   }
+   
+   probs = probs/sum(probs);
+   indic[i] = rmultinomF(probs);
+  
+   if(indic[i] == (k+1)){
+     if((k+1) > maxuniq) {
+        stop("max number of comps exceeded");
+     } else {
+      listofone_vector[0] = thetaD(y(i,span::all),lambda_struct);
+      thetaStar_vector.push_back(listofone_vector[0]);
+      ydenmat(k,span::all) = yden(listofone_vector,y); // cache density row for the newly added component
+    }}
+  }
+  
+  //clean out thetaStar of any components which have zero observations associated with them
+  //and re-write indic vector 
+  k = thetaStar_vector.size();
+  indicC = zeros<ivec>(n);
+  ncomp = numcomp(indic,k);
+  
+  cntNonzero = 0;
+  for(int comp = 0; comp<k; comp++){
+   if(ncomp[comp] != 0){
+     thetaStarC_vector.push_back(thetaStar_vector[comp]);
+     cntNonzero=cntNonzero+1;
+   for(int i = 0; i<n; i++){if(indic[i] == (comp+1)) indicC[i] = cntNonzero;} //same as indicC(indic==comp) = cntNonzero;
+   }
+  }
+
+  thetaStarIndex out_struct;
+    out_struct.indic = indicC;
+    out_struct.thetaStar_vector = thetaStarC_vector;
+
+  return(out_struct);
+}
+
vec q0(mat const& y, lambda const& lambda_struct){
  
// Wayne Taylor 2/4/2015

// function to compute a vector of int f(y[i]|theta) p(theta|lambda)dlambda
// here p(theta|lambda) is G0 the base prior

// implemented for a multivariate normal data density and standard conjugate prior:
//  theta=list(mu,Sigma)
//  f(y|theta,eta) is N(mu,Sigma)
//  lambda=list(mubar,Amu,nu,V)
//    mu|Sigma ~ N(mubar,Sigma (x) Amu^-1)
//    Sigma ~ IW(nu,V)

// arguments:
//  Y is n x k matrix of observations
//  lambda=list(mubar,Amu,nu,V)
 
// output:
//  vector of q0 values for each obs (row of Y)

// p. rossi 12/05
//  here y is matrix of observations (each row is an obs)
  
  int k = y.n_cols;
  mat R = chol(lambda_struct.V);        // V = R'R, R upper triangular
  double logdetR = sum(log(R.diag()));  // = .5*log|V|
  double lnk1k2, constant;
  mat transy, m, vivi, lnq0v;
  
  // lnk1k2 collects the log normalizing constants (ratios of multivariate
  // gamma functions) of the conjugate marginal; the k>1 branch adds the
  // extra product terms that appear only for dimension > 1
  if (k > 1) {
    vec km1(k-1); for(int i = 0; i < (k-1); i++) km1[i] = i+1; //vector of 1:(k-1)
    lnk1k2 = (k/2.0)*log(2.0)+log((lambda_struct.nu-k)/2)+lgamma((lambda_struct.nu-k)/2)-lgamma(lambda_struct.nu/2)+sum(log(lambda_struct.nu/2-km1/2));
  } else {
    lnk1k2 = (k/2.0)*log(2.0)+log((lambda_struct.nu-k)/2)+lgamma((lambda_struct.nu-k)/2)-lgamma(lambda_struct.nu/2);
  }
  
  // observation-independent part of the log marginal density
  constant = -(k/2.0)*log(2*M_PI)+(k/2.0)*log(lambda_struct.Amu/(1+lambda_struct.Amu)) + lnk1k2 + lambda_struct.nu*logdetR;

// note: here we are using the fact that |V + S_i | = |R|^2 (1 + v_i'v_i)
//  where v_i = sqrt(Amu/(1+Amu))*t(R^-1)*(y_i-mubar), R is chol(V)
//  and S_i = Amu/(1+Amu) * (y_i-mubar)(y_i-mubar)'
      
  transy = trans(y);
  transy.each_col() -= lambda_struct.mubar; // center each observation at mubar
  
  m = sqrt(lambda_struct.Amu/(1+lambda_struct.Amu))*trans(solve(trimatu(R),eye(y.n_cols,y.n_cols)))*transy; //trimatu interprets the matrix as upper triangular and makes solve more efficient
  
  vivi = sum(square(m),0); // v_i'v_i, one column per observation
  
  lnq0v = constant - ((lambda_struct.nu+1)/2)*(2*logdetR+log(1+vivi));
  
  return(trans(exp(lnq0v)));
}
+
+vec seq_rcpp(double from, double to, int len){
+
+// Wayne Taylor 1/28/2015
+
+// Same as R::seq()
+
+  vec res(len);
+  res[len-1] = to; res[0] = from; //note the order of these two statements is important, when gridsize = 1 res[0] will be rewritten to the correct number
+  double increment = (res[len-1]-res[0])/(len-1);
+  for(int i = 1; i<(len-1); i++) res[i] = res[i-1] + increment;
+  return(res);
+}
+
+double alphaD(priorAlpha const& priorAlpha_struct, int Istar, int gridsize){
+
+// Wayne Taylor 2/4/2015
+  
+// function to draw alpha using prior, p(alpha)= (1-(alpha-alphamin)/(alphamax-alphamin))**power
+      
+  //same as seq
+  vec alpha = seq_rcpp(priorAlpha_struct.alphamin,priorAlpha_struct.alphamax-.000001,gridsize);
+  
+  vec lnprob(gridsize);
+  for(int i = 0; i<gridsize; i++){
+    lnprob[i] = Istar*log(alpha[i]) + lgamma(alpha[i]) - lgamma(priorAlpha_struct.n+alpha[i]) + priorAlpha_struct.power*log(1-(alpha[i]-priorAlpha_struct.alphamin)/(priorAlpha_struct.alphamax-priorAlpha_struct.alphamin));
+  }
+  
+  lnprob = lnprob - median(lnprob);
+  vec probs=exp(lnprob);
+  probs=probs/sum(probs);
+  
+  return(alpha(rmultinomF(probs)-1));
+}
+
murooti GD(lambda const& lambda_struct){
  
// Wayne Taylor 2/4/2015
      
// function to draw from prior for Multivariate Normal Model
      
// mu|Sigma ~ N(mubar,Sigma x Amu^-1)
// Sigma ~ IW(nu,V)

// note: we must insure that mu is a vector to use most efficient lndMvn routine

// output: murooti with mu and rooti = inv(chol(Sigma)) (upper-triangular inverse root)

  int k = lambda_struct.mubar.size();
  
  // rwishart is called with V^-1; its "IW" element supplies the Sigma ~ IW(nu,V) draw
  List Rout = rwishart(lambda_struct.nu,solve(trimatu(lambda_struct.V),eye(k,k))); //trimatu interprets the matrix as upper triangular and makes solve more efficient
  mat Sigma = as<mat>(Rout["IW"]); //conversion from Rcpp to Armadillo requires explict declaration of variable type using as<>
  mat root = chol(Sigma);
  mat draws = rnorm(k); // k iid N(0,1) draws (Rcpp vector, converted to a k x 1 matrix)
  // location-scale transform: mu = mubar + chol(Sigma)'*z/sqrt(Amu), i.e. mu|Sigma ~ N(mubar,Sigma/Amu)
  mat mu = lambda_struct.mubar + (1/sqrt(lambda_struct.Amu))*trans(root)*draws;
  
  murooti out_struct;
    out_struct.mu = mu;
    out_struct.rooti = solve(trimatu(root),eye(k,k)); //trimatu interprets the matrix as upper triangular and makes solve more efficient

  return(out_struct);
}
+
+
lambda lambdaD(lambda const& lambda_struct, std::vector<murooti> const& thetaStar_vector, vec const& alim, vec const& nulim, vec const& vlim, int gridsize){

// Wayne Taylor 2/4/2015

// revision history
//  p. rossi 7/06
//  vectorized 1/07
//  changed 2/08 to paramaterize V matrix of IW prior to nu*v*I; then mode of Sigma=nu/(nu+2)vI
//    this means that we have a reparameterization to v* = nu*v

// function to draw (nu, v, a) using uniform priors

// theta_j=(mu_j,Sigma_j)  mu_j~N(0,Sigma_j/a)  Sigma_j~IW(nu,vI)
//  recall E[Sigma]= vI/(nu-dim-1)

// arguments:
//  thetaStar_vector: current unique components (the "observations" for this draw)
//  alim, nulim, vlim: 2-element vectors of grid limits for a, nu, v
//  gridsize: number of points in each univariate grid (griddy Gibbs over each parameter)

  vec lnprob, probs, rowSumslgammaarg;
  int ind; //placeholder for matrix indexing
  murooti thetaStari_struct; mat rootii; vec mui;
  mat mout, rimu, arg, lgammaarg;
  double sumdiagriri, sumlogdiag, sumquads, adraw, nudraw, vdraw;

  murooti thetaStar0_struct = thetaStar_vector[0];
  int d = thetaStar0_struct.mu.size();     // dimension of each component
  int Istar = thetaStar_vector.size();     // number of unique components
  
  vec aseq = seq_rcpp(alim[0],alim[1],gridsize);
  vec nuseq = d-1+exp(seq_rcpp(nulim[0],nulim[1],gridsize)); //log uniform grid
  vec vseq = seq_rcpp(vlim[0],vlim[1],gridsize);

// "brute" force approach would simply loop over the 
//  "observations" (theta_j) and use log of the appropriate densities.  To vectorize, we
// notice that the "data" comes via various statistics:
//  1. sum of log(diag(rooti_j)
//  2. sum of tr(V%*%rooti_j%*%t(rooti_j)) where V=vI_d
//  3. quadratic form t(mu_j-0)%*%rooti%*%t(rooti)%*%(mu_j-0)
// thus, we will compute these first.
// for documentation purposes, we leave brute force code in comment fields

// extract needed info from thetastar list
  
  //mout has the rootis in form: [t(rooti_1), t(rooti_2), ...,t(rooti_Istar)]
  mout = zeros<mat>(d,Istar*d);
  ind = 0;
  for(int i = 0; i < Istar; i++){
    thetaStari_struct = thetaStar_vector[i];
    rootii = thetaStari_struct.rooti;
    ind = i*d;
    mout.submat(0, ind,d-1,ind+d-1) = trans(rootii);
  }
  sumdiagriri = sum(sum(square(mout),0)); //sum_i trace(rooti_i*trans(rooti_i))

// now get diagonals of rooti
  sumlogdiag = 0.0;
  for(int i = 0; i < Istar; i++){
    ind = i*d;
    for(int j = 0; j < d; j++){
      sumlogdiag = sumlogdiag+log(mout(j,ind+j));
    }
  }

  //columns of rimu contain trans(rooti_i)*mu_i
  rimu = zeros<mat>(d,Istar);
  for(int i = 0; i < Istar; i++){
    thetaStari_struct = thetaStar_vector[i];
    mui = thetaStari_struct.mu;
    rootii = thetaStari_struct.rooti;
    rimu(span::all,i) = trans(rootii) * mui;
  }
  sumquads = sum(sum(square(rimu),0)); //sum_i mu_i'rooti_i rooti_i'mu_i

// draw a  (conditionally indep of nu,v given theta_j)
  lnprob = zeros<vec>(aseq.size());
// for(i in seq(along=aseq)){
// for(j in seq(along=thetastar)){
// lnprob[i]=lnprob[i]+lndMvn(thetastar[[j]]$mu,c(rep(0,d)),thetastar[[j]]$rooti*sqrt(aseq[i]))}
  lnprob = Istar*(-(d/2.0)*log(2*M_PI))-.5*aseq*sumquads+Istar*d*log(sqrt(aseq))+sumlogdiag;
  // shift log-probs (max maps to 200) so exp() below stays finite and nonzero
  lnprob = lnprob-max(lnprob) + 200;
  probs = exp(lnprob);
  probs = probs/sum(probs);
  adraw = aseq[rmultinomF(probs)-1]; //rmultinomF returns a 1-based index

// draw nu given v

  lnprob = zeros<vec>(nuseq.size());
// for(i in seq(along=nuseq)){
// for(j in seq(along=thetastar)){
// Sigma_j=crossprod(backsolve(thetastar[[j]]$rooti,diag(d)))
// lnprob[i]=lnprob[i]+lndIWishart(nuseq[i],V,Sigma_j)}

  //same as arg = (nuseq+1-arg)/2.0;
  //arg(i,j) = (nuseq[i]+1-(j+1))/2, the lgamma arguments of the IW normalizer
  arg = zeros<mat>(gridsize,d);
  for(int i = 0; i < d; i++) {
    vec indvec(gridsize);
    indvec.fill(-(i+1)+1);
    arg(span::all,i) = indvec;
  }
  arg.each_col() += nuseq;
  arg = arg/2.0;

  lgammaarg = zeros<mat>(gridsize,d);
  for(int i = 0; i < gridsize; i++){
    for(int j = 0; j < d; j++){
      lgammaarg(i,j) = lgamma(arg(i,j));
  }}
  rowSumslgammaarg = sum(lgammaarg,1);
  
  lnprob = zeros<vec>(gridsize);
  for(int i = 0; i<gridsize; i++){
    lnprob[i] = -Istar*log(2.0)*d/2.0*nuseq[i] - Istar*rowSumslgammaarg[i] + Istar*d*log(sqrt(lambda_struct.V(0,0)))*nuseq[i] + sumlogdiag*nuseq[i];
  }
  
  lnprob = lnprob-max(lnprob)+200;
  probs = exp(lnprob);
  probs = probs/sum(probs);
  nudraw = nuseq[rmultinomF(probs)-1];

// draw v given nu 
      
  lnprob = zeros<vec>(vseq.size());
// for(i in seq(along=vseq)){
// V=vseq[i]*diag(d)
// for(j in seq(along=thetastar)){
// Sigma_j=crossprod(backsolve(thetastar[[j]]$rooti,diag(d)))
// lnprob[i]=lnprob[i]+lndIWishart(nudraw,V,Sigma_j)}
// lnprob=Istar*nudraw*d*log(sqrt(vseq))-.5*sumdiagriri*vseq
      
  // note the reparameterization v* = nu*v (see 2/08 revision note above)
  lnprob = Istar*nudraw*d*log(sqrt(vseq*nudraw))-.5*sumdiagriri*vseq*nudraw;
  lnprob = lnprob-max(lnprob)+200;
  probs = exp(lnprob);
  probs = probs/sum(probs);
  vdraw = vseq[rmultinomF(probs)-1];

// put back into lambda
  lambda out_struct;
    out_struct.mubar = zeros<vec>(d);
    out_struct.Amu = adraw;
    out_struct.nu = nudraw;
    out_struct.V = nudraw*vdraw*eye(d,d); // V = nu*v*I per the reparameterization
  
  return(out_struct);
}
+
+//Used in llnhlogit and simnhlogit---------------------------------------------------------------------------------
double root(double c1, double c2, double tol, int iterlim){

// Find the root u > 0 of c1 - c2*u = ln(u) via damped Newton-style updates,
// stopping when successive iterates agree within tol or iterlim is reached.
   
   double prev = .1;     // previous iterate (seeded so the first test passes)
   double curr = .00001; // current iterate (small positive starting value)
   
   for(int it = 0; it <= iterlim; it++){
     if(fabs(prev-curr) <= tol) break; // converged
     prev = curr;
     curr = prev + (prev*(c1 - c2*prev - log(prev)))/(1. + c2*prev);
     // keep the iterate strictly positive so log() stays defined
     if(curr < 1.0e-50) curr = 1.0e-50;
   }
   
   return(curr);
}
+
+//[[Rcpp::export]]
+vec callroot(vec const& c1, vec const& c2, double tol, int iterlim){
+  
+  int n = c1.size();
+  vec u = zeros<vec>(n);
+  
+  for(int i = 0; i<n; i++){
+    u[i] = root(c1[i],c2[i],tol,iterlim);
+  }
+  
+  return(u);
+}

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-bayesm.git



More information about the debian-science-commits mailing list