[med-svn] [r-cran-vegan] 01/05: Imported Upstream version 2.4-0

Andreas Tille tille at debian.org
Mon Aug 8 08:35:05 UTC 2016


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-vegan.

commit d118e5b42fcf77ac591583c7895c330e4cf415bd
Author: Andreas Tille <tille at debian.org>
Date:   Mon Aug 8 09:05:58 2016 +0200

    Imported Upstream version 2.4-0
---
 DESCRIPTION                  |  17 +--
 MD5                          | 293 ++++++++++++++++++++++---------------------
 NAMESPACE                    |  31 +++--
 R/GowerDblcen.R              |  48 +++++++
 R/MDSrotate.R                |  13 ++
 R/RsquareAdj.R               |  22 +---
 R/adipart.default.R          |   9 +-
 R/adipart.formula.R          |  11 +-
 R/adonis2.R                  | 109 ++++++++++++++++
 R/anova.cca.R                |   2 +-
 R/anova.ccabyterm.R          |  36 +++++-
 R/anova.ccalist.R            |   2 +-
 R/anova.ccanull.R            |   2 +-
 R/as.fisher.R                |   4 +-
 R/as.mcmc.oecosimu.R         |  40 +++++-
 R/as.ts.oecosimu.R           |   5 +-
 R/betadisper.R               |  19 ++-
 R/betadiver.R                |   2 +-
 R/calibrate.cca.R            |   7 ++
 R/capscale.R                 |  64 +++++++---
 R/contribdiv.R               |   2 +
 R/dbrda.R                    | 252 +++++++++++++++++++++++++++++++++++++
 R/decorana.R                 |   6 +-
 R/designdist.R               |  13 +-
 R/deviance.rda.R             |   2 +-
 R/eigenvals.R                |  22 +++-
 R/eventstar.R                |   6 +-
 R/extractAIC.cca.R           |   6 +-
 R/fitted.capscale.R          |  18 ++-
 R/fitted.dbrda.R             |  54 ++++++++
 R/goodness.cca.R             | 118 ++++++++---------
 R/hiersimu.default.R         |   9 +-
 R/hiersimu.formula.R         |  10 +-
 R/inertcomp.R                |  60 +++++----
 R/intersetcor.R              |   3 +-
 R/lines.spantree.R           |   8 +-
 R/make.commsim.R             |  86 ++++++++++---
 R/metaMDS.R                  |  16 ++-
 R/metaMDSdist.R              |   5 +-
 R/metaMDSiter.R              |  37 ++++--
 R/monoMDS.R                  |   6 +-
 R/mso.R                      |   4 +-
 R/multipart.default.R        |   7 +-
 R/multipart.formula.R        |  10 +-
 R/nesteddisc.R               |  28 ++---
 R/nobs.R                     |   4 +-
 R/oecosimu.R                 |  17 ++-
 R/oldCapscale.R              |  30 +++++
 R/ordiGetData.R              |   2 +-
 R/ordiR2step.R               |  20 ++-
 R/ordiareatest.R             |   8 +-
 R/ordiarrows.R               |  18 ++-
 R/ordibar.R                  |  80 ++++++++++++
 R/ordicluster.R              |  19 ++-
 R/ordiellipse.R              |  91 +++++++++++---
 R/ordihull.R                 |  77 +++++++++---
 R/ordisegments.R             |  11 +-
 R/ordispider.R               |  20 ++-
 R/pcnm.R                     |  14 +--
 R/permustats.R               |  86 +++++++++++--
 R/permutest.cca.R            |  43 +++++--
 R/plot.betadisper.R          |  87 ++++++++++---
 R/plot.isomap.R              |   6 +
 R/plot.spantree.R            |  16 ++-
 R/plot.specaccum.R           |   6 +-
 R/points.orditkplot.R        |   7 +-
 R/postMDS.R                  |   7 +-
 R/predict.rda.R              |  58 ++++++---
 R/print.capscale.R           |   9 --
 R/print.cca.R                |  47 ++++---
 R/print.simmat.R             |  10 +-
 R/print.varpart.R            |   6 +-
 R/print.varpart234.R         |  10 +-
 R/print.wcmdscale.R          |   2 +
 R/rarecurve.R                |   2 +
 R/renyi.R                    |  12 +-
 R/scores.betadisper.R        |   4 +-
 R/scores.rda.R               |  16 ++-
 R/simpleRDA2.R               |  19 ++-
 R/simulate.nullmodel.R       |  18 +--
 R/simulate.rda.R             |  11 ++
 R/smbind.R                   | 170 +++++++++++++++++++++++++
 R/spenvcor.R                 |   4 +-
 R/stressplot.wcmdscale.R     | 113 +++++++++++++++--
 R/summary.ordiellipse.R      |   4 +-
 R/summary.ordihull.R         |   2 +-
 R/text.orditkplot.R          |  11 +-
 R/update.nullmodel.R         |   1 +
 R/varpart.R                  |  58 +++++++--
 R/varpart2.R                 |  15 ++-
 R/varpart3.R                 |  15 ++-
 R/varpart4.R                 |  15 ++-
 R/vegan-defunct.R            |  74 ++++++++++-
 R/vegan-deprecated.R         | 188 +--------------------------
 R/wascores.R                 |   6 +-
 R/wcmdscale.R                |  22 +++-
 R/weights.rda.R              |   4 +-
 build/partial.rdb            | Bin 0 -> 21794 bytes
 data/BCI.env.rda             | Bin 337 -> 705 bytes
 data/BCI.rda                 | Bin 8503 -> 8759 bytes
 inst/NEWS.Rd                 | 216 +++++++++++++++++++++++++++++++
 inst/doc/decision-vegan.R    |  30 ++---
 inst/doc/decision-vegan.Rnw  | 141 +++++++++------------
 inst/doc/decision-vegan.pdf  | Bin 342272 -> 340954 bytes
 inst/doc/diversity-vegan.pdf | Bin 365179 -> 365970 bytes
 inst/doc/intro-vegan.R       |  42 ++++---
 inst/doc/intro-vegan.Rnw     |  16 +--
 inst/doc/intro-vegan.pdf     | Bin 234604 -> 238817 bytes
 inst/doc/partitioning.pdf    | Bin 137301 -> 137524 bytes
 man/BCI.Rd                   |  76 ++++++++---
 man/MDSrotate.Rd             |  28 ++++-
 man/RsquareAdj.Rd            |  17 ++-
 man/adipart.Rd               |  32 +++--
 man/adonis.Rd                | 198 +++++++++++++----------------
 man/anosim.Rd                |   4 +-
 man/betadisper.Rd            |  49 +++++++-
 man/betadiver.Rd             |  21 ++--
 man/capscale.Rd              | 204 +++++++++++++++---------------
 man/cca.object.Rd            |  65 +++++++++-
 man/commsim.Rd               | 157 +++++++++++++----------
 man/decorana.Rd              |  14 +--
 man/designdist.Rd            |  38 ++++--
 man/envfit.Rd                |   2 +-
 man/isomap.Rd                |  12 +-
 man/linestack.Rd             |   2 +-
 man/metaMDS.Rd               |  71 ++++++++---
 man/monoMDS.Rd               |  37 ++++++
 man/mrpp.Rd                  |   4 +-
 man/multipart.Rd             | 101 +++++++++------
 man/nullmodel.Rd             | 126 ++++++++++++++++---
 man/ordiarrows.Rd            |  18 ++-
 man/ordihull.Rd              | 118 +++++++++++------
 man/ordipointlabel.Rd        |   8 +-
 man/ordistep.Rd              |  39 +++---
 man/ordisurf.Rd              |   2 +-
 man/orditkplot.Rd            |  20 +--
 man/ordixyplot.Rd            |   2 +-
 man/permustats.Rd            |  63 +++++++---
 man/permutest.betadisper.Rd  |   4 +-
 man/plot.cca.Rd              |   2 +-
 man/predict.cca.Rd           |   3 +-
 man/spantree.Rd              |  15 ++-
 man/specaccum.Rd             |   6 +-
 man/stressplot.wcmdscale.Rd  |  14 ++-
 man/varpart.Rd               | 144 ++++++++++++++-------
 man/vegan-defunct.Rd         |  38 ++++--
 man/vegan-deprecated.Rd      |  72 +----------
 man/vegan-internal.Rd        |  18 +++
 man/wcmdscale.Rd             |  36 ++++--
 src/nestedness.c             | 138 ++++++++++++++++----
 vignettes/decision-vegan.Rnw | 141 +++++++++------------
 vignettes/intro-vegan.Rnw    |  16 +--
 152 files changed, 3965 insertions(+), 1774 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
index 861e15d..b13c131 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,12 +1,13 @@
 Package: vegan
 Title: Community Ecology Package
-Version: 2.3-5
-Date: 2016-04-08
-Author: Jari Oksanen, F. Guillaume Blanchet, Roeland Kindt, Pierre Legendre, 
-   Peter R. Minchin, R. B. O'Hara, Gavin L. Simpson, Peter Solymos, 
-   M. Henry H. Stevens, Helene Wagner  
+Version: 2.4-0
+Date: 2016-06-15
+Author: Jari Oksanen, F. Guillaume Blanchet, Michael Friendly, Roeland Kindt,
+   Pierre Legendre, Dan McGlinn, Peter R. Minchin, R. B. O'Hara,
+   Gavin L. Simpson, Peter Solymos, M. Henry H. Stevens, Eduard Szoecs,
+   Helene Wagner
 Maintainer: Jari Oksanen <jari.oksanen at oulu.fi>
-Depends: permute (>= 0.8-0), lattice, R (>= 3.0.0)
+Depends: permute (>= 0.9-0), lattice, R (>= 3.0.0)
 Suggests: parallel, tcltk, knitr
 Imports: MASS, cluster, mgcv
 VignetteBuilder: utils, knitr
@@ -16,6 +17,6 @@ License: GPL-2
 BugReports: https://github.com/vegandevs/vegan/issues
 URL: https://cran.r-project.org, https://github.com/vegandevs/vegan
 NeedsCompilation: yes
-Packaged: 2016-04-08 11:49:44 UTC; jarioksa
+Packaged: 2016-06-15 08:01:21 UTC; jarioksa
 Repository: CRAN
-Date/Publication: 2016-04-09 00:53:13
+Date/Publication: 2016-06-15 15:38:31
diff --git a/MD5 b/MD5
index e9572c0..6a8690f 100644
--- a/MD5
+++ b/MD5
@@ -1,10 +1,11 @@
-262e49ed1a1d0652c5e0fcf01219bc03 *DESCRIPTION
-f03b4540cc1c0730cd76b342676a07ae *NAMESPACE
+913b4eb81e502abb0b231510783fb52c *DESCRIPTION
+ad7596a4d60e6ab02877ab7a75271d33 *NAMESPACE
 4b8531b446af54510e5fb31f841aed2f *R/AIC.radfit.R
 e9814d051bdf49db5606399ac3a4704e *R/CCorA.R
-6d37bd49a8a0048cbb7ff8f49487ba63 *R/MDSrotate.R
+da0ea7ae9e283690809aba8077a84671 *R/GowerDblcen.R
+1f6d929d13d2150d2dc023f997a1c9cc *R/MDSrotate.R
 1a95c5873b1546683487e17aae4fe511 *R/MOStest.R
-8b2a65acb851c032affb73e3897e926c *R/RsquareAdj.R
+e3142dcdfd4d5e7d2b2c653ea8e8ff06 *R/RsquareAdj.R
 4ff8f584b8619b4262b0453c178b4e4c *R/SSarrhenius.R
 b16c440e6425bb8275af82740c88b06c *R/SSgitay.R
 5c50828bd1466847e54ffd44ddf62df0 *R/SSgleason.R
@@ -13,31 +14,32 @@ d80688d78aba3cd9367ffaaaec6ec252 *R/TukeyHSD.betadisper.R
 7119c3a30046011fc164ebde2dd624bc *R/add1.cca.R
 0f953ea124a0d579b79d32974961ec87 *R/ade2vegancca.R
 3fea698281bc0b4c3a5ad26f4d44d0e2 *R/adipart.R
-385380b5137c957be9f03e464100faa4 *R/adipart.default.R
-05387ee9e552fcec123b4b922e837eaa *R/adipart.formula.R
+03fbe03df3167289ed34307a258359cc *R/adipart.default.R
+6e0cc43b676b69a43ca460c886da2f28 *R/adipart.formula.R
 23af28a7ddb2957549409a1e80481b82 *R/adonis.R
+3da02c8549ac302efdccf84f99de44c7 *R/adonis2.R
 7331b1428563ed9add00b088b50617fd *R/alias.cca.R
 4bdae1555a954b5266ac30300783ffd8 *R/anosim.R
 a4f23289c4a5eab2a3587292b306d497 *R/anova.betadisper.R
-ebcfa2c586bdb2d7f4756a44670fcb89 *R/anova.cca.R
-e5ca7c161ec8be59955e3c761828417c *R/anova.ccabyterm.R
-cd3fff81e381b3eb800e36042f766252 *R/anova.ccalist.R
-41393fc55cc5e6a0ec3f12e18e086fd2 *R/anova.ccanull.R
+74c2e9a04d759679b59363f641c91ba7 *R/anova.cca.R
+f6891cf038af4209dfdf9eac1ab4de08 *R/anova.ccabyterm.R
+cd89ea579a34db076f699d05645a7b8e *R/anova.ccalist.R
+4f82772f1ce6f20d1b2485138aaa7609 *R/anova.ccanull.R
 7fab08bcc596df60a22c4b04c8507121 *R/anova.prc.R
-6fb2bf929aed44ef41bfd4dfc6e010cc *R/as.fisher.R
+eded8dec556ee875d56887859b0abee8 *R/as.fisher.R
 66c29064fff4854203ab2cd50e661558 *R/as.hclust.spantree.R
-eee49f0fce625a3b8266b82f05c86aac *R/as.mcmc.oecosimu.R
+d128010944f6a304a1255f04355df2f6 *R/as.mcmc.oecosimu.R
 cfaa7dc7d3f45c29e59e83326f1371d4 *R/as.mcmc.permat.R
 71fe13d6613d600ccb8b5894a55b87a3 *R/as.mlm.R
 340f6a922578c8965803c63e08c5abbf *R/as.mlm.cca.R
 ec4c60b2bae11116b56946b136a78ed0 *R/as.mlm.rda.R
 a7f01bd69394d5554cf10279a2690080 *R/as.preston.R
 50aa2e700cc26301885d9ef7ca7f12c4 *R/as.rad.R
-02981974a248d55081272a40e038c8c8 *R/as.ts.oecosimu.R
+a9297920fdeeb053cfe57930118ca20c *R/as.ts.oecosimu.R
 704239604b0ed8420cb22a31b28a01d3 *R/as.ts.permat.R
 fbec6d133dea10372ce082c7035a8ab2 *R/beals.R
-72c63138e0fdd6c5a6e37bb0e24fa95e *R/betadisper.R
-a57f498db59e6daea477a052b7f8d901 *R/betadiver.R
+4a231d3e36f8eb21bc6ef032bbb8ce3a *R/betadisper.R
+2943ef31dcda3c6189e1ffda42641aba *R/betadiver.R
 46ae3f75a0b483fecab589637d72a307 *R/bgdispersal.R
 4603ea944d470a9e284cb6cab6d75529 *R/bioenv.R
 68f7dc3b2c6c698c3288605c90f5cd80 *R/bioenv.default.R
@@ -54,9 +56,9 @@ b5a4874f7763f1793b3068eec4e859d5 *R/bstick.default.R
 2ad6f2071ad822c631e04705df2b245c *R/bstick.princomp.R
 b98443c6f47168edc9fd96e8a30c82e1 *R/cIndexKM.R
 a6df607186ceb18d204494b6a33816d4 *R/calibrate.R
-0f9d81a8d4474972499f42a2e9ddf96c *R/calibrate.cca.R
+d66bb19eac276433e8ae56c2bb106e48 *R/calibrate.cca.R
 f56b52d53b17c7dc8d8c9accd5a3401a *R/calibrate.ordisurf.R
-5396cfd628eccfbba4820aef5c9ed7d9 *R/capscale.R
+a56b4c5eafc2fb9f7c27b4f577dac731 *R/capscale.R
 52b06d758d53934d0b67b4e7653dc3dd *R/cascadeKM.R
 2e09a82ec52e211afc2ac6e8d4b40898 *R/cca.R
 fe3b7320b434d46d2308d880ef26787a *R/cca.default.R
@@ -69,14 +71,15 @@ c66d8fbe69ccca94f2ee8f777ff16ae2 *R/checkSelect.R
 6b212de1ac255f3d8157fd8f8801aeca *R/coef.rda.R
 855938510011daed294a819e40c7dfb8 *R/commsim.R
 397c7044d52639a5a0ce2557638de486 *R/confint.MOStest.R
-d8351b54b16d7327adb545d86fcdac5e *R/contribdiv.R
+191ebc7278f8bdc69ae353d1c02a1d60 *R/contribdiv.R
 e0449c3666763adaef0b70a5fffc864c *R/cophenetic.spantree.R
 edee3aaced61290b219985d0ce69155c *R/coverscale.R
-61f5010b6412918cc9e25bc1a8fdd9d6 *R/decorana.R
+a3b446f23cacb17801c8b5c3ae7cab8e *R/dbrda.R
+0732c6a49ad12d5cc56aeced9bd47714 *R/decorana.R
 c22bdcfe87e2bf710db3b301d880a54a *R/decostand.R
-e5b54fa580331ab24d28dc59110c45fe *R/designdist.R
+e450e8f1e80659cedc734f0931ba5059 *R/designdist.R
 8fb0105cb21a5e425f72e0085fa669a2 *R/deviance.cca.R
-52f41185396ddd9acdcee9df7298d65a *R/deviance.rda.R
+c8b0d7a7a0ab293b4b215d9e1967351e *R/deviance.rda.R
 1898b0d7b1ee3f860ab52aeb525839b8 *R/dispindmorisita.R
 50948e10cb607ffc89adc46d9335d640 *R/dispweight.R
 cafeabc2133997b3381c9edf6a971abf *R/distconnected.R
@@ -84,7 +87,7 @@ cafeabc2133997b3381c9edf6a971abf *R/distconnected.R
 00136d6f9d6dd5ca16db7633eceb4e92 *R/downweight.R
 54fbab991f938a0c3e9353121cc34fec *R/drop1.cca.R
 79c66a10794aacaf08f1d28192228bea *R/eigengrad.R
-f09f2e7694c28ed5c377145443edb301 *R/eigenvals.R
+3481fc55ba439f64100b3954f2e775a3 *R/eigenvals.R
 17a62527ee103c09bfba0c851ab12560 *R/envfit.R
 abdc99957cd34d0c5f79ca1d9dd68c68 *R/envfit.default.R
 1ef64854841e194d35484feffe7914e5 *R/envfit.formula.R
@@ -93,35 +96,36 @@ abdc99957cd34d0c5f79ca1d9dd68c68 *R/envfit.default.R
 cf0a0bf7116ef7a21e090d0c1a76f8d0 *R/estimateR.data.frame.R
 1ae8328aa077ec86d41eb74d12270bb2 *R/estimateR.default.R
 1df3194c88598964282c114cb8db5513 *R/estimateR.matrix.R
-8a07a85be771af60a831d8b4ed3c8973 *R/eventstar.R
-5ad3db71edac392b0513ccb96700af0d *R/extractAIC.cca.R
+c50bc8ea278de3acaf5a4f097a6db58e *R/eventstar.R
+a184abc0cc159b4c71f7e62cf6c7034c *R/extractAIC.cca.R
 abb000be405120e4e8d0f562d74af473 *R/factorfit.R
 7e304b1c384c4d8588e5dbedd9459c73 *R/fieller.MOStest.R
 ee8330855e6a7bc2350047d76b2209a4 *R/fisher.alpha.R
 2776f68ef40e177303c3b73163036969 *R/fisherfit.R
 15075c7f443896f54b5e0befd21c17bf *R/fitspecaccum.R
-55db9d51c5dcefafff9b772b6862e565 *R/fitted.capscale.R
+e21af71caed12a2e9ac2c84fb1e35a0d *R/fitted.capscale.R
 ee2e3daa463fb46ffce01206f8b44fa5 *R/fitted.cca.R
+000a71e4b3ec565a541325caac6efb11 *R/fitted.dbrda.R
 0080b65cfd48bac5e53961b8e12682e5 *R/fitted.procrustes.R
 892cc9bf94b232f6a5c2936ea4f63592 *R/fitted.radfit.R
 4fa5eb43fa72a6e09773888ba01d4668 *R/fitted.rda.R
 9af5bead3fcebce9c225681044652ac1 *R/gdispweight.R
 76b1ffb784bab6671ebaa51c3b4bdb0b *R/getPermuteMatrix.R
 57c9a7ccff6a9c066b2aba3475f2330b *R/goodness.R
-aa3a75ac055af0ac8cf6347c87b2cf3b *R/goodness.cca.R
+896f837466216f664562c3d956ca0cb2 *R/goodness.cca.R
 5364f16346047d3b8719ddef653a70bb *R/goodness.metaMDS.R
 8a767726c40223a58d4055759bf41efe *R/head.summary.cca.R
 d17f4f6be45b52e01cd605b09b56a80a *R/hierParseFormula.R
 3d19236ee5dd2f1c678061773895e86f *R/hiersimu.R
-3ba5159beba75e010720f18116fbd919 *R/hiersimu.default.R
-edf53c3358944421756412b991432bd7 *R/hiersimu.formula.R
+cf72f3b9b00b81e863abd542893e6e01 *R/hiersimu.default.R
+848772d209bbabe3515a9b4c7fcd15be *R/hiersimu.formula.R
 d10f4168f5119180dfd4a7bf57f444d6 *R/howHead.R
 d02fc9c672a9b2c4a31065702a3381be *R/humpfit.R
 1637bd10b39801c14b65656f29dafcf1 *R/identify.ordiplot.R
 9e731fa2cfb821bbe7ed62336d5fa3b3 *R/indpower.R
-6d30d57bbf47d448d0c267964ad7233a *R/inertcomp.R
+7ac7eeb57f0f514cf4d05f757bd48a70 *R/inertcomp.R
 bf423cb7cf07abc3a4c64229bcc8fc14 *R/initMDS.R
-e999e62071e5f5432881f40fbbb6d4c6 *R/intersetcor.R
+5d5904a8fe3090744de68cc871241b38 *R/intersetcor.R
 c63972a171f76f92652feeb2daf30e82 *R/isomap.R
 1e167e69edcee4aa651d97bef81b31e9 *R/isomapdist.R
 5abdcd58cf3811e482543d5207114331 *R/kendall.global.R
@@ -132,61 +136,63 @@ f1d30acca998f0fe17e8363203f1b920 *R/lines.permat.R
 eb4e11e71eeefa6ec64e4a2580b8af75 *R/lines.prestonfit.R
 27a5c4e66e0c7d54f458904b596cc7e1 *R/lines.procrustes.R
 39604c069428cda7c9d2ed199ac4e28a *R/lines.radline.R
-9a9366b4e132861f1671e5617930b012 *R/lines.spantree.R
+66d749d120812f26c7bb7a50e014e0b6 *R/lines.spantree.R
 34d6aa49317a7ed2dcf2a598252efa3b *R/linestack.R
 1dcc7e0504b5468a3bb2253924901e50 *R/make.cepnames.R
-8a269b68b5abd56f36fdb59a8c142210 *R/make.commsim.R
+68f06098a78ccef4c962d97a5e433b9f *R/make.commsim.R
 f25f916c75667aa6eb64136817e79256 *R/mantel.R
 fdb2f4786b31866197c80d827584edaf *R/mantel.correlog.R
 3e005ec1cc5a4231ee7c470cb30df01b *R/mantel.partial.R
 e054f13ad65a7f2616561c73557b412b *R/meandist.R
-0f8b31e3a2303b80dac2f68784745323 *R/metaMDS.R
-4b0744e9e9fa385991c9cafcf0abd222 *R/metaMDSdist.R
-0b5c1f0bdf937223613e4c6b6e74764e *R/metaMDSiter.R
+f7df1eb786cb44781111ce82fb67308b *R/metaMDS.R
+77c522a4c1174e0045c6b0e7a4dfb1d1 *R/metaMDSdist.R
+22829153822a85b138e67e6657122d48 *R/metaMDSiter.R
 f63315501ad2f3a96dee9ee27a867131 *R/metaMDSredist.R
 928df675822d321e4533ba2b7cf0c79f *R/model.frame.cca.R
 9406148bd2cfa3e74b83adfe24858c46 *R/model.matrix.cca.R
-443471a651954a1869401c12d3936fe9 *R/monoMDS.R
+f8393616833643a1e45ef000b6ab878d *R/monoMDS.R
 6d34db023ade607438d62d041a14fb12 *R/mrpp.R
-bb6dfd6874ec47d875dc3042e822e459 *R/mso.R
+16130eb8f8dd845b2e3cd4c5609a53fa *R/mso.R
 7e428f1adfdae287a1d64a79c6f2c3bc *R/msoplot.R
 7c219818ce5841e957db47f16986080b *R/multipart.R
-9b638597fb52736dc7c3b1c1e31f8726 *R/multipart.default.R
-4f3e2c82d5783c04f9a50761c82e2f02 *R/multipart.formula.R
+53ecea3bc1493624156b0fa4cf15f61f *R/multipart.default.R
+29b7db2c1d5a97651bacf96ee4f8baaf *R/multipart.formula.R
 f5e79cb1c2dc1fcabb6e6b5cb4dc0828 *R/nestedbetasor.R
 6100179a10b511a93105c40ac194b088 *R/nestedchecker.R
-61fd2dd194d36e49f46c56dde858a607 *R/nesteddisc.R
+8710b8b289de29493cf03f917015713a *R/nesteddisc.R
 20cd3b812b25a8012ea80be97b12520a *R/nestedn0.R
 36f7ec0897cd6517784bc7dcb12ce827 *R/nestednodf.R
 e7ddbbc85fd1a656e4343f5469621124 *R/nestedtemp.R
 74b2723851155de631716fa479f8ea38 *R/no.shared.R
-47973ff187f68836a19d20ea37c60868 *R/nobs.R
+e32f7eed2a94f7b20ed48598b79b3003 *R/nobs.R
 9c89764ae10460148c1dcf9d25e05649 *R/nullmodel.R
-a9b2db1c561cef462be0939cdcc2b090 *R/oecosimu.R
+80ff8785e21cd7a4a1362c46d3536f3e *R/oecosimu.R
+ed3928f1675b3211ddd239f6144b1f9e *R/oldCapscale.R
 7b3988a207ecfe1ea574c5857ffcd2a3 *R/orderingKM.R
 fe4f72fa1928f93c92840af2ae08b52e *R/ordiArgAbsorber.R
 ffd002ae0ed83062dabb963f02f3c854 *R/ordiArrowMul.R
 1f50c54490f6cbfa7300255219939ccb *R/ordiArrowTextXY.R
-1d06482d11f1e2ebd41a61a07ad97dd1 *R/ordiGetData.R
+dcb0331100aee15cee54c25c241ffd35 *R/ordiGetData.R
 99c1ec285e2afe4fb8beccbd507a123e *R/ordiNAexclude.R
 045e89399470e8c72a875570e1229bf2 *R/ordiParseFormula.R
-c27474e73a1f151539f5a663bd390c7f *R/ordiR2step.R
+477035081490c768593968825b58b831 *R/ordiR2step.R
 7757339f5b8899cb54f13da274abda66 *R/ordiTerminfo.R
-19f1264c85545c30ab0dbe044e8689cd *R/ordiareatest.R
-e06d56a6e7d47767b9e71a73cbd3a80b *R/ordiarrows.R
+a1a229f63a785534a5135815564d4581 *R/ordiareatest.R
+a7f82442062699d56ea176a7d87e9ac4 *R/ordiarrows.R
+2b2c4ee6b157be9425eff260647b5c39 *R/ordibar.R
 85f3047b80ab9a2ea57dd7935d07b583 *R/ordicloud.R
-fbac9ffef8d6734caaed82577e3c672e *R/ordicluster.R
-15f64711cfb3e9800f0b6aaedf72735b *R/ordiellipse.R
+bb7416f58eebbdb1e87127cefa2e70d5 *R/ordicluster.R
+04da9ccfd4ac2a36a08bb7911b01cf0c *R/ordiellipse.R
 c253906529c77aead16b293275f1afc3 *R/ordigrid.R
-b71c80707ad2e1160f2480b5c1787c12 *R/ordihull.R
+e28c083009568ab3cc82d5fab2ba109b *R/ordihull.R
 208b0fe7875e5c79cd400970406a44ce *R/ordilabel.R
 20a6f500c31e6640de535f0730b69a55 *R/ordilattice.getEnvfit.R
 c805e6801ef30758b9f7718269cabdfc *R/ordimedian.R
 55d2f18681d065ea6dd29e0f1e64772e *R/ordiplot.R
 4f4fd722823b3825edd1b5c1fdb6888c *R/ordipointlabel.R
 e57a2b904e572829a5fd97f5b6576644 *R/ordiresids.R
-41949d4c859f08bc5a978791d387d1a4 *R/ordisegments.R
-03fd63d78c762930fd87ec787d9d4bac *R/ordispider.R
+247ea2a9441bf23573e6ee169bad16b3 *R/ordisegments.R
+a36e6b384a4e198d0466fb0a883102aa *R/ordispider.R
 1de439b5ffaf18640e08fadcaf7193ee *R/ordisplom.R
 c8316cf02745d66e730cdd6b9d62375e *R/ordistep.R
 a6108f550b749db7139b143cc9e36c9c *R/ordisurf.R
@@ -196,18 +202,18 @@ bc3671e5b7a30e2849d3b59f65783c97 *R/orditorp.R
 9fe401c201c03826754ec5613f6ecd71 *R/panel.ordi.R
 94ff61e031b751b9594400a51decc13b *R/panel.ordi3d.R
 3bab50920d7e58e2bf0b96a6b874cd9d *R/pasteCall.R
-baeed5122d5d27cc47b3fc00068895a8 *R/pcnm.R
+dda3814acdd10fc04554cb0a17c6b1ca *R/pcnm.R
 b5b164724f3872370bff36ef767f8efb *R/permatfull.R
 eeeaf4245033bd2a4ce822c919e42c6e *R/permatswap.R
-5d610edd65b5f89db90704e191ed9100 *R/permustats.R
+2b1a9af8b638ed1e2138267237d6a497 *R/permustats.R
 3d6a5ecd5feab93db30c063fd144d422 *R/permuted.index.R
 d2b4ce957bcc6376391f045b7046c697 *R/permutest.betadisper.R
-4230453c5955f4e777b20a8cd916efa7 *R/permutest.cca.R
+42e69f6ffd88f0840cbc512a9d618fd4 *R/permutest.cca.R
 b4e77b98f86c4b567d687b64e3aa8812 *R/persp.renyiaccum.R
 b499c6eea710aa0c65a580dba30f2914 *R/persp.tsallisaccum.R
 f7c8d52c791489d956a7fd833913f242 *R/plot.MOStest.R
 5334397cbe3c0188928c4bd3146e118d *R/plot.anosim.R
-58d63201c7f61f455b0394b7a0e1235d *R/plot.betadisper.R
+d22b613aa14eb3dedb83eaa752435b21 *R/plot.betadisper.R
 de416206dba5566d70080bc14e86382e *R/plot.betadiver.R
 9023fd332556779fe9405e0714ec5dae *R/plot.cascadeKM.R
 4981e07d470a1fc9843357dc8f453e19 *R/plot.cca.R
@@ -217,7 +223,7 @@ dc793c47c821ba2961bf284132997ba3 *R/plot.contribdiv.R
 6295a9cb63408e65a01a0faf2635e90a *R/plot.envfit.R
 10bf121300b684a8173f680de54f452a *R/plot.fisherfit.R
 9a4f1746e6e5b80b48994f404e72eb74 *R/plot.humpfit.R
-ed258eefbe3facce3533a16395217fab *R/plot.isomap.R
+23154f6b1812303cbfbfde78711d465a *R/plot.isomap.R
 55a0851dbcb231f65f8da29e2b904780 *R/plot.mantel.correlog.R
 75636876eb91eb8098c807a2ea508efb *R/plot.meandist.R
 aeddafb96f777e9905a1c4660ed3cadc *R/plot.metaMDS.R
@@ -237,8 +243,8 @@ fc2dc1b63ae6f50067a7a376c736394b *R/plot.radfit.R
 360dec911e8d4e772f888d89b8e0f6f7 *R/plot.radline.R
 08f6b41506125e27b37a08b3bb730ffb *R/plot.renyi.R
 20893b15e8b9db8b2282fef8c63299fa *R/plot.renyiaccum.R
-7cc22df38f8c928da73cd37c93e596f3 *R/plot.spantree.R
-d668dd7351acfcbfa686fea4ddb85218 *R/plot.specaccum.R
+2c7dbfef95af275ebe0d3ba551dc3fb5 *R/plot.spantree.R
+210d611cdfc9b637dceea1d79078cd51 *R/plot.specaccum.R
 abc96c8853871035d494dfa9086d4d6e *R/plot.taxondive.R
 6104fadf391072e78a8f2825ac41ceb2 *R/plot.varpart.R
 00d109fe7fc29440698b9f1a4bbc876f *R/plot.varpart234.R
@@ -247,18 +253,18 @@ abc96c8853871035d494dfa9086d4d6e *R/plot.taxondive.R
 b5661457c540b56e77eba97b8b290a91 *R/points.humpfit.R
 a0e1e2d579fa8c1992a26a2e8d435750 *R/points.metaMDS.R
 a54bcddf1b7a44ee1f86ae4eaccb7179 *R/points.ordiplot.R
-e352171f478eb27cf4a875cc3a1693fc *R/points.orditkplot.R
+e0ff3ea4aa76f85a87c3d8f559df18f0 *R/points.orditkplot.R
 d30277aec2fdeea8473e00ebe318be0a *R/points.procrustes.R
 80d9cee7ff1fa7ee8cb18850711a14b2 *R/points.radline.R
 06defcf59464ba92af271dca87943029 *R/poolaccum.R
-91aa7fd2fbd99f8e325932d59886dac7 *R/postMDS.R
+ad8536ec52efcda9e9824ad442fab2bd *R/postMDS.R
 f9dcd972e5c81ce936c9ec5b296d484c *R/prc.R
 ca38da30d3e3d1e91d5534ec6b885834 *R/predict.cca.R
 049f41cca1b39bf0a221723855cffcff *R/predict.decorana.R
 ca99e94ed4bc39135b84f61ed64bf9fd *R/predict.fitspecaccum.R
 06cca728e43d29da2528b01dccb26962 *R/predict.humpfit.R
 3eaaaf25580077e7dff217c3f237e37a *R/predict.radline.R
-fe5ea19cd8e2f6bf7cfa822b58ff9ece *R/predict.rda.R
+02545ea3fcfb0989266ad7fe6b260ded *R/predict.rda.R
 6b10b84b569e5eed32629912b19e9c8b *R/predict.specaccum.R
 4f56d16f5bf8f9af3477c23137a70fb5 *R/pregraphKM.R
 81bb150e264f7da07989c909f4531a45 *R/prepanel.ordi3d.R
@@ -270,8 +276,7 @@ fe5ea19cd8e2f6bf7cfa822b58ff9ece *R/predict.rda.R
 dbce7c674b2e53109295fc280e96356c *R/print.anosim.R
 a530724906dc69888c27a538fc388cbf *R/print.betadisper.R
 2f1f189154aec84360211e3ae195693d *R/print.bioenv.R
-528c225f34769670a4a3049a0e29ae59 *R/print.capscale.R
-3b989d330c0f95eff362eee77405b2c7 *R/print.cca.R
+a80794e01fc52d56c1eec6ca5273ee2e *R/print.cca.R
 a88f54aacef2ff5cdfa99215de346349 *R/print.commsim.R
 6d0cd7929afcbe0d192c980dc5196555 *R/print.decorana.R
 65e888e34fa8a8e1d5b577fbadb3161a *R/print.envfit.R
@@ -302,7 +307,7 @@ e6b0897025e574a25a533aaabe8f6e5f *R/print.protest.R
 480adb7c75b99e469b5600c62aa8d12d *R/print.radfit.R
 8b1a0e667c16cbd376a88962cb3baf19 *R/print.radfit.frame.R
 a589e795a078e79a758c1c848b728be3 *R/print.radline.R
-d64b127c8d733171d69f09f54c756e7b *R/print.simmat.R
+f6d57fffc3ac828f47cf1f4006141602 *R/print.simmat.R
 738123a37474bd6cc4b5a142f46527c2 *R/print.specaccum.R
 aacebed613c7a8b259424efc39b4e061 *R/print.summary.bioenv.R
 e5b625997dd416b43f2635501e486712 *R/print.summary.cca.R
@@ -316,10 +321,10 @@ db1dc929d679ce1641c6a7d319091e2c *R/print.summary.permat.R
 0e4bd2b0b5395b17365888876460fe85 *R/print.summary.procrustes.R
 148aa3651ac7d6a0fcc2b5f3dfb98d9f *R/print.summary.taxondive.R
 0511b5e09f1af985431cac7c1b74a8cf *R/print.taxondive.R
-84296d0a0ad35ed90252ef66ce239c19 *R/print.varpart.R
-07989e7126de6949c281631c075282da *R/print.varpart234.R
+37d66480c803e43434e7242f20d4166a *R/print.varpart.R
+8c2e8161fd7de24c4b5dc04daec52d0f *R/print.varpart234.R
 0001f633e4db1a1498820b0b0b45deac *R/print.vectorfit.R
-8917f5ef5398c984e0e2675c83e74c5c *R/print.wcmdscale.R
+4ea92396a5e70ac3f152ba0353c574ee *R/print.wcmdscale.R
 083d526f54611d40ce749ffe95f169ae *R/procrustes.R
 819af0297e5d0a907f7fa91319c67e96 *R/profile.MOStest.R
 2f6b69115ea549102dad9b1b22c88034 *R/profile.humpfit.R
@@ -334,7 +339,7 @@ b129148e6efbbe1c45482c93d66f959b *R/rad.null.R
 2f6d8082f39540bbe7cf0e0cf3c666c9 *R/radfit.default.R
 36cfb246e391a7434c714fbb2269cdb6 *R/radlattice.R
 7608e7f23ebe04e3a7ea6e5fe384c431 *R/rankindex.R
-4b9d8d8e7c684ec95d0676f6dabb354f *R/rarecurve.R
+f62b0ebf24922d65ae1ce7bbcab2473f *R/rarecurve.R
 05a28bb5983bafed95bd827dc6fdd2b0 *R/rarefy.R
 9c65025c61d4c25ce25234533e7b14c6 *R/rareslope.R
 d9a219ae6f3e6155ae76bc59d3e14d30 *R/raupcrick.R
@@ -342,14 +347,14 @@ d9a219ae6f3e6155ae76bc59d3e14d30 *R/raupcrick.R
 22c320cd450c1088dfb0a05e577b9a73 *R/rda.default.R
 90b562e8a94febce8430a344392a2943 *R/rda.formula.R
 66f9447f8ac8388ac02c39aa1f5db95a *R/read.cep.R
-ef65ea5fb624aef7e34284d932503876 *R/renyi.R
+c8b0d740574bc39b929d3216d19d3026 *R/renyi.R
 3af80e7b694a975fcaf69f53bba241eb *R/renyiaccum.R
 90a897e14094cc1eba66c5f59a5bb79c *R/residuals.cca.R
 38df11064481bc21f8555152cfd3d115 *R/residuals.procrustes.R
 3d4d41203c08b11773e55cffb0c34b75 *R/rrarefy.R
 c94cf53d2345f590de45c0f9db6fe272 *R/scalingUtils.R
 ed66f1e11f53f7fbdbd8663de2b7f5dd *R/scores.R
-d46cc2163dbc117a978f64d54df7bbd4 *R/scores.betadisper.R
+afdabd5d19efde6d0c383fc7562077ac *R/scores.betadisper.R
 341ee43f8524dccb5e369513a16923b1 *R/scores.betadiver.R
 6052d447e0b7e6a8605629055fd2c5d0 *R/scores.cca.R
 447810692c53fab8cd6907ec920e0852 *R/scores.decorana.R
@@ -361,16 +366,17 @@ e4b321e08dfaaf89bd548af364902738 *R/scores.ordihull.R
 f146575a3f60358567dfed56e8cbb2cd *R/scores.ordiplot.R
 512cedf50891372019cae370b240a742 *R/scores.orditkplot.R
 4755a38c8b83b76f123b8e84cf47b700 *R/scores.pcnm.R
-d703f32e3ad9459232643e2293edf9d4 *R/scores.rda.R
+27294f4a051e9f5eb8955493ee437b06 *R/scores.rda.R
 42e3e9222d18a33abb561bac1db5bc6f *R/screeplot.cca.R
 71a7f620655b068c3a53561fc16bfd39 *R/screeplot.decorana.R
 3fe910b739d447ba5026f077cb0c670d *R/screeplot.prcomp.R
 66d8c6dfecb51ca1afdf309926c00d08 *R/screeplot.princomp.R
 96e51f8fd78641579487ed079ee51170 *R/showvarparts.R
 9cb0fc3a15596992bff286c98c8f9650 *R/simper.R
-b35ee7d9cdc86eecefb5dcf478fc8abf *R/simpleRDA2.R
-6670475eff913b3586560d4b2ec65149 *R/simulate.nullmodel.R
-a5e793142ae74276a02e761cfe255f22 *R/simulate.rda.R
+82fb9f429cda2543095b52ccdc4bc3a1 *R/simpleRDA2.R
+f2d46ee718949e4a5bfb90d1c90142d4 *R/simulate.nullmodel.R
+da0d0878390e5a1661dc80c72577ec38 *R/simulate.rda.R
+f6a71e261975457650d1094d08329f4c *R/smbind.R
 9f235c650efc4217a3cc88996b627e1d *R/spandepth.R
 f4554cf72cc501fad09662c612b1c34c *R/spantree.R
 03330c53fd06adc913961918f0f55c32 *R/specaccum.R
@@ -378,11 +384,11 @@ f4554cf72cc501fad09662c612b1c34c *R/spantree.R
 4899dc4b2bcaf5c269cbd0f69cb3fc12 *R/specpool.R
 77cc19684e9ceb27500ca7f802923328 *R/specpool2vect.R
 d24743b3fb58c8a195608e814eeed02c *R/specslope.R
-2cf0545588fb2bb86185f71c21bda1c5 *R/spenvcor.R
+25c178b8a56af16bf12135dce7d226d8 *R/spenvcor.R
 33d884aae53dcc5fa80d9e9ffae4515e *R/stepacross.R
 bd2d1d998f18e7a9c65d5072932cbef2 *R/str.nullmodel.R
 301ba29a09201611845f7adb2b2d7d81 *R/stressplot.R
-f687d03b090a0962026ca60272ab90d5 *R/stressplot.wcmdscale.R
+3b089e8b36e218bb16023eefc4f5f162 *R/stressplot.wcmdscale.R
 55b28298153f00b4c1f8574b0784eb0c *R/summary.anosim.R
 19ce7c501fff2cacc8ad322fd0500a48 *R/summary.bioenv.R
 7fea8ad8bbbfb61348faaa76ea44725b *R/summary.cca.R
@@ -392,8 +398,8 @@ bf8be2e9b02c1a3cd5f3ad0005e8354c *R/summary.decorana.R
 4c9fc84fd70c4555d5b5bfc1768dc5a8 *R/summary.humpfit.R
 51d3b042e199d201235f10f3d4a57f70 *R/summary.isomap.R
 76171bbaa9984ffbb31cbdd9e1771f4c *R/summary.meandist.R
-76587e48a9cc631cf4e9f2e369099fce *R/summary.ordiellipse.R
-27c7f052d2d9674d898f0aa3d741a8c4 *R/summary.ordihull.R
+ac4d6166f7fc1c177503c364345e1278 *R/summary.ordiellipse.R
+1d7c7e8f3309004cf3e5cf782facd2f6 *R/summary.ordihull.R
 11578277712acd07ebb5f7c66c0a47b8 *R/summary.permat.R
 3ef798c28399894f4bf0ba649360e69e *R/summary.poolaccum.R
 4db5bd385c6c52e7c370647f0fc0abc8 *R/summary.prc.R
@@ -409,7 +415,7 @@ c103958b08a39e45f44ed5b55c380d25 *R/text.cca.R
 1f4d9ba97695c0fa99456f427114b049 *R/text.decorana.R
 6a6e426f6e464bb7bdaa75d92674562c *R/text.metaMDS.R
 974bdc93cd9b352d30debf3e93111136 *R/text.ordiplot.R
-846003f5f9de23241805042ac459ed1d *R/text.orditkplot.R
+dbc282f5aef3c9729d098b4fb80004b7 *R/text.orditkplot.R
 0fc7a75cf414d76cc751cc33ed5d6384 *R/tolerance.R
 7a3aedecb5fc64e07b919d867321a4ba *R/tolerance.cca.R
 48c49511d26ea0e18e752198ecde97ba *R/treedist.R
@@ -417,14 +423,14 @@ c103958b08a39e45f44ed5b55c380d25 *R/text.cca.R
 b7181b8d28c9da1019a0b2fe5f117e05 *R/treeheight.R
 26fffea5380da4106dfe1f97681524cd *R/tsallis.R
 45f807b2d58c564c147978ac72de8546 *R/tsallisaccum.R
-78a5b5292f78b0fd84b943dceddceb97 *R/update.nullmodel.R
-4a337cc0efb81cf89aa2aeafcf695b54 *R/varpart.R
-8d09b6b6390c2866234763beae855cf3 *R/varpart2.R
-77fef5d5355715c9928edd3b9995d415 *R/varpart3.R
-7f4f5d715a9b3e1a916f72ffbfebcc19 *R/varpart4.R
+b0941cf2b3322fcf2dd4d51578ee4e3c *R/update.nullmodel.R
+42056aae91bf89cc3d06cfb0627bec15 *R/varpart.R
+1d1539d371aecc720193877e1bc7b9ce *R/varpart2.R
+191e39bf769a4fcb31b26c7aef01670d *R/varpart3.R
+9094db1c78d5b86f45c9e8b63a1c71dd *R/varpart4.R
 1486696c7f5277981e34aa33c50945c9 *R/vectorfit.R
-6f433537ff5ce5811a0ca8c4ac4c729d *R/vegan-defunct.R
-593e3e9774284bfc0362a5c0b0b2fbcc *R/vegan-deprecated.R
+bf26b2480075f8f1357d5324391b527e *R/vegan-defunct.R
+1df623721f7674c43c13a696b3082249 *R/vegan-deprecated.R
 129a1cf5e913a365ffd679b63378811b *R/veganCovEllipse.R
 5656cc97f30992a5e02dba21b2846485 *R/veganMahatrans.R
 d52de59290b2eced0f79926ea655652c *R/vegandocs.R
@@ -432,16 +438,17 @@ da1732f7e84a448d32d929d00c9f2bc6 *R/vegdist.R
 b1855fd70cd8c70e9b480605297bcc63 *R/vegemite.R
 5d6047d7f63f04ae9ff40179c534aa0b *R/veiledspec.R
 4d0f113e697fb166ba912ac34b40b3dc *R/vif.cca.R
-322254f8bc3b02f7a971058cbdfa9edd *R/wascores.R
-860e4de36a01011c639b9eafd909b673 *R/wcmdscale.R
+a5fd2c591f54d57e52f9738d6239fe65 *R/wascores.R
+29f00372b408558f301bdb7847e6e386 *R/wcmdscale.R
 ecfd48e2f4df6bcd683a87203dd80e12 *R/weights.cca.R
 76c939e9fe7776e51eeabc1d44be83cb *R/weights.decorana.R
-73babeed9df14635d99b1a619a1286e4 *R/weights.rda.R
+9ed0ac2ab6e01950db0cc181a42e523f *R/weights.rda.R
 4138f57726620d493f218e5e3da0013c *R/wisconsin.R
 678368022e0d66a4fd7722ab6bcc8beb *R/zzz.R
+4d8569d7f6e105c0ec2bf1b1ba443226 *build/partial.rdb
 bf482d265609ebdc74921368caf131a8 *build/vignette.rds
-45a61c1583ecb67f7191a0b0be901a77 *data/BCI.env.rda
-0f283f2be37fdfec65ec6e5b0146889c *data/BCI.rda
+72cc26156e4b9ce7e2655c228ab87107 *data/BCI.env.rda
+0a5c36b1ebd892c80a84d0d6417e1d6f *data/BCI.rda
 412ea5cf443401fe54f0b14c14c45806 *data/dune.env.rda
 b0a8834b45c79fc017717838d700f0f6 *data/dune.phylodis.rda
 339a47050fe72465c659e82378562781 *data/dune.rda
@@ -455,51 +462,51 @@ ee3c343418d7cf2e435028adf93205f1 *data/sipoo.rda
 f87df84297865b5faf31e232e97a0f94 *data/varechem.rda
 7136b8666250a538d60c88869390a085 *data/varespec.rda
 1fb35aec7042529e18e4673818fecf7f *inst/ChangeLog
-82b7c27d3e1042056a3f85bcf8b20f35 *inst/NEWS.Rd
+04be21bf86569f2b3a6b1ef2dea9b9a2 *inst/NEWS.Rd
 9abfab8b05c34dd283379a7d87500ffb *inst/ONEWS
 b21accfb0a73075d80634960e461ac7e *inst/doc/FAQ-vegan.R
 f986cedbb0f80cd1bdc50b53a87154a2 *inst/doc/FAQ-vegan.Rmd
 9ddaa4c26242a0722273755cb80ec754 *inst/doc/FAQ-vegan.html
-e3e19be6e4226ef4b943c5dd46c3e161 *inst/doc/decision-vegan.R
-5d19c22cfbe24cb0b05cd31e46c5fc70 *inst/doc/decision-vegan.Rnw
-c7d9fa2da61c5116a9d3a447657ce01f *inst/doc/decision-vegan.pdf
+92fd39278aa2666ce4fec88c032b33ed *inst/doc/decision-vegan.R
+c910da720c0da49de1808c688a450206 *inst/doc/decision-vegan.Rnw
+c23ea6c3a6db32a51807a6201d4035ad *inst/doc/decision-vegan.pdf
 41fae44349a8a602825bddba8750102d *inst/doc/diversity-vegan.R
 06cfa11a83ca0330979d500549f2415a *inst/doc/diversity-vegan.Rnw
-bef1b2e7297f77c3279d0b1588516f8e *inst/doc/diversity-vegan.pdf
-42c6873fda4c73ed0ccdeddef41563b2 *inst/doc/intro-vegan.R
-ddee3279ac0982a3da0bcf9fc10947ac *inst/doc/intro-vegan.Rnw
-fe672cbf88fa43e7f6cf58d0252ba114 *inst/doc/intro-vegan.pdf
+52b38e91fa8bd7a2f232a9614552afa0 *inst/doc/diversity-vegan.pdf
+eff5665df3e4437135528b753d664a7f *inst/doc/intro-vegan.R
+6b35943a07c04a6afc77222b0e17b7f8 *inst/doc/intro-vegan.Rnw
+76a9e2ebf7276210b58ff429ed97010f *inst/doc/intro-vegan.pdf
 d56c6fb7eaff59b945aad2459b96bce7 *inst/doc/partitioning.R
 5b17ce6c86e3334b796a658328d426f9 *inst/doc/partitioning.Rnw
-937569b6523de607587795e92e2dc714 *inst/doc/partitioning.pdf
-fa807ecb01eb4910956e4e1f5f29c437 *man/BCI.Rd
+783e37c2b7cd7d42f49a0d93c759cb36 *inst/doc/partitioning.pdf
+1f5ef97a44dee865f33b8eadb2e206c1 *man/BCI.Rd
 d4d97e3b71561f61bd9f1f0686a57434 *man/CCorA.Rd
-e540cd18b5f99d385a8d2945052dc70e *man/MDSrotate.Rd
+2b36702bf2b1931f955fb7dd9d044c99 *man/MDSrotate.Rd
 fd218be03aa2591e5123d11780ccba1a *man/MOStest.Rd
-f2823a48acb6f861404b6682b3f52a45 *man/RsquareAdj.Rd
+603087a66c6c10720229d00bc499545d *man/RsquareAdj.Rd
 73f141f28a1aca961cd4e992d8610934 *man/SSarrhenius.Rd
 32f805196e58b526c3a2bab5c87116b3 *man/add1.cca.Rd
-d4395104c6b5d4e6fae91324cb817559 *man/adipart.Rd
-caf191d6c5c1e618e11cb8d7441407b4 *man/adonis.Rd
-9a341d0716f7d6cc46b427d7cc017d2d *man/anosim.Rd
+13da867f859157284eee4217561d4c3c *man/adipart.Rd
+84fa6a1565509707d87ee887c746cc19 *man/adonis.Rd
+1788008f46807f84083a0964eb0a9559 *man/anosim.Rd
 5b83e39817e231c0a01c8496fcde00dc *man/anova.cca.Rd
 c57af27fa11dadcd48981fcf42b2d221 *man/as.mlm.Rd
 8e3718248ff8d48e724654ab17caa2e2 *man/beals.Rd
-f17b3ca5ef9b2e18cce9688b806e59f6 *man/betadisper.Rd
-1336f0afb69a05bee9f6e7706d81d038 *man/betadiver.Rd
+4901cdd1859b2f49cf62819031d0e64d *man/betadisper.Rd
+653c0566697c40d57405e1b6a06cd7c8 *man/betadiver.Rd
 b04c2fae35dba2d97cb248814d5e2fe9 *man/bgdispersal.Rd
 860b9c7f2325f500c27f3c903831efae *man/bioenv.Rd
 783cc695729b9ce5ce9331944337541f *man/biplot.rda.Rd
-88602656153ee95f10335487273e132d *man/capscale.Rd
+777d378c6bdd91306058c915aefd144e *man/capscale.Rd
 644e253ebcab91a5ddce85294dda278d *man/cascadeKM.Rd
 aabb7dbe6885e0362b5c92a47c856b54 *man/cca.Rd
-9a2708af1831b9ddce1004971b6f4efc *man/cca.object.Rd
+bd548455eb7f153d33932dc4f3ba44a9 *man/cca.object.Rd
 b97d41cd8bf114b7ca7560b4dd9e58e6 *man/clamtest.Rd
-362992febcb1479b750a995203626e40 *man/commsim.Rd
+15ffd9b28bf53976177020d25e9ae870 *man/commsim.Rd
 335d0f7691ad9d0c48fffce9f9db6201 *man/contribdiv.Rd
-c41033fb9c572365490cc23b9870c950 *man/decorana.Rd
+2fc9628fa53c9680142b00fda94473c8 *man/decorana.Rd
 e485bac4360ba510f8db3c9d361701f8 *man/decostand.Rd
-22e3451a1cc9e294c2ad0e1a4531b136 *man/designdist.Rd
+5ec0c86e7f7b3ecbf90152de265d1c87 *man/designdist.Rd
 c01e0664652fbc8ef4963059bee4e422 *man/deviance.cca.Rd
 f58b474141a1b0fdf438bfe6dd8da0c9 *man/dispindmorisita.Rd
 70c0ef1ef267a37e2677476a43b72265 *man/dispweight.Rd
@@ -508,50 +515,50 @@ f3f742efa7511a4c33108a00b512ebd9 *man/distconnected.Rd
 08b96c1a45c11ffcb7f0da33a888421a *man/dune.Rd
 91fa409075b3bd64706c8ff380b3d01d *man/dune.taxon.Rd
 5f5f8c7df063606ccde6124c5dbe8add *man/eigenvals.Rd
-49dae9ef03e94ffd08e34ed4a92e1c68 *man/envfit.Rd
+190f23f7405cc5be2266faaf27be2183 *man/envfit.Rd
 d2cf422a3d7702ac6293fcd3ff046afc *man/eventstar.Rd
 5857c2307b1dfd69953a88bd3c384180 *man/fisherfit.Rd
 4135cbc750171f53a71e727291162bf8 *man/goodness.cca.Rd
 afc00cd6ac8f9b56bffbbb77e369057d *man/goodness.metaMDS.Rd
 81f6bbc59aedfa21953278c285c250bf *man/humpfit.Rd
 c8fea575af3da292987d4f8c4aa831b0 *man/indpower.Rd
-4e59bd79cf1c24f62d511ee3175a83ff *man/isomap.Rd
+f7b596bf1541f22609fabbdd9be82791 *man/isomap.Rd
 1455f24df0b577f7f65a28c5826081d2 *man/kendall.global.Rd
-6e4d92733c15b69cace0349288d61cc6 *man/linestack.Rd
+554ee3df1eca35eadde995d14e20780c *man/linestack.Rd
 59ce2773a5d92535708137747a52f358 *man/make.cepnames.Rd
 f8d6f3bd27a07dc00c6779405652ec07 *man/mantel.Rd
 85d798177d90587416f9ca88e2f445c9 *man/mantel.correlog.Rd
-e598d23fdc8a162bb793a3aa774559b9 *man/metaMDS.Rd
+f053b9a61134bcda5daaffa21509c530 *man/metaMDS.Rd
 4cfb02239809fa03b28e10ec8e8c9c6b *man/mite.Rd
 c50bd45c9e8c6e892d2dd8f7fe5f0bd9 *man/model.matrix.cca.Rd
-599ee1759c06b4171a363f3de6a4438c *man/monoMDS.Rd
-b897a6552d7524c853e91f9d8b972cb6 *man/mrpp.Rd
+9ce82e46d744c868c00924d8866dc228 *man/monoMDS.Rd
+735dd0c405cedd8f6eeedddae86d86b4 *man/mrpp.Rd
 dedc3d36bc5430ef525ee1998206ed3b *man/mso.Rd
-10d5049f8819e378f7f95fdb3858e6e7 *man/multipart.Rd
+7ed0b0f6beff14cc292c361550f562e8 *man/multipart.Rd
 9bc57e3bd36786573133cbddd63ba0a9 *man/nestedtemp.Rd
 c7f768b6f36aec4bc9d5b4c8f72c1141 *man/nobs.adonis.Rd
-d1b6a742f96e2dd5f422008221b05ae0 *man/nullmodel.Rd
+9785ec12d621524ef2a081de68812a3d *man/nullmodel.Rd
 ab73188440cdbffe2f3e3889fc7d5959 *man/oecosimu.Rd
 990fc366c31c3695bd6ed0b1d82bb7fb *man/ordiArrowTextXY.Rd
-ef3799c616e8b99501e05a575c15e6b7 *man/ordiarrows.Rd
-de06b800bfbded5bd5b17775f930a3c8 *man/ordihull.Rd
+294b58bdae6b5439f25bf08c5eb7c483 *man/ordiarrows.Rd
+409cb3e47ce4d87875a43668b27cc04a *man/ordihull.Rd
 8f8a34c5fcfcc1fe9f88ca16e84a1da6 *man/ordilabel.Rd
 994cfc973f88c682b741e48377e1b9b4 *man/ordiplot.Rd
-61d4e0e9ab3c3cd566d541f6954b0cda *man/ordipointlabel.Rd
+b23bb4e1b39d8d0e0309bd909a247786 *man/ordipointlabel.Rd
 d4d27a34b2e7d9d1b732a0d06cb9d9f4 *man/ordiresids.Rd
-da0b3d8e0681a5ddc2bea83fd1796048 *man/ordistep.Rd
-72b2485b893cc2cfb63ddecb095492f9 *man/ordisurf.Rd
-7f66b287db7afcdba2180f3fa430d7b6 *man/orditkplot.Rd
+9831ceea3c75b168317477a916f4c49b *man/ordistep.Rd
+8bf72310b1707b85af6eb08fc9180671 *man/ordisurf.Rd
+fa3c7cc7f1084be4507b1a87ed012718 *man/orditkplot.Rd
 8785cc44c56d1b24fbcbde8de9e325d5 *man/orditorp.Rd
-d971701b3c6f89b3a6b358a3966a43d2 *man/ordixyplot.Rd
+45ac08204e0f9a3fa674b437b72a3d4e *man/ordixyplot.Rd
 e8a307f119251e6651dacf18c182f73f *man/pcnm.Rd
 d3fd306546c43339ad7d8fd985a28801 *man/permatfull.Rd
-0c3dc2962707c6a19910a579e2ac0f01 *man/permustats.Rd
+6812983f8e154a66bd4ec3c736d1b36a *man/permustats.Rd
 4a2ed8481b1f6805d343e83fda91e0ed *man/permutations.Rd
-10e7cc018db792a9c75f3cad7ca3e999 *man/permutest.betadisper.Rd
-9b317f0ee6d6cf4efdd9fccc6fab6932 *man/plot.cca.Rd
+7533f16237c68f522d66d70b05a99c76 *man/permutest.betadisper.Rd
+1e8bee4a00a9e945676bc461ba949a0c *man/plot.cca.Rd
 d45a85e1ccef663ad3bc6d87286f5904 *man/prc.Rd
-59371b694baddaa44f8a2dba9a741057 *man/predict.cca.Rd
+0f7fb32afada9a6d1c274875465abad7 *man/predict.cca.Rd
 e29ed0c997c75aa9e229ae847e3d1cf6 *man/procrustes.Rd
 01a6ca946df5ad493adfb54003ad8e00 *man/pyrifos.Rd
 f61f64cc1be643149fd02f08a0cd7f9f *man/radfit.Rd
@@ -566,40 +573,40 @@ eec06fd5cfdddadb56bca849f88b38f0 *man/reorder.hclust.Rd
 91bdb07ef795744a77763a026e1cd141 *man/simper.Rd
 621f8a2810727ab3523fc0bd69a56dca *man/simulate.rda.Rd
 2a9336794ae5a322bf2ce6b71edb3f0c *man/sipoo.Rd
-37121fc0a195e97b3b1287678d175bab *man/spantree.Rd
-0858ab26917c2c5ca8704d690a28874b *man/specaccum.Rd
+fa1142a6350045acf397a99bbed4ef04 *man/spantree.Rd
+1bfd519e9cad6de99a4081375d376dad *man/specaccum.Rd
 c334f2fca856d5073044392713ee0894 *man/specpool.Rd
 5b9e51c85395f80f8504954e4175f877 *man/stepacross.Rd
-812fedada0ae3582c28f4f91bbcedc09 *man/stressplot.wcmdscale.Rd
+9a022e15270dc4d340207f34beb038a1 *man/stressplot.wcmdscale.Rd
 0aac5f5c8f58fc8fe1cb6c0ba819b196 *man/taxondive.Rd
 21d5137a2335a80efddff8dc9c55370f *man/tolerance.Rd
 a4b37297402220dee75997c4f49a729c *man/treedive.Rd
 14cc64af5f8a8c5965563a2b03c408f2 *man/tsallis.Rd
 033dd7d7917185cea81e4d7afcd59df9 *man/varechem.Rd
-6131d1225162c19932969552d9f1e7ba *man/varpart.Rd
-0e0e4db86ab5afa92f6d5a921c5e14ff *man/vegan-defunct.Rd
-76c332552a660a95a4e652c251187da9 *man/vegan-deprecated.Rd
-250740e48f2243a9119d2ca4d8e638f3 *man/vegan-internal.Rd
+a6280a68ce4a9298bc68d7ca5f768a2f *man/varpart.Rd
+3c0ab1f2a60eff99bbc3f7a33c3f6eee *man/vegan-defunct.Rd
+00b4cef1efbd187368d0905e3b298cfd *man/vegan-deprecated.Rd
+9c7d6e52f7182e300ee5170664011925 *man/vegan-internal.Rd
 cef033ea30c92a1508f82924ce9f2402 *man/vegan-package.Rd
 5280fd8e6478cff76373f5e85422ae5b *man/vegandocs.Rd
 ad48b24429d673e1af3120d0cf6c3eb3 *man/vegdist.Rd
 a2cc1d837017b4de0b4bec617e29533d *man/vegemite.Rd
 c3209a8eff0fe638d3a43b25ea5bec16 *man/wascores.Rd
-9410503f25833dcbdd38f7072fb7cea1 *man/wcmdscale.Rd
+e8085b39e46823189e312a5776835adc *man/wcmdscale.Rd
 dd4512521b5b7a678f87c7f27d6b986c *src/cepin.f
 dd22a1632081402e62320a4c0d6b2aa9 *src/data2hill.c
 7703ffdb64c5f31de56bfd8253877ad6 *src/decorana.f
 87b05dd087e591f1f8e92ecbf1983207 *src/goffactor.c
 ee2c80e02663dc727c33c7fb76cc70b1 *src/monoMDS.f
-a42c4629717137858295a1eb6f3e89de *src/nestedness.c
+f64ced55260d166a69e691d592457228 *src/nestedness.c
 0299086afe16bd7a4b57835d1b11f6d8 *src/ordering.f
 31bdbe9b08340e1662a62cf6e61ade6a *src/pnpoly.c
 b9b647fcf8a3e59e10b9351fae60ec06 *src/stepacross.c
 36ea09c9a6553010e786f0e787185d60 *src/vegdist.c
 f986cedbb0f80cd1bdc50b53a87154a2 *vignettes/FAQ-vegan.Rmd
-5d19c22cfbe24cb0b05cd31e46c5fc70 *vignettes/decision-vegan.Rnw
+c910da720c0da49de1808c688a450206 *vignettes/decision-vegan.Rnw
 06cfa11a83ca0330979d500549f2415a *vignettes/diversity-vegan.Rnw
-ddee3279ac0982a3da0bcf9fc10947ac *vignettes/intro-vegan.Rnw
+6b35943a07c04a6afc77222b0e17b7f8 *vignettes/intro-vegan.Rnw
 5b17ce6c86e3334b796a658328d426f9 *vignettes/partitioning.Rnw
 c4b0ae26e992a5bd0d91745aea647c5f *vignettes/varpart23.pdf
 0732ef41487d0bfca3cd1a972fb14c27 *vignettes/varpart4.pdf
diff --git a/NAMESPACE b/NAMESPACE
index 7919f08..ec2dcf9 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -6,7 +6,7 @@ useDynLib(vegan)
 export(CCorA, MOStest, RsquareAdj, SSarrhenius, SSgitay, SSgleason,
 SSlomolino, adipart, adonis, anosim, beals, betadisper, betadiver,
 bgdispersal, bioenv, bioenvdist, bstick, cIndexKM, calibrate, capscale,
-cascadeKM, cca, contribdiv, clamtest, commsim, decorana,
+cascadeKM, cca, contribdiv, clamtest, commsim, dbrda, decorana,
 decostand, designdist, coverscale, dispweight, dispindmorisita, distconnected,
 diversity, downweight, drarefy, eigengrad, eigenvals, envfit,
 estaccumR, estimateR, eventstar, factorfit, fisherfit, fitspecaccum,
@@ -14,9 +14,10 @@ gdispweight,goodness, hiersimu, humpfit, indpower, inertcomp, initMDS,
 intersetcor, isomapdist, isomap, linestack, mantel, meandist,
 metaMDSdist, metaMDSiter, metaMDSredist, MDSrotate, metaMDS, monoMDS,
 mrpp, msoplot, mso, multipart, make.commsim, nestedbetajac, nestedbetasor, nestedchecker,
-nesteddisc, nestedn0, nestednodf, nestedtemp, nullmodel, oecosimu,
-ordiareatest,
-ordiR2step, ordiarrows, ordiArrowMul, ordiArrowTextXY, ordicloud, ordicluster, ordiellipse, ordigrid,
+nesteddisc, nestedn0, nestednodf, nestedtemp, nullmodel, oecosimu, smbind,
+ordiareatest, ordiR2step,
+ordiarrows, ordiArrowMul, ordiArrowTextXY, ordibar, ordicloud,
+ordicluster, ordiellipse, ordigrid,
 ordihull, ordilabel, ordiplot, ordipointlabel, ordiresids,
 ordisegments, ordispider, ordisplom, ordistep, ordisurf,
 orditkplot, orditorp, ordixyplot,
@@ -34,10 +35,11 @@ wcmdscale, wisconsin)
 export(pasteCall)
 ## export anova.cca for 'BiodiversityR': this should be fixed there
 export(anova.cca)
-## Export as.mcmc for coda
 export(as.mcmc.oecosimu, as.mcmc.permat)
-## DEFUNCT: export names defined in vegan-defunct
-export(metaMDSrotate)
+## export oldCapscale for compatibility after new capscale design
+export(oldCapscale)
+## alternative implementation of adonis: may be eliminated later
+export(adonis2)
 
 ## export regular functions with dot names
 
@@ -52,6 +54,7 @@ export(panel.ordi, panel.ordiarrows, panel.ordi3d, prepanel.ordi3d)
 
 ## Export .Deprecated functions (to be removed later)
 export(commsimulator)
+## Export .Defunct function names (on their way out)
 S3method(density, adonis)
 S3method(density, anosim)
 S3method(density, mantel)
@@ -80,8 +83,8 @@ import(grDevices) ## too many functions to be listed separately
 import(lattice)
 importFrom(parallel, mclapply, makeCluster, stopCluster, clusterEvalQ,
            parApply, parLapply, parSapply, parRapply, parCapply)
-importFrom(MASS, isoMDS, sammon, Shepard, mvrnorm)
-importFrom(cluster, daisy)
+importFrom(MASS, isoMDS, sammon, Shepard, mvrnorm, lda)
+importFrom(cluster, daisy, ellipsoidhull)
 ## 's' must be imported in mgcv < 1.8-0 (not needed later)
 importFrom(mgcv, gam, s, te, predict.gam, summary.gam)
 ## Registration of S3 methods defined in vegan
@@ -98,7 +101,6 @@ S3method(RsquareAdj, default)
 S3method(RsquareAdj, glm)
 S3method(RsquareAdj, lm)
 S3method(RsquareAdj, rda)
-S3method(RsquareAdj, capscale)
 # TukeyHSD: stats
 S3method(TukeyHSD, betadisper)
 # add1: stats
@@ -137,6 +139,8 @@ S3method(bstick, decorana)
 S3method(bstick, default)
 S3method(bstick, prcomp)
 S3method(bstick, princomp)
+## c: base
+S3method(c, permustats)
 # calibrate: vegan
 S3method(calibrate, cca)
 S3method(calibrate, ordisurf)
@@ -188,6 +192,7 @@ S3method(extractAIC, cca)
 # fitted: stats
 S3method(fitted, capscale)
 S3method(fitted, cca)
+S3method(fitted, dbrda)
 S3method(fitted, procrustes)
 S3method(fitted, radfit)
 S3method(fitted, radfit.frame)
@@ -231,6 +236,7 @@ S3method(multipart, formula)
 # nobs: stats
 S3method(nobs, CCorA)
 S3method(nobs, adonis)
+S3method(nobs, anova.cca)
 S3method(nobs, betadisper)
 S3method(nobs, cca)
 S3method(nobs, decorana)
@@ -316,7 +322,7 @@ S3method(plot, specaccum)
 S3method(plot, taxondive)
 S3method(plot, varpart)
 S3method(plot, varpart234)
-S3method(plot, vegandensity)
+S3method(plot, vegandensity) # <- .defunct in 2.4-0
 S3method(plot, wcmdscale)
 # points: graphics
 S3method(points, cca)
@@ -346,7 +352,6 @@ S3method(print, adonis)
 S3method(print, anosim)
 S3method(print, betadisper)
 S3method(print, bioenv)
-S3method(print, capscale)
 S3method(print, cca)
 S3method(print, commsim)
 S3method(print, decorana)
@@ -446,6 +451,7 @@ S3method(screeplot, princomp)
 # simulate: stats
 S3method(simulate, capscale)
 S3method(simulate, cca)
+S3method(simulate, dbrda)
 S3method(simulate, rda)
 S3method(simulate, nullmodel)
 # specslope: vegan
@@ -458,6 +464,7 @@ S3method(stressplot, default)
 S3method(stressplot, monoMDS)
 S3method(stressplot, wcmdscale)
 S3method(stressplot, capscale)
+S3method(stressplot, dbrda)
 S3method(stressplot, cca)
 S3method(stressplot, rda)
 S3method(stressplot, prcomp)
diff --git a/R/GowerDblcen.R b/R/GowerDblcen.R
new file mode 100644
index 0000000..df828a4
--- /dev/null
+++ b/R/GowerDblcen.R
@@ -0,0 +1,48 @@
+### Internal function for double centring of a *matrix* of
+### dissimilarities. We used .C("dblcen", ..., PACKAGE = "stats")
+### which does not dublicate its argument, but it was removed from R
+### in r60360 | ripley | 2012-08-22 07:59:00 UTC (Wed, 22 Aug 2012)
+### "more conversion to .Call, clean up". Input 'x' *must* be a
+### matrix. This was originally an internal function in betadisper.R
+### (commit 7cbd4529 Thu Aug 23 08:45:31 2012 +0000)
+GowerDblcen <- function(x, na.rm = TRUE)
+{
+    cnt <- colMeans(x, na.rm = na.rm)
+    x <- sweep(x, 2L, cnt, check.margin = FALSE)
+    cnt <- rowMeans(x, na.rm = na.rm)
+    sweep(x, 1L, cnt, check.margin = FALSE)
+}
+
+### Internal functions to find additive constants to non-diagonal
+### dissimilarities so that there are no negative eigenvalues. The
+### Cailliez constant is added to dissimilarities and the Lingoes
+### constant is added to squared dissimilarities. Legendre & Anderson
+### (Ecol Monogr 69, 1-24; 1999) recommend Lingoes, but
+### stats::cmdscale() only provides Cailliez. Input parameter: d is
+### a matrix of dissimilarities.
+
+addCailliez <- function(d)
+{
+    n <- nrow(d)
+    q1 <- seq_len(n)
+    q2 <- n + q1
+    ## Cailliez makes a 2x2 block matrix with blocks of n x n elements.
+    ## Blocks anti-clockwise, upper left [0]
+    z <- matrix(0, 2*n, 2*n)
+    diag(z[q2,q1]) <- -1
+    z[q1,q2] <- -GowerDblcen(d^2)
+    z[q2,q2] <- GowerDblcen(2 * d)
+    ## Largest real eigenvalue
+    e <- eigen(z, symmetric = FALSE, only.values = TRUE)$values
+    out <- max(Re(e))
+    max(out, 0)
+}
+
+addLingoes <- function(d)
+{
+    ## smallest negative eigenvalue (or zero)
+    d <- -GowerDblcen(d^2)/2
+    e <- eigen(d, symmetric = TRUE, only.values = TRUE)$values
+    out <- min(e)
+    max(-out, 0)
+}
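
For context, the new R/GowerDblcen.R above gathers the Gower double-centring
step and the two additive-constant corrections in one internal file. A
minimal sketch of how the pieces relate (these helpers are unexported, so
outside the package they must be reached via vegan:::; the varespec data
set is used purely for illustration and is not part of this patch):

library(vegan)
data(varespec)
d <- vegdist(varespec)     # Bray-Curtis, typically non-Euclidean
## Gower-centred inner products; negative eigenvalues signal the
## imaginary component that the additive constants remove
G <- -vegan:::GowerDblcen(as.matrix(d)^2) / 2
min(eigen(G, symmetric = TRUE, only.values = TRUE)$values)
## Lingoes adds its constant to the squared dissimilarities ...
dL <- sqrt(d^2 + 2 * vegan:::addLingoes(as.matrix(d)))
## ... Cailliez to the dissimilarities themselves
dC <- d + vegan:::addCailliez(as.matrix(d))
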
diff --git a/R/MDSrotate.R b/R/MDSrotate.R
index 53313c5..af33e9b 100644
--- a/R/MDSrotate.R
+++ b/R/MDSrotate.R
@@ -15,6 +15,19 @@
     N <- NCOL(x)
     if (N < 2)
         stop(gettextf("needs at least 2 dimensions"))
+    ## check if vec is a factor and then use lda to find a matrix that
+    ## separates optimally factor levels
+    if (is.factor(vec) || is.character(vec)) {
+        da <- lda(x, vec)
+        vec <- predict(da, dimen = N - 1)$x
+        message(sprintf(ngettext(NCOL(vec),
+                         "Factor replaced with discriminant axis.",
+                         "Factor replaced with %d discriminant axes.",
+                                 ), NCOL(vec)))
+        if (NCOL(vec) > 1)
+            message("Proportional traces:", gettextf(" %.3f",
+                             da$svd[1:NCOL(vec)]^2/sum(da$svd^2)))
+    }
     vec <- as.matrix(vec)
     NV <- NCOL(vec)
     if (NV >= N)
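
The hunk above teaches MDSrotate() to accept a factor: the factor is
replaced by its lda() discriminant axes before rotation (hence the new
MASS::lda import in NAMESPACE). A sketch of the new usage, assuming
vegan's dune example data rather than anything in this patch:

library(vegan)
data(dune)
data(dune.env)
sol <- metaMDS(dune, trace = FALSE)
rot <- MDSrotate(sol, dune.env$Management)  # factor -> discriminant axes
plot(rot, display = "sites")
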
diff --git a/R/RsquareAdj.R b/R/RsquareAdj.R
index 00db6a7..ded502a 100644
--- a/R/RsquareAdj.R
+++ b/R/RsquareAdj.R
@@ -33,26 +33,14 @@
     list(r.squared = R2, adj.r.squared = radj)
 }
 
-## dbRDA: Euclidean style distances with no imaginary component can be
-## handled as rda, but I have no idea how to handle objects with
-## imaginary inertia.
-
-`RsquareAdj.capscale` <-
-    function(x, ...)
-{
-    if (!is.null(x$CA$imaginary.chi))
-        list(r.squared = NA, adj.r.squared = NA)
-    else
-        NextMethod("RsquareAdj", x, ...)
-}
-
 ## cca result: no RsquareAdj
 RsquareAdj.cca <-
-    function(x, ...)
+    function (x, permutations = 1000, ...) 
 {
-    R2 <- x$CCA$tot.chi/x$tot.chi
-    radj <- NA
-    list(r.squared = R2, adj.r.squared = radj)
+    r2 <- x$CCA$tot.chi / x$tot.chi
+    p <- permutest(x, permutations, ...)
+    radj <- 1 - ((1 - r2) / (1 - mean(p$num / x$tot.chi)))
+    list(r.squared = r2, adj.r.squared = radj)
 }
 
 ## Linear model: take the result from the summary
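
The rewritten RsquareAdj.cca() above replaces the former hard-coded NA
with a permutation-based adjusted R2: 1 - R2 is rescaled by one minus the
mean R2 of the permuted models, so the result is stochastic and governed
by the permutations argument. A usage sketch, with the mite data assumed
purely for illustration:

library(vegan)
data(mite)
data(mite.env)
m <- cca(mite ~ SubsDens + WatrCont, data = mite.env)
RsquareAdj(m, permutations = 999)  # adj.r.squared is now a number, not NA
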
diff --git a/R/adipart.default.R b/R/adipart.default.R
index fc96322..da5a90d 100644
--- a/R/adipart.default.R
+++ b/R/adipart.default.R
@@ -1,11 +1,12 @@
 adipart.default <-
 function(y, x, index=c("richness", "shannon", "simpson"),
-    weights=c("unif", "prop"), relative = FALSE, nsimul=99, ...)
+    weights=c("unif", "prop"), relative = FALSE, nsimul=99,
+    method = "r2dtable", ...)
 {
     ## evaluate formula
     lhs <- as.matrix(y)
     if (missing(x))
-        x <- cbind(level_1=seq_len(nrow(lhs)), 
+        x <- cbind(level_1=seq_len(nrow(lhs)),
             level_2=rep(1, nrow(lhs)))
     rhs <- data.frame(x)
     rhs[] <- lapply(rhs, as.factor)
@@ -45,9 +46,7 @@ function(y, x, index=c("richness", "shannon", "simpson"),
         ftmp[[i]] <- as.formula(paste("~", tlab[i], "- 1"))
     }
 
-    ## is there a method/burnin/thin in ... ?
-    method <- if (is.null(list(...)$method))
-        "r2dtable" else list(...)$method
+    ## is there burnin/thin in ... ?
     burnin <- if (is.null(list(...)$burnin))
         0 else list(...)$burnin
     thin <- if (is.null(list(...)$thin))
diff --git a/R/adipart.formula.R b/R/adipart.formula.R
index e9f7265..b7cd34d 100644
--- a/R/adipart.formula.R
+++ b/R/adipart.formula.R
@@ -1,17 +1,16 @@
 `adipart.formula` <-
     function(formula, data, index=c("richness", "shannon", "simpson"),
-             weights=c("unif", "prop"), relative = FALSE, nsimul=99, ...)
+             weights=c("unif", "prop"), relative = FALSE, nsimul=99,
+             method = "r2dtable", ...)
 {
     ## evaluate formula
     if (missing(data))
         data <- parent.frame()
     tmp <- hierParseFormula(formula, data)
-    lhs <- tmp$lhs
-    rhs <- tmp$rhs
-
     ## run simulations
-    sim <- adipart.default(lhs, rhs, index = index, weights = weights,
-                           relative = relative, nsimul = nsimul, ...)
+    sim <- adipart.default(tmp$lhs, tmp$rhs, index = index, weights = weights,
+                           relative = relative, nsimul = nsimul,
+                           method = method, ...)
     call <- match.call()
     call[[1]] <- as.name("adipart")
     attr(sim, "call") <- call
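
Both adipart methods above promote method from the '...' list to an
explicit argument with default "r2dtable", so the null model can be named
directly in the call. A sketch under that reading, with a hypothetical
two-level hierarchy over the mite data (neither is part of this patch):

library(vegan)
data(mite)
levsm <- data.frame(l1 = as.factor(seq_len(nrow(mite))),
                    l2 = as.factor(rep(1, nrow(mite))))
adipart(mite ~ l1 + l2, levsm, index = "richness", nsimul = 19,
        method = "r2dtable")
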
diff --git a/R/adonis2.R b/R/adonis2.R
new file mode 100644
index 0000000..cbedb26
--- /dev/null
+++ b/R/adonis2.R
@@ -0,0 +1,109 @@
+`adonis2` <-
+    function(formula, data, permutations = 999, method = "bray",
+             sqrt.dist = FALSE, add = FALSE, by = "terms",
+             parallel = getOption("mc.cores"), ...)
+{
+    ## we accept only by = "terms", "margin" or NULL
+    if (!is.null(by))
+        by <- match.arg(by, c("terms", "margin"))
+    ## evaluate lhs
+    YVAR <- formula[[2]]
+    lhs <- eval(YVAR, environment(formula), globalenv())
+    environment(formula) <- environment()
+    ## Take care that input lhs are dissimilarities
+    if ((is.matrix(lhs) || is.data.frame(lhs)) &&
+        isSymmetric(unname(as.matrix(lhs))))
+        lhs <- as.dist(lhs)
+    if (!inherits(lhs, "dist"))
+        lhs <- vegdist(as.matrix(lhs), method=method, ...)
+    ## adjust distances if requested
+    if (sqrt.dist)
+        lhs <- sqrt(lhs)
+    if (is.logical(add) && isTRUE(add))
+        add <- "lingoes"
+    if (is.character(add)) {
+        add <- match.arg(add, c("lingoes", "cailliez"))
+        if (add == "lingoes") {
+            ac <- addLingoes(as.matrix(lhs))
+            lhs <- sqrt(lhs^2 + 2 * ac)
+        }
+        else if (add == "cailliez") {
+            ac <- addCailliez(as.matrix(lhs))
+            lhs <- lhs + ac
+        }
+    }
+    ## adonis0 & anova.cca should see only dissimilarities (lhs)
+    if (!missing(data)) # expand and check terms
+        formula <- terms(formula, data=data)
+    formula <- update(formula, lhs ~ .)
+    ## no data? find variables in .GlobalEnv
+    if (missing(data))
+        data <- model.frame(delete.response(terms(formula)))
+    sol <- adonis0(formula, data = data, method = method)
+    out <- anova(sol, permutations = permutations, by = by,
+                 parallel = parallel)
+    ## Fix output header to show the adonis2() call instead of adonis0()
+    head <- attr(out, "heading")
+    head[2] <- deparse(match.call(), width.cutoff = 500L)
+    attr(out, "heading") <- head
+    out
+}
+`adonis0` <-
+    function(formula, data=NULL, method="bray", ...)
+{
+    ## evaluate data
+    if (missing(data))
+        data <- .GlobalEnv
+    else
+        data <- ordiGetData(match.call(), environment(formula))
+    ## First we collect info for the uppermost level of the analysed
+    ## object
+    Trms <- terms(delete.response(formula), data = data)
+    sol <- list(call = match.call(),
+                method = "adonis",
+                terms = Trms,
+                terminfo = list(terms = Trms))
+    sol$call$formula <- formula(Trms)
+    TOL <- 1e-7
+    Terms <- terms(formula, data = data)
+    lhs <- formula[[2]]
+    lhs <- eval(lhs, environment(formula)) # to force evaluation
+    formula[[2]] <- NULL                # to remove the lhs
+    rhs.frame <- model.frame(formula, data, drop.unused.levels = TRUE) # to get the data frame of rhs
+    rhs <- model.matrix(formula, rhs.frame) # and finally the model.matrix
+    rhs <- rhs[,-1, drop=FALSE] # remove the (Intercept) to get rank right
+    rhs <- scale(rhs, scale = FALSE, center = TRUE) # center
+    qrhs <- qr(rhs)
+    ## input lhs should always be dissimilarities
+    if (!inherits(lhs, "dist"))
+        stop("internal error: contact developers")
+    if (any(lhs < -TOL))
+        stop("dissimilarities must be non-negative")
+    dmat <- as.matrix(lhs^2)
+    n <- nrow(dmat)
+    ## G is -dmat/2 centred
+    G <- -GowerDblcen(dmat)/2
+    ## preliminaries are over: start working
+    Gfit <- qr.fitted(qrhs, G)
+    Gres <- qr.resid(qrhs, G)
+    ## collect data for the fit
+    if(!is.null(qrhs$rank) && qrhs$rank > 0) 
+        CCA <- list(rank = qrhs$rank,
+                    qrank = qrhs$rank,
+                    tot.chi = sum(diag(Gfit)),
+                    QR = qrhs,
+                    G = G)
+    else
+        CCA <- NULL # empty model
+    ## collect data for the residuals
+    CA <- list(rank = n - max(qrhs$rank, 0) - 1,
+               u = matrix(0, nrow=n),
+               tot.chi = sum(diag(Gres)),
+               Xbar = Gres)
+    ## all together
+    sol$tot.chi <- sum(diag(G))
+    sol$CCA <- CCA
+    sol$CA <- CA
+    class(sol) <- c("adonis2", "capscale", "rda", "cca")
+    sol
+}
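
A minimal usage sketch of the new adonis2() (a hedged example, not part
of the patch; it assumes vegan's bundled example data dune and dune.env):

    library(vegan)
    data(dune, dune.env)
    ## the left-hand side is community data: dissimilarities are
    ## computed internally with vegdist(..., method = "bray")
    adonis2(dune ~ Management + A1, data = dune.env,
            permutations = 999, by = "terms")
    ## marginal effects of each term
    adonis2(dune ~ Management + A1, data = dune.env, by = "margin")
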
diff --git a/R/anova.cca.R b/R/anova.cca.R
index c5fe6d0..d0ba523 100644
--- a/R/anova.cca.R
+++ b/R/anova.cca.R
@@ -64,7 +64,7 @@
     Pval <- (sum(tst$F.perm >= tst$F.0 - EPS) + 1)/(tst$nperm + 1)
     Pval <- c(Pval, NA)
     table <- data.frame(tst$df, tst$chi, Fval, Pval)
-    if (inherits(object, "capscale") &&
+    if (inherits(object, c("capscale", "dbrda")) &&
         (object$adjust != 1 || is.null(object$adjust)))
         varname <- "SumOfSqs"
     else if (inherits(object, "rda"))
diff --git a/R/anova.ccabyterm.R b/R/anova.ccabyterm.R
index 76f4841..84cf30c 100644
--- a/R/anova.ccabyterm.R
+++ b/R/anova.ccabyterm.R
@@ -23,6 +23,9 @@
         fla <- paste(". ~ . + ", trmlab[i])
         mods[[i+1]] <- update(mods[[i]], fla)
     }
+    ## for compatibility with the old capscale design we need the following
+    if (inherits(object, "oldcapscale")) # FIXME: remove this later
+        mods <- suppressMessages(lapply(mods, oldCapscale))
     ## The result
     sol <- anova.ccalist(mods, permutations = permutations,
                          model = model, parallel = parallel)
@@ -31,7 +34,7 @@
                       c(sol[-1, 4], sol[ntrm+1, 2]),
                       c(sol[-1, 5], NA),
                       c(sol[-1, 6], NA))
-    if (inherits(object, "capscale") &&
+    if (inherits(object, c("capscale", "dbrda")) &&
         (object$adjust != 1 || is.null(object$adjust)))
         varname <- "SumOfSqs"
     else if (inherits(object, "rda"))
@@ -62,6 +65,9 @@
     ## Refuse to handle models with missing data
     if (!is.null(object$na.action))
         stop("by = 'margin' models cannot handle missing data")
+    ## Refuse to handle oldCapscale models
+    if (inherits(object, "oldcapscale"))
+        stop("by = 'margin' models cannot handle oldCapscale results")
     ## We need term labels but without Condition() terms
     if (!is.null(scope) && is.character(scope))
         trms <- scope
@@ -101,7 +107,7 @@
     ## Collect results to anova data.frame
     out <- data.frame(c(Df, dfbig), c(Chisq, chibig),
                       c(Fstat, NA), c(Pval, NA))
-    if (inherits(object, "capscale") &&
+    if (inherits(object, c("capscale", "dbrda")) &&
         (object$adjust != 1 || is.null(object$adjust)))
         varname <- "SumOfSqs"
     else if (inherits(object, "rda"))
@@ -127,6 +133,17 @@
     function(object, permutations, model, parallel, cutoff = 1)
 {
     EPS <- sqrt(.Machine$double.eps)
+    ## capscale axes are still based only on real components and we
+    ## need to cast to old format to get the correct residual
+    ## variation. This should give a message().
+    if (!is.null(object$CA$imaginary.chi))
+        object <- oldCapscale(object)
+    ## On 29/10/15 (983ba7726) we assumed that dbrda(d ~ dbrda(d ~
+    ## x)$CCA$u) is not equal to dbrda(d ~ x) when there are negative
+    ## eigenvalues, but it seems that it is OK if constrained
+    ## eigenvalues are non-negative
+    if (inherits(object, "dbrda") && any(object$CCA$eig < 0))
+        stop("by = 'axis' cannot be used when constraints have negative eigenvalues")
     nperm <- nrow(permutations)
     ## Observed F-values and Df
     eig <- object$CCA$eig
@@ -149,9 +166,16 @@
     }
     LC <- as.data.frame(LC)
     fla <- reformulate(names(LC))
-    Pvals <- rep(NA, length(eig))
-    F.perm <- matrix(ncol = length(eig), nrow = nperm)
+    Pvals <- rep(NA, ncol(LC))
+    F.perm <- matrix(ncol = ncol(LC), nrow = nperm)
     environment(object$terms) <- environment()
+    ## in dbrda, some axes can be imaginary, but we only analyse
+    ## real-valued dimensions, and must adjust the data accordingly
+    if (ncol(LC) < length(eig)) {
+        eig <- eig[seq_len(ncol(LC))]
+        Df <- Df[seq_len(ncol(LC))]
+        Fstat <- Fstat[seq_len(ncol(LC))]
+    }
     for (i in seq_along(eig)) {
         part <- paste("~ . +Condition(",
                       paste(names(LC)[-i], collapse = "+"), ")")
@@ -165,7 +189,7 @@
                 permutest(update(object, upfla, data = LC),
                           permutations, model = model,
                           parallel = parallel)
-        Pvals[i] <- (sum(mod$F.perm >= mod$F.0) + 1) / (nperm + 1)
+        Pvals[i] <- (sum(mod$F.perm >= mod$F.0 - EPS) + 1) / (nperm + 1)
         F.perm[ , i] <- mod$F.perm
         if (Pvals[i] > cutoff)
             break
@@ -173,7 +197,7 @@
     out <- data.frame(c(Df, resdf), c(eig, object$CA$tot.chi),
                       c(Fstat, NA), c(Pvals,NA))
     rownames(out) <- c(names(eig), "Residual")
-    if (inherits(object, "capscale") &&
+    if (inherits(object, c("capscale", "dbrda")) &&
         (object$adjust != 1 || is.null(object$adjust)))
         varname <- "SumOfSqs"
     else if (inherits(object, "rda"))
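
A sketch of the three by= modes on a dbrda() fit (assuming the dune
example data; with the default Euclidean distances the constrained
eigenvalues are non-negative, so by = "axis" is allowed):

    library(vegan)
    data(dune, dune.env)
    m <- dbrda(dune ~ Management + A1, data = dune.env)
    anova(m, by = "terms")   # sequential tests
    anova(m, by = "margin")  # marginal tests
    anova(m, by = "axis")    # stops if constrained eigenvalues are negative
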
diff --git a/R/anova.ccalist.R b/R/anova.ccalist.R
index d1b2ca9..5ef62cd 100644
--- a/R/anova.ccalist.R
+++ b/R/anova.ccalist.R
@@ -76,7 +76,7 @@
     ## collect table
     table <- data.frame(resdf, resdev, c(NA, df),
                         c(NA,changedev), c(NA,fval), c(NA,pval))
-    if (inherits(object, "capscale") &&
+    if (inherits(object, c("capscale", "dbrda")) &&
         (object$adjust != 1 || is.null(object$adjust)))
         varname <- "SumOfSqs"
     else if (inherits(object, "rda"))
diff --git a/R/anova.ccanull.R b/R/anova.ccanull.R
index 7ffb84c..f47bda7 100644
--- a/R/anova.ccanull.R
+++ b/R/anova.ccanull.R
@@ -14,7 +14,7 @@
         table[2,] <- c(nrow(object$CA$u) - 1, object$CA$tot.chi, NA, NA)
     }
     rownames(table) <- c("Model", "Residual")
-    if (inherits(object, "capscale") &&
+    if (inherits(object, c("capscale", "dbrda")) &&
         (object$adjust != 1 || is.null(object$adjust)))
         varname <- "SumOfSqs"
     else if (inherits(object, "rda"))
diff --git a/R/as.fisher.R b/R/as.fisher.R
index 8fc763a..590b578 100644
--- a/R/as.fisher.R
+++ b/R/as.fisher.R
@@ -1,8 +1,10 @@
-"as.fisher" <-
+`as.fisher` <-
     function (x, ...) 
 {
     if (inherits(x, "fisher")) 
         return(x)
+    ## not a fisher object but a 1 x n data.frame or matrix: matrix is faster
+    x <- as.matrix(x)
     if (!identical(all.equal(x, round(x)), TRUE))
         stop("function accepts only integers (counts)")
     freq <- x[x > 0]
diff --git a/R/as.mcmc.oecosimu.R b/R/as.mcmc.oecosimu.R
index 2c3ada1..fbb9518 100644
--- a/R/as.mcmc.oecosimu.R
+++ b/R/as.mcmc.oecosimu.R
@@ -1,10 +1,38 @@
 `as.mcmc.oecosimu` <-
-    function(x) 
+    function(x)
 {
-    x <- as.ts(x)
-    mcpar <- attr(x, "tsp")
-    mcpar[3] <- round(1/mcpar[3])
-    attr(x, "mcpar") <- mcpar
-    class(x) <- c("mcmc", class(x))
+    ## mcmc only for sequential methods
+    if (!x$oecosimu$isSeq)
+        stop("as.mcmc available only for sequential null models")
+    ## named variables
+    varnames <- names(x$oecosimu$z)
+    x <- x$oecosimu$simulated
+    rownames(x) <- varnames
+    chains <- attr(x, "chains")
+    ## chains: make each chain an mcmc object and combine
+    ## them into an mcmc.list
+    if (!is.null(chains) && chains > 1) {
+        nsim <- dim(x)[2]
+        niter <- nsim / chains
+        ## iterate over chains
+        x <- lapply(1:chains, function(i) {
+                        z <- x[, ((i-1) * niter + 1):(i * niter), drop = FALSE]
+                        attr(z, "mcpar") <-
+                            c(attr(x, "burnin") + attr(x, "thin"),
+                              attr(x, "burnin") + attr(x, "thin") * niter,
+                              attr(x, "thin"))
+                        attr(z, "class") <- c("mcmc", class(z))
+                        t(z)
+                    })
+        ## combine list of mcmc objects to a coda mcmc.list
+        #x <- as.mcmc.list(x)
+        class(x) <- "mcmc.list"
+    } else { # one chain: build a single mcmc object
+        x <- as.ts(x)
+        mcpar <- attr(x, "tsp")
+        mcpar[3] <- round(1/mcpar[3])
+        attr(x, "mcpar") <- mcpar
+        class(x) <- c("mcmc", class(x))
+    }
     x
 }
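
A sketch of diagnosing a sequential null model with coda (a hedged
example assuming the dune data; nestedchecker and its "C.score"
statistic are from vegan):

    library(vegan)
    library(coda)
    data(dune)
    nm <- oecosimu(dune > 0, nestedchecker, "swap", nsimul = 250,
                   burnin = 100, thin = 10, statistic = "C.score")
    ## one chain yields an mcmc object; with a "chains" attribute > 1
    ## the result is an mcmc.list
    plot(as.mcmc(nm))
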
diff --git a/R/as.ts.oecosimu.R b/R/as.ts.oecosimu.R
index 3f94f7c..444d637 100644
--- a/R/as.ts.oecosimu.R
+++ b/R/as.ts.oecosimu.R
@@ -3,8 +3,11 @@
 {
     if  (!x$oecosimu$isSeq)
         stop("as.ts available only for sequential methods")
-    startval <- attr(x$oecosimu$simulated, "burnin") + 1 
+    chains <- attr(x$oecosimu$simulated, "chains")
+    if (!is.null(chains) && chains > 1)
+        stop("as.ts available only for single chain")
     thin <- attr(x$oecosimu$simulated, "thin")
+    startval <- attr(x$oecosimu$simulated, "burnin") + thin
     out <- ts(t(x$oecosimu$simulated), start = startval, deltat=thin,
         names = names(x$oecosimu$z))
     attr(out, "burnin") <- NULL
diff --git a/R/betadisper.R b/R/betadisper.R
index 6d96f09..c29a0ba 100644
--- a/R/betadisper.R
+++ b/R/betadisper.R
@@ -1,5 +1,6 @@
 `betadisper` <-
-    function(d, group, type = c("median","centroid"), bias.adjust=FALSE)
+    function(d, group, type = c("median","centroid"), bias.adjust=FALSE,
+             sqrt.dist = FALSE, add = FALSE)
 {
     ## inline function for double centring. We used .C("dblcen", ...,
     ## PACKAGE = "stats") which does not duplicate its argument, but
@@ -46,6 +47,22 @@
     ## Someone really tried to analyse a correlation-like object in range -1..+1
     if (any(d < -TOL, na.rm = TRUE))
         stop("dissimilarities 'd' must be non-negative")
+    ## adjust to avoid negative eigenvalues (if they disturb you)
+    if (sqrt.dist)
+        d <- sqrt(d)
+    if (is.logical(add) && isTRUE(add))
+        add <- "lingoes"
+    if (is.character(add)) {
+        add <- match.arg(add, c("lingoes", "cailliez"))
+        if (add == "lingoes") {
+            ac <- addLingoes(as.matrix(d))
+            d <- sqrt(d^2 + 2 * ac)
+        }
+        else if (add == "cailliez") {
+            ac <- addCailliez(as.matrix(d))
+            d <- d + ac
+        }
+    }
     if(missing(type))
         type <- "median"
     type <- match.arg(type)
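
A sketch of the new sqrt.dist and add arguments (assuming the dune
data; Bray-Curtis is semimetric and can give negative eigenvalues):

    library(vegan)
    data(dune, dune.env)
    d <- vegdist(dune)
    ## avoid negative eigenvalues by square-root transforming ...
    b1 <- betadisper(d, dune.env$Management, sqrt.dist = TRUE)
    ## ... or by adding a Lingoes constant
    b2 <- betadisper(d, dune.env$Management, add = "lingoes")
    anova(b1)
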
diff --git a/R/betadiver.R b/R/betadiver.R
index 0b338b8..5947500 100644
--- a/R/betadiver.R
+++ b/R/betadiver.R
@@ -20,7 +20,7 @@
                  "co"="(a*c+a*b+2*b*c)/(2*(a+b)*(a+c))",
                  "cc"="(b+c)/(a+b+c)", "g"="(b+c)/(a+b+c)",
                  "-3"="pmin(b,c)/(a+b+c)", "l"="(b+c)/2",
-                 "19"="2*(b*c+1)/((a+b+c)^2-(a+b+c))",
+                 "19"="2*(b*c+1)/(a+b+c)/(a+b+c-1)",
                  "hk"="(b+c)/(2*a+b+c)", "rlb"="a/(a+c)",
                  "sim"="pmin(b,c)/(pmin(b,c)+a)",
                  "gl"="2*abs(b-c)/(2*a+b+c)",
diff --git a/R/calibrate.cca.R b/R/calibrate.cca.R
index f6c4a3f..ba9c986 100644
--- a/R/calibrate.cca.R
+++ b/R/calibrate.cca.R
@@ -1,6 +1,13 @@
 `calibrate.cca` <-
     function(object, newdata, rank = "full", ...)
 {
+    ## inversion solve(b) requires a square matrix, and we should
+    ## append imaginary dims to get those in dbrda with negative
+    ## constrained eigenvalues. Work is needed to verify this can be
+    ## done, and therefore we just disable calibrate with negative
+    ## eigenvalues in constraints.
+    if (inherits(object, "dbrda") && object$CCA$poseig < object$CCA$qrank)
+        stop("cannot be used with 'dbrda' with imaginary constrained dimensions")
     if (!is.null(object$pCCA))
         stop("does not work with conditioned (partial) models")
     if (is.null(object$CCA) || object$CCA$rank == 0)
diff --git a/R/capscale.R b/R/capscale.R
index ffd3389..1580f27 100644
--- a/R/capscale.R
+++ b/R/capscale.R
@@ -38,13 +38,12 @@
     inertia <- attr(X, "method")
     if (is.null(inertia))
         inertia <- "unknown"
-    inertia <- paste(toupper(substr(inertia, 1, 1)), substr(inertia, 
-                                                            2, 256), sep = "")
+    inertia <- paste(toupper(substr(inertia, 1, 1)),
+                     substring(inertia,  2), sep = "")
     inertia <- paste(inertia, "distance")
     if (!sqrt.dist)
         inertia <- paste("squared", inertia)
-    if (add) 
-        inertia <- paste(inertia, "(euclidified)")
+    ## postpone info on euclidification till we have done so
 
     ## evaluate formula: ordiParseFormula will return dissimilarities
     ## as a symmetric square matrix (except that some rows may be
@@ -76,17 +75,13 @@
         adjust <- sqrt(k)
     }
     nm <- attr(X, "Labels")    
-    ## cmdscale is only used if 'add = TRUE': it cannot properly
-    ## handle negative eigenvalues and therefore we normally use
-    ## wcmdscale. If we have 'add = TRUE' there will be no negative
-    ## eigenvalues and this is not a problem.
-    if (add) {
-        X <- cmdscale(X, k = k, eig = TRUE, add = add)
-        ## All eigenvalues *should* be positive, but see that they are
-        X$eig <- X$eig[X$eig > 0]
-    }
-    else
-        X <- wcmdscale(X, eig = TRUE)
+    ## wcmdscale, optionally with additive adjustment
+    X <- wcmdscale(X, x.ret = TRUE, add = add)
+    ## this may have been euclidified: update inertia
+    if (!is.na(X$ac) && X$ac > sqrt(.Machine$double.eps))
+        inertia <- paste(paste0(toupper(substring(X$add, 1, 1)),
+                                substring(X$add, 2)),
+                         "adjusted", inertia)
     if (is.null(rownames(X$points))) 
         rownames(X$points) <- nm
     X$points <- adjust * X$points
@@ -98,6 +93,39 @@
             X$negaxes <- X$negaxes/sqrt(k)
     }
     sol <- rda.default(X$points, d$Y, d$Z, ...)
+    ## Get components of inertia with negative eigenvalues following
+    ## McArdle & Anderson (2001), section "Theory". G is their
+    ## double-centred Gower matrix, but instead of hat matrix, we use
+## QR decomposition to get the components of inertia.
+    hasNegEig <- any(X$eig < 0)
+    G <- -X$x/2
+    if (adjust == 1)
+        G <- G/k
+    if (hasNegEig)
+        sol$real.tot.chi <- sol$tot.chi
+    sol$tot.chi <- sum(diag(G))
+    if (!is.null(sol$pCCA)) {
+        sol$pCCA$G <- G
+        if (hasNegEig) {
+            sol$pCCA$real.tot.chi <- sol$pCCA$tot.chi
+            sol$pCCA$tot.chi <- sum(diag(qr.fitted(sol$pCCA$QR, G)))
+        }
+        G <- qr.resid(sol$pCCA$QR, t(qr.resid(sol$pCCA$QR, G)))
+    }
+    if (!is.null(sol$CCA) && sol$CCA$rank > 0) {
+        sol$CCA$G <- G
+        if (hasNegEig) {
+            sol$CCA$real.tot.chi <- sol$CCA$tot.chi
+            sol$CCA$tot.chi <- sum(diag(qr.fitted(sol$CCA$QR, G)))
+        }
+    }
+    if (hasNegEig) {
+        sol$CA$real.tot.chi <- sol$CA$tot.chi
+        if (!is.null(sol$CA) && !is.null(sol$CCA$QR))
+            sol$CA$tot.chi <- sum(diag(qr.resid(sol$CCA$QR, G)))
+        else
+            sol$CA$tot.chi <- sum(diag(G))
+    }
     if (!is.null(sol$CCA) && sol$CCA$rank > 0) {
         colnames(sol$CCA$u) <- colnames(sol$CCA$biplot) <- names(sol$CCA$eig) <-
             colnames(sol$CCA$wa) <- colnames(sol$CCA$v) <-
@@ -112,7 +140,6 @@
     if (any(X$eig < 0)) {
         negax <- X$eig[X$eig < 0]
         sol$CA$imaginary.chi <- sum(negax)
-        sol$tot.chi <- sol$tot.chi + sol$CA$imaginary.chi
         sol$CA$imaginary.rank <- length(negax)
         sol$CA$imaginary.u.eig <- X$negaxes
     }
@@ -161,8 +188,11 @@
     sol$call$formula <- formula(d$terms, width.cutoff = 500)
     sol$call$formula[[2]] <- formula[[2]]
     sol$method <- "capscale"
-    if (add)
+    sol$sqrt.dist <- sqrt.dist
+    if (!is.na(X$ac) && X$ac > 0) {
         sol$ac <- X$ac
+        sol$add <- X$add
+    }
     sol$adjust <- adjust
     sol$inertia <- inertia
     if (metaMDSdist)
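
A sketch of the reworked capscale() (assuming the dune data); the
euclidification is now reported in the inertia label only after it has
actually been applied:

    library(vegan)
    data(dune, dune.env)
    m <- capscale(dune ~ Management + Condition(Moisture), dune.env,
                  distance = "bray", add = "lingoes")
    m   # inertia labelled like "Lingoes adjusted squared Bray distance"
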
diff --git a/R/contribdiv.R b/R/contribdiv.R
index 07134f9..721af1d 100644
--- a/R/contribdiv.R
+++ b/R/contribdiv.R
@@ -9,6 +9,8 @@
 
     index <- match.arg(index)
 
+    comm <- as.matrix(comm) # faster than data.frame
+
     x <- comm[rowSums(comm) > 0, colSums(comm) > 0]
     n <- nrow(x)
     S <- ncol(x)
diff --git a/R/dbrda.R b/R/dbrda.R
new file mode 100644
index 0000000..b88e294
--- /dev/null
+++ b/R/dbrda.R
@@ -0,0 +1,252 @@
+`dbrda` <-
+    function (formula, data, distance = "euclidean",
+              sqrt.dist = FALSE,  add = FALSE, dfun = vegdist,
+              metaMDSdist = FALSE, na.action = na.fail,
+              subset = NULL, ...) 
+{
+    EPS <- sqrt(.Machine$double.eps)
+    if (!inherits(formula, "formula")) 
+        stop("Needs a model formula")
+    if (missing(data)) {
+        data <- parent.frame()
+    }
+    else {
+        data <- ordiGetData(match.call(), environment(formula))
+    }
+    formula <- formula(terms(formula, data = data))
+    ## The following line was eval'ed in environment(formula), but
+    ## that made update() fail. Rethink the line if dbrda() fails
+    ## mysteriously at this point.
+    X <- eval(formula[[2]], envir=environment(formula),
+              enclos = globalenv())
+    if ((is.matrix(X) || is.data.frame(X)) &&
+               isSymmetric(unname(as.matrix(X))))
+        X <- as.dist(X)
+    if (!inherits(X, "dist")) {
+        comm <- X
+        dfun <- match.fun(dfun)
+        if (metaMDSdist) {
+            commname <- as.character(formula[[2]])
+            X <- metaMDSdist(comm, distance = distance, zerodist = "ignore",
+                             commname = commname, distfun = dfun, ...)
+            commname <- attr(X, "commname")
+            comm <- eval.parent(parse(text=commname))
+        } else {
+            X <- dfun(X, distance)
+        }
+    }
+    ## get the name of the inertia
+    inertia <- attr(X, "method")
+    if (is.null(inertia))
+        inertia <- "unknown"
+    inertia <- paste(toupper(substr(inertia, 1, 1)),
+                     substring(inertia, 2), sep = "")
+    inertia <- paste(inertia, "distance")
+
+    ## evaluate formula: ordiParseFormula will return dissimilarities
+    ## as a symmetric square matrix (except that some rows may be
+    ## deleted due to missing values)
+    d <- ordiParseFormula(formula,
+                          data,
+                          na.action = na.action,
+                          subset = substitute(subset))
+    ## ordiParseFormula subsets rows of dissimilarities: do the same
+    ## for columns ('comm' is handled later). ordiParseFormula
+    ## returned the original data, but we instead use the potentially
+    ## changed X and discard d$X.
+    if (!is.null(d$subset)) {
+        X <- as.matrix(X)[d$subset, d$subset, drop = FALSE]
+    }
+    ## Delete columns if rows were deleted due to missing values
+    if (!is.null(d$na.action)) {
+        X <- as.matrix(X)[-d$na.action, -d$na.action, drop = FALSE]
+    }
+    X <- as.matrix(X)
+    k <- NROW(X) - 1
+    ## sqrt & add adjustments
+    if (sqrt.dist)
+        X <- sqrt(X)
+    if (is.logical(add) && isTRUE(add))
+        add <- "lingoes"
+    if (is.character(add)) {
+        add <- match.arg(add, c("lingoes", "cailliez"))
+        if (add == "lingoes") {
+            ac <- addLingoes(X)
+            X <- sqrt(X^2 + 2 * ac)
+        } else if (add == "cailliez") {
+            ac <- addCailliez(X)
+            X <- X + ac
+        }
+        diag(X) <- 0
+    } else {
+        ac <- 0
+    }
+    ## update the name of the inertia
+    if (!sqrt.dist)
+        inertia <- paste("squared", inertia)
+    if (ac > sqrt(.Machine$double.eps))
+        inertia <- paste(paste0(toupper(substring(add, 1, 1)),
+                              substring(add, 2)), "adjusted", inertia)
+    if (max(X) >= 4 + .Machine$double.eps) {
+        inertia <- paste("mean", inertia)
+        adjust <- 1
+    }
+    else {
+        adjust <- sqrt(k)
+    }
+    nm <- attr(X, "Labels")    
+    ## Get components of inertia with negative eigenvalues following
+    ## McArdle & Anderson (2001), section "Theory". G is their
+    ## double-centred Gower matrix, but instead of hat matrix, we use
+    ## QR decomposition to get the components of inertia.
+    G <- -GowerDblcen(X^2)/2
+    if (adjust == 1)
+        G <- G/k
+    ## Solution: this shows the algorithmic steps
+    tot.chi <- sum(diag(G))
+    pCCA <- CCA <-  CA <- NULL
+    ## pCCA
+    if (!is.null(d$Z)) {
+        d$Z <- scale(d$Z, scale = FALSE)
+        Q <- qr(d$Z, tol = 1e-6)
+        HGH <- qr.fitted(Q, t(qr.fitted(Q, G)))
+        pCCA <- list(rank = Q$rank, tot.chi = sum(diag(HGH)),
+                     QR = Q, Fit = HGH,
+                     envcentre = attr(d$Z, "scaled:center"),
+                     G = G)
+        G <- qr.resid(Q, t(qr.resid(Q, G)))
+    }
+    ## CCA
+    if (!is.null(d$Y)) {
+        d$Y <- scale(d$Y, scale = FALSE) 
+        Q <- qr(cbind(d$Z, d$Y), tol = 1e-6)
+        HGH <- qr.fitted(Q, t(qr.fitted(Q, G)))
+        e <- eigen(HGH, symmetric = TRUE)
+        nz <- abs(e$values) > EPS
+        if (any(nz)) {
+            e$values <- e$values[nz]
+            e$vectors <- e$vectors[, nz, drop = FALSE]
+            pos <- e$values > 0
+            if (any(e$values < 0)) {
+                imaginary.u <- e$vectors[, !pos, drop = FALSE]
+                e$vectors <- e$vectors[, pos, drop = FALSE]
+            } else {
+                imaginary.u <- NULL
+            }
+            wa <- G %*% e$vectors %*% diag(1/e$values[pos], sum(pos))
+            v <- matrix(NA, ncol = ncol(wa))
+            oo <- Q$pivot[seq_len(Q$rank)]
+            rank <- Q$rank
+            if (!is.null(pCCA)) {
+                oo <- oo[-seq_len(pCCA$rank)] - ncol(d$Z)
+                rank <- rank - pCCA$rank
+            }
+            CCA <- list(eig = e$values,
+                        u = e$vectors,
+                        imaginary.u = imaginary.u,
+                        poseig = sum(pos),
+                        v = v, wa = wa,
+                        alias =  if (rank < ncol(d$Y))
+                                     colnames(d$Y)[-oo],
+                        biplot = cor(d$Y[,oo, drop=FALSE], e$vectors),
+                        qrank = rank, rank = rank,
+                        tot.chi = sum(diag(HGH)),
+                        QR = Q,
+                        envcentre = attr(d$Y, "scaled:center"),
+                        Xbar = NA, G = G)
+        } else {
+            CCA <- NULL
+        }
+        G <- qr.resid(Q, t(qr.resid(Q, G)))
+    }
+    ## CA
+    e <- eigen(G, symmetric = TRUE)
+    nz <- abs(e$values) > EPS # non-zero, either positive or negative
+    if (any(nz)) {
+        e$values <- e$values[nz]
+        e$vectors <- e$vectors[, nz, drop = FALSE]
+        if (any(e$values < 0)) {
+            imaginary.u <- e$vectors[, e$values < 0, drop = FALSE]
+            e$vectors <- e$vectors[, e$values > 0, drop = FALSE]
+        } else {
+            imaginary.u <- NULL
+        }
+        v <- matrix(NA, ncol = ncol(e$vectors))
+        CA <- list(eig = e$values,
+                   u = e$vectors,
+                   imaginary.u = imaginary.u,
+                   poseig = sum(e$values > 0),
+                   v = v,
+                   rank = sum(nz),
+                   tot.chi = sum(diag(G)),
+                   Xbar = NA, G = G)
+    } else {
+        CA <- NULL
+    }
+    ## output
+    sol <- list(tot.chi = tot.chi, pCCA = pCCA, CCA = CCA, CA = CA)
+    if (!is.null(sol$CCA) && sol$CCA$rank > 0) {
+        colnames(sol$CCA$u) <-
+            colnames(sol$CCA$wa) <-
+            colnames(sol$CCA$v) <-
+            names(sol$CCA$eig) <-
+                paste("dbRDA", seq_len(ncol(sol$CCA$u)), sep = "")
+        colnames(sol$CCA$biplot) <-
+            names(sol$CCA$eig)[sol$CCA$eig > 0]
+        rownames(sol$CCA$u) <- rownames(d$X)
+        if (!is.null(sol$CCA$imaginary.u)) {
+            negax <- sol$CCA$eig < 0
+            negnm <- paste0("idbRDA", seq_len(sum(negax)))
+            names(sol$CCA$eig)[negax] <- negnm
+            colnames(sol$CCA$imaginary.u) <- negnm
+            rownames(sol$CCA$imaginary.u) <- rownames(d$X)
+        }
+    }
+    if (!is.null(sol$CA) && sol$CA$rank > 0) {
+        colnames(sol$CA$u) <- colnames(sol$CA$v) <- names(sol$CA$eig) <-
+            paste("MDS", seq_len(ncol(sol$CA$u)), sep = "")
+        rownames(sol$CA$u) <- rownames(d$X)
+        if (!is.null(sol$CA$imaginary.u)) {
+            negax <- sol$CA$eig < 0
+            negnm <- paste0("iMDS", seq_len(sum(negax)))
+            names(sol$CA$eig)[negax] <- negnm
+            colnames(sol$CA$imaginary.u) <- negnm
+            rownames(sol$CA$imaginary.u) <- rownames(d$X)
+        }
+    }
+
+    sol$colsum <- NA
+    if (!is.null(sol$CCA) && sol$CCA$rank > 0) 
+        sol$CCA$centroids <-
+            centroids.cca(sol$CCA$u, d$modelframe)
+    if (!is.null(sol$CCA$alias)) 
+        sol$CCA$centroids <- unique(sol$CCA$centroids)
+    if (!is.null(sol$CCA$centroids)) {
+        rs <- rowSums(sol$CCA$centroids^2)
+        sol$CCA$centroids <- sol$CCA$centroids[rs > 1e-04, , 
+                                               drop = FALSE]
+        if (nrow(sol$CCA$centroids) == 0)
+            sol$CCA$centroids <- NULL
+    }
+    sol$call <- match.call()
+    sol$terms <- terms(formula, "Condition", data = data)
+    sol$terminfo <- ordiTerminfo(d, data)
+    sol$call$formula <- formula(d$terms, width.cutoff = 500)
+    sol$call$formula[[2]] <- formula[[2]]
+    sol$method <- "dbrda"
+    sol$sqrt.dist <- sqrt.dist
+    if (!is.na(ac) && ac > 0) {
+        sol$ac <- ac
+        sol$add <- add
+    }
+    sol$adjust <- adjust
+    sol$inertia <- inertia
+    if (metaMDSdist)
+        sol$metaMDSdist <- commname
+    sol$subset <- d$subset
+    sol$na.action <- d$na.action
+    class(sol) <- c("dbrda", "rda", "cca")
+    if (!is.null(sol$na.action))
+        sol <- ordiNAexclude(sol, d$excluded)
+    sol
+}
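
A minimal sketch of the new dbrda() (assuming the dune data); negative
eigenvalues are retained as imaginary idbRDA/iMDS axes instead of being
dropped or euclidified away:

    library(vegan)
    data(dune, dune.env)
    m <- dbrda(dune ~ Management + Condition(A1), dune.env,
               distance = "bray")
    ## or euclidify the semimetric dissimilarities up front
    m2 <- dbrda(dune ~ Management, dune.env, distance = "bray",
                add = "lingoes")
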
diff --git a/R/decorana.R b/R/decorana.R
index 1afb43b..e855734 100644
--- a/R/decorana.R
+++ b/R/decorana.R
@@ -157,10 +157,8 @@
     }
     else {
         evals.decorana <- evals
-        var.r <- cov.wt(rproj, aidot)
-        var.r <- diag(var.r$cov) * (1 - sum(var.r$wt^2))
-        var.c <- cov.wt(cproj, adotj)
-        var.c <- diag(var.c$cov) * (1 - sum(var.c$wt^2))
+        var.r <- diag(cov.wt(rproj, aidot, method = "ML")$cov)
+        var.c <- diag(cov.wt(cproj, adotj, method = "ML")$cov)
         evals <- var.r/var.c
         if (any(ze <- evals.decorana < ZEROEIG))
             evals[ze] <- 0
diff --git a/R/designdist.R b/R/designdist.R
index ddacfce..39e277f 100644
--- a/R/designdist.R
+++ b/R/designdist.R
@@ -1,11 +1,11 @@
 `designdist` <-
     function (x, method = "(A+B-2*J)/(A+B)",
               terms = c("binary", "quadratic", "minimum"),
-              abcd = FALSE, name) 
+              abcd = FALSE, alphagamma = FALSE, name) 
 {
     terms <- match.arg(terms)
-    if (abcd && terms != "binary")
-        warning("abcd = TRUE and terms are not 'binary':\nresults may be meaningless")
+    if ((abcd || alphagamma) && terms != "binary")
+        warning("Perhaps terms should be 'binary' with 'abcd' or 'alphagamma'?")
     x <- as.matrix(x)
     N <- nrow(x)
     P <- ncol(x)
@@ -22,12 +22,19 @@
     A <- as.dist(outer(rep(1, N), d))
     B <- as.dist(outer(d, rep(1, N)))
     J <- as.dist(x)
+    ## 2x2 contingency table notation
     if (abcd) {
         a <- J
         b <- A - J
         c <- B - J
         d <- P - A - B + J
     }
+    ## beta diversity notation
+    if (alphagamma) {
+        alpha <- (A + B)/2
+        gamma <- A + B - J
+        delta <- abs(A - B)/2
+    }
     dis <- eval(parse(text = method))
     attributes(dis) <- attributes(J)
     attr(dis, "call") <- match.call()
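
A sketch of the new alphagamma notation (assuming the dune data):
Whittaker's beta diversity for site pairs is gamma/alpha - 1:

    library(vegan)
    data(dune)
    beta.w <- designdist(dune, method = "gamma/alpha - 1",
                         terms = "binary", alphagamma = TRUE,
                         name = "beta.w")
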
diff --git a/R/deviance.rda.R b/R/deviance.rda.R
index c8ea837..b73af90 100644
--- a/R/deviance.rda.R
+++ b/R/deviance.rda.R
@@ -1,5 +1,5 @@
 `deviance.rda` <-
     function(object, ...)
 {
-    object$CA$tot.chi * (nrow(object$CA$Xbar) - 1)
+    object$CA$tot.chi * (nobs(object) - 1)
 }
diff --git a/R/eigenvals.R b/R/eigenvals.R
index 006faa7..9d006f5 100644
--- a/R/eigenvals.R
+++ b/R/eigenvals.R
@@ -33,6 +33,12 @@
 {
     out <- x$sdev^2
     names(out) <- colnames(x$rotation)
+    ## honour prcomp(..., rank.=) which requests only 'rank.' eigenvalues
+    if (ncol(x$rotation) < length(out)) {
+        sumev <- sum(out)
+        out <- out[seq_len(ncol(x$rotation))]
+        attr(out, "sumev") <- sumev
+    }
     class(out) <- "eigenvals"
     out
 }
@@ -130,12 +136,20 @@
 `summary.eigenvals` <-
     function(object, ...)
 {
-    ## abs(object) is to handle neg eigenvalues of wcmdscale and
-    ## capscale
-    vars <- object/sum(abs(object))
+    ## dbRDA can have negative eigenvalues: do not give cumulative
+    ## proportions
+    if(!is.null(attr(object, "sumev")))
+        sumev <- attr(object, "sumev")
+    else
+        sumev <- sum(object)
+    vars <- object/sumev
+    cumvars <- if (all(vars >= 0))
+                   cumsum(vars)
+               else
+                   NA
     importance <- rbind(`Eigenvalue` = object,
                         `Proportion Explained` = round(abs(vars), 5),
-                        `Cumulative Proportion`= round(cumsum(abs(vars)), 5))
+                        `Cumulative Proportion` = round(cumvars, 5))
     out <- list(importance = importance)
     class(out) <- c("summary.eigenvals")
     out
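
A sketch of the changed summary (assuming the dune data): cumulative
proportions are suppressed when eigenvalues can be negative:

    library(vegan)
    data(dune, dune.env)
    ev <- eigenvals(dbrda(dune ~ Management, dune.env, distance = "bray"))
    summary(ev)   # Cumulative Proportion is NA if any eigenvalue < 0
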
diff --git a/R/eventstar.R b/R/eventstar.R
index 11137e6..5844b19 100644
--- a/R/eventstar.R
+++ b/R/eventstar.R
@@ -1,6 +1,10 @@
-eventstar <- function(x, qmax=5) {
+`eventstar` <-
+    function(x, qmax=5)
+{
     if (is.null(dim(x)))
         x <- matrix(x, 1, length(x))
+    else
+        x <- as.matrix(x) # faster than data.frame
     lossfun <- function(q, x)
         tsallis(x, scales=q, norm=TRUE)
     qstarfun <- function(x) {
diff --git a/R/extractAIC.cca.R b/R/extractAIC.cca.R
index 1ba3e63..3e6653c 100644
--- a/R/extractAIC.cca.R
+++ b/R/extractAIC.cca.R
@@ -1,7 +1,7 @@
-"extractAIC.cca" <-
-function (fit, scale = 0, k = 2, ...)
+`extractAIC.cca` <-
+    function (fit, scale = 0, k = 2, ...)
 {
-   n <- nrow(fit$CA$Xbar)
+   n <- nobs(fit)
    edf <- 1
    if (!is.null(fit$CCA$rank)) edf <- edf + fit$CCA$qrank
    if (!is.null(fit$pCCA$rank)) edf <- edf + fit$pCCA$rank
diff --git a/R/fitted.capscale.R b/R/fitted.capscale.R
index f59ed72..89c6f09 100644
--- a/R/fitted.capscale.R
+++ b/R/fitted.capscale.R
@@ -3,7 +3,7 @@
              type = c("response", "working"), ...)
 {
     model <- match.arg(model)
-    if (is.null(object[[model]]))
+    if (is.null(object[[model]]) && model != "Imaginary")
         stop("component ", model, " does not exist")
     type <- match.arg(type)
     ## Return scaled eigenvalues
@@ -12,12 +12,24 @@
                 CA = object$CA$u %*% diag(sqrt(object$CA$eig)),
                 Imaginary = object$CA$imaginary.u.eig,
                 pCCA = object$pCCA$Fit/object$adjust)
+    if (is.null(U))
+        stop("component ", model, " does not exist")
     ## Distances or working scores U
     if (type == "response") {
         U <- dist(U)
         ## remove additive constant (if add = TRUE)
-        if (!is.null(object$ac))
-            U <- U - object$ac
+        if (!is.null(object$ac)) {
+            if (object$add == "lingoes")
+                U <- sqrt(U^2 - 2 * object$ac)
+            else if (object$add == "cailliez")
+                U <- U - object$ac
+            else
+                stop("unknown Euclidifying adjustment")
+        }
+        ## undo sqrt.dist -- sqrt.dist was applied first in capscale,
+        ## so it must be last here
+        if (object$sqrt.dist)
+            U <- U^2
     }
     U
 }
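
A sketch of the corrected back-transformations (assuming the dune
data): type = "response" now undoes both the additive constant and
sqrt.dist, in the reverse order of their application:

    library(vegan)
    data(dune, dune.env)
    m <- capscale(dune ~ Management, dune.env, distance = "bray",
                  sqrt.dist = TRUE)
    f <- fitted(m, model = "CCA", type = "response")
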
diff --git a/R/fitted.dbrda.R b/R/fitted.dbrda.R
new file mode 100644
index 0000000..61a18d5
--- /dev/null
+++ b/R/fitted.dbrda.R
@@ -0,0 +1,54 @@
+### 'working' will be Gower's G = -GowerDblcen(dis^2)/2
+`fitted.dbrda` <-
+    function (object, model = c("CCA", "CA", "pCCA"),
+              type = c("response", "working"), ...) 
+{
+    type <- match.arg(type)
+    model <- match.arg(model)
+    if (object$adjust == 1)
+        const <- nobs(object) - 1
+    else
+        const <- 1
+    if (is.null(object[[model]]))
+        stop("component ", model, " does not exist")
+    if (type == "working") {
+        if (model == "pCCA")
+            G <- object$pCCA$Fit
+        else
+            G <- object[[model]]$G
+        if (model == "CCA") {
+            H <- tcrossprod(
+                qr.Q(object$CCA$QR)[, seq_len(object$CCA$QR$rank),
+                                    drop=FALSE])
+            G <- H %*% G %*% H
+        }
+        out <- G
+    }
+    if (type == "response") {
+        if (model == "pCCA")
+            stop("type = 'response' is unavailable for 'pCCA'")
+        eig <- object[[model]]$eig
+        U <- object[[model]]$u
+        U <- sweep(U, 2, sqrt(eig[eig>0]), "*")
+        D <- dist(U)
+        ## remove additive constant
+        if (!is.null(object$ac)) {
+            if (object$add == "lingoes")
+                D <- sqrt(D^2 - 2 * object$ac)
+            else if (object$add == "cailliez")
+                D <- D - object$ac
+            else stop("unknown Euclidifying adjustment")
+        }
+        ## remove negative distances in imaginary space
+        if (any(eig < 0)) {
+            U <- object[[model]]$imaginary.u
+            U <- sweep(U, 2, sqrt(abs(eig[eig<0])), "*")
+            D <- sqrt(D^2 - dist(U)^2)
+        }
+        ## undo internal sqrt.dist
+        if (object$sqrt.dist)
+            D <- D^2
+        out <- D * sqrt(const)
+    }
+    out
+}
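
A sketch of the new method (assuming the dune data): "response" gives
fitted dissimilarities, "working" the corresponding Gower matrix:

    library(vegan)
    data(dune, dune.env)
    m <- dbrda(dune ~ Management, dune.env, distance = "bray")
    D <- fitted(m, model = "CCA", type = "response")
    G <- fitted(m, model = "CA", type = "working")
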
diff --git a/R/goodness.cca.R b/R/goodness.cca.R
index f19a73a..cf96847 100644
--- a/R/goodness.cca.R
+++ b/R/goodness.cca.R
@@ -1,79 +1,65 @@
 `goodness.cca` <-
     function (object, display = c("species", "sites"), choices,
-              model = c("CCA", "CA"), statistic = c("explained", "distance"),
+              model = c("CCA", "CA"),
+              statistic = c("explained", "distance"),
               summarize = FALSE, addprevious = FALSE, ...)
 {
-    model <- match.arg(model)
     display <- match.arg(display)
-    if (inherits(object, "capscale") && display == "species") 
-        stop("display = \"species\" not available for 'capscale'")
-    if (inherits(object, "rda"))
-        NR <- nobs(object) - 1
-    else
-        NR <- 1
-    if (is.null(object$CCA)) 
-        model <- "CA"
-    if (is.null(object[[model]]) || object[[model]]$rank == 0) 
-        stop("model ", model, " is not available")
+    model <- match.arg(model)
     statistic <- match.arg(statistic)
+    if (!inherits(object, "cca"))
+        stop("can be used only with objects inheriting from 'cca'")
+    if (inherits(object, c("capscale", "dbrda")) && display == "species")
+        stop(gettextf("cannot analyse species with '%s'", object$method))
+    what <- if(display == "species") "v" else "u"
+    w <- weights(object, display = display)
+    pCCA <- object$pCCA$Fit
+    CA <- object[[model]][[what]]
+    eig <- object[[model]]$eig
+    eig <- eig[eig > 0]
+    ## imaginary dimensions for dbrda
+    if (inherits(object, "dbrda"))
+        CA <- cbind(CA, object[[model]][["imaginary.u"]])
+    att <- attributes(CA)
     if (inherits(object, "rda"))
-        cs <- 1
-    else {
-        cs <-
-            if (display == "species") object$colsum else object$rowsum
-    }
-    lambda2 <- sqrt(object[[model]]$eig)
-    ## collect contributions to the variation and scores
-    ptot <- ctot <- rtot <- 0
-    if (display == "species") {
-        if (!is.null(object$pCCA))
-            ptot <- diag(crossprod(object$pCCA$Fit)) / NR
-        if (!is.null(object$CCA)) {
-            Xbar <- qr.fitted(object$CCA$QR, object$CCA$Xbar)
-            ctot <- diag(crossprod(Xbar)) / NR
-        }
-        if (!is.null(object$CA))
-            rtot <- diag(crossprod(object$CA$Xbar)) / NR
-        v <- sweep(object[[model]]$v, 2, lambda2, "*")
-    }
-    else {
-        if (!is.null(object$pCCA))
-            ptot <- diag(tcrossprod(object$pCCA$Fit)) / NR
-        if (!is.null(object$CCA)) {
-            Xbar <- qr.fitted(object$CCA$QR, object$CCA$Xbar)
-            ctot <- diag(tcrossprod(Xbar)) / NR
-        }
-        if (!is.null(object$CA))
-            rtot <- diag(tcrossprod(object$CA$Xbar)) / NR
-        v <- sweep(object[[model]]$u, 2, lambda2, "*")
-    }
-    v <- sweep(v, 1, sqrt(cs), "*")
-    if (ncol(v) > 1)
-        vexp <- t(apply(v^2, 1, cumsum))
+        nr <- nobs(object) - 1
     else
-        vexp <- v^2
-    if (!missing(choices)) 
-        vexp <- vexp[, choices, drop = FALSE]
+        nr <- 1
+    if (!is.null(pCCA)) {
+        if (display == "sites")
+            pCCA <- t(pCCA)
+        if (inherits(object, "dbrda"))
+            pCCA <- diag(pCCA)
+        else
+            pCCA <- diag(crossprod(pCCA))/nr
+    }
+    CA <- t(apply(diag(w) %*% CA^2 %*% diag(eig), 1,
+                  cumsum))
+    totals <- inertcomp(object, display = display)
+    comps <- colnames(totals)
     if (statistic == "explained") {
-        tot <- ptot + ctot + rtot
+        tot <- rowSums(totals)
         if (addprevious) {
-            if (!is.null(object$pCCA))
-                vexp <- sweep(vexp, 1, ptot, "+")
-            if (model == "CA" && !is.null(object$CCA))
-                vexp <- sweep(vexp, 1, ctot, "+")
+            if ("pCCA" %in% comps)
+                CA <- sweep(CA, 1, totals[,"pCCA"], "+")
+            if (model == "CA" && "CCA" %in% comps)
+                CA <- sweep(CA, 1, totals[, "CCA"], "+")
         }
-        vexp <- sweep(vexp, 1, tot, "/")
-    }
-    else {
-        tot <- rtot
-        if (model == "CCA")
-            tot <- tot + ctot
-        vexp <- sweep(-(vexp), 1, tot, "+")
-        vexp[vexp < 0] <- 0
-        vexp <- sqrt(vexp)
-        vexp <- sweep(vexp, 1, sqrt(cs), "/")
+        CA <- sweep(CA, 1, tot, "/")
+    } else {
+        if ("CA" %in% comps)
+            tot <- totals[,"CA"]
+        else
+            tot <- 0
+        if (model == "CCA" && "CCA" %in% comps)
+            tot <- totals[,"CCA"] + tot
+        CA <- sweep(-CA, 1, tot, "+")
+        CA[CA < 0] <- 0
+        CA <- sqrt(CA)
+        CA <- sweep(CA, 1, sqrt(w), "/")
     }
-    if (summarize) 
-        vexp <- vexp[, ncol(vexp)]
-    vexp
+    attributes(CA) <- att
+    if (summarize)
+        CA <- CA[,ncol(CA)]
+    CA
 }
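
A sketch of the rewritten goodness() (assuming the dune data): it now
builds on inertcomp() and returns cumulative proportions per axis:

    library(vegan)
    data(dune, dune.env)
    m <- cca(dune ~ Management, dune.env)
    head(goodness(m, display = "species", statistic = "explained"))
    goodness(m, display = "sites", model = "CCA", summarize = TRUE)
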
diff --git a/R/hiersimu.default.R b/R/hiersimu.default.R
index 57ccccf..5d0da8c 100644
--- a/R/hiersimu.default.R
+++ b/R/hiersimu.default.R
@@ -1,11 +1,12 @@
 hiersimu.default <-
 function(y, x, FUN, location = c("mean", "median"),
-relative = FALSE, drop.highest = FALSE, nsimul=99, ...)
+         relative = FALSE, drop.highest = FALSE, nsimul=99,
+         method = "r2dtable", ...)
 {
     ## evaluate formula
     lhs <- as.matrix(y)
     if (missing(x))
-        x <- cbind(level_1=seq_len(nrow(lhs)), 
+        x <- cbind(level_1=seq_len(nrow(lhs)),
             leve_2=rep(1, nrow(lhs)))
     rhs <- data.frame(x)
     rhs[] <- lapply(rhs, as.factor)
@@ -46,9 +47,7 @@ relative = FALSE, drop.highest = FALSE, nsimul=99, ...)
         ftmp[[i]] <- as.formula(paste("~", tlab[i], "- 1"))
     }
 
-    ## is there a method/burnin/thin in ... ?
-    method <- if (is.null(list(...)$method))
-        "r2dtable" else list(...)$method
+    ## is there burnin/thin in ... ?
     burnin <- if (is.null(list(...)$burnin))
         0 else list(...)$burnin
     thin <- if (is.null(list(...)$thin))
diff --git a/R/hiersimu.formula.R b/R/hiersimu.formula.R
index a2b9b11..260fcbc 100644
--- a/R/hiersimu.formula.R
+++ b/R/hiersimu.formula.R
@@ -1,18 +1,16 @@
 `hiersimu.formula` <-
     function(formula, data, FUN, location = c("mean", "median"),
-             relative = FALSE, drop.highest = FALSE, nsimul=99, ...)
+             relative = FALSE, drop.highest = FALSE, nsimul=99,
+             method = "r2dtable", ...)
 {
     ## evaluate formula
     if (missing(data))
         data <- parent.frame()
     tmp <- hierParseFormula(formula, data)
-    lhs <- tmp$lhs
-    rhs <- tmp$rhs
-
     ## run simulations
-    sim <- hiersimu.default(lhs, rhs, FUN = FUN, location = location,
+    sim <- hiersimu.default(tmp$lhs, tmp$rhs, FUN = FUN, location = location,
                             relative = relative, drop.highest = drop.highest,
-                            nsimul = nsimul, ...)
+                            nsimul = nsimul, method = method, ...)
     call <- match.call()
     call[[1]] <- as.name("hiersimu")
     attr(sim, "call") <- call
diff --git a/R/inertcomp.R b/R/inertcomp.R
index c3241c9..e6591d3 100644
--- a/R/inertcomp.R
+++ b/R/inertcomp.R
@@ -1,42 +1,48 @@
-"inertcomp" <-
-    function (object, display = c("species", "sites"), statistic = c("explained", 
-                                                       "distance"), proportional = FALSE) 
+`inertcomp` <-
+    function (object, display = c("species", "sites"),
+              statistic = c("explained", "distance"), proportional = FALSE)
 {
     display <- match.arg(display)
     statistic <- match.arg(statistic)
     if (!inherits(object, "cca"))
         stop("can be used only with objects inheriting from 'cca'")
-    if (inherits(object, "capscale") && display == "species")
-        stop("cannot analyse species with 'capscale'")
+    if (inherits(object, c("capscale", "dbrda")) && display == "species")
+        stop(gettextf("cannot analyse species with '%s'", object$method))
+    what <- if(display == "species") "v" else "u"
+    w <- weights(object, display = display)
     pCCA <- object$pCCA$Fit
-    CCA <- object$CCA$Xbar
-    CA <- object$CA$Xbar
+    CCA <- object$CCA[[what]]
+    CA <- object$CA[[what]]
+    ## row names will be lost later: save here
+    labels <- if(!is.null(CA))
+                  rownames(CCA)
+              else if(!is.null(CCA))
+                  rownames(CA)
+              else
+                  rownames(pCCA)
+    ## imaginary dimensions for dbrda
+    if (inherits(object, "dbrda")) {
+        CCA <- cbind(CCA, object$CCA$imaginary.u)
+        CA <- cbind(CA, object$CA$imaginary.u)
+    }
     if (inherits(object, "rda")) {
-        nr <- nrow(CA) - 1
-        if (is.null(nr)) 
-            nr <- nrow(CCA) - 1
-        if (is.null(nr)) 
-            nr <- nrow(pCCA) - 1
+        nr <- nobs(object) - 1
     }
     else {
         nr <- 1
     }
     if (!is.null(pCCA)) {
-        if (display == "sites") 
+        if (display == "sites")
             pCCA <- t(pCCA)
-        pCCA <- diag(crossprod(pCCA))/nr
-    }
-    if (!is.null(CCA)) {
-        CCA <- qr.fitted(object$CCA$QR, CCA)
-        if (display == "sites") 
-            CCA <- t(CCA)
-        CCA <- diag(crossprod(CCA))/nr
-    }
-    if (!is.null(CA)) {
-        if (display == "sites") 
-            CA <- t(CA)
-        CA <- diag(crossprod(CA))/nr
+        if (inherits(object, "dbrda"))
+            pCCA <- diag(pCCA)
+        else
+            pCCA <- diag(crossprod(pCCA))/nr
     }
+    if (!is.null(CCA))
+        CCA <- rowSums(diag(w) %*% CCA^2 %*% diag(object$CCA$eig))
+    if (!is.null(CA))
+        CA <- rowSums(diag(w) %*% CA^2 %*% diag(object$CA$eig))
     out <- cbind(pCCA, CCA, CA)
     if (statistic == "distance" && !proportional) {
         w <- weights(object, display = display)
@@ -46,7 +52,9 @@
             w <- w[-object$na.action]
         out <- sweep(out, 1, w, "/")
     }
-    if (proportional) 
+    if (proportional)
         out <- sweep(out, 1, rowSums(out), "/")
+    ## get back names
+    rownames(out) <- labels
     out
 }
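
A sketch of the rewritten inertcomp() (assuming the dune data): each
row decomposes a site's (or species') inertia into pCCA, CCA and CA
components:

    library(vegan)
    data(dune, dune.env)
    m <- rda(dune ~ Management + Condition(Moisture), dune.env)
    head(inertcomp(m, display = "sites", proportional = TRUE))
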
diff --git a/R/intersetcor.R b/R/intersetcor.R
index 88fb403..419843e 100644
--- a/R/intersetcor.R
+++ b/R/intersetcor.R
@@ -1,4 +1,5 @@
-intersetcor <- function(object) 
+`intersetcor` <-
+    function(object) 
 {
     if (!inherits(object, "cca"))
         stop("can be used only with objects inheriting from 'cca'")
diff --git a/R/lines.spantree.R b/R/lines.spantree.R
index 6909fa7..3d4eb5f 100644
--- a/R/lines.spantree.R
+++ b/R/lines.spantree.R
@@ -1,10 +1,16 @@
 `lines.spantree` <-
-    function (x, ord, display = "sites", ...)
+    function (x, ord, display = "sites", col = 1, ...)
 {
     ord <- scores(ord, display = display, ...)
     tree <- x$kid
+    ## recycle colours and mix the colours of joined points for line segments
+    col <- rep(col, length = nrow(ord))
+    col <- col2rgb(col)/255
+    ## average colour for pairs of points
+    col <- rgb(t(col[,-1] + col[,tree])/2)
     if (x$n > 1)
         ordiArgAbsorber(ord[-1, 1], ord[-1, 2], ord[tree, 1], ord[tree, 2],
+                        col = col,
                         FUN = segments, ...)
     invisible()
 }
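
A sketch of the new col argument (assuming the dune data and vegan's
as.hclust method for spantree): each segment gets the average colour of
its two end points:

    library(vegan)
    data(dune)
    tr <- spantree(vegdist(dune))
    ord <- metaMDS(dune, trace = 0)
    plot(ord, display = "sites")
    lines(tr, ord, col = cutree(as.hclust(tr), k = 3))
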
diff --git a/R/make.commsim.R b/R/make.commsim.R
index c4a3bf6..e08e19f 100644
--- a/R/make.commsim.R
+++ b/R/make.commsim.R
@@ -75,11 +75,13 @@ function(method)
         "quasiswap" = commsim(method="quasiswap", binary=TRUE, isSeq=FALSE,
         mode="integer",
         fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            if (nr < 2L || nc < 2)
+                stop("needs at least 2 items")
             out <- array(unlist(r2dtable(n, rs, cs)), c(nr, nc, n))
             storage.mode(out) <- "integer"
             for (k in seq_len(n))
                 out[,,k] <- .C("quasiswap",
-                    m = out[,,k], nr, nc, PACKAGE = "vegan")$m
+                    m = out[,,k], nr, nc, thin, PACKAGE = "vegan")$m
             out
         }),
         "swap" = commsim(method="swap", binary=TRUE, isSeq=TRUE,
@@ -105,9 +107,25 @@ function(method)
                     m = out[,,k], nr, nc, thin, PACKAGE = "vegan")$m
             out
         }),
+        "curveball" = commsim(method="curveball", binary=TRUE, isSeq=TRUE,
+        mode="integer",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            out <- array(0L, c(nr, nc, n))
+            out[,,1] <- .C("curveball", m = x, nr, nc, thin,
+                           integer(2L*nc),
+                           PACKAGE = "vegan")$m
+            for (k in seq_len(n-1))
+                out[,,k+1] <- .C("curveball",
+                                 m = out[,,k], nr, nc, thin,
+                                 integer(2L*nc),
+                                 PACKAGE = "vegan")$m
+            out
+        }),
         "backtrack" = commsim(method="backtrack", binary=TRUE, isSeq=FALSE,
         mode="integer",
         fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            if (nr < 2L || nc < 2)
+                stop("needs at least 2 items")
             btrfun <- function() {
                 all <- matrix(as.integer(1:(nr * nc)), nrow = nr, ncol = nc)
                 out <- matrix(0L, nrow = nr, ncol = nc)
@@ -129,15 +147,15 @@ function(method)
                 for (i in seq_len(10000)) {
                     oldout <- out
                     oldn <- sum(out)
-                    drop <- sample(all[out == 1L], ndrop)
+                    drop <- sample(all[as.logical(out)], ndrop)
                     out[drop] <- 0L
-                    candi <- outer(rowSums(out) < rs, colSums(out) < cs, "&") & out == 0L
+                    candi <- outer(rowSums(out) < rs, colSums(out) < cs) * !out
                     while (sum(candi) > 0) {
                         if (sum(candi) > 1)
-                          ij <- sample(all[candi], 1)
-                        else ij <- all[candi]
+                          ij <- sample(all[as.logical(candi)], 1)
+                        else ij <- all[as.logical(candi)]
                         out[ij] <- 1L
-                        candi <- outer(rowSums(out) < rs, colSums(out) < cs, "&") & out == 0
+                        candi <- outer(rowSums(out) < rs, colSums(out) < cs) * !out
                     }
                     if (sum(out) >= fill)
                         break
@@ -157,6 +175,8 @@ function(method)
         "r2dtable" = commsim(method="r2dtable", binary=FALSE, isSeq=FALSE,
         mode="integer",
         fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            if (nr < 2L || nc < 2)
+                stop("needs at least 2 items")
             out <- array(unlist(r2dtable(n, rs, cs)), c(nr, nc, n))
             storage.mode(out) <- "integer"
             out
@@ -164,6 +184,8 @@ function(method)
         "swap_count" = commsim(method="swap_count", binary=FALSE, isSeq=TRUE,
         mode="integer",
         fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            if (nr < 2L || nc < 2)
+                stop("needs at least 2 items")
             out <- array(0L, c(nr, nc, n))
             out[,,1] <- .C("swapcount",
                 m = x, nr, nc, thin, PACKAGE = "vegan")$m
@@ -175,6 +197,8 @@ function(method)
         "quasiswap_count" = commsim(method="quasiswap_count", binary=FALSE, isSeq=FALSE,
         mode="integer",
         fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            if (nr < 2L || nc < 2)
+                stop("needs at least 2 items")
             out <- array(unlist(r2dtable(n, rs, cs)), c(nr, nc, n))
             storage.mode(out) <- "integer"
             for (k in seq_len(n))
@@ -185,12 +209,15 @@ function(method)
         "swsh_samp" = commsim(method="swsh_samp", binary=FALSE, isSeq=FALSE,
         mode="double",
         fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            if (nr < 2L || nc < 2)
+                stop("needs at least 2 items")
             nz <- x[x > 0]
             out <- array(unlist(r2dtable(fill, rf, cf)), c(nr, nc, n))
             storage.mode(out) <- "double"
             for (k in seq_len(n)) {
                 out[,,k] <- .C("quasiswap",
-                    m = as.integer(out[,,k]), nr, nc, PACKAGE = "vegan")$m
+                               m = as.integer(out[,,k]), nr, nc, thin,
+                               PACKAGE = "vegan")$m
                 out[,,k][out[,,k] > 0] <- sample(nz) # we assume that length(nz)>1
             }
             out
@@ -198,6 +225,8 @@ function(method)
         "swsh_both" = commsim(method="swsh_both", binary=FALSE, isSeq=FALSE,
         mode="integer",
         fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            if (nr < 2L || nc < 2)
+                stop("needs at least 2 items")
             indshuffle <- function(x) {
                 drop(rmultinom(1, sum(x), rep(1, length(x))))
             }
@@ -206,7 +235,8 @@ function(method)
             storage.mode(out) <- "integer"
             for (k in seq_len(n)) {
                 out[,,k] <- .C("quasiswap",
-                    m = out[,,k], nr, nc, PACKAGE = "vegan")$m
+                               m = out[,,k], nr, nc, thin,
+                               PACKAGE = "vegan")$m
                 out[,,k][out[,,k] > 0] <- indshuffle(nz - 1L) + 1L  # we assume that length(nz)>1
             }
             out
@@ -214,12 +244,15 @@ function(method)
         "swsh_samp_r" = commsim(method="swsh_samp_r", binary=FALSE, isSeq=FALSE,
         mode="double",
         fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            if (nr < 2L || nc < 2)
+                stop("needs at least 2 items")
             out <- array(unlist(r2dtable(fill, rf, cf)), c(nr, nc, n))
             storage.mode(out) <- "double"
             I <- seq_len(nr)
             for (k in seq_len(n)) {
                 out[,,k] <- .C("quasiswap",
-                    m = as.integer(out[,,k]), nr, nc, PACKAGE = "vegan")$m
+                               m = as.integer(out[,,k]), nr, nc, thin,
+                               PACKAGE = "vegan")$m
                 for (i in I) {
                     nz <- x[i,][x[i,] > 0]
                     if (length(nz) == 1)
@@ -233,12 +266,15 @@ function(method)
         "swsh_samp_c" = commsim(method="swsh_samp_c", binary=FALSE, isSeq=FALSE,
         mode="double",
         fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            if (nr < 2L || nc < 2)
+                stop("needs at least 2 items")
             out <- array(unlist(r2dtable(fill, rf, cf)), c(nr, nc, n))
             storage.mode(out) <- "double"
             J <- seq_len(nc)
             for (k in seq_len(n)) {
                 out[,,k] <- .C("quasiswap",
-                    m = as.integer(out[,,k]), nr, nc, PACKAGE = "vegan")$m
+                               m = as.integer(out[,,k]), nr, nc, thin,
+                               PACKAGE = "vegan")$m
                 for (j in J) {
                     nz <- x[,j][x[,j] > 0]
                     if (length(nz) == 1)
@@ -252,6 +288,8 @@ function(method)
         "swsh_both_r" = commsim(method="swsh_both_r", binary=FALSE, isSeq=FALSE,
         mode="integer",
         fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            if (nr < 2L || nc < 2)
+                stop("needs at least 2 items")
             indshuffle <- function(x) {
                 drop(rmultinom(1, sum(x), rep(1, length(x))))
             }
@@ -260,7 +298,8 @@ function(method)
             storage.mode(out) <- "integer"
             for (k in seq_len(n)) {
                 out[,,k] <- .C("quasiswap",
-                    m = out[,,k], nr, nc, PACKAGE = "vegan")$m
+                               m = out[,,k], nr, nc, thin,
+                               PACKAGE = "vegan")$m
                 for (i in I) {
                     nz <- as.integer(x[i,][x[i,] > 0])
                     if (length(nz) == 1)
@@ -274,6 +313,8 @@ function(method)
         "swsh_both_c" = commsim(method="swsh_both_c", binary=FALSE, isSeq=FALSE,
         mode="integer",
         fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            if (nr < 2L || nc < 2)
+                stop("needs at least 2 items")
             indshuffle <- function(x) {
                 drop(rmultinom(1, sum(x), rep(1, length(x))))
             }
@@ -282,7 +323,8 @@ function(method)
             storage.mode(out) <- "integer"
             for (k in seq_len(n)) {
                 out[,,k] <- .C("quasiswap",
-                    m = out[,,k], nr, nc,  PACKAGE = "vegan")$m
+                               m = out[,,k], nr, nc, thin,
+                               PACKAGE = "vegan")$m
                 for (j in J) {
                     nz <- as.integer(x[,j][x[,j] > 0])
                     if (length(nz) == 1)
@@ -331,7 +373,8 @@ function(method)
             J <- seq_len(nc)
             for (k in seq_len(n))
                 for (j in J)
-                    out[, j, k] <- sample(x[,j])
+                    out[, j, k] <- if (nr < 2)
+                        x[,j] else sample(x[,j])
             out
         }),
         "r0_samp" = commsim(method="r0_samp", binary=FALSE, isSeq=FALSE,
@@ -341,7 +384,8 @@ function(method)
             I <- seq_len(nr)
             for (k in seq_len(n))
                 for (i in I)
-                    out[i, , k] <- sample(x[i,])
+                    out[i, , k] <- if (nc < 2)
+                        x[i,] else sample(x[i,])
             out
         }),
         "r00_ind" = commsim(method="r00_ind", binary=FALSE, isSeq=FALSE,
@@ -406,8 +450,11 @@ function(method)
             J <- seq_len(nc)
             for (k in seq_len(n))
                 for (j in J) {
-                    out[,j,k][x[,j] > 0] <- indshuffle(x[,j][x[,j] > 0] - 1L) + 1L
-                    out[,j,k] <- sample(out[,j,k])
+                    if (sum(x[,j]) > 0) {
+                        out[,j,k][x[,j] > 0] <- indshuffle(x[,j][x[,j] > 0] - 1L) + 1L
+                        out[,j,k] <- if (nr < 2)
+                            out[,j,k] else sample(out[,j,k])
+                    }
                 }
             out
         }),
@@ -421,8 +468,11 @@ function(method)
             I <- seq_len(nr)
             for (k in seq_len(n))
                 for (i in I) {
-                    out[i,,k][x[i,] > 0] <- indshuffle(x[i,][x[i,] > 0] - 1L) + 1L
-                    out[i,,k] <- sample(out[i,,k])
+                    if (sum(x[i,]) > 0) {
+                        out[i,,k][x[i,] > 0] <- indshuffle(x[i,][x[i,] > 0] - 1L) + 1L
+                        out[i,,k] <- if (nc < 2)
+                            out[i,,k] else sample(out[i,,k])
+                    }
                 }
             out
         })
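The hunks above thread the 'thin' argument through to the C "quasiswap"
routine of the swsh_* null models. A minimal sketch of how this surfaces
at the user level, assuming vegan's standard nullmodel()/simulate()
interface where 'thin' is forwarded to the algorithm:

    library(vegan)
    data(mite)
    ## swsh_both_r: binary quasiswap, then shuffle of non-zero values;
    ## 'thin' adds extra swapping iterations in the C routine
    nm  <- nullmodel(mite, "swsh_both_r")
    sim <- simulate(nm, nsim = 9, thin = 5)
    dim(sim)   # nrow(mite) x ncol(mite) x 9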
diff --git a/R/metaMDS.R b/R/metaMDS.R
index f6a7eb6..6969e9c 100644
--- a/R/metaMDS.R
+++ b/R/metaMDS.R
@@ -1,11 +1,13 @@
 `metaMDS` <-
-    function (comm, distance = "bray", k = 2, trymax = 20,
+    function (comm, distance = "bray", k = 2, try = 20, trymax = 20,
               engine = c("monoMDS", "isoMDS"), 
               autotransform = TRUE, noshare = (engine == "isoMDS"),
               wascores = TRUE, expand = TRUE, trace = 1,
               plot = FALSE, previous.best,  ...) 
 {
     engine <- match.arg(engine)
+    ## take care that trymax >= try
+    trymax <- max(trymax, try)
     ## This could be a character vector of length > 1L
     commname <- deparse(substitute(comm), width.cutoff = 500L)
     if (length(commname) > 1L) {
@@ -27,8 +29,8 @@
         if (is.null(attr(dis, "method")))
             attr(dis, "method") <- "user supplied"
         wascores <- FALSE
-    } else if (length(dim(comm) == 2) && ncol(comm) == nrow(comm) &&
-                all(comm == t(comm))) {
+    } else if ((is.matrix(comm) || is.data.frame(comm)) &&
+               isSymmetric(unname(as.matrix(comm)))) {
         dis <- as.dist(comm)
         attr(dis, "method") <- "user supplied"
         wascores <- FALSE
@@ -44,7 +46,8 @@
         previous.best <- NULL
     if (trace > 2)
         cat(">>> NMDS iterations\n")
-    out <- metaMDSiter(dis, k = k, trymax = trymax, trace = trace, 
+    out <- metaMDSiter(dis, k = k, try = try, trymax = trymax,
+                       trace = trace,
                        plot = plot, previous.best = previous.best,
                        engine = engine, ...)
     ## Nearly zero stress is usually not a good thing but a symptom of
@@ -55,6 +58,11 @@
     if (trace > 2)
         cat(">>> Post-processing NMDS\n")
     points <- postMDS(out$points, dis, plot = max(0, plot - 1), ...)
+    ## rescale monoMDS scaling if postMDS scaled 'points'
+    if (!is.null(scl <- attr(points, "internalscaling"))) {
+        out$dist <- out$dist/scl
+        out$dhat <- out$dhat/scl
+    }
     if (is.null(rownames(points))) 
         rownames(points) <- rownames(comm)
     wa <- if (wascores) {
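The new 'try'/'trymax' pair separates the minimum from the maximum number
of random starts: at least 'try' runs are always performed, and further
runs up to 'trymax' happen only while no convergent solution has been
found. A hedged usage sketch:

    library(vegan)
    data(dune)
    ## at least 20 random starts, at most 50 if convergence is not reached
    m <- metaMDS(dune, distance = "bray", k = 2, try = 20, trymax = 50)
    m$converged   # convergence status collected by metaMDSiter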
diff --git a/R/metaMDSdist.R b/R/metaMDSdist.R
index 8b91648..31f20ec 100644
--- a/R/metaMDSdist.R
+++ b/R/metaMDSdist.R
@@ -5,8 +5,9 @@
 {
     ## metaMDSdist should get a raw data matrix, but if it gets a
     ## 'dist' object return that unchanged and quit silently.
-    if (inherits(comm, "dist")  || ncol(comm) == nrow(comm) &&
-        all(comm == t(comm)))
+    if (inherits(comm, "dist")  ||
+        ((is.matrix(comm) || is.data.frame(comm)) &&
+             isSymmetric(unname(as.matrix(comm)))))
         return(comm)
     distname <- deparse(substitute(distfun))
     distfun <- match.fun(distfun)
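The replaced test `length(dim(comm) == 2)` had a misplaced parenthesis:
the comparison is evaluated first, so the expression is the length of a
logical vector and never the intended `length(dim(comm)) == 2`. The new
isSymmetric() test is explicit and also covers data frames; a plain-R
illustration:

    m <- matrix(1:6, nrow = 2)     # 2 x 3, not even square
    length(dim(m) == 2)            # 2 -- "true" for anything with dims
    length(dim(m)) == 2            # what the old code presumably meant
    ## the new, explicit test used in metaMDS and metaMDSdist
    (is.matrix(m) || is.data.frame(m)) && isSymmetric(unname(as.matrix(m)))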
diff --git a/R/metaMDSiter.R b/R/metaMDSiter.R
index b54044a..eb223ab 100644
--- a/R/metaMDSiter.R
+++ b/R/metaMDSiter.R
@@ -1,5 +1,5 @@
 `metaMDSiter` <-
-    function (dist, k = 2, trymax = 20, trace = 1, plot = FALSE, 
+    function (dist, k = 2, try = 20, trymax = 20, trace = 1, plot = FALSE,
               previous.best, engine = "monoMDS", maxit = 200,
               parallel = getOption("mc.cores"), ...) 
 {
@@ -14,16 +14,21 @@
     ## set tracing for engines
     isotrace <- max(0, trace - 1)
     monotrace <- engine == "monoMDS" && trace > 1
+    ## explain monoMDS convergence codes (sol$icause)
+    monomsg <- c("no. of iterations >= maxit",
+                 "stress < smin",
+                 "stress ratio > sratmax",
+                 "scale factor of the gradient < sfgrmin")
+    ## monoMDS trace >= 2
     monostop <- function(mod) {
         if (mod$maxits == 0)
             return(NULL)
-        lab <- switch(mod$icause,
-                      "no. of iterations >= maxit",
-                      "stress < smin",
-                      "stress ratio > sratmax",
-                      "scale factor of the gradient < sfgrmin")
+        lab <- monomsg[mod$icause]
         cat("   ", mod$iters, "iterations: ", lab, "\n")
     }
+    ## collect monoMDS convergence code for trace
+    if (trace && engine == "monoMDS")
+        stopcoz <- numeric(4)
     ## Previous best or initial configuration 
     if (!missing(previous.best) && !is.null(previous.best)) {
         ## check if previous.best is from metaMDS or isoMDS
@@ -86,7 +91,7 @@
     else
         nclus <- parallel
     ## proper iterations
-    while(tries < trymax && !converged) {
+    while(tries < try || tries < trymax && !converged) {
         init <- replicate(nclus, initMDS(dist, k = k))
         if (nclus > 1) isotrace <- FALSE
         if (isParal) {
@@ -121,6 +126,8 @@
             tries <- tries + 1
             if (trace)
                 cat("Run", tries, "stress", stry[[i]]$stress, "\n")
+            if (trace && engine == "monoMDS")
+                stopcoz[stry[[i]]$icause] <- stopcoz[stry[[i]]$icause] + 1L
             if (monotrace)
                 monostop(stry[[i]])
             if ((s0$stress - stry[[i]]$stress) > -EPS) {
@@ -137,17 +144,29 @@
                 }
                 summ <- summary(pro)
                 if (trace) 
-                    cat("... procrustes: rmse", summ$rmse, " max resid", 
+                    cat("... Procrustes: rmse", summ$rmse, " max resid",
                         max(summ$resid), "\n")
                 if (summ$rmse < RMSELIM && max(summ$resid) < RESLIM) {
                     if (trace) 
-                        cat("*** Solution reached\n")
+                        cat("... Similar to previous best\n")
                     converged <- TRUE
                 }
             }
             flush.console()
         }
     }
+    if (trace) {
+        if (converged)
+            cat("*** Solution reached\n")
+        else if (engine == "monoMDS") {
+            cat(gettextf(
+                "*** No convergence -- %s stopping criteria:\n",
+                engine))
+            for (i in seq_along(stopcoz))
+                if (stopcoz[i] > 0)
+                    cat(gettextf("%6d: %s\n", stopcoz[i], monomsg[i]))
+        }
+    }
     ## stop socket cluster
     if (isParal && !isMulticore && !hasClus)
         stopCluster(parallel)
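Note that the new loop guard relies on R operator precedence: `&&` binds
tighter than `||`, so the condition always forces at least 'try' starts
and then continues up to 'trymax' only while unconverged:

    ## explicitly parenthesized equivalent of the loop guard
    tries <- 0; try <- 20; trymax <- 50; converged <- FALSE
    tries < try || (tries < trymax && !converged)   # TRUE: keep iterating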
diff --git a/R/monoMDS.R b/R/monoMDS.R
index a3a5fd5..a5bf8f1 100644
--- a/R/monoMDS.R
+++ b/R/monoMDS.R
@@ -6,9 +6,9 @@
              sratmax=0.99999, ...) 
 {
     ## Check that 'dist' are distances or a symmetric square matrix
-    if (!(inherits(dist, "dist") || (is.matrix(dist) || is.data.frame(dist))
-          && ncol(dist) == nrow(dist)
-          && isTRUE(all.equal(dist[lower.tri(dist)], t(dist)[lower.tri(dist)]))))
+    if (!(inherits(dist, "dist") ||
+              ((is.matrix(dist) || is.data.frame(dist)) &&
+                   isSymmetric(unname(as.matrix(dist))))))
         stop("'dist' must be a distance object (class \"dist\") or a symmetric square matrix")
     if (any(dist < -sqrt(.Machine$double.eps), na.rm = TRUE))
         warning("some dissimilarities are negative -- is this intentional?")
diff --git a/R/mso.R b/R/mso.R
index 39f42cd..fff0a8d 100644
--- a/R/mso.R
+++ b/R/mso.R
@@ -2,6 +2,8 @@
     function (object.cca, object.xy, grain = 1, round.up = FALSE,
               permutations = 0) 
 {
+    if (inherits(object.cca, "dbrda"))
+        stop("'mso' is not yet implemented for 'dbrda'\ncontact the developers or submit a pull request with your code on GitHub")
     EPS <- sqrt(.Machine$double.eps)
     if (inherits(object.cca, "mso")) {
         rm <- which(class(object.cca) == "mso")
@@ -9,7 +11,7 @@
     }
     object <- object.cca
     xy <- object.xy
-    N <- nrow(object$CA$Xbar)
+    N <- nobs(object)
     if (inherits(object, "rda")) 
         N <- 1
     ## we expect xy are coordinates and calculate distances, but a
diff --git a/R/multipart.default.R b/R/multipart.default.R
index 90702de..2ece325 100644
--- a/R/multipart.default.R
+++ b/R/multipart.default.R
@@ -1,6 +1,7 @@
 `multipart.default` <-
     function(y, x, index=c("renyi", "tsallis"), scales = 1,
-             global = FALSE, relative = FALSE, nsimul=99, ...)
+             global = FALSE, relative = FALSE, nsimul=99,
+             method = "r2dtable", ...)
 {
     if (length(scales) > 1)
         stop("length of 'scales' must be 1")
@@ -49,9 +50,7 @@
         ftmp[[i]] <- as.formula(paste("~", tlab[i], "- 1"))
     }
 
-    ## is there a method/burnin/thin in ... ?
-    method <- if (is.null(list(...)$method))
-        "r2dtable" else list(...)$method
+    ## is there burnin/thin in ... ?
     burnin <- if (is.null(list(...)$burnin))
         0 else list(...)$burnin
     thin <- if (is.null(list(...)$thin))
diff --git a/R/multipart.formula.R b/R/multipart.formula.R
index e6c089e..e7b6135 100644
--- a/R/multipart.formula.R
+++ b/R/multipart.formula.R
@@ -1,18 +1,16 @@
 `multipart.formula` <-
     function(formula, data, index=c("renyi", "tsallis"), scales = 1,
-             global = FALSE, relative = FALSE, nsimul=99, ...)
+             global = FALSE, relative = FALSE, nsimul=99,
+             method = "r2dtable", ...)
 {
     ## evaluate formula
     if (missing(data))
         data <- parent.frame()
     tmp <- hierParseFormula(formula, data)
-    lhs <- tmp$lhs
-    rhs <- tmp$rhs
-
     ## run simulations
-    sim <- multipart.default(lhs, rhs, index = index, scales = scales,
+    sim <- multipart.default(tmp$lhs, tmp$rhs, index = index, scales = scales,
                              global = global, relative = relative,
-                             nsimul = nsimul, ...)
+                             nsimul = nsimul, method = method, ...)
     call <- match.call()
     call[[1]] <- as.name("multipart")
     attr(sim, "call") <- call
diff --git a/R/nesteddisc.R b/R/nesteddisc.R
index c31dbca..def6edf 100644
--- a/R/nesteddisc.R
+++ b/R/nesteddisc.R
@@ -10,6 +10,7 @@
 
     ## starting values and CONSTANTS
     NALL <- 7
+    allperm <- factorial(NALL)
     ties <- FALSE
     trace <- FALSE
     ## Code
@@ -28,31 +29,22 @@
     ## Function to evaluate discrepancy
     FUN <- function(x) sum(comm[col(comm)[,x] <= rowSums(comm)] == 0)
     Ad <- FUN(x)
-    ## Go through all le-items and permute ties. Functions allPerms
-    ## and shuffleSet are in permute package.
+    ## Go through all le-items and permute ties. Function shuffleSet
+    ## is in the permute package; its minperm argument triggers complete
+    ## enumeration when there are at most NALL tied items (NALL! orders).
     for (i in seq_along(le)) {
         if (le[i] > 1) {
             take <- x
-            idx <- (1:le[i]) + cle[i]
+            idx <- seq_len(le[i]) + cle[i]
             ## Can swaps influence discrepancy?
             if (idx[1] > rs[2] || idx[le[i]] < rs[1])
                 next
-            ## Complete enumeration if no. of tied value <= NALL
-            if (le[i] <= NALL) {
-                perm <- matrix(allPerms(le[i]), ncol=le[i]) + cle[i]
-                ## Take at maximum NITER cases from complete enumeration
-                if (nrow(perm) > niter) {
-                    perm <- perm[sample.int(nrow(perm), niter),]
-                    ties <- TRUE
-                }
-            }
-            ## No complete enumeration, but a sample and potentially
-            ## duplicated orders
-            else {
+            perm <- shuffleSet(le[i], niter, control = how(minperm = allperm),
+                               quietly = TRUE)
+            ## maxperm is a double -- subtract 0.5 as EPS before comparing
+            if ((attr(perm, "control")$maxperm - 0.5) > niter)
                 ties <- TRUE
-                perm <- shuffleSet(le[i], niter)
-                perm <- perm + cle[i]
-            }
+            perm <- matrix(perm, ncol = le[i]) + cle[i]
             vals <- sapply(1:nrow(perm), function(j) {
                 take[idx] <- perm[j,]
                 FUN(take)
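nesteddisc now lets permute::shuffleSet decide between complete
enumeration and random sampling: with minperm set to 7!, every tie group
of up to 7 items is enumerated exhaustively. A standalone sketch of the
mechanism:

    library(permute)
    ## 3 tied items have only 3! = 6 possible orders, well below
    ## minperm, so shuffleSet enumerates them instead of sampling 100
    p <- shuffleSet(3, 100, control = how(minperm = factorial(7)),
                    quietly = TRUE)
    nrow(p)                        # all distinct orders, not 100 samples
    attr(p, "control")$maxperm     # consulted above to detect sampling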
diff --git a/R/nobs.R b/R/nobs.R
index 75e7465..7569f09 100644
--- a/R/nobs.R
+++ b/R/nobs.R
@@ -1,7 +1,9 @@
 ### R 2.13.0 introduces nobs() method to get the number of
 ### observations. This file provides methods for vegan classes.
 
-`nobs.adonis` <- function(object, ...) NROW(object$coef.sites)
+`nobs.adonis` <- function(object, ...) NCOL(object$coef.sites)
+
+`nobs.anova.cca` <- function(object, ...) NA
 
 `nobs.betadisper` <- function(object, ...) length(object$distances)
 
diff --git a/R/oecosimu.R b/R/oecosimu.R
index 4bcc0d8..9a4b3fe 100644
--- a/R/oecosimu.R
+++ b/R/oecosimu.R
@@ -19,6 +19,7 @@
             else
                 tmp
     }
+    chains <- NULL
     if (inherits(comm, "simmat")) {
         x <- comm
         method <- attr(x, "method")
@@ -26,6 +27,9 @@
         if (nsimul == 1)
             stop("only one simulation in ", sQuote(deparse(substitute(comm))))
         comm <- attr(comm, "data")
+        #thin <- attr(comm, "thin")
+        burnin <- attr(x, "start") - attr(x, "thin")
+        chains <- attr(x, "chains")
         simmat_in <- TRUE
     } else {
         simmat_in <- FALSE
@@ -50,7 +54,7 @@
     if (!simmat_in && !is.na(batchsize)) {
         commsize <- object.size(comm)
         totsize <- commsize * nsimul
-        if (totsize > batchsize) { 
+        if (totsize > batchsize) {
             nbatch <- ceiling(unclass(totsize/batchsize))
             batches <- diff(round(seq(0, nsimul, by = nsimul/nbatch)))
         } else {
@@ -61,7 +65,7 @@
     }
     if (nbatch == 1)
         batches <- nsimul
-    
+
     ind <- nestfun(comm, ...)
     indstat <-
         if (is.list(ind))
@@ -72,7 +76,7 @@
     if (!simmat_in && nm$commsim$isSeq) {
         ## estimate thinning for "tswap" (trial swap)
         if (nm$commsim$method == "tswap") {
-            checkbrd <-sum(designdist(comm, "(J-A)*(J-B)", 
+            checkbrd <- sum(designdist(comm, "(J-A)*(J-B)",
                                       "binary"))
             M <- nm$ncol
             N <- nm$nrow
@@ -130,14 +134,15 @@
                                           statistic = statistic, ...))
         }
     }
-    
+
     simind <- matrix(simind, ncol = nsimul)
 
     if (attr(x, "isSeq")) {
         attr(simind, "thin") <- attr(x, "thin")
         attr(simind, "burnin") <- burnin
+        attr(simind, "chains") <- chains
     }
-    
+
     sd <- apply(simind, 1, sd, na.rm = TRUE)
     means <- rowMeans(simind, na.rm = TRUE)
     z <- (indstat - means)/sd
@@ -161,7 +166,7 @@
                 less = pless,
                 greater = pmore)
     p <- pmin(1, (p + 1)/(nsimul + 1))
-    
+
     ## ADDITION: if z is NA then it is not correct to calculate p values
     ## try e.g. oecosimu(dune, sum, "permat")
     if (any(is.na(z)))
diff --git a/R/oldCapscale.R b/R/oldCapscale.R
new file mode 100644
index 0000000..fbe40c7
--- /dev/null
+++ b/R/oldCapscale.R
@@ -0,0 +1,30 @@
+### Internal function to undo the capscale changes of vegan
+### 2.4-0. From vegan 2.4-0 the inertia components include the
+### negative eigenvalues of PCoA, whereas earlier the ordination and
+### its components were based only on the real components of
+### PCoA. This function puts back the inertia components that ignore
+### the imaginary dimensions; F-statistics etc. will change as a
+### result. The function is provided for compatibility with old
+### vegan, and may be eliminated in the future.
+`oldCapscale` <-
+    function(object)
+{
+    ## no imaginary component: nothing need be done
+    if (is.null(object$CA$imaginary.rank))
+        return(object)
+    ## inertia components based only on real dimensions
+    object$tot.chi <- object$real.tot.chi
+    if (!is.null(object$pCCA)) {
+        object$pCCA$tot.chi <- object$pCCA$real.tot.chi
+    }
+    if (!is.null(object$CCA)) {
+        object$CCA$tot.chi <- object$CCA$real.tot.chi
+    }
+    if (!is.null(object$CA)) {
+        object$CA$tot.chi <- object$CA$real.tot.chi
+    }
+    ## tell what you did
+    message("imaginary variation was discarded")
+    class(object) <- c("oldcapscale", class(object))
+    object
+}
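A hedged usage sketch: fit capscale() on semimetric dissimilarities
(which produce negative eigenvalues) and revert to the pre-2.4 inertia
accounting. oldCapscale() is internal, hence the ::: access:

    library(vegan)
    data(varespec)
    m   <- capscale(varespec ~ 1, distance = "bray")
    old <- vegan:::oldCapscale(m)   # message: imaginary variation discarded
    c(new = m$tot.chi, old = old$tot.chi)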
diff --git a/R/ordiGetData.R b/R/ordiGetData.R
index 46736fd..26c4f2b 100644
--- a/R/ordiGetData.R
+++ b/R/ordiGetData.R
@@ -1,7 +1,7 @@
 `ordiGetData` <-
 function (call, env) 
 {
-    call$scale <- call$distance <- call$comm <- call$add <-
+    call$scale <- call$distance <- call$comm <- call$add <- call$method <- 
         call$dfun <- call$sqrt.dist <- call$metaMDSdist <- call$subset <- NULL
     call$na.action <- na.pass
     call[[2]] <- NULL
diff --git a/R/ordiR2step.R b/R/ordiR2step.R
index ecae1a5..6608a02 100644
--- a/R/ordiR2step.R
+++ b/R/ordiR2step.R
@@ -5,24 +5,19 @@
 `ordiR2step` <-
     function(object, scope, direction = c("both", "forward"),
              Pin = 0.05, R2scope = TRUE, permutations = how(nperm=499),
-             trace = TRUE, ...)
+             trace = TRUE, R2permutations = 1000, ...)
 {
     direction <- match.arg(direction)
     if (is.null(object$terms))
         stop("ordination model must be fitted using formula")
     if (missing(scope))
         stop("needs scope")
-    ## Works only for rda(): cca() does not have (yet) R2.adjusted
-    if (!inherits(object, "rda"))
-        stop("can be used only with rda() or capscale()")
-    ## No R2 for capscale with negative eigenvalues
-    if (inherits(object, "capscale") && !is.null(object$CA$imaginary.chi))
-        stop("cannot be used when capscale() has negative eigenvalues")
     ## Get R2 of the original object
     if (is.null(object$CCA))
         R2.0 <- 0
     else
-        R2.0 <- RsquareAdj(object)$adj.r.squared
+        R2.0 <- RsquareAdj(object,
+                           permutations = R2permutations, ...)$adj.r.squared
     ## only accepts upper scope
     if (is.list(scope) && length(scope) <= 2L)
         scope <- scope$upper
@@ -34,7 +29,8 @@
     if (!inherits(scope, "formula"))
         scope <- reformulate(scope)
     if (R2scope)
-        R2.all <- RsquareAdj(update(object, scope))
+        R2.all <- RsquareAdj(update(object, scope),
+                             permutations = R2permutations, ...)
     else
         R2.all <- list(adj.r.squared = NA)
     ## Check that the full model can be evaluated
@@ -67,7 +63,8 @@
         ## Loop over add scope
         for (trm in seq_along(R2.adds)) {
             fla <- paste(". ~ .", names(R2.adds[trm]))
-            R2.tmp <- RsquareAdj(update(object, fla))$adj.r.squared
+            R2.tmp <- RsquareAdj(update(object, fla),
+                                 permutations = R2permutations, ...)$adj.r.squared
             if (!length(R2.tmp))
                 R2.tmp <- 0
             R2.adds[trm] <- R2.tmp
@@ -98,7 +95,8 @@
                 break
             fla <- paste("~  .", adds[best])
             object <- update(object, fla)
-            R2.previous <- RsquareAdj(object)$adj.r.squared
+            R2.previous <- RsquareAdj(object,
+                                      permutations = R2permutations, ...)$adj.r.squared
             anotab <- rbind(anotab, cbind("R2.adj" = R2.previous, tst[2,]))
         } else {
             break
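ordiR2step() no longer refuses cca() fits; it forwards 'R2permutations'
to the permutation-based RsquareAdj() that cca objects now use. A hedged
sketch:

    library(vegan)
    data(dune, dune.env)
    m0 <- cca(dune ~ 1, dune.env)
    m1 <- cca(dune ~ ., dune.env)
    ## forward selection on adjusted R2; 199 permutations per R2 estimate
    sel <- ordiR2step(m0, scope = formula(m1), R2permutations = 199)
    sel$anova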
diff --git a/R/ordiareatest.R b/R/ordiareatest.R
index a46a064..daec1b9 100644
--- a/R/ordiareatest.R
+++ b/R/ordiareatest.R
@@ -13,8 +13,8 @@
 #'
 #' @author Jari Oksanen
 `ordiareatest` <-
-    function(ord, groups, area = c("hull", "ellipse"), permutations = 999,
-             parallel = getOption("mc.cores"), ...)
+    function(ord, groups, area = c("hull", "ellipse"), kind = "sd",
+             permutations = 999, parallel = getOption("mc.cores"), ...)
 {
     EPS <- sqrt(.Machine$double.eps)
     ## Function to find area
@@ -22,10 +22,10 @@
     areafun <- if (area == "hull") ordihull else ordiellipse
     areafun <- match.fun(areafun)
     ## Observed statistics
-    obs <- summary(areafun(ord, groups, draw = "none", ...))["Area",]
+    obs <- summary(areafun(ord, groups, draw = "none", kind = kind))["Area",]
     ## permutations
     pfun <- function(take, ...)
-        summary(areafun(ord, groups[take], draw = "none", ...))["Area",]
+        summary(areafun(ord, groups[take], draw = "none", kind = kind))["Area",]
     perm <- getPermuteMatrix(permutations, length(groups))
     nperm <- nrow(perm)
     if (is.null(parallel))
diff --git a/R/ordiarrows.R b/R/ordiarrows.R
index 594066c..cd9ab85 100644
--- a/R/ordiarrows.R
+++ b/R/ordiarrows.R
@@ -1,6 +1,7 @@
 `ordiarrows` <-
     function (ord, groups, levels, replicates, order.by, 
-              display = "sites", show.groups, startmark, label = FALSE, ...)
+              display = "sites", col = 1, show.groups, startmark,
+              label = FALSE, length = 0.1, ...)
 {
     pts <- scores(ord, display = display, ...)
     npoints <- nrow(pts)
@@ -21,6 +22,10 @@
     }
     out <- seq(along = groups)
     inds <- names(table(groups))
+    if (is.factor(col))
+        col <- as.numeric(col)
+    col <- rep(col, length=length(inds))
+    names(col) <- inds
     starts <- names <- NULL
     for (is in inds) {
         gr <- out[groups == is]
@@ -30,19 +35,22 @@
             X1 <- X[-1, , drop = FALSE]
             nseg <- nrow(X0)
             if (!missing(startmark))
-                points(X0[1,1], X0[1,2], pch=startmark, ...)
+                points(X0[1,1], X0[1,2], pch=startmark, col = col[is], ...)
             if (label) {
                 starts <- rbind(starts, X0[1,])
                 names <- c(names, is)
             }
             if (nseg > 1)
                 ordiArgAbsorber(X0[-nseg,1], X0[-nseg,2], X1[-nseg,1],
-                                X1[-nseg,2], FUN = segments, ...)
-            ordiArgAbsorber(X0[nseg, 1], X0[nseg, 2], X1[nseg, 1], X1[nseg, 2],
+                                X1[-nseg,2], col = col[is],
+                                FUN = segments, ...)
+            ordiArgAbsorber(X0[nseg, 1], X0[nseg, 2], X1[nseg, 1],
+                            X1[nseg, 2], col = col[is], length = length,
                             FUN = arrows, ...)
         }
     }
     if (label)
-        ordiArgAbsorber(starts, labels = names, FUN = ordilabel, ...)
+        ordiArgAbsorber(starts, labels = names, border = col, col = par("fg"),
+                        FUN = ordilabel, ...)
     invisible()
 }
diff --git a/R/ordibar.R b/R/ordibar.R
new file mode 100644
index 0000000..b3d770e
--- /dev/null
+++ b/R/ordibar.R
@@ -0,0 +1,80 @@
+### draws crossed error bars for classes in ordination. These are
+### oblique to the axes because so are the clouds of the points and
+### their standard errors and confidence regions. The bars are the
+### principal axes of the corresponding ellipse (as drawn in
+### ordiellipse), found as principal components of the associated
+### covariance matrix. The function is modelled after ordiellipse.
+`ordibar` <-
+    function (ord, groups, display = "sites", kind = c("sd", "se"),
+              conf,  w = weights(ord, display), col = 1,
+              show.groups, label = FALSE, lwd = NULL, length = 0,  ...)
+{
+    weights.default <- function(object, ...) NULL
+    kind <- match.arg(kind)
+    draw <- TRUE
+    pts <- scores(ord, display = display, ...)
+    ## ordibar only works with 2D data (2 columns)
+    pts <- as.matrix(pts)
+    if (ncol(pts) > 2)
+        pts <- pts[ , 1:2, drop = FALSE]
+    if (ncol(pts) < 2)
+        stop("ordibar needs two dimensions")
+    w <- eval(w)
+    if (length(w) == 1)
+        w <- rep(1, nrow(pts))
+    if (is.null(w))
+        w <- rep(1, nrow(pts))
+    if (!missing(show.groups)) {
+        take <- groups %in% show.groups
+        pts <- pts[take, , drop = FALSE]
+        groups <- groups[take]
+        w <- w[take]
+    }
+    out <- seq(along = groups)
+    inds <- names(table(groups))
+    if (label) {
+        cntrs <- matrix(NA, nrow=length(inds), ncol=2)
+        rownames(cntrs) <- inds
+    }
+    col <- rep(col, length = length(inds))
+    names(col) <- inds
+    res <- list()
+    ## Remove NA scores
+    kk <- complete.cases(pts) & !is.na(groups)
+    for (is in inds) {
+        gr <- out[groups == is & kk]
+        if (length(gr)) {
+            X <- pts[gr, , drop = FALSE]
+            W <- w[gr]
+            mat <- cov.wt(X, W)
+            if (mat$n.obs == 1)
+                mat$cov[] <- 0
+            if (kind == "se")
+                mat$cov <- mat$cov * sum(mat$wt^2)
+            if (missing(conf))
+                t <- 1
+            else t <- sqrt(qchisq(conf, 2))
+            if (mat$n.obs > 1) {
+                eig <- eigen(mat$cov)
+                v <- sweep(eig$vectors, 2, sqrt(eig$values), "*") * t
+                cnt <- mat$center
+                ordiArgAbsorber(v[1,] + cnt[1], v[2,] + cnt[2],
+                                -v[1,] + cnt[1], -v[2,] + cnt[2],
+                                col = col[is], lwd = lwd,
+                                length = length/2, angle = 90, code = 3,
+                                FUN = arrows, ...)
+            }
+            if (label) {
+                cntrs[is,] <- mat$center
+            }
+            mat$scale <- t
+            res[[is]] <- mat
+        }
+    }
+    if (label) {
+        ordiArgAbsorber(cntrs, col = par("fg"), border = col, 
+                        FUN = ordilabel, ...)
+    }
+    class(res) <- "ordibar"
+    invisible(res)
+}
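A usage sketch for the new function, assuming the dune data shipped with
vegan:

    library(vegan)
    data(dune, dune.env)
    ord <- metaMDS(dune, trace = 0)
    plot(ord, display = "sites")
    ## crossed bars: principal axes of the standard-error ellipse
    ## for each Management class, one colour per class
    ordibar(ord, dune.env$Management, kind = "se", conf = 0.95,
            col = 1:4, label = TRUE)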
diff --git a/R/ordicluster.R b/R/ordicluster.R
index 0e61dc3..cd11abd 100644
--- a/R/ordicluster.R
+++ b/R/ordicluster.R
@@ -1,6 +1,6 @@
 `ordicluster` <-
-    function (ord, cluster, prune=0, display="sites", w = weights(ord, display),
-              ...)
+    function (ord, cluster, prune=0, display="sites",
+              w = weights(ord, display), col = 1, ...)
 {
     weights.default <- function(object, ...) NULL
     w <- eval(w)
@@ -12,6 +12,10 @@
     n <- if (is.null(w)) rep(1, nrow(ord)) else w
     noden <- numeric(nrow(ord))
     go <- ord
+    ## recycle colours for points and prepare to get node colours
+    col <- rep(col, length = nrow(ord))
+    col <- col2rgb(col)/255
+    nodecol <- matrix(NA, nrow(mrg) - prune, 3)
     for (i in 1: (nrow(mrg) - prune)) {
         a <- mrg[i,1]
         b <- mrg[i,2]
@@ -19,11 +23,16 @@
         two <- if (b < 0) ord[-b,] else go[b,]
         n1 <- if (a < 0) n[-a] else noden[a]
         n2 <- if (b < 0) n[-b] else noden[b]
-        ordiArgAbsorber(one[1], one[2], two[1], two[2], FUN = segments, ...)
-        xm <- weighted.mean(c(one[1],two[1]), w=c(n1,n2))
-        ym <- weighted.mean(c(one[2],two[2]), w=c(n1,n2))
+        xm <- weighted.mean(c(one[1],two[1]), w = c(n1,n2))
+        ym <- weighted.mean(c(one[2],two[2]), w = c(n1,n2))
         go[i,] <- c(xm,ym)
         noden[i] <- n1 + n2
+        colone <- if (a < 0) col[,-a] else nodecol[a,]
+        coltwo <- if (b < 0) col[,-b] else nodecol[b,]
+        nodecol[i,] <- (n1 * colone + n2 * coltwo)/noden[i]
+        ordiArgAbsorber(one[1], one[2], two[1], two[2],
+                        col = rgb(t(nodecol[i,])),
+                        FUN = segments, ...)
     }
     invisible(cbind(go, "w"=noden))
 }
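The node colours are leaf-count-weighted averages of the child colours
in RGB space, so long cluster branches fade smoothly between group
colours. The arithmetic in isolation:

    ## mix two group colours in proportion to the leaves they carry,
    ## exactly as ordicluster does for an internal node
    cols <- col2rgb(c("red", "blue"))/255   # 3 x 2 matrix in [0, 1]
    n    <- c(3, 1)                         # leaf counts of the children
    mix  <- (n[1]*cols[, 1] + n[2]*cols[, 2])/sum(n)
    rgb(t(mix))                             # "#BF0040"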
diff --git a/R/ordiellipse.R b/R/ordiellipse.R
index 272a504..5610fc3 100644
--- a/R/ordiellipse.R
+++ b/R/ordiellipse.R
@@ -1,8 +1,9 @@
 `ordiellipse` <-
-    function (ord, groups, display = "sites", kind = c("sd", "se"),
+    function (ord, groups, display = "sites", kind = c("sd", "se", "ehull"),
               conf, draw = c("lines", "polygon", "none"),
               w = weights(ord, display), col = NULL, alpha = 127,
-              show.groups, label = FALSE,  ...)
+              show.groups, label = FALSE, border = NULL, lty = NULL,
+              lwd = NULL, ...)
 {
     weights.default <- function(object, ...) NULL
     kind <- match.arg(kind)
@@ -19,7 +20,10 @@
         w <- rep(1, nrow(pts))
     if (is.null(w))
         w <- rep(1, nrow(pts))
-    ## make semitransparent fill
+    ## make semitransparent fill; alpha should be integer in 0..255,
+    ## but users may have given it as a real number in 0..1
+    if (alpha < 1)
+        alpha <- round(alpha * 255)
     if (draw == "polygon" && !is.null(col))
         col <- rgb(t(col2rgb(col)), alpha = alpha, maxColorValue = 255)
     if (!missing(show.groups)) {
@@ -30,33 +34,79 @@
     }
     out <- seq(along = groups)
     inds <- names(table(groups))
+    
+    ## fill in graphical vectors with default values if unspecified
+    ## and recycles shorter vectors
+    col.new <- border.new <- lty.new <- lwd.new <- NULL
+    for(arg in c("col","border","lty","lwd")){
+      tmp <- mget(arg,ifnotfound=list(NULL))[[1]]
+      if(is.null(tmp))
+          tmp <- ifelse(suppressWarnings(is.null(par(arg))),
+                        par("fg"), par(arg))
+      if(length(inds) != length(tmp)) {tmp <- rep_len(tmp, length(inds))}
+      assign(paste(arg,".new", sep=""), tmp)
+      
+    }
+    ## default colour for "polygon" fill is "transparent", for lines
+    ## is par("fg")
+    if(is.null(col) && draw=="polygon")
+        col.new <- rep_len("transparent", length(inds))
+    else if(is.null(col) && draw=="lines")
+            col.new <- rep_len(par("fg"), length(inds))
+    
     res <- list()
-    if (label)
-        cntrs <- names <- NULL
+    if (label) {
+        cntrs <- matrix(NA, nrow=length(inds), ncol=2)
+        rownames(cntrs) <- inds
+    }
     ## Remove NA scores
     kk <- complete.cases(pts) & !is.na(groups)
     for (is in inds) {
         gr <- out[groups == is & kk]
-        if (length(gr) > 1) {
-            X <- pts[gr, ]
+        if (length(gr)) {
+            X <- pts[gr, , drop = FALSE]
             W <- w[gr]
-            mat <- cov.wt(X, W)
+            if (kind == "ehull") {
+                tmp <- ellipsoidhull(X)
+                mat <- list(cov = tmp$cov,
+                            center = tmp$loc,
+                            n.obs = nrow(X))
+            } else
+                mat <- cov.wt(X, W)
+            if (mat$n.obs == 1)
+                mat$cov[] <- 0
             if (kind == "se")
                 mat$cov <- mat$cov * sum(mat$wt^2)
-            if (missing(conf))
-                t <- 1
-            else t <- sqrt(qchisq(conf, 2))
-            xy <- veganCovEllipse(mat$cov, mat$center, t)
+            if (kind == "ehull")
+                t <- sqrt(tmp$d2)
+            else {
+                if (missing(conf))
+                    t <- 1
+                else t <- sqrt(qchisq(conf, 2))
+            }
+            if (mat$n.obs > 1)
+                xy <- veganCovEllipse(mat$cov, mat$center, t)
+            else
+                xy <- X
             if (draw == "lines")
                 ordiArgAbsorber(xy, FUN = lines,
-                                col = if(is.null(col)) par("fg") else col,
-                                ...)
+                                col = if (is.null(col)) 
+                                          par("fg")
+                                      else
+                                          col.new[match(is, inds)],
+                                lty=lty.new[match(is,inds)],
+                                lwd=lwd.new[match(is,inds)], ...)
+                      
             else if (draw == "polygon") 
-                ordiArgAbsorber(xy[, 1], xy[, 2], col = col, FUN = polygon,
+                ordiArgAbsorber(xy[, 1], xy[, 2],
+                                col = col.new[match(is, inds)],
+                                border=border.new[match(is,inds)],
+                                lty = lty.new[match(is,inds)],
+                                lwd = lwd.new[match(is,inds)],
+                                FUN = polygon,
                                 ...)
             if (label && draw != "none") {
-                cntrs <- rbind(cntrs, mat$center)
-                names <- c(names, is)
+                cntrs[is,] <- mat$center
             }
             mat$scale <- t
             res[[is]] <- mat
@@ -64,10 +114,11 @@
     }
     if (label && draw != "none") {
         if (draw == "lines")
-            ordiArgAbsorber(cntrs[,1], cntrs[,2], labels=names, col = col,  
-                            FUN = text, ...)
+            ordiArgAbsorber(cntrs[,1], cntrs[,2],
+                            labels = rownames(cntrs),
+                            col = col.new,  FUN = text, ...)
         else 
-            ordiArgAbsorber(cntrs, labels = names, col = NULL,
+            ordiArgAbsorber(cntrs, col = NULL,
                             FUN = ordilabel, ...)
     }
     class(res) <- "ordiellipse"
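ordiellipse (and ordihull below) now accept 'alpha' either as an integer
in 0..255 or as a proportion below 1, which is rescaled with
round(alpha * 255). How the semitransparent fill colour is assembled:

    ## alpha = 0.5 becomes 128 on the 0..255 scale
    rgb(t(col2rgb("skyblue")), alpha = round(0.5 * 255),
        maxColorValue = 255)   # "#87CEEB80"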
diff --git a/R/ordihull.R b/R/ordihull.R
index 42d678b..4273d24 100644
--- a/R/ordihull.R
+++ b/R/ordihull.R
@@ -1,21 +1,26 @@
 `ordihull` <-
     function (ord, groups, display = "sites",
               draw = c("lines", "polygon", "none"),
-              col = NULL, alpha = 127, show.groups, label = FALSE, ...)
+              col = NULL, alpha = 127, show.groups, label = FALSE,
+              border = NULL, lty = NULL, lwd = NULL, ...)
+      
 {
     draw <- match.arg(draw)
     ## Internal function to find the polygon centre
     polycentre <- function(x) {
         n <- nrow(x)
         if (n < 4) 
-            return(colMeans(x[-n, ]))
+            return(colMeans(x[-n, , drop = FALSE]))
         xy <- x[-n, 1] * x[-1, 2] - x[-1, 1] * x[-n, 2]
         A <- sum(xy)/2
         xc <- sum((x[-n, 1] + x[-1, 1]) * xy)/A/6
         yc <- sum((x[-n, 2] + x[-1, 2]) * xy)/A/6
         c(xc, yc)
     }
-    ## Make semitransparent fill colour
+    ## Make semitransparent fill colour; alpha should be an integer
+    ## in 0..255, but we also handle real values < 1
+    if (alpha < 1)
+        alpha <- round(alpha * 255)
     if (draw == "polygon" && !is.null(col))
         col <- rgb(t(col2rgb(col)), alpha = alpha, maxColorValue = 255)
     pts <- scores(ord, display = display, ...)
@@ -26,36 +31,70 @@
     }
     out <- seq(along = groups)
     inds <- names(table(groups))
+    
+    ## fill in graphical vectors with default values if unspecified
+    ## and recycles shorter vectors
+    col.new <- border.new <- lty.new <- lwd.new <- NULL
+    for(arg in c("col","border","lty","lwd")){
+      tmp <- mget(arg,ifnotfound=list(NULL))[[1]]
+      if(is.null(tmp))
+          tmp <- ifelse(suppressWarnings(is.null(par(arg))),
+                        par("fg"), par(arg))
+      if(length(inds) != length(tmp))
+          tmp <- rep_len(tmp, length(inds))
+      assign(paste(arg,".new", sep=""), tmp)
+    }
+    ## default colour for "polygon" fill is "transparent", for lines
+    ## is par("fg")
+    if(is.null(col) && draw=="polygon")
+        col.new <- rep_len("transparent", length(inds))
+    else if(is.null(col) && draw=="lines")
+        col.new <- rep_len(par("fg"), length(inds))
     res <- list()
-    if (label)
-        cntrs <- names <- NULL
+    if (label) {
+        cntrs <- matrix(NA, nrow=length(inds), ncol=2)
+        rownames(cntrs) <- inds
+    }
     ## Remove NA scores
     kk <- complete.cases(pts) & !is.na(groups)
     for (is in inds) {
         gr <- out[groups == is & kk]
-        if (length(gr) > 1) {
-            X <- pts[gr, ]
+        if (length(gr)) {
+            X <- pts[gr,, drop = FALSE]
             hpts <- chull(X)
             hpts <- c(hpts, hpts[1])
-            if (draw == "lines")
+            if (draw == "lines") 
                 ordiArgAbsorber(X[hpts, ], FUN = lines,
-                                col = if(is.null(col)) par("fg") else col, ...)
-            else if (draw == "polygon")
-                ordiArgAbsorber(X[hpts,], FUN = polygon, col = col, ...)
+                                col = if (is.null(col)) 
+                                          par("fg")
+                                      else
+                                          col.new[match(is, inds)],
+                                lty = lty.new[match(is,inds)],
+                                lwd = lwd.new[match(is,inds)], ...)
+            else if (draw == "polygon") 
+
+                ordiArgAbsorber(X[hpts, ],
+                                border= border.new[match(is,inds)],
+                                FUN = polygon,
+                                col = col.new[match(is, inds)],
+                                lty = lty.new[match(is,inds)],
+                                lwd=lwd.new[match(is,inds)], ...)
+
             if (label && draw != "none") {
-                cntrs <- rbind(cntrs, polycentre(X[hpts,]))
-                names <- c(names, is)
+                cntrs[is,] <- polycentre(X[hpts,])
             }
             res[[is]] <- X[hpts,]
         }
     }
     if (label && draw != "none") {
-        if (draw == "lines")
-            ordiArgAbsorber(cntrs[,1], cntrs[,2], labels = names,
-                            col = col, FUN = text, ...)
-        else
-            ordiArgAbsorber(cntrs, labels = names, col = NULL,
-                            FUN = ordilabel, ...)
+      if (draw == "lines") 
+          ordiArgAbsorber(cntrs[, 1], cntrs[, 2],
+                          labels = rownames(cntrs), 
+                          col = col.new[match(is, inds)],
+                          FUN = text, ...)
+      else ordiArgAbsorber(cntrs, labels = rownames(cntrs),
+                           col = NULL, 
+                           FUN = ordilabel, ...)
     }
     class(res) <- "ordihull"
     invisible(res)
diff --git a/R/ordisegments.R b/R/ordisegments.R
index 183cbd6..b224cb3 100644
--- a/R/ordisegments.R
+++ b/R/ordisegments.R
@@ -1,6 +1,6 @@
 `ordisegments` <-
     function (ord, groups, levels, replicates, order.by, display = "sites",
-              show.groups, label = FALSE, ...)
+              col = 1, show.groups, label = FALSE, ...)
 {
     pts <- scores(ord, display = display, ...)
     npoints <- nrow(pts)
@@ -21,6 +21,10 @@
     }
     out <- seq(along = groups)
     inds <- names(table(groups))
+    if (is.factor(col))
+        col <- as.numeric(col)
+    col <- rep(col, length=length(inds))
+    names(col) <- inds
     ends <- names <- NULL
     for (is in inds) {
         gr <- out[groups == is]
@@ -29,7 +33,7 @@
             X0 <- X[-nrow(X), , drop = FALSE]
             X1 <- X[-1, , drop = FALSE]
             ordiArgAbsorber(X0[, 1], X0[, 2], X1[, 1], X1[, 2],
-                            FUN = segments, ...)
+                            col = col[is], FUN = segments, ...)
             if (label) {
                 ends <- rbind(ends, X[c(1, nrow(X)), ])
                 names <- c(names, is, is)
@@ -37,6 +41,7 @@
         }
     }
     if (label)
-        ordiArgAbsorber(ends, labels = names, FUN = ordilabel, ...)
+        ordiArgAbsorber(ends, labels = names, border = col, col = par("fg"),
+                        FUN = ordilabel, ...)
     invisible()
 }
diff --git a/R/ordispider.R b/R/ordispider.R
index 128c5de..2fb24c7 100644
--- a/R/ordispider.R
+++ b/R/ordispider.R
@@ -1,7 +1,7 @@
 `ordispider` <-
     function (ord, groups, display = "sites", w = weights(ord, display),
-              spiders = c("centroid", "median"),
-              show.groups, label = FALSE, ...)
+              spiders = c("centroid", "median"), show.groups,
+              label = FALSE, col = NULL, lty = NULL, lwd = NULL, ...)
 {
     weights.default <- function(object, ...) NULL
     spiders <- match.arg(spiders)
@@ -38,6 +38,18 @@
     inds <- names(table(groups))
     if (label) 
     cntrs <- names <- NULL
+    
+    ## fill in graphical vectors with default values if unspecified
+    ## and recycles shorter vectors
+    for(arg in c("col","lty","lwd")) {
+        tmp <- mget(arg,ifnotfound=list(NULL))[[1]]
+        if(is.null(tmp))
+            tmp <- ifelse(suppressWarnings(is.null(par(arg))),
+                          par("fg"), par(arg))
+        if(length(inds) != length(tmp))
+            tmp <- rep_len(tmp, length(inds))
+        assign(arg, tmp)
+    }
     ## 'kk' removes NA scores and NA groups
     kk <- complete.cases(pts) & !is.na(groups)
     for (is in inds) {
@@ -50,7 +62,9 @@
                               "centroid" = apply(X, 2, weighted.mean, w = W),
                               "median" = ordimedian(X, rep(1, nrow(X))))
                 ordiArgAbsorber(ave[1], ave[2], X[, 1], X[, 2],
-                                FUN = segments, ...)
+                                FUN = segments, col = col[match(is, inds)],
+                                lty = lty[match(is, inds)],
+                                lwd = lwd[match(is, inds)], ...)
             } else {
                 ave <- X
             }
diff --git a/R/pcnm.R b/R/pcnm.R
index 229bce4..99a6760 100644
--- a/R/pcnm.R
+++ b/R/pcnm.R
@@ -1,15 +1,11 @@
 `pcnm` <- function(dis, threshold, w, dist.ret = FALSE) {
-    if (!inherits(dis, "dist")) {
-        dims <- dim(dis)
-        if (length(unique(dims)) >1) {
-            stop("'dis' does not appear to be a square distance matrix.")
-        }
+    ## square matrix to dist
+    if ((is.matrix(dis) || is.data.frame(dis)) &&
+        isSymmetric(unname(as.matrix(dis))))
         dis <- as.dist(dis)
-    }
+    if (!inherits(dis, "dist"))
+        stop("'dis' does not appear to be distances")
     EPS <- sqrt(.Machine$double.eps)
-    wa.old <- options(warn = -1)
-    on.exit(options(wa.old))
-    dis <- as.dist(dis)
     if (missing(threshold)) {
         threshold <- max(spantree(dis)$dist)
     }
diff --git a/R/permustats.R b/R/permustats.R
index c4e6246..da14bb2 100644
--- a/R/permustats.R
+++ b/R/permustats.R
@@ -21,19 +21,26 @@
 ### modelled after print.oecosimu (should perhaps have oecosimu() args
 ### like 'alternative'
 
-`summary.permustats` <- function(object, interval = 0.95, ...) {
-    nalt <- length(object$alternative)
+`summary.permustats` <-
+    function(object, interval = 0.95, alternative, ...)
+{
+    TAB <- c("two.sided", "greater", "less")
+    if (missing(alternative))
+        alt <- TAB[match(object$alternative, TAB)]
+    else
+        alt <- match.arg(alternative, TAB, several.ok = TRUE)
+    if (any(is.na(alt)))
+        stop("alternative missing")
     nstat <- length(object$statistic)
+    nalt <- length(alt)
     ## Replicate alternative to length of statistic
     if ((nalt < nstat) && identical(nalt, 1L)) {
-        object$alternative <- rep(object$alternative, length.out = nstat)
+        alt <- rep(alt, length.out = nstat)
     }
-    TAB <- c("two.sided", "greater", "less")
     compint <- (1 - interval) / 2
     PROBS <- list(two.sided = c(compint, 0.5, interval + compint),
                   greater = c(NA, 0.5, interval),
                   less = c(1 - interval, 0.5, NA))
-    alt <- match(object$alternative, TAB)
     probs <- PROBS[alt]
     ## take care that permutations are in a column matrix
     permutations <- as.matrix(object$permutations)
@@ -48,7 +55,30 @@
     object$quantile <- do.call("rbind", object$quantile)
     dimnames(object$quantile) <- list(NULL, c("lower", "median", "upper"))
     object$interval <- interval
-    ## not (yet) P-values...
+    ## P-values
+    if (is.integer(object$statistic) && is.integer(permutations)) {
+        pless <- rowSums(object$statistic >= t(permutations), na.rm = TRUE)
+        pmore <- rowSums(object$statistic <= t(permutations), na.rm = TRUE)
+    } else {
+        EPS <- sqrt(.Machine$double.eps)
+        pless <- rowSums(object$statistic + EPS >= t(permutations),
+                         na.rm = TRUE)
+        pmore <- rowSums(object$statistic - EPS <= t(permutations),
+                         na.rm = TRUE)
+    }
+    nsimul <- nrow(permutations)
+    if (any(is.na(permutations))) {
+        warning("some simulated values were NA and were removed")
+        nsimul <- nsimul - colSums(is.na(permutations))
+    }
+    p <- rep(NA, length(object$statistic))
+    for(i in seq_along(p)) 
+        p[i] <- switch(alt[i],
+                       two.sided = 2*pmin(pless[i], pmore[i]),
+                       greater = pmore[i],
+                       less = pless[i])
+    object$p <- pmin(1, (p + 1)/(nsimul + 1))
+    ## out
     class(object) <- "summary.permustats"
     object
 }
@@ -57,14 +87,43 @@
     m <- cbind("statistic" = x$statistic,
                "SES" = x$z,
                "mean" = x$means,
-               x$quantile)
+               x$quantile,
+               "Pr(perm)" = x$p)
     cat("\n")
-    printCoefmat(m, tst.ind = 1:ncol(m), na.print = "", ...)
+    printCoefmat(m, tst.ind = 1:(ncol(m)-1), na.print = "", ...)
     writeLines(strwrap(paste0("(Interval (Upper - Lower) = ", x$interval, ")", sep = ""),
                        initial = "\n"))
     invisible(x)
 }
 
+### combine permustats objects. Function checks that statistic field
+### is equal (name, value) before proceeding, sees if the alternative
+### is equal, and then combines permutations.
+
+`c.permustats` <-
+    function(..., recursive = FALSE)
+{
+    mods <- list(...)
+    ## check stats
+    stats <- lapply(mods, function(z) z$statistic)
+    if (!all(sapply(stats[-1], function(z) identical(stats[[1]], z))))
+        stop("statistics are not equal")
+    stats <- stats[[1]]
+    ## check alternative
+    alt <- lapply(mods, function(z) z$alternative)
+    if (all(sapply(alt[-1], function(z) identical(alt[[1]], z))))
+        alt <- alt[[1]]
+    else
+        alt <- NA
+    ## combine permutations
+    p <- do.call(rbind, lapply(mods, function(z) z$permutations))
+    ## return permustats
+    structure(list(statistic = stats,
+                   permutations = p,
+                   alternative = alt),
+              class = "permustats")
+}
+
 ### densityplot
 
 `densityplot.permustats` <-
@@ -121,8 +180,15 @@
 }
 
 `qqmath.permustats` <-
-    function(x, data, observed = TRUE, ylab = "Permutations", ...)
+    function(x, data, observed = TRUE, sd.scale = FALSE,
+             ylab = "Permutations", ...)
 {
+    ## sd.scale: standardize before use
+    if (sd.scale) {
+        x$permutations <- scale(x$permutations)
+        x$statistic <- (x$statistic - attr(x$permutations, "scaled:center"))/
+            attr(x$permutations, "scaled:scale")
+    }
     obs <- x$statistic
     if (observed)
         sim <- rbind(x$statistic, as.matrix(x$permutations))
@@ -284,4 +350,4 @@
        "permutations" = F.perm,
        "alternative" = "greater"),
        class = "permustats")
-}
\ No newline at end of file
+}
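summary.permustats() now reports permutation P-values with the observed
statistic counted as one realization of the null, p = (hits + 1)/
(nsimul + 1), using an EPS guard against floating-point ties. The
"greater" branch in isolation:

    stat <- 2.5
    perm <- c(1.1, 2.7, 0.4, 2.5, 1.9)          # simulated values
    EPS  <- sqrt(.Machine$double.eps)
    hits <- sum(stat - EPS <= perm)             # values >= statistic: 2
    min(1, (hits + 1)/(length(perm) + 1))       # 0.5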
diff --git a/R/permutest.cca.R b/R/permutest.cca.R
index 06655fb..110f42a 100644
--- a/R/permutest.cca.R
+++ b/R/permutest.cca.R
@@ -21,18 +21,31 @@ permutest.default <- function(x, ...)
         return(sol)
     }
     model <- match.arg(model)
-    isCCA <- !inherits(x, "rda")
-    isPartial <- !is.null(x$pCCA)
+    ## special cases
+    isCCA <- !inherits(x, "rda")    # weighting
+    isPartial <- !is.null(x$pCCA)   # handle conditions
+    isDB <- inherits(x, c("capscale", "dbrda")) &&
+        !inherits(x, "oldcapscale")  # distance-based & new design
     ## Function to get the F statistics in one loop
     getF <- function (indx, ...)
     {
+        getEV <- function(x, isDB=FALSE)
+        {
+            if (isDB)
+                sum(diag(x))
+            else
+                sum(x*x)
+        }
         if (!is.matrix(indx))
             dim(indx) <- c(1, length(indx))
         R <- nrow(indx)
         mat <- matrix(0, nrow = R, ncol = 3)
         for (i in seq_len(R)) {
             take <- indx[i,]
-            Y <- E[take, ]
+            if (isDB)
+                Y <- E[take, take]
+            else
+                Y <- E[take, ]
             if (isCCA)
                 wtake <- w[take]
             if (isPartial) {
@@ -54,11 +67,15 @@ permutest.default <- function(x, ...)
             }
             tmp <- qr.fitted(Q, Y)
             if (first)
-                cca.ev <- La.svd(tmp, nv = 0, nu = 0)$d[1]^2
-            else cca.ev <- sum(tmp * tmp)
+                if (isDB)
+                    cca.ev <- eigen(tmp)$values[1]
+                else
+                    cca.ev <- La.svd(tmp, nv = 0, nu = 0)$d[1]^2
+            else
+                cca.ev <- getEV(tmp, isDB)
             if (isPartial || first) {
                 tmp <- qr.resid(Q, Y)
-                ca.ev <- sum(tmp * tmp)
+                ca.ev <- getEV(tmp, isDB)
             }
             else ca.ev <- Chi.tot - cca.ev
             mat[i,] <- cbind(cca.ev, ca.ev, (cca.ev/q)/(ca.ev/r))
@@ -78,11 +95,11 @@ permutest.default <- function(x, ...)
     ## Set up
     Chi.xz <- x$CA$tot.chi
     names(Chi.xz) <- "Residual"
-    r <- nrow(x$CA$Xbar) - x$CCA$QR$rank - 1
+    r <- nobs(x) - x$CCA$QR$rank - 1
     if (model == "full")
         Chi.tot <- Chi.xz
     else Chi.tot <- Chi.z + Chi.xz
-    if (!isCCA)
+    if (!isCCA && !isDB)
         Chi.tot <- Chi.tot * (nrow(x$CCA$Xbar) - 1)
     F.0 <- (Chi.z/q)/(Chi.xz/r)
     Q <- x$CCA$QR
@@ -92,7 +109,7 @@ permutest.default <- function(x, ...)
         X <- sweep(X, 1, sqrt(w), "/")
     }
     if (isPartial) {
-        Y.Z <- x$pCCA$Fit
+        Y.Z <- if (isDB) x$pCCA$G else x$pCCA$Fit
         QZ <- x$pCCA$QR
         if (isCCA) {
             Z <- qr.X(QZ)
@@ -100,10 +117,12 @@ permutest.default <- function(x, ...)
         }
     }
     if (model == "reduced" || model == "direct")
-        E <- x$CCA$Xbar
-    else E <- x$CA$Xbar
+        E <- if (isDB) x$CCA$G else x$CCA$Xbar
+    else E <-
+        if (isDB) stop(gettextf("%s cannot be used with 'full' model", x$method))
+        else x$CA$Xbar
     if (isPartial && model == "direct")
-        E <- E + Y.Z
+        E <- if (isDB) x$pCCA$G else E + Y.Z
     ## Save dimensions
     N <- nrow(E)
     if (isCCA) {
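For distance-based models the permuted object is the Gower-centred
matrix G rather than the raw Xbar, so getEV() takes a trace instead of a
sum of squares. For Euclidean distances the two agree, since G equals
the cross-product matrix of the centred data; a self-contained check:

    X <- scale(matrix(rnorm(20), 5, 4), scale = FALSE)  # centred data
    G <- -0.5 * as.matrix(dist(X))^2
    n <- nrow(G)
    C <- diag(n) - matrix(1/n, n, n)      # Gower double-centring
    G <- C %*% G %*% C
    all.equal(sum(diag(G)), sum(X^2))     # TRUE: trace == sum of squares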
diff --git a/R/plot.betadisper.R b/R/plot.betadisper.R
index 9959b90..84df2e2 100644
--- a/R/plot.betadisper.R
+++ b/R/plot.betadisper.R
@@ -1,9 +1,25 @@
-`plot.betadisper` <- function(x, axes = c(1,2), cex = 0.7, hull = TRUE,
+`plot.betadisper` <- function(x, axes = c(1,2), cex = 0.7, pch = seq_len(ng),
+                              col = NULL, lty = "solid", lwd = 1, hull = TRUE,
+                              ellipse = FALSE, conf,
+                              segments = TRUE, seg.col = "grey",
+                              seg.lty = lty, seg.lwd = lwd,
+                              label = TRUE, label.cex = 1,
                               ylab, xlab, main, sub, ...)
 {
     localAxis <- function(..., col, bg, pch, cex, lty, lwd) axis(...)
     localBox <- function(..., col, bg, pch, cex, lty, lwd) box(...)
     localTitle <- function(..., col, bg, pch, cex, lty, lwd) title(...)
+    Ellipse <- function(scrs, centres, conf, col, lty, lwd, ...) {
+        mat <- cov.wt(scrs, center = centres)
+        if (mat$n.obs == 1)
+            mat$cov[] <- 0
+        xy <- if (mat$n.obs > 1) {
+                  veganCovEllipse(mat$cov, mat$center, conf)
+        } else {
+            scrs
+        }
+        ordiArgAbsorber(xy, FUN = lines, col = col, lty = lty, lwd = lwd, ...)
+    }
     if(missing(main))
         main <- deparse(substitute(x))
     if(missing(sub))
@@ -12,37 +28,74 @@
         xlab <- paste("PCoA", axes[1])
     if(missing(ylab))
         ylab <- paste("PCoA", axes[2])
+    t <- if (missing(conf)) {
+        1
+    } else {
+        sqrt(qchisq(conf, df = 2))
+    }
     g <- scores(x, choices = axes)
+    ng <- length(levels(x$group))
+    lev <- levels(x$group)
+    ## sort out colour vector if none supplied
+    if (is.null(col)) {
+        col <- palette()
+    }
+    col <- rep_len(col, ng)        # make sure there are enough colors
+    seg.col <- rep_len(seg.col, ng)     # ditto for segments
     plot(g$sites, asp = 1, type = "n", axes = FALSE, ann = FALSE, ...)
     ## if more than 1 group level
     if(is.matrix(g$centroids)) {
-        for(i in levels(x$group)) {
-            j <- which(levels(x$group) == i)
-            segments(g$centroids[j, 1L], g$centroids[j, 2L],
-                     g$sites[x$group == i, 1L],
-                     g$sites[x$group == i, 2L], col = "blue", ...)
+        for(i in seq_along(lev)) {
+            curlev <- lev[i]
+            take <- x$group == curlev
+            j <- which(lev == curlev)
+            if (segments) {
+                segments(g$centroids[j, 1L], g$centroids[j, 2L],
+                         g$sites[take, 1L],
+                         g$sites[take, 2L], col = seg.col[i], lty = seg.lty,
+                         lwd = seg.lwd)
+            }
             if(hull) {
-                ch <- chull(g$sites[x$group == i, ])
+                ch <- chull(g$sites[take, ])
                 ch <- c(ch, ch[1])
-                lines(x$vectors[x$group == i, axes][ch, ],
-                      col = "black", lty = "dashed", ...)
+                lines(x$vectors[take, axes][ch, ], col = col[i], lty = lty,
+                      lwd = lwd, ...)
+            }
+            if (ellipse) {
+                Ellipse(g$sites[take, , drop = FALSE],
+                        centres = g$centroids[j, ],
+                        conf = t,
+                        col = col[i], lty = lty, lwd = lwd, ...)
             }
+            points(g$centroids[j, , drop = FALSE], pch = 16, cex = 1,
+                   col = col[i], ...)
         }
-        points(g$centroids, pch = 16, cex = 1, col = "red", ...)
     } else {
         ## single group
-        segments(g$centroids[1L], g$centroids[2L],
-                 g$sites[, 1L], g$sites[, 2L], col = "blue", ...)
+        if (segments) {
+            segments(g$centroids[, 1L], g$centroids[, 2L],
+                     g$sites[, 1L], g$sites[, 2L], col = seg.col,
+                     lty = seg.lty, ...)
+        }
         if(hull) {
             ch <- chull(g$sites)
             ch <- c(ch, ch[1])
-            lines(x$vectors[, axes][ch, ],
-                  col = "black", lty = "dashed", ...)
+            lines(x$vectors[, axes][ch, ], col = col[1L], lty = lty,
+                  lwd = lwd, ...)
         }
-        points(g$centroids[1L], g$centroids[1L],
-               pch = 16, cex = 1, col = "red", ...)
+        if (ellipse) {
+                Ellipse(g$sites,
+                        centres = g$centroids,
+                        conf = t,
+                        col = col[1L], lty = lty, lwd = lwd,...)
+        }
+        points(g$centroids[, 1L], g$centroids[, 2L],
+               pch = 16, cex = 1, col = col[1L], ...)
+    }
+    points(g$sites, pch = pch[x$group], cex = cex, col = col[x$group], ...)
+    if (label) {
+        ordilabel(x, display = "centroids", choices = axes, cex = label.cex)
     }
-    points(g$sites, pch = as.numeric(x$group), cex = cex, ...)
     localTitle(main = main, xlab = xlab, ylab = ylab, sub = sub, ...)
     localAxis(1, ...)
     localAxis(2, ...)
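A hedged sketch of the extended plot method with the new colour, ellipse
and label arguments:

    library(vegan)
    data(varespec)
    d   <- vegdist(varespec)                       # Bray-Curtis
    grp <- factor(rep(c("grazed", "ungrazed"), each = 12))
    mod <- betadisper(d, grp)
    plot(mod, hull = FALSE, ellipse = TRUE, conf = 0.95,
         col = c("darkorange", "steelblue"), label.cex = 0.8)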
diff --git a/R/plot.isomap.R b/R/plot.isomap.R
index fcae152..1c894aa 100644
--- a/R/plot.isomap.R
+++ b/R/plot.isomap.R
@@ -8,6 +8,12 @@
         pl <- ordiplot(x, display = "sites", type = "none", ...)
         z <- scores(pl, "sites")
         k <- x$net
+        ## recycle colour for points
+        n.col <- rep(n.col, length = nrow(z))
+        n.col <- col2rgb(n.col)/255
+        ## get average of colours of connected points
+        n.col <- (n.col[,k[,1]] + n.col[,k[,2]])/2
+        n.col <- rgb(t(n.col))
         segments(z[k[,1],1], z[k[,1],2], z[k[,2],1], z[k[,2],2], col=n.col)
         if (type == "points")
             points(pl, "sites", ...)
diff --git a/R/plot.spantree.R b/R/plot.spantree.R
index 0a3d712..c67c3d6 100644
--- a/R/plot.spantree.R
+++ b/R/plot.spantree.R
@@ -12,17 +12,21 @@
         if (!missing(dlim)) 
             d[d > dlim ] <- dlim
         if (n > 2) {
-            y <- cmdscale(d)
-            dup <- duplicated(y)
-            if (any(dup))
-            y[dup, ] <- y[dup,] + runif(2*sum(dup), -0.01, 0.01)
-            ord <- FUN(d, y)
+            ## sammon needs extra care, for other cases we just try FUN(d)
+            if (FUNname == "sammon") {
+                y <- cmdscale(d)
+                dup <- duplicated(y)
+                if (any(dup))
+                    y[dup, ] <- y[dup,] + runif(2*sum(dup), -0.01, 0.01)
+                ord <- FUN(d, y = y)
+            } else
+                ord <- FUN(d)
         } else
             ord <- cbind(seq_len(n), rep(0,n))
     }
     ord <- scores(ord, display = "sites", ...)
     ordiArgAbsorber(ord, asp = 1, type = "n", FUN = "plot", ...)
-    lines(x, ord)
+    lines(x, ord, ...)
     if (type == "p" || type == "b") 
         ordiArgAbsorber(ord, cex = cex, FUN = "points", ...)
     else if (type == "t") {
diff --git a/R/plot.specaccum.R b/R/plot.specaccum.R
index d907734..de5257d 100644
--- a/R/plot.specaccum.R
+++ b/R/plot.specaccum.R
@@ -1,7 +1,7 @@
 `plot.specaccum` <-
     function(x, add = FALSE, random = FALSE, ci = 2,
-             ci.type = c("bar","line","polygon"), col = par("fg"), ci.col = col,
-             ci.lty = 1, xlab, ylab = x$method, ylim,
+             ci.type = c("bar","line","polygon"), col = par("fg"), lty = 1,
+             ci.col = col, ci.lty = 1, xlab, ylab = x$method, ylim,
              xvar = c("sites", "individuals", "effort"), ...)
 {
     if(random && !(x$method %in% c("random", "collector")))
@@ -51,7 +51,7 @@
                 lines(x$weights[,i]*adj, x$perm[,i], col=col, ...)
         }
     } else
-        lines(xaxvar, x$richness,col=col, ...)
+        lines(xaxvar, x$richness, col=col, lty = lty, ...)
     invisible()
 }
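With lty now passed to lines(), the line type of the accumulation curve can be set directly (this is the fix recorded in the NEWS entry further below). A minimal sketch with the bundled BCI data:

    library(vegan)
    data(BCI)
    sp <- specaccum(BCI)
    plot(sp, col = "blue", lty = 2, ci.type = "polygon", ci.col = "lightblue")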
 
diff --git a/R/points.orditkplot.R b/R/points.orditkplot.R
index 80e6944..e2dd8e0 100644
--- a/R/points.orditkplot.R
+++ b/R/points.orditkplot.R
@@ -1,6 +1,5 @@
-`points.orditkplot` <-
-    function(x, ...)
-{
-    points(x$points, ...)
+`points.orditkplot` <- function(x, pch = x$args$pch, cex = x$args$pcex,
+                                col = x$args$pcol, bg = x$args$pbg, ...) {
+    points(x$points, pch = pch, cex = cex, col = col, bg = bg, ...)
 }
 
diff --git a/R/postMDS.R b/R/postMDS.R
index 6ea6e78..b75f99a 100644
--- a/R/postMDS.R
+++ b/R/postMDS.R
@@ -1,4 +1,4 @@
-"postMDS" <-
+`postMDS` <-
     function (X, dist, pc = TRUE, center = TRUE, halfchange, 
               threshold = 0.8, nthreshold = 10, plot = FALSE, ...) 
 {
@@ -39,8 +39,8 @@
         }
     }
     if (!halfchange) {
-        scl <- max(dist, na.rm = TRUE)/max(vegdist(x, "euclidean"))
-        x <- x*scl
+        hc <- max(dist(x, "euclidean"))/max(dist, na.rm = TRUE)
+        x <- x/hc
     }
     if (plot && halfchange) {
         cross.lim <- 45
@@ -71,6 +71,7 @@
     attr(x, "centre") <- center
     attr(x, "pc") <- pc
     attr(x, "halfchange") <- halfchange
+    attr(x, "internalscaling") <- hc
     if (any(names(X) == "points")) 
         X$points <- x
     else X <- x
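With halfchange = FALSE the scores are now divided by hc so that the maximum ordination distance matches the maximum observed dissimilarity, and the divisor is kept in the new "internalscaling" attribute. A minimal sketch, assuming a monoMDS result as input:

    library(vegan)
    data(varespec)
    d <- vegdist(varespec)
    m <- postMDS(monoMDS(d), d, halfchange = FALSE)
    attr(m$points, "internalscaling")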
diff --git a/R/predict.rda.R b/R/predict.rda.R
index cb43f71..0b858d8 100644
--- a/R/predict.rda.R
+++ b/R/predict.rda.R
@@ -1,3 +1,13 @@
+### predict.rda handles rda plus distance-based capscale and
+### dbrda. Distance-based methods have some limitations:
+###
+### - type = "response" returns dissimilarities (and ignores imaginary dims)
+### - with Euclidean distances type = "working" equals type = "response"
+### - there are no meaningful species scores
+### - WA scores with newdata cannot be calculated in capscale.
+### - only type = "response", "working" and "lc" work with dbrda
+### - only type = "lc" can be used with newdata with dbrda
+
 `predict.rda` <-
     function (object, newdata, type = c("response", "wa", "sp", "lc", "working"), 
               rank = "full", model = c("CCA", "CA"), scaling = "none",
@@ -7,23 +17,30 @@
     model <- match.arg(model)
     if (model == "CCA" && is.null(object$CCA)) 
         model <- "CA"
-    take <- object[[model]]$rank
+    if (inherits(object, "dbrda"))
+        take <- object[[model]]$poseig
+    else
+        take <- object[[model]]$rank
     if (take == 0)
         stop("model ", dQuote(model), " has rank 0")
     if (rank != "full") 
         take <- min(take, rank)
-    if (is.null(object$CCA)) 
-        tmp <- object$CA$Xbar
-    else tmp <- object$CCA$Xbar
-    cent <- attr(tmp, "scaled:center")
-    scal <- attr(tmp, "scaled:scale")
-    scaled.PCA <- !is.null(scal)
-    nr <- nrow(tmp) - 1
+    if (!inherits(object, "dbrda")) {
+        if (is.null(object$CCA))
+            tmp <- object$CA$Xbar
+        else tmp <- object$CCA$Xbar
+        cent <- attr(tmp, "scaled:center")
+        scal <- attr(tmp, "scaled:scale")
+        scaled.PCA <- !is.null(scal)
+    }
+    nr <- nobs(object) - 1
     u <- object[[model]]$u[, 1:take, drop = FALSE]
-    v <- object[[model]]$v[, 1:take, drop = FALSE]
     w <- object[[model]]$wa[, 1:take, drop = FALSE]
-    if (is.null(w)) 
+    if (is.null(w))
         w <- u
+    if (!inherits(object, "dbrda")) {
+        v <- object[[model]]$v[, 1:take, drop = FALSE]
+    }
     slam <- diag(sqrt(object[[model]]$eig[1:take] * nr), nrow = take)
     ## process scaling arg, scaling used later so needs to be a numeric
     scaling <- scalingType(scaling = scaling, correlation = correlation)
@@ -32,13 +49,21 @@
             u <- predict(object, type = if(model == "CCA") "lc" else "wa",
                          newdata = newdata, rank = take)
         }
-        if (inherits(object, "capscale")) {
+        if (inherits(object, c("capscale", "dbrda"))) {
             if (take > 0) {
                 out <- u %*% slam/object$adjust
                 if (type == "response") {
                     out <- dist(out)
-                    if (!is.null(object$ac))
-                        out <- out - object$ac
+                    if (!is.null(object$ac)) {
+                        if (object$add == "lingoes")
+                            out <- sqrt(out^2 - 2 * object$ac)
+                        else if (object$add == "cailliez")
+                            out <- out - object$ac
+                        else
+                            stop("unknown euclidifying adjustment")
+                    }
+                    if (object$sqrt.dist)
+                        out <- out^2
                 }
             }
         } else {
@@ -86,8 +111,9 @@
     }
     else if (type == "wa") {
         if (!missing(newdata)) {
-            if (inherits(object, "capscale")) 
-                stop("'wa' scores not available in capscale with 'newdata'")
+            if (inherits(object, c("capscale", "dbrda")))
+                stop(gettextf("'wa' scores not available in %s with 'newdata'",
+                     object$method))
             if (!is.null(object$pCCA)) 
                 stop("No 'wa' scores available (yet) in partial RDA")
             nm <- rownames(v)
@@ -115,6 +141,8 @@
     else if (type == "sp") {
         if (inherits(object, "capscale")) 
             warning("'sp' scores may be meaningless in 'capscale'")
+        if (inherits(object, "dbrda"))
+            stop("'sp' scores are not available in 'dbrda'")
         if (!missing(newdata)) {
             nm <- rownames(u)
             if (!is.null(nm)) {
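A sketch of the distance-based branches listed in the header comment, assuming the bundled varespec/varechem data: type = "response" reconstructs fitted dissimilarities (undoing any Lingoes/Cailliez constant and the internal square root), and in dbrda only type = "lc" accepts newdata:

    library(vegan)
    data(varespec); data(varechem)
    m <- dbrda(vegdist(varespec) ~ N + P + K, data = varechem)
    fit.d <- predict(m, type = "response")                    # a "dist" object
    lc <- predict(m, newdata = varechem[1:5, ], type = "lc")  # LC scores only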
diff --git a/R/print.capscale.R b/R/print.capscale.R
deleted file mode 100644
index ca6f197..0000000
--- a/R/print.capscale.R
+++ /dev/null
@@ -1,9 +0,0 @@
-`print.capscale` <-
-    function(x, ...)
-{
-    NextMethod("print", x, ...)
-    if (!is.null(x$metaMDSdist))
-        cat("metaMDSdist transformed data:", x$metaMDSdist, "\n\n")
-    if (!is.null(x$ac))
-        cat("Constant added to distances:", x$ac, "\n\n")
-}
diff --git a/R/print.cca.R b/R/print.cca.R
index d6cf8b7..2e72a02 100644
--- a/R/print.cca.R
+++ b/R/print.cca.R
@@ -7,27 +7,40 @@
     }
     writeLines(strwrap(pasteCall(x$call)))
     cat("\n")
-    chi <- c(x$tot.chi, if (!is.null(x$CA$imaginary.chi)) x$tot.chi - x$CA$imaginary.chi,
-                            x$pCCA$tot.chi, x$CCA$tot.chi, x$CA$tot.chi,
-             x$CA$imaginary.chi)
-    ## Proportions of inertia only for Real dimensions in capscale
-    if (is.null(x$CA$imaginary.chi))
-        props <- chi/chi[1]
+    chi <- c(x$tot.chi, x$pCCA$tot.chi, x$CCA$tot.chi, x$CA$tot.chi)
+    props <- chi/chi[1]
+    rnk <- c(NA, x$pCCA$rank, x$CCA$rank, x$CA$rank)
+    ## handle negative eigenvalues of capscale
+    if (!is.null(x$CA$imaginary.chi)) 
+        rchi <- c(x$real.tot.chi, x$pCCA$real.tot.chi,
+                  x$CCA$real.tot.chi, x$CA$real.tot.chi)
     else
-        props <- c(NA, chi[-c(1, length(chi))]/chi[2], NA)
-    rnk <- c(NA, if (!is.null(x$CA$imaginary.rank)) NA, x$pCCA$rank, x$CCA$rank, x$CA$rank,
-             x$CA$imaginary.rank)
-    tbl <- cbind(chi, props, rnk)
-    colnames(tbl) <- c("Inertia", "Proportion", "Rank")
-    rn <- c("Total", "Real Total",  "Conditional", "Constrained", "Unconstrained",
+        rchi <- NULL
+    ## report no. of real axes in dbrda if any negative eigenvalues
+    if (inherits(x, "dbrda") &&
+        (!is.null(x$CCA) && x$CCA$poseig < x$CCA$qrank ||
+             !is.null(x$CA) && x$CA$poseig < x$CA$rank))
+        poseig <- c(NA, if (!is.null(x$pCCA)) NA, x$CCA$poseig, x$CA$poseig)
+    else
+        poseig <- NULL
+    tbl <- cbind(chi, props, rchi, rnk, poseig)
+    if (!is.null(rchi))
+        tbl <- rbind(tbl, c(NA, NA, x$CA$imaginary.chi,
+                            x$CA$imaginary.rank))
+    colnames(tbl) <- c("Inertia", "Proportion",
+                       if(!is.null(rchi)) "Eigenvals", "Rank",
+                       if (!is.null(poseig)) "RealDims")
+    rn <- c("Total", "Conditional", "Constrained", "Unconstrained",
             "Imaginary")
-    rownames(tbl) <- rn[c(TRUE, !is.null(x$CA$imaginary.chi), !is.null(x$pCCA),
+    rownames(tbl) <- rn[c(TRUE,!is.null(x$pCCA),
                           !is.null(x$CCA),  !is.null(x$CA),
                           !is.null(x$CA$imaginary.chi))]
     ## Remove "Proportion" if only one component
     if (is.null(x$CCA) && is.null(x$pCCA))
         tbl <- tbl[,-2]
-    printCoefmat(tbl, digits = digits, na.print = "")
+    ## 'cs' columns before "Rank" are non-integer
+    cs <- which(colnames(tbl) == "Rank") - 1
+    printCoefmat(tbl, digits = digits, na.print = "", cs.ind = seq_len(cs))
     cat("Inertia is", x$inertia, "\n")
     if (!is.null(x$CCA$alias))
         cat("Some constraints were aliased because they were collinear (redundant)\n")
@@ -56,5 +69,11 @@
         else print(zapsmall(x$CA$eig, digits = digits), ...)
     }
     cat("\n")
+    if (inherits(x, c("capscale", "dbrda"))) {
+        if (!is.null(x$metaMDSdist))
+            cat("metaMDSdist transformed data:", x$metaMDSdist, "\n\n")
+        if (!is.null(x$ac))
+            cat("Constant added to distances:", x$ac, "\n\n")
+    }
     invisible(x)
 }
diff --git a/R/print.simmat.R b/R/print.simmat.R
index d815edd..f5fea52 100644
--- a/R/print.simmat.R
+++ b/R/print.simmat.R
@@ -1,9 +1,10 @@
 print.simmat <- function(x, ...) {
     isSeq <- ifelse(attr(x, "isSeq"), "sequential", "non-sequential")
-    if (attr(x, "binary"))
+    if (attr(x, "binary")) {
         kind <- "binary"
-    else
+    } else {
         kind <- ifelse(attr(x, "mode") == "integer", "count", "abundance")
+    }
     d <- dim(x)
     cat("An object of class", dQuote(class(x)[1L]), "\n")
     cat(sQuote(attr(x, "method")), " method (", 
@@ -11,8 +12,11 @@ print.simmat <- function(x, ...) {
     cat(d[1L], "x", d[2L], "matrix\n")
     cat("Number of permuted matrices =", d[3L], "\n")
     if (attr(x, "isSeq")) {
+        chainInfo <- ""
+        if (!is.null(attr(x, "chains")) && attr(x, "chains") > 1L)
+            chainInfo <- paste0(" (", attr(x, "chains"), " chains)")
         cat("Start = ", attr(x, "start"), ", End = ", attr(x, "end"), 
-            ", Thin = ", attr(x, "thin"), "\n\n", sep="") 
+            ", Thin = ", attr(x, "thin"), chainInfo, "\n\n", sep="") 
         } else cat("\n")
     invisible(x)
 }
diff --git a/R/print.varpart.R b/R/print.varpart.R
index 5b063a2..df7af16 100644
--- a/R/print.varpart.R
+++ b/R/print.varpart.R
@@ -1,7 +1,7 @@
-"print.varpart" <-
-function (x, ...)
+`print.varpart` <-
+    function (x, ...)
 {
-    cat("\nPartition of variation in RDA\n\n")
+    cat("\nPartition of", x$inert, "in", x$RDA, "\n\n")
     writeLines(strwrap(pasteCall(x$call)))
     if (x$scale)
         cat("Columns of Y were scaled to unit variance\n")
diff --git a/R/print.varpart234.R b/R/print.varpart234.R
index b9736ee..a73c363 100644
--- a/R/print.varpart234.R
+++ b/R/print.varpart234.R
@@ -1,9 +1,10 @@
-"print.varpart234" <-
-function(x, digits = 5, ...)
+`print.varpart234` <-
+    function(x, digits = 5, ...)
 {
     cat("No. of explanatory tables:", x$nsets, "\n")
     cat("Total variation (SS):", format(x$SS.Y, digits=digits), "\n")
-    cat("            Variance:", format(x$SS.Y/(x$n-1), digits=digits), "\n")
+    if (x$ordination == "rda")
+        cat("            Variance:", format(x$SS.Y/(x$n-1), digits=digits), "\n")
     cat("No. of observations:",  x$n, "\n")
     cat("\nPartition table:\n")
     out <- rbind(x$fract, "Individual fractions" = NA, x$indfract)
@@ -14,7 +15,8 @@ function(x, digits = 5, ...)
     out[,2:3] <- round(out[,2:3], digits=digits)
     out[,1:4] <- sapply(out[,1:4], function(x) gsub("NA", "  ", format(x, digits=digits)))
     print(out)
-    cat("---\nUse function 'rda' to test significance of fractions of interest\n")
+    cat("---\nUse function", sQuote(x$ordination),
+        "to test significance of fractions of interest\n")
     if (!is.null(x$bigwarning))
         for (i in seq_along(x$bigwarning))
             warning("collinearity detected: redundant variable(s)  between tables ",
diff --git a/R/print.wcmdscale.R b/R/print.wcmdscale.R
index 225091a..0de8cd4 100644
--- a/R/print.wcmdscale.R
+++ b/R/print.wcmdscale.R
@@ -20,6 +20,8 @@
     }
     tbl <- cbind("Inertia" = evs, "Rank" = ranks)
     printCoefmat(tbl, digits = digits, na.print = "")
+    if (!is.na(x$ac) && x$ac > 0)
+        cat("additive constant ", x$ac, " (method ", x$add, ")\n", sep = "")
     cat("\nResults have", NROW(x$points), "points,", NCOL(x$points), "axes\n")
     ## print eigenvalues, but truncate very long lists
     PRINLIM <- 120
diff --git a/R/rarecurve.R b/R/rarecurve.R
index de63cb1..f5e694e 100644
--- a/R/rarecurve.R
+++ b/R/rarecurve.R
@@ -2,6 +2,8 @@
     function(x, step = 1, sample, xlab = "Sample Size", ylab = "Species",
              label = TRUE, col, lty, ...)
 {
+    ## matrix is faster than data.frame
+    x <- as.matrix(x)
     ## check input data: must be counts
     if (!identical(all.equal(x, round(x)), TRUE))
         stop("function accepts only integers (counts)")
diff --git a/R/renyi.R b/R/renyi.R
index 63071cf..ca7a534 100644
--- a/R/renyi.R
+++ b/R/renyi.R
@@ -10,7 +10,10 @@
         n <- nrow(x)
         p <- ncol(x)
     }
-    x <- decostand(x, "total", 1)
+    ## do not make total=1 if not needed (diversity() does anyway,
+    ## species richness does not need)
+    if (!all(scales %in% c(0,1)))
+        x <- sweep(x, 1, rowSums(x), "/")
     m <- length(scales)
     result <- array(0, dim = c(n, m))
     dimnames(result) <- list(sites = rownames(x), scale = scales)
@@ -34,9 +37,10 @@
     }
     if (hill) 
         result <- exp(result)
-    result <- as.data.frame(result)
-    if (any(dim(result) == 1)) 
-        result <- unlist(result, use.names = TRUE)
+    if (any(dim(result) == 1))
+        result <- drop(result)
+    else
+        result <- as.data.frame(result)
     class(result) <- c("renyi", class(result))
     result
 }
diff --git a/R/scores.betadisper.R b/R/scores.betadisper.R
index 8dce0fe..0a5deb5 100644
--- a/R/scores.betadisper.R
+++ b/R/scores.betadisper.R
@@ -8,9 +8,9 @@
         sol$sites <- x$vectors[, choices]
     if("centroids" %in% display) {
         if(is.matrix(x$centroids))
-            sol$centroids <- x$centroids[, choices]
+            sol$centroids <- x$centroids[, choices, drop = FALSE]
         else
-            sol$centroids <- x$centroids[choices]
+            sol$centroids <- matrix(x$centroids[choices], ncol = length(choices), byrow = TRUE)
     }
     if (length(sol) == 1)
         sol <- sol[[1]]
diff --git a/R/scores.rda.R b/R/scores.rda.R
index 05aa97e..9244d8e 100644
--- a/R/scores.rda.R
+++ b/R/scores.rda.R
@@ -1,3 +1,6 @@
+### extract scores from rda, capscale and dbrda results. The two
+### latter can have special features which are commented below. cca
+### results are handled by scores.cca.
 `scores.rda` <-
     function (x, choices = c(1, 2), display = c("sp", "wa", "cn"),
               scaling = "species", const, correlation = FALSE, ...)
@@ -20,7 +23,12 @@
       display[display == "species"] <- "sp"
     take <- tabula[display]
     sumev <- x$tot.chi
-    slam <- sqrt(c(x$CCA$eig, x$CA$eig)[choices]/sumev)
+    ## dbrda can have negative eigenvalues, but have scores only for
+    ## positive
+    eigval <- eigenvals(x)
+    if (inherits(x, "dbrda") && any(eigval < 0))
+        eigval <- eigval[eigval > 0]
+    slam <- sqrt(eigval[choices]/sumev)
     nr <- if (is.null(x$CCA))
         nrow(x$CA$u)
     else
@@ -37,7 +45,11 @@
     if (length(const) == 1) {
         const <- c(const, const)
     }
-    rnk <- x$CCA$rank
+    ## in dbrda we only have scores for positive eigenvalues
+    if (inherits(x, "dbrda"))
+        rnk <- x$CCA$poseig
+    else
+        rnk <- x$CCA$rank
     sol <- list()
     ## process scaling; numeric scaling will just be returned as is
     scaling <- scalingType(scaling = scaling, correlation = correlation)
diff --git a/R/simpleRDA2.R b/R/simpleRDA2.R
index 5a47106..334e9f2 100644
--- a/R/simpleRDA2.R
+++ b/R/simpleRDA2.R
@@ -1,5 +1,7 @@
-"simpleRDA2" <-
-function (Y, X, SS.Y, ...)
+### An internal function used in varpart(): Returns only the raw
+### Rsquare and the rank of constraints in RDA.
+`simpleRDA2` <-
+    function (Y, X, SS.Y, ...)
 {
     Q <- qr(X, tol=1e-6)
     Yfit.X <- qr.fitted(Q, Y)
@@ -9,3 +11,16 @@ function (Y, X, SS.Y, ...)
     list(Rsquare = Rsquare, m = Q$rank)
 }
 
+### Analogous function, but the input must be Gower double-centred
+### dissimilarities 'G = -GowerDblcen(as.matrix(dist(Y)^2))/2'. The
+### math is based on McArdle & Anderson, Ecology 82: 290-297 (2001).
+`simpleDBRDA` <-
+    function(G, X, SS.G, ...)
+{
+    Q <- qr(X, tol=1e-6)
+    Yfit.X <- qr.fitted(Q, G)
+    SS <- sum(diag(Yfit.X))
+    if (missing(SS.G)) SS.G <- sum(diag(G))
+    Rsquare <- SS/SS.G
+    list(Rsquare = Rsquare, m = Q$rank)
+}
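simpleDBRDA implements the McArdle & Anderson trace form R2 = tr(HG)/tr(G), where G is the Gower double-centred matrix of -d^2/2 and H the hat matrix of the centred constraints. A self-contained sketch, with a base-R helper standing in for the internal GowerDblcen:

    dblcen <- function(A) {              # A - row means - col means + grand mean
        A <- sweep(A, 1, rowMeans(A))
        sweep(A, 2, colMeans(A))
    }
    library(vegan)
    data(varespec); data(varechem)
    G <- dblcen(-as.matrix(vegdist(varespec))^2 / 2)
    X <- scale(as.matrix(varechem[, c("N", "P")]), scale = FALSE)
    Q <- qr(X, tol = 1e-6)
    Rsquare <- sum(diag(qr.fitted(Q, G))) / sum(diag(G))   # tr(HG)/tr(G)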
diff --git a/R/simulate.nullmodel.R b/R/simulate.nullmodel.R
index f81556e..97128a1 100644
--- a/R/simulate.nullmodel.R
+++ b/R/simulate.nullmodel.R
@@ -21,11 +21,8 @@ function(object, nsim=1, seed = NULL, burnin=0, thin=1, ...)
         x <- object$state
     } else {
         x <- m
-#        if (thin != 1)
-#            message("non-sequential model: 'thin' set to 1")
-        thin <- 1L
-#        if (burnin != 0)
-#            message("non-sequential model: 'burnin' set to 0")
+        ## non-sequential models have no burnin -- but they may have
+        ## thinning: set burnin = 0, but leave thin as the user set it.
         burnin <- 0L
     }
     perm <- object$commsim$fun(x=x,
@@ -40,12 +37,17 @@ function(object, nsim=1, seed = NULL, burnin=0, thin=1, ...)
         fill=object$fill,
         thin=as.integer(thin), ...)
     if (object$commsim$isSeq) {
-        Start <- as.integer(object$iter + 1L)
-        End <- as.integer(object$iter + nsim * thin)
+        Start <- object$iter + thin
+        End <- object$iter + nsim * thin
+        ## sequence can overflow integer
+        if (Start <= .Machine$integer.max)
+            Start <- as.integer(Start)
+        if (End <= .Machine$integer.max)
+            End <- as.integer(End)
         state <- perm[,,nsim]
         storage.mode(state) <- object$commsim$mode
         assign("state", state, envir=object)
-        assign("iter", as.integer(End), envir=object)
+        assign("iter", End, envir=object)
     } else {
         Start <- 1L
         End <- as.integer(nsim)
diff --git a/R/simulate.rda.R b/R/simulate.rda.R
index 70ed381..02a2200 100644
--- a/R/simulate.rda.R
+++ b/R/simulate.rda.R
@@ -246,3 +246,14 @@
     ans
 }
 
+### simulate.dbrda cannot be done along similar lines as
+### simulate.capscale, because low-rank approximation needs column
+### scores v and cannot be found only from row scores u that are the
+### only ones we have in dbrda(). Residuals also need extra thinking,
+### and therefore we just disable simulate.dbrda()
+
+`simulate.dbrda` <-
+    function(object, nsim = 1, seed = NULL, ...)
+{
+    .NotYetImplemented()
+}
diff --git a/R/smbind.R b/R/smbind.R
new file mode 100644
index 0000000..68fdcd7
--- /dev/null
+++ b/R/smbind.R
@@ -0,0 +1,170 @@
+`smbind` <-
+    function (object, ..., MARGIN, strict = TRUE)
+{
+    if (missing(MARGIN))
+        stop("MARGIN argument must be specified")
+    MARGIN <- as.integer(MARGIN)
+    if (length(MARGIN) != 1L)
+        stop("MARGIN length must be 1")
+    if (!(MARGIN %in% 1L:3L))
+        stop("MARGIN value must be in 1:3")
+
+    if (is.list(object)) {
+        obj <- object
+        if (!missing(...))
+            warning("'object' was a list, '...' ignored")
+    } else {
+        obj <- list(object, ...)
+    }
+    l <- length(obj)
+    if (l < 2L)
+        return(obj[[1L]])
+    att <- lapply(obj, attributes)
+    isSeq <- att[[1L]]$isSeq
+    startEq <- endEq <- thinEq <- OKseed <- TRUE
+    for (i in 2L:l) {
+        ## data must be identical when MARGIN=3
+        if (MARGIN == 3L && !identical(att[[1L]][["data"]], att[[i]][["data"]]))
+            stop("'data' attributes not identical")
+        ## dimensions need to match except for MARGIN
+        if (!identical(att[[1L]][["dim"]][-MARGIN], att[[i]][["dim"]][-MARGIN]))
+            stop("dimension mismatch")
+        ## method settings need to be set on return object
+        ## thus these need to be identical
+        for (NAM in c("method", "binary", "isSeq", "mode", "class")) {
+            if (!identical(att[[1L]][[NAM]], att[[i]][[NAM]]))
+                stop("'", NAM, "' attributes not identical")
+        }
+        ## ts attributes are tricky: evaluate outside of the loop
+        for (NAM in c("start", "end", "thin")) {
+            if (!identical(att[[1L]][["start"]], att[[i]][["start"]]))
+                startEq <- FALSE
+            if (!identical(att[[1L]][["end"]], att[[i]][["end"]]))
+                endEq <- FALSE
+            if (!identical(att[[1L]][["thin"]], att[[i]][["thin"]]))
+                thinEq <- FALSE
+        }
+        ## seed is important when 'data' are the same (MARGIN=3)
+        ## but it is up to the user
+        ## return value has NULL seed attribute
+        if (MARGIN == 3L && identical(att[[1L]][["seed"]], att[[i]][["seed"]])) {
+            OKseed <- FALSE
+        }
+    }
+    if (!OKseed)
+        warning("identical 'seed' attributes found")
+    if (isSeq) {
+        outStart <- outEnd <- outThin <- NA
+        type <- "none"
+        ## if MARGIN != 3
+        ##   all match or fail
+        ##   when all match: keep ts attributes, type: "strat"
+        ## if MARGIN==3
+        ##   sequential algorithms need identical ts attributes
+        ##   * if parallel (start/end/thin identical): "par"
+        ##   --> original start, end, thin, + set chains attr
+        ##   * if subsequent (start/end/thin form a sequence): "seq"
+        ##   --> calculate start & end, thin same
+        ##   * all else: "none"
+        ##   --> fail unless strict=FALSE (when start=NA, end=NA, thin=NA)
+        if (MARGIN != 3L) {
+            if (startEq && endEq && thinEq) {
+                type <- "strat"
+                outStart <- att[[1L]]$start
+                outEnd <- att[[1L]]$end
+                outThin <- att[[1L]]$thin
+            }
+        } else {
+            if (startEq && endEq && thinEq) {
+                type <- "par"
+                outStart <- att[[1L]]$start
+                outEnd <- att[[1L]]$end
+                outThin <- att[[1L]]$thin
+            }
+            if (!startEq && !endEq && thinEq) {
+                stv <- sapply(att, "[[", "start")
+                o <- order(stv)
+                att <- att[o]
+                obj <- obj[o]
+                stv <- sapply(att, "[[", "start")
+                env <- sapply(att, "[[", "end")
+                thv <- att[[1L]]$thin
+                nsv <- sapply(obj, function(z) dim(z)[3L])
+                vals <- lapply(1:l, function(i)
+                    seq(stv[i], env[i], by=thv))
+                OK <- logical(4L)
+                if (length(stv) == length(unique(stv)))
+                    OK[1L] <- TRUE
+                if (length(env) == length(unique(env)))
+                    OK[2L] <- TRUE
+                if (all(nsv == sapply(vals, length)))
+                    OK[3L] <- TRUE
+                if (length(seq(stv[1], env[l], by=thv)) == length(unlist(vals)))
+                    OK[4L] <- TRUE
+                if (all(OK)) {
+                    if (all(seq(stv[1], env[l], by=thv) == unlist(vals))) {
+                            type <- "seq"
+                            outStart <- stv[1]
+                            outEnd <- env[l]
+                            outThin <- thv
+                    }
+                }
+            }
+        }
+        if (type == "none") {
+            if (strict) {
+                stop("incosistent 'start', 'end', 'thin' attributes")
+            } else {
+                warning("incosistent 'start', 'end', 'thin' attributes")
+            }
+        }
+    }
+    ## set final dimensions
+    DIM <- att[[1L]]$dim
+    DIMs <- sapply(att, function(z) z$dim[MARGIN])
+    cDIMs <- cumsum(DIMs)
+    DIM[MARGIN] <- cDIMs[l]
+    out <- array(NA, dim = DIM)
+    ## copy the 1st object
+    if (MARGIN == 1L)
+        out[1L:dim(obj[[1L]])[1L],,] <- obj[[1L]]
+    if (MARGIN == 2L)
+        out[,1L:dim(obj[[1L]])[2L],] <- obj[[1L]]
+    if (MARGIN == 3L)
+        out[,,1L:dim(obj[[1L]])[3L]] <- obj[[1L]]
+    ## data attribute will change when MARGIN != 3
+    DATA <- att[[1L]]$data
+    ## copy 2:l objects and data argument
+    for (i in 2L:l) {
+        j <- (cDIMs[i - 1L] + 1L):cDIMs[i]
+        if (MARGIN == 1L) {
+            out[j,,] <- obj[[i]]
+            DATA <- rbind(DATA, att[[i]]$data)
+        }
+        if (MARGIN == 2L) {
+            out[,j,] <- obj[[i]]
+            DATA <- cbind(DATA, att[[i]]$data)
+        }
+        if (MARGIN == 3L) {
+            out[,,j] <- obj[[i]]
+        }
+    }
+    ## assembling return object
+    ratt <- att[[1L]]
+    ratt$data <- DATA
+    ratt$seed <- NA
+    ratt$dim <- DIM
+    if (!isSeq)
+        ratt$end <- cDIMs[l]
+    if (isSeq) {
+        ratt$start <- outStart
+        ratt$end <- outEnd
+        ratt$thin <- outThin
+        if (type == "par")
+            ratt$chains <- l
+    }
+    ratt$dimnames[[MARGIN]] <- make.names(unlist(lapply(att, function(z)
+        z$dimnames[[MARGIN]])), unique = TRUE)
+    attributes(out) <- ratt
+    out
+}
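A sketch of smbind joining two independent sequential null-model runs as parallel chains: with MARGIN = 3 the 'data' attributes must be identical, and matching start/end/thin gives the "par" case with a 'chains' attribute. sipoo is a binary data set bundled with vegan:

    library(vegan)
    data(sipoo)
    s1 <- simulate(nullmodel(sipoo, "swap"), nsim = 10, thin = 5, seed = 1)
    s2 <- simulate(nullmodel(sipoo, "swap"), nsim = 10, thin = 5, seed = 2)
    sm <- smbind(s1, s2, MARGIN = 3)
    attr(sm, "chains")   # 2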
diff --git a/R/spenvcor.R b/R/spenvcor.R
index 56ed143..3ba6643 100644
--- a/R/spenvcor.R
+++ b/R/spenvcor.R
@@ -1,5 +1,5 @@
-"spenvcor" <-
-function (object) 
+`spenvcor` <-
+    function (object) 
 {
     if (is.null(object$CCA))
         stop("Needs results from constrained ordination")
diff --git a/R/stressplot.wcmdscale.R b/R/stressplot.wcmdscale.R
index 0db46e5..ca622ed 100644
--- a/R/stressplot.wcmdscale.R
+++ b/R/stressplot.wcmdscale.R
@@ -1,5 +1,5 @@
 ### stressplot() methods for eigenvector ordinations wcmdscale, rda,
-### cca, capscale
+### cca, capscale, dbrda
 
 `stressplot.wcmdscale` <-
     function(object, k = 2, pch,  p.col = "blue", l.col = "red", lwd = 2, ...)
@@ -13,7 +13,8 @@
         stop("observed distances cannot be reconstructed: all axes were not calculated")
     ## Get the ordination distances in k dimensions
     if (k > NCOL(object$points))
-        stop("'k' cannot exceed the number of real dimensions")
+        warning(gettextf("max allowed rank is k = %d", NCOL(object$points)))
+    k <- min(NCOL(object$points), k)
     w <- sqrt(object$weights)
     u <- diag(w) %*% object$points
     odis <- dist(u[,1:k, drop = FALSE])
@@ -21,13 +22,15 @@
     dis <- dist(u)
     if (!is.null(object$negaxes))
         dis <- sqrt(dis^2 - dist(diag(w) %*% object$negaxes)^2)
-    ## additive constant is not implemented in wcmdscale (which
-    ## returns 'ac = NA'), but the next statement would take care of
-    ## that: we want to have the input distances as observed distances
-    ## so that we need to subtract 'ac' here, although ordination
-    ## distances 'odis' do not add up to 'dis' but to 'dis + ac'.
-    if (!is.na(object$ac))
-        dis <- dis - object$ac
+    ## Remove additive constant to get original dissimilarities
+    if (!is.na(object$ac)) {
+        if (object$add == "lingoes")
+            dis <- sqrt(dis^2 - 2 * object$ac)
+        else if (object$add == "cailliez")
+            dis <- dis - object$ac
+        else
+            stop("unknown Euclidifying adjustment: no idea what to do")
+    }
     ##Plot
     if (missing(pch))
         if (length(dis) > 5000)
@@ -109,6 +112,10 @@
 {
     ## Scores to reconstruct data
     u <- cbind(object$CCA$u, object$CA$u)
+    ## check rank
+    if (k > NCOL(u))
+        warning(gettextf("max allowed rank is k = %d", ncol(u)))
+    k <- min(k, ncol(u))
     ev <- c(object$CCA$eig, object$CA$eig)
     if (object$adjust == 1)
         const <- sqrt(NROW(u) - 1)
@@ -135,10 +142,25 @@
     ## Distances
     dis <- dist(Xbar)
     odis <- dist(Xbark)
-    if (!is.null(object$CA$imaginary.u.eig))
-        dis <- sqrt(dis^2 - dist(object$CA$imaginary.u.eig)^2)
-    if (!is.null(object$ac))
-        dis <- dis - object$ac
+    if (!is.null(object$CA$imaginary.u.eig)) {
+        dis <- dis^2 - dist(object$CA$imaginary.u.eig)^2
+        if (all(dis > -sqrt(.Machine$double.eps)))
+            dis <- sqrt(pmax(dis, 0))
+        else # neg dis will be NaN with a warning
+            dis <- sqrt(dis)
+    }
+    ## Remove additive constant to get original dissimilarities
+    if (!is.null(object$ac)) {
+        if (object$add == "lingoes")
+            dis <- sqrt(dis^2 - 2 * object$ac)
+        else if (object$add == "cailliez")
+            dis <- dis - object$ac
+        else
+            stop("unknown Euclidifying adjustment: no idea what to do")
+    }
+    ## undo internal sqrt.dist
+    if (object$sqrt.dist)
+        dis <- dis^2
     ## plot like above
         ## Plot
     if (missing(pch))
@@ -152,6 +174,71 @@
     invisible(odis)
 }
 
+### dbrda() returns only row scores 'u' (LC scores for constraints,
+### site scores for unconstrained part), and these can be used to
+### reconstitute dissimilarities only in unconstrained ordination or
+### for the constrained component.
+
+`stressplot.dbrda` <-
+    function(object, k = 2, pch, p.col = "blue", l.col = "red", lwd = 2, ...)
+{
+    ## Does not work correctly for p-dbRDA
+    if (!is.null(object$pCCA))
+        stop("cannot be used with partial dbrda")
+    ## Reconstruct original distances from Gower 'G'
+    dis <- if (is.null(object$CCA))
+               object$CA$G
+           else
+               object$CCA$G
+    if (object$adjust == 1)
+        const <- nobs(object) - 1
+    else
+        const <- 1
+    dia <- diag(dis)
+    dis <- -2 * dis + outer(dia, dia, "+")
+    dis <- sqrt(as.dist(dis) * const)
+    ## Remove additive constant to get original dissimilarities
+    if (!is.null(object$ac)) {
+        if (object$add == "lingoes")
+            dis <- sqrt(dis^2 - 2 * object$ac)
+        else if (object$add == "cailliez")
+            dis <- dis - object$ac
+        else
+            stop("unknown Euclidifying adjustment: no idea what to do")
+    }
+    ## undo internal sqrt.dist
+    if (object$sqrt.dist)
+        dis <- dis^2
+    ## Approximate dissimilarities from real components. Can only be
+    ## used for one component.
+    if (is.null(object$CCA)) {
+        U <- object$CA$u
+        eig <- object$CA$eig
+    } else {
+        U <- object$CCA$u
+        eig <- object$CCA$eig
+    }
+    eig <- eig[eig > 0] 
+    ## check that 'k' does not exceed real rank
+    if (k > ncol(U))
+        warning(gettextf("max allowed rank is k = %d", ncol(U)))
+    k <- min(k, ncol(U))
+    Gk <- tcrossprod(sweep(U[, seq_len(k), drop=FALSE], 2,
+                  sqrt(eig[seq_len(k)]), "*"))
+    dia <- diag(Gk)
+    odis <- sqrt(as.dist(-2 * Gk + outer(dia, dia, "+")) * const)
+    ## Plot
+    if (missing(pch))
+        if (length(dis) > 5000)
+            pch <- "."
+        else
+            pch <- 1
+    plot(dis, odis, pch = pch, col = p.col, xlab = "Observed Dissimilarity",
+         ylab = "Ordination Distance", ...)
+    abline(0, 1, col = l.col, lwd = lwd, ...)
+    invisible(odis)
+}
+
 ## Standard R PCA functions
 
 `stressplot.prcomp` <-
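A sketch of the new stressplot.dbrda, which rebuilds the observed dissimilarities from the stored Gower matrix G and plots them against ordination distances in k real dimensions (partial models are refused):

    library(vegan)
    data(varespec); data(varechem)
    m <- dbrda(vegdist(varespec) ~ N + P, data = varechem)
    stressplot(m, k = 2)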
diff --git a/R/summary.ordiellipse.R b/R/summary.ordiellipse.R
index 2366e7c..f3fafe2 100644
--- a/R/summary.ordiellipse.R
+++ b/R/summary.ordiellipse.R
@@ -5,8 +5,10 @@
     function(object, ...)
 {
     cnts <- sapply(object, function(x) x$center)
+    ## 2nd eigenvalue should be zero if points are on a line (as with
+    ## two points), but sometimes it comes out negative and the area is NaN
     areas <- sapply(object,
                     function(x)
-                    prod(sqrt(eigen(x$cov)$values)) * pi * x$scale^2)
+                        sqrt(pmax(0, det(x$cov))) * pi * x$scale^2)
     rbind(cnts, `Area` = areas)
 }
diff --git a/R/summary.ordihull.R b/R/summary.ordihull.R
index 9f353e2..e55568a 100644
--- a/R/summary.ordihull.R
+++ b/R/summary.ordihull.R
@@ -12,7 +12,7 @@
     polycentre <- function(x) {
         n <- nrow(x)
         if (n < 4)
-            return(colMeans(x[-n,]))
+            return(colMeans(x[-n,, drop = FALSE]))
         xy <- x[-n,1]*x[-1,2] - x[-1,1]*x[-n,2]
         A <- sum(xy)/2
         xc <- sum((x[-n,1] + x[-1,1]) * xy)/A/6
diff --git a/R/text.orditkplot.R b/R/text.orditkplot.R
index 236d534..d021760 100644
--- a/R/text.orditkplot.R
+++ b/R/text.orditkplot.R
@@ -1,6 +1,9 @@
-`text.orditkplot` <-
-    function(x, ...)
-{
-    text(x$labels, labels = rownames(x$labels), ...)
+`text.orditkplot` <- function(x, cex = x$args$tcex, col = x$args$tcol,
+                              font = attr(x$labels, "font"), ...) {
+    if (is.null(font)) {
+        font <- par("font")
+    }
+    text(x$labels, labels = rownames(x$labels), cex = cex, col = col,
+         font = font, ...)
 }
 
diff --git a/R/update.nullmodel.R b/R/update.nullmodel.R
index a777eea..0539112 100644
--- a/R/update.nullmodel.R
+++ b/R/update.nullmodel.R
@@ -31,6 +31,7 @@ function(object, nsim=1, seed = NULL, ...)
 #        attr(state, "iter") <- iter
         out <- nullmodel(state, object$commsim)
         out$iter <- iter
+        out$data <- object$data
     } else {
 #        state <- NULL
         out <- object
diff --git a/R/varpart.R b/R/varpart.R
index 131c005..552c6cc 100644
--- a/R/varpart.R
+++ b/R/varpart.R
@@ -1,20 +1,54 @@
 `varpart` <-
-    function (Y, X, ..., data, transfo, scale = FALSE)
+    function (Y, X, ..., data, transfo, scale = FALSE, add = FALSE,
+              sqrt.dist = FALSE)
 {
     if (missing(data))
         data <- parent.frame()
     X <- list(X, ...)
     if ((length(X) < 2 || length(X) > 4))
         stop("needs 2 to 4 explanatory tables")
-    if (!missing(transfo)) {
-        Y <- decostand(Y, transfo)
-        transfo <- attr(Y, "decostand")
+    ## transfo and scale can be used only with non-distance data
+    if (inherits(Y, "dist")) {
+        inert <- attr(Y, "method")
+        inert <- paste(paste0(toupper(substring(inert, 1, 1)),
+                              substring(inert, 2)), "distance")
+        ## sqrt of distances?
+        if (sqrt.dist)
+            Y <- sqrt(Y)
+        else
+            inert <- paste("squared", inert)
+        ## additive constant to euclidify distances?
+        if (is.logical(add) && isTRUE(add))
+            add <- "lingoes"
+        if (is.character(add)) {
+            add <- match.arg(add, c("lingoes", "cailliez"))
+            if (add == "lingoes") {
+                ac <- addLingoes(as.matrix(Y))
+                Y <- sqrt(Y^2 + 2 * ac)
+            } else if (add == "cailliez") {
+                ac <- addCailliez(as.matrix(Y))
+                Y <- Y + ac
+            }
+            if (ac > sqrt(.Machine$double.eps))
+                inert <- paste(paste0(toupper(substring(add, 1, 1)),
+                                    substring(add, 2)), "adjusted", inert)
+        }
+        RDA <- "dbRDA"
+        if(!missing(transfo) || !missing(scale))
+            message("arguments 'transfo' and 'scale' are ignored with distances")
+    } else {
+        inert <- "variance"
+        RDA <- "RDA"
+        if (!missing(transfo)) {
+            Y <- decostand(Y, transfo)
+            transfo <- attr(Y, "decostand")
+        }
+        if (!missing(transfo) && (is.null(dim(Y)) || ncol(Y) == 1))
+            warning("Transformations probably are meaningless to a single variable")
+        if (scale && !missing(transfo))
+            warning("Y should not be both transformed and scaled (standardized)")
+        Y <- scale(Y, center = TRUE, scale = scale)
     }
-    if (!missing(transfo) && (is.null(dim(Y)) || ncol(Y) == 1))
-        warning("Transformations probably are meaningless to a single variable")
-    if (scale && !missing(transfo))
-        warning("Y should not be both transformed and scaled (standardized)")
-    Y <- scale(Y, center = TRUE, scale = scale)
     Sets <- list()
     for (i in seq_along(X)) {
         if (inherits(X[[i]], "formula")) {
@@ -35,9 +69,15 @@
                        varpart2(Y, Sets[[1]], Sets[[2]]),
                        varpart3(Y, Sets[[1]], Sets[[2]], Sets[[3]]),
                        varpart4(Y, Sets[[1]], Sets[[2]], Sets[[3]], Sets[[4]]))
+    if (inherits(Y, "dist"))
+        out$part$ordination <- "capscale"
+    else
+        out$part$ordination <- "rda"
     out$scale <- scale
     if (!missing(transfo))
         out$transfo <- transfo
+    out$inert <- inert
+    out$RDA <- RDA
     out$call <- match.call()
     mx <- rep(" ", length(X))
     for (i in seq_along(X)) mx[i] <- deparse(out$call[[i+2]], width.cutoff = 500)
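With the changes above, varpart accepts a dissimilarity object as Y and partitions it with the dbrda algorithm; transfo and scale are then ignored, while sqrt.dist and add apply the Euclidifying corrections. A minimal sketch:

    library(vegan)
    data(varespec); data(varechem)
    vp <- varpart(vegdist(varespec), ~ N + P + K, ~ Al + Fe,
                  data = varechem)
    vp       # "Partition of squared Bray distance in dbRDA"
    plot(vp)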
diff --git a/R/varpart2.R b/R/varpart2.R
index df04d30..82c8d47 100644
--- a/R/varpart2.R
+++ b/R/varpart2.R
@@ -1,7 +1,16 @@
-"varpart2" <-
+`varpart2` <-
     function (Y, X1, X2) 
 {
-    Y <- as.matrix(Y)
+    if (inherits(Y, "dist")) {
+        Y <- GowerDblcen(as.matrix(Y^2), na.rm = FALSE)
+        Y <- -Y/2
+        SS.Y <- sum(diag(Y))
+        simpleRDA2 <- match.fun(simpleDBRDA)
+    } else {
+        Y <- as.matrix(Y)
+        Y <- scale(Y, center = TRUE, scale = FALSE)
+        SS.Y <- sum(Y * Y)
+    }
     X1 <- as.matrix(X1)
     X2 <- as.matrix(X2)
     n <- nrow(Y)
@@ -14,10 +23,8 @@
         stop("Y and X1 do not have the same number of rows")
     if (n2 != n) 
         stop("Y and X2 do not have the same number of rows")
-    Y <- scale(Y, center = TRUE, scale = FALSE)
     X1 <- scale(X1, center = TRUE, scale = FALSE)
     X2 <- scale(X2, center = TRUE, scale = FALSE)
-    SS.Y <- sum(Y * Y)
     dummy <- simpleRDA2(Y, X1, SS.Y, mm1)
     ab.ua <- dummy$Rsquare
     m1 <- dummy$m
diff --git a/R/varpart3.R b/R/varpart3.R
index 7a1df7e..75dc619 100644
--- a/R/varpart3.R
+++ b/R/varpart3.R
@@ -1,7 +1,16 @@
-"varpart3" <-
+`varpart3` <-
     function (Y, X1, X2, X3) 
 {
-    Y <- as.matrix(Y)
+    if (inherits(Y, "dist")) {
+        Y <- GowerDblcen(as.matrix(Y^2), na.rm = FALSE)
+        Y <- -Y/2
+        SS.Y <- sum(diag(Y))
+        simpleRDA2 <- match.fun(simpleDBRDA)
+    } else {
+        Y <- as.matrix(Y)
+        Y <- scale(Y, center = TRUE, scale = FALSE)
+        SS.Y <- sum(Y * Y)
+    }
     X1 <- as.matrix(X1)
     X2 <- as.matrix(X2)
     X3 <- as.matrix(X3)
@@ -19,11 +28,9 @@
         stop("Y and X2 do not have the same number of rows")
     if (n3 != n) 
         stop("Y and X3 do not have the same number of rows")
-    Y <- scale(Y, center = TRUE, scale = FALSE)
     X1 <- scale(X1, center = TRUE, scale = FALSE)
     X2 <- scale(X2, center = TRUE, scale = FALSE)
     X3 <- scale(X3, center = TRUE, scale = FALSE)
-    SS.Y <- sum(Y * Y)
     dummy <- simpleRDA2(Y, X1, SS.Y, mm1)
     adfg.ua <- dummy$Rsquare
     m1 <- dummy$m
diff --git a/R/varpart4.R b/R/varpart4.R
index d7ef19f..edbb25e 100644
--- a/R/varpart4.R
+++ b/R/varpart4.R
@@ -1,7 +1,16 @@
-"varpart4" <-
+`varpart4` <-
     function (Y, X1, X2, X3, X4) 
 {
-    Y <- as.matrix(Y)
+    if (inherits(Y, "dist")) {
+        Y <- GowerDblcen(as.matrix(Y^2), na.rm = FALSE)
+        Y <- -Y/2
+        SS.Y <- sum(diag(Y))
+        simpleRDA2 <- match.fun(simpleDBRDA)
+    } else {
+        Y <- as.matrix(Y)
+        Y <- scale(Y, center = TRUE, scale = FALSE)
+        SS.Y <- sum(Y * Y)
+    }
     X1 <- as.matrix(X1)
     X2 <- as.matrix(X2)
     X3 <- as.matrix(X3)
@@ -24,12 +33,10 @@
         stop("Y and X3 do not have the same number of rows")
     if (n4 != n) 
         stop("Y and X4 do not have the same number of rows")
-    Y <- scale(Y, center = TRUE, scale = FALSE)
     X1 <- scale(X1, center = TRUE, scale = FALSE)
     X2 <- scale(X2, center = TRUE, scale = FALSE)
     X3 <- scale(X3, center = TRUE, scale = FALSE)
     X4 <- scale(X4, center = TRUE, scale = FALSE)
-    SS.Y <- sum(Y * Y)
     dummy <- simpleRDA2(Y, X1, SS.Y)
     aeghklno.ua <- dummy$Rsquare
     m1 <- dummy$m
diff --git a/R/vegan-defunct.R b/R/vegan-defunct.R
index 927dc92..b468d1a 100644
--- a/R/vegan-defunct.R
+++ b/R/vegan-defunct.R
@@ -1,8 +1,72 @@
-## "new" permutation code was moved to package 'permute' in R 2.0-0.
-## Here we list as defunct those functions that are not in 'permute'.
+### density and densityplot were deprecated in vegan 2.2-0 in favour
+### of corresponding functions for permustats()
 
-`metaMDSrotate` <-
-    function(object, vec, na.rm = FALSE, ...)
+`density.anosim` <-
+    function(x, ...)
 {
-    .Defunct(new="MDSrotate", "vegan")
+    .Defunct("densityplot(permustats(<anosim.result>))",
+                package="vegan")
+}
+
+`density.adonis` <-
+    function(x, ...)
+{
+    .Defunct("densityplot(permustats(<adonis.result>))",
+                package="vegan")
+}
+
+`densityplot.adonis` <-
+    function(x, data, xlab = "Null", ...)
+{
+    .Defunct("densityplot(permustats(<adonis.result>))",
+                package="vegan")
+}
+
+`density.mantel` <-
+    function(x, ...)
+{
+    .Defunct("densityplot(permustats(<mantel.result>))",
+                package="vegan")
+}
+
+`density.mrpp` <-
+    function(x, ...)
+{
+    .Defunct("densityplot(permustats(<mrpp.result>))",
+                package="vegan")
+}
+
+`density.permutest.cca` <-
+    function(x, ...)
+{
+    .Defunct("densityplot(permustats(<permutest.result>))",
+                package="vegan")
+}
+
+`density.protest` <-
+    function(x, ...)
+{
+    .Defunct("densityplot(permustats(<protest.result>))",
+                package="vegan")
+}
+
+`plot.vegandensity` <-
+    function (x, main = NULL, xlab = NULL, ylab = "Density", type = "l", 
+    zero.line = TRUE, obs.line = TRUE, ...) 
+{
+    .Defunct("permustats methods", package = "vegan")
+}
+
+`density.oecosimu` <-
+    function(x, ...)
+{
+    .Defunct("densityplot(permustats(<oecosimu.result>))",
+                package="vegan") 
+}
+
+`densityplot.oecosimu` <-
+    function(x, data, xlab = "Simulated", ...)
+{
+    .Defunct("densityplot(permustats(<oecosimu.result>))",
+                package="vegan")
 }
diff --git a/R/vegan-deprecated.R b/R/vegan-deprecated.R
index e90a8af..8a53a21 100644
--- a/R/vegan-deprecated.R
+++ b/R/vegan-deprecated.R
@@ -4,7 +4,7 @@
 function (x, method, thin = 1) 
 {
     ## Do not yet warn on deprecation to allow smooth transition
-    ##.Deprecated("nullmodel", package="vegan")
+    .Deprecated("simulate(nullmodel(x, method))", package="vegan")
     method <- match.arg(method, 
                         c("r0","r1","r2","r00","c0","swap", "tswap",
                           "backtrack", "quasiswap"))
@@ -16,189 +16,3 @@ function (x, method, thin = 1)
     attributes(out) <- attributes(x)
     out
 }
-
-### density and densityplot
-
-### density & densityplot methods for vegan functions returning
-### statistics from permuted/simulated data. These are modelled after
-### density.oecosimu and densityplot.oecosimu (which are in their
-### separate files).
-
-## anosim
-
-`density.anosim` <-
-    function(x, ...)
-{
-    .Deprecated("densityplot(permustats(<anosim.result>))",
-                package="vegan")
-    obs <- x$statistic
-    ## Put observed statistic among permutations
-    out <- density(c(obs, x$perm), ...)
-    out$call <- match.call()
-    out$observed <- obs
-    out$call[[1]] <- as.name("density")
-    class(out) <- c("vegandensity", class(out))
-    out
-}
-
-## adonis can return a matrix of terms, hence we also have densityplot()
-
-`density.adonis` <-
-    function(x, ...)
-{
-    .Deprecated("densityplot(permustats(<adonis.result>))",
-                package="vegan")
-    cols <- ncol(x$f.perms)
-    if (cols > 1)
-        warning("'density' is meaningful only with one term, you have ", cols)
-    obs <- x$aov.tab$F.Model
-    obs <- obs[!is.na(obs)]
-    out <- density(c(obs, x$f.perms), ...)
-    out$observed <- obs
-    out$call <- match.call()
-    out$call[[1]] <- as.name("density")
-    class(out) <- c("vegandensity", class(out))
-    out
-}
-
-`densityplot.adonis` <-
-    function(x, data, xlab = "Null", ...)
-{
-    .Deprecated("densityplot(permustats(<adonis.result>))",
-                package="vegan")
-    obs <- x$aov.tab$F.Model
-    obs <- obs[!is.na(obs)]
-    sim <- rbind(obs, x$f.perms)
-    nm <- rownames(x$aov.tab)[col(sim)]
-    densityplot( ~ as.vector(sim) | factor(nm, levels = unique(nm)),
-                xlab = xlab,
-                panel = function(x, ...) {
-                    panel.densityplot(x, ...)
-                    panel.abline(v = obs[panel.number()], ...)
-                },
-                ...)
-}
-
-## mantel
-
-`density.mantel` <-
-    function(x, ...)
-{
-    .Deprecated("densityplot(permustats(<mantel.result>))",
-                package="vegan")
-    obs <- x$statistic
-    out <- density(c(obs, x$perm), ...)
-    out$observed <- obs
-    out$call <- match.call()
-    out$call[[1]] <- as.name("density")
-    class(out) <- c("vegandensity", class(out))
-    out
-}
-
-## mrpp
-
-`density.mrpp` <-
-    function(x, ...)
-{
-    .Deprecated("densityplot(permustats(<mrpp.result>))",
-                package="vegan")
-    obs <- x$delta
-    out <- density(c(obs, x$boot.deltas), ...)
-    out$observed <- obs
-    out$call <- match.call()
-    out$call[[1]] <- as.name("density")
-    class(out) <- c("vegandensity", class(out))
-    out
-}
-
-## anova.cca does not return permutation results, but permutest.cca
-## does. However, permutest.cca always finds only one statistic. Full
-## tables anova.cca are found by repeated calls to permutest.cca.
-
-`density.permutest.cca` <-
-    function(x, ...)
-{
-    .Deprecated("densityplot(permustats(<permutest.result>))",
-                package="vegan")
-    obs <- x$F.0
-    out <- density(c(obs, x$F.perm), ...)
-    out$observed <- obs
-    out$call <- match.call()
-    out$call[[1]] <- as.name("density")
-    class(out) <- c("vegandensity", class(out))
-    out
-}
-
-## protest
-
-`density.protest` <-
-    function(x, ...)
-{
-    .Deprecated("densityplot(permustats(<protest.result>))",
-                package="vegan")
-    obs <- x$t0
-    out <- density(c(obs, x$t), ...)
-    out$observed <- obs
-    out$call <- match.call()
-    out$call[[1]] <- as.name("density")
-    class(out) <- c("vegandensity", class(out))
-    out
-}
-
-#### plot method: the following copies stats::plot.density() code but
-#### adds one new argument to draw abline(v=...) for the observed
-#### statistic
-
-`plot.vegandensity` <-
-    function (x, main = NULL, xlab = NULL, ylab = "Density", type = "l", 
-    zero.line = TRUE, obs.line = TRUE, ...) 
-{
-    if (is.null(xlab)) 
-        xlab <- paste("N =", x$n, "  Bandwidth =", formatC(x$bw))
-    if (is.null(main)) 
-        main <- deparse(x$call)
-    ## change obs.line to col=2 (red) if it was logical TRUE
-    if (isTRUE(obs.line))
-        obs.line <- 2
-    plot.default(x, main = main, xlab = xlab, ylab = ylab, type = type,
-                 ...)
-    if (zero.line) 
-        abline(h = 0, lwd = 0.1, col = "gray")
-    if (is.character(obs.line) || obs.line)
-        abline(v = x$observed, col = obs.line)
-    invisible(NULL)
-}
-
-`density.oecosimu` <-
-    function(x, ...)
-{
-    .Deprecated("densityplot(permustats(<oecosimu.result>))",
-                package="vegan") 
-    cols <- nrow(x$oecosimu$simulated)
-    if (cols > 1)
-        warning("'density' is meaningful only with one statistic, you have ", cols)
-    obs <- x$oecosimu$statistic
-    out <- density(rbind(obs, t(x$oecosimu$simulated)), ...)
-    out$observed <- obs
-    out$call <- match.call()
-    out$call[[1]] <- as.name("density")
-    class(out) <- c("vegandensity", class(out))
-    out
-}
-
-`densityplot.oecosimu` <-
-    function(x, data, xlab = "Simulated", ...)
-{
-    .Deprecated("densityplot(permustats(<oecosimu.result>))",
-                package="vegan")
-    obs <- x$oecosimu$statistic
-    sim <- rbind(obs, t(x$oecosimu$simulated))
-    nm <- names(obs)[col(sim)]
-    densityplot( ~ as.vector(sim) | factor(nm, levels = unique(nm)),
-                xlab = xlab,
-                panel = function(x, ...) {
-                    panel.densityplot(x, ...)
-                    panel.abline(v = obs[panel.number()], ...)
-                },
-                ...)
-}
diff --git a/R/wascores.R b/R/wascores.R
index 8189399..0350a78 100644
--- a/R/wascores.R
+++ b/R/wascores.R
@@ -18,10 +18,8 @@
         x.w <- rowSums(w)
         ewa.w <- colSums(w[,i, drop=FALSE])
         ewa <- wa[i,, drop=FALSE]
-        x.cov <- cov.wt(x, x.w)
-        wa.cov <- cov.wt(ewa, ewa.w)
-        x.cov$cov <- x.cov$cov * (1 - sum(x.cov$wt^2))
-        wa.cov$cov <- wa.cov$cov * (1 - sum(wa.cov$wt^2))
+        x.cov <- cov.wt(x, x.w, method = "ML")
+        wa.cov <- cov.wt(ewa, ewa.w, method = "ML")
         mul <- sqrt(diag(x.cov$cov)/diag(wa.cov$cov))
         ewa <- sweep(ewa, 2, wa.cov$center, "-")
         ewa <- sweep(ewa, 2, mul, "*")
diff --git a/R/wcmdscale.R b/R/wcmdscale.R
index ce6aa72..a12df5a 100644
--- a/R/wcmdscale.R
+++ b/R/wcmdscale.R
@@ -6,8 +6,6 @@ function(d, k, eig = FALSE, add = FALSE, x.ret = FALSE, w)
         x <- sweep(x, 2, w.c, "-")
         x
     }
-    if (add)
-        .NotYetUsed("add")
     ## Force eig=TRUE if add, x.ret or !missing(w)
     if(x.ret)
         eig <- TRUE
@@ -18,6 +16,22 @@ function(d, k, eig = FALSE, add = FALSE, x.ret = FALSE, w)
         d <- as.dist(d)
         options(op)
     }
+    ## handle add constant to make d Euclidean
+    if (is.logical(add) && isTRUE(add))
+        add <- "lingoes"
+    if (is.character(add)) {
+        add <- match.arg(add, c("lingoes", "cailliez"))
+        if (add == "lingoes") {
+            ac <- addLingoes(as.matrix(d))
+            d <- sqrt(d^2 + 2 * ac)
+        } else if (add == "cailliez") {
+            ac <- addCailliez(as.matrix(d))
+            d <- d + ac
+        }
+    } else {
+        ac <- NA
+    }
+    ## Gower centring
     m <- as.matrix(d^2)
     n <- nrow(m)
     if (missing(w))
@@ -53,8 +67,8 @@ function(d, k, eig = FALSE, add = FALSE, x.ret = FALSE, w)
     if (eig || x.ret) {
         colnames(points) <- paste("Dim", seq_len(NCOL(points)), sep="") 
         out <- list(points = points, eig = if (eig) e$values,
-                    x = if (x.ret) m, ac = NA, GOF = GOF, weights = w,
-                    negaxes = negaxes, call = match.call())
+                    x = if (x.ret) m, ac = ac, add = add, GOF = GOF,
+                    weights = w, negaxes = negaxes, call = match.call())
         class(out) <- "wcmdscale"
     }
     else out <- points
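The add argument now accepts TRUE/"lingoes" or "cailliez" and stores the constant in $ac, which the print method patched earlier reports. A sketch (eig = TRUE is needed to get the full result object):

    library(vegan)
    data(varespec)
    m <- wcmdscale(vegdist(varespec), eig = TRUE, add = "lingoes")
    m$ac   # the additive constant, also shown by print()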
diff --git a/R/weights.rda.R b/R/weights.rda.R
index 855f7d0..bc9e251 100644
--- a/R/weights.rda.R
+++ b/R/weights.rda.R
@@ -1,10 +1,10 @@
-"weights.rda" <-
+`weights.rda` <-
     function (object, display = "sites", ...) 
 {
     display <- match.arg(display, c("sites", "species", "lc", 
                                     "wa"))
     if (display %in% c("sites", "lc", "wa")) {
-        n <- max(nrow(object$CA$Xbar), nrow(object$CCA$Xbar))
+        n <- nobs(object)
         if (!is.null(object$na.action) &&
             inherits(object$na.action, "exclude"))
             n <- n + length(object$na.action)
diff --git a/build/partial.rdb b/build/partial.rdb
new file mode 100644
index 0000000..8e3af07
Binary files /dev/null and b/build/partial.rdb differ
diff --git a/data/BCI.env.rda b/data/BCI.env.rda
index 13ffee6..b5aecc0 100644
Binary files a/data/BCI.env.rda and b/data/BCI.env.rda differ
diff --git a/data/BCI.rda b/data/BCI.rda
index abeffeb..3b4e16c 100644
Binary files a/data/BCI.rda and b/data/BCI.rda differ
diff --git a/inst/NEWS.Rd b/inst/NEWS.Rd
index 765fd2f..745eea8 100644
--- a/inst/NEWS.Rd
+++ b/inst/NEWS.Rd
@@ -2,6 +2,222 @@
 \title{vegan News}
 \encoding{UTF-8}
 
+\section{Changes in version 2.4-0}{
+  \subsection{DISTANCE-BASED ANALYSIS}{
+     \itemize{
+
+       \item Distance-based methods were redesigned and made
+       consistent for ordination (\code{capscale}, new \code{dbrda}),
+       permutational ANOVA (\code{adonis}, new \code{adonis2}),
+       multivariate dispersion (\code{betadisper}) and variation
+       partitioning (\code{varpart}).  These methods can produce
+       negative eigenvalues with several popular semimetric
+       dissimilarity indices, and they were not handled similarly by
+       all functions. Now all functions are designed after McArdle &
+       Anderson (\emph{Ecology} 82, 290--297; 2001).
+
+       \item \code{dbrda} is a new function for distance-based
+       Redundancy Analysis following McArdle & Anderson
+       (\emph{Ecology} 82, 290--297; 2001). With metric
+       dissimilarities, the function is equivalent to old
+       \code{capscale}, but negative eigenvalues of semimetric indices
+       are handled differently. In \code{dbrda} the dissimilarities
+       are decomposed directly into conditions, constraints and
+       residuals with their negative eigenvalues, and any of the
+       components can have imaginary dimensions. The function is mostly
+       compatible with \code{capscale} and other constrained
+       ordination methods, but full compatibility cannot be achieved
+       (see issue
+       \href{https://github.com/vegandevs/vegan/issues/140}{#140} in
+       Github). The function is based on the code by Pierre Legendre.
+
+       \item The old \code{capscale} function for constrained
+       ordination is still based only on real components, but the
+       total inertia of the components is assessed similarly as in
+       \code{dbrda}.
+
+       The significance tests will differ from the previous version,
+       but function \code{oldCapscale} will cast the \code{capscale}
+       result to the form used in previous versions.
+
+       \item \code{adonis2} is a new function for permutational ANOVA
+       of dissimilarities. It is based on the same algorithm as the
+       \code{dbrda}. The function can perform overall tests of all
+       independent variables as well as sequential and marginal tests
+       of each term. The old \code{adonis} is still available, but it
+       can only perform sequential tests. With the same settings,
+       \code{adonis} and \code{adonis2} give identical results (but
+       see Github issue
+       \href{https://github.com/vegandevs/vegan/issues/156}{#156} for
+       differences).
+
+       \item Function \code{varpart} can partition dissimilarities
+       using the same algorithm as \code{dbrda}.
+
+       \item Argument \code{sqrt.dist} takes square roots of
+       dissimilarities, which can turn many popular semimetric
+       indices into metric distances in \code{capscale}, \code{dbrda},
+       \code{wcmdscale}, \code{adonis2}, \code{varpart} and
+       \code{betadisper} (issue
+       \href{https://github.com/vegandevs/vegan/issues/179}{#179} in
+       Github).
+
+       \item Lingoes and Cailliez adjustments change any dissimilarity
+       into a metric distance in \code{capscale}, \code{dbrda},
+       \code{adonis2}, \code{varpart}, \code{betadisper} and
+       \code{wcmdscale}.  Earlier we had only Cailliez adjustment in
+       \code{capscale} (issue
+       \href{https://github.com/vegandevs/vegan/issues/179}{#179} in
+       Github).
+
+       \item \code{RsquareAdj} works with \code{capscale} and
+       \code{dbrda} and this allows using \code{ordiR2step} in model
+       building.
+
+     } % itemize
+  } % distance-based
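A compact sketch of the redesigned distance-based toolchain described in this subsection, assuming the bundled varespec/varechem data:

    library(vegan)
    data(varespec); data(varechem)
    d <- vegdist(varespec)
    m <- dbrda(d ~ N + P + K, data = varechem, add = "lingoes")
    anova(m, by = "margin")                           # marginal permutation tests
    adonis2(d ~ N + P + K, data = varechem, by = "margin")
    RsquareAdj(m)                                     # enables ordiR2step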
+
+  \subsection{BUG FIXES}{
+    \itemize{
+
+      \item \code{specaccum}: \code{plot} failed if line type
+      (\code{lty}) was given. Reported by Lila Nath Sharma (Univ
+      Bergen, Norway)
+
+    } %itemize
+  } %bug fixes
+
+  \subsection{NEW FUNCTIONS}{
+    \itemize{
+
+      \item \code{ordibar} is a new function to draw crosses of
+      standard deviations or standard errors in ordination diagrams
+      instead of corresponding ellipses.
+
+      \item Several \code{permustats} results can be combined with a
+      new \code{c()} function.
+
+      \item New function \code{smbind} binds together null models by
+      row, column or replication. If sequential models are bound
+      together, they can be treated as parallel chains in subsequent
+      analysis (e.g., after \code{as.mcmc}). See issue
+      \href{https://github.com/vegandevs/vegan/issues/164}{#164} on
+      GitHub.
+
+    } %itemize
+  } % new functions
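
A hedged sketch of the new functions; the dune data and the groupings
are illustrative choices only, and MARGIN = 3 is assumed to stack the
simulations (1 and 2 bind rows and columns):

    library(vegan)
    data(dune, dune.env)
    ord <- metaMDS(dune, trace = FALSE)
    plot(ord, display = "sites")
    ## crosses of standard errors instead of ellipses
    ordibar(ord, dune.env$Management, kind = "se")
    ## combine permutation statistics of two analyses
    p1 <- permustats(anosim(dune, dune.env$Management))
    p2 <- permustats(anosim(dune, dune.env$Moisture))
    summary(c(p1, p2))
    ## bind two independent sequential null models by replication
    s1 <- simulate(nullmodel(dune, "swap_count"), nsim = 99, seed = 1)
    s2 <- simulate(nullmodel(dune, "swap_count"), nsim = 99, seed = 2)
    sm <- smbind(s1, s2, MARGIN = 3)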
+  
+  \subsection{NEW FEATURES}{
+    \itemize{
+
+      \item Null model analysis was upgraded:
+
+      New \code{"curveball"} algorithm provides a fast null model with
+      fixed row and column sums for binary matrices after Strona et
+      al. (\emph{Nature Commun.} 5: 4114; 2014).
+
+      The \code{"quasiswap"} algorithm gained argument \code{thin}
+      which can reduce the bias of null models.
+
+      \code{"backtracking"} is now much faster, but it is still very
+      slow, and provided mainly to allow comparison against better and
+      faster methods.
+
+      Compiled code can now be interrupted in null model simulations.
+
+      \item \code{designdist} can now use beta diversity notation
+      (\code{gamma}, \code{alpha}) for easier definition of beta
+      diversity indices.
+
+      \item \code{metaMDS} has a new iteration strategy: argument
+      \code{try} gives the minimum number of random starts, and
+      \code{trymax} the maximum number. Earlier we had only
+      \code{try}, which gave the maximum number, but now at least
+      \code{try} random starts are used. This reduces the risk of
+      being trapped in a local optimum (issue
+      \href{https://github.com/vegandevs/vegan/issues/154}{#154} on
+      GitHub).
+
+      If there are no convergent solutions, \code{metaMDS} now
+      tabulates the stopping criteria (if \code{trace = TRUE}).  This
+      can help in deciding whether any of the criteria should be made
+      more stringent or the number of iterations increased. The
+      documentation for \code{monoMDS} and \code{metaMDS} gives more
+      detailed information on the convergence criteria.
+
+      \item The \code{summary} of \code{permustats} now prints
+      \emph{P}-values, and the test direction (\code{alternative})
+      can be changed.
+
+      The \code{qqmath} function of \code{permustats} can now plot
+      standardized statistics. This is a partial solution to issue
+      \href{https://github.com/vegandevs/vegan/issues/172}{#172} on
+      GitHub.
+
+      \item \code{MDSrotate} can rotate an ordination to show maximum
+      separation of factor levels (classes) using linear discriminant
+      analysis (\code{lda} in the \pkg{MASS} package).
+
+      \item \code{adipart}, \code{hiersimu} and \code{multipart}
+      expose argument \code{method} to specify the null model.
+
+      \item \code{RsquareAdj} works with \code{cca}, which allows
+      using \code{ordiR2step} in model building. The code was
+      developed by Dan McGlinn (issue
+      \href{https://github.com/vegandevs/vegan/issues/161}{#161} on
+      GitHub). However, \code{cca} still cannot be used in
+      \code{varpart}.
+
+      \item \code{ordiellipse} and \code{ordihull} allow setting
+      colours, line types and other graphical parameters.
+
+      The alpha channel can now also be given as a real number in
+      0 \dots 1 in addition to an integer in 0 \dots 255.
+
+      \item \code{ordiellipse} can now draw ellipsoid hulls that
+      enclose points in a group.
+      
+      \item \code{ordicluster}, \code{ordisegments}, \code{ordispider}
+      and the \code{lines} and \code{plot} functions for \code{isomap}
+      and \code{spantree} can use a mixture of the colours of
+      connected points. Their behaviour is similar to that of the
+      analogous functions in the \pkg{vegan3d} package.
+
+      \item \code{plot} of \code{betadisper} is more configurable. See
+      issues
+      \href{https://github.com/vegandevs/vegan/issues/128}{#128} and
+      \href{https://github.com/vegandevs/vegan/issues/166}{#166} on
+      GitHub for details.
+
+      \item \code{text} and \code{points} methods for
+      \code{orditkplot} respect stored graphical parameters.
+
+      \item Environmental data for the Barro Colorado Island forest
+      plots gained new variables from Harms et al. (\emph{J. Ecol.} 89,
+      947--959; 2001). See issue
+      \href{https://github.com/vegandevs/vegan/issues/178}{#178} on
+      GitHub.
+
+    } %itemize
+  } % features
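
A few of the features above in one hedged sketch (dune data again; the
argument values are arbitrary):

    library(vegan)
    data(dune, dune.env)
    ## fast binary null model with fixed row and column sums
    sim <- simulate(nullmodel(dune, "curveball"), nsim = 999)
    ## a beta diversity index defined in gamma/alpha notation
    beta <- designdist(dune, "gamma/alpha - 1", terms = "binary",
                       alphagamma = TRUE)
    ## at least 20 and at most 100 random starts
    ord <- metaMDS(dune, try = 20, trymax = 100, trace = FALSE)
    ## P-values and test direction from permustats
    summary(permustats(anosim(dune, dune.env$Management)),
            alternative = "greater")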
+
+  \subsection{DEPRECATED AND DEFUNCT}{
+    \itemize{
+
+      \item Function \code{metaMDSrotate} was removed and replaced
+      with \code{MDSrotate}.
+
+      \item \code{density} and \code{densityplot} methods for
+      various \pkg{vegan} objects were deprecated and replaced with
+      \code{density} and \code{densityplot} for \code{permustats}.
+      Function \code{permustats} can extract the permutation and
+      simulation results of \pkg{vegan} result objects.
+
+    } %itemize
+  } % deprecated & defunct
+
+} % v2.4-0
+
 \section{Changes in version 2.3-5}{
   \subsection{BUG FIXES}{
     \itemize{
diff --git a/inst/doc/decision-vegan.R b/inst/doc/decision-vegan.R
index 5aaded0..7bcfc37 100644
--- a/inst/doc/decision-vegan.R
+++ b/inst/doc/decision-vegan.R
@@ -17,7 +17,7 @@ require(vegan)
 
 
 ###################################################
-### code chunk number 3: decision-vegan.Rnw:126-137 (eval = FALSE)
+### code chunk number 3: decision-vegan.Rnw:125-136 (eval = FALSE)
 ###################################################
 ## ## start up and define meandist()
 ## library(vegan)
@@ -33,7 +33,7 @@ require(vegan)
 
 
 ###################################################
-### code chunk number 4: decision-vegan.Rnw:241-252
+### code chunk number 4: decision-vegan.Rnw:240-251
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 data(sipoo)
@@ -50,7 +50,7 @@ arrows(x,y, r, f(r, mod$p), lwd=4)
 
 
 ###################################################
-### code chunk number 5: decision-vegan.Rnw:609-613
+### code chunk number 5: decision-vegan.Rnw:592-596
 ###################################################
 library(vegan)
 data(varespec)
@@ -65,21 +65,21 @@ plot(orig, dis=c("lc","bp"))
 
 
 ###################################################
-### code chunk number 7: decision-vegan.Rnw:622-623
+### code chunk number 7: decision-vegan.Rnw:605-606
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 plot(orig, dis=c("lc","bp"))
 
 
 ###################################################
-### code chunk number 8: decision-vegan.Rnw:632-634
+### code chunk number 8: decision-vegan.Rnw:615-617
 ###################################################
 i <- sample(nrow(varespec))
 shuff <- cca(varespec[i,] ~ Al + K, varechem)
 
 
 ###################################################
-### code chunk number 9: decision-vegan.Rnw:637-638
+### code chunk number 9: decision-vegan.Rnw:620-621
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 plot(shuff, dis=c("lc","bp"))
@@ -93,7 +93,7 @@ plot(procrustes(scores(orig, dis="lc"),
 
 
 ###################################################
-### code chunk number 11: decision-vegan.Rnw:651-652
+### code chunk number 11: decision-vegan.Rnw:634-635
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 plot(procrustes(scores(orig, dis="lc"), 
@@ -101,7 +101,7 @@ plot(procrustes(scores(orig, dis="lc"),
 
 
 ###################################################
-### code chunk number 12: decision-vegan.Rnw:660-663
+### code chunk number 12: decision-vegan.Rnw:643-646
 ###################################################
 tmp1 <- rda(varespec ~ Al + K, varechem)
 i <- sample(nrow(varespec)) # Different shuffling
@@ -109,7 +109,7 @@ tmp2 <- rda(varespec[i,] ~ Al + K, varechem)
 
 
 ###################################################
-### code chunk number 13: decision-vegan.Rnw:666-668
+### code chunk number 13: decision-vegan.Rnw:649-651
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 plot(procrustes(scores(tmp1, dis="lc"), 
@@ -117,21 +117,21 @@ plot(procrustes(scores(tmp1, dis="lc"),
 
 
 ###################################################
-### code chunk number 14: decision-vegan.Rnw:685-687
+### code chunk number 14: decision-vegan.Rnw:668-670
 ###################################################
 orig
 shuff
 
 
 ###################################################
-### code chunk number 15: decision-vegan.Rnw:692-693
+### code chunk number 15: decision-vegan.Rnw:675-676
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 plot(procrustes(orig, shuff))
 
 
 ###################################################
-### code chunk number 16: decision-vegan.Rnw:706-711
+### code chunk number 16: decision-vegan.Rnw:689-694
 ###################################################
 tmp1 <- rda(varespec ~ ., varechem)
 tmp2 <- rda(varespec[i,] ~ ., varechem)
@@ -141,7 +141,7 @@ max(residuals(proc))
 
 
 ###################################################
-### code chunk number 17: decision-vegan.Rnw:723-726
+### code chunk number 17: decision-vegan.Rnw:706-709
 ###################################################
 data(dune)
 data(dune.env)
@@ -149,7 +149,7 @@ orig <- cca(dune ~ Moisture, dune.env)
 
 
 ###################################################
-### code chunk number 18: decision-vegan.Rnw:731-732
+### code chunk number 18: decision-vegan.Rnw:714-715
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 plot(orig, dis="lc")
@@ -164,7 +164,7 @@ text(orig, dis="cn", col="blue")
 
 
 ###################################################
-### code chunk number 20: decision-vegan.Rnw:756-757
+### code chunk number 20: decision-vegan.Rnw:739-740
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 plot(orig, display="wa", type="points")
diff --git a/inst/doc/decision-vegan.Rnw b/inst/doc/decision-vegan.Rnw
index 5658dcb..6b245cd 100644
--- a/inst/doc/decision-vegan.Rnw
+++ b/inst/doc/decision-vegan.Rnw
@@ -91,9 +91,8 @@ non-parallel computation.  The \code{mc.cores} option can be set by
 the environmental variable \code{MC_CORES} when the \pkg{parallel}
 package is loaded.
 
-\R{} allows\footnote{Since \R{} version 2.15.0.}
-setting up a default socket cluster (\code{setDefaultCluster}), but
-this will not be used in \pkg{vegan}. 
+\R{} allows setting up a default socket cluster
+(\code{setDefaultCluster}), but this will not be used in \pkg{vegan}.
 
 \subsubsection{Setting up socket clusters}
 \label{sec:parallel:socket}
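
For illustration, a minimal sketch of an explicitly created socket
cluster passed to a vegan function through its parallel argument (the
model and data are placeholders):

    library(vegan)
    library(parallel)
    data(dune, dune.env)
    cl <- makeCluster(2)                 # explicit socket cluster
    adonis2(dune ~ Management, data = dune.env, parallel = cl)
    stopCluster(cl)                      # the user must stop the cluster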
@@ -379,39 +378,23 @@ This chapter discusses the scaling of scores (results) in redundancy
 analysis and principal component analysis performed by function
 \code{rda} in the \pkg{vegan} library.  
 
-Principal component analysis, and hence redundancy analysis, is a case
-of singular value decomposition (\textsc{svd}).  Functions
-\code{rda} and \code{prcomp} even use \textsc{svd} internally in
-their algorithm.
-
-In \textsc{svd} a centred data matrix $\mathbf{X} = \{x_{ij}\}$ is decomposed into orthogonal
-components so that $x_{ij} = \sum_k \sigma_k u_{ik} v_{jk}$, where
-$u_{ik}$ and $v_{jk}$ are orthonormal coefficient matrices and
-$\sigma_k$ are singular values.  Orthonormality means that sums of
-squared columns is one and their cross-product is zero, or $\sum_i
-u_{ik}^2 = \sum_j v_{jk}^2 = 1$, and $\sum_i u_{ik} u_{il} = \sum_j
-v_{jk} v_{jl} = 0$ for $k \neq l$. This is a decomposition, and the
-original matrix is found exactly from the singular vectors and
-corresponding singular values, and first two singular components give
-the rank $=2$ least squares estimate of the original matrix.
-
-Principal component analysis is often presented (and performed in
-legacy software) as an eigenanalysis of covariance matrices.  Instead
-of a data matrix, we analyse a matrix of covariances and variances
-$\mathbf{S}$.  The result are orthonormal coefficient matrix
-$\mathbf{U}$ and eigenvalues $\mathbf{\Lambda}$.  The coefficients
-$u_{ik}$ ares identical to \textsc{svd} (except for possible sign
-changes), and eigenvalues $\lambda_k$ are related to the corresponding
-singular values by $\lambda_k = \sigma_k^2 /(n-1)$.  With classical
-definitions, the sum of all eigenvalues equals the sum of variances of
-species, or $\sum_k \lambda_k = \sum_j s_j^2$, and it is often said
-that first axes explain a certain proportion of total variance in the
-data.  The orthonormal matrix $\mathbf{V}$ of \textsc{svd} can be
-found indirectly as well, so that we have the same components in both
-methods.
+Principal component analysis decomposes a centred data matrix
+$\mathbf{X} = \{x_{ij}\}$ into $K$ orthogonal components so that
+$x_{ij} = \sqrt{n-1} \sum_{k=1}^K u_{ik} \sqrt{\lambda_k} v_{jk}$,
+where $u_{ik}$ and $v_{jk}$ are orthonormal coefficient matrices and
+$\lambda_k$ are eigenvalues. In \pkg{vegan} the eigenvalues sum up to
+the variance of the data, and therefore we need to multiply by the
+square root of the degrees of freedom, $n-1$.  Orthonormality means
+that the sums of squared columns are one and their cross-products are
+zero, or $\sum_i u_{ik}^2 = \sum_j v_{jk}^2 = 1$, and
+$\sum_i u_{ik} u_{il} = \sum_j v_{jk} v_{jl} = 0$ for $k \neq l$. This
+is a decomposition, and the original matrix is found exactly from the
+component vectors and corresponding eigenvalues, and the first two
+components give the rank $=2$ least squares estimate of the original
+matrix.
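
As a hedged numerical check of this decomposition (varespec is an
arbitrary example matrix; the u, v and eig components of an rda result
are assumed to hold the orthonormal vectors and the eigenvalues):

    library(vegan)
    data(varespec)
    X <- scale(as.matrix(varespec), center = TRUE, scale = FALSE)
    m <- rda(X)
    n <- nrow(X)
    lambda <- m$CA$eig
    U <- m$CA$u                  # orthonormal site vectors
    V <- m$CA$v                  # orthonormal species vectors
    Xhat <- sqrt(n - 1) * U %*% diag(sqrt(lambda)) %*% t(V)
    max(abs(X - Xhat))           # practically zero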
 
 The coefficients $u_{ik}$ and $v_{jk}$ are scaled to unit length for all
-axes $k$. Singular values $\sigma_k$ or eigenvalues $\lambda_k$ give
+axes $k$. Eigenvalues $\lambda_k$ give
 the information of the importance of axes, or the `axis lengths.'
 Instead of the orthonormal coefficients, or equal length axes, it is
 customary to scale species (column) or site (row) scores or both by
@@ -446,17 +429,23 @@ weighted averaging scores have somewhat wider dispersion.
 \code{prcomp, princomp} &
 $u_{ik} \sqrt{n-1} \sqrt{\lambda_k}$ &
 $v_{jk}$ \\
-\code{rda, scaling=1} &
+\code{stats::biplot} &
+$u_{ik}$ &
+$v_{jk} \sqrt{n} \sqrt{\lambda_k}$ \\
+\code{stats::biplot, pc.biplot=TRUE} &
+$u_{ik} \sqrt{n-1}$ &
+$v_{jk} \sqrt{\lambda_k}$\\
+\code{rda, scaling="sites"} &
 $u_{ik} \sqrt{\lambda_k/ \sum \lambda_k} \times \mathrm{const}$ &
 $v_{jk} \times \mathrm{const}$
 \\
-\code{rda, scaling=2} &
+\code{rda, scaling="species"} &
 $u_{ik} \times \mathrm{const}$ &
 $v_{jk} \sqrt{\lambda_k/ \sum \lambda_k} \times \mathrm{const}$  \\
-\code{rda, scaling=3} &
+\code{rda, scaling="symmetric"} &
 $u_{ik} \sqrt[4]{\lambda_k/ \sum \lambda_k} \times \mathrm{const}$ &
 $v_{jk} \sqrt[4]{\lambda_k/ \sum \lambda_k} \times \mathrm{const}$ \\
-\code{rda, scaling < 0} &
+\code{rda, correlation=TRUE} &
 $u_{ik}^*$ &
 $\sqrt{\sum \lambda_k /(n-1)} s_j^{-1} v_{jk}^*$
 \\
@@ -474,17 +463,15 @@ $\sqrt{\sum \lambda_k /(n-1)} s_j^{-1} v_{jk}^*$
 \end{tabular}
 \end{table*}
 
-
-
 In community ecology, it is common to plot both species and sites in
-the same graph.  If this graph is a graphical display of \textsc{svd},
+the same graph.  If this graph is a graphical display of \textsc{pca},
 or a graphical, low-dimensional approximation of the data, the graph
 is called a biplot.  The graph is a biplot if the transformed scores
 satisfy $x_{ij} = c \sum_k u_{ik}^* v_{jk}^*$ where $c$ is a scaling
-constant.  In functions \code{princomp}, \code{prcomp} and
-\code{rda}, $c=1$ and the plotted scores are a biplot so that the
-singular values (or eigenvalues) are expressed for sites, and species
-are left unscaled.  
+constant.  In functions \code{princomp}, \code{prcomp} and \code{rda}
+with \code{scaling = "sites"}, the plotted scores define a biplot so that
+the eigenvalues are expressed for sites, and species are left
+unscaled.
 % For \texttt{Canoco 3} $c = n^{-1} \sqrt{n-1}
 % \sqrt{\sum \lambda_k}$ with negative \proglang{Canoco} scaling
 % values. All these $c$ are constants for a matrix, so these are all
@@ -496,22 +483,23 @@ are left unscaled.
 
 There is no natural way of scaling species and site scores to each
 other.  The eigenvalues in redundancy and principal components
-analysis are scale-dependent and change when the  data are
-multiplied by a constant.  If we have percent cover data, the
-eigenvalues are typically very high, and the scores scaled by
-eigenvalues will have much wider dispersion than the orthonormal set.
-If we express the percentages as proportions, and divide the matrix by
-$100$, the eigenvalues will be reduced by factor $100^2$, and the
-scores scaled by eigenvalues will have a narrower dispersion.  For
-graphical biplots we should be able to fix the relations of row and
-column scores to be invariant against scaling of data.  The solution
-in \proglang{R} standard function \code{biplot} is to scale site and species
-scores independently, and typically very differently, but plot each
-independently to fill the graph area.  The solution in \proglang{Canoco} and 
-\code{rda} is to use proportional eigenvalues $\lambda_k / \sum
-\lambda_k$ instead of original eigenvalues.  These proportions are
-invariant with scale changes, and typically they have a nice range for
-plotting two data sets in the same graph.
+analysis are scale-dependent and change when the data are multiplied
+by a constant.  If we have percent cover data, the eigenvalues are
+typically very high, and the scores scaled by eigenvalues will have
+much wider dispersion than the orthonormal set.  If we express the
+percentages as proportions, and divide the matrix by $100$, the
+eigenvalues will be reduced by a factor of $100^2$, and the scores scaled
+by eigenvalues will have a narrower dispersion.  For graphical biplots
+we should be able to fix the relations of row and column scores to be
+invariant against scaling of data.  The solution in \proglang{R}
+standard function \code{biplot} is to scale site and species scores
+independently, and typically very differently
+(Table~\ref{tab:scales}), but plot each independently to fill the
+graph area.  The solution in \proglang{Canoco} and \code{rda} is to
+use proportional eigenvalues $\lambda_k / \sum \lambda_k$ instead of
+original eigenvalues.  These proportions are invariant with scale
+changes, and typically they have a nice range for plotting two data
+sets in the same graph.
 
 The \textbf{vegan} package uses a scaling constant $c = \sqrt[4]{(n-1)
   \sum \lambda_k}$ in order to be able to use scaling by proportional
@@ -544,24 +532,19 @@ other software or \proglang{R} functions (Table \ref{tab:rdaconst}).
 \end{tabular}
 \end{table*}
 
-In this chapter, I used always centred data matrices.  In principle
-\textsc{svd} could be done with original, non-centred data, but
-there is no option for this in \code{rda}, because I think that
-non-centred analysis is dubious and I do not want to encourage its use
-(if you think you need it, you are certainly so good in programming
-that you can change that one line in \code{rda.default}).  I do
-think that the arguments for non-centred analysis are often twisted,
-and the method is not very good for its intended purpose, but there
-are better methods for finding fuzzy classes.  Normal, centred
-analysis moves the origin to the average of all species, and the
-dimensions describe differences from this average.  Non-centred
-analysis leaves the origin in the empty site with no species, and the
-first axis usually runs from the empty site to the average
-site. Second and third non-centred components are often very similar
-to first and second (etc.) centred components, and the best way to use
-non-centred analysis is to discard the first component and use only
-the rest. This is better done with directly centred analysis.
-
+The scaling is controlled by three arguments in the \code{scores}
+function in \pkg{vegan} (a short sketch follows the list):
+\begin{enumerate}
+  \item \code{scaling} with options \code{"sites"}, \code{"species"}
+    and \code{"symmetric"} defines the set of scores which is scaled
+    by eigenvalues (Table~\ref{tab:scales}).
+  \item \code{const} can be used to set the numeric scaling constant
+    to non-default values (Table~\ref{tab:rdaconst}).
+  \item \code{correlation} can be used to modify species scores so
+    that they show the relative change of species abundance, or their
+    correlation with the ordination (Table~\ref{tab:scales}). This is
+    no longer a biplot scaling.
+\end{enumerate}
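
For illustration, a hedged sketch of the three arguments (any
\code{rda} result will do):

    library(vegan)
    data(varespec)
    m <- rda(varespec)
    head(scores(m, display = "sites", scaling = "sites"))
    head(scores(m, display = "species", scaling = "symmetric"))
    ## correlation-like species scores: no longer a biplot scaling
    head(scores(m, display = "species", scaling = "species",
                correlation = TRUE))
    ## explicit numeric scaling constant
    head(scores(m, display = "sites", scaling = "sites", const = 1))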
 
 \section{Weighted average and linear combination scores}
 
diff --git a/inst/doc/decision-vegan.pdf b/inst/doc/decision-vegan.pdf
index 2b0ed37..e341094 100644
Binary files a/inst/doc/decision-vegan.pdf and b/inst/doc/decision-vegan.pdf differ
diff --git a/inst/doc/diversity-vegan.pdf b/inst/doc/diversity-vegan.pdf
index 264cbe9..26a1bee 100644
Binary files a/inst/doc/diversity-vegan.pdf and b/inst/doc/diversity-vegan.pdf differ
diff --git a/inst/doc/intro-vegan.R b/inst/doc/intro-vegan.R
index 36f9c34..02324eb 100644
--- a/inst/doc/intro-vegan.R
+++ b/inst/doc/intro-vegan.R
@@ -62,7 +62,7 @@ text(ord, display = "spec", cex=0.7, col="blue")
 
 
 ###################################################
-### code chunk number 9: intro-vegan.Rnw:205-207
+### code chunk number 9: intro-vegan.Rnw:206-208
 ###################################################
 data(dune.env)
 attach(dune.env)
@@ -72,25 +72,27 @@ attach(dune.env)
 ### code chunk number 10: a
 ###################################################
 plot(ord, disp="sites", type="n")
-ordihull(ord, Management, col="blue")
-ordiellipse(ord, Management, col=3,lwd=2)
-ordispider(ord, Management, col="red", label = TRUE)
+ordihull(ord, Management, col=1:4, lwd=3)
+ordiellipse(ord, Management, col=1:4, kind = "ehull", lwd=3)
+ordiellipse(ord, Management, col=1:4, draw="polygon")
+ordispider(ord, Management, col=1:4, label = TRUE)
 points(ord, disp="sites", pch=21, col="red", bg="yellow", cex=1.3)
 
 
 ###################################################
-### code chunk number 11: intro-vegan.Rnw:217-218
+### code chunk number 11: intro-vegan.Rnw:219-220
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 plot(ord, disp="sites", type="n")
-ordihull(ord, Management, col="blue")
-ordiellipse(ord, Management, col=3,lwd=2)
-ordispider(ord, Management, col="red", label = TRUE)
+ordihull(ord, Management, col=1:4, lwd=3)
+ordiellipse(ord, Management, col=1:4, kind = "ehull", lwd=3)
+ordiellipse(ord, Management, col=1:4, draw="polygon")
+ordispider(ord, Management, col=1:4, label = TRUE)
 points(ord, disp="sites", pch=21, col="red", bg="yellow", cex=1.3)
 
 
 ###################################################
-### code chunk number 12: intro-vegan.Rnw:248-250
+### code chunk number 12: intro-vegan.Rnw:250-252
 ###################################################
 ord.fit <- envfit(ord ~ A1 + Management, data=dune.env, perm=999)
 ord.fit
@@ -110,7 +112,7 @@ ordisurf(ord, A1, add=TRUE)
 
 
 ###################################################
-### code chunk number 15: intro-vegan.Rnw:266-268
+### code chunk number 15: intro-vegan.Rnw:268-270
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 plot(ord, dis="site")
@@ -119,7 +121,7 @@ ordisurf(ord, A1, add=TRUE)
 
 
 ###################################################
-### code chunk number 16: intro-vegan.Rnw:288-290
+### code chunk number 16: intro-vegan.Rnw:290-292
 ###################################################
 ord <- cca(dune ~ A1 + Management, data=dune.env)
 ord
@@ -132,32 +134,32 @@ plot(ord)
 
 
 ###################################################
-### code chunk number 18: intro-vegan.Rnw:297-298
+### code chunk number 18: intro-vegan.Rnw:299-300
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 plot(ord)
 
 
 ###################################################
-### code chunk number 19: intro-vegan.Rnw:315-316
+### code chunk number 19: intro-vegan.Rnw:317-318
 ###################################################
 cca(dune ~ ., data=dune.env)
 
 
 ###################################################
-### code chunk number 20: intro-vegan.Rnw:325-326
+### code chunk number 20: intro-vegan.Rnw:327-328
 ###################################################
 anova(ord)
 
 
 ###################################################
-### code chunk number 21: intro-vegan.Rnw:334-335
+### code chunk number 21: intro-vegan.Rnw:336-337
 ###################################################
 anova(ord, by="term", permutations=199)
 
 
 ###################################################
-### code chunk number 22: intro-vegan.Rnw:340-341
+### code chunk number 22: intro-vegan.Rnw:342-343
 ###################################################
 anova(ord, by="mar", permutations=199)
 
@@ -169,27 +171,27 @@ anova(ord, by="axis", permutations=499)
 
 
 ###################################################
-### code chunk number 24: intro-vegan.Rnw:353-355
+### code chunk number 24: intro-vegan.Rnw:355-357
 ###################################################
 ord <- cca(dune ~ A1 + Management + Condition(Moisture), data=dune.env)
 ord
 
 
 ###################################################
-### code chunk number 25: intro-vegan.Rnw:360-361
+### code chunk number 25: intro-vegan.Rnw:362-363
 ###################################################
 anova(ord, by="term", permutations=499)
 
 
 ###################################################
-### code chunk number 26: intro-vegan.Rnw:369-371
+### code chunk number 26: intro-vegan.Rnw:371-373
 ###################################################
 how <- how(nperm=499, plots = Plots(strata=dune.env$Moisture))
 anova(ord, by="term", permutations = how)
 
 
 ###################################################
-### code chunk number 27: intro-vegan.Rnw:375-376
+### code chunk number 27: intro-vegan.Rnw:377-378
 ###################################################
 detach(dune.env)
 
diff --git a/inst/doc/intro-vegan.Rnw b/inst/doc/intro-vegan.Rnw
index 1be1c38..30bc862 100644
--- a/inst/doc/intro-vegan.Rnw
+++ b/inst/doc/intro-vegan.Rnw
@@ -198,9 +198,10 @@ methods you can try:
 
 \pkg{Vegan} has a group of functions for adding information about
 classification or grouping of points onto ordination diagrams.
-Function \code{ordihull} adds convex hulls, \code{ordiellipse}
-adds ellipses of standard deviation, standard error or confidence
-areas, and \code{ordispider} combines items to their centroid
+Function \code{ordihull} adds convex hulls, \code{ordiellipse} adds
+ellipses enclosing all points in the group (ellipsoid hulls) or
+ellipses of standard deviation, standard error or confidence areas,
+and \code{ordispider} combines items to their centroid
 (Fig. \ref{fig:ordihull}):
 <<>>=
 data(dune.env)
@@ -208,16 +209,17 @@ attach(dune.env)
 @
 <<a>>=
 plot(ord, disp="sites", type="n")
-ordihull(ord, Management, col="blue")
-ordiellipse(ord, Management, col=3,lwd=2)
-ordispider(ord, Management, col="red", label = TRUE)
+ordihull(ord, Management, col=1:4, lwd=3)
+ordiellipse(ord, Management, col=1:4, kind = "ehull", lwd=3)
+ordiellipse(ord, Management, col=1:4, draw="polygon")
+ordispider(ord, Management, col=1:4, label = TRUE)
 points(ord, disp="sites", pch=21, col="red", bg="yellow", cex=1.3)
 @
 \begin{figure}
 <<fig=true,echo=false>>=
 <<a>>
 @
-\caption{Convex hull, standard error ellipse and a spider web diagram
+\caption{Convex hull, ellipsoid hull, standard error ellipse and a spider web diagram
   for Management levels in ordination.}
 \label{fig:ordihull}
 \end{figure}
diff --git a/inst/doc/intro-vegan.pdf b/inst/doc/intro-vegan.pdf
index 7f1fc42..55cdb23 100644
Binary files a/inst/doc/intro-vegan.pdf and b/inst/doc/intro-vegan.pdf differ
diff --git a/inst/doc/partitioning.pdf b/inst/doc/partitioning.pdf
index b1b0176..aa1ab43 100644
Binary files a/inst/doc/partitioning.pdf and b/inst/doc/partitioning.pdf differ
diff --git a/man/BCI.Rd b/man/BCI.Rd
index 2481486..3772904 100644
--- a/man/BCI.Rd
+++ b/man/BCI.Rd
@@ -17,22 +17,38 @@ data(BCI.env)
   A data frame with 50 plots (rows) of 1 hectare with counts of trees
   on each plot with total of 225 species (columns). Full Latin names
   are used for tree species. The names were updated against
-  \url{http://www.theplantlist.org} in Jan 2014 (see ChangeLog 2.1-41
-  for details) which allows matching 206 of species against
+  \url{http://www.theplantlist.org} and Kress et al. (2009), which
+  allows matching 207 of the species against
   \url{http://datadryad.org/resource/doi:10.5061/dryad.63q27} (Zanne
-  et al., 2014).
+  et al., 2014). The original species names are available as attribute
+  \code{original.names} of \code{BCI}. See Examples for changed names.
 
-  For \code{BCI.env}, a data frame with 50 plots (rows) and six site
-  variables:
+  For \code{BCI.env}, a data frame with 50 plots (rows) and nine site
+  variables derived from Pyke et al. (2001) and Harms et al. (2001):
 
   \describe{
-    \item{\code{UTM.EW}: }{UTM coordinates (zone 17N) East-West}
-    \item{\code{UTM.NS}: }{UTM coordinates (zone 17N) North-South}
-    \item{\code{Precipitation}: }{Precipitation in mm per year}
-    \item{\code{Elevation}: }{Elevation in m above sea level}
-    \item{\code{Age.cat}: }{Forest age category}
-    \item{\code{Geology}: }{The Underlying geological formation}
-  }
+    \item{\code{UTM.EW}: }{UTM coordinates (zone 17N) East-West.}
+    \item{\code{UTM.NS}: }{UTM coordinates (zone 17N) North-South.}
+    \item{\code{Precipitation}: }{Precipitation in mm per year.}
+    \item{\code{Elevation}: }{Elevation in m above sea level.}
+    \item{\code{Age.cat}: }{Forest age category.}
+    \item{\code{Geology}: }{The underlying geological formation.}
+
+    \item{\code{Habitat}: }{Dominant habitat type based on the map of
+    habitat types in 25 grid cells in each plot (Harms et al. 2001,
+    excluding streamside habitat). The habitat types are \code{Young}
+    forests (\emph{ca.} 100 years), old forests on > 7 degree slopes
+    (\code{OldSlope}), old forests under 152 m elevation
+    (\code{OldLow}) and at higher elevation (\code{OldHigh}) and
+    \code{Swamp} forests.}
+
+    \item{\code{River}: }{\code{"Yes"} if there is streamside habitat
+    in the plot.}
+    \item{\code{EnvHet}: }{Environmental heterogeneity assessed as the
+    Simpson diversity of frequencies of \code{Habitat} types in 25
+    grid cells in the plot.}
+
+}
 
  }
 \details{
@@ -47,12 +63,25 @@ data(BCI.env)
   The quadrats are located in a regular grid. See \code{BCI.env} for the
   coordinates.
 
-  A full description of the site information in \code{BCI.env} is given
-  in Pyke et al (2001)
+  A full description of the site information in \code{BCI.env} is
+  given in Pyke et al. (2001) and Harms et al. (2001). \emph{N.B.}
+  Pyke et al. (2001) and Harms et al. (2001) give conflicting
+  information about forest age categories and elevation.
 
 }
 \source{
-  \url{http://www.sciencemag.org/cgi/content/full/295/5555/666/DC1}
+  \url{http://www.sciencemag.org/cgi/content/full/295/5555/666/DC1} for
+  community data and References for environmental data.
+}
+
+\seealso{
+
+  Extra-CRAN package \pkg{natto}
+  (\url{https://github.com/jarioksa/natto}) has data set
+  \code{BCI.env2} with original grid data of Harms et al. (2001)
+  habitat classification, and data set \code{BCI.taxon} of APG III
+  classification of tree species.
+
 }
 
 \references{
@@ -63,14 +92,23 @@ data(BCI.env)
   Beta-diversity in tropical forest trees. \emph{Science} 295,
   666--669.
 
+  Harms K.E., Condit R., Hubbell S.P. & Foster R.B. (2001) Habitat
+  associations of trees and shrubs in a 50-ha neotropical forest
+  plot. \emph{J. Ecol.} 89, 947--959.
+
+  Kress W.J., Erickson D.L, Jones F.A., Swenson N.G, Perez R., Sanjur
+  O. & Bermingham E. (2009) Plant DNA barcodes and a community
+  phylogeny of a tropical forest dynamics plot in Panama. \emph{PNAS}
+  106, 18621--18626.
+
   Zanne A.E., Tank D.C., Cornwell, W.K., Eastman J.M., Smith, S.A.,
   FitzJohn, R.G., McGlinn, D.J., O’Meara, B.C., Moles, A.T., Reich,
   P.B., Royer, D.L., Soltis, D.E., Stevens, P.F., Westoby, M., Wright,
   I.J., Aarssen, L., Bertin, R.I., Calaminus, A., Govaerts, R.,
   Hemmings, F., Leishman, M.R., Oleksyn, J., Soltis, P.S., Swenson,
   N.G., Warman, L. & Beaulieu, J.M. (2014) Three keys to the radiation
-  of angiosperms into freezing environments. \emph{Nature}
-  doi:10.1038/nature12872 (published online Dec 22, 2013).
+  of angiosperms into freezing environments. \emph{Nature} 506,
+  89--92.  doi:10.1038/nature12872 (published online Dec 22, 2013).
 
   Pyke, C. R., Condit, R., Aguilar, S., & Lao, S. (2001). Floristic
   composition across a climatic gradient in a neotropical lowland
@@ -81,5 +119,9 @@ data(BCI.env)
 \examples{
 data(BCI, BCI.env)
 head(BCI.env)
+## see changed species names
+oldnames <- attr(BCI, "original.names")
+taxa <- cbind("Old Names" = oldnames, "Current Names" = names(BCI))
+noquote(taxa[taxa[,1] != taxa[,2], ])
 }
 \keyword{datasets}
diff --git a/man/MDSrotate.Rd b/man/MDSrotate.Rd
index b898b4c..23ab708 100644
--- a/man/MDSrotate.Rd
+++ b/man/MDSrotate.Rd
@@ -19,10 +19,12 @@ MDSrotate(object, vec, na.rm = FALSE, ...)
  \item{object}{ A result object from \code{\link{metaMDS}} or
     \code{\link{monoMDS}}.}
 
-  \item{vec}{ A continuous environmental variable or a matrix of such
-    variables. The number of variables must be lower than the number of
-    dimensions, and the solution is rotated to these variables in the
-    order they appear in the matrix.}
+  \item{vec}{An environmental variable or a matrix of such
+    variables. The number of variables must be lower than the number
+    of dimensions, and the solution is rotated to these variables in
+    the order they appear in the matrix. Alternatively \code{vec} can
+    be a factor, and the solution is rotated to optimal separation of
+    factor levels using \code{\link[MASS]{lda}}.}
 
   \item{na.rm}{ Remove missing values from the continuous variable
     \code{vec}.}
@@ -46,13 +48,29 @@ MDSrotate(object, vec, na.rm = FALSE, ...)
   uncorrelated to later dimensions. There must be at least one free
   dimension: the number of external variables must be lower than the
   number of dimensions, and all used environmental variables are
-  uncorrelated with that free dimension.}
+  uncorrelated with that free dimension.
+
+  Alternatively the method can rotate to discriminate the levels of a
+  factor using linear discriminant analysis
+  (\code{\link[MASS]{lda}}). This is hardly meaningful for
+  two-dimensional solutions, since all rotations in two dimensions
+  have the same separation of cluster levels. However, the function
+  can be useful in finding a two-dimensional projection of clusters
+  from more than two dimensions. The last dimension will always show
+  the residual variation, and for \eqn{k} dimensions, only \eqn{k-1}
+  discrimination vectors are used.
+
+}
 
 \value{ Function returns the original ordination result, but with
   rotated scores (both site and species if available), and the
   \code{pc} attribute of scores set to \code{FALSE}.  
 }
 
+\note{Rotation to a factor variable is an experimental feature and may
+  be removed. The discriminant analysis weights dimensions by their
+  discriminating power, but \code{MDSrotate} performs a rigid
+  rotation. Therefore the solution may not be optimal.}
 
 \author{
   Jari Oksanen
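
A hedged sketch of the factor rotation; because the rotation is rigid,
the stress of the solution is unchanged:

    library(vegan)
    data(dune, dune.env)
    m <- metaMDS(dune, k = 3, trace = FALSE)
    r <- MDSrotate(m, dune.env$Management)
    c(m$stress, r$stress)   # identical: only the orientation changes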
diff --git a/man/RsquareAdj.Rd b/man/RsquareAdj.Rd
index 0cd90f7..fa77464 100644
--- a/man/RsquareAdj.Rd
+++ b/man/RsquareAdj.Rd
@@ -17,6 +17,7 @@ Adjusted R-square
 \usage{
 \method{RsquareAdj}{default}(x, n, m, ...)
 \method{RsquareAdj}{rda}(x, ...)
+\method{RsquareAdj}{cca}(x, permutations = 1000, ...)
 }
 
 \arguments{
@@ -26,8 +27,13 @@ Adjusted R-square
   
   \item{n, m}{Number of observations and number of degrees of freedom
   in the fitted model.}
+  
+  \item{permutations}{Number of permutations to use when computing the
+  adjusted R-squared for \code{cca}. The permutations can be calculated
+  in parallel by specifying the number of cores, which is passed to
+  \code{\link{permutest}}.}
 
-  \item{\dots}{ Other arguments (ignored).} 
+  \item{\dots}{ Other arguments (ignored), except in the case of
+  \code{cca}, where they are passed to \code{\link{permutest}}.}
 }
 
 \details{ The default method finds the adjusted
@@ -38,10 +44,14 @@ Adjusted R-square
   \code{\link{cca}}, \code{\link{lm}} and \code{\link{glm}}. Adjusted,
   or even unadjusted, R-squared may not be available in some cases,
   and then the functions will return \code{NA}. There is no adjusted
-  R-squared in \code{\link{cca}}, in partial \code{\link{rda}}, and
+  R-squared in partial \code{\link{rda}}, and
   R-squared values are available only for \code{\link{gaussian}}
   models in \code{\link{glm}}.
 
+  The adjusted \eqn{R^2}{R-squared} of \code{cca} is computed using a
+  permutation approach developed by Peres-Neto et al. (2006). By
+  default, 1000 permutations are used.
+  
   The raw \eqn{R^2}{R-squared} of partial \code{rda} gives the
   proportion explained after removing the variation due to conditioning
   (partial) terms; Legendre et al. (2011) call this semi-partial
@@ -76,6 +86,9 @@ data(mite.env)
 ## rda
 m <- rda(decostand(mite, "hell") ~  ., mite.env)
 RsquareAdj(m)
+## cca
+m <- cca(decostand(mite, "hell") ~  ., mite.env)
+RsquareAdj(m)
 ## default method
 RsquareAdj(0.8, 20, 5)
 }
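
Because \code{RsquareAdj} now works for \code{cca}, the adjusted
R-squared can drive forward model selection; a hedged sketch (the
permutation-based R-squared makes the selection slightly stochastic):

    library(vegan)
    data(mite, mite.env)
    m0 <- cca(mite ~ 1, mite.env)
    m1 <- cca(mite ~ ., mite.env)
    ordiR2step(m0, m1, permutations = 199, trace = FALSE)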
diff --git a/man/adipart.Rd b/man/adipart.Rd
index 5b79fb7..c9e9678 100644
--- a/man/adipart.Rd
+++ b/man/adipart.Rd
@@ -9,23 +9,27 @@
 
 \title{Additive Diversity Partitioning and Hierarchical Null Model Testing}
 \description{
-In additive diversity partitioning, mean values of alpha diversity at lower levels of a sampling 
-hierarchy are compared to the total diversity in the entire data set (gamma diversity). 
-In hierarchical null model testing, a statistic returned by a function is evaluated 
+In additive diversity partitioning, mean values of alpha diversity at lower levels of a sampling
+hierarchy are compared to the total diversity in the entire data set (gamma diversity).
+In hierarchical null model testing, a statistic returned by a function is evaluated
 according to a nested hierarchical sampling design (\code{hiersimu}).
 }
 \usage{
 adipart(...)
 \method{adipart}{default}(y, x, index=c("richness", "shannon", "simpson"),
-    weights=c("unif", "prop"), relative = FALSE, nsimul=99, ...)
+    weights=c("unif", "prop"), relative = FALSE, nsimul=99,
+    method = "r2dtable", ...)
 \method{adipart}{formula}(formula, data, index=c("richness", "shannon", "simpson"),
-    weights=c("unif", "prop"), relative = FALSE, nsimul=99, ...)
+    weights=c("unif", "prop"), relative = FALSE, nsimul=99,
+    method = "r2dtable", ...)
 
 hiersimu(...)
 \method{hiersimu}{default}(y, x, FUN, location = c("mean", "median"),
-    relative = FALSE, drop.highest = FALSE, nsimul=99, ...)
+    relative = FALSE, drop.highest = FALSE, nsimul=99,
+    method = "r2dtable", ...)
 \method{hiersimu}{formula}(formula, data, FUN, location = c("mean", "median"),
-    relative = FALSE, drop.highest = FALSE, nsimul=99, ...)
+    relative = FALSE, drop.highest = FALSE, nsimul=99,
+    method = "r2dtable", ...)
 }
 \arguments{
   \item{y}{A community matrix.}
@@ -57,10 +61,16 @@ hiersimu(...)
     values are given relative to the value of gamma for function
     \code{adipart}.}
 
-  \item{nsimul}{Number of permutations to use if \code{matr} is not of
-    class 'permat'.  If \code{nsimul = 0}, only the \code{FUN} argument
-    is evaluated. It is thus possible to reuse the statistic values
-    without using a null model.}
+  \item{nsimul}{Number of permutations to use.  If \code{nsimul = 0},
+    only the \code{FUN} argument is evaluated.
+    It is thus possible to reuse the statistic values
+    without a null model.}
+
+  \item{method}{Null model method: either a name (character string) of
+    a method defined in \code{\link{make.commsim}} or a
+    \code{\link{commsim}} function.
+    The default \code{"r2dtable"} keeps row sums and column sums fixed.
+    See \code{\link{oecosimu}} for Details and Examples.}
 
   \item{FUN}{A function to be used by \code{hiersimu}. This must be
     fully specified, because currently other arguments cannot be passed
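
A hedged sketch of the new \code{method} argument with a deliberately
trivial two-level hierarchy (the mite data is an arbitrary choice):

    library(vegan)
    data(mite)
    hier <- data.frame(sample = as.factor(seq_len(nrow(mite))),
                       all = as.factor(rep(1, nrow(mite))))
    adipart(mite ~ sample + all, hier, index = "richness",
            nsimul = 19, method = "quasiswap")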
diff --git a/man/adonis.Rd b/man/adonis.Rd
index d59520a..038a43d 100644
--- a/man/adonis.Rd
+++ b/man/adonis.Rd
@@ -1,6 +1,7 @@
 \encoding{UTF-8}
 \name{adonis}
 \alias{adonis}
+\alias{adonis2}
 
 \title{Permutational Multivariate Analysis of Variance Using Distance Matrices}
 
@@ -10,27 +11,49 @@
   matrices; uses a permutation test with pseudo-\eqn{F} ratios.}
 
 \usage{
+adonis2(formula, data, permutations = 999, method = "bray",
+    sqrt.dist = FALSE, add = FALSE, by = "terms",
+    parallel = getOption("mc.cores"), ...)
 adonis(formula, data, permutations = 999, method = "bray",
-       strata = NULL, contr.unordered = "contr.sum",
-       contr.ordered = "contr.poly", parallel = getOption("mc.cores"), ...)
+    strata = NULL, contr.unordered = "contr.sum",
+    contr.ordered = "contr.poly", parallel = getOption("mc.cores"), ...)
 }
 
-\arguments{  
-  \item{formula}{a typical model formula such as \code{Y ~ A + B*C}, but
-    where \code{Y} is either a dissimilarity object (inheriting from
-    class \code{"dist"}) or data frame or a matrix; \code{A}, \code{B}, and
-    \code{C} may be factors or continuous variables. If a dissimilarity
-  object is supplied, no species coefficients can be calculated (see
-  Value below).} 
-  \item{data}{ the data frame from which \code{A}, \code{B}, and
-    \code{C} would be drawn.} 
+\arguments{
+
+  \item{formula}{Model formula. The LHS must be either a community
+    data matrix or a dissimilarity matrix, e.g., from
+    \code{\link{vegdist}} or \code{\link{dist}}.  If the LHS is a data
+    matrix, function \code{\link{vegdist}} will be used to find the
+    dissimilarities. The RHS defines the independent variables. These
+    can be continuous variables or factors, they can be transformed
+    within the formula, and they can have interactions as in a typical
+    \code{\link{formula}}. If a dissimilarity object is supplied, no
+    species coefficients can be calculated in \code{adonis} (see Value
+    below).}
+
+  \item{data}{ the data frame for the independent variables.}
   \item{permutations}{a list of control values for the permutations
     as returned by the function \code{\link[permute]{how}}, or the
     number of permutations required, or a permutation matrix where each
     row gives the permuted indices.}
   \item{method}{ the name of any method used in \code{\link{vegdist}} to
     calculate pairwise distances if the left hand side of the
-    \code{formula} was a data frame or a matrix. } 
+    \code{formula} was a data frame or a matrix. }
+  \item{sqrt.dist}{Take square root of dissimilarities. This often
+    euclidifies dissimilarities.}
+  \item{add}{Add a constant to the non-diagonal dissimilarities such
+    that all eigenvalues are non-negative in the underlying Principal
+    Co-ordinates Analysis (see \code{\link{wcmdscale}} for
+    details). Choice \code{"lingoes"} (or \code{TRUE}) uses the
+    recommended method of Legendre & Anderson (1999: \dQuote{method
+    1}) and \code{"cailliez"} uses their \dQuote{method 2}.}
+  \item{by}{\code{by = "terms"} will assess significance for each term
+    (sequentially from first to last), setting \code{by = "margin"}
+    will assess the marginal effects of the terms (each marginal term
+    analysed in a model with all other variables), and \code{by =
+    NULL} will assess the overall significance of all terms
+    together. The argument is passed on to \code{\link{anova.cca}}.}
   \item{strata}{ groups (strata) within which to constrain permutations.  }
   \item{contr.unordered, contr.ordered}{contrasts used for the design
     matrix (default in R is dummy or treatment contrasts for unordered
@@ -42,83 +65,46 @@ adonis(formula, data, permutations = 999, method = "bray",
   \item{\dots}{Other arguments passed to \code{vegdist}.}
 }
 
-\details{\code{adonis} is a function for the analysis and partitioning
-sums of squares using semimetric and metric distance matrices. Insofar
-as it partitions sums of squares of a multivariate data set, it is
-directly analogous to MANOVA (multivariate analysis of
-variance). M.J. Anderson (McArdle and Anderson 2001, Anderson 2001) refers to the
-method as \dQuote{permutational manova} (formerly \dQuote{nonparametric manova}). Further, as its inputs are
-linear predictors, and a response matrix of an arbitrary number of
-columns (2 to millions), it is a robust alternative to both parametric
-MANOVA and to ordination methods for describing how variation is
-attributed to different experimental treatments or uncontrolled
-covariates. It is also analogous to redundancy analysis (Legendre and
-Anderson 1999).
-
-Typical uses of \code{adonis} include analysis of ecological community
-data (samples X species matrices) or genetic data where we might have a
-limited number of samples of individuals and thousands or millions of
-columns of gene expression data (e.g. Zapala and Schork 2006).
-
-\code{adonis} is an alternative to AMOVA (nested analysis of molecular
-variance, Excoffier, Smouse, and Quattro, 1992;
-\code{\link[ade4]{amova}} in the \pkg{ade4} package) for both crossed
-and nested factors.
-
-If the experimental design has nestedness, then use \code{strata} to
-test hypotheses. For instance, imagine we are testing whether a
-plant community is influenced by nitrate amendments, and we have two
-replicate plots at each of two levels of nitrate (0, 10 ppm). We have
-replicated the experiment in three fields with (perhaps) different
-average productivity. In this design, we would need to specify
-\code{strata = field} so that randomizations occur only \emph{within
-each field} and not across all fields . See example below.
-
-Like AMOVA (Excoffier et al. 1992), \code{adonis} relies on a
-long-understood phenomenon that allows one to partition sums of squared
-deviations from a centroid in two different ways (McArdle and Anderson
-2001). The most widely recognized method, used, e.g., for ANOVA and
-MANOVA, is to first identify the relevant centroids and then to
-calculated the squared deviations from these points. For a centered
-\eqn{n \times p}{n x p} response matrix \eqn{Y}, this method uses the
-\eqn{p \times p}{p x p} inner product matrix \eqn{Y'Y}. The less
-appreciated method is to use the \eqn{n \times n}{n x n} outer product
-matrix \eqn{YY'}. Both AMOVA and \code{adonis} use this latter
-method. This allows the use of any semimetric (e.g. Bray-Curtis, aka
-Steinhaus, Czekanowski, and \enc{Sørensen}{Sorensen}) or metric
-(e.g. Euclidean) distance matrix (McArdle and Anderson 2001). Using
-Euclidean distances with the second method results in the same analysis
-as the first method.
-
-Significance tests are done using \eqn{F}-tests based on sequential sums
-of squares from permutations of the raw data, and not permutations of
-residuals. Permutations of the raw data may have better small sample
-characteristics. Further, the precise meaning of hypothesis tests will
-depend upon precisely what is permuted. The strata argument keeps groups
-intact for a particular hypothesis test where one does not want to
-permute the data among particular groups. For instance, \code{strata = B} 
-causes permutations among levels of \code{A} but retains data within
-levels of \code{B} (no permutation among levels of \code{B}). See
-\code{\link{permutations}} for additional details on permutation tests
-in Vegan.
-
-The default \code{\link{contrasts}} are different than in \R in
-general. Specifically, they use \dQuote{sum} contrasts, sometimes known
-as \dQuote{ANOVA} contrasts. See a useful text (e.g. Crawley,
-2002) for a transparent introduction to linear model contrasts. This
-choice of contrasts is simply a personal
-pedagogical preference. The particular contrasts can be set to any
-\code{\link{contrasts}} specified in \R, including Helmert and treatment
-contrasts.
-
-Rules associated with formulae apply. See "An Introduction to R" for an
-overview of rules.
-
-\code{print.adonis} shows the \code{aov.tab} component of the output.
+\details{
+
+\code{adonis2} and \code{adonis} are functions for analysing and
+partitioning sums of squares using dissimilarities. Function
+\code{adonis} is directly based on the algorithm of Anderson (2001) and
+performs a sequential test of terms. Function \code{adonis2} is based on
+the principles of McArdle & Anderson (2001) and can perform sequential,
+marginal and overall tests. Function \code{adonis2} also allows using
+additive constants or square roots of dissimilarities to avoid negative
+eigenvalues, but both functions can handle semimetric indices (such as
+Bray-Curtis) that produce negative eigenvalues. Function \code{adonis2}
+can be much slower than \code{adonis}, in particular with several
+terms. With the same random permutation, tests are identical in both
+functions, and the results are also identical to \code{\link{anova.cca}}
+of \code{\link{dbrda}} and \code{\link{capscale}}. With Euclidean
+distances, the tests are also identical to \code{\link{anova.cca}} of
+\code{\link{rda}}.
+
+The functions partition sums of squares of a multivariate data set, and
+they are directly analogous to MANOVA (multivariate analysis of
+variance). McArdle and Anderson (2001) and Anderson (2001) refer to the
+method as \dQuote{permutational manova} (formerly \dQuote{nonparametric
+manova}). Further, as the inputs are linear predictors, and a response
+matrix of an arbitrary number of columns, they are a robust alternative
+to both parametric MANOVA and to ordination methods for describing how
+variation is attributed to different experimental treatments or
+uncontrolled covariates. The functions are also analogous to
+distance-based redundancy analysis in \code{\link{dbrda}} and
+\code{\link{capscale}} (Legendre and Anderson 1999).  They provide
+an alternative to AMOVA (nested analysis of molecular variance,
+Excoffier, Smouse, and Quattro, 1992; \code{\link[ade4]{amova}} in the
+\pkg{ade4} package) for both crossed and nested factors.
+
 }
+
 \value{
-  This function returns typical, but limited, output for analysis of
-  variance (general linear models). 
+
+  Function \code{adonis2} returns an \code{\link{anova.cca}} result
+  object.  Function \code{adonis} returns an object of class
+  \code{"adonis"} with following components:
 
   \item{aov.tab}{Typical AOV table showing sources of variation,
     degrees of freedom, sequential sums of squares, mean squares,
@@ -160,9 +146,6 @@ overview of rules.
 Anderson, M.J. 2001. A new method for non-parametric multivariate
 analysis of variance. \emph{Austral Ecology}, \strong{26}: 32--46.
 
-Crawley, M.J. 2002. \emph{Statistical Computing: An Introduction to Data
-  Analysis Using S-PLUS} 
-
 Excoffier, L., P.E. Smouse, and J.M. Quattro. 1992. Analysis of
 molecular variance inferred from metric distances among DNA haplotypes:
 Application to human mitochondrial DNA restriction data. \emph{Genetics},
@@ -179,26 +162,22 @@ analysis. \emph{Ecology}, \strong{82}: 290--297.
 Warton, D.I., Wright, T.W., Wang, Y. 2012. Distance-based multivariate
 analyses confound location and dispersion effects. \emph{Methods in
 Ecology and Evolution}, 3, 89--101.
-
-Zapala, M.A. and N.J. Schork. 2006. Multivariate regression analysis of
-distance matrices for testing associations between gene expression
-patterns and related variables. \emph{Proceedings of the National Academy of
-Sciences, USA}, \strong{103}:19430--19435.
 }
-\author{Martin Henry H. Stevens
-\email{HStevens at muohio.edu},
-  adapted to \pkg{vegan} by Jari Oksanen. }
+
+\author{Martin Henry H. Stevens (\code{adonis}) and Jari Oksanen
+  (\code{adonis2}). }
 
 \seealso{ \code{\link{mrpp}}, \code{\link{anosim}},
   \code{\link{mantel}}, \code{\link{varpart}}. }
 \examples{
 data(dune)
 data(dune.env)
-adonis(dune ~ Management*A1, data=dune.env, permutations=99)
-
+## default test by terms
+adonis2(dune ~ Management*A1, data = dune.env)
+## overall tests
+adonis2(dune ~ Management*A1, data = dune.env, by = NULL)
 
 ### Example of use with strata, for nested (e.g., block) designs.
-
 dat <- expand.grid(rep=gl(2,1), NO3=factor(c(0,10)),field=gl(3,1) )
 dat
 Agropyron <- with(dat, as.numeric(field) + as.numeric(NO3)+2) +rnorm(12)/2
@@ -210,17 +189,18 @@ dotplot(total ~ NO3, dat, jitter.x=TRUE, groups=field,
 Y <- data.frame(Agropyron, Schizachyrium)
 mod <- metaMDS(Y)
 plot(mod)
-### Hulls show treatment
-with(dat, ordihull(mod, group=NO3, show="0"))
-with(dat, ordihull(mod, group=NO3, show="10", col=3))
+### Ellipsoid hulls show treatment
+with(dat, ordiellipse(mod, field, kind = "ehull", label = TRUE))
 ### Spider shows fields
-with(dat, ordispider(mod, group=field, lty=3, col="red"))
-
-### Correct hypothesis test (with strata)
-adonis(Y ~ NO3, data=dat, strata=dat$field, perm=999)
+with(dat, ordispider(mod, field, lty=3, col="red"))
 
 ### Incorrect (no strata)
-adonis(Y ~ NO3, data=dat, perm=999)
+perm <- how(nperm = 199)
+adonis2(Y ~ NO3, data = dat, permutations = perm)
+
+## Correct with strata
+setBlocks(perm) <- with(dat, field)
+adonis2(Y ~ NO3, data = dat, permutations = perm)
 }
 
 \keyword{multivariate }
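
The three values of \code{by} side by side, as a hedged sketch:

    library(vegan)
    data(dune, dune.env)
    adonis2(dune ~ Management + A1, data = dune.env, by = "terms")
    adonis2(dune ~ Management + A1, data = dune.env, by = "margin")
    adonis2(dune ~ Management + A1, data = dune.env, by = NULL)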
diff --git a/man/anosim.Rd b/man/anosim.Rd
index 35a0487..d42915f 100644
--- a/man/anosim.Rd
+++ b/man/anosim.Rd
@@ -105,7 +105,7 @@ anosim(dat, grouping, permutations = 999, distance = "bray", strata = NULL,
   and dispersion within groups and the results can be difficult to
   interpret (cf. Warton et al. 2012).  The function returns a lot of
   information to ease studying its performance. Most \code{anosim}
-  models could be analysed with \code{\link{adonis}} which seems to be a
+  models could be analysed with \code{\link{adonis2}} which seems to be a
   more robust alternative.
 
 }
@@ -115,7 +115,7 @@ dissimilarities instead of their ranks.
 \code{\link{dist}} and \code{\link{vegdist}} for obtaining
   dissimilarities, and \code{\link{rank}} for ranking real values.  For
   comparing dissimilarities against continuous variables, see
-  \code{\link{mantel}}. Function \code{\link{adonis}} is a more robust
+  \code{\link{mantel}}. Function \code{\link{adonis2}} is a more robust
   alternative that should be preferred. }
 
 \examples{
diff --git a/man/betadisper.Rd b/man/betadisper.Rd
index 4d83afb..27476c6 100644
--- a/man/betadisper.Rd
+++ b/man/betadisper.Rd
@@ -27,7 +27,8 @@
   Tukey's 'Honest Significant Difference' method.
 }
 \usage{
-betadisper(d, group, type = c("median","centroid"), bias.adjust = FALSE)
+betadisper(d, group, type = c("median","centroid"), bias.adjust = FALSE,
+       sqrt.dist = FALSE, add = FALSE)
 
 \method{anova}{betadisper}(object, \dots)
 
@@ -36,7 +37,11 @@ betadisper(d, group, type = c("median","centroid"), bias.adjust = FALSE)
 
 \method{eigenvals}{betadisper}(x, \dots)
 
-\method{plot}{betadisper}(x, axes = c(1,2), cex = 0.7, hull = TRUE,
+\method{plot}{betadisper}(x, axes = c(1,2), cex = 0.7,
+     pch = seq_len(ng), col = NULL, lty = "solid", lwd = 1, hull = TRUE,
+     ellipse = FALSE, conf,
+     segments = TRUE, seg.col = "grey", seg.lty = lty, seg.lwd = lwd,
+     label = TRUE, label.cex = 1,
      ylab, xlab, main, sub, \dots)
 
 \method{boxplot}{betadisper}(x, ylab = "Distance to centroid", ...)
@@ -55,19 +60,49 @@ betadisper(d, group, type = c("median","centroid"), bias.adjust = FALSE)
     level (i.e., one group).}
   \item{type}{the type of analysis to perform. Use the spatial median or
     the group centroid? The spatial median is now the default.}
-  \item{bias.adjust}{logical: adjust for small sample bias in beta diversity estimates?}
+  \item{bias.adjust}{logical: adjust for small sample bias in beta
+    diversity estimates?}
+  \item{sqrt.dist}{Take square root of dissimilarities. This often
+    euclidifies dissimilarities.}
+  \item{add}{Add a constant to the non-diagonal dissimilarities such
+    that all eigenvalues are non-negative in the underlying Principal
+    Co-ordinates Analysis (see \code{\link{wcmdscale}} for
+    details). Choice \code{"lingoes"} (or \code{TRUE}) uses the
+    recommended method of Legendre & Anderson (1999: \dQuote{method
+    1}) and \code{"cailliez"} uses their \dQuote{method 2}.}
   \item{display}{character; partial match to access scores for
     \code{"sites"} or \code{"species"}.}
   \item{object, x}{an object of class \code{"betadisper"}, the result of a
     call to \code{betadisper}.}
   \item{choices, axes}{the principal coordinate axes wanted.}
   \item{hull}{logical; should the convex hull for each group be plotted?}
+  \item{ellipse}{logical; should the standard deviation data ellipse for
+    each group be plotted?}
+  \item{conf}{Expected fractions of data coverage for data ellipses,
+    e.g. 0.95. The default is to draw a 1 standard deviation data
+    ellipse, but if supplied, \code{conf} is multiplied with the
+    corresponding value found from the Chi-squared distribution with 2df
+    to provide the requested coverage (probability contour).}
+  \item{pch}{plot symbols for the groups, a vector of length equal to
+    the number of groups.}
+  \item{col}{colors for the plot symbols for the groups, a vector of
+    length equal to the number of groups.}
+  \item{lty, lwd}{line type and line width for convex hulls and confidence
+    ellipses.}
+  \item{segments}{logical; should segments joining points to their
+    centroid be drawn?}
+  \item{seg.col}{colour to draw segments between points and their
+    centroid. Can be a vector, in which case one colour per group.}
+  \item{seg.lty, seg.lwd}{linetype and line width for segments.}
+  \item{label}{logical; should the centroids be labelled with their
+    respective factor label?}
+  \item{label.cex}{numeric; character expansion for centroid labels.}
   \item{cex, ylab, xlab, main, sub}{graphical parameters. For details,
     see \code{\link{plot.default}}.}
   \item{which}{A character vector listing terms in the fitted model for
     which the intervals should be calculated. Defaults to the grouping
     factor.}
-  \item{ordered}{Logical; see \code{\link{TukeyHSD}}.}
+  \item{ordered}{logical; see \code{\link{TukeyHSD}}.}
   \item{conf.level}{A numeric value between zero and one giving the
     family-wise confidence level to use.}
   \item{\dots}{arguments, including graphical parameters (for
@@ -247,8 +282,12 @@ plot(mod.HSD)
 ## first two PCoA axes
 plot(mod)
 
+## with data ellipses instead of hulls
+plot(mod, ellipse = TRUE, hull = FALSE) # 1 sd data ellipse
+plot(mod, ellipse = TRUE, hull = FALSE, conf = 0.90) # 90% data ellipse
+
 ## can also specify which axes to plot, ordering respected
-plot(mod, axes = c(3,1))
+plot(mod, axes = c(3,1), seg.col = "forestgreen", seg.lty = "dashed")
 
 ## Draw a boxplot of the distances to centroid for each group
 boxplot(mod)
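
A minimal sketch of the new plotting arguments, continuing with the mod
object from the example above. The conf coverage presumably scales the
1 sd ellipse by the square root of the corresponding Chi-squared
quantile with 2 df:

    sqrt(qchisq(0.90, df = 2))   # assumed radius multiplier for conf = 0.90
    plot(mod, ellipse = TRUE, hull = FALSE, conf = 0.90,
         seg.col = "grey60", label = TRUE, label.cex = 0.8)
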
diff --git a/man/betadiver.Rd b/man/betadiver.Rd
index c5fe882..a6ccca7 100644
--- a/man/betadiver.Rd
+++ b/man/betadiver.Rd
@@ -55,7 +55,7 @@ betadiver(x, method = NA, order = FALSE, help = FALSE, ...)
   \code{\link{designdist}}, but the current function provides a
   conventional shortcut. The function only finds the indices. The proper
   analysis must be done with functions such as \code{\link{betadisper}},
-  \code{\link{adonis}} or \code{\link{mantel}}.
+  \code{\link{adonis2}} or \code{\link{mantel}}.
 
   The indices are directly taken from Table 1 of Koleff et al. (2003),
   and they can be selected either by the index number or the subscript
@@ -82,7 +82,7 @@ betadiver(x, method = NA, order = FALSE, help = FALSE, ...)
   \code{"dist"} object which can be used in any function analysing
   dissimilarities. For beta diversity, particularly useful functions
   are \code{\link{betadisper}} to study the betadiversity in groups,
-  \code{\link{adonis}} for any model, and \code{\link{mantel}} to
+  \code{\link{adonis2}} for any model, and \code{\link{mantel}} to
   compare beta diversities to other dissimilarities or distances
   (including geographical distances). Although \code{betadiver}
   returns a \code{"dist"} object, some indices are similarities and
@@ -109,15 +109,18 @@ betadiver(x, method = NA, order = FALSE, help = FALSE, ...)
 \author{Jari Oksanen }
 \section{Warning }{Some indices return similarities instead of dissimilarities.} 
 
-\seealso{ \code{\link{designdist}} for an alternative to implement all
-  these functions, \code{\link{vegdist}} for some canned alternatives,
-  and \code{\link{betadisper}}, \code{\link{adonis}},
-  \code{\link{mantel}} for analysing beta diversity objects.
-  Functions \code{\link{nestedbetasor}} and
+\seealso{ \code{\link{designdist}} can be used to implement all these
+  functions, and also allows using notation with \code{alpha} and
+  \code{gamma} diversities.  \code{\link{vegdist}} has some canned
+  alternatives.  Functions \code{\link{betadisper}},
+  \code{\link{adonis2}} and \code{\link{mantel}} can be used for
+  analysing beta diversity objects. The returned dissimilarities can
+  be used in any distance-based methods, such as
+  \code{\link{metaMDS}}, \code{\link{capscale}} and
+  \code{\link{dbrda}}. Functions \code{\link{nestedbetasor}} and
   \code{\link{nestedbetajac}} implement decomposition of beta diversity
   measures (\enc{Sørensen}{Sorensen} and Jaccard) into turnover and
-  nestedness components following Baselga (2010).
-}
+  nestedness components following Baselga (2010).  }
 
 
 \examples{
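
A minimal sketch, assuming the dune data, of passing one of these
indices to the analysis functions named above:

    data(dune); data(dune.env)
    d <- betadiver(dune, "w")    # Whittaker's beta as a "dist" object
    anova(betadisper(d, dune.env$Management))
    adonis2(d ~ Management, data = dune.env)
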
diff --git a/man/capscale.Rd b/man/capscale.Rd
index c5e286f..fa99738 100644
--- a/man/capscale.Rd
+++ b/man/capscale.Rd
@@ -1,19 +1,20 @@
 \name{capscale}
 \alias{capscale}
+\alias{oldCapscale}
+\alias{dbrda}
 
-\title{[Partial] Constrained Analysis of Principal Coordinates or
-  distance-based RDA }
+\title{[Partial] Distance-based Redundancy Analysis }
 \description{
-  Constrained Analysis of Principal Coordinates (CAP) is an ordination method
+  Distance-based redundancy analysis (dbRDA) is an ordination method
   similar to Redundancy Analysis (\code{\link{rda}}), but it allows
   non-Euclidean dissimilarity indices, such as Manhattan or
   Bray--Curtis distance. Despite this non-Euclidean feature, the analysis
   is strictly linear and metric. If called with Euclidean distance,
-  the results are identical to \code{\link{rda}}, but \code{capscale}
-  will be much more inefficient. Function \code{capscale} is a
-  constrained version of metric scaling, a.k.a. principal coordinates
-  analysis, which is based on the Euclidean distance but can be used,
-  and is more useful, with other dissimilarity measures. The function
+  the results are identical to \code{\link{rda}}, but dbRDA
+  will be less efficient. Functions \code{capscale} and \code{dbrda} are
+  constrained versions of metric scaling, a.k.a. principal coordinates
+  analysis, which are based on the Euclidean distance but can be used,
+  and are more useful, with other dissimilarity measures. The functions
   can also perform unconstrained principal coordinates analysis,
   optionally using extended dissimilarities.
 }
@@ -21,6 +22,9 @@
 capscale(formula, data, distance = "euclidean", sqrt.dist = FALSE,
     comm = NULL, add = FALSE,  dfun = vegdist, metaMDSdist = FALSE,
     na.action = na.fail, subset = NULL, ...)
+dbrda(formula, data, distance = "euclidean", sqrt.dist = FALSE,
+    add = FALSE, dfun = vegdist, metaMDSdist = FALSE,
+    na.action = na.fail, subset = NULL, ...)
 }
 
 \arguments{
@@ -30,7 +34,8 @@ capscale(formula, data, distance = "euclidean", sqrt.dist = FALSE,
     LHS must be either a community data matrix or a dissimilarity matrix,
     e.g., from
     \code{\link{vegdist}} or \code{\link{dist}}.
-    If the LHS is a data matrix, function \code{\link{vegdist}}
+    If the LHS is a data matrix, function \code{\link{vegdist}} or
+    the function given in \code{dfun}
     will be used to find the dissimilarities. The RHS defines the
     constraints. The constraints can be continuous variables or factors,
     they can be transformed within the formula, and they can have
@@ -44,24 +49,19 @@ capscale(formula, data, distance = "euclidean", sqrt.dist = FALSE,
     the LHS of the \code{formula} is a data frame instead of
     dissimilarity matrix.}
   \item{sqrt.dist}{Take square roots of dissimilarities. See section
-  \code{Notes} below.}
+    \code{Details} below.}
   \item{comm}{ Community data frame which will be used for finding
     species scores when the LHS of the \code{formula} was a
     dissimilarity matrix. This is not used if the LHS is a data
-    frame. If this is not supplied, the ``species scores'' are the axes
-    of initial metric scaling (\code{\link{cmdscale}}) and may be
-    confusing.}
-  \item{add}{Logical indicating if an additive constant should be
-     computed, and added to the non-diagonal dissimilarities such
-     that all eigenvalues are non-negative in the underlying
-     Principal Co-ordinates Analysis (see \code{\link{cmdscale}} 
-     for details). This implements \dQuote{correction method 2} of
-     Legendre & Legendre (2012, p. 503). The negative eigenvalues are
-     caused by using semi-metric or non-metric dissimilarities with
-     basically metric \code{\link{cmdscale}}. They are harmless and
-     ignored in \code{capscale}, but you also can avoid warnings with
-     this option. }
-   \item{dfun}{Distance or dissimilarity function used. Any function
+    frame. If this is not supplied, the ``species scores'' are unavailable.}
+  \item{add}{Add a constant to the non-diagonal dissimilarities such
+     that all eigenvalues are non-negative in the underlying Principal
+     Co-ordinates Analysis (see \code{\link{wcmdscale}} for
+     details). Choice \code{"lingoes"} (or \code{TRUE}) uses the
+     recommended method of Legendre & Anderson (1999: \dQuote{method
+     1}) and \code{"cailliez"} uses their \dQuote{method 2}. The
+     latter is the only one in \code{\link{cmdscale}}.}
+  \item{dfun}{Distance or dissimilarity function used. Any function
      returning standard \code{"dist"} and taking the index name as the
      first argument can be used. }
    \item{metaMDSdist}{Use \code{\link{metaMDSdist}} similarly as in
@@ -87,53 +87,53 @@ capscale(formula, data, distance = "euclidean", sqrt.dist = FALSE,
      \code{\link{metaMDSdist}}.  }
 }
 \details{
-  Canonical Analysis of Principal Coordinates (CAP) is simply a
-  Redundancy Analysis of results of Metric (Classical) Multidimensional
-  Scaling (Anderson & Willis 2003). Function capscale uses two steps:
-  (1) it ordinates the dissimilarity matrix using
-  \code{\link{cmdscale}} and (2) analyses these results using
-  \code{\link{rda}}. If the user supplied a community data frame instead
-  of dissimilarities, the function will find the needed dissimilarity
-  matrix using \code{\link{vegdist}} with specified
-  \code{distance}. However, the method will accept dissimilarity
-  matrices from \code{\link{vegdist}}, \code{\link{dist}}, or any
-  other method producing similar matrices. The constraining variables can be
-  continuous or factors or both, they can have interaction terms,
-  or they can be transformed in the call. Moreover, there can be a
-  special term
-  \code{Condition} just like in \code{\link{rda}} and \code{\link{cca}}
-  so that ``partial'' CAP can be performed.
-
-  The current implementation  differs from the method suggested by
-  Anderson & Willis (2003) in three major points which actually make it
-  similar to distance-based redundancy analysis (Legendre & Anderson
-  1999):
-  \enumerate{
-    \item Anderson & Willis used the orthonormal solution of
-    \code{\link{cmdscale}}, whereas \code{capscale} uses axes
-    weighted by corresponding eigenvalues, so that the ordination
-    distances are the best approximations of original
-    dissimilarities. In the original method, later ``noise'' axes are
-    just as important as first major axes.
-    \item Anderson & Willis take only a subset of axes, whereas 
-    \code{capscale} uses all axes with positive eigenvalues. The use of
-    subset is necessary with orthonormal axes to chop off some
-    ``noise'', but the use of all axes guarantees that the results are
-    the best approximation of original dissimilarities.
-    \item Function \code{capscale} adds species scores as weighted sums
-    of (residual) community matrix (if the matrix is available), whereas
-    Anderson & Willis have no fixed method for adding species scores.
-  }
-  With these definitions, function \code{capscale} with Euclidean
-  distances will be identical to \code{\link{rda}} in eigenvalues and
-  in site, species and biplot scores (except for possible sign
-  reversal). 
-  However, it makes no sense to use \code{capscale} with
-  Euclidean distances, since direct use of \code{\link{rda}} is much more
-  efficient. Even with non-Euclidean dissimilarities, the
-  rest of the analysis will be metric and linear.
-
-  The function can be also used to perform ordinary metric scaling
+
+  Functions \code{capscale} and \code{dbrda} provide two alternative
+  implementations of dbRDA. Function \code{capscale} is based on
+  Legendre & Anderson (1999): the dissimilarity data are first
+  ordinated using metric scaling, and the ordination results are
+  analysed with \code{\link{rda}}. Function \code{dbrda} is based on
+  McArdle & Anderson (2001) and directly decomposes
+  dissimilarities. It does not use \code{\link{rda}} but a parallel
+  implementation adapted for analysing dissimilarities and returns a
+  subset of \code{\link{rda}} items. With Euclidean distances both
+  results are identical to \code{\link{rda}}.  Other dissimilarities
+  may give negative eigenvalues associated with imaginary
+  axes. Negative eigenvalues are handled differently: \code{capscale}
+  ignores imaginary axes and analyses only real axes with positive
+  eigenvalues, and \code{dbrda} directly analyses dissimilarities and
+  can give negative eigenvalues in any component. Both methods define
+  total inertia of conditions, constraints and residuals identically.
+  
+  If the user supplied a community data frame instead of
+  dissimilarities, the functions will find dissimilarities using
+  \code{\link{vegdist}} or the distance function given in \code{dfun} with
+  the specified \code{distance}. The functions will accept distance
+  objects from \code{\link{vegdist}}, \code{\link{dist}}, or any other
+  method producing similar objects. The constraining variables can be
+  continuous or factors or both, they can have interaction terms, or
+  they can be transformed in the call. Moreover, there can be a
+  special term \code{Condition} just like in \code{\link{rda}} and
+  \code{\link{cca}} so that ``partial'' analysis can be performed.
+
+  Non-Euclidean dissimilarities can produce negative eigenvalues
+  (Legendre & Anderson 1999, McArdle & Anderson 2001). The total
+  inertia and \code{\link{anova.cca}} tests for constraints will also
+  include the effects of imaginary axes with negative eigenvalues
+  following McArdle & Anderson (2001). If there are negative
+  eigenvalues, the printed output of \code{capscale} will add a column
+  with sums of positive eigenvalues and an item of sum of negative
+  eigenvalues, and \code{dbrda} will add a column giving the number of
+  real dimensions with positive eigenvalues.  If negative eigenvalues
+  are disturbing, \code{capscale} lets you distort the
+  dissimilarities so that only non-negative eigenvalues will be
+  produced using argument \code{add = TRUE} (this argument is passed
+  to \code{\link{cmdscale}}). Alternatively, with
+  \code{sqrt.dist = TRUE}, square roots of dissimilarities will be used
+  which may help in avoiding negative eigenvalues (Legendre & Anderson
+  1999).
+
+  The functions can also be used to perform ordinary metric scaling
   a.k.a. principal coordinates analysis by using a formula with only a
   constant on the left hand side, or \code{comm ~ 1}. With
   \code{metaMDSdist = TRUE}, the function can do automatic data
@@ -143,14 +143,11 @@ capscale(formula, data, distance = "euclidean", sqrt.dist = FALSE,
   
 }
 \value{
-  The function returns an object of class \code{capscale} which is
-  identical to the result of \code{\link{rda}}. At the moment,
-  \code{capscale} does not have specific methods, but it uses
-  \code{\link{cca}} and \code{\link{rda}} methods
-  \code{\link{plot.cca}},
-  \code{\link{scores.rda}}  etc. Moreover, you
-  can use \code{\link{anova.cca}} for permutation tests of
-  ``significance'' of the results.
+
+  The functions return an object of class \code{capscale} or
+  \code{dbrda} which inherits from \code{\link{rda}}. See
+  \code{\link{cca.object}} for description of the result object.
+
 }
 \references{
   Anderson, M.J. & Willis, T.J. (2003). Canonical analysis of principal
@@ -165,31 +162,24 @@ capscale(formula, data, distance = "euclidean", sqrt.dist = FALSE,
   experiments. \emph{Ecological Monographs} 69, 1--24.
 
   Legendre, P. & Legendre, L. (2012).  \emph{Numerical Ecology}. 3rd English
-  Edition. Elsevier
+  Edition. Elsevier.
+
+  McArdle, B.H. & Anderson, M.J. (2001). Fitting multivariate models
+  to community data: a comment on distance-based redundancy
+  analysis. \emph{Ecology} 82, 290--297.
 }
+
 \author{ Jari Oksanen }
 
-\note{ The function produces negative eigenvalues with non-Euclidean
-  dissimilarity indices. The non-Euclidean component of inertia is
-  given under the title \code{Imaginary} in the printed output. The
-  \code{Total} inertia is the sum of all eigenvalues, but the sum of
-  all non-negative eigenvalues is given as \code{Real Total} (which is
-  higher than the \code{Total}). The ordination is based only on the
-  real dimensions with positive eigenvalues, and therefore the
-  proportions of inertia components only apply to the \code{Real
-  Total} and ignore the \code{Imaginary} component. Permutation tests
-  with \code{\link{anova.cca}} use only the real solution of positive
-  eigenvalues. Function \code{\link{adonis}} gives similar
-  significance tests, but it also handles the imaginary dimensions
-  (negative eigenvalues) and therefore its results may differ from
-  permutation test results of \code{capscale}.
-
-  If the negative eigenvalues are disturbing, you can
-  use argument \code{add = TRUE} passed to \code{\link{cmdscale}}, or,
-  preferably, a distance measure that does not cause these warnings.
-  Alternatively, after square root transformation of distances
-  (argument \code{sqrt.dist = TRUE}) many indices do not produce
-  negative eigenvalues.
+\note{ The function \code{capscale} was originally developed as a
+  variant of constrained analysis of proximities (Anderson & Willis
+  2003), but these developments made it identical to dbRDA.  In
+  older versions of \pkg{vegan} the total inertia and permutation tests
+  were based only on real axes, but in \pkg{vegan} 2.4-0 they also
+  include the imaginary components following McArdle & Anderson
+  (2001). For compatibility with the old versions of \pkg{vegan}, you
+  can use function \code{oldCapscale} to discard the effects of
+  imaginary dimensions (negative eigenvalues).
 
   The inertia is named after the dissimilarity index as defined in the
   dissimilarity data, or as \code{unknown distance} if such an
@@ -207,18 +197,18 @@ capscale(formula, data, distance = "euclidean", sqrt.dist = FALSE,
   square root transformed (argument \code{sqrt.dist = TRUE}). If an
   additive constant was used, keyword \code{euclidified} is added to
   the name of inertia, and the value of the constant is printed
- (argument \code{add = TRUE}).
+  (argument \code{add = TRUE}).
 }
 
 
 \seealso{\code{\link{rda}}, \code{\link{cca}}, \code{\link{plot.cca}},
   \code{\link{anova.cca}}, \code{\link{vegdist}},
-  \code{\link{dist}}, \code{\link{cmdscale}}.
+  \code{\link{dist}}, \code{\link{cmdscale}}, \code{\link{wcmdscale}}.
 
   The function returns similar result object as \code{\link{rda}} (see
   \code{\link{cca.object}}). This section for \code{\link{rda}} gives a
   more complete list of functions that can be used to access and
-  analyse \code{capscale} results.
+  analyse dbRDA results.
 
 }
 \examples{
@@ -238,6 +228,12 @@ capscale(varespec ~ N + P + K + Condition(Al), varechem,
                      dist = "bray", sqrt.dist= TRUE)
 ## Principal coordinates analysis with extended dissimilarities
 capscale(varespec ~ 1, dist="bray", metaMDS = TRUE)
+## dbrda
+dbrda(varespec ~ N + P + K + Condition(Al), varechem,
+                     dist="bray")
+## avoid negative eigenvalues also with Jaccard distances
+dbrda(varespec ~ N + P + K + Condition(Al), varechem,
+                     dist="jaccard")
 }
 \keyword{ multivariate }
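
A minimal sketch contrasting the two implementations on a non-Euclidean
index, reusing varespec and varechem from the examples above:

    m.cap <- capscale(varespec ~ N + P + K + Condition(Al), varechem, dist = "bray")
    m.db <- dbrda(varespec ~ N + P + K + Condition(Al), varechem, dist = "bray")
    eigenvals(m.db)   # dbrda keeps negative eigenvalues of imaginary axes
    anova(m.db)       # permutation test including the imaginary component
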
 
diff --git a/man/cca.object.Rd b/man/cca.object.Rd
index a83241a..87520d2 100644
--- a/man/cca.object.Rd
+++ b/man/cca.object.Rd
@@ -95,6 +95,9 @@
      \code{CCA} components. Only in \code{CCA}.}
     \item{\code{tot.chi}}{Total inertia or the sum of all eigenvalues of the
       component.}
+    \item{\code{real.tot.chi}}{If there are negative eigenvalues in
+      \code{\link{capscale}}, these will be included in \code{tot.chi},
+      and the sum of positive eigenvalues will be given in these items.}
     \item{\code{imaginary.chi}, \code{imaginary.rank},
      \code{imaginary.u.eig}}{The sum, rank (number) of negative
      eigenvalues and scaled site scores for imaginary axes in
@@ -128,9 +131,10 @@
       after both \code{pCCA} and \code{CCA}. In \code{\link{cca}} the
       standardization is Chi-square, and in \code{\link{rda}} centring
       and optional scaling by species standard deviations using function
-      \code{\link{scale}}.} }
+      \code{\link{scale}}.}
   }
 }
+}
 
 \section{NA Action and Subset}{
   If the constraints had missing values or subsets, and \code{\link{na.action}}
@@ -163,17 +167,70 @@
   }
 }
 
-\section{capscale}{
-  Function \code{capscale} may add some items depending on its arguments:
+\section{capscale and dbrda}{
+
+  \pkg{Vegan} has two functions for distance-based Redundancy
+  analysis: \code{\link{capscale}} and \code{\link{dbrda}}.  Function
+  \code{\link{capscale}} uses \code{\link{rda}} and returns its result
+  object, but it may add some items depending on its arguments:
+  
   \describe{
+    \item{\code{real.tot.chi}}{Sum of positive eigenvalues if there are
+       negative eigenvalues. The item \code{tot.chi} gives the total
+       inertia with negative eigenvalues. This item is given for the
+       whole model and for each component \code{pCCA}, \code{CCA} and
+       \code{CA} if there are negative eigenvalues.}
     \item{\code{metaMDSdist}}{The data set name if 
-       \code{metaMDSdist = TRUE}.} 
+       \code{metaMDSdist = TRUE}.}
+    \item{\code{sqrt.dist}}{Logical value, \code{TRUE} if square roots of
+       dissimilarities were used.}
     \item{\code{ac}}{Additive constant used if \code{add = TRUE}.}
+    \item{\code{add}}{The adjustment method to find \code{ac}, either
+       \code{"lingoes"} or \code{"cailliez"} (Legendre & Legendre
+       2012).}
     \item{\code{adjust}}{Adjustment of dissimilarities: see
       \code{\link{capscale}}, section \dQuote{Notes}.}
+    \item{\code{G}}{The working structure of Gower transformed
+      dissimilarities defined as \eqn{-(D^2 - M)/2}, where \eqn{D} are
+      the dissimilarities and \eqn{M} is the centring matrix. This
+      structure is used to assess the total inertia, and it will be used
+      also in permutation tests. This is given for items \code{pCCA} and
+      \code{CCA}, and for \code{CCA} it is the residual \eqn{G} after
+      \code{pCCA}.}
+  }
+
+  Function \code{\link{dbrda}} does not use \code{\link{rda}} but
+  provides a parallel implementation for dissimilarities. Its result
+  output is very similar to \code{\link{capscale}} described above
+  with the following differences:
+
+  \describe{
+  
+    \item{\code{Xbar}, \code{v}}{are \code{NA} because they cannot be
+      calculated from dissimilarities.}
+
+    \item{\code{Fit}}{ of \code{pCCA} is from Gower double centred
+      dissimilarities \code{G} instead of \code{Xbar} (that does not
+      exist).}
+      
+    \item{\code{G}}{ is returned with \code{pCCA}, \code{CCA} and
+      \code{CA} components. It always gives the transformed
+      dissimilarities as they enter the stage of analysis, i.e.,
+      before applying conditions or constraints.}
+
+    \item{\code{eig}}{lists also negative eigenvalues for \code{CCA}
+      and \code{pCCA}.}
+
+   \item{\code{u}}{or row scores only give real axes with positive
+      eigenvalues. The imaginary scores (if any) are in
+      \code{imaginary.u}. The number of columns of real scores
+      (positive eigenvalues) is given in item \code{poseig}. There is
+      no \code{imaginary.u.eig}.}
+      
   }
 }
 
+
 \note{
   In old versions of \pkg{vegan} the object also included scores
   scaled by eigenvalues (\code{u.eig}, \code{v.eig} and \code{wa.eig}),
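
A minimal sketch of inspecting the items described above; item names
are as documented, but whether \code{poseig} sits inside the \code{CCA}
component is an assumption here (varespec and varechem assumed loaded):

    m <- dbrda(varespec ~ N + P + Condition(Al), varechem, dist = "bray")
    range(m$CCA$eig)   # may include negative eigenvalues
    dim(m$CCA$G)       # Gower-transformed dissimilarities entering this step
    m$CCA$poseig       # assumed location of the number of real axes
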
diff --git a/man/commsim.Rd b/man/commsim.Rd
index 02cc443..a623078 100644
--- a/man/commsim.Rd
+++ b/man/commsim.Rd
@@ -4,7 +4,7 @@
 \alias{make.commsim}
 \alias{print.commsim}
 \title{
-Create a Object for Null Model Algorithms
+Create an Object for Null Model Algorithms
 }
 \description{
 The \code{commsim} function can be used to feed Null Model algorithms into
@@ -32,7 +32,7 @@ see Details.
 Logical, if the algorithm applies to presence-absence or count matrices.
 }
   \item{isSeq}{
-Logical, if the algorithm is sequential (needs burnin) or not.
+Logical, if the algorithm is sequential (needs burnin and thinning) or not.
 }
   \item{mode}{
 Character, storage mode of the community matrix, either 
@@ -63,7 +63,8 @@ and must take some of the following arguments:
   \item{\code{...}: }{additional arguments.}
 }
 
-  Several null model algorithm are pre-defined and can be called by
+  You can define your own null model, but
+  several null model algorithms are pre-defined and can be called by
   their name. The predefined algorithms are described in detail in the
   following chapters. The binary null models produce matrices of zeros
   (absences) and ones (presences) also when input matrix is
@@ -73,7 +74,7 @@ and must take some of the following arguments:
   natural subunit for shuffling. All quantitative models can handle
   counts, but only some are able to handle real values. Some of the null
   models are sequential so that the next matrix is derived from the
-  current one. This makes models dependent on each other, and usually
+  current one. This makes models dependent on previous models, and usually
   you must thin these matrices and study the sequences for stability:
   see \code{oecosimu} for details and instructions.
 
@@ -85,37 +86,38 @@ and must take some of the following arguments:
 
 \section{Binary null models}{
 
-  All binary null models retain fill: number of absences or conversely
-  the number of absences. The classic models may also column (species)
-  frequencies (\code{c0}) or row frequencies or species richness of each
-  site (\code{r0}) and take into account commonness and rarity of
-  species (\code{r1}, \code{r2}).  Algorithms \code{swap}, \code{tswap},
-  \code{quasiswap} and \code{backtracking} preserve both row and column
-  frequencies. Two first of these are sequential but the two latter are
-  non-sequential and produce independent matrices. Basic algorithms are
-  reviewed by Wright et al. (1998).
+  All binary null models preserve fill: number of presences or
+  conversely the number of absences. The classic models may also
+  preserve column (species) frequencies (\code{c0}) or row frequencies
+  or species richness of each site (\code{r0}) and take into account
+  commonness and rarity of species (\code{r1}, \code{r2}).  Algorithms
+  \code{swap}, \code{tswap}, \code{curveball}, \code{quasiswap} and
+  \code{backtracking} preserve both row and column frequencies. The
+  first three are sequential but the last two are non-sequential
+  and produce independent matrices. Basic algorithms are reviewed by
+  Wright et al. (1998).
 
 \itemize{
   \item{\code{"r00"}: }{non-sequential algorithm for binary matrices
-    that only  maintains the number of presences (fill).}
+    that only preserves the number of presences (fill).}
 
   \item{\code{"r0", "r0_old"}: }{non-sequential algorithm for binary
-    matrices that maintains the site (row) frequencies.
+    matrices that preserves the site (row) frequencies.
     Methods \code{"r0"} and \code{"r0_old"} implement the
     same method, but use different random number sequences; use
     \code{"r0_old"} if you want to reproduce results in \pkg{vegan
     2.0-0} or older using \code{commsimulator} (now deprecated).}
 
   \item{\code{"r1"}: }{non-sequential algorithm for binary matrices
-    that maintains the site (row) frequencies, but uses column marginal
+    that preserves the site (row) frequencies, but uses column marginal
     frequencies as probabilities of selecting species.}
 
   \item{\code{"r2"}: }{non-sequential algorithm for binary matrices
-    that maintains the site (row) frequencies, and uses squared column
-    sums as as probabilities of selecting species.}
+    that preserves the site (row) frequencies, and uses squared column
+    marginal frequencies as probabilities of selecting species.}
   
   \item{\code{"c0"}: }{non-sequential algorithm for binary matrices
-    that maintains species frequencies (Jonsson 2001). }
+    that preserves species frequencies (Jonsson 2001). }
   
   \item{\code{"swap"}: }{sequential algorithm for binary matrices that
     changes the matrix structure, but does not influence marginal sums
@@ -125,20 +127,32 @@ and must take some of the following arguments:
   \item{\code{"tswap"}: }{sequential algorithm for binary matrices.
     Same as the \code{"swap"} algorithm, but it tries a fixed
     number of times and performs zero to many swaps at one step
-    (according the thin argument in later call). This
+    (according to the thin argument in the call). This
     approach was suggested by \enc{Miklós}{Miklos} & Podani (2004)
     because they found that ordinary swap may lead to biased
-    sequences, since some columns or rows may be more easily swapped.}
+    sequences, since some columns or rows are more easily swapped.}
+
+  \item{\code{"curveball"}: }{sequential method for binary matrices that
+    implements the \sQuote{Curveball} algorithm of Strona et
+    al. (2014). The algorithm selects two random rows and finds the set
+    of unique species that occur only in one of these rows. The
+    algorithm distributes the set of unique species to rows preserving
+    the original row frequencies.  Zero to several species are swapped
+    in one step, and usually the matrix is perturbed more strongly than
+    in other sequential methods.}
 
   \item{\code{"quasiswap"}: }{non-sequential algorithm for binary
     matrices that implements a method where matrix is first filled
     honouring row and column totals, but with integers that may be
-    larger than one.  Then the method inspects random \eqn{2 \times
-    2}{2 by 2} matrices and performs a quasiswap on them. Quasiswap is
-    similar to ordinary swap, but it can reduce numbers above one
-    to ones maintaining marginal totals (\enc{Miklós}{Miklos} & Podani
-    2004).  This is the recommended algorithm if you want to retain both
-    species and row frequencies.}
+    larger than one.  Then the method inspects random
+    \eqn{2 \times 2}{2 by 2} matrices and performs a quasiswap on
+    them. In addition to ordinary swaps, quasiswap can reduce numbers
+    above one to ones preserving marginal totals (\enc{Miklós}{Miklos} &
+    Podani 2004). The method is non-sequential, but it accepts a
+    \code{thin} argument: the convergence is checked after every
+    \code{thin} steps. This allows performing several ordinary swaps in
+    addition to fill changing swaps which helps in reducing or removing
+    the bias.}
 
   \item{\code{"backtracking"}: }{non-sequential algorithm for binary
     matrices that implements a filling method with constraints both
@@ -149,17 +163,18 @@ and must take some of the following arguments:
     where some of the points are removed, and then filling is started
    again, and this backtracking is done so many times that all
     incidences will be filled into matrix. The function may be very slow
-    for some matrices.}
+    for some matrices. The results may be biased and should be inspected
+    carefully before use.}
 }
 }
 
 \section{Quantitative Models for Counts with Fixed Marginal Sums}{
 
-  These models shuffle individuals of counts but keep marginal sums
+  These models shuffle individuals of counts and keep marginal sums
   fixed, but marginal frequencies are not preserved. Algorithm
   \code{r2dtable} uses standard \R function \code{\link{r2dtable}} also
   used for simulated \eqn{P}-values in \code{\link{chisq.test}}.
-  Algorithm \code{quasiswap_count} uses the same, but retains the
+  Algorithm \code{quasiswap_count} uses the same, but preserves the
   original fill. Typically this means increasing numbers of zero cells
   and the result is zero-inflated with respect to \code{r2dtable}. 
 
@@ -173,7 +188,7 @@ and must take some of the following arguments:
     matrices.  This algorithm is similar as Carsten Dormann's
     \code{\link[bipartite]{swap.web}} function in the package
     \pkg{bipartite}. First, a random matrix is generated by the
-    \code{\link{r2dtable}} function retaining row and column sums.  Then
+    \code{\link{r2dtable}} function preserving row and column sums.  Then
     the original matrix fill is reconstructed by sequential steps to
     increase or decrease matrix fill in the random matrix. These steps
     are based on swapping \eqn{2 \times 2}{2 x 2} submatrices (see
@@ -186,13 +201,15 @@ and must take some of the following arguments:
 
   Quantitative swap models are similar to binary \code{swap}, but they
   swap the largest permissible value. The models in this section all
-  maintain the fill and perform a quantitative swap only if this can be
-  done without changing the fill. Single step of swap often changes the
-  matrix very little. In particular, if cell counts are variable, high
-  values change very slowly. Checking the chain stability and
-  independence is even more crucial than in binary swap, and very strong
-  \code{thin}ning is often needed. These models should never be used
-  without inspecting their properties for the current data.
+  maintain the fill and perform a quantitative swap only if this can
+  be done without changing the fill. A single step of swap often changes
+  the matrix very little. In particular, if cell counts are variable,
+  high values change very slowly. Checking the chain stability and
+  independence is even more crucial than in binary swap, and very
+  strong \code{thin}ning is often needed. These models should never be
+  used without inspecting their properties for the current data. These
+  null models can also be defined using the
+  \code{\link{permatswap}} function.
 
  \itemize{ 
 
@@ -214,13 +231,12 @@ and must take some of the following arguments:
     row/column frequencies (Hardy 2008; randomization scheme 2x).}
 
   \item{\code{"abuswap_c"}: }{sequential algorithm for count or
-    nonnegative real valued matrices with fixed column frequencies (see
-    also \code{\link{permatswap}}).  The algorithm is similar as the
-    previous one, but operates on columns.  2 x 2 submatrices. Each step
-    changes the the corresponding row sums, but honours matrix fill,
-    column sums, and row/column frequencies (Hardy 2008; randomization
-    scheme 3x).}  }
-}
+    nonnegative real valued matrices with fixed column frequencies
+    (see also \code{\link{permatswap}}).  The algorithm is similar to
+    the previous one, but operates on columns.  Each step changes
+    the corresponding row sums, but honours matrix fill, column sums,
+    and row/column frequencies (Hardy 2008; randomization scheme 3x).}
+    } }
 
 \section{Quantitative Swap and Shuffle Models}{
 
@@ -234,7 +250,8 @@ and must take some of the following arguments:
   be used with integer data. The shuffling is either free over the
   whole matrix, or within rows (\code{r} methods) or within columns
   (\code{c} methods). Shuffling within a row preserves row sums, and
-  shuffling within a column preserves column sums.
+  shuffling within a column preserves column sums. These models can
+  also be defined with \code{\link{permatswap}}.
 
 \itemize{ 
 
@@ -264,57 +281,57 @@ and must take some of the following arguments:
 \section{Quantitative Shuffle Methods}{
 
   Quantitative shuffle methods are generalizations of binary models
-  \code{r00}, \code{r0} and \code{c0}.  The \code{_ind} methods shuffle
-  individuals so that the grand sum, row sum or column sums are similar
-  as in the observed matrix. These methods are similar as
-  \code{r2dtable} but with still slacker constraints on marginal
-  sums. The \code{_samp} and \code{_both} methods first perform the
-  correspongind binary model with similar restriction on marginal
-  frequencies, and then distribute quantitative values over non-zero
-  cells. The \code{_samp} models shuffle original cell values and can
-  therefore handle also non-count real values. The \code{_both} models
-  shuffle individuals among non-zero values. The shuffling is over the
-  whole matrix in \code{r00_}, and within row in \code{r0_} and within
-  column in \code{c0_} in all cases.
+  \code{r00}, \code{r0} and \code{c0}.  The \code{_ind} methods
+  shuffle individuals so that the grand sum, row sum or column sums
+  are preserved.  These methods are similar to \code{r2dtable} but
+  with still slacker constraints on marginal sums. The \code{_samp}
+  and \code{_both} methods first apply the corresponding binary model
+  with similar restriction on marginal frequencies and then distribute
+  quantitative values over non-zero cells. The \code{_samp} models
+  shuffle original cell values and can therefore handle also non-count
+  real values. The \code{_both} models shuffle individuals among
+  non-zero values. The shuffling is over the whole matrix in
+  \code{r00_}, and within row in \code{r0_} and within column in
+  \code{c0_} in all cases.
 
 \itemize{
   \item{\code{"r00_ind"}: }{non-sequential algorithm for count matrices. 
-    This algorithm keeps total sum constant,
+    This algorithm preserves grand sum and
     individuals are shuffled among cells of the matrix.}
 
   \item{\code{"r0_ind"}: }{non-sequential algorithm for count matrices. 
-    This algorithm keeps row sums constant,
+    This algorithm preserves row sums and
     individuals are shuffled among cells of each row of the matrix.}
 
   \item{\code{"c0_ind"}: }{non-sequential algorithm for count matrices. 
-    This algorithm keeps column sums constant,
+    This algorithm preserves column sums and
     individuals are shuffled among cells of each column of the matrix.}
 
   \item{\code{"r00_samp"}: }{non-sequential algorithm for count 
     or nonnegative real valued (\code{mode = "double"}) matrices. 
-    This algorithm keeps total sum constant,
+    This algorithm preserves grand sum and
     cells of the matrix are shuffled.}
 
   \item{\code{"r0_samp"}: }{non-sequential algorithm for count 
     or nonnegative real valued (\code{mode = "double"}) matrices. 
-    This algorithm keeps row sums constant,
+    This algorithm preserves row sums and
     cells within each row are shuffled.}
 
   \item{\code{"c0_samp"}: }{non-sequential algorithm for count 
     or nonnegative real valued (\code{mode = "double"}) matrices. 
-    This algorithm keeps column sums constant,
+    This algorithm preserves column sums and
     cells within each column are shuffled.}
 
   \item{\code{"r00_both"}: }{non-sequential algorithm for count matrices. 
-    This algorithm keeps total sum constant,
+    This algorithm preserves grand sum and
     cells and individuals among cells of the matrix are shuffled.}
 
   \item{\code{"r0_both"}: }{non-sequential algorithm for count matrices. 
-    This algorithm keeps total sum constant,
+    This algorithm preserves grand sum and
     cells and individuals among cells of each row are shuffled.}
 
   \item{\code{"c0_both"}: }{non-sequential algorithm for count matrices. 
-    This algorithm keeps total sum constant,
+    This algorithm preserves grand sum and
     cells and individuals among cells of each column are shuffled.}
 }
 }
@@ -356,6 +373,12 @@ null model algorithms as a character vector.
   generating r x c tables with given row and column totals.
   \emph{Applied Statistics} 30, 91--97.
 
+  Strona, G., Nappo, D., Boccacci, F., Fattorini, S. &
+  San-Miguel-Ayanz, J. (2014). A fast and unbiased procedure to
+  randomize ecological binary matrices with fixed row and column
+  totals. \emph{Nature Communications} 5:4114
+  \doi{10.1038/ncomms5114}.
+
   Wright, D.H., Patterson, B.D., Mikkelson, G.M., Cutler, A. & Atmar,
   W. (1998). A comparative analysis of nested subset patterns of species
   composition. \emph{Oecologia} 113, 1--20.
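
A minimal sketch of running one of the algorithms listed above through
the null model machinery (dune data assumed; see oecosimu for the
interpretation of burnin and thinning):

    data(dune)
    nm <- nullmodel(dune, "curveball")   # binary models binarize counts
    sim <- simulate(nm, nsim = 99, burnin = 100, thin = 10)
    ## curveball preserves both row and column frequencies
    all(rowSums(sim[, , 1]) == rowSums(dune > 0))
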
diff --git a/man/decorana.Rd b/man/decorana.Rd
index 6890640..686d2e4 100644
--- a/man/decorana.Rd
+++ b/man/decorana.Rd
@@ -166,14 +166,14 @@ downweight(veg, fraction = 5)
 \seealso{
   For unconstrained ordination, non-metric multidimensional scaling in
   \code{\link{monoMDS}} may be more robust (see also
-  \code{\link{metaMDS}}).  Constrained (or
-  \sQuote{canonical}) correspondence analysis can be made with
-  \code{\link{cca}}.  Orthogonal correspondence analysis can be
-  made with \code{\link[MASS]{corresp}}, or with \code{decorana} or
+  \code{\link{metaMDS}}).  Constrained (or \sQuote{canonical})
+  correspondence analysis can be made with \code{\link{cca}}.
+  Orthogonal correspondence analysis can be made with
+  \code{\link[MASS]{corresp}}, or with \code{decorana} or
   \code{\link{cca}}, but the scaling of results vary (and the one in
-  \code{decorana} corresponds to \code{scaling = -1} in
-  \code{\link{cca}}.).
-  See \code{\link{predict.decorana}} for adding new points to an
+  \code{decorana} corresponds to \code{scaling = "sites"} and
+  \code{hill = TRUE} in \code{\link{cca}}.).  See
+  \code{\link{predict.decorana}} for adding new points to an
   ordination.
 }
 
diff --git a/man/designdist.Rd b/man/designdist.Rd
index 2f5734c..eb36106 100644
--- a/man/designdist.Rd
+++ b/man/designdist.Rd
@@ -15,7 +15,7 @@
 \usage{
 designdist(x, method = "(A+B-2*J)/(A+B)",
            terms = c("binary", "quadratic", "minimum"), 
-           abcd = FALSE, name)
+           abcd = FALSE, alphagamma = FALSE, name)
 }
 
 \arguments{
@@ -34,9 +34,15 @@ designdist(x, method = "(A+B-2*J)/(A+B)",
     data into binary form (shared number of species, and number of
     species for each row). }
   \item{abcd}{Use 2x2 contingency table notation for binary data:
-  \eqn{a} is the number of shared species, \eqn{b} and \eqn{c} are the
-  numbers of species occurring only one of the sites but not in both,
-  and \eqn{d} is the number of species that occur on neither of the sites.}
+    \eqn{a} is the number of shared species, \eqn{b} and \eqn{c} are the
+    numbers of species occurring only one of the sites but not in both,
+    and \eqn{d} is the number of species that occur on neither of the sites.}
+  \item{alphagamma}{Use beta diversity notation with terms
+    \code{alpha} for average alpha diversity for compared sites,
+    \code{gamma} for diversity in pooled sites, and \code{delta} for the
+    absolute difference between the average \code{alpha} and the alpha
+    diversities of the compared sites. Terms \code{A} and
+    \code{B} refer to alpha diversities of compared sites.}
   \item{name}{The name you want to use for your index. The default is to
     combine the \code{method} equation and \code{terms} argument.}
 }
@@ -74,7 +80,19 @@ designdist(x, method = "(A+B-2*J)/(A+B)",
   contingency table notation, you can set \code{abcd = TRUE}. In this
   notation \code{a = J}, \code{b = A-J}, \code{c = B-J}, \code{d = P-A-B+J}. 
   This notation is often used instead of the more
-  tangible default notation for reasons that are opaque to me. 
+  tangible default notation for reasons that are opaque to me.
+
+  With \code{alphagamma = TRUE} it is possible to use beta diversity
+  notation with terms \code{alpha} for average alpha diversity and
+  \code{gamma} for gamma diversity in two compared sites. The terms
+  are calculated as \code{alpha = (A+B)/2}, \code{gamma = A+B-J} and
+  \code{delta = abs(A-B)/2}.  Terms \code{A} and \code{B} are also
+  available and give the alpha diversities of the individual compared
+  sites.  The beta diversity terms may make sense only for binary
+  terms (so that diversities are expressed in numbers of species), but
+  they are calculated for quadratic and minimum terms as well (with a
+  warning).
+
 }
 
 \value{
@@ -96,14 +114,20 @@ designdist(x, method = "(A+B-2*J)/(A+B)",
   function  using compiled code, it is better to use the canned
   alternative.
 }
-\seealso{ \code{\link{vegdist}}, \code{\link{betadiver}}, \code{\link{dist}}. }
+\seealso{ \code{\link{vegdist}}, \code{\link{betadiver}}, \code{\link{dist}},
+  \code{\link{raupcrick}}.}
 \examples{
+data(BCI)
+## Four ways of calculating the same Sørensen dissimilarity
+d0 <- vegdist(BCI, "bray", binary = TRUE)
+d1 <- designdist(BCI, "(A+B-2*J)/(A+B)")
+d2 <- designdist(BCI, "(b+c)/(2*a+b+c)", abcd = TRUE)
+d3 <- designdist(BCI, "gamma/alpha - 1", alphagamma = TRUE)
 ## Arrhenius dissimilarity: the value of z in the species-area model
 ## S = c*A^z when combining two sites of equal areas, where S is the
 ## number of species, A is the area, and c and z are model parameters.
 ## The A below is not the area (which cancels out), but number of
 ## species in one of the sites, as defined in designdist().
-data(BCI)
 dis <- designdist(BCI, "(log(A+B-J)-log(A+B)+log(2))/log(2)")
 ## This can be used in clustering or ordination...
 ordiplot(cmdscale(dis))
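
The four forms above agree algebraically: with alpha = (A+B)/2 and
gamma = A+B-J, gamma/alpha - 1 = (A+B-2*J)/(A+B), the binary Sørensen
dissimilarity. A quick numerical check with the objects defined above:

    max(abs(d1 - d3))   # effectively zero
    max(abs(d0 - d2))
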
diff --git a/man/envfit.Rd b/man/envfit.Rd
index aca9642..62a7255 100644
--- a/man/envfit.Rd
+++ b/man/envfit.Rd
@@ -148,7 +148,7 @@ factorfit(X, P, permutations = 0, strata = NULL, w, ...)
   arrows and class centroids in \code{\link{cca}}.
   For complete
   similarity between fitted vectors and biplot arrows, you should set
-  \code{display = "lc"} (and possibly \code{scaling = 2}).
+  \code{display = "lc"}.
 
   The lengths of arrows for fitted vectors are automatically adjusted
   for the physical size of the plot, and the arrow lengths cannot be
diff --git a/man/isomap.Rd b/man/isomap.Rd
index 5f1f72b..a38ba87 100644
--- a/man/isomap.Rd
+++ b/man/isomap.Rd
@@ -34,14 +34,14 @@ isomapdist(dist, epsilon, k, path = "shortest", fragmentedOK =FALSE, ...)
   \item{x, object}{An \code{isomap} result object.}
   \item{axes}{Number of axes displayed.}
   \item{net}{Draw the net of retained dissimilarities.}
-  \item{n.col}{Colour of drawn net segments.}
-
+  \item{n.col}{Colour of drawn net segments. This can also be a vector
+    that is recycled for points, and the colour of the net segment is
+    a mixture of the colours of the joined points.}
   \item{type}{Plot observations either as \code{"points"},
     \code{"text"} or use \code{"none"} to plot no observations. The
     \code{"text"} will use \code{\link{ordilabel}} if \code{net = TRUE} 
     and \code{\link{ordiplot}} if \code{net = FALSE}, and pass
     extra arguments to these functions.}
-
   \item{\dots}{Other parameters passed to functions. }
 }
 \details{
@@ -127,6 +127,12 @@ lines(tr, pl, col="red")
 pl <- plot(isomap(dis, epsilon=0.45), main="isomap epsilon=0.45")
 lines(tr, pl, col="red")
 par(op)
+## colour points and web by the dominant species
+dom <- apply(BCI, 1, which.max)
+## need nine colours, but default palette  has only eight
+op <- palette(c(palette("default"), "sienna"))
+plot(ord, pch = 16, col = dom, n.col = dom) 
+palette(op)
 }
 \keyword{ multivariate}
 
diff --git a/man/linestack.Rd b/man/linestack.Rd
index a6e992f..fdfeaf1 100644
--- a/man/linestack.Rd
+++ b/man/linestack.Rd
@@ -56,7 +56,7 @@ labs <- expression(Ca^{2+phantom()},
                    NO[3]^{-phantom()},
                    SO[4]^{-2},
                    K^{+phantom()})
-scl <- 1
+scl <- "sites"
 linestack(scores(ord, choices = 1, display = "species", scaling = scl),
           labels = labs, air = 2)
 linestack(scores(ord, choices = 1, display = "site", scaling = scl),
diff --git a/man/metaMDS.Rd b/man/metaMDS.Rd
index 98e66d1..b90b5fb 100644
--- a/man/metaMDS.Rd
+++ b/man/metaMDS.Rd
@@ -25,7 +25,7 @@
   \code{\link[MASS]{isoMDS}} (\pkg{MASS} package). }
 
 \usage{
-metaMDS(comm, distance = "bray", k = 2, trymax = 20, 
+metaMDS(comm, distance = "bray", k = 2, try = 20, trymax = 20, 
     engine = c("monoMDS", "isoMDS"), autotransform =TRUE,
     noshare = (engine == "isoMDS"), wascores = TRUE, expand = TRUE, 
     trace = 1, plot = FALSE, previous.best,  ...)
@@ -40,7 +40,7 @@ metaMDS(comm, distance = "bray", k = 2, trymax = 20,
 metaMDSdist(comm, distance = "bray", autotransform = TRUE, 
     noshare = TRUE, trace = 1, commname, zerodist = "ignore", 
     distfun = vegdist, ...)
-metaMDSiter(dist, k = 2, trymax = 20, trace = 1, plot = FALSE, 
+metaMDSiter(dist, k = 2, try = 20, trymax = 20, trace = 1, plot = FALSE, 
     previous.best, engine = "monoMDS", maxit = 200,
     parallel = getOption("mc.cores"), ...)   
 initMDS(x, k=2)
@@ -58,9 +58,10 @@ metaMDSredist(object, ...)
   \item{k}{Number of dimensions.  NB., the number of points \eqn{n}
     should be \eqn{n > 2k + 1}{n > 2*k + 1}, and preferably higher in
     non-metric MDS.}
-  \item{trymax}{Maximum number of random starts in search of stable
-    solution.}
-
+  \item{try, trymax}{Minimum and maximum numbers of random starts in
+    search of a stable solution. After \code{try} has been reached, the
+    iteration will stop when two convergent solutions have been found
+    or \code{trymax} is reached.}
   \item{engine}{The function used for MDS. The default is to use the
     \code{\link{monoMDS}} function in \pkg{vegan}, but for backward
     compatibility it is also possible to use \code{\link{isoMDS}} of
@@ -211,21 +212,19 @@ metaMDSredist(object, ...)
     solution but often close to a local optimum), or use the
     \code{previous.best} solution if supplied, and take its solution
     as the standard (\code{Run 0}). Then \code{metaMDS} starts NMDS
-    from several random starts (maximum number is given by
-    \code{trymax}). Function \code{\link{monoMDS}} defaults random
-    starts, but \code{\link{isoMDS}} defaults to
-    \code{\link{cmdscale}}, and there random starts are generated by
-    \code{initMDS}. If a solution is better (has a lower stress) than
-    the previous standard, it is taken as the new standard. If the
-    solution is better or close to a standard, \code{metaMDS} compares
-    two solutions using Procrustes analysis (function
-    \code{\link{procrustes}} with option \code{symmetric = TRUE}). If
-    the solutions are very similar in their Procrustes \code{rmse} and
-    the largest residual is very small, the solutions are regarded as
-    convergent and the better one is taken as the new standard. Please
-    note that the conditions are stringent, and you may have found
-    good and relatively stable solutions although the function is not
-    yet satisfied. Setting \code{trace = TRUE} will monitor the final
+    from several random starts (minimum number is given by \code{try}
+    and maximum number by \code{trymax}). These random starts are
+    generated by \code{initMDS}. If a solution is better (has a lower
+    stress) than the previous standard, it is taken as the new
+    standard. If the solution is better or close to a standard,
+    \code{metaMDS} compares two solutions using Procrustes analysis
+    (function \code{\link{procrustes}} with option
+    \code{symmetric = TRUE}). If the solutions are very similar in their
+    Procrustes \code{rmse} and the largest residual is very small, the
+    solutions are regarded as convergent and the better one is taken as
+    the new standard.  The conditions are stringent, and you may have
+    found good and relatively stable solutions although the function is
+    not yet satisfied. Setting \code{trace = TRUE} will monitor the final
     stresses, and \code{plot = TRUE} will display Procrustes overlay
     plots from each comparison. This step is performed using
     \code{metaMDSiter}. This is the only step performed if input data
@@ -265,6 +264,38 @@ metaMDSredist(object, ...)
 } 
 }
 
+\section{Convergence Problems}{
+
+    The function tries hard to find two convergent solutions, but it
+    may fail. With default \code{engine = "monoMDS"} the function will
+    tabulate the stopping criteria used, so that you can see which
+    criterion should be made more stringent. The criteria can be given
+    as arguments to \code{metaMDS} and their current values are
+    described in \code{\link{monoMDS}}. In particular, if you reach
+    the maximum number of iterations, you should increase the value of
+    \code{maxit}. You may ask for a larger number of random starts
+    without losing the old ones by giving the previous solution in
+    argument \code{previous.best}.
+
+    In addition to too slack convergence criteria and too few random
+    starts, a wrong number of dimensions (argument \code{k})
+    is the most common reason for not finding convergent
+    solutions. NMDS is usually run with a low number of dimensions
+    (\code{k=2} or \code{k=3}), and for complex data increasing
+    \code{k} by one may help. If you run NMDS with a much higher number
+    of dimensions (say, \code{k=10} or more), you should reconsider
+    what you are doing and drastically reduce \code{k}. For very
+    heterogeneous data sets with partial disjunctions, it may help to
+    set \code{stepacross}, but for most data sets the default
+    \code{weakties = TRUE} is sufficient.
+
+    Please note that you can give all arguments of other
+    \code{metaMDS*} functions and NMDS engine (default
+    \code{\link{monoMDS}}) in your \code{metaMDS} command, and you
+    should check the documentation of these functions for details.
+
+}
+
 \value{ Function \code{metaMDS} returns an object of class
   \code{metaMDS}. The final site ordination is stored in the item
   \code{points}, and species ordination in the item \code{species},
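
A minimal sketch of the restart workflow described in the Convergence
Problems section (dune data assumed; maxit is passed on to the monoMDS
engine):

    m0 <- metaMDS(dune, k = 2, trymax = 50)
    ## not converged? keep the old starts and search harder
    m1 <- metaMDS(dune, k = 2, previous.best = m0, trymax = 200, maxit = 500)
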
diff --git a/man/monoMDS.Rd b/man/monoMDS.Rd
index cbdac7c..bd51fe7 100644
--- a/man/monoMDS.Rd
+++ b/man/monoMDS.Rd
@@ -142,6 +142,43 @@ monoMDS(dist, y, k = 2, model = c("global", "local", "linear", "hybrid"),
 
 }
 
+\section{Convergence Criteria}{
+
+  NMDS is iterative, and the function stops when any of its
+  convergence criteria is met. There is actually no criterion of
+  assured convergence, and any solution can be a local optimum. You
+  should compare several random starts (or use \code{monoMDS} via
+    \code{\link{metaMDS}}) to assess if the solution is likely a global
+  optimum.
+
+  The stopping criteria are:
+  \describe{
+
+     \item{\code{maxit}:}{ Maximum number of iterations. Reaching this
+     criterion means that a solution was almost certainly not found,
+     and \code{maxit} should be increased.}
+
+     \item{\code{smin}:}{ Minimum stress. If stress is nearly zero,
+     the fit is almost perfect. Usually this means that the data set is
+     too small for the requested analysis, and there may be several
+     different solutions that are almost as perfect. You should reduce
+     the number of dimensions (\code{k}), get more data (more
+     observations) or use some other method, such as metric scaling
+     (\code{\link{cmdscale}}, \code{\link{wcmdscale}}).}
+
+     \item{\code{sratmax}:}{ Change in stress. Values close to one
+     mean almost unchanged stress. This may mean a solution, but it
+     can also signal stranding on a suboptimal solution with a flat stress
+     surface.}
+
+     \item{\code{sfgrmin}:}{ Minimum scale factor. Values close to
+     zero mean almost unchanged configuration. This may mean a
+     solution, but will also happen in local optima.}
+
+  }
+
+}
+
 \value{ Returns an object of class \code{"monoMDS"}. The final scores
   are returned in item \code{points} (function \code{scores} extracts
   these results), and the stress in item \code{stress}. In addition,
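
A minimal sketch of tightening the stopping criteria named above,
assuming the dune data:

    d <- vegdist(dune)
    m <- monoMDS(d, k = 2, maxit = 1000, sratmax = 0.999999, sfgrmin = 1e-7)
    m$stress
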
diff --git a/man/mrpp.Rd b/man/mrpp.Rd
index 11808c6..bb6a8a8 100644
--- a/man/mrpp.Rd
+++ b/man/mrpp.Rd
@@ -176,7 +176,7 @@ This difference may be one of location (differences in mean) or one of
 spread (differences in within-group distance). That is, it may find a
 significant difference between two groups simply because one of those
 groups has greater dissimilarities among its sampling units. Most
-\code{mrpp} models can be analysed with \code{\link{adonis}} which seems
+\code{mrpp} models can be analysed with \code{\link{adonis2}} which seems
 not to suffer from the same problems as \code{mrpp} and is a more robust
 alternative.
 }
@@ -185,7 +185,7 @@ alternative.
   \code{\link{mantel}} for comparing dissimilarities against continuous
   variables, and
   \code{\link{vegdist}} for obtaining dissimilarities,
-  \code{\link{adonis}} is a more robust alternative in most cases.
+  \code{\link{adonis2}} is a more robust alternative in most cases.
 }
 \examples{
 data(dune)
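
A minimal sketch separating the location and spread effects discussed
above (dune.env$Management as an assumed grouping):

    data(dune.env)
    d <- vegdist(dune)
    adonis2(d ~ Management, data = dune.env)    # robust alternative to mrpp
    anova(betadisper(d, dune.env$Management))   # within-group spread only
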
diff --git a/man/multipart.Rd b/man/multipart.Rd
index 5b8cd35..8e7cba1 100644
--- a/man/multipart.Rd
+++ b/man/multipart.Rd
@@ -6,73 +6,98 @@
 \title{Multiplicative Diversity Partitioning}
 
 \description{
-In multiplicative diversity partitioning, mean values of alpha diversity at lower levels of a sampling 
-hierarchy are compared to the total diversity in the entire data set or the pooled samples (gamma diversity). 
+In multiplicative diversity partitioning, mean values of alpha diversity at lower levels of a sampling
+hierarchy are compared to the total diversity in the entire data set or the pooled samples (gamma diversity).
 }
 \usage{
 multipart(...)
 \method{multipart}{default}(y, x, index=c("renyi", "tsallis"), scales = 1,
-    global = FALSE, relative = FALSE, nsimul=99, ...)
+    global = FALSE, relative = FALSE, nsimul=99, method = "r2dtable", ...)
 \method{multipart}{formula}(formula, data, index=c("renyi", "tsallis"), scales = 1,
-    global = FALSE, relative = FALSE, nsimul=99, ...)
+    global = FALSE, relative = FALSE, nsimul=99, method = "r2dtable", ...)
 }
 \arguments{
   \item{y}{A community matrix.}
+
   \item{x}{A matrix with same number of rows as in \code{y}, columns
     coding the levels of sampling hierarchy. The number of groups within
     the hierarchy must decrease from left to right. If \code{x} is missing,
     two levels are assumed: each row is a group in the first level, and
     all rows are in the same group in the second level.}
-  \item{formula}{A two sided model formula in the form \code{y ~ x}, where \code{y} 
-    is the community data matrix with samples as rows and species as column. Right 
-    hand side (\code{x}) must be grouping variables referring to levels of sampling hierarchy, 
-    terms from right to left will be treated as nested (first column is the lowest, 
-    last is the highest level, at least two levels specified). Interaction terms are not allowed.}
-  \item{data}{A data frame where to look for variables defined in the right hand side 
-    of \code{formula}. If missing, variables are looked in the global environment.}
+
+  \item{formula}{A two-sided model formula in the form \code{y ~ x},
+    where \code{y} is the community data matrix with samples as rows and
+    species as columns. The right-hand side (\code{x}) must consist of
+    grouping variables referring to levels of the sampling hierarchy;
+    terms from right to left will be treated as nested (the first column
+    is the lowest and the last is the highest level, with at least two
+    levels specified). Interaction terms are not allowed.}
+
+  \item{data}{A data frame in which to look for the variables defined
+    in the right-hand side of \code{formula}. If missing, the variables
+    are looked up in the global environment.}
+
   \item{index}{Character, the entropy index to be calculated (see Details).}
+
   \item{relative}{Logical, if \code{TRUE} then beta diversity is
     standardized by its maximum (see Details).}
-  \item{scales}{Numeric, of length 1, the order of the generalized diversity index 
-    to be used.}
-  \item{global}{Logical, indicates the calculation of beta diversity values, see Details.}
-  \item{nsimul}{Number of permutation to use if \code{matr} is not of class 'permat'.
-    If \code{nsimul = 0}, only the \code{FUN} argument is evaluated. It is thus possible
-    to reuse the statistic values without using a null model.}
-  \item{\dots}{Other arguments passed to \code{\link{oecosimu}}, i.e. 
+
+  \item{scales}{Numeric, of length 1, the order of the generalized
+    diversity index to be used.}
+
+  \item{global}{Logical, indicates whether beta diversity values are
+    calculated relative to global or local gamma values, see Details.}
+
+  \item{nsimul}{Number of permutations to use.  If \code{nsimul = 0},
+    only the \code{FUN} argument is evaluated.
+    It is thus possible to reuse the statistic values
+    without a null model.}
+
+  \item{method}{Null model method: either a name (character string) of
+    a method defined in \code{\link{make.commsim}} or a
+    \code{\link{commsim}} function.
+    The default \code{"r2dtable"} keeps row sums and column sums fixed.
+    See \code{\link{oecosimu}} for Details and Examples.}
+
+  \item{\dots}{Other arguments passed to \code{\link{oecosimu}}, e.g.
     \code{method}, \code{thin} or \code{burnin}.}
 }
 \details{
-Multiplicative diversity partitioning is based on Whittaker's (1972) ideas, that has 
-recently been generalised to one parametric diversity families (i.e. \enc{Rényi}{Renyi} 
-and Tsallis) by Jost (2006, 2007). Jost recommends to use the numbers equivalents 
-(Hill numbers), instead of pure diversities, and proofs, that this satisfies the 
+Multiplicative diversity partitioning is based on Whittaker's (1972) ideas,
+which have recently been generalised to one-parameter diversity families
+(i.e. \enc{Rényi}{Renyi} and Tsallis) by Jost (2006, 2007).
+Jost recommends using the numbers equivalents (Hill numbers)
+instead of pure diversities, and proves that this satisfies the
 multiplicative partitioning requirements.
 
-The current implementation of \code{multipart} calculates Hill numbers based on the 
-functions \code{\link{renyi}} and \code{\link{tsallis}} (provided as \code{index} argument). 
-If values for more than one \code{scales} are desired, it should be done in separate 
-runs, because it adds extra dimensionality to the implementation, which has not been resolved 
-efficiently.
+The current implementation of \code{multipart} calculates Hill numbers
+based on the functions \code{\link{renyi}} and \code{\link{tsallis}}
+(chosen by the \code{index} argument).
+If values for more than one \code{scales} are desired, they should be
+calculated in separate runs, because multiple scales would add extra
+dimensionality to the implementation, which has not been resolved
+efficiently.
 
-Alpha diversities are then the averages of these Hill numbers for each hierarchy levels, 
-the global gamma diversity is the alpha value calculated for the highest hierarchy level. 
+Alpha diversities are then the averages of these Hill numbers for
+each hierarchy level, and the global gamma diversity is the alpha
+value calculated for the highest hierarchy level.
 When \code{global = TRUE}, beta is calculated relative to the global gamma value:
 \deqn{\beta_i = \gamma / \alpha_{i}}{beta_i = gamma / alpha_i}
-when \code{global = FALSE}, beta is calculated relative to local gamma values (local gamma
-means the diversity calculated for a particular cluster based on the pooled abundance vector):
+when \code{global = FALSE}, beta is calculated relative to local
+gamma values (local gamma means the diversity calculated for a particular
+cluster based on the pooled abundance vector):
\deqn{\beta_{ij} = \alpha_{(i+1)j} / mean(\alpha_{ij})}{beta_ij = alpha_(i+1)j / mean(alpha_ij)}
-where \eqn{j} is a particular cluster at hierarchy level \eqn{i}. Then beta diversity value for
-level \eqn{i} is the mean of the beta values of the clusters at that level,
-\eqn{\beta_{i} = mean(\beta_{ij})}.
+where \eqn{j} is a particular cluster at hierarchy level \eqn{i}.
+Then the beta diversity value for level \eqn{i} is the mean of the beta
+values of the clusters at that level, \eqn{\beta_{i} = mean(\beta_{ij})}.
 
 If \code{relative = TRUE}, the respective beta diversity values are
 standardized by their maximum possible values (\eqn{mean(\beta_{ij}) / \beta_{max,ij}})
-given as \eqn{\beta_{max,ij} = n_{j}} (the number of lower level units in a given cluster \eqn{j}).
+given as \eqn{\beta_{max,ij} = n_{j}} (the number of lower level units
+in a given cluster \eqn{j}).
 
-The expected diversity components are calculated \code{nsimul} times by individual based 
-randomisation of the community data matrix. This is done by the \code{"r2dtable"} method
-in \code{\link{oecosimu}} by default.
+The expected diversity components are calculated \code{nsimul}
+times by individual-based randomization of the community data matrix.
+This is done by the \code{"r2dtable"} method in \code{\link{oecosimu}} by default.
 }
 \value{
 An object of class 'multipart' with same structure as 'oecosimu' objects.
diff --git a/man/nullmodel.Rd b/man/nullmodel.Rd
index 964c033..e8fa444 100644
--- a/man/nullmodel.Rd
+++ b/man/nullmodel.Rd
@@ -1,10 +1,12 @@
 \name{nullmodel}
 \alias{nullmodel}
+\alias{simmat}
 \alias{print.nullmodel}
 \alias{simulate.nullmodel}
 \alias{update.nullmodel}
 \alias{str.nullmodel}
 \alias{print.simmat}
+\alias{smbind}
 \title{
 Null Model and Simulation
 }
@@ -14,15 +16,17 @@ which can serve as a basis for Null Model simulation
 via the \code{\link{simulate}} method.
 The \code{\link{update}} method updates the nullmodel
 object without sampling (effective for sequential algorithms).
+\code{smbind} binds together multiple \code{simmat} objects.
 }
 \usage{
 nullmodel(x, method)
 \method{print}{nullmodel}(x, ...)
-\method{simulate}{nullmodel}(object, nsim = 1, 
+\method{simulate}{nullmodel}(object, nsim = 1,
 seed = NULL, burnin = 0, thin = 1, ...)
-\method{update}{nullmodel}(object, nsim = 1, 
+\method{update}{nullmodel}(object, nsim = 1,
 seed = NULL, ...)
 \method{print}{simmat}(x, ...)
+smbind(object, ..., MARGIN, strict = TRUE)
 }
 \arguments{
   \item{x}{
@@ -37,6 +41,8 @@ It can be a user supplied object of class \code{commsim}.
   \item{object}{
 An object of class \code{nullmodel} returned by
 the function \code{nullmodel}.
+In case of \code{smbind} it is a \code{simmat} object
+as returned by the \code{update} or \code{simulate} methods.
 }
   \item{nsim}{
 Positive integer, the number of simulated matrices to return.
@@ -45,17 +51,17 @@ burnin steps made for sequential algorithms
 to update the status of the input model \code{object}.
 }
   \item{seed}{
-An object specifying if and how the random number 
+An object specifying if and how the random number
 generator should be initialized ("seeded").
-Either \code{NULL} or an integer that will be 
-used in a call to \code{\link{set.seed}} before 
-simulating the matrices. 
-If set, the value is saved as the 
-\code{"seed"} attribute of the returned value. 
-The default, \code{NULL} will not change the 
-random generator state, and return 
+Either \code{NULL} or an integer that will be
+used in a call to \code{\link{set.seed}} before
+simulating the matrices.
+If set, the value is saved as the
+\code{"seed"} attribute of the returned value.
+The default, \code{NULL} will not change the
+random generator state, and return
 \code{\link{.Random.seed}} as the \code{"seed"}
- attribute, see Value. 
+ attribute, see Value.
 }
   \item{burnin}{
 Nonnegative integer, specifying the number of steps
@@ -69,8 +75,24 @@ made between each returned matrix.
 Active only for sequential null model algorithms.
 Ignored for non-sequential null model algorithms.
 }
+  \item{MARGIN}{
+Integer, indicating the dimension over which multiple
+\code{simmat} objects are to be bound together by \code{smbind}.
+1: matrices are stacked (row bound), 2: matrices are column bound,
+3: iterations are combined. Needs to be of length 1.
+The other dimensions are expected to match across the objects.
+}
+  \item{strict}{
+Logical, indicating whether consistency of the time series attributes
+(\code{"start"}, \code{"end"}, \code{"thin"}, and number of simulated matrices)
+of \code{simmat} objects is strictly enforced when
+binding multiple objects together using \code{smbind}.
+Applies only to input objects based on sequential
+null model algorithms.
+}
   \item{\dots}{
 Additional arguments supplied to algorithms.
+In case of \code{smbind} it can contain multiple \code{simmat} objects.
 }
 }
 \details{
@@ -91,7 +113,7 @@ and make further simulations, or use
 increased thinning value if desired.
 
 The \code{update} method makes burnin steps in case
-of sequential algorithms to update the status of the 
+of sequential algorithms to update the status of the
 input model without any attempt to return matrices.
 For non-sequential algorithms the method does nothing.
 
@@ -103,6 +125,21 @@ diagnostics, it is recommended to use the
 The input nullmodel object is updated, so further
 samples can be simulated if desired without having
 to start the process all over again. See Examples.
+
+The \code{smbind} function can be used to combine multiple
+\code{simmat} objects. This comes in handy when null model
+simulations are stratified by sites (\code{MARGIN = 1})
+or by species (\code{MARGIN = 2}), or when
+multiple objects are returned by identical/consistent settings,
+e.g. during parallel computations (\code{MARGIN = 3}).
+Sanity checks are made to ensure that combining multiple
+objects is sensible, but it is the user's responsibility
+to check that the simulated matrices are independent
+and that the null distribution has converged
+in the case of sequential null model algorithms.
+The \code{strict = FALSE} setting can relax
+checks regarding start, end, and thinning values
+for sequential null models.
 }
 \value{
 The function \code{nullmodel} returns an object of class \code{nullmodel}.
@@ -119,10 +156,10 @@ It is a set of objects sharing the same environment:
   \item{\code{fill}: }{number of nonzero cells in the matrix.}
   \item{\code{commsim}: }{the \code{commsim} object as a result
     of the \code{method} argument.}
-  \item{\code{state}: }{current state of the permutations, 
+  \item{\code{state}: }{current state of the permutations,
     a matrix similar to the original.
     It is \code{NULL} for non-sequential algorithms.}
-  \item{\code{iter}: }{current number of iterations 
+  \item{\code{iter}: }{current number of iterations
   for sequential algorithms.
     It is \code{NULL} for non-sequential algorithms.}
 }
@@ -134,16 +171,25 @@ corresponding to \code{nsim} argument).
 The \code{update} method returns the current state (last updated matrix)
 invisibly, and update the input object for sequential algorithms.
 For non sequential algorithms, it returns \code{NULL}.
+
+The \code{smbind} function returns an object of class \code{simmat}.
 }
 \author{
 Jari Oksanen and Peter Solymos
 }
 \seealso{
-\code{\link{commsim}}, \code{\link{make.commsim}}, 
+\code{\link{commsim}}, \code{\link{make.commsim}},
 \code{\link{permatfull}}, \code{\link{permatswap}}
 }
+\note{
+Care must be taken when the input matrix contains only a single
+row or column. Such input is invalid for swapping algorithms and for
+algorithms based on the hypergeometric distribution (calling
+\code{\link{r2dtable}}).
+This also applies to cases when the input is stratified into subsets.
+}
 \examples{
-x <- matrix(rbinom(12*10, 1, 0.5)*rpois(12*10, 3), 12, 10)
+data(mite)
+x <- as.matrix(mite)[1:12, 21:30]
 
 ## non-sequential nullmodel
 (nm <- nullmodel(x, "r00"))
@@ -163,6 +209,54 @@ x <- matrix(rbinom(12*10, 1, 0.5)*rpois(12*10, 3), 12, 10)
 (nm <- nullmodel(x, "swap"))
 nm <- update(nm, nsim=10)
 (sm2 <- simulate(nm, nsim=10, thin=5))
+
+## combining multiple simmat objects
+
+## stratification
+nm1 <- nullmodel(x[1:6,], "r00")
+sm1 <- simulate(nm1, nsim=10)
+nm2 <- nullmodel(x[7:12,], "r00")
+sm2 <- simulate(nm2, nsim=10)
+smbind(sm1, sm2, MARGIN=1)
+
+## binding subsequent samples from sequential algorithms
+## start, end, thin retained
+nm <- nullmodel(x, "swap")
+nm <- update(nm, nsim=10)
+sm1 <- simulate(nm, nsim=10, thin=5)
+sm2 <- simulate(nm, nsim=20, thin=5)
+sm3 <- simulate(nm, nsim=10, thin=5)
+smbind(sm3, sm2, sm1, MARGIN=3)
+
+## 'replicate' based usage which is similar to the output
+## of 'parLapply' or 'mclapply' in the 'parallel' package
+## start, end, thin are set, also noting number of chains
+smfun <- function(x, burnin, nsim, thin) {
+    nm <- nullmodel(x, "swap")
+    nm <- update(nm, nsim=burnin)
+    simulate(nm, nsim=nsim, thin=thin)
+}
+smlist <- replicate(3, smfun(x, burnin=50, nsim=10, thin=5), simplify=FALSE)
+smbind(smlist, MARGIN=3) # Number of permuted matrices = 30
+
+\dontrun{
+## parallel null model calculations
+library(parallel)
+
+if (.Platform$OS.type == "unix") {
+## forking on Unix systems
+smlist <- mclapply(1:3, function(i) smfun(x, burnin=50, nsim=10, thin=5))
+smbind(smlist, MARGIN=3)
+}
+
+## socket type cluster, works on all platforms
+cl <- makeCluster(3)
+clusterEvalQ(cl, library(vegan))
+clusterExport(cl, c("smfun", "x"))
+smlist <- parLapply(cl, 1:3, function(i) smfun(x, burnin=50, nsim=10, thin=5))
+stopCluster(cl)
+smbind(smlist, MARGIN=3)
+}
 }
 \keyword{ multivariate }
 \keyword{ datagen }
diff --git a/man/ordiarrows.Rd b/man/ordiarrows.Rd
index 2d0052e..8519cec 100644
--- a/man/ordiarrows.Rd
+++ b/man/ordiarrows.Rd
@@ -12,9 +12,9 @@
 
 \usage{
 ordiarrows(ord, groups, levels, replicates, order.by, display = "sites",
-         show.groups, startmark, label = FALSE, ...)
+         col = 1, show.groups, startmark, label = FALSE, length = 0.1, ...)
 ordisegments(ord, groups, levels, replicates, order.by, display = "sites",
-         show.groups, label = FALSE, ...)
+         col = 1, show.groups, label = FALSE, ...)
 ordigrid(ord, levels, replicates, display = "sites",  lty = c(1,1), 
          col = c(1,1), lwd = c(1,1), ...)
 }
@@ -48,8 +48,13 @@ ordigrid(ord, levels, replicates, display = "sites",  lty = c(1,1),
     will draw a circle.  For other plotting characters, see \code{pch}
     in \code{\link{points}}. }
 
-  \item{col}{Colour of lines in \code{ordigrid}.  This argument is
-    also passed to other functions to change the colour of lines.}
+  \item{col}{Colour of lines, \code{label} borders and
+    \code{startmark} in \code{ordiarrows} and
+    \code{ordisegments}. This can be a vector recycled for
+    \code{groups}. In \code{ordigrid} it can be a vector of length 2
+    used for \code{levels} and \code{replicates}.}
+
+  \item{length}{Length of edges of the arrow head (in inches).}
 
   \item{lty, lwd}{Line type, line width used for 
     \code{level}s and \code{replicate}s in \code{ordigrid}.}
@@ -82,8 +87,9 @@ ordigrid(ord, levels, replicates, display = "sites",  lty = c(1,1),
 example(pyrifos)
 mod <- rda(pyrifos)
 plot(mod, type = "n")
-## Annual succession by ditches
-ordiarrows(mod, ditch, label = TRUE)
+## Annual succession by ditches, colour by dose
+ordiarrows(mod, ditch, label = TRUE, col = as.numeric(dose))
+legend("topright", levels(dose), lty=1, col=1:5, title="Dose")
 ## Show only control and highest Pyrifos treatment
 plot(mod, type = "n")
 ordiarrows(mod, ditch, label = TRUE, 
diff --git a/man/ordihull.Rd b/man/ordihull.Rd
index a0ca353..164f54d 100644
--- a/man/ordihull.Rd
+++ b/man/ordihull.Rd
@@ -2,6 +2,7 @@
 \alias{ordihull}
 \alias{ordispider}
 \alias{ordiellipse}
+\alias{ordibar}
 \alias{ordicluster}
 \alias{weights.cca}
 \alias{weights.rda}
@@ -20,19 +21,24 @@
 
 \usage{
 ordihull(ord, groups, display = "sites", draw = c("lines","polygon", "none"),
-         col = NULL, alpha = 127, show.groups, label = FALSE,  ...)
-ordiellipse(ord, groups, display="sites", kind = c("sd","se"), conf,
-         draw = c("lines","polygon", "none"), w = weights(ord, display),
-         col = NULL, alpha = 127, show.groups, label = FALSE, ...)
+         col = NULL, alpha = 127, show.groups, label = FALSE,
+         border = NULL, lty = NULL, lwd = NULL, ...)
+ordiellipse(ord, groups, display="sites", kind = c("sd","se", "ehull"),
+         conf, draw = c("lines","polygon", "none"),
+	 w = weights(ord, display), col = NULL, alpha = 127, show.groups,
+	 label = FALSE, border = NULL, lty = NULL, lwd=NULL, ...)
+ordibar(ord, groups, display = "sites", kind = c("sd", "se"), conf,
+         w = weights(ord, display), col = 1, show.groups, label = FALSE,
+	 lwd = NULL, length = 0,  ...)
 ordispider(ord, groups, display="sites", w = weights(ord, display),
 	 spiders = c("centroid", "median"),  show.groups, 
-         label = FALSE, ...)
+         label = FALSE, col = NULL, lty = NULL, lwd = NULL, ...)
 ordicluster(ord, cluster, prune = 0, display = "sites",
-         w = weights(ord, display), ...)
+         w = weights(ord, display), col = 1, ...)
 \method{summary}{ordihull}(object, ...)
 \method{summary}{ordiellipse}(object, ...)
-ordiareatest(ord, groups, area = c("hull", "ellipse"), permutations = 999,
-         parallel = getOption("mc.cores"), ...)
+ordiareatest(ord, groups, area = c("hull", "ellipse"), kind = "sd",
+         permutations = 999, parallel = getOption("mc.cores"), ...)
 }
 
 \arguments{
@@ -52,7 +58,13 @@ ordiareatest(ord, groups, area = c("hull", "ellipse"), permutations = 999,
     \code{ordiellipse}.  When \code{draw = "polygon"}, the colour of
     bordering lines can be set with argument \code{border} of the
     \code{\link{polygon}} function. For other functions the effect
-    depends on the underlining functions this argument is passed to.}
+    depends on the underlying functions this argument is passed to.
+    When multiple values of \code{col} are specified, these are used
+    for each element of \code{names(table(groups))} (in that order);
+    shorter vectors are recycled. Function \code{ordicluster} has
+    no \code{groups}, and there the argument will be recycled for
+    points, and the colour of connecting lines is a mixture of the
+    colours of the points in the cluster.}
 
   \item{alpha}{Transparency of the fill \code{col}our with \code{draw
     = "polygon"} in \code{ordihull} and \code{ordiellipse}.  The
@@ -75,9 +87,9 @@ ordiareatest(ord, groups, area = c("hull", "ellipse"), permutations = 999,
     and \code{\link{decorana}} results, unless undone by the
     user. \code{w=NULL} sets equal weights to all points. }
 
-  \item{kind}{Whether standard deviations of points (\code{sd}) or
-    standard deviations of their (weighted) averages (\code{se}) are
-    used.}
+  \item{kind}{Draw standard deviations of points (\code{sd}), standard
+    errors (\code{se}) or ellipsoid hulls that enclose all points in
+    the group (\code{ehull}).}
 
   \item{conf}{Confidence limit for ellipses, e.g. 0.95. If given, the
     corresponding \code{sd} or \code{se} is multiplied with the
@@ -111,30 +123,49 @@ ordiareatest(ord, groups, area = c("hull", "ellipse"), permutations = 999,
     cluster.  With \code{parallel = 1} uses ordinary, non-parallel
     processing. The parallel processing is done with \pkg{parallel}
     package.}
+    
+  \item{lty, lwd, border}{Vectors of these parameters can be supplied
+    and will be applied (if appropriate) for each element of
+    \code{names(table(groups))} (in that order). Shorter vectors will be
+    recycled.}
+
+  \item{length}{Width (in inches) of the small bars (\dQuote{caps}) at the
+    ends of the bar segment (passed to \code{\link{arrows}}).}
 
   \item{\dots}{Parameters passed to graphical functions or to
     \code{\link{scores}} to select axes and scaling etc. } 
 }
 
 \details{
+
   Function \code{ordihull} draws \code{\link{lines}} or
   \code{\link{polygon}}s for the convex
   hulls found by function \code{\link{chull}} encircling
   the items in the groups. 
-  
-  Function \code{ordiellipse} draws \code{\link{lines}} or
-  \code{\link{polygon}}s for dispersion ellipses
-  using either standard deviation of point scores or standard error of
-  the (weighted) average of scores, and the (weighted) correlation
-  defines the direction of the principal axis of the ellipse. 
-  An ellipsoid hull can be drawn with function
-  \code{\link[cluster]{ellipsoidhull}} of package \pkg{cluster}.
 
-  Function \code{ordihull} and \code{ordiellipse} return invisibly an
+  Function \code{ordiellipse} draws \code{\link{lines}} or
+  \code{\link{polygon}}s for ellipses by \code{groups}. The function
+  can either draw standard deviation of point scores
+  (\code{kind="sd"}) or standard error of the (weighted) average of
+  scores (\code{kind="se"}), and the (weighted) correlation defines
+  the direction of the principal axis of the ellipse. With
+  \code{kind="ehull"} the function draws an ellipse that encloses all
+  points of a group using \code{\link[cluster]{ellipsoidhull}}
+  (\pkg{cluster} package).
+
+  Function \code{ordibar} draws crossed \dQuote{error bars} using
+  either the standard deviation of point scores or the standard error
+  of the (weighted) average of scores. These are the principal axes of
+  the corresponding \code{ordiellipse}, and are found by principal
+  component analysis of the (weighted) covariance matrix.
+
+  Functions \code{ordihull} and \code{ordiellipse} return invisibly an
   object that has a \code{summary} method that returns the coordinates
   of centroids and areas of the hulls or ellipses. Function
   \code{ordiareatest} studies the one-sided hypothesis that these
-  areas are smaller than with randomized \code{groups}.
+  areas are smaller than with randomized \code{groups}. Argument
+  \code{kind} can be used to select the kind of ellipse, and has no
+  effect with convex hulls.
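+
+  For example, the ellipse areas can be tested as (a sketch; \code{mod}
+  and \code{Management} as created in the Examples below):
+
+\preformatted{
+## one-sided test: are the standard-error ellipses smaller than
+## expected with randomized groups?
+ordiareatest(mod, Management, area = "ellipse", kind = "se",
+             permutations = 99)
+}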
 
   Function \code{ordispider} draws a \sQuote{spider} diagram where
   each point is connected to the group centroid with
@@ -165,15 +196,16 @@ ordiareatest(ord, groups, area = c("hull", "ellipse"), permutations = 999,
 \value{
 
   Functions \code{ordihull}, \code{ordiellipse} and \code{ordispider}
-  return the \code{\link{invisible}} plotting structure. 
+  return the \code{\link{invisible}} plotting structure.
 
Function \code{ordispider} returns the coordinates to which each
   point is connected (centroids or \sQuote{LC} scores).
 
-  Function \code{ordihull} returns a list of coordinates of the hulls
-  (which can be extracted with \code{scores}), and \code{ordiellipse}
-  returns a list of covariance matrices and scales used in drawing the
-  ellipses.
+  Function \code{ordihull} and \code{ordiellipse} return invisibly an
+  object that has a \code{summary} method that returns the coordinates
+  of centroids and areas of the hulls or ellipses. Function
+  \code{ordiareatest} studies the one-sided hypothesis that these
+  areas are smaller than with randomized \code{groups}.
 
 }
 
@@ -183,34 +215,42 @@ ordiareatest(ord, groups, area = c("hull", "ellipse"), permutations = 999,
   and you may wish to change the default values in
   \code{\link{lines}}, \code{\link{segments}} and
   \code{\link{polygon}}. You can pass parameters to
-  \code{\link{scores}} as well. Underlying function for
-  \code{ordihull} is \code{\link{chull}}.  }
+  \code{\link{scores}} as well. The underlying function for
+  \code{ordihull} is \code{\link{chull}}. The underlying function for
+  ellipsoid hulls in \code{ordiellipse} is
+  \code{\link[cluster]{ellipsoidhull}}. }
 
 \examples{
 data(dune)
 data(dune.env)
 mod <- cca(dune ~ Management, dune.env)
 attach(dune.env)
-## pass non-graphical arguments without warnings
-plot(mod, type="n", scaling = 3)
+plot(mod, type="n", scaling = "symmetric")
 ## Catch the invisible result of ordihull...
-pl <- ordihull(mod, Management, scaling = 3, label = TRUE)
+pl <- ordihull(mod, Management, scaling = "symmetric", label = TRUE)
 ## ... and find centres and areas of the hulls
 summary(pl)
-## use ordispider to label and mark the hull
+## use more colours and add ellipsoid hulls
 plot(mod, type = "n")
-pl <- ordihull(mod, Management, scaling = 3)
-ordispider(pl, col="red", lty=3, label = TRUE )
+pl <- ordihull(mod, Management, scaling = "symmetric", col = 1:4,
+  draw="polygon", label =TRUE)
+ordiellipse(mod, Management, scaling = "symmetric", kind = "ehull",
+  col = 1:4, lwd=3)
 ## ordispider to connect WA and LC scores
 plot(mod, dis=c("wa","lc"), type="p")
 ordispider(mod)
 ## Other types of plots
 plot(mod, type = "p", display="sites")
-ordicluster(mod, hclust(vegdist(dune)), prune=3, col = "blue")
+cl <- hclust(vegdist(dune))
+ordicluster(mod, cl, prune=3, col = cutree(cl, 4))
+## confidence ellipse: location of the class centroids
 plot(mod, type="n", display = "sites")
-text(mod, display="sites", labels = as.character(Management))
-pl <- ordiellipse(mod, Management, kind="se", conf=0.95, lwd=2, draw = "polygon", 
-  col="skyblue", border = "blue")
+text(mod, display="sites", labels = as.character(Management),
+  col=as.numeric(Management))
+pl <- ordiellipse(mod, Management, kind="se", conf=0.95, lwd=2,
+  draw = "polygon", col=1:4, border=1:4, alpha=63)
 summary(pl)
+## add confidence bars
+ordibar(mod, Management, kind="se", conf=0.95, lwd=2, col=1:4, label=TRUE)
 }
 \keyword{aplot }
diff --git a/man/ordipointlabel.Rd b/man/ordipointlabel.Rd
index b49f209..9f5564a 100644
--- a/man/ordipointlabel.Rd
+++ b/man/ordipointlabel.Rd
@@ -87,12 +87,12 @@ ord <- cca(dune)
 plt <- ordipointlabel(ord)
 
 ## set scaling - should be no warnings!
-ordipointlabel(ord, scaling = 1)
+ordipointlabel(ord, scaling = "sites")
 
 ## plot then add
-plot(ord, scaling = 3, type = "n")
-ordipointlabel(ord, display = "species", scaling = 3, add = TRUE)
-ordipointlabel(ord, display = "sites", scaling = 3, add = TRUE)
+plot(ord, scaling = "symmetric", type = "n")
+ordipointlabel(ord, display = "species", scaling = "symm", add = TRUE)
+ordipointlabel(ord, display = "sites", scaling = "symm", add = TRUE)
 
 ## redraw plot without rerunning SANN optimisation
 plot(plt)
diff --git a/man/ordistep.Rd b/man/ordistep.Rd
index ba85e89..f89e683 100644
--- a/man/ordistep.Rd
+++ b/man/ordistep.Rd
@@ -20,7 +20,7 @@ ordistep(object, scope, direction = c("both", "backward", "forward"),
    trace = TRUE, ...)
 ordiR2step(object, scope, direction = c("both", "forward"),
    Pin = 0.05, R2scope = TRUE, permutations = how(nperm = 499),
-   trace = TRUE, ...)
+   trace = TRUE, R2permutations = 1000, ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -64,6 +64,12 @@ ordiR2step(object, scope, direction = c("both", "forward"),
   If positive, information is printed during the model building. Larger
   values may give more information.
 }
+
+\item{R2permutations}{Number of permutations used in the estimation of
+  adjusted \eqn{R^2}{R2} for \code{\link{cca}} using
+  \code{\link{RsquareAdj}}.
+}
+
   \item{\dots}{
   Any additional arguments to \code{\link{add1.cca}} and 
   \code{\link{drop1.cca}}.
@@ -91,21 +97,22 @@ ordiR2step(object, scope, direction = c("both", "forward"),
   is often sensible to have \code{Pout} \eqn{>} \code{Pin} in stepwise
   models to avoid cyclic adds and drops of single terms. 
 
-  Function \code{ordiR2step} builds model so that it maximizes adjusted
-  \eqn{R^2}{R2} (function \code{\link{RsquareAdj}}) at every step, and
-  stopping when the adjusted \eqn{R^2}{R2} starts to decrease, or the
-  adjusted \eqn{R^2}{R2} of the \code{scope} is exceeded, or the
-  selected permutation \eqn{P}-value is exceeded (Blanchet et
-  al. 2008). The second criterion is ignored with option
-  \code{R2step = FALSE}, and the third criterion can be ignored setting
-  \code{Pin = 1} (or higher).  The \code{direction} has choices
-  \code{"forward"} and \code{"both"}, but it is very exceptional that a
-  term is dropped with the adjusted \eqn{R^2}{R2} criterion.  Function
-  uses adjusted \eqn{R^2}{R2} as the criterion, and it cannot be used if
-  the criterion cannot be calculated.  Therefore it is unavailable for
-  \code{\link{cca}}.  Adjusted \eqn{R^2}{R2} cannot be calculated if the
-  number of predictors is higher than the number of observations, but
-  such models can be analysed with \code{R2scope = FALSE}.
+  Function \code{ordiR2step} builds the model so that it maximizes
+  adjusted \eqn{R^2}{R2} (function \code{\link{RsquareAdj}}) at every
+  step, and stops when the adjusted \eqn{R^2}{R2} starts to
+  decrease, or the adjusted \eqn{R^2}{R2} of the \code{scope} is
+  exceeded, or the selected permutation \eqn{P}-value is exceeded
+  (Blanchet et al. 2008). The second criterion is ignored with option
+  \code{R2scope = FALSE}, and the third criterion can be ignored by
+  setting \code{Pin = 1} (or higher).  The \code{direction} has
+  choices \code{"forward"} and \code{"both"}, but it is very
+  exceptional that a term is dropped with the adjusted \eqn{R^2}{R2}
+  criterion. Adjusted \eqn{R^2}{R2} cannot be calculated if the number
+  of predictors is higher than the number of observations, but such
+  models can be analysed with \code{R2scope = FALSE}.  The
+  \eqn{R^2}{R2} of \code{\link{cca}} is based on simulations (see
+  \code{\link{RsquareAdj}}) and different runs of \code{ordiR2step}
+  can give different results.
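+
+  For example (a sketch; assumes the \code{mite} data sets):
+
+\preformatted{
+data(mite, mite.env)
+m0 <- rda(mite ~ 1, mite.env)   # intercept-only start model
+m1 <- rda(mite ~ ., mite.env)   # scope: all variables of mite.env
+ordiR2step(m0, m1)
+}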
 
   Functions \code{ordistep} (based on \eqn{P} values) and \code{ordiR2step}
   (based on adjusted \eqn{R^2}{R2} and hence on eigenvalues) can select
diff --git a/man/ordisurf.Rd b/man/ordisurf.Rd
index 356bbdc..334e671 100644
--- a/man/ordisurf.Rd
+++ b/man/ordisurf.Rd
@@ -172,7 +172,7 @@
   to give equal weights to all sites, you should set \code{w =
   NULL}. The behaviour is consistent with \code{\link{envfit}}. For
   complete accordance with constrained \code{\link{cca}}, you should set
-  \code{display = "lc"} (and possibly \code{scaling = 2}).
+  \code{display = "lc"}.
 
   Function \code{calibrate} returns the fitted values of the response
   variable. The \code{newdata} must be coordinates of points for which
diff --git a/man/orditkplot.Rd b/man/orditkplot.Rd
index 3410ef1..a8897a2 100644
--- a/man/orditkplot.Rd
+++ b/man/orditkplot.Rd
@@ -17,8 +17,10 @@
 orditkplot(x, display = "species", choices = 1:2, width, xlim, ylim, 
    tcex = 0.8, tcol, pch = 1,  pcol, pbg, pcex = 0.7, labels,  ...)
 \method{plot}{orditkplot}(x, ...)
-\method{points}{orditkplot}(x, ...)
-\method{text}{orditkplot}(x, ...)
+\method{points}{orditkplot}(x, pch = x$args$pch, cex = x$args$pcex,
+       col = x$args$pcol, bg = x$args$pbg, ...)
+\method{text}{orditkplot}(x, cex = x$args$tcex, col = x$args$tcol,
+     font = attr(x$labels, "font"), ...)
 \method{scores}{orditkplot}(x, display, ...)
 }
 
@@ -39,10 +41,12 @@ orditkplot(x, display = "species", choices = 1:2, width, xlim, ylim,
   \item{tcol}{Colour of text labels.}
   \item{pch, pcol, pbg}{Point type and outline and fill colours. 
     Defaults \code{pcol="black"}  and \code{pbg="transparent"}. 
-   Argument \code{pbg} has an effect
-    only in filled plotting characters \code{pch = 21} to \code{25}.  } 
+    Argument \code{pbg} has an effect only in filled plotting characters
+    \code{pch = 21} to \code{25}.} 
   \item{pcex}{Expansion factor for point size.}  
   \item{labels}{Labels used instead of row names.}
+  \item{cex, col, bg, font}{Graphical parameters used in the
+    \code{points} and \code{text} methods. See \code{\link{par}}.}
   \item{\dots}{Other arguments passed to the function. These can be
     graphical parameters (see \code{\link{par}}) used in the plot, or
     extra arguments to \code{\link{scores}}. These arguments are
@@ -85,10 +89,10 @@ orditkplot(x, display = "species", choices = 1:2, width, xlim, ylim,
   must have similar dimensions as the \code{orditkplot} canvas had
   originally. The \code{plot} function cannot be configured, but it
   uses the same settings as the original Tcl/Tk plot. However,
-  \code{points} and \code{text} functions are fully configurable, and
-  unaware of the original Tcl/Tk plot settings (probably you must set
-  \code{cex} at least to get a decent plot). Finally, button
-  \strong{Dismiss} closes the window.
+  \code{points} and \code{text} functions are fully configurable, but
+  use the stored defaults for consistency with \code{plot.orditkplot} if
+  none are supplied by the user. Finally, button \strong{Dismiss} closes
+  the window.
 
   The produced plot will have equal aspect ratio. The width of the
   horizontal axis is fixed, but vertical axes will be scaled to needed
diff --git a/man/ordixyplot.Rd b/man/ordixyplot.Rd
index b6449d8..afa4952 100644
--- a/man/ordixyplot.Rd
+++ b/man/ordixyplot.Rd
@@ -131,7 +131,7 @@ ordisplom(ord, data=dune.env, form = ~ . | Management, groups=Manure)
 ordixyplot(ord, data=dune.env, form = CA1 ~ CA2 | Management,
   groups=Manure)
 ## Choose a different scaling
-ordixyplot(ord, scaling = 3)
+ordixyplot(ord, scaling = "symmetric")
 ## ... Slices of third axis
 ordixyplot(ord, form = CA1 ~ CA2 | equal.count(CA3, 4), type = c("g","p"))
## Display environmental variables
diff --git a/man/permustats.Rd b/man/permustats.Rd
index b4049e3..1558a41 100644
--- a/man/permustats.Rd
+++ b/man/permustats.Rd
@@ -17,6 +17,7 @@
 \alias{permustats.protest}
 \alias{permustats.vectorfit}
 \alias{summary.permustats}
+\alias{c.permustats}
 \alias{densityplot.permustats}
 \alias{density.permustats}
 \alias{qqnorm.permustats}
@@ -34,20 +35,29 @@
 
 \usage{
 permustats(x, ...)
-\method{summary}{permustats}(object, interval = 0.95, ...)
+\method{summary}{permustats}(object, interval = 0.95, alternative, ...)
 \method{densityplot}{permustats}(x, data, xlab = "Permutations", ...)
 \method{density}{permustats}(x, observed = TRUE, ...)
 \method{qqnorm}{permustats}(y, observed = TRUE, ...)
-\method{qqmath}{permustats}(x, data, observed = TRUE, ylab = "Permutations", ...)
+\method{qqmath}{permustats}(x, data, observed = TRUE, sd.scale = FALSE,
+    ylab = "Permutations", ...)
 }
 
 \arguments{
   \item{object, x, y}{The object to be handled.}
   \item{interval}{numeric; the coverage interval reported.}
+  \item{alternative}{A character string specifying the limits used for
+    the \code{interval} and the direction of the test when evaluating
+    the \eqn{p}-values. Must be one of \code{"two.sided"} (both upper
+    and lower limit), \code{"greater"} (upper limit), \code{"less"}
+    (lower limit). Usually \code{alternative} is given in the result
+    object, but it can be specified with this argument.}
   \item{xlab, ylab}{Arguments of
     \code{\link[lattice]{densityplot}} and
     \code{\link[lattice]{qqmath}} functions.}
   \item{observed}{Add observed statistic among permutations.}
+  \item{sd.scale}{Scale permutations to unit standard deviation and observed
+    statistic to standardized effect size.}
   \item{data}{Ignored.}
   \item{\dots}{ Other arguments passed to the function. In
     \code{density} these are passed to \code{\link{density.default}}.}
@@ -66,9 +76,15 @@ permustats(x, ...)
   the mean, median, and limits which contain \code{interval} percent
   of permuted values. With the default (\code{interval = 0.95}), for
   two-sided test these are (2.5\%, 97.5\%) and for one-sided tests
-  either 5\% or 95\% quantile depending on the test direction. The
-  mean, quantiles and \eqn{z} values are evaluated from permuted
-  values without observed statistic.
+  either 5\% or 95\% quantile and the \eqn{p}-value depending on the
+  test direction. The mean, quantiles and \eqn{z} values are evaluated
+  from permuted values without observed statistic, but the
+  \eqn{p}-value is evaluated with the observed statistic. The
+  intervals and the \eqn{p}-value are evaluated with the same test
+  direction as in the original test, but this can be changed with
+  argument \code{alternative}. Several \code{permustats} objects can
+  be combined with the \code{c} function. The \code{c} function checks
+  that the statistics are equal, but performs no other sanity tests.
 
   The \code{density} and \code{densityplot} methods display the
   kernel density estimates of permuted values. When observed value of
@@ -82,22 +98,32 @@ permustats(x, ...)
   which is shown as horizontal line in plots. \code{qqnorm} plots
   permutation values against standard Normal variate. \code{qqmath}
   defaults to the standard Normal as well, but can accept other
-  alternatives (see standard \code{\link[lattice]{qqmath}}).
-
-  Functions \code{\link{density}} and \code{\link{qqnorm}} are based on
-  standard \R methods and accept their arguments. They only handle one
-  statistic, and cannot be used when several test statistic were
+  alternatives (see standard \code{\link[lattice]{qqmath}}). The
+  \code{qqmath} function can also plot the observed statistic as a
+  standardized effect size (SES) with standardized permutations
+  (argument \code{sd.scale}). The permutations are standardized
+  without the observed statistic, similarly as in \code{summary}.
+
+  Functions \code{\link{density}} and \code{\link{qqnorm}} are based
+  on standard \R methods and accept their arguments. They only handle
+  one statistic, and cannot be used when several test statistic were
   evaluated. The \code{\link[lattice]{densityplot}} and
   \code{\link[lattice]{qqmath}} are \pkg{lattice} graphics, and can be
-  used both for one and several statistics.  All these functions pass
-  arguments to their underlying functions; see their documentation.
+  used either for one or for several statistics.  All these functions
+  pass arguments to their underlying functions; see their
+  documentation. Functions \code{\link[lattice]{qqmath}} and
+  \code{\link[lattice]{densityplot}} default to use same axis scaling
+  in all subplots of the lattice. You can use argument \code{scales} to
+  set independent scaling for subplots when this is appropriate (see
+  \code{\link[lattice]{xyplot}} for an exhaustive list of arguments).
 
   The \code{permustats} can extract permutation statistics from the
-  results of \code{\link{adonis}}, \code{\link{anosim}}, 
-  \code{\link{anova.cca}}, \code{\link{mantel}}, \code{\link{mantel.partial}},
-  \code{\link{mrpp}}, \code{\link{oecosimu}}, \code{\link{ordiareatest}},
+  results of \code{\link{adonis2}}, \code{\link{adonis}},
+  \code{\link{anosim}}, \code{\link{anova.cca}}, \code{\link{mantel}},
+  \code{\link{mantel.partial}}, \code{\link{mrpp}},
+  \code{\link{oecosimu}}, \code{\link{ordiareatest}},
   \code{\link{permutest.cca}}, \code{\link{protest}}, and
-  \code{\link{permutest.betadisper}}. 
+  \code{\link{permutest.betadisper}}.
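+
+  For example (a sketch; \code{perm} as created in the Examples below):
+
+\preformatted{
+## one-sided interval and p-value instead of the stored direction
+summary(perm, alternative = "greater")
+## observed statistic as standardized effect size
+qqmath(perm, sd.scale = TRUE)
+}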
 
 }
 
@@ -124,9 +150,8 @@ permustats(x, ...)
 }
 
 \examples{
-data(dune)
-data(dune.env)
-mod <- adonis(dune ~ Management + A1, data = dune.env)
+data(dune, dune.env)
+mod <- adonis2(dune ~ Management + A1, data = dune.env)
 ## use permustats
 perm <- permustats(mod)
 summary(perm)
diff --git a/man/permutest.betadisper.Rd b/man/permutest.betadisper.Rd
index 1a5c90e..27e6f56 100644
--- a/man/permutest.betadisper.Rd
+++ b/man/permutest.betadisper.Rd
@@ -94,8 +94,8 @@ plot(mod.HSD)
 
 ## Has permustats() method
 pstat <- permustats(pmod)
-densityplot(pstat)
-qqmath(pstat)
+densityplot(pstat, scales = list(x = list(relation = "free")))
+qqmath(pstat, scales = list(relation = "free"))
 }
 \keyword{methods}
 \keyword{multivariate}
diff --git a/man/plot.cca.Rd b/man/plot.cca.Rd
index a7be07f..6a6e5be 100644
--- a/man/plot.cca.Rd
+++ b/man/plot.cca.Rd
@@ -56,7 +56,7 @@
results with \eqn{\sqrt(1/(1-\lambda))}.  This scaling is known as Hill
     scaling (although it has nothing to do with Hill's rescaling of
     \code{\link{decorana}}). With corresponding negative values
-    in\code{rda}, species scores are divided by standard deviation of each
+    in \code{rda}, species scores are divided by standard deviation of each
     species and multiplied with an equalizing constant. Unscaled raw
     scores stored in the result can be accessed with \code{scaling = 0}.
 
diff --git a/man/predict.cca.Rd b/man/predict.cca.Rd
index ea206e2..fefa265 100644
--- a/man/predict.cca.Rd
+++ b/man/predict.cca.Rd
@@ -2,6 +2,7 @@
 \alias{fitted.cca}
 \alias{fitted.rda}
 \alias{fitted.capscale}
+\alias{fitted.dbrda}
 \alias{residuals.cca}
 \alias{predict.cca}
 \alias{predict.rda}
@@ -190,7 +191,7 @@ cca(residuals(mod))
 freq <- specnumber(dune, MARGIN=2)
 freq
 mod <- cca(dune[, freq>1] ~ A1 + Management + Condition(Moisture), dune.env)
-predict(mod, type="sp", newdata=dune[, freq==1], scaling=2)
+predict(mod, type="sp", newdata=dune[, freq==1], scaling="species")
 # New sites
 predict(mod, type="lc", new=data.frame(A1 = 3, Management="NM", Moisture="2"), scal=2)
 # Calibration and residual plot
diff --git a/man/spantree.Rd b/man/spantree.Rd
index 34a226a..20d47ba 100644
--- a/man/spantree.Rd
+++ b/man/spantree.Rd
@@ -19,7 +19,7 @@ spantree(d, toolong = 0)
 spandepth(x)
 \method{plot}{spantree}(x, ord, cex = 0.7, type = "p", labels, dlim,
      FUN = sammon,  ...)
-\method{lines}{spantree}(x, ord, display="sites", ...)
+\method{lines}{spantree}(x, ord, display="sites", col = 1, ...)
 }
 
 \arguments{
@@ -45,9 +45,13 @@ spandepth(x)
     missing.}
   \item{dlim}{A ceiling value used to highest \code{cophenetic} dissimilarity.}
   \item{FUN}{Ordination function to find the configuration from
-    cophenetic dissimilarities. }
+    cophenetic dissimilarities. If the supplied \code{FUN} does not work,
+    supply an ordination result as argument \code{ord}. }
   \item{display}{Type of \code{\link{scores}} used for \code{ord}.}
-  \item{\dots}{Other parameters passed to functions.}
+  \item{col}{Colour of line segments. This can be a vector which is
+    recycled for points, and the line colour will be a mixture of the
+    colours of the two joined points.}
+  \item{\dots}{Other parameters passed to functions.}
 }
 \details{
   
@@ -144,7 +148,10 @@ plot(tr, type = "t")
 depths <- spandepth(tr)
 plot(tr, type = "t", label = depths)
 ## Plot as a dendrogram
-plot(as.hclust(tr))
+cl <- as.hclust(tr)
+plot(cl)
+## cut hclust tree to classes and show in colours in spantree
+plot(tr, col = cutree(cl, 5), pch=16)
 }
 \keyword{ multivariate}
 
diff --git a/man/specaccum.Rd b/man/specaccum.Rd
index 82410b5..33b8cf8 100644
--- a/man/specaccum.Rd
+++ b/man/specaccum.Rd
@@ -24,8 +24,8 @@
 specaccum(comm, method = "exact", permutations = 100,
           conditioned =TRUE, gamma = "jack1",  w = NULL, subset, ...)
 \method{plot}{specaccum}(x, add = FALSE, random = FALSE, ci = 2, 
-    ci.type = c("bar", "line", "polygon"), col = par("fg"), ci.col = col, 
-    ci.lty = 1, xlab, ylab = x$method, ylim, 
+    ci.type = c("bar", "line", "polygon"), col = par("fg"), lty = 1,
+    ci.col = col, ci.lty = 1, xlab, ylab = x$method, ylim,
     xvar = c("sites", "individuals", "effort"), ...)
 \method{boxplot}{specaccum}(x, add = FALSE, ...)
 fitspecaccum(object, model, method = "random", ...)
@@ -70,6 +70,7 @@ specslope(object, at)
     draws vertical bars, \code{"line"} draws lines, and
     \code{"polygon"} draws a shaded area.}
   \item{col}{Colour for drawing lines.}
+  \item{lty}{Line type (see \code{\link{par}}).}
   \item{ci.col}{Colour for drawing lines or filling the
     \code{"polygon"}.}
   \item{ci.lty}{Line type for confidence intervals or border of the
@@ -82,7 +83,6 @@ specslope(object, at)
     \code{method = "rarefaction"}. }
   \item{object}{Either a community data set or fitted \code{specaccum} model.}
   \item{model}{Nonlinear regression model (\code{\link{nls}}). See Details.}
-  \item{lty}{line type code (see \code{\link{par}}.}
   
   \item{newdata}{Optional data used in prediction interpreted as
     number of sampling units (sites). If missing, fitted values are
diff --git a/man/stressplot.wcmdscale.Rd b/man/stressplot.wcmdscale.Rd
index d1465fd..531e05c 100644
--- a/man/stressplot.wcmdscale.Rd
+++ b/man/stressplot.wcmdscale.Rd
@@ -3,6 +3,7 @@
 \alias{stressplot.cca}
 \alias{stressplot.rda}
 \alias{stressplot.capscale}
+\alias{stressplot.dbrda}
 \alias{stressplot.prcomp}
 \alias{stressplot.princomp}
 
@@ -19,7 +20,7 @@
   linear relationship of the eigenvector ordinations. The
   \code{stressplot} methods are available for \code{\link{wcmdscale}},
   \code{\link{rda}}, \code{\link{cca}}, \code{\link{capscale}},
-  \code{\link{prcomp}} and \code{\link{princomp}}. 
+  \code{\link{dbrda}}, \code{\link{prcomp}} and \code{\link{princomp}}. 
 }
 
 \usage{
@@ -52,14 +53,14 @@
   space (with all ordination axes) are equal to observed distances, and
   the fit line shows this equality. In general, the fit line does not go
   through the points, but the points for observed distances approach the
-  fit line from below. However, with non-metric distances (in
+  fit line from below. However, with non-Euclidean distances (in
   \code{\link{wcmdscale}} or \code{\link{capscale}}) with negative
   eigenvalues the ordination distances can exceed the observed distances
   in real dimensions; the imaginary dimensions with negative eigenvalues
   will correct these excess distances. If you have used
-  \code{\link{capscale}} with argument \code{add = TRUE} to avoid
-  negative eigenvalues, the ordination distances will exceed the
-  observed dissimilarities by the additive constant.
+  \code{\link{capscale}} or \code{\link{wcmdscale}} with argument
+  \code{add} to avoid negative eigenvalues, the ordination distances
+  will exceed the observed dissimilarities.
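+
+  For example (a sketch; assumes the \code{dune} data set):
+
+\preformatted{
+data(dune)
+## Lingoes adjustment: ordination distances exceed the observed ones
+m <- capscale(dune ~ 1, distance = "bray", add = TRUE)
+stressplot(m)
+}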
 
   In partial ordination (\code{\link{cca}}, \code{\link{rda}} and
   \code{\link{capscale}} with \code{Condition} in the formula), the
@@ -70,7 +71,8 @@
 }
 
 \value{
-  Functions draw a graph and return invisibly the ordination distances.
+  Functions draw a graph and return invisibly the ordination distances.
 }
 
 \author{
diff --git a/man/varpart.Rd b/man/varpart.Rd
index ef71c15..12c255d 100644
--- a/man/varpart.Rd
+++ b/man/varpart.Rd
@@ -7,29 +7,37 @@
 \alias{plot.varpart}
 \alias{plot.varpart234}
 \alias{simpleRDA2}
+\alias{simpleDBRDA}
 
 \title{Partition the Variation of Community Matrix by 2, 3, or 4 Explanatory Matrices }
 
 \description{ 
-  The function partitions the variation of response table Y with
-  respect to two, three, or four explanatory tables, using adjusted
-  \eqn{R^2}{R-squared} in redundancy analysis ordination (RDA). If Y
-  contains a single vector, partitioning is by partial regression.
-  Collinear variables in the explanatory tables do NOT have to be
-  removed prior to partitioning.  
+
+  The function partitions the variation in community data or community
+  dissimilarities with respect to two, three, or four explanatory
+  tables, using adjusted \eqn{R^2}{R-squared} in redundancy analysis
+  ordination (RDA) or distance-based redundancy analysis. If the response
+  is a single vector, partitioning is by partial regression. Collinear
+  variables in the explanatory tables do NOT have to be removed prior
+  to partitioning.
+
 }
 
 \usage{
-varpart(Y, X, ..., data, transfo, scale = FALSE)
+varpart(Y, X, ..., data, transfo, scale = FALSE, add = FALSE,
+    sqrt.dist = FALSE)
 showvarparts(parts, labels, bg = NULL, alpha = 63, Xnames,
     id.size = 1.2,  ...)
 \method{plot}{varpart234}(x, cutoff = 0, digits = 1, ...)
 }
 
 \arguments{
-\item{Y}{ Data frame or matrix containing the response data
-table. In community ecology, that table is often a site-by-species
-table. }
+
+\item{Y}{ Data frame or matrix containing the response data table or
+  dissimilarity structure inheriting from \code{\link{dist}}. In
+  community ecology, that table is often a site-by-species table or a
+  dissimilarity object. }
+
 \item{X}{Two to four explanatory models, variables or tables.  These can
   be defined in three alternative ways: (1) one-sided model formulae
   beginning with \code{~} and then defining the model, (2) name of a
@@ -46,11 +54,28 @@ table. }
   }
 \item{data}{The data frame with the variables used in the formulae in
   \code{X}.} 
+
 \item{transfo}{ Transformation for \code{Y} (community data) using
-  \code{\link{decostand}}.  All alternatives in \code{decostand} can be
-    used, and those preserving Euclidean metric include
-    \code{"hellinger"}, \code{"chi.square"}, \code{"total"}, \code{"norm"}.}
-\item{scale}{Should the columns of \code{Y} be standardized to unit variance}
+  \code{\link{decostand}}.  All alternatives in \code{decostand} can
+  be used, and those preserving Euclidean metric include
+  \code{"hellinger"}, \code{"chi.square"}, \code{"total"},
+  \code{"norm"}. Ignored if \code{Y} are dissimilarities.}
+
+\item{scale}{Should the columns of \code{Y} be standardized to unit
+  variance. Ignored if \code{Y} are dissimilarities.}
+
+\item{add}{Add a constant to the non-diagonal values to euclidify
+  dissimilarities (see \code{\link{wcmdscale}} for details). Choice
+  \code{"lingoes"} (or \code{TRUE}) use the recommended method of
+  Legendre & Anderson (1999: \dQuote{method 1}) and \code{"cailliez"}
+  uses their \dQuote{method 2}. The argument has an effect only when
+  \code{Y} are dissimilarities.}
+
+\item{sqrt.dist}{Take square root of dissimilarities. This often
+  euclidifies dissimilarities. NB., the argument name cannot be
+  abbreviated. The argument has an effect only when \code{Y} are
+  dissimilarities.}
+
 \item{parts}{Number of explanatory tables (circles) displayed.}
 \item{labels}{Labels used for displayed fractions. Default is to use
   the same letters as in the printed output.}
@@ -73,16 +98,30 @@ table. }
 \item{cutoff}{The values below \code{cutoff} will not be displayed.}
 \item{digits}{The number of significant digits; the number of decimal
   places is at least one higher.}
-\item{...}{Other parameters passed to functions.}
+\item{...}{Other parameters passed to functions. NB, arguments after
+  dots cannot be abbreviated but they must be spelt out completely.}
 }
 
 \details{
+
   The functions partition the variation in \code{Y} into components
   accounted for by two to four explanatory tables and their combined
-  effects. If \code{Y} is a multicolumn data frame or
-  matrix, the partitioning is based on redundancy analysis (RDA, see
+  effects. If \code{Y} is a multicolumn data frame or matrix, the
+  partitioning is based on redundancy analysis (RDA, see
   \code{\link{rda}}), and if \code{Y} is a single variable, the
-  partitioning is based on linear regression.  
+  partitioning is based on linear regression.  If \code{Y} are
+  dissimilarities, the decomposition is based on distance-based
+  redundancy analysis (db-RDA, see \code{\link{capscale}}) following
+  McArdle & Anderson (2001). The input dissimilarities must be
+  compatible with the results of \code{\link{dist}}. \pkg{Vegan} functions
+  \code{\link{vegdist}}, \code{\link{designdist}},
+  \code{\link{raupcrick}} and \code{\link{betadiver}} produce such
+  objects, as do many other dissimilarity functions in \R
+  packages. However, symmetric square matrices are not recognized as
+  dissimilarities but must be transformed with \code{\link{as.dist}}.
+  Partitioning will be made on squared dissimilarities, analogously to
+  using variance with rectangular data, unless \code{sqrt.dist = TRUE}
+  was specified.
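+
+  For example (a sketch; assumes the \code{mite} data sets):
+
+\preformatted{
+data(mite, mite.env, mite.pcnm)
+## partition square roots of Bray-Curtis dissimilarities, which
+## often avoids negative eigenvalues in the underlying db-RDA
+varpart(vegdist(mite), ~ SubsDens + WatrCont, mite.pcnm,
+        data = mite.env, sqrt.dist = TRUE)
+}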
 
   The function primarily uses adjusted \eqn{R^2}{R-squared} to assess
   the partitions explained by the explanatory tables and their
@@ -96,14 +135,13 @@ table. }
   \code{browseVignettes("vegan")}), or can be displayed graphically
   using function \code{showvarparts}.
 
-  A fraction is testable if it can be directly
-  expressed as an RDA model.  In these cases the printed output also
-  displays the corresponding RDA model using notation where explanatory
-  tables after \code{|} are conditions (partialled out; see
-  \code{\link{rda}} for details). Although single fractions can be
-  testable, this does not mean that all fractions simultaneously can be
-  tested, since there number of  testable fractions  is higher than
-  the number of estimated models.
+  A fraction is testable if it can be directly expressed as an RDA or
+  db-RDA model.  In these cases the printed output also displays the
+  corresponding RDA model using notation where explanatory tables after
+  \code{|} are conditions (partialled out; see \code{\link{rda}} for
+  details). Although single fractions can be testable, this does not
+  mean that all fractions simultaneously can be tested, since the number
+  of testable fractions is higher than the number of estimated models.
 
   An abridged explanation of the alphabetic symbols for the individual
   fractions follows, but computational details should be checked in the
@@ -169,12 +207,12 @@ table. }
   \code{indfract}, \code{contr1} and \code{contr2} are all data frames with
   items:
   \itemize{
-  \item{Df}{Degrees of freedom of numerator of the \eqn{F}-statistic
+  \item{\code{Df}: }{Degrees of freedom of numerator of the \eqn{F}-statistic
     for the fraction.}
-  \item{R.square}{Raw \eqn{R^2}{R-squared}. This is calculated only for
+  \item{\code{R.square}: }{Raw \eqn{R^2}{R-squared}. This is calculated only for
     \code{fract} and this is \code{NA} in other items.}
-  \item{Adj.R.square}{Adjusted \eqn{R^2}{R-squared}.}
-  \item{Testable}{If the fraction can be expressed as a (partial) RDA
+  \item{\code{Adj.R.square}: }{Adjusted \eqn{R^2}{R-squared}.}
+  \item{\code{Testable}: }{If the fraction can be expressed as a (partial) RDA
     model, it is directly \code{Testable}, and this field is
     \code{TRUE}.  In that case the fraction label also gives the
     specification of the testable RDA model.}
@@ -201,10 +239,21 @@ transformations for ordination of species data. Oecologia 129: 271--280.
 Peres-Neto, P., P. Legendre, S. Dray and D. Borcard. 2006. Variation partitioning
 of species data matrices: estimation and comparison of fractions.
 Ecology 87: 2614--2625.
- }
+
+(d) References on partitioning of dissimilarities
+
+Legendre, P. & Anderson, M. J. (1999). Distance-based redundancy
+analysis: testing multispecies responses in multifactorial ecological
+experiments. \emph{Ecological Monographs} 69, 1--24.
+
+McArdle, B.H. & Anderson, M.J. (2001). Fitting multivariate models
+to community data: a comment on distance-based redundancy
+analysis. \emph{Ecology} 82, 290--297.
+
+}
 
 \author{ Pierre Legendre, Departement de Sciences Biologiques, Universite de
-Montreal, Canada.  Adapted to \pkg{vegan} by Jari Oksanen. }
+Montreal, Canada.  Further developed by Jari Oksanen. }
 
 \note{
 
@@ -216,22 +265,31 @@ Montreal, Canada.  Adapted to \pkg{vegan} by Jari Oksanen. }
 
   The functions frequently give negative estimates of variation.
   Adjusted \eqn{R^2}{R-squared} can be negative for any fraction;
-  unadjusted \eqn{R^2}{R-squared} of testable fractions always will be
-  non-negative.  Non-testable fractions cannot be found directly, but
-  by subtracting different models, and these subtraction results can
-  be negative.  The fractions are orthogonal, or linearly independent,
-  but more complicated or nonlinear dependencies can cause negative
-  non-testable fractions.
+  unadjusted \eqn{R^2}{R-squared} of testable fractions will always
+  be non-negative when variances are partitioned.  Non-testable
+  fractions cannot be found directly, but only by subtracting
+  different models, and these subtraction results can be negative.
+  The fractions are orthogonal, or linearly independent, but more
+  complicated or nonlinear dependencies can cause negative
+  non-testable fractions. Any fraction can be negative for
+  non-Euclidean dissimilarities because the underlying db-RDA model
+  can yield negative eigenvalues (see \code{\link{capscale}},
+  \code{\link{dbrda}}). These negative eigenvalues in the underlying
+  analysis can be avoided with arguments \code{sqrt.dist} and
+  \code{add}, which have an effect similar to that in
+  \code{\link{capscale}}: the square roots of many dissimilarities do
+  not have negative eigenvalues, and no negative eigenvalues are
+  produced after Lingoes or Cailliez adjustment, which in effect adds
+  random variation to the dissimilarities.
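
  A sketch of the two remedies, using the arguments described above
  with the mite data of the examples:

    ## partition square roots of the dissimilarities
    varpart(vegdist(mite), ~ ., mite.pcnm, data = mite.env,
            sqrt.dist = TRUE)
    ## or add a constant to the dissimilarities (Lingoes adjustment)
    varpart(vegdist(mite), ~ ., mite.pcnm, data = mite.env, add = TRUE)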
 
   The current function will only use RDA in multivariate
   partitioning. It is much more complicated to estimate the adjusted
   R-squares for CCA, and unbiased analysis of CCA is not currently
   implemented.
 
-  A simplified, fast version of RDA is used (function
-  \code{simpleRDA2}).  The actual calculations are done in functions
-  \code{varpart2} to \code{varpart4}, but these are not intended to be
-  called directly by the user.
+  Simplified, fast versions of RDA and db-RDA are used (functions
+  \code{simpleRDA2} and \code{simpleDBRDA}).  The actual calculations
+  are done in functions \code{varpart2} to \code{varpart4}, but these
+  are not intended to be called directly by the user.
 
 }
 
@@ -269,6 +327,8 @@ anova(aFrac, step=200, perm.max=200)
 # RsquareAdj gives the same result as component [a] of varpart
 RsquareAdj(aFrac)
 
+# Partition Bray-Curtis dissimilarities
+varpart(vegdist(mite), ~ ., mite.pcnm, data = mite.env)
 # Three explanatory matrices 
 mod <- varpart(mite, ~ SubsDens + WatrCont, ~ Substrate + Shrub + Topo,
    mite.pcnm, data=mite.env, transfo="hel")
diff --git a/man/vegan-defunct.Rd b/man/vegan-defunct.Rd
index 09235d0..526421a 100644
--- a/man/vegan-defunct.Rd
+++ b/man/vegan-defunct.Rd
@@ -1,26 +1,38 @@
 \name{vegan-defunct}
-%--- The following functions were moved to the 'permute' package and
-%    removed from vegan, but here we document only those that were
-%    renamed and are not documented in 'permute'
- 
-\alias{metaMDSrotate}
 
+\alias{density.adonis}
+\alias{density.anosim}
+\alias{density.mantel}
+\alias{density.mrpp}
+\alias{density.permutest.cca}
+\alias{density.protest}
+\alias{plot.vegandensity}
+\alias{densityplot.adonis}
+\alias{density.oecosimu}
+\alias{densityplot.oecosimu}
 \alias{vegan-defunct}
-%------ NOTE:  ../R/vegan-deprecated.R   must be synchronized with this!
+%------ NOTE:  ../R/vegan-defunct.R   must be synchronized with this!
 \title{Defunct Functions in Package \pkg{vegan}}
 %------ PLEASE: one \alias{.} for EACH ! (+ one \usage{} & \arguments{} for all)
 \description{
-  The functions or variables listed here are no longer part of \pkg{vegan} as
-  they are no longer needed.
+  The functions or variables listed here are no longer part of
+  \pkg{vegan} as they are no longer needed.
 }
 \usage{
-metaMDSrotate(object, vec, na.rm = FALSE, ...)
+\method{density}{adonis}(x, ...)
+\method{plot}{vegandensity}(x, main = NULL, xlab = NULL, ylab = "Density", 
+   type = "l", zero.line = TRUE, obs.line = TRUE, ...)
+\method{densityplot}{adonis}(x, data, xlab = "Null", ...)
 }
 
-\details{ 
-  Function \code{metaMDSrotate} is replaced with
-  \code{\link{MDSrotate}} which can handle \code{\link{monoMDS}}
-  results in addition to \code{\link{metaMDS}}.
+\details{
+  The deprecated \code{density} and \code{densityplot} methods are
+  replaced with similar methods for \code{\link{permustats}}. The
+  \code{\link{permustats}} function offers more powerful analysis tools for
+  permutations, including \code{\link{summary.permustats}} giving
+  \eqn{z} values (a.k.a. standardized effect sizes, SES), and Q-Q
+  plots (\code{\link{qqnorm.permustats}},
+  \code{\link{qqmath.permustats}}).
 }
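
  A sketch of the replacement workflow (function names as documented
  for \code{permustats}):

    data(mite, mite.env)
    ps <- permustats(anosim(vegdist(mite), mite.env$Topo))
    summary(ps)       # z values (SES) of the permutations
    densityplot(ps)   # replaces the defunct density/densityplot methods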
 
 \seealso{
diff --git a/man/vegan-deprecated.Rd b/man/vegan-deprecated.Rd
index f034d1a..d28da3c 100644
--- a/man/vegan-deprecated.Rd
+++ b/man/vegan-deprecated.Rd
@@ -1,17 +1,7 @@
 \encoding{UTF-8}
 \name{vegan-deprecated}
-\alias{commsimulator}
-\alias{density.adonis}
-\alias{density.anosim}
-\alias{density.mantel}
-\alias{density.mrpp}
-\alias{density.permutest.cca}
-\alias{density.protest}
-\alias{plot.vegandensity}
-\alias{densityplot.adonis}
-\alias{density.oecosimu}
-\alias{densityplot.oecosimu}
 
+\alias{commsimulator}
 \alias{vegan-deprecated}
 %------ NOTE:  ../R/vegan-deprecated.R   must be synchronized with this!
 \title{Deprecated Functions in vegan package}
@@ -22,16 +12,10 @@
 }
 \usage{
 commsimulator(x, method, thin=1)
-\method{density}{adonis}(x, ...)
-\method{plot}{vegandensity}(x, main = NULL, xlab = NULL, ylab = "Density", 
-   type = "l", zero.line = TRUE, obs.line = TRUE, ...)
-\method{densityplot}{adonis}(x, data, xlab = "Null", ...)
 }
 
 \arguments{
- \item{x}{Community data for \code{commsimulator},or an object to be
-   handled by \code{density} or \code{densityplot}}
- ## commsimulator
+ \item{x}{Community data.}
  \item{method}{Null model method: either a name (character string) of
    a method defined in \code{\link{make.commsim}} or a
    \code{\link{commsim}} function.}
@@ -40,15 +24,7 @@ commsimulator(x, method, thin=1)
    \code{"swap"} and \code{"tswap"} (ignored with non-sequential
    methods)}
   ## density and densityplot
-  \item{main, xlab, ylab, type, zero.line}{Arguments of
-    \code{\link{plot.density}}, \code{\link[lattice]{densityplot}}.}
-  \item{obs.line}{Draw vertical line for the observed
-    statistic. Logical value \code{TRUE} draws a red line, and
-    \code{FALSE} draws nothing. Alternatively, \code{obs.line} can be a
-    definition of the colour used for the line, either as a numerical
-    value from the \code{\link[grDevices]{palette}} or as the name of
-    the colour, or other normal definition of the colour.}
-  \item{data}{Ignored.}
+
   \item{\dots}{ Other arguments passed to functions. }
 
 }
@@ -139,48 +115,6 @@ commsimulator(x, method, thin=1)
  backtracking is done so many times that all incidences will be filled
  into the matrix. The \code{quasiswap} method is not sequential, but it
  produces a random incidence matrix with given marginal totals.
-
-  The \code{density} function can directly access permutation results
-  of the same function as \code{permustats}.  The \code{density}
-  function is identical to \code{\link{density.default}} and takes all
-  its arguments, but adds the observed statistic to the result as item
-  \code{"observed"}. The observed statistic is also put among the
-  permuted values so that the results are consistent with significance
-  tests. The \code{plot} method is similar to the default
-  \code{\link{plot.density}}, but can also add the observed statistic
-  to the graph as a vertical line.  In \code{\link{adonis}} it is also
-  possible to use direclty \code{densityplot} function.
-
-  The deprecated \code{density} and \code{densityplot} methods are
-  replaced with similar methods for \code{\link{permustats}}. The
-  \code{\link{permustats}} offers more powerful analysis tools for
-  permutations, including \code{\link{summary.permustats}} giving
-  \eqn{z} values (a.k.a. standardized effect sizes, SES), and Q-Q
-  plots (\code{\link{qqnorm.permustats}},
-  \code{\link{qqmath.permustats}}. Below the old documentation: 
-
-  The density methods are available for \pkg{vegan} functions
-  \code{\link{adonis}}, \code{\link{anosim}}, \code{\link{mantel}},
-  \code{\link{mantel.partial}}, \code{\link{mrpp}},
-  \code{\link{permutest.cca}}, and \code{\link{protest}}.  The
-  \code{density} function for \code{\link{oecosimu}} is documented
-  separately, and it is also used for \code{\link{adipart}},
-  \code{\link{hiersimu}} and \code{\link{multipart}}.
-
-  All \pkg{vegan} \code{density} functions return an object of class
-  \code{"vegandensity"} inheriting from \code{\link{density}}, and can
-  be plotted with its \code{plot} method.  This is identical to the
-  standard \code{plot} of \code{densiy} objects, but can also add a
-  vertical line for the observed statistic.
-
-  Functions that can return several permuted statistics simultaneously
-  also have \code{\link[lattice]{densityplot}} method
-  (\code{\link{adonis}}, \code{\link{oecosimu}} and diversity
-  partitioning functions based on \code{oecosimu}).  The standard
-  \code{\link{density}} can only handle univariate data, and a warning
-  is issued if the function is used for a model with several observed
-  statistics
-
 }
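
  A sketch of the non-deprecated interface that replaces
  \code{commsimulator} (see \code{make.commsim} and \code{nullmodel}):

    data(sipoo)       # binary bird community data
    nm <- nullmodel(sipoo, "quasiswap")
    sim <- simulate(nm, nsim = 99)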
 
 \references{
diff --git a/man/vegan-internal.Rd b/man/vegan-internal.Rd
index 33652e7..eb64b8d 100644
--- a/man/vegan-internal.Rd
+++ b/man/vegan-internal.Rd
@@ -12,6 +12,9 @@
 \alias{veganCovEllipse}
 \alias{hierParseFormula}
 \alias{veganMahatrans}
+\alias{GowerDblcen}
+\alias{addLingoes}
+\alias{addCailliez}
 
 \title{Internal vegan functions}
 
@@ -35,6 +38,9 @@ pasteCall(call, prefix = "Call:")
 veganCovEllipse(cov, center = c(0, 0), scale = 1, npoints = 100)
 veganMahatrans(x, s2, tol = 1e-8)
 hierParseFormula(formula, data)
+GowerDblcen(x, na.rm = TRUE)
+addLingoes(d)
+addCailliez(d)
 }
 
 \details{ The description here is only intended for \pkg{vegan}
@@ -97,6 +103,18 @@ hierParseFormula(formula, data)
   and a model frame with factors representing hierarchy levels 
   (right hand side) to be used in \code{\link{adipart}}, 
   \code{\link{multipart}} and \code{\link{hiersimu}}.
+
+  \code{GowerDblcen} performs the Gower double centring of a matrix of
+  dissimilarities. A similar function was earlier available as
+  compiled code in \pkg{stats}, but it is not part of the official
+  API, and therefore we have this poorer replacement.
+
+  \code{addLingoes} and \code{addCailliez} find the constant added to
+  non-diagonal (squared) dissimilarities to make all eigenvalues
+  non-negative in Principal Co-ordinates Analysis
+  (\code{\link{wcmdscale}}, \code{\link{capscale}}). Function
+  \code{\link{cmdscale}} implements the Cailliez method. The argument
+  is a matrix of dissimilarities.
 }
 
 \keyword{internal }
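
For orientation, a rough sketch of Gower double centring in plain R
(this is not the internal implementation, and gowerSketch is a
hypothetical name):

    gowerSketch <- function(d) {
        A <- -0.5 * as.matrix(d)^2     # minus half squared dissimilarities
        A <- sweep(A, 1, rowMeans(A))  # centre rows
        sweep(A, 2, colMeans(A))       # then centre columns
    }
    ## eigenvalues of the double-centred matrix are the PCoA eigenvalues
    eigen(gowerSketch(dist(USArrests)))$values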
diff --git a/man/wcmdscale.Rd b/man/wcmdscale.Rd
index 433e395..eaac3e7 100644
--- a/man/wcmdscale.Rd
+++ b/man/wcmdscale.Rd
@@ -26,9 +26,12 @@ wcmdscale(d, k, eig = FALSE, add = FALSE, x.ret = FALSE, w)
     represented in; must be in \eqn{\{1,2,\ldots,n-1\}}{{1,2,\ldots,n-1}}. 
     If missing, all dimensions with above zero eigenvalue.}
   \item{eig}{indicates whether eigenvalues should be returned.}
-  \item{add}{logical indicating if an additive constant \eqn{c*} should
-    be computed, and added to the non-diagonal dissimilarities such that
-    all \eqn{n-1} eigenvalues are non-negative. \strong{Not implemented}. }
+  \item{add}{an additive constant \eqn{c} is added to the non-diagonal
+    dissimilarities such that all \eqn{n-1} eigenvalues are
+    non-negative. Alternatives are \code{"lingoes"} (default, also
+    used with \code{TRUE}) and \code{"cailliez"} (which is the only
+    alternative in \code{\link{cmdscale}}). See Legendre & Anderson
+    (1999).}
   \item{x.ret}{indicates whether the doubly centred symmetric distance
     matrix should be returned.}
   \item{w}{Weights of points.}
@@ -52,7 +55,19 @@ wcmdscale(d, k, eig = FALSE, add = FALSE, x.ret = FALSE, w)
   with \code{eig = TRUE} or \code{x.ret = TRUE}, the function returns
   an object of class \code{"wcmdscale"} with \code{print},
   \code{plot}, \code{scores}, \code{\link{eigenvals}} and
-  \code{\link{stressplot}} methods, and described in section Value.  
+  \code{\link{stressplot}} methods, as described in section Value.
+
+  The method is Euclidean, and with non-Euclidean dissimilarities some
+  eigenvalues can be negative. If this disturbs you, it can be
+  avoided by adding a constant to the non-diagonal dissimilarities so
+  that all eigenvalues are non-negative. The function implements the
+  methods discussed by Legendre & Anderson (1999): the method of
+  Lingoes (\code{add="lingoes"}) adds the constant \eqn{c} to the
+  squared dissimilarities \eqn{d} using
+  \eqn{\sqrt{d^2 + 2 c}}{sqrt(d^2 + 2*c)}, and the method of Cailliez
+  (\code{add="cailliez"}) adds the constant to the dissimilarities
+  themselves using \eqn{d + c}. Legendre & Anderson (1999) recommend
+  the method of Lingoes, whereas base \R{} function
+  \code{\link{cmdscale}} implements the method of Cailliez.
 }
 
 \value{ If \code{eig = FALSE} and \code{x.ret = FALSE} (default), a
@@ -67,7 +82,10 @@ wcmdscale(d, k, eig = FALSE, add = FALSE, x.ret = FALSE, w)
   \item{eig}{the \eqn{n-1} eigenvalues computed during the scaling
     process if \code{eig} is true.}
   \item{x}{the doubly centred and weighted distance matrix if
-  \code{x.ret} is true.}
+    \code{x.ret} is true.}
+  \item{ac, add}{additive constant and adjustment method used to avoid
+    negative eigenvalues. These are \code{NA} and \code{FALSE} if no
+    adjustment was done.}
   \item{GOF}{Goodness of fit statistics for \code{k} axes. The first
     value is based on the sum of absolute values of all eigenvalues,
     and the second value is based on the sum of positive eigenvalues}
@@ -86,6 +104,10 @@ wcmdscale(d, k, eig = FALSE, add = FALSE, x.ret = FALSE, w)
   methods used in multivariate analysis.  
   \emph{Biometrika} \bold{53}, 325--328.
 
+  Legendre, P. & Anderson, M. J. (1999). Distance-based redundancy
+    analysis: testing multispecies responses in multifactorial
+    ecological experiments. \emph{Ecological Monographs} \bold{69}, 1--24.
+
   Mardia, K. V., Kent, J. T. and Bibby, J. M. (1979).  Chapter 14 of
   \emph{Multivariate Analysis}, London: Academic Press.
 }
@@ -111,8 +133,8 @@ ca <- cca(dune)
 ca$CA$eig - ord$eig
 ## Configurations are similar when site scores are scaled by
 ## eigenvalues in CA
-procrustes(ord, ca, choices=1:19, scaling = 1)
-plot(procrustes(ord, ca, choices=1:2, scaling=1))
+procrustes(ord, ca, choices=1:19, scaling = "sites")
+plot(procrustes(ord, ca, choices=1:2, scaling="sites"))
 ## Reconstruction of non-Euclidean distances with negative eigenvalues
 d <- vegdist(dune)
 ord <- wcmdscale(d, eig = TRUE)
diff --git a/src/nestedness.c b/src/nestedness.c
index 86b659e..e097a74 100644
--- a/src/nestedness.c
+++ b/src/nestedness.c
@@ -1,5 +1,6 @@
 #include <R.h>
 #include <Rmath.h>
+#include <R_ext/Utils.h> /* check user interrupts */
 
 /* Utility functions */
 
@@ -11,6 +12,8 @@
 
 void i2rand(int *vec, int imax)
 {
+    if (imax < 1)
+	error("needs at least 2 items");
     vec[0] = IRAND(imax);
     do {
 	vec[1] = IRAND(imax);
@@ -35,9 +38,9 @@ void i2rand(int *vec, int imax)
 
 #define INDX(i, j, nr) (i) + (nr)*(j)
 
-void quasiswap(int *m, int *nr, int *nc)
+void quasiswap(int *m, int *nr, int *nc, int *thin)
 {
-    int i, n, mtot, ss, row[2], col[2], nr1, nc1, a, b, c, d;
+    int i, intcheck, n, mtot, ss, row[2], col[2], nr1, nc1, a, b, c, d;
 
     nr1 = (*nr) - 1;
     nc1 = (*nc) - 1;
@@ -55,28 +58,35 @@ void quasiswap(int *m, int *nr, int *nc)
 
     /* Quasiswap while there are entries > 1 */
 
+    intcheck  = 0; /* check interrupts */
     while (ss > mtot) {
-	i2rand(row, nr1);
-	i2rand(col, nc1);
-	/* a,b,c,d notation for a 2x2 table */
-	a = INDX(row[0], col[0], *nr);
-	b = INDX(row[0], col[1], *nr);
-	c = INDX(row[1], col[0], *nr);
-	d = INDX(row[1], col[1], *nr);
-	if (m[a] > 0 && m[d] > 0 && m[a] + m[d] - m[b] - m[c] >= 2) {
-	    ss -= 2 * (m[a] + m[d] - m[b] - m[c] - 2);
-	    m[a]--;
-	    m[d]--;
-	    m[b]++;
-	    m[c]++;
-	} else if (m[b] > 0 && m[c] > 0 &&
-		   m[b] + m[c] - m[a] - m[d] >= 2) {
-	    ss -= 2 * (m[b] + m[c] - m[a] - m[d] - 2);
-	    m[a]++;
-	    m[d]++;
-	    m[b]--;
-	    m[c]--;
+	for (i = 0; i < *thin; i++) {
+	    i2rand(row, nr1);
+	    i2rand(col, nc1);
+	    /* a,b,c,d notation for a 2x2 table */
+	    a = INDX(row[0], col[0], *nr);
+	    b = INDX(row[0], col[1], *nr);
+	    c = INDX(row[1], col[0], *nr);
+	    d = INDX(row[1], col[1], *nr);
+	    if (m[a] > 0 && m[d] > 0 && m[a] + m[d] - m[b] - m[c] >= 2) {
+		ss -= 2 * (m[a] + m[d] - m[b] - m[c] - 2);
+		m[a]--;
+		m[d]--;
+		m[b]++;
+		m[c]++;
+	    } else if (m[b] > 0 && m[c] > 0 &&
+		       m[b] + m[c] - m[a] - m[d] >= 2) {
+		ss -= 2 * (m[b] + m[c] - m[a] - m[d] - 2);
+		m[a]++;
+		m[d]++;
+		m[b]--;
+		m[c]--;
+	    }
 	}
+	/* interrupt? */
+	if (intcheck % 1000 == 999)
+	    R_CheckUserInterrupt();
+	intcheck++;
     }
 
     /* Set R RNG */
@@ -129,12 +139,15 @@ void trialswap(int *m, int *nr, int *nc, int *thin)
 void swap(int *m, int *nr, int *nc, int *thin)
 {
 
-    int i, a, b, c, d, row[2], col[2], sX;
+    int i, intcheck, a, b, c, d, row[2], col[2], sX;
 
     GetRNGstate();
 
-    for (i=0; i < *thin; i++) {
+    for (i=0, intcheck=0; i < *thin; i++) {
 	for(;;) {
+	    if (intcheck % 1000 == 999)
+		R_CheckUserInterrupt();
+	    intcheck++;
 	    i2rand(row, (*nr) - 1);
 	    i2rand(col, (*nc) - 1);
 	    a = INDX(row[0], col[0], *nr);
@@ -163,6 +176,65 @@ void swap(int *m, int *nr, int *nc, int *thin)
     PutRNGstate();
 }
 
+/* Strona et al. 2014 (NATURE COMMUNICATIONS | 5:4114 |
+ * DOI:10.1038/ncomms5114 | www.nature.com/naturecommunications)
+ * suggested a boosted sequential binary swap method. Instead of
+ * looking for random 2x2 submatrices, they look for 2 rows and
+ * collect a list of unique species that occur only in one row, and
+ * allocate these randomly to rows preserving counts.
+ */
+
+/* uniq is a work vector to hold indices of unique species (occurring
+ * only in one of two random rows). uniq must be allocated in the
+ * calling function, with safe size 2 * (max. number of species) or
+ * with belt and suspenders 2 * (*nc). */
+
+void curveball(int *m, int *nr, int *nc, int *thin, int *uniq)
+{
+    int row[2], i, j, jind, ind, nsp1, nsp2, itmp, tmp;
+
+    /* Set RNG */
+    GetRNGstate(); 
+
+    for (i = 0; i < *thin; i++) {
+	/* Random sites */
+	i2rand(row, (*nr)-1);
+	/* uniq is a vector of unique species for a random pair of
+	   rows. It need not be zeroed between thin loops because ind
+	   keeps track of used elements. */
+	for (j = 0, ind = -1, nsp1 = 0, nsp2 = 0; j < (*nc); j++) {
+	    jind = j * (*nr);
+	    if (m[row[0] + jind] > 0 && m[row[1] + jind] == 0) {
+		uniq[++ind] = j;
+		nsp1++;
+	    }
+	    if (m[row[1] + jind] > 0 && m[row[0] + jind] == 0) {
+		uniq[++ind] = j;
+		nsp2++;
+	    }
+	}
+	/* uniq contains indices of unique species: shuffle these and
+	 * allocate nsp1 first to row[0] and the rest to row[1] */
+	if (nsp1 > 0 && nsp2 > 0) { /* something to swap? */
+	    for (j = ind; j >= nsp1; j--) {
+		tmp = uniq[j];
+		itmp = IRAND(j);
+		uniq[j] = uniq[itmp];
+		uniq[itmp] = tmp;
+	    }
+	    for (j = 0; j < nsp1; j++) {
+		m[INDX(row[0], uniq[j], *nr)] = 1;
+		m[INDX(row[1], uniq[j], *nr)] = 0;
+	    }
+	    for (j = nsp1; j <= ind; j++) {
+		m[INDX(row[0], uniq[j], *nr)] = 0;
+		m[INDX(row[1], uniq[j], *nr)] = 1;
+	    }
+	}
+    }
+
+    PutRNGstate();
+}
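
/* From R, the algorithm is reached through the null model interface;
 * a hedged sketch in R (the method name "curveball" is assumed from
 * make.commsim):
 *
 *   data(sipoo)
 *   ## null model test of nestedness using curveball swaps
 *   oecosimu(sipoo, nestedchecker, "curveball", nsimul = 99)
 */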
 
 /* 'swapcount' is a C translation of Peter Solymos's R code. It is
  * similar to 'swap', but can swap > 1 values and so works for
@@ -281,13 +353,14 @@ int isDiagFill(int *sm)
 
 void swapcount(int *m, int *nr, int *nc, int *thin)
 {
-    int row[2], col[2], k, ij[4], changed, 
+    int row[2], col[2], k, ij[4], changed, intcheck,
 	pm[4] = {1, -1, -1, 1} ;
     int sm[4], ev;
 
     GetRNGstate();
 
     changed = 0;
+    intcheck = 0;
     while (changed < *thin) {
 	/* Select a random 2x2 matrix*/
 	i2rand(row, *nr - 1);
@@ -305,6 +378,9 @@ void swapcount(int *m, int *nr, int *nc, int *thin)
 			m[ij[k]] += pm[k]*ev;
 		changed++;
 	}
+	if (intcheck % 1000 == 999)
+	    R_CheckUserInterrupt();
+	intcheck++;
     }
 
     PutRNGstate();
@@ -319,7 +395,7 @@ void swapcount(int *m, int *nr, int *nc, int *thin)
 
 void rswapcount(int *m, int *nr, int *nc, int *mfill)
 {
-    int row[2], col[2], i, k, ij[4], n, change, cfill,
+    int row[2], col[2], i, intcheck, k, ij[4], n, change, cfill,
        pm[4] = {1, -1, -1, 1} ;
     int sm[4], ev;
 
@@ -333,6 +409,7 @@ void rswapcount(int *m, int *nr, int *nc, int *mfill)
     GetRNGstate();
 
     /* Loop while fills differ */
+    intcheck = 0;
     while (cfill != *mfill) {
 	/* Select a random 2x2 matrix*/
 	i2rand(row, *nr - 1);
@@ -359,6 +436,9 @@ void rswapcount(int *m, int *nr, int *nc, int *mfill)
 		cfill += change;
 	    } 
 	}
+	if (intcheck % 1000 == 999)
+	    R_CheckUserInterrupt();
+	intcheck++;
     }
     PutRNGstate();
 }
@@ -404,12 +484,13 @@ int isDiagSimple(double *sm)
 
 void abuswap(double *m, int *nr, int *nc, int *thin, int *direct)
 {
-    int row[2], col[2], k, ij[4], changed, ev;
+    int row[2], col[2], k, ij[4], intcheck, changed, ev;
     double sm[4];
 
     GetRNGstate();
 
     changed = 0;
+    intcheck = 0;
     while (changed < *thin) {
 	/* Select a random 2x2 matrix*/
 	 i2rand(row, *nr - 1);
@@ -439,6 +520,9 @@ void abuswap(double *m, int *nr, int *nc, int *thin, int *direct)
 	      }
 	      changed++;
 	 }
+	 if (intcheck % 1000 == 999)
+	     R_CheckUserInterrupt();
+	 intcheck++;
     }
     
     PutRNGstate();
diff --git a/vignettes/decision-vegan.Rnw b/vignettes/decision-vegan.Rnw
index 5658dcb..6b245cd 100644
--- a/vignettes/decision-vegan.Rnw
+++ b/vignettes/decision-vegan.Rnw
@@ -91,9 +91,8 @@ non-parallel computation.  The \code{mc.cores} option can be set by
the environment variable \code{MC_CORES} when the \pkg{parallel}
 package is loaded.
 
-\R{} allows\footnote{Since \R{} version 2.15.0.}
-setting up a default socket cluster (\code{setDefaultCluster}), but
-this will not be used in \pkg{vegan}. 
+\R{} allows setting up a default socket cluster
+(\code{setDefaultCluster}), but this will not be used in \pkg{vegan}.
 
 \subsubsection{Setting up socket clusters}
 \label{sec:parallel:socket}
@@ -379,39 +378,23 @@ This chapter discusses the scaling of scores (results) in redundancy
 analysis and principal component analysis performed by function
 \code{rda} in the \pkg{vegan} library.  
 
-Principal component analysis, and hence redundancy analysis, is a case
-of singular value decomposition (\textsc{svd}).  Functions
-\code{rda} and \code{prcomp} even use \textsc{svd} internally in
-their algorithm.
-
-In \textsc{svd} a centred data matrix $\mathbf{X} = \{x_{ij}\}$ is decomposed into orthogonal
-components so that $x_{ij} = \sum_k \sigma_k u_{ik} v_{jk}$, where
-$u_{ik}$ and $v_{jk}$ are orthonormal coefficient matrices and
-$\sigma_k$ are singular values.  Orthonormality means that sums of
-squared columns is one and their cross-product is zero, or $\sum_i
-u_{ik}^2 = \sum_j v_{jk}^2 = 1$, and $\sum_i u_{ik} u_{il} = \sum_j
-v_{jk} v_{jl} = 0$ for $k \neq l$. This is a decomposition, and the
-original matrix is found exactly from the singular vectors and
-corresponding singular values, and first two singular components give
-the rank $=2$ least squares estimate of the original matrix.
-
-Principal component analysis is often presented (and performed in
-legacy software) as an eigenanalysis of covariance matrices.  Instead
-of a data matrix, we analyse a matrix of covariances and variances
-$\mathbf{S}$.  The result are orthonormal coefficient matrix
-$\mathbf{U}$ and eigenvalues $\mathbf{\Lambda}$.  The coefficients
-$u_{ik}$ ares identical to \textsc{svd} (except for possible sign
-changes), and eigenvalues $\lambda_k$ are related to the corresponding
-singular values by $\lambda_k = \sigma_k^2 /(n-1)$.  With classical
-definitions, the sum of all eigenvalues equals the sum of variances of
-species, or $\sum_k \lambda_k = \sum_j s_j^2$, and it is often said
-that first axes explain a certain proportion of total variance in the
-data.  The orthonormal matrix $\mathbf{V}$ of \textsc{svd} can be
-found indirectly as well, so that we have the same components in both
-methods.
+Principal component analysis decomposes a centred data matrix
+$\mathbf{X} = \{x_{ij}\}$ into $K$ orthogonal components so that
+$x_{ij} = \sqrt{n-1} \sum_{k=1}^K u_{ik} \sqrt{\lambda_k} v_{jk}$,
+where $u_{ik}$ and $v_{jk}$ are orthonormal coefficient matrices and
+$\lambda_k$ are eigenvalues. In \pkg{vegan} the eigenvalues sum up to
+the variance of the data, and therefore we need to multiply by the
+square root of the degrees of freedom, $n-1$.  Orthonormality means
+that the sums of squared columns are one and their cross-products are
+zero, or $\sum_i u_{ik}^2 = \sum_j v_{jk}^2 = 1$, and
+$\sum_i u_{ik} u_{il} = \sum_j v_{jk} v_{jl} = 0$ for $k \neq l$. This
+is a decomposition, and the original matrix is recovered exactly from
+the coefficients and eigenvalues, and the first two components give
+the rank $=2$ least squares estimate of the original matrix.
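
The decomposition is easy to verify numerically; a sketch with
\code{svd} (not the \code{rda} internals):

    X <- scale(USArrests, scale = FALSE)   # centred data matrix
    s <- svd(X)
    lambda <- s$d^2 / (nrow(X) - 1)        # eigenvalues, vegan style
    Xhat <- sqrt(nrow(X) - 1) * s$u %*% diag(sqrt(lambda)) %*% t(s$v)
    all.equal(c(X), c(Xhat))               # TRUE: exact recovery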
 
 The coefficients $u_{ik}$ and $v_{jk}$ are scaled to unit length for all
-axes $k$. Singular values $\sigma_k$ or eigenvalues $\lambda_k$ give
+axes $k$. Eigenvalues $\lambda_k$ give
information on the importance of the axes, or the `axis lengths.'
 Instead of the orthonormal coefficients, or equal length axes, it is
 customary to scale species (column) or site (row) scores or both by
@@ -446,17 +429,23 @@ weighted averaging scores have somewhat wider dispersion.
 \code{prcomp, princomp} &
 $u_{ik} \sqrt{n-1} \sqrt{\lambda_k}$ &
 $v_{jk}$ \\
-\code{rda, scaling=1} &
+\code{stats::biplot} &
+$u_{ik}$ &
+$v_{jk} \sqrt{n} \sqrt{\lambda_k}$ \\
+\code{stats::biplot, pc.biplot=TRUE} &
+$u_{ik} \sqrt{n-1}$ &
+$v_{jk} \sqrt{\lambda_k}$\\
+\code{rda, scaling="sites"} &
 $u_{ik} \sqrt{\lambda_k/ \sum \lambda_k} \times \mathrm{const}$ &
 $v_{jk} \times \mathrm{const}$
 \\
-\code{rda, scaling=2} &
+\code{rda, scaling="species"} &
 $u_{ik} \times \mathrm{const}$ &
 $v_{jk} \sqrt{\lambda_k/ \sum \lambda_k} \times \mathrm{const}$  \\
-\code{rda, scaling=3} &
+\code{rda, scaling="symmetric"} &
 $u_{ik} \sqrt[4]{\lambda_k/ \sum \lambda_k} \times \mathrm{const}$ &
 $v_{jk} \sqrt[4]{\lambda_k/ \sum \lambda_k} \times \mathrm{const}$ \\
-\code{rda, scaling < 0} &
+\code{rda, correlation=TRUE} &
 $u_{ik}^*$ &
 $\sqrt{\sum \lambda_k /(n-1)} s_j^{-1} v_{jk}^*$
 \\
@@ -474,17 +463,15 @@ $\sqrt{\sum \lambda_k /(n-1)} s_j^{-1} v_{jk}^*$
 \end{tabular}
 \end{table*}
 
-
-
 In community ecology, it is common to plot both species and sites in
-the same graph.  If this graph is a graphical display of \textsc{svd},
+the same graph.  If this graph is a graphical display of \textsc{pca},
 or a graphical, low-dimensional approximation of the data, the graph
 is called a biplot.  The graph is a biplot if the transformed scores
satisfy $x_{ij} = c \sum_k u_{ik}^* v_{jk}^*$ where $c$ is a scaling
-constant.  In functions \code{princomp}, \code{prcomp} and
-\code{rda}, $c=1$ and the plotted scores are a biplot so that the
-singular values (or eigenvalues) are expressed for sites, and species
-are left unscaled.  
+constant.  In functions \code{princomp}, \code{prcomp} and \code{rda}
+with \code{scaling = "sites"}, the plotted scores define a biplot so that
+the eigenvalues are expressed for sites, and species are left
+unscaled.
 % For \texttt{Canoco 3} $c = n^{-1} \sqrt{n-1}
 % \sqrt{\sum \lambda_k}$ with negative \proglang{Canoco} scaling
 % values. All these $c$ are constants for a matrix, so these are all
@@ -496,22 +483,23 @@ are left unscaled.
 
 There is no natural way of scaling species and site scores to each
 other.  The eigenvalues in redundancy and principal components
-analysis are scale-dependent and change when the  data are
-multiplied by a constant.  If we have percent cover data, the
-eigenvalues are typically very high, and the scores scaled by
-eigenvalues will have much wider dispersion than the orthonormal set.
-If we express the percentages as proportions, and divide the matrix by
-$100$, the eigenvalues will be reduced by factor $100^2$, and the
-scores scaled by eigenvalues will have a narrower dispersion.  For
-graphical biplots we should be able to fix the relations of row and
-column scores to be invariant against scaling of data.  The solution
-in \proglang{R} standard function \code{biplot} is to scale site and species
-scores independently, and typically very differently, but plot each
-independently to fill the graph area.  The solution in \proglang{Canoco} and 
-\code{rda} is to use proportional eigenvalues $\lambda_k / \sum
-\lambda_k$ instead of original eigenvalues.  These proportions are
-invariant with scale changes, and typically they have a nice range for
-plotting two data sets in the same graph.
+analysis are scale-dependent and change when the data are multiplied
+by a constant.  If we have percent cover data, the eigenvalues are
+typically very high, and the scores scaled by eigenvalues will have
+much wider dispersion than the orthonormal set.  If we express the
+percentages as proportions, and divide the matrix by $100$, the
+eigenvalues will be reduced by factor $100^2$, and the scores scaled
+by eigenvalues will have a narrower dispersion.  For graphical biplots
+we should be able to fix the relations of row and column scores to be
+invariant against scaling of data.  The solution in \proglang{R}
+standard function \code{biplot} is to scale site and species scores
+independently, and typically very differently
+(Table~\ref{tab:scales}), but plot each independently to fill the
+graph area.  The solution in \proglang{Canoco} and \code{rda} is to
+use proportional eigenvalues $\lambda_k / \sum \lambda_k$ instead of
+original eigenvalues.  These proportions are invariant with scale
+changes, and typically they have a nice range for plotting two data
+sets in the same graph.
 
 The \textbf{vegan} package uses a scaling constant $c = \sqrt[4]{(n-1)
   \sum \lambda_k}$ in order to be able to use scaling by proportional
@@ -544,24 +532,19 @@ other software or \proglang{R} functions (Table \ref{tab:rdaconst}).
 \end{tabular}
 \end{table*}
 
-In this chapter, I used always centred data matrices.  In principle
-\textsc{svd} could be done with original, non-centred data, but
-there is no option for this in \code{rda}, because I think that
-non-centred analysis is dubious and I do not want to encourage its use
-(if you think you need it, you are certainly so good in programming
-that you can change that one line in \code{rda.default}).  I do
-think that the arguments for non-centred analysis are often twisted,
-and the method is not very good for its intended purpose, but there
-are better methods for finding fuzzy classes.  Normal, centred
-analysis moves the origin to the average of all species, and the
-dimensions describe differences from this average.  Non-centred
-analysis leaves the origin in the empty site with no species, and the
-first axis usually runs from the empty site to the average
-site. Second and third non-centred components are often very similar
-to first and second (etc.) centred components, and the best way to use
-non-centred analysis is to discard the first component and use only
-the rest. This is better done with directly centred analysis.
-
+The scaling is controlled by three arguments in the \code{scores}
+function in \pkg{vegan} (a short sketch follows the list):
+\begin{enumerate}
+  \item \code{scaling} with options \code{"sites"}, \code{"species"}
+    and \code{"symmetric"} defines the set of scores which is scaled
+    by eigenvalues (Table~\ref{tab:scales}).
+  \item \code{const} can be used to set the numeric scaling constant
+    to non-default values (Table~\ref{tab:rdaconst}).
+  \item \code{correlation} can be used to modify species scores so
+    that they show the relative change of species abundance, or their
+    correlation with the ordination (Table~\ref{tab:scales}). This is
+    no longer a biplot scaling.
+\end{enumerate}
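
A minimal sketch of the three arguments in use, assuming the dune data
from \pkg{vegan}:

    mod <- rda(dune)
    head(scores(mod, display = "sites", scaling = "sites"))
    head(scores(mod, display = "species", scaling = "species",
                correlation = TRUE))
    scores(mod, scaling = "symmetric", const = 1)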
 
 \section{Weighted average and linear combination scores}
 
diff --git a/vignettes/intro-vegan.Rnw b/vignettes/intro-vegan.Rnw
index 1be1c38..30bc862 100644
--- a/vignettes/intro-vegan.Rnw
+++ b/vignettes/intro-vegan.Rnw
@@ -198,9 +198,10 @@ methods you can try:
 
 \pkg{Vegan} has a group of functions for adding information about
 classification or grouping of points onto ordination diagrams.
-Function \code{ordihull} adds convex hulls, \code{ordiellipse}
-adds ellipses of standard deviation, standard error or confidence
-areas, and \code{ordispider} combines items to their centroid
+Function \code{ordihull} adds convex hulls, \code{ordiellipse} adds
+ellipses enclosing all points in the group (ellipsoid hulls) or
+ellipses of standard deviation, standard error or confidence areas,
+and \code{ordispider} combines items to their centroid
 (Fig. \ref{fig:ordihull}):
 <<>>=
 data(dune.env)
@@ -208,16 +209,17 @@ attach(dune.env)
 @
 <<a>>=
 plot(ord, disp="sites", type="n")
-ordihull(ord, Management, col="blue")
-ordiellipse(ord, Management, col=3,lwd=2)
-ordispider(ord, Management, col="red", label = TRUE)
+ordihull(ord, Management, col=1:4, lwd=3)
+ordiellipse(ord, Management, col=1:4, kind = "ehull", lwd=3)
+ordiellipse(ord, Management, col=1:4, draw="polygon")
+ordispider(ord, Management, col=1:4, label = TRUE)
 points(ord, disp="sites", pch=21, col="red", bg="yellow", cex=1.3)
 @
 \begin{figure}
 <<fig=true,echo=false>>=
 <<a>>
 @
-\caption{Convex hull, standard error ellipse and a spider web diagram
+\caption{Convex hull, ellipsoid hull, standard error ellipse and a spider web diagram
   for Management levels in ordination.}
 \label{fig:ordihull}
 \end{figure}

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/r-cran-vegan.git



More information about the debian-med-commit mailing list